2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* NOTE(review): this listing is an excerpt — the embedded original line
 * numbers show that many source lines (braces, declarations) are elided. */
56 #define VERSION "2.14"
/* ERTM (Enhanced Retransmission Mode) is off by default; module parameter. */
58 static int enable_ertm = 0;
/* Feature mask / fixed-channel map advertised in information responses. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
/* Workqueue used by the local-busy handling (l2cap_busy_work). */
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for helpers defined later in this file. */
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* sk_timer callback: pick an error reason from the socket state and tear
 * the socket down via __l2cap_sock_close().  (Locking and the default
 * reason are on elided lines — confirm against the full source.) */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
/* (Re)arm sk_timer to fire 'timeout' jiffies from now. */
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer. */
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list by destination CID.
 * Caller must hold l->lock. */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
/* Same scan keyed by source CID.  Caller must hold l->lock. */
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid);
148 read_unlock(&l->lock);
/* Scan by pending signalling command identifier.  Caller holds l->lock. */
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
/* Pick the first unused CID in the dynamic range; presumably returns 0
 * when the range is exhausted (return paths are elided — verify). */
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk at the head of the doubly linked channel list.
 * Caller must hold the list lock (see l2cap_chan_add). */
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
/* Detach sk from the channel list, fixing up neighbours under the
 * write lock. */
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/* Attach a socket to a connection: assign CIDs based on socket type and
 * link it into the connection's channel list.  Caller holds the list
 * write lock (see l2cap_chan_add). */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
/* If created from a listening socket, queue on the parent's accept list. */
243 bt_accept_enqueue(parent, sk);
/* Delete channel.
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
/* Drop the hci_conn reference taken when the channel was added. */
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: unlink from parent and wake the listener. */
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
276 skb_queue_purge(TX_QUEUE(sk));
/* ERTM teardown: stop all retransmission machinery and free queued
 * SREJ state. */
278 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
279 struct srej_list *l, *tmp;
281 del_timer(&l2cap_pi(sk)->retrans_timer);
282 del_timer(&l2cap_pi(sk)->monitor_timer);
283 del_timer(&l2cap_pi(sk)->ack_timer);
285 skb_queue_purge(SREJ_QUEUE(sk));
286 skb_queue_purge(BUSY_QUEUE(sk));
288 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
295 /* Service level security */
/* Map the channel's PSM/sec_level to an HCI authentication type and ask
 * the HCI layer to enforce it.  PSM 0x0001 (SDP) never requires bonding. */
296 static inline int l2cap_check_security(struct sock *sk)
298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
301 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
303 auth_type = HCI_AT_NO_BONDING_MITM;
305 auth_type = HCI_AT_NO_BONDING;
/* SDP never needs more than BT_SECURITY_SDP. */
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
308 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
310 switch (l2cap_pi(sk)->sec_level) {
311 case BT_SECURITY_HIGH:
312 auth_type = HCI_AT_GENERAL_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 auth_type = HCI_AT_GENERAL_BONDING;
318 auth_type = HCI_AT_NO_BONDING;
323 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate a signalling-command identifier for this connection. */
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 333 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn->lock);
/* Wrap back into the kernel range after 128. */
339 if (++conn->tx_ident > 128)
344 spin_unlock_bh(&conn->lock);
/* Build a signalling command and push it on the ACL link. */
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
353 BT_DBG("code 0x%2.2x", code);
358 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame carrying 'control', appending a
 * CRC16 FCS when the channel negotiated it.  Consumes the pending
 * F-bit / P-bit state flags. */
361 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 struct l2cap_hdr *lh;
365 struct l2cap_conn *conn = pi->conn;
366 struct sock *sk = (struct sock *)pi;
367 int count, hlen = L2CAP_HDR_SIZE + 2;
369 if (sk->sk_state != BT_CONNECTED)
/* FCS adds two bytes to the header length (increment elided here). */
372 if (pi->fcs == L2CAP_FCS_CRC16)
375 BT_DBG("pi %p, control 0x%2.2x", pi, control);
377 count = min_t(unsigned int, conn->mtu, hlen);
378 control |= L2CAP_CTRL_FRAME_TYPE;
380 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
381 control |= L2CAP_CTRL_FINAL;
382 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
385 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
386 control |= L2CAP_CTRL_POLL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
390 skb = bt_skb_alloc(count, GFP_ATOMIC);
394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
395 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
396 lh->cid = cpu_to_le16(pi->dcid);
397 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the trailing two FCS bytes. */
399 if (pi->fcs == L2CAP_FCS_CRC16) {
400 u16 fcs = crc16(0, (u8 *)lh, count - 2);
401 put_unaligned_le16(fcs, skb_put(skb, 2));
404 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR, or RNR when we are locally busy (and remember RNR was sent). */
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
413 control |= L2CAP_SUPER_RCV_READY;
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
417 l2cap_send_sframe(pi, control);
/* True when no connect request is outstanding for this channel. */
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Start channel establishment: send CONN_REQ once the remote feature
 * mask is known, otherwise kick off an INFO_REQ exchange first. */
425 static void l2cap_do_start(struct sock *sk)
427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
429 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange in flight — wait for it to finish. */
430 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
434 struct l2cap_conn_req req;
435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
436 req.psm = l2cap_pi(sk)->psm;
438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
439 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
441 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
442 L2CAP_CONN_REQ, sizeof(req), &req);
/* First channel on this link: query the remote feature mask. */
445 struct l2cap_info_req req;
446 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
448 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
449 conn->info_ident = l2cap_get_ident(conn);
451 mod_timer(&conn->info_timer, jiffies +
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
454 l2cap_send_cmd(conn, conn->info_ident,
455 L2CAP_INFO_REQ, sizeof(req), &req);
/* Abort a channel: purge pending TX, stop ERTM timers and issue a
 * DISCONN_REQ, moving the socket to BT_DISCONN. */
459 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
461 struct l2cap_disconn_req req;
466 skb_queue_purge(TX_QUEUE(sk));
468 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
469 del_timer(&l2cap_pi(sk)->retrans_timer);
470 del_timer(&l2cap_pi(sk)->monitor_timer);
471 del_timer(&l2cap_pi(sk)->ack_timer);
474 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
476 l2cap_send_cmd(conn, l2cap_get_ident(conn),
477 L2CAP_DISCONN_REQ, sizeof(req), &req);
479 sk->sk_state = BT_DISCONN;
483 /* ---- L2CAP connections ---- */
/* Walk every channel on the link and advance its connection state:
 * send CONN_REQ for BT_CONNECT channels, answer pending CONN_RSP for
 * BT_CONNECT2 channels (deferring when setup or security is pending). */
484 static void l2cap_conn_start(struct l2cap_conn *conn)
486 struct l2cap_chan_list *l = &conn->chan_list;
489 BT_DBG("conn %p", conn);
493 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in setup. */
496 if (sk->sk_type != SOCK_SEQPACKET &&
497 sk->sk_type != SOCK_STREAM) {
502 if (sk->sk_state == BT_CONNECT) {
503 if (l2cap_check_security(sk) &&
504 __l2cap_no_conn_pending(sk)) {
505 struct l2cap_conn_req req;
506 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
507 req.psm = l2cap_pi(sk)->psm;
509 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
512 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
513 L2CAP_CONN_REQ, sizeof(req), &req);
515 } else if (sk->sk_state == BT_CONNECT2) {
516 struct l2cap_conn_rsp rsp;
/* Note: rsp.scid/dcid are from the remote's point of view. */
517 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
518 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
520 if (l2cap_check_security(sk)) {
521 if (bt_sk(sk)->defer_setup) {
522 struct sock *parent = bt_sk(sk)->parent;
523 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
524 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
525 parent->sk_data_ready(parent, 0);
528 sk->sk_state = BT_CONFIG;
529 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
530 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
533 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
534 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
544 read_unlock(&l->lock);
/* ACL link is up: mark signalling-only sockets connected immediately
 * and start channel setup for connecting ones. */
547 static void l2cap_conn_ready(struct l2cap_conn *conn)
549 struct l2cap_chan_list *l = &conn->chan_list;
552 BT_DBG("conn %p", conn);
556 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
559 if (sk->sk_type != SOCK_SEQPACKET &&
560 sk->sk_type != SOCK_STREAM) {
561 l2cap_sock_clear_timer(sk);
562 sk->sk_state = BT_CONNECTED;
563 sk->sk_state_change(sk);
564 } else if (sk->sk_state == BT_CONNECT)
570 read_unlock(&l->lock);
573 /* Notify sockets that we cannot guaranty reliability anymore */
574 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
576 struct l2cap_chan_list *l = &conn->chan_list;
579 BT_DBG("conn %p", conn);
/* Only channels that asked for forced reliability are signalled. */
583 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
584 if (l2cap_pi(sk)->force_reliable)
588 read_unlock(&l->lock);
/* Feature-mask request timed out: give up waiting and proceed with
 * whatever channels are queued. */
591 static void l2cap_info_timeout(unsigned long arg)
593 struct l2cap_conn *conn = (void *) arg;
595 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
596 conn->info_ident = 0;
598 l2cap_conn_start(conn);
/* Find or create the l2cap_conn attached to an hci_conn.  Returns the
 * existing one when already present (early-return path elided). */
601 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
603 struct l2cap_conn *conn = hcon->l2cap_data;
608 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
612 hcon->l2cap_data = conn;
615 BT_DBG("hcon %p conn %p", hcon, conn);
617 conn->mtu = hcon->hdev->acl_mtu;
618 conn->src = &hcon->hdev->bdaddr;
619 conn->dst = &hcon->dst;
623 spin_lock_init(&conn->lock);
624 rwlock_init(&conn->chan_list.lock);
626 setup_timer(&conn->info_timer, l2cap_info_timeout,
627 (unsigned long) conn);
/* Default disconnect reason: 0x13 (remote user terminated). */
629 conn->disc_reason = 0x13;
/* Tear down an l2cap_conn: free any partial reassembly skb, delete all
 * channels with 'err', stop the info timer and detach from the hcon. */
634 static void l2cap_conn_del(struct hci_conn *hcon, int err)
636 struct l2cap_conn *conn = hcon->l2cap_data;
642 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
644 kfree_skb(conn->rx_skb);
647 while ((sk = conn->chan_list.head)) {
649 l2cap_chan_del(sk, err);
654 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
655 del_timer_sync(&conn->info_timer);
657 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
661 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
663 struct l2cap_chan_list *l = &conn->chan_list;
664 write_lock_bh(&l->lock);
665 __l2cap_chan_add(conn, sk, parent);
666 write_unlock_bh(&l->lock);
669 /* ---- Socket interface ---- */
/* Exact-match lookup: source PSM + source bdaddr.  Caller must hold
 * l2cap_sk_list.lock. */
670 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
673 struct hlist_node *node;
674 sk_for_each(sk, node, &l2cap_sk_list.head)
675 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
682 /* Find socket with psm and source bdaddr.
683 * Returns closest match.
 * An exact source-address match wins; a BDADDR_ANY listener is kept as
 * fallback in sk1. */
685 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
687 struct sock *sk = NULL, *sk1 = NULL;
688 struct hlist_node *node;
690 sk_for_each(sk, node, &l2cap_sk_list.head) {
691 if (state && sk->sk_state != state)
694 if (l2cap_pi(sk)->psm == psm) {
696 if (!bacmp(&bt_sk(sk)->src, src))
700 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
704 return node ? sk : sk1;
707 /* Find socket with given address (psm, src).
708 * Returns locked socket */
709 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
712 read_lock(&l2cap_sk_list.lock);
713 s = __l2cap_get_sock_by_psm(state, psm, src);
716 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued on the socket. */
720 static void l2cap_sock_destruct(struct sock *sk)
724 skb_queue_purge(&sk->sk_receive_queue);
725 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then zap
 * the parent itself. */
728 static void l2cap_sock_cleanup_listen(struct sock *parent)
732 BT_DBG("parent %p", parent);
734 /* Close not yet accepted channels */
735 while ((sk = bt_accept_dequeue(parent, NULL)))
736 l2cap_sock_close(sk);
738 parent->sk_state = BT_CLOSED;
739 sock_set_flag(parent, SOCK_ZAPPED);
742 /* Kill socket (only if zapped and orphan)
743 * Must be called on unlocked socket.
745 static void l2cap_sock_kill(struct sock *sk)
747 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
750 BT_DBG("sk %p state %d", sk, sk->sk_state);
752 /* Kill poor orphan */
753 bt_sock_unlink(&l2cap_sk_list, sk);
754 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listening sockets sweep their accept queue;
 * connected channels send DISCONN_REQ; half-open ones answer the peer
 * with a reject before deletion. */
758 static void __l2cap_sock_close(struct sock *sk, int reason)
760 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
762 switch (sk->sk_state) {
764 l2cap_sock_cleanup_listen(sk);
769 if (sk->sk_type == SOCK_SEQPACKET ||
770 sk->sk_type == SOCK_STREAM) {
771 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
773 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
774 l2cap_send_disconn_req(conn, sk, reason);
776 l2cap_chan_del(sk, reason);
/* BT_CONNECT2: a CONN_REQ was received but never answered — reject it. */
780 if (sk->sk_type == SOCK_SEQPACKET ||
781 sk->sk_type == SOCK_STREAM) {
782 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
783 struct l2cap_conn_rsp rsp;
786 if (bt_sk(sk)->defer_setup)
787 result = L2CAP_CR_SEC_BLOCK;
789 result = L2CAP_CR_BAD_PSM;
791 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
792 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
793 rsp.result = cpu_to_le16(result);
794 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
795 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
796 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
798 l2cap_chan_del(sk, reason);
803 l2cap_chan_del(sk, reason);
807 sock_set_flag(sk, SOCK_ZAPPED);
812 /* Must be called on unlocked socket. */
813 static void l2cap_sock_close(struct sock *sk)
815 l2cap_sock_clear_timer(sk);
817 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a fresh L2CAP socket.  With a parent (incoming accept)
 * the child inherits the parent's configuration; otherwise defaults
 * apply, with ERTM chosen for SOCK_STREAM when enable_ertm is set. */
822 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
824 struct l2cap_pinfo *pi = l2cap_pi(sk);
829 sk->sk_type = parent->sk_type;
830 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
832 pi->imtu = l2cap_pi(parent)->imtu;
833 pi->omtu = l2cap_pi(parent)->omtu;
834 pi->mode = l2cap_pi(parent)->mode;
835 pi->fcs = l2cap_pi(parent)->fcs;
836 pi->max_tx = l2cap_pi(parent)->max_tx;
837 pi->tx_win = l2cap_pi(parent)->tx_win;
838 pi->sec_level = l2cap_pi(parent)->sec_level;
839 pi->role_switch = l2cap_pi(parent)->role_switch;
840 pi->force_reliable = l2cap_pi(parent)->force_reliable;
842 pi->imtu = L2CAP_DEFAULT_MTU;
844 if (enable_ertm && sk->sk_type == SOCK_STREAM)
845 pi->mode = L2CAP_MODE_ERTM;
847 pi->mode = L2CAP_MODE_BASIC;
848 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
849 pi->fcs = L2CAP_FCS_CRC16;
850 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
851 pi->sec_level = BT_SECURITY_LOW;
853 pi->force_reliable = 0;
856 /* Default config options */
858 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
859 skb_queue_head_init(TX_QUEUE(sk));
860 skb_queue_head_init(SREJ_QUEUE(sk));
861 skb_queue_head_init(BUSY_QUEUE(sk));
862 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor registered with the Bluetooth socket layer. */
865 static struct proto l2cap_proto = {
867 .owner = THIS_MODULE,
868 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate an l2cap socket, wire up callbacks/timer and link it into
 * the global socket list. */
871 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
875 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
879 sock_init_data(sock, sk);
880 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
882 sk->sk_destruct = l2cap_sock_destruct;
883 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
885 sock_reset_flag(sk, SOCK_ZAPPED);
887 sk->sk_protocol = proto;
888 sk->sk_state = BT_OPEN;
890 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
892 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) entry point: validate type, require CAP_NET_RAW for
 * non-kernel raw sockets, then allocate and initialise. */
896 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
901 BT_DBG("sock %p", sock);
903 sock->state = SS_UNCONNECTED;
905 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
906 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
907 return -ESOCKTNOSUPPORT;
909 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
912 sock->ops = &l2cap_sock_ops;
914 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
918 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the sockaddr_l2, enforce the privileged-PSM rule
 * (PSMs below 0x1001 need CAP_NET_BIND_SERVICE), reject duplicate
 * bindings, and record source address + PSM. */
922 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
924 struct sock *sk = sock->sk;
925 struct sockaddr_l2 la;
930 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy only as much as the caller supplied, zero-filling the rest. */
933 memset(&la, 0, sizeof(la));
934 len = min_t(unsigned int, sizeof(la), alen);
935 memcpy(&la, addr, len);
942 if (sk->sk_state != BT_OPEN) {
947 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
948 !capable(CAP_NET_BIND_SERVICE)) {
953 write_lock_bh(&l2cap_sk_list.lock);
/* Fail if some socket already bound this (psm, bdaddr) pair. */
955 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
958 /* Save source address */
959 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
960 l2cap_pi(sk)->psm = la.l2_psm;
961 l2cap_pi(sk)->sport = la.l2_psm;
962 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) use the relaxed SDP security level. */
964 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
965 __le16_to_cpu(la.l2_psm) == 0x0003)
966 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
969 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve the route to 'dst', derive the HCI auth type from socket type
 * and security level, create the ACL link, and attach this channel to
 * the resulting l2cap_conn. */
976 static int l2cap_do_connect(struct sock *sk)
978 bdaddr_t *src = &bt_sk(sk)->src;
979 bdaddr_t *dst = &bt_sk(sk)->dst;
980 struct l2cap_conn *conn;
981 struct hci_conn *hcon;
982 struct hci_dev *hdev;
986 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
989 hdev = hci_get_route(dst, src);
991 return -EHOSTUNREACH;
993 hci_dev_lock_bh(hdev);
/* Raw sockets map security level to dedicated-bonding auth types. */
997 if (sk->sk_type == SOCK_RAW) {
998 switch (l2cap_pi(sk)->sec_level) {
999 case BT_SECURITY_HIGH:
1000 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1002 case BT_SECURITY_MEDIUM:
1003 auth_type = HCI_AT_DEDICATED_BONDING;
1006 auth_type = HCI_AT_NO_BONDING;
/* SDP PSM never bonds; same policy as l2cap_check_security(). */
1009 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1010 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1011 auth_type = HCI_AT_NO_BONDING_MITM;
1013 auth_type = HCI_AT_NO_BONDING;
1015 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1016 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1018 switch (l2cap_pi(sk)->sec_level) {
1019 case BT_SECURITY_HIGH:
1020 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1022 case BT_SECURITY_MEDIUM:
1023 auth_type = HCI_AT_GENERAL_BONDING;
1026 auth_type = HCI_AT_NO_BONDING;
1031 hcon = hci_connect(hdev, ACL_LINK, dst,
1032 l2cap_pi(sk)->sec_level, auth_type);
1036 conn = l2cap_conn_add(hcon, 0);
1044 /* Update source addr of the socket */
1045 bacpy(src, conn->src);
1047 l2cap_chan_add(conn, sk, NULL);
1049 sk->sk_state = BT_CONNECT;
1050 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: signalling-only sockets become connected at once. */
1052 if (hcon->state == BT_CONNECTED) {
1053 if (sk->sk_type != SOCK_SEQPACKET &&
1054 sk->sk_type != SOCK_STREAM) {
1055 l2cap_sock_clear_timer(sk);
1056 sk->sk_state = BT_CONNECTED;
1062 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and mode, record destination, kick
 * off l2cap_do_connect() and optionally wait for BT_CONNECTED. */
1067 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1069 struct sock *sk = sock->sk;
1070 struct sockaddr_l2 la;
1073 BT_DBG("sk %p", sk);
1075 if (!addr || alen < sizeof(addr->sa_family) ||
1076 addr->sa_family != AF_BLUETOOTH)
1079 memset(&la, 0, sizeof(la));
1080 len = min_t(unsigned int, sizeof(la), alen);
1081 memcpy(&la, addr, len);
/* Connection-oriented sockets need a PSM (condition partly elided). */
1088 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1094 switch (l2cap_pi(sk)->mode) {
1095 case L2CAP_MODE_BASIC:
1097 case L2CAP_MODE_ERTM:
1098 case L2CAP_MODE_STREAMING:
1107 switch (sk->sk_state) {
1111 /* Already connecting */
1115 /* Already connected */
1128 /* Set destination address and psm */
1129 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1130 l2cap_pi(sk)->psm = la.l2_psm;
1132 err = l2cap_do_connect(sk);
1137 err = bt_sock_wait_state(sk, BT_CONNECTED,
1138 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound connection-oriented sockets may listen.  If no
 * PSM was bound, auto-allocate an odd PSM in 0x1001..0x10ff. */
1144 static int l2cap_sock_listen(struct socket *sock, int backlog)
1146 struct sock *sk = sock->sk;
1149 BT_DBG("sk %p backlog %d", sk, backlog);
1153 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1154 || sk->sk_state != BT_BOUND) {
1159 switch (l2cap_pi(sk)->mode) {
1160 case L2CAP_MODE_BASIC:
1162 case L2CAP_MODE_ERTM:
1163 case L2CAP_MODE_STREAMING:
1172 if (!l2cap_pi(sk)->psm) {
1173 bdaddr_t *src = &bt_sk(sk)->src;
1178 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd (LSB of the low octet must be 1). */
1180 for (psm = 0x1001; psm < 0x1100; psm += 2)
1181 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1182 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1183 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1188 write_unlock_bh(&l2cap_sk_list.lock);
1194 sk->sk_max_ack_backlog = backlog;
1195 sk->sk_ack_backlog = 0;
1196 sk->sk_state = BT_LISTEN;
/* accept(2): block (honouring O_NONBLOCK / signals) until a child
 * socket appears on the accept queue, re-checking the listen state
 * after each wakeup. */
1203 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1205 DECLARE_WAITQUEUE(wait, current);
1206 struct sock *sk = sock->sk, *nsk;
1210 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1212 if (sk->sk_state != BT_LISTEN) {
1217 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1219 BT_DBG("sk %p timeo %ld", sk, timeo);
1221 /* Wait for an incoming connection. (wake-one). */
1222 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1223 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1224 set_current_state(TASK_INTERRUPTIBLE);
/* Socket lock is released around schedule_timeout (elided line). */
1231 timeo = schedule_timeout(timeo);
1232 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1234 if (sk->sk_state != BT_LISTEN) {
1239 if (signal_pending(current)) {
1240 err = sock_intr_errno(timeo);
1244 set_current_state(TASK_RUNNING);
1245 remove_wait_queue(sk_sleep(sk), &wait);
1250 newsock->state = SS_CONNECTED;
1252 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer
 * (psm/dst/dcid) or the local (sport/src/scid) identity. */
1259 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1261 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1262 struct sock *sk = sock->sk;
1264 BT_DBG("sock %p, sk %p", sock, sk);
1266 addr->sa_family = AF_BLUETOOTH;
1267 *len = sizeof(struct sockaddr_l2);
1270 la->l2_psm = l2cap_pi(sk)->psm;
1271 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1272 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1274 la->l2_psm = l2cap_pi(sk)->sport;
1275 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1276 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Sleep until every outstanding ERTM I-frame has been acked (or the
 * channel goes away), honouring signals and socket errors. */
1282 static int __l2cap_wait_ack(struct sock *sk)
1284 DECLARE_WAITQUEUE(wait, current);
1288 add_wait_queue(sk_sleep(sk), &wait);
1289 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1290 set_current_state(TASK_INTERRUPTIBLE);
1295 if (signal_pending(current)) {
1296 err = sock_intr_errno(timeo);
1301 timeo = schedule_timeout(timeo);
1304 err = sock_error(sk);
1308 set_current_state(TASK_RUNNING);
1309 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: after remote_max_tx unanswered polls give up and
 * disconnect; otherwise re-poll the peer with an RR/RNR carrying P=1. */
1313 static void l2cap_monitor_timeout(unsigned long arg)
1315 struct sock *sk = (void *) arg;
1318 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1319 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1324 l2cap_pi(sk)->retry_count++;
1325 __mod_monitor_timer();
1327 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor timer, set WAIT_F
 * and poll the peer. */
1331 static void l2cap_retrans_timeout(unsigned long arg)
1333 struct sock *sk = (void *) arg;
1336 l2cap_pi(sk)->retry_count = 1;
1337 __mod_monitor_timer();
1339 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1341 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Drop acknowledged frames from the TX queue up to expected_ack_seq and
 * stop the retransmission timer once nothing is unacked. */
1345 static void l2cap_drop_acked_frames(struct sock *sk)
1347 struct sk_buff *skb;
1349 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1350 l2cap_pi(sk)->unacked_frames) {
1351 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1354 skb = skb_dequeue(TX_QUEUE(sk));
1357 l2cap_pi(sk)->unacked_frames--;
1360 if (!l2cap_pi(sk)->unacked_frames)
1361 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built frame to the HCI layer for transmission. */
1364 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1366 struct l2cap_pinfo *pi = l2cap_pi(sk);
1368 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1370 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode: clone each queued frame, stamp TxSeq into the control
 * field, recompute the FCS over the clone, send, and drop the original
 * (streaming mode has no retransmission). */
1373 static int l2cap_streaming_send(struct sock *sk)
1375 struct sk_buff *skb, *tx_skb;
1376 struct l2cap_pinfo *pi = l2cap_pi(sk);
1379 while ((skb = sk->sk_send_head)) {
1380 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1383 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1384 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers the frame minus its own trailing two bytes. */
1386 if (pi->fcs == L2CAP_FCS_CRC16) {
1387 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1388 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1391 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo 64 (6-bit sequence numbers). */
1393 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1395 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1396 sk->sk_send_head = NULL;
1398 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1400 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single I-frame with sequence number tx_seq: locate it
 * in the TX queue, bump its retry count (disconnecting if the remote's
 * max-transmit limit is hit), rebuild control+FCS in a clone and send. */
1406 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1408 struct l2cap_pinfo *pi = l2cap_pi(sk);
1409 struct sk_buff *skb, *tx_skb;
1412 skb = skb_peek(TX_QUEUE(sk));
1417 if (bt_cb(skb)->tx_seq == tx_seq)
1420 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1423 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1425 if (pi->remote_max_tx &&
1426 bt_cb(skb)->retries == pi->remote_max_tx) {
1427 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1431 tx_skb = skb_clone(skb, GFP_ATOMIC);
1432 bt_cb(skb)->retries++;
1433 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Piggy-back a pending F-bit on the retransmission. */
1435 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1436 control |= L2CAP_CTRL_FINAL;
1437 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1440 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1441 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1443 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1445 if (pi->fcs == L2CAP_FCS_CRC16) {
1446 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1447 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1450 l2cap_do_send(sk, tx_skb);
/* ERTM transmit path: while the TX window has room, clone the next
 * queued I-frame, write ReqSeq/TxSeq (and a pending F-bit) into the
 * clone's control field, append the FCS, and send the clone.  The
 * original stays queued for possible retransmission.
 *
 * FIX: the FCS must be computed over and stored into tx_skb (the clone
 * actually transmitted, whose control field was just rewritten above),
 * not skb (the queued original).  The old code checksummed the stale
 * control word and corrupted the retransmission copy; both
 * l2cap_streaming_send() and l2cap_retransmit_one_frame() already use
 * tx_skb here. */
1453 static int l2cap_ertm_send(struct sock *sk)
1455 struct sk_buff *skb, *tx_skb;
1456 struct l2cap_pinfo *pi = l2cap_pi(sk);
1460 if (sk->sk_state != BT_CONNECTED)
1463 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
/* Remote's MaxTransmit exhausted: abort the channel. */
1465 if (pi->remote_max_tx &&
1466 bt_cb(skb)->retries == pi->remote_max_tx) {
1467 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1471 tx_skb = skb_clone(skb, GFP_ATOMIC);
1473 bt_cb(skb)->retries++;
/* Preserve only the SAR bits; rebuild the rest of the control field. */
1475 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1476 control &= L2CAP_CTRL_SAR;
1478 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1479 control |= L2CAP_CTRL_FINAL;
1480 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1482 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1483 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1484 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* Checksum the clone being sent, not the queued original. */
1487 if (pi->fcs == L2CAP_FCS_CRC16) {
1488 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1489 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1492 l2cap_do_send(sk, tx_skb);
1494 __mod_retrans_timer();
1496 bt_cb(skb)->tx_seq = pi->next_tx_seq;
/* 6-bit sequence space: advance modulo 64. */
1497 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1499 pi->unacked_frames++;
1502 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1503 sk->sk_send_head = NULL;
1505 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Rewind the send pointer to the oldest unacked frame and resend the
 * whole window from expected_ack_seq, under the send lock. */
1513 static int l2cap_retransmit_frames(struct sock *sk)
1515 struct l2cap_pinfo *pi = l2cap_pi(sk);
1518 spin_lock_bh(&pi->send_lock);
1520 if (!skb_queue_empty(TX_QUEUE(sk)))
1521 sk->sk_send_head = TX_QUEUE(sk)->next;
1523 pi->next_tx_seq = pi->expected_ack_seq;
1524 ret = l2cap_ertm_send(sk);
1526 spin_unlock_bh(&pi->send_lock);
/* Acknowledge received frames: send RNR when locally busy, otherwise
 * try to piggy-back the ack on pending I-frames, falling back to a
 * plain RR. */
1531 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1533 struct sock *sk = (struct sock *)pi;
1537 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1539 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1540 control |= L2CAP_SUPER_RCV_NOT_READY;
1541 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1542 l2cap_send_sframe(pi, control);
1546 spin_lock_bh(&pi->send_lock);
1547 nframes = l2cap_ertm_send(sk);
1548 spin_unlock_bh(&pi->send_lock);
/* Nothing was sent to carry the ack — send an explicit RR. */
1553 control |= L2CAP_SUPER_RCV_READY;
1554 l2cap_send_sframe(pi, control);
/* Send an SREJ S-frame (F=1) for the last entry on the SREJ list. */
1557 static void l2cap_send_srejtail(struct sock *sk)
1559 struct srej_list *tail;
1562 control = L2CAP_SUPER_SELECT_REJECT;
1563 control |= L2CAP_CTRL_FINAL;
1565 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1566 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1568 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy 'len' bytes of user iovec data into 'skb': the first 'count' bytes
 * go into the head skb, and any remainder is copied into extra
 * bt_skb_send_alloc()'d fragments (each at most conn->mtu) chained on the
 * head skb's frag_list. Returns 0 on success, negative errno on failure. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff **frag;
/* first chunk straight into the head skb */
if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);
*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header plus a
 * 2-byte PSM prefix (hence hlen = L2CAP_HDR_SIZE + 2), followed by the
 * user payload copied from 'msg'. Returns the skb, or an ERR_PTR on
 * allocation / copy failure. */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE + 2;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, (int)len);
/* head skb holds at most (conn->mtu - hlen) of payload; rest fragments */
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
msg->msg_flags & MSG_DONTWAIT, &err);
return ERR_PTR(-ENOMEM);
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* header length covers payload + PSM field */
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
if (unlikely(err < 0)) {
return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user payload
 * from 'msg'. Returns the skb, or an ERR_PTR on allocation/copy failure. */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, (int)len);
/* head skb holds at most (conn->mtu - hlen); remainder goes to frag_list */
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
msg->msg_flags & MSG_DONTWAIT, &err);
return ERR_PTR(-ENOMEM);
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
if (unlikely(err < 0)) {
return ERR_PTR(err);
/* Build one ERTM/streaming I-frame: L2CAP header, 16-bit control field,
 * an optional 2-byte SDU-length field (presumably only for SAR start
 * frames — TODO confirm caller contract for 'sdulen'), the payload, and a
 * zeroed 2-byte FCS placeholder when CRC16 is configured (the real FCS is
 * written at transmit time). Returns the skb or an ERR_PTR. */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
struct l2cap_conn *conn = l2cap_pi(sk)->conn;
struct sk_buff *skb;
int err, count, hlen = L2CAP_HDR_SIZE + 2;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, (int)len);
return ERR_PTR(-ENOTCONN);
/* reserve room for the 2-byte FCS when CRC16 is in use */
if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
msg->msg_flags & MSG_DONTWAIT, &err);
return ERR_PTR(-ENOMEM);
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
put_unaligned_le16(control, skb_put(skb, 2));
put_unaligned_le16(sdulen, skb_put(skb, 2));
err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
if (unlikely(err < 0)) {
return ERR_PTR(err);
if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
put_unaligned_le16(0, skb_put(skb, 2));
bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START / CONTINUE / END
 * sequence of I-frames on a private queue, then splice the whole queue
 * onto TX_QUEUE(sk) and, under send_lock, point sk_send_head at the first
 * new frame if transmission was idle. Returns 0-or-size / negative errno. */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct sk_buff *skb;
struct sk_buff_head sar_queue;
skb_queue_head_init(&sar_queue);
/* first frame carries the total SDU length ('len') in its SAR header */
control = L2CAP_SDU_START;
skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
return PTR_ERR(skb);
__skb_queue_tail(&sar_queue, skb);
len -= pi->remote_mps;
size += pi->remote_mps;
if (len > pi->remote_mps) {
control = L2CAP_SDU_CONTINUE;
buflen = pi->remote_mps;
control = L2CAP_SDU_END;
skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* drop everything built so far on mid-SDU failure */
skb_queue_purge(&sar_queue);
return PTR_ERR(skb);
__skb_queue_tail(&sar_queue, skb);
skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
spin_lock_bh(&pi->send_lock);
if (sk->sk_send_head == NULL)
sk->sk_send_head = sar_queue.next;
spin_unlock_bh(&pi->send_lock);
1753 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1755 struct sock *sk = sock->sk;
1756 struct l2cap_pinfo *pi = l2cap_pi(sk);
1757 struct sk_buff *skb;
1761 BT_DBG("sock %p, sk %p", sock, sk);
1763 err = sock_error(sk);
1767 if (msg->msg_flags & MSG_OOB)
1772 if (sk->sk_state != BT_CONNECTED) {
1777 /* Connectionless channel */
1778 if (sk->sk_type == SOCK_DGRAM) {
1779 skb = l2cap_create_connless_pdu(sk, msg, len);
1783 l2cap_do_send(sk, skb);
1790 case L2CAP_MODE_BASIC:
1791 /* Check outgoing MTU */
1792 if (len > pi->omtu) {
1797 /* Create a basic PDU */
1798 skb = l2cap_create_basic_pdu(sk, msg, len);
1804 l2cap_do_send(sk, skb);
1808 case L2CAP_MODE_ERTM:
1809 case L2CAP_MODE_STREAMING:
1810 /* Entire SDU fits into one PDU */
1811 if (len <= pi->remote_mps) {
1812 control = L2CAP_SDU_UNSEGMENTED;
1813 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1818 __skb_queue_tail(TX_QUEUE(sk), skb);
1820 if (pi->mode == L2CAP_MODE_ERTM)
1821 spin_lock_bh(&pi->send_lock);
1823 if (sk->sk_send_head == NULL)
1824 sk->sk_send_head = skb;
1826 if (pi->mode == L2CAP_MODE_ERTM)
1827 spin_unlock_bh(&pi->send_lock);
1829 /* Segment SDU into multiples PDUs */
1830 err = l2cap_sar_segment_sdu(sk, msg, len);
1835 if (pi->mode == L2CAP_MODE_STREAMING) {
1836 err = l2cap_streaming_send(sk);
1838 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1839 pi->conn_state && L2CAP_CONN_WAIT_F) {
1843 spin_lock_bh(&pi->send_lock);
1844 err = l2cap_ertm_send(sk);
1845 spin_unlock_bh(&pi->send_lock);
1853 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() for L2CAP sockets. If an incoming connection was deferred
 * (state BT_CONNECT2 with defer_setup), the first read accepts it: the
 * delayed connect response is sent with the stored ident and the channel
 * moves to BT_CONFIG. Then delegates to the generic bt_sock_recvmsg(). */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
struct sock *sk = sock->sk;
if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
struct l2cap_conn_rsp rsp;
sk->sk_state = BT_CONFIG;
/* from the responder's view our dcid is the peer's source CID */
rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS / L2CAP_LM).
 * For L2CAP_OPTIONS the current values seed 'opts' so a short user buffer
 * leaves unmentioned fields unchanged; the tx window is bounded by
 * L2CAP_DEFAULT_TX_WINDOW and the mode is validated before the rest of
 * the options are committed. For L2CAP_LM the link-mode bits map onto a
 * security level plus the role-switch / force-reliable flags. */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
BT_DBG("sk %p", sk);
/* seed with current values: partial user writes keep existing settings */
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = l2cap_pi(sk)->mode;
opts.fcs = l2cap_pi(sk)->fcs;
opts.max_tx = l2cap_pi(sk)->max_tx;
opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, sizeof(opts), optlen);
if (copy_from_user((char *) &opts, optval, len)) {
if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
l2cap_pi(sk)->mode = opts.mode;
switch (l2cap_pi(sk)->mode) {
case L2CAP_MODE_BASIC:
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
l2cap_pi(sk)->imtu = opts.imtu;
l2cap_pi(sk)->omtu = opts.omtu;
l2cap_pi(sk)->fcs = opts.fcs;
l2cap_pi(sk)->max_tx = opts.max_tx;
l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
if (get_user(opt, (u32 __user *) optval)) {
/* highest requested LM bit wins: AUTH < ENCRYPT < SECURE */
if (opt & L2CAP_LM_AUTH)
l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
if (opt & L2CAP_LM_ENCRYPT)
l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
if (opt & L2CAP_LM_SECURE)
l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point: SOL_L2CAP goes to the legacy handler; only
 * SOL_BLUETOOTH is handled here (BT_SECURITY, BT_DEFER_SETUP). */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
struct sock *sk = sock->sk;
struct bt_security sec;
BT_DBG("sk %p", sk);
if (level == SOL_L2CAP)
return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
/* BT_SECURITY applies only to connection-oriented and raw sockets */
if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
&& sk->sk_type != SOCK_RAW) {
sec.level = BT_SECURITY_LOW;
len = min_t(unsigned int, sizeof(sec), optlen);
if (copy_from_user((char *) &sec, optval, len)) {
if (sec.level < BT_SECURITY_LOW ||
sec.level > BT_SECURITY_HIGH) {
l2cap_pi(sk)->sec_level = sec.level;
case BT_DEFER_SETUP:
/* defer_setup only makes sense before/while listening */
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
if (get_user(opt, (u32 __user *) optval)) {
bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS / L2CAP_LM /
 * L2CAP_CONNINFO). Copies at most min(userlen, sizeof(struct)) back. */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
struct sock *sk = sock->sk;
struct l2cap_options opts;
struct l2cap_conninfo cinfo;
BT_DBG("sk %p", sk);
if (get_user(len, optlen))
opts.imtu = l2cap_pi(sk)->imtu;
opts.omtu = l2cap_pi(sk)->omtu;
opts.flush_to = l2cap_pi(sk)->flush_to;
opts.mode = l2cap_pi(sk)->mode;
opts.fcs = l2cap_pi(sk)->fcs;
opts.max_tx = l2cap_pi(sk)->max_tx;
opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
len = min_t(unsigned int, len, sizeof(opts));
if (copy_to_user(optval, (char *) &opts, len))
/* translate the stored security level back into LM bit flags */
switch (l2cap_pi(sk)->sec_level) {
case BT_SECURITY_LOW:
opt = L2CAP_LM_AUTH;
case BT_SECURITY_MEDIUM:
opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
case BT_SECURITY_HIGH:
opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
if (l2cap_pi(sk)->role_switch)
opt |= L2CAP_LM_MASTER;
if (l2cap_pi(sk)->force_reliable)
opt |= L2CAP_LM_RELIABLE;
if (put_user(opt, (u32 __user *) optval))
case L2CAP_CONNINFO:
/* conninfo is valid once connected, or while a deferred accept
 * is pending (BT_CONNECT2 + defer_setup) */
if (sk->sk_state != BT_CONNECTED &&
!(sk->sk_state == BT_CONNECT2 &&
bt_sk(sk)->defer_setup)) {
cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
len = min_t(unsigned int, len, sizeof(cinfo));
if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler; only
 * SOL_BLUETOOTH is handled here (BT_SECURITY, BT_DEFER_SETUP). */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
struct sock *sk = sock->sk;
struct bt_security sec;
BT_DBG("sk %p", sk);
if (level == SOL_L2CAP)
return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
if (level != SOL_BLUETOOTH)
return -ENOPROTOOPT;
if (get_user(len, optlen))
if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
&& sk->sk_type != SOCK_RAW) {
sec.level = l2cap_pi(sk)->sec_level;
len = min_t(unsigned int, len, sizeof(sec));
if (copy_to_user(optval, (char *) &sec, len))
case BT_DEFER_SETUP:
if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* Shut the channel down. In ERTM mode first waits for all outstanding
 * I-frames to be acknowledged (__l2cap_wait_ack), then closes the channel
 * and, when SO_LINGER is set, blocks until BT_CLOSED or the linger time
 * expires. */
static int l2cap_sock_shutdown(struct socket *sock, int how)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
if (!sk->sk_shutdown) {
if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
err = __l2cap_wait_ack(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
l2cap_sock_clear_timer(sk);
__l2cap_sock_close(sk, 0);
if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
err = bt_sock_wait_state(sk, BT_CLOSED,
/* surface a pending socket error if shutdown itself succeeded */
if (!err && sk->sk_err)
/* release() for L2CAP sockets: full shutdown (both directions), then kill
 * the socket. */
static int l2cap_sock_release(struct socket *sock)
struct sock *sk = sock->sk;
BT_DBG("sock %p, sk %p", sock, sk);
err = l2cap_sock_shutdown(sock, 2);
l2cap_sock_kill(sk);
/* Mark a channel as fully configured: clear conf state and timer, then
 * wake whoever is waiting — connect() for outgoing channels (no parent),
 * accept() for incoming ones (signalled via the parent socket). */
static void l2cap_chan_ready(struct sock *sk)
struct sock *parent = bt_sk(sk)->parent;
BT_DBG("sk %p, parent %p", sk, parent);
l2cap_pi(sk)->conf_state = 0;
l2cap_sock_clear_timer(sk);
/* Outgoing channel.
* Wake up socket sleeping on connect.
sk->sk_state = BT_CONNECTED;
sk->sk_state_change(sk);
/* Incoming channel.
* Wake up socket sleeping on accept.
parent->sk_data_ready(parent, 0);
2243 /* Copy frame to all raw sockets on that connection */
2244 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2246 struct l2cap_chan_list *l = &conn->chan_list;
2247 struct sk_buff *nskb;
2250 BT_DBG("conn %p", conn);
2252 read_lock(&l->lock);
2253 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2254 if (sk->sk_type != SOCK_RAW)
2257 /* Don't send frame to the socket it came from */
2260 nskb = skb_clone(skb, GFP_ATOMIC);
2264 if (sock_queue_rcv_skb(sk, nskb))
2267 read_unlock(&l->lock);
2270 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb for a signalling command: L2CAP header on the
 * signalling CID, command header (code/ident/len), then 'dlen' bytes of
 * 'data', with overflow beyond conn->mtu carried in frag_list fragments. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data)
struct sk_buff *skb, **frag;
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_alloc(count, GFP_ATOMIC);
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->len = cpu_to_le16(dlen);
/* remaining space in the head skb after both headers */
count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
memcpy(skb_put(skb, count), data, count);
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);
*frag = bt_skb_alloc(count, GFP_ATOMIC);
memcpy(skb_put(*frag, count), data, count);
frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its total size
 * (header + payload) and reports type/len/value. 1/2/4-byte values are
 * converted from little endian; anything else is returned as a pointer
 * to the raw option payload. */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
struct l2cap_conf_opt *opt = *ptr;
len = L2CAP_CONF_OPT_SIZE + opt->len;
*val = *((u8 *) opt->val);
*val = __le16_to_cpu(*((__le16 *) opt->val));
*val = __le32_to_cpu(*((__le32 *) opt->val));
/* variable-length option: hand back a pointer, not a value */
*val = (unsigned long) opt->val;
BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance the cursor.
 * 1/2/4-byte values are stored little endian; longer values are copied
 * from the buffer 'val' points to. */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
struct l2cap_conf_opt *opt = *ptr;
BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
*((u8 *) opt->val) = val;
*((__le16 *) opt->val) = cpu_to_le16(val);
*((__le32 *) opt->val) = cpu_to_le32(val);
/* opaque payload: 'val' is actually a pointer for len > 4 */
memcpy(opt->val, (void *) val, len);
*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer callback: the peer hasn't been acknowledged for a while, so
 * send an explicit ack for the frames received so far. */
static void l2cap_ack_timeout(unsigned long arg)
struct sock *sk = (void *) arg;
l2cap_send_ack(l2cap_pi(sk));
/* Initialise per-channel ERTM state: zero the sequence/ack counters, arm
 * the retransmission, monitor and ack timers, set up the SREJ and busy
 * queues, the send lock, and the deferred busy-handling work item. */
static inline void l2cap_ertm_init(struct sock *sk)
l2cap_pi(sk)->expected_ack_seq = 0;
l2cap_pi(sk)->unacked_frames = 0;
l2cap_pi(sk)->buffer_seq = 0;
l2cap_pi(sk)->num_acked = 0;
l2cap_pi(sk)->frames_sent = 0;
setup_timer(&l2cap_pi(sk)->retrans_timer,
l2cap_retrans_timeout, (unsigned long) sk);
setup_timer(&l2cap_pi(sk)->monitor_timer,
l2cap_monitor_timeout, (unsigned long) sk);
setup_timer(&l2cap_pi(sk)->ack_timer,
l2cap_ack_timeout, (unsigned long) sk);
__skb_queue_head_init(SREJ_QUEUE(sk));
__skb_queue_head_init(BUSY_QUEUE(sk));
spin_lock_init(&l2cap_pi(sk)->send_lock);
INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Return nonzero when 'mode' (ERTM or streaming) is supported by both the
 * local feature mask (optionally widened by the enable_ertm module flag —
 * the widening condition is elided in this extract) and the remote
 * 'feat_mask'. */
static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
u32 local_feat_mask = l2cap_feat_mask;
local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
case L2CAP_MODE_ERTM:
return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
case L2CAP_MODE_STREAMING:
return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to use: keep ERTM/streaming when the remote
 * supports it, otherwise fall back to basic mode. */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
if (l2cap_mode_supported(mode, remote_feat_mask))
return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data'.
 * On the first request the mode may be downgraded via l2cap_select_mode()
 * when the peer lacks ERTM/streaming support. Emits an MTU option in
 * basic mode (when non-default) and an RFC option (plus optional FCS=off
 * option) for ERTM/streaming. Returns the request length. */
static int l2cap_build_conf_req(struct sock *sk, void *data)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = pi->mode };
void *ptr = req->data;
BT_DBG("sk %p", sk);
/* only negotiate the mode on the very first req/rsp exchange */
if (pi->num_conf_req || pi->num_conf_rsp)
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
case L2CAP_MODE_BASIC:
if (pi->imtu != L2CAP_DEFAULT_MTU)
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
case L2CAP_MODE_ERTM:
rfc.mode = L2CAP_MODE_ERTM;
rfc.txwin_size = pi->tx_win;
rfc.max_transmit = pi->max_tx;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* cap the PDU size to the ACL MTU minus per-frame overhead
 * (presumably hdr + control + SDU len + FCS = 10 — confirm) */
if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
if (pi->fcs == L2CAP_FCS_NONE ||
pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
pi->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
case L2CAP_MODE_STREAMING:
rfc.mode = L2CAP_MODE_STREAMING;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
if (pi->fcs == L2CAP_FCS_NONE ||
pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
pi->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
/* FIXME: Need actual value of the flush timeout */
//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
// l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configuration request (pi->conf_req) and
 * build our response into 'data'. Collects MTU/flush/RFC/FCS options,
 * rejects unknown non-hint options, negotiates the mode (refusing after
 * the first round), then accepts or counter-proposes each option.
 * Returns the response length or -ECONNREFUSED. */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
void *req = pi->conf_req;
int len = pi->conf_len;
int type, hint, olen;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
BT_DBG("sk %p", sk);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* hint options may be ignored; others must be understood */
hint = type & L2CAP_CONF_HINT;
type &= L2CAP_CONF_MASK;
case L2CAP_CONF_MTU:
case L2CAP_CONF_FLUSH_TO:
case L2CAP_CONF_QOS:
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *) val, olen);
case L2CAP_CONF_FCS:
if (val == L2CAP_FCS_NONE)
pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* unknown non-hint option: echo its type back as unaccepted */
result = L2CAP_CONF_UNKNOWN;
*((u8 *) ptr++) = type;
if (pi->num_conf_rsp || pi->num_conf_req)
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
return -ECONNREFUSED;
pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
if (pi->mode != rfc.mode) {
result = L2CAP_CONF_UNACCEPT;
rfc.mode = pi->mode;
/* second disagreement on the mode: give up */
if (pi->num_conf_rsp == 1)
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
* which ones we don't like. */
if (mtu < L2CAP_DEFAULT_MIN_MTU)
result = L2CAP_CONF_UNACCEPT;
pi->conf_state |= L2CAP_CONF_MTU_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
case L2CAP_MODE_BASIC:
pi->fcs = L2CAP_FCS_NONE;
pi->conf_state |= L2CAP_CONF_MODE_DONE;
case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is __le16 — comparing it raw to a
 * host-endian value and assigning le16_to_cpu() of a host value looks
 * byte-swapped on big-endian; presumably should be
 * le16_to_cpu(rfc.max_pdu_size) > ... and cpu_to_le16(...). Confirm. */
if (rfc.max_pdu_size > pi->conn->mtu - 10)
rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): le16_to_cpu() applied to host-endian constants —
 * presumably should be cpu_to_le16() since rfc fields are __le16. */
rfc.retrans_timeout =
le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout =
le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
pi->conf_state |= L2CAP_CONF_MODE_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
case L2CAP_MODE_STREAMING:
if (rfc.max_pdu_size > pi->conn->mtu - 10)
rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
pi->conf_state |= L2CAP_CONF_MODE_DONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
result = L2CAP_CONF_UNACCEPT;
memset(&rfc, 0, sizeof(rfc));
rfc.mode = pi->mode;
if (result == L2CAP_CONF_SUCCESS)
pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
rsp->scid = cpu_to_le16(pi->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response and build the follow-up request
 * into 'data'. Options the peer counter-proposed (MTU, flush timeout,
 * RFC) are adopted or clamped and echoed back; a mode change is refused
 * when we already locked the mode (STATE2_DEVICE). On success the ERTM /
 * streaming parameters from the RFC option are committed. */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_req *req = data;
void *ptr = req->data;
struct l2cap_conf_rfc rfc;
BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
case L2CAP_CONF_MTU:
/* clamp an unacceptably small proposed MTU to the minimum */
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
pi->omtu = L2CAP_DEFAULT_MIN_MTU;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
case L2CAP_CONF_FLUSH_TO:
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
/* once the mode is locked a differing proposal is fatal */
if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
rfc.mode != pi->mode)
return -ECONNREFUSED;
pi->mode = rfc.mode;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
if (*result == L2CAP_CONF_SUCCESS) {
case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
pi->mps = le16_to_cpu(rfc.max_pdu_size);
case L2CAP_MODE_STREAMING:
pi->mps = le16_to_cpu(rfc.max_pdu_size);
req->dcid = cpu_to_le16(pi->dcid);
req->flags = cpu_to_le16(0x0000);
/* Fill a minimal (option-less) configuration response header with the
 * given result and flags; returns the response length. */
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
BT_DBG("sk %p", sk);
rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(flags);
/* On a successful configure response, extract the final RFC option from
 * 'rsp' and commit the negotiated ERTM/streaming parameters (tx window,
 * timeouts, MPS). No-op for basic mode. */
static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
struct l2cap_pinfo *pi = l2cap_pi(sk);
struct l2cap_conf_rfc rfc;
BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
case L2CAP_MODE_ERTM:
pi->remote_tx_win = rfc.txwin_size;
pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
pi->mps = le16_to_cpu(rfc.max_pdu_size);
case L2CAP_MODE_STREAMING:
pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it answers our outstanding
 * information request (matching ident), stop the info timer, mark the
 * feature-mask exchange as done anyway, and continue connection setup. */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* only "command not understood" (0x0000) is of interest here */
if (rej->reason != 0x0000)
if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
cmd->ident == conn->info_ident) {
del_timer(&conn->info_timer);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
l2cap_conn_start(conn);
/* Handle an incoming Connection Request.
 * Finds a listening socket for the PSM, enforces link security for
 * non-SDP PSMs, checks the accept backlog and duplicate remote CIDs,
 * allocates and initialises a child socket, then answers with success,
 * pending (authentication / authorization / feature exchange still in
 * progress) or a refusal. May also kick off the feature-mask info
 * request when the response was "pending, no info". */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_chan_list *list = &conn->chan_list;
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
struct sock *sk, *parent;
int result, status = L2CAP_CS_NO_INFO;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__le16 psm = req->psm;
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
result = L2CAP_CR_BAD_PSM;
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
!hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: presumably HCI "authentication failure" — confirm */
conn->disc_reason = 0x05;
result = L2CAP_CR_SEC_BLOCK;
result = L2CAP_CR_NO_MEM;
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
write_lock_bh(&list->lock);
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(list, scid)) {
write_unlock_bh(&list->lock);
sock_set_flag(sk, SOCK_ZAPPED);
l2cap_sock_kill(sk);
hci_conn_hold(conn->hcon);
l2cap_sock_init(sk, parent);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
l2cap_pi(sk)->psm = psm;
l2cap_pi(sk)->dcid = scid;
__l2cap_chan_add(conn, sk, parent);
dcid = l2cap_pi(sk)->scid;
l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
l2cap_pi(sk)->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
if (l2cap_check_security(sk)) {
if (bt_sk(sk)->defer_setup) {
sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
parent->sk_data_ready(parent, 0);
sk->sk_state = BT_CONFIG;
result = L2CAP_CR_SUCCESS;
status = L2CAP_CS_NO_INFO;
sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
sk->sk_state = BT_CONNECT2;
result = L2CAP_CR_PEND;
status = L2CAP_CS_NO_INFO;
write_unlock_bh(&list->lock);
bh_unlock_sock(parent);
rsp.scid = cpu_to_le16(scid);
rsp.dcid = cpu_to_le16(dcid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(status);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
struct l2cap_info_req info;
info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
mod_timer(&conn->info_timer, jiffies +
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle an incoming Connection Response. Looks up the channel by our
 * source CID (or by command ident while still pending); on success stores
 * the peer's CID, moves to BT_CONFIG and sends our first configuration
 * request; on pending just records the state; otherwise the channel is
 * torn down with ECONNREFUSED. */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* fall back to ident lookup for a still-pending connect */
sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
case L2CAP_CR_SUCCESS:
sk->sk_state = BT_CONFIG;
l2cap_pi(sk)->ident = 0;
l2cap_pi(sk)->dcid = dcid;
l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, req), req);
l2cap_pi(sk)->num_conf_req++;
l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configuration Request. Accumulates option data in
 * pi->conf_req across continuation packets (flag bit 0x0001), rejecting
 * when the buffer would overflow. Once complete, parses the request,
 * sends our response, and — if both directions are configured — finalises
 * FCS, resets the sequence state, initialises ERTM and marks the channel
 * ready. Also sends our own config request if not yet sent. */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
if (sk->sk_state == BT_DISCONN)
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp,
L2CAP_CONF_REJECT, flags), rsp);
memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
l2cap_pi(sk)->conf_len += len;
if (flags & 0x0001) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(sk, rsp,
L2CAP_CONF_SUCCESS, 0x0001), rsp);
/* Complete config. */
len = l2cap_parse_conf_req(sk, rsp);
l2cap_send_disconn_req(conn, sk, ECONNRESET);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
l2cap_pi(sk)->num_conf_rsp++;
/* Reset config buffer. */
l2cap_pi(sk)->conf_len = 0;
if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* default to CRC16 unless both sides disabled FCS */
if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
sk->sk_state = BT_CONNECTED;
l2cap_pi(sk)->next_tx_seq = 0;
l2cap_pi(sk)->expected_tx_seq = 0;
__skb_queue_head_init(TX_QUEUE(sk));
if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
l2cap_ertm_init(sk);
l2cap_chan_ready(sk);
if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(sk, buf), buf);
l2cap_pi(sk)->num_conf_req++;
3080 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3082 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3083 u16 scid, flags, result;
3085 int len = cmd->len - sizeof(*rsp);
3087 scid = __le16_to_cpu(rsp->scid);
3088 flags = __le16_to_cpu(rsp->flags);
3089 result = __le16_to_cpu(rsp->result);
3091 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3092 scid, flags, result);
3094 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3099 case L2CAP_CONF_SUCCESS:
3100 l2cap_conf_rfc_get(sk, rsp->data, len);
3103 case L2CAP_CONF_UNACCEPT:
3104 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3107 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3108 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3112 /* throw out any old stored conf requests */
3113 result = L2CAP_CONF_SUCCESS;
3114 len = l2cap_parse_conf_rsp(sk, rsp->data,
3117 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3121 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3122 L2CAP_CONF_REQ, len, req);
3123 l2cap_pi(sk)->num_conf_req++;
3124 if (result != L2CAP_CONF_SUCCESS)
3130 sk->sk_err = ECONNRESET;
3131 l2cap_sock_set_timer(sk, HZ * 5);
3132 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3139 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3141 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3142 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3143 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3144 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3146 sk->sk_state = BT_CONNECTED;
3147 l2cap_pi(sk)->next_tx_seq = 0;
3148 l2cap_pi(sk)->expected_tx_seq = 0;
3149 __skb_queue_head_init(TX_QUEUE(sk));
3150 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3151 l2cap_ertm_init(sk);
3153 l2cap_chan_ready(sk);
3161 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3163 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3164 struct l2cap_disconn_rsp rsp;
3168 scid = __le16_to_cpu(req->scid);
3169 dcid = __le16_to_cpu(req->dcid);
3171 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3173 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3177 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3178 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3179 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3181 sk->sk_shutdown = SHUTDOWN_MASK;
3183 l2cap_chan_del(sk, ECONNRESET);
3186 l2cap_sock_kill(sk);
3190 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3192 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3196 scid = __le16_to_cpu(rsp->scid);
3197 dcid = __le16_to_cpu(rsp->dcid);
3199 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3201 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3205 l2cap_chan_del(sk, 0);
3208 l2cap_sock_kill(sk);
3212 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3214 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3217 type = __le16_to_cpu(req->type);
3219 BT_DBG("type 0x%4.4x", type);
3221 if (type == L2CAP_IT_FEAT_MASK) {
3223 u32 feat_mask = l2cap_feat_mask;
3224 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3225 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3226 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3228 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3230 put_unaligned_le32(feat_mask, rsp->data);
3231 l2cap_send_cmd(conn, cmd->ident,
3232 L2CAP_INFO_RSP, sizeof(buf), buf);
3233 } else if (type == L2CAP_IT_FIXED_CHAN) {
3235 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3236 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3237 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3238 memcpy(buf + 4, l2cap_fixed_chan, 8);
3239 l2cap_send_cmd(conn, cmd->ident,
3240 L2CAP_INFO_RSP, sizeof(buf), buf);
3242 struct l2cap_info_rsp rsp;
3243 rsp.type = cpu_to_le16(type);
3244 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3245 l2cap_send_cmd(conn, cmd->ident,
3246 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3252 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3254 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3257 type = __le16_to_cpu(rsp->type);
3258 result = __le16_to_cpu(rsp->result);
3260 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3262 del_timer(&conn->info_timer);
3264 if (type == L2CAP_IT_FEAT_MASK) {
3265 conn->feat_mask = get_unaligned_le32(rsp->data);
3267 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3268 struct l2cap_info_req req;
3269 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3271 conn->info_ident = l2cap_get_ident(conn);
3273 l2cap_send_cmd(conn, conn->info_ident,
3274 L2CAP_INFO_REQ, sizeof(req), &req);
3276 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3277 conn->info_ident = 0;
3279 l2cap_conn_start(conn);
3281 } else if (type == L2CAP_IT_FIXED_CHAN) {
3282 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3283 conn->info_ident = 0;
3285 l2cap_conn_start(conn);
3291 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3293 u8 *data = skb->data;
3295 struct l2cap_cmd_hdr cmd;
3298 l2cap_raw_recv(conn, skb);
3300 while (len >= L2CAP_CMD_HDR_SIZE) {
3302 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3303 data += L2CAP_CMD_HDR_SIZE;
3304 len -= L2CAP_CMD_HDR_SIZE;
3306 cmd_len = le16_to_cpu(cmd.len);
3308 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3310 if (cmd_len > len || !cmd.ident) {
3311 BT_DBG("corrupted command");
3316 case L2CAP_COMMAND_REJ:
3317 l2cap_command_rej(conn, &cmd, data);
3320 case L2CAP_CONN_REQ:
3321 err = l2cap_connect_req(conn, &cmd, data);
3324 case L2CAP_CONN_RSP:
3325 err = l2cap_connect_rsp(conn, &cmd, data);
3328 case L2CAP_CONF_REQ:
3329 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3332 case L2CAP_CONF_RSP:
3333 err = l2cap_config_rsp(conn, &cmd, data);
3336 case L2CAP_DISCONN_REQ:
3337 err = l2cap_disconnect_req(conn, &cmd, data);
3340 case L2CAP_DISCONN_RSP:
3341 err = l2cap_disconnect_rsp(conn, &cmd, data);
3344 case L2CAP_ECHO_REQ:
3345 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3348 case L2CAP_ECHO_RSP:
3351 case L2CAP_INFO_REQ:
3352 err = l2cap_information_req(conn, &cmd, data);
3355 case L2CAP_INFO_RSP:
3356 err = l2cap_information_rsp(conn, &cmd, data);
3360 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3366 struct l2cap_cmd_rej rej;
3367 BT_DBG("error %d", err);
3369 /* FIXME: Map err to a valid reason */
3370 rej.reason = cpu_to_le16(0);
3371 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3381 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3383 u16 our_fcs, rcv_fcs;
3384 int hdr_size = L2CAP_HDR_SIZE + 2;
3386 if (pi->fcs == L2CAP_FCS_CRC16) {
3387 skb_trim(skb, skb->len - 2);
3388 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3389 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3391 if (our_fcs != rcv_fcs)
3397 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3399 struct l2cap_pinfo *pi = l2cap_pi(sk);
3402 pi->frames_sent = 0;
3404 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3406 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3407 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3408 l2cap_send_sframe(pi, control);
3409 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3410 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3413 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3414 l2cap_retransmit_frames(sk);
3416 spin_lock_bh(&pi->send_lock);
3417 l2cap_ertm_send(sk);
3418 spin_unlock_bh(&pi->send_lock);
3420 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3421 pi->frames_sent == 0) {
3422 control |= L2CAP_SUPER_RCV_READY;
3423 l2cap_send_sframe(pi, control);
3427 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3429 struct sk_buff *next_skb;
3430 struct l2cap_pinfo *pi = l2cap_pi(sk);
3431 int tx_seq_offset, next_tx_seq_offset;
3433 bt_cb(skb)->tx_seq = tx_seq;
3434 bt_cb(skb)->sar = sar;
3436 next_skb = skb_peek(SREJ_QUEUE(sk));
3438 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3442 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3443 if (tx_seq_offset < 0)
3444 tx_seq_offset += 64;
3447 if (bt_cb(next_skb)->tx_seq == tx_seq)
3450 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3451 pi->buffer_seq) % 64;
3452 if (next_tx_seq_offset < 0)
3453 next_tx_seq_offset += 64;
3455 if (next_tx_seq_offset > tx_seq_offset) {
3456 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3460 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3463 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3465 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3470 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3472 struct l2cap_pinfo *pi = l2cap_pi(sk);
3473 struct sk_buff *_skb;
3476 switch (control & L2CAP_CTRL_SAR) {
3477 case L2CAP_SDU_UNSEGMENTED:
3478 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3481 err = sock_queue_rcv_skb(sk, skb);
3487 case L2CAP_SDU_START:
3488 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3491 pi->sdu_len = get_unaligned_le16(skb->data);
3493 if (pi->sdu_len > pi->imtu)
3496 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3500 /* pull sdu_len bytes only after alloc, because of Local Busy
3501 * condition we have to be sure that this will be executed
3502 * only once, i.e., when alloc does not fail */
3505 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3507 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3508 pi->partial_sdu_len = skb->len;
3511 case L2CAP_SDU_CONTINUE:
3512 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3518 pi->partial_sdu_len += skb->len;
3519 if (pi->partial_sdu_len > pi->sdu_len)
3522 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3527 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3533 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3534 pi->partial_sdu_len += skb->len;
3536 if (pi->partial_sdu_len > pi->imtu)
3539 if (pi->partial_sdu_len != pi->sdu_len)
3542 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3545 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3547 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3551 err = sock_queue_rcv_skb(sk, _skb);
3554 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3558 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3559 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3573 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3578 static void l2cap_busy_work(struct work_struct *work)
3580 DECLARE_WAITQUEUE(wait, current);
3581 struct l2cap_pinfo *pi =
3582 container_of(work, struct l2cap_pinfo, busy_work);
3583 struct sock *sk = (struct sock *)pi;
3584 int n_tries = 0, timeo = HZ/5, err;
3585 struct sk_buff *skb;
3590 add_wait_queue(sk_sleep(sk), &wait);
3591 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3592 set_current_state(TASK_INTERRUPTIBLE);
3594 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3596 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3603 if (signal_pending(current)) {
3604 err = sock_intr_errno(timeo);
3609 timeo = schedule_timeout(timeo);
3612 err = sock_error(sk);
3616 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3617 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3618 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3620 skb_queue_head(BUSY_QUEUE(sk), skb);
3624 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3631 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3634 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3635 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3636 l2cap_send_sframe(pi, control);
3637 l2cap_pi(sk)->retry_count = 1;
3639 del_timer(&pi->retrans_timer);
3640 __mod_monitor_timer();
3642 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3645 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3646 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3648 set_current_state(TASK_RUNNING);
3649 remove_wait_queue(sk_sleep(sk), &wait);
3654 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3656 struct l2cap_pinfo *pi = l2cap_pi(sk);
3659 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3660 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3661 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3665 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3667 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3671 /* Busy Condition */
3672 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3673 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3674 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3676 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3677 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3678 l2cap_send_sframe(pi, sctrl);
3680 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3682 del_timer(&pi->ack_timer);
3684 queue_work(_busy_wq, &pi->busy_work);
3689 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3691 struct l2cap_pinfo *pi = l2cap_pi(sk);
3692 struct sk_buff *_skb;
3696 * TODO: We have to notify the userland if some data is lost with the
3700 switch (control & L2CAP_CTRL_SAR) {
3701 case L2CAP_SDU_UNSEGMENTED:
3702 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3707 err = sock_queue_rcv_skb(sk, skb);
3713 case L2CAP_SDU_START:
3714 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3719 pi->sdu_len = get_unaligned_le16(skb->data);
3722 if (pi->sdu_len > pi->imtu) {
3727 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3733 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3735 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3736 pi->partial_sdu_len = skb->len;
3740 case L2CAP_SDU_CONTINUE:
3741 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3744 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3746 pi->partial_sdu_len += skb->len;
3747 if (pi->partial_sdu_len > pi->sdu_len)
3755 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3758 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3760 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3761 pi->partial_sdu_len += skb->len;
3763 if (pi->partial_sdu_len > pi->imtu)
3766 if (pi->partial_sdu_len == pi->sdu_len) {
3767 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3768 err = sock_queue_rcv_skb(sk, _skb);
3783 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3785 struct sk_buff *skb;
3788 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3789 if (bt_cb(skb)->tx_seq != tx_seq)
3792 skb = skb_dequeue(SREJ_QUEUE(sk));
3793 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3794 l2cap_ertm_reassembly_sdu(sk, skb, control);
3795 l2cap_pi(sk)->buffer_seq_srej =
3796 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3797 tx_seq = (tx_seq + 1) % 64;
3801 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3803 struct l2cap_pinfo *pi = l2cap_pi(sk);
3804 struct srej_list *l, *tmp;
3807 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3808 if (l->tx_seq == tx_seq) {
3813 control = L2CAP_SUPER_SELECT_REJECT;
3814 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3815 l2cap_send_sframe(pi, control);
3817 list_add_tail(&l->list, SREJ_LIST(sk));
3821 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3823 struct l2cap_pinfo *pi = l2cap_pi(sk);
3824 struct srej_list *new;
3827 while (tx_seq != pi->expected_tx_seq) {
3828 control = L2CAP_SUPER_SELECT_REJECT;
3829 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3830 l2cap_send_sframe(pi, control);
3832 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3833 new->tx_seq = pi->expected_tx_seq;
3834 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3835 list_add_tail(&new->list, SREJ_LIST(sk));
3837 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3840 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3842 struct l2cap_pinfo *pi = l2cap_pi(sk);
3843 u8 tx_seq = __get_txseq(rx_control);
3844 u8 req_seq = __get_reqseq(rx_control);
3845 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3846 int tx_seq_offset, expected_tx_seq_offset;
3847 int num_to_ack = (pi->tx_win/6) + 1;
3850 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3852 if (L2CAP_CTRL_FINAL & rx_control &&
3853 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3854 del_timer(&pi->monitor_timer);
3855 if (pi->unacked_frames > 0)
3856 __mod_retrans_timer();
3857 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3860 pi->expected_ack_seq = req_seq;
3861 l2cap_drop_acked_frames(sk);
3863 if (tx_seq == pi->expected_tx_seq)
3866 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3867 if (tx_seq_offset < 0)
3868 tx_seq_offset += 64;
3870 /* invalid tx_seq */
3871 if (tx_seq_offset >= pi->tx_win) {
3872 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3876 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3879 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3880 struct srej_list *first;
3882 first = list_first_entry(SREJ_LIST(sk),
3883 struct srej_list, list);
3884 if (tx_seq == first->tx_seq) {
3885 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3886 l2cap_check_srej_gap(sk, tx_seq);
3888 list_del(&first->list);
3891 if (list_empty(SREJ_LIST(sk))) {
3892 pi->buffer_seq = pi->buffer_seq_srej;
3893 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3897 struct srej_list *l;
3899 /* duplicated tx_seq */
3900 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3903 list_for_each_entry(l, SREJ_LIST(sk), list) {
3904 if (l->tx_seq == tx_seq) {
3905 l2cap_resend_srejframe(sk, tx_seq);
3909 l2cap_send_srejframe(sk, tx_seq);
3912 expected_tx_seq_offset =
3913 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3914 if (expected_tx_seq_offset < 0)
3915 expected_tx_seq_offset += 64;
3917 /* duplicated tx_seq */
3918 if (tx_seq_offset < expected_tx_seq_offset)
3921 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3923 INIT_LIST_HEAD(SREJ_LIST(sk));
3924 pi->buffer_seq_srej = pi->buffer_seq;
3926 __skb_queue_head_init(SREJ_QUEUE(sk));
3927 __skb_queue_head_init(BUSY_QUEUE(sk));
3928 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3930 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3932 l2cap_send_srejframe(sk, tx_seq);
3934 del_timer(&pi->ack_timer);
3939 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3941 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3942 bt_cb(skb)->tx_seq = tx_seq;
3943 bt_cb(skb)->sar = sar;
3944 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3948 err = l2cap_push_rx_skb(sk, skb, rx_control);
3952 if (rx_control & L2CAP_CTRL_FINAL) {
3953 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3954 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3956 l2cap_retransmit_frames(sk);
3961 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3962 if (pi->num_acked == num_to_ack - 1)
3972 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3974 struct l2cap_pinfo *pi = l2cap_pi(sk);
3976 pi->expected_ack_seq = __get_reqseq(rx_control);
3977 l2cap_drop_acked_frames(sk);
3979 if (rx_control & L2CAP_CTRL_POLL) {
3980 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3981 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3982 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3983 (pi->unacked_frames > 0))
3984 __mod_retrans_timer();
3986 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3987 l2cap_send_srejtail(sk);
3989 l2cap_send_i_or_rr_or_rnr(sk);
3992 } else if (rx_control & L2CAP_CTRL_FINAL) {
3993 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3995 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3996 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3998 l2cap_retransmit_frames(sk);
4001 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4002 (pi->unacked_frames > 0))
4003 __mod_retrans_timer();
4005 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4006 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4009 spin_lock_bh(&pi->send_lock);
4010 l2cap_ertm_send(sk);
4011 spin_unlock_bh(&pi->send_lock);
4016 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4018 struct l2cap_pinfo *pi = l2cap_pi(sk);
4019 u8 tx_seq = __get_reqseq(rx_control);
4021 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4023 pi->expected_ack_seq = tx_seq;
4024 l2cap_drop_acked_frames(sk);
4026 if (rx_control & L2CAP_CTRL_FINAL) {
4027 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4028 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4030 l2cap_retransmit_frames(sk);
4032 l2cap_retransmit_frames(sk);
4034 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4035 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4038 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4040 struct l2cap_pinfo *pi = l2cap_pi(sk);
4041 u8 tx_seq = __get_reqseq(rx_control);
4043 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4045 if (rx_control & L2CAP_CTRL_POLL) {
4046 pi->expected_ack_seq = tx_seq;
4047 l2cap_drop_acked_frames(sk);
4049 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4050 l2cap_retransmit_one_frame(sk, tx_seq);
4052 spin_lock_bh(&pi->send_lock);
4053 l2cap_ertm_send(sk);
4054 spin_unlock_bh(&pi->send_lock);
4056 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4057 pi->srej_save_reqseq = tx_seq;
4058 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4060 } else if (rx_control & L2CAP_CTRL_FINAL) {
4061 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4062 pi->srej_save_reqseq == tx_seq)
4063 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4065 l2cap_retransmit_one_frame(sk, tx_seq);
4067 l2cap_retransmit_one_frame(sk, tx_seq);
4068 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4069 pi->srej_save_reqseq = tx_seq;
4070 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4075 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4077 struct l2cap_pinfo *pi = l2cap_pi(sk);
4078 u8 tx_seq = __get_reqseq(rx_control);
4080 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4081 pi->expected_ack_seq = tx_seq;
4082 l2cap_drop_acked_frames(sk);
4084 if (rx_control & L2CAP_CTRL_POLL)
4085 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4087 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4088 del_timer(&pi->retrans_timer);
4089 if (rx_control & L2CAP_CTRL_POLL)
4090 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4094 if (rx_control & L2CAP_CTRL_POLL)
4095 l2cap_send_srejtail(sk);
4097 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
4100 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4102 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4104 if (L2CAP_CTRL_FINAL & rx_control &&
4105 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4106 del_timer(&l2cap_pi(sk)->monitor_timer);
4107 if (l2cap_pi(sk)->unacked_frames > 0)
4108 __mod_retrans_timer();
4109 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4112 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4113 case L2CAP_SUPER_RCV_READY:
4114 l2cap_data_channel_rrframe(sk, rx_control);
4117 case L2CAP_SUPER_REJECT:
4118 l2cap_data_channel_rejframe(sk, rx_control);
4121 case L2CAP_SUPER_SELECT_REJECT:
4122 l2cap_data_channel_srejframe(sk, rx_control);
4125 case L2CAP_SUPER_RCV_NOT_READY:
4126 l2cap_data_channel_rnrframe(sk, rx_control);
4134 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4137 struct l2cap_pinfo *pi;
4140 int len, next_tx_seq_offset, req_seq_offset;
4142 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4144 BT_DBG("unknown cid 0x%4.4x", cid);
4150 BT_DBG("sk %p, len %d", sk, skb->len);
4152 if (sk->sk_state != BT_CONNECTED)
4156 case L2CAP_MODE_BASIC:
4157 /* If socket recv buffers overflows we drop data here
4158 * which is *bad* because L2CAP has to be reliable.
4159 * But we don't have any other choice. L2CAP doesn't
4160 * provide flow control mechanism. */
4162 if (pi->imtu < skb->len)
4165 if (!sock_queue_rcv_skb(sk, skb))
4169 case L2CAP_MODE_ERTM:
4170 control = get_unaligned_le16(skb->data);
4175 * We can just drop the corrupted I-frame here.
4176 * Receiver will miss it and start proper recovery
4177 * procedures and ask retransmission.
4179 if (l2cap_check_fcs(pi, skb))
4182 if (__is_sar_start(control) && __is_iframe(control))
4185 if (pi->fcs == L2CAP_FCS_CRC16)
4188 if (len > pi->mps) {
4189 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4193 req_seq = __get_reqseq(control);
4194 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4195 if (req_seq_offset < 0)
4196 req_seq_offset += 64;
4198 next_tx_seq_offset =
4199 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4200 if (next_tx_seq_offset < 0)
4201 next_tx_seq_offset += 64;
4203 /* check for invalid req-seq */
4204 if (req_seq_offset > next_tx_seq_offset) {
4205 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4209 if (__is_iframe(control)) {
4211 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4215 l2cap_data_channel_iframe(sk, control, skb);
4218 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4222 l2cap_data_channel_sframe(sk, control, skb);
4227 case L2CAP_MODE_STREAMING:
4228 control = get_unaligned_le16(skb->data);
4232 if (l2cap_check_fcs(pi, skb))
4235 if (__is_sar_start(control))
4238 if (pi->fcs == L2CAP_FCS_CRC16)
4241 if (len > pi->mps || len < 0 || __is_sframe(control))
4244 tx_seq = __get_txseq(control);
4246 if (pi->expected_tx_seq == tx_seq)
4247 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4249 pi->expected_tx_seq = (tx_seq + 1) % 64;
4251 l2cap_streaming_reassembly_sdu(sk, skb, control);
4256 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4270 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4274 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4278 BT_DBG("sk %p, len %d", sk, skb->len);
4280 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4283 if (l2cap_pi(sk)->imtu < skb->len)
4286 if (!sock_queue_rcv_skb(sk, skb))
4298 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4300 struct l2cap_hdr *lh = (void *) skb->data;
4304 skb_pull(skb, L2CAP_HDR_SIZE);
4305 cid = __le16_to_cpu(lh->cid);
4306 len = __le16_to_cpu(lh->len);
4308 if (len != skb->len) {
4313 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4316 case L2CAP_CID_SIGNALING:
4317 l2cap_sig_channel(conn, skb);
4320 case L2CAP_CID_CONN_LESS:
4321 psm = get_unaligned_le16(skb->data);
4323 l2cap_conless_channel(conn, psm, skb);
4327 l2cap_data_channel(conn, cid, skb);
4332 /* ---- L2CAP interface with lower layer (HCI) ---- */
4334 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4336 int exact = 0, lm1 = 0, lm2 = 0;
4337 register struct sock *sk;
4338 struct hlist_node *node;
4340 if (type != ACL_LINK)
4343 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4345 /* Find listening sockets and check their link_mode */
4346 read_lock(&l2cap_sk_list.lock);
4347 sk_for_each(sk, node, &l2cap_sk_list.head) {
4348 if (sk->sk_state != BT_LISTEN)
4351 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4352 lm1 |= HCI_LM_ACCEPT;
4353 if (l2cap_pi(sk)->role_switch)
4354 lm1 |= HCI_LM_MASTER;
4356 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4357 lm2 |= HCI_LM_ACCEPT;
4358 if (l2cap_pi(sk)->role_switch)
4359 lm2 |= HCI_LM_MASTER;
4362 read_unlock(&l2cap_sk_list.lock);
4364 return exact ? lm1 : lm2;
4367 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4369 struct l2cap_conn *conn;
4371 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4373 if (hcon->type != ACL_LINK)
4377 conn = l2cap_conn_add(hcon, status);
4379 l2cap_conn_ready(conn);
4381 l2cap_conn_del(hcon, bt_err(status));
4386 static int l2cap_disconn_ind(struct hci_conn *hcon)
4388 struct l2cap_conn *conn = hcon->l2cap_data;
4390 BT_DBG("hcon %p", hcon);
4392 if (hcon->type != ACL_LINK || !conn)
4395 return conn->disc_reason;
4398 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4400 BT_DBG("hcon %p reason %d", hcon, reason);
4402 if (hcon->type != ACL_LINK)
4405 l2cap_conn_del(hcon, bt_err(reason));
4410 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4412 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4415 if (encrypt == 0x00) {
4416 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4417 l2cap_sock_clear_timer(sk);
4418 l2cap_sock_set_timer(sk, HZ * 5);
4419 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4420 __l2cap_sock_close(sk, ECONNREFUSED);
4422 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4423 l2cap_sock_clear_timer(sk);
4427 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4429 struct l2cap_chan_list *l;
4430 struct l2cap_conn *conn = hcon->l2cap_data;
4436 l = &conn->chan_list;
4438 BT_DBG("conn %p", conn);
4440 read_lock(&l->lock);
4442 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4445 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4450 if (!status && (sk->sk_state == BT_CONNECTED ||
4451 sk->sk_state == BT_CONFIG)) {
4452 l2cap_check_encryption(sk, encrypt);
4457 if (sk->sk_state == BT_CONNECT) {
4459 struct l2cap_conn_req req;
4460 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4461 req.psm = l2cap_pi(sk)->psm;
4463 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4464 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4466 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4467 L2CAP_CONN_REQ, sizeof(req), &req);
4469 l2cap_sock_clear_timer(sk);
4470 l2cap_sock_set_timer(sk, HZ / 10);
4472 } else if (sk->sk_state == BT_CONNECT2) {
4473 struct l2cap_conn_rsp rsp;
4477 sk->sk_state = BT_CONFIG;
4478 result = L2CAP_CR_SUCCESS;
4480 sk->sk_state = BT_DISCONN;
4481 l2cap_sock_set_timer(sk, HZ / 10);
4482 result = L2CAP_CR_SEC_BLOCK;
4485 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4486 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4487 rsp.result = cpu_to_le16(result);
4488 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4489 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4490 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4496 read_unlock(&l->lock);
4501 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4503 struct l2cap_conn *conn = hcon->l2cap_data;
4505 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4508 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4510 if (flags & ACL_START) {
4511 struct l2cap_hdr *hdr;
4515 BT_ERR("Unexpected start frame (len %d)", skb->len);
4516 kfree_skb(conn->rx_skb);
4517 conn->rx_skb = NULL;
4519 l2cap_conn_unreliable(conn, ECOMM);
4523 BT_ERR("Frame is too short (len %d)", skb->len);
4524 l2cap_conn_unreliable(conn, ECOMM);
4528 hdr = (struct l2cap_hdr *) skb->data;
4529 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4531 if (len == skb->len) {
4532 /* Complete frame received */
4533 l2cap_recv_frame(conn, skb);
4537 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4539 if (skb->len > len) {
4540 BT_ERR("Frame is too long (len %d, expected len %d)",
4542 l2cap_conn_unreliable(conn, ECOMM);
4546 /* Allocate skb for the complete frame (with header) */
4547 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4551 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4553 conn->rx_len = len - skb->len;
4555 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4557 if (!conn->rx_len) {
4558 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4559 l2cap_conn_unreliable(conn, ECOMM);
4563 if (skb->len > conn->rx_len) {
4564 BT_ERR("Fragment is too long (len %d, expected %d)",
4565 skb->len, conn->rx_len);
4566 kfree_skb(conn->rx_skb);
4567 conn->rx_skb = NULL;
4569 l2cap_conn_unreliable(conn, ECOMM);
4573 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4575 conn->rx_len -= skb->len;
4577 if (!conn->rx_len) {
4578 /* Complete frame received */
4579 l2cap_recv_frame(conn, conn->rx_skb);
4580 conn->rx_skb = NULL;
4589 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4592 struct hlist_node *node;
4594 read_lock_bh(&l2cap_sk_list.lock);
4596 sk_for_each(sk, node, &l2cap_sk_list.head) {
4597 struct l2cap_pinfo *pi = l2cap_pi(sk);
4599 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4600 batostr(&bt_sk(sk)->src),
4601 batostr(&bt_sk(sk)->dst),
4602 sk->sk_state, __le16_to_cpu(pi->psm),
4604 pi->imtu, pi->omtu, pi->sec_level);
4607 read_unlock_bh(&l2cap_sk_list.lock);
4612 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4614 return single_open(file, l2cap_debugfs_show, inode->i_private);
4617 static const struct file_operations l2cap_debugfs_fops = {
4618 .open = l2cap_debugfs_open,
4620 .llseek = seq_lseek,
4621 .release = single_release,
4624 static struct dentry *l2cap_debugfs;
4626 static const struct proto_ops l2cap_sock_ops = {
4627 .family = PF_BLUETOOTH,
4628 .owner = THIS_MODULE,
4629 .release = l2cap_sock_release,
4630 .bind = l2cap_sock_bind,
4631 .connect = l2cap_sock_connect,
4632 .listen = l2cap_sock_listen,
4633 .accept = l2cap_sock_accept,
4634 .getname = l2cap_sock_getname,
4635 .sendmsg = l2cap_sock_sendmsg,
4636 .recvmsg = l2cap_sock_recvmsg,
4637 .poll = bt_sock_poll,
4638 .ioctl = bt_sock_ioctl,
4639 .mmap = sock_no_mmap,
4640 .socketpair = sock_no_socketpair,
4641 .shutdown = l2cap_sock_shutdown,
4642 .setsockopt = l2cap_sock_setsockopt,
4643 .getsockopt = l2cap_sock_getsockopt
4646 static const struct net_proto_family l2cap_sock_family_ops = {
4647 .family = PF_BLUETOOTH,
4648 .owner = THIS_MODULE,
4649 .create = l2cap_sock_create,
4652 static struct hci_proto l2cap_hci_proto = {
4654 .id = HCI_PROTO_L2CAP,
4655 .connect_ind = l2cap_connect_ind,
4656 .connect_cfm = l2cap_connect_cfm,
4657 .disconn_ind = l2cap_disconn_ind,
4658 .disconn_cfm = l2cap_disconn_cfm,
4659 .security_cfm = l2cap_security_cfm,
4660 .recv_acldata = l2cap_recv_acldata
4663 static int __init l2cap_init(void)
4667 err = proto_register(&l2cap_proto, 0);
4671 _busy_wq = create_singlethread_workqueue("l2cap");
4675 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4677 BT_ERR("L2CAP socket registration failed");
4681 err = hci_register_proto(&l2cap_hci_proto);
4683 BT_ERR("L2CAP protocol registration failed");
4684 bt_sock_unregister(BTPROTO_L2CAP);
4689 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4690 bt_debugfs, NULL, &l2cap_debugfs_fops);
4692 BT_ERR("Failed to create L2CAP debug file");
4695 BT_INFO("L2CAP ver %s", VERSION);
4696 BT_INFO("L2CAP socket layer initialized");
4701 proto_unregister(&l2cap_proto);
4705 static void __exit l2cap_exit(void)
4707 debugfs_remove(l2cap_debugfs);
4709 flush_workqueue(_busy_wq);
4710 destroy_workqueue(_busy_wq);
4712 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4713 BT_ERR("L2CAP socket unregistration failed");
4715 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4716 BT_ERR("L2CAP protocol unregistration failed");
4718 proto_unregister(&l2cap_proto);
4721 void l2cap_load(void)
4723 /* Dummy function to trigger automatic L2CAP module loading by
4724 * other modules that use L2CAP sockets but don't use any other
4725 * symbols from it. */
4727 EXPORT_SYMBOL(l2cap_load);
4729 module_init(l2cap_init);
4730 module_exit(l2cap_exit);
4732 module_param(enable_ertm, bool, 0644);
4733 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4735 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4736 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4737 MODULE_VERSION(VERSION);
4738 MODULE_LICENSE("GPL");
4739 MODULE_ALIAS("bt-proto-0");