2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
/* ERTM (Enhanced Retransmission Mode) defaults on only when the
 * extended-features config option is set; otherwise basic mode only.
 * NOTE(review): the #else/#endif of this conditional are elided in
 * this dump -- both definitions cannot be active at once. */
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
61 static int enable_ertm = 0;
/* Module-wide ERTM tuning knobs, used as per-socket defaults. */
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops;
/* Workqueue used by the local-busy handling (see l2cap_busy_work). */
71 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
73 static struct bt_sock_list l2cap_sk_list = {
74 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
77 static void l2cap_busy_work(struct work_struct *work);
79 static void __l2cap_sock_close(struct sock *sk, int reason);
80 static void l2cap_sock_close(struct sock *sk);
81 static void l2cap_sock_kill(struct sock *sk);
83 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
84 u8 code, u8 ident, u16 dlen, void *data);
86 /* ---- L2CAP timers ---- */
/* Socket timer callback: choose an error reason from the socket
 * state and close the socket.  Runs from timer context, so it must
 * not sleep. */
87 static void l2cap_sock_timeout(unsigned long arg)
89 struct sock *sk = (struct sock *) arg;
92 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A connect that timed out while connected/configuring, or while
 * waiting on non-SDP security, is reported as ECONNREFUSED; other
 * states presumably keep a default reason (elided here). */
96 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
97 reason = ECONNREFUSED;
98 else if (sk->sk_state == BT_CONNECT &&
99 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
100 reason = ECONNREFUSED;
104 __l2cap_sock_close(sk, reason);
/* Arm the per-socket timer to fire "timeout" jiffies from now. */
112 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
114 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
115 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel the per-socket timer, if pending. */
118 static void l2cap_sock_clear_timer(struct sock *sk)
120 BT_DBG("sock %p state %d", sk, sk->sk_state);
121 sk_stop_timer(sk, &sk->sk_timer);
124 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list for a matching
 * destination CID.  Caller must hold the list lock. */
125 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
128 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
129 if (l2cap_pi(s)->dcid == cid)
/* Same scan, keyed on the source CID.  Caller holds the lock. */
135 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->scid == cid)
145 /* Find channel with given SCID.
146 * Returns locked socket */
147 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
151 s = __l2cap_get_chan_by_scid(l, cid);
154 read_unlock(&l->lock);
/* Scan for the channel whose outstanding request used this signalling
 * identifier.  Caller must hold the list lock. */
158 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
161 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
162 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
168 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
172 s = __l2cap_get_chan_by_ident(l, ident);
175 read_unlock(&l->lock);
/* Allocate the first free CID in the dynamic range by probing the
 * channel list; O(n) per candidate but the list is short. */
179 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
181 u16 cid = L2CAP_CID_DYN_START;
183 for (; cid < L2CAP_CID_DYN_END; cid++) {
184 if (!__l2cap_get_chan_by_scid(l, cid))
/* Insert sk at the head of the doubly linked channel list.
 * Caller must hold the list write lock. */
191 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
196 l2cap_pi(l->head)->prev_c = sk;
198 l2cap_pi(sk)->next_c = l->head;
199 l2cap_pi(sk)->prev_c = NULL;
/* Unlink sk from the channel list; takes the list lock itself. */
203 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
205 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
207 write_lock_bh(&l->lock);
212 l2cap_pi(next)->prev_c = prev;
214 l2cap_pi(prev)->next_c = next;
215 write_unlock_bh(&l->lock);
/* Attach a socket to an ACL connection: assign CIDs according to the
 * socket type and link it into the connection's channel list.
 * Caller holds the channel-list write lock (see l2cap_chan_add()). */
220 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
222 struct l2cap_chan_list *l = &conn->chan_list;
224 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
225 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: default disconnect reason -- presumably "remote user
 * terminated connection"; TODO confirm against the HCI error list. */
227 conn->disc_reason = 0x13;
229 l2cap_pi(sk)->conn = conn;
231 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
232 /* Alloc CID for connection-oriented socket */
233 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
234 } else if (sk->sk_type == SOCK_DGRAM) {
235 /* Connectionless socket */
236 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
238 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 /* Raw socket can send/recv signalling messages only */
241 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
243 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
246 __l2cap_chan_link(l, sk);
/* New child of a listening socket: queue it for accept(). */
249 bt_accept_enqueue(parent, sk);
253 * Must be called on the locked socket. */
/* Detach a channel from its connection, drop the hci_conn reference,
 * mark the socket closed/zapped, and notify waiters.  If the socket
 * was a not-yet-accepted child, also remove it from the accept queue
 * and wake the listening parent. */
254 static void l2cap_chan_del(struct sock *sk, int err)
256 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
257 struct sock *parent = bt_sk(sk)->parent;
259 l2cap_sock_clear_timer(sk);
261 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
264 /* Unlink from channel list */
265 l2cap_chan_unlink(&conn->chan_list, sk);
266 l2cap_pi(sk)->conn = NULL;
267 hci_conn_put(conn->hcon);
270 sk->sk_state = BT_CLOSED;
271 sock_set_flag(sk, SOCK_ZAPPED);
277 bt_accept_unlink(sk);
278 parent->sk_data_ready(parent, 0);
280 sk->sk_state_change(sk);
283 /* Service level security */
/* Map the channel's PSM and security level to an HCI authentication
 * type, then ask the HCI layer to enforce it on the ACL link.
 * PSM 0x0001 (SDP) never pairs, so it only gets no-bonding types. */
284 static inline int l2cap_check_security(struct sock *sk)
286 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
289 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
290 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
291 auth_type = HCI_AT_NO_BONDING_MITM;
293 auth_type = HCI_AT_NO_BONDING;
/* SDP traffic is downgraded from LOW to the dedicated SDP level. */
295 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
296 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
298 switch (l2cap_pi(sk)->sec_level) {
299 case BT_SECURITY_HIGH:
300 auth_type = HCI_AT_GENERAL_BONDING_MITM;
302 case BT_SECURITY_MEDIUM:
303 auth_type = HCI_AT_GENERAL_BONDING;
306 auth_type = HCI_AT_NO_BONDING;
311 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this
 * connection, wrapping within the kernel-reserved range. */
315 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
319 /* Get next available identifier.
320 * 1 - 128 are used by kernel.
321 * 129 - 199 are reserved.
322 * 200 - 254 are used by utilities like l2ping, etc.
325 spin_lock_bh(&conn->lock);
327 if (++conn->tx_ident > 128)
332 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it out on the ACL link. */
337 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
339 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
341 BT_DBG("code 0x%2.2x", code);
346 hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM S-frame carrying "control", consuming any
 * pending F-bit / P-bit state and appending an FCS when the channel
 * uses CRC16. */
349 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
352 struct l2cap_hdr *lh;
353 struct l2cap_conn *conn = pi->conn;
354 int count, hlen = L2CAP_HDR_SIZE + 2;
/* hlen grows by 2 for the FCS (assignment elided in this dump). */
356 if (pi->fcs == L2CAP_FCS_CRC16)
359 BT_DBG("pi %p, control 0x%2.2x", pi, control);
361 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE;
364 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL;
366 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
369 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL;
371 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
374 skb = bt_skb_alloc(count, GFP_ATOMIC);
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before itself (count - 2 bytes). */
383 if (pi->fcs == L2CAP_FCS_CRC16) {
384 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2));
388 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR, or RNR while locally busy, advertising our buffer_seq as
 * the ReqSeq acknowledgement. */
391 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
393 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
394 control |= L2CAP_SUPER_RCV_NOT_READY;
395 pi->conn_state |= L2CAP_CONN_RNR_SENT;
397 control |= L2CAP_SUPER_RCV_READY;
399 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
401 l2cap_send_sframe(pi, control);
/* Kick off channel establishment: if the remote feature mask is
 * already known, send a Connection Request (once security allows);
 * otherwise first issue an Information Request for the feature mask
 * and arm the info timer. */
404 static void l2cap_do_start(struct sock *sk)
406 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
408 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
409 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
412 if (l2cap_check_security(sk)) {
413 struct l2cap_conn_req req;
414 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
415 req.psm = l2cap_pi(sk)->psm;
417 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
419 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
420 L2CAP_CONN_REQ, sizeof(req), &req);
423 struct l2cap_info_req req;
424 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
427 conn->info_ident = l2cap_get_ident(conn);
429 mod_timer(&conn->info_timer, jiffies +
430 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
432 l2cap_send_cmd(conn, conn->info_ident,
433 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnection Request for this channel's CID pair. */
437 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
439 struct l2cap_disconn_req req;
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req);
447 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * outgoing channels in BT_CONNECT get a Connection Request (security
 * permitting); incoming channels in BT_CONNECT2 get their pending
 * Connection Response (success, authorization-pending for deferred
 * setup, or authentication-pending). */
448 static void l2cap_conn_start(struct l2cap_conn *conn)
450 struct l2cap_chan_list *l = &conn->chan_list;
453 BT_DBG("conn %p", conn);
457 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part. */
460 if (sk->sk_type != SOCK_SEQPACKET &&
461 sk->sk_type != SOCK_STREAM) {
466 if (sk->sk_state == BT_CONNECT) {
467 if (l2cap_check_security(sk)) {
468 struct l2cap_conn_req req;
469 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
470 req.psm = l2cap_pi(sk)->psm;
472 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
474 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
475 L2CAP_CONN_REQ, sizeof(req), &req);
477 } else if (sk->sk_state == BT_CONNECT2) {
478 struct l2cap_conn_rsp rsp;
/* In the response our scid becomes the peer's dcid and
 * vice versa. */
479 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
480 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
482 if (l2cap_check_security(sk)) {
483 if (bt_sk(sk)->defer_setup) {
484 struct sock *parent = bt_sk(sk)->parent;
485 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
486 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Let the listener decide before we complete setup. */
487 parent->sk_data_ready(parent, 0);
490 sk->sk_state = BT_CONFIG;
491 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
492 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
495 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
496 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
499 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
500 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
506 read_unlock(&l->lock);
/* ACL link is up: non-connection-oriented sockets become connected
 * immediately; connection-oriented ones in BT_CONNECT continue their
 * setup (continuation elided in this dump). */
509 static void l2cap_conn_ready(struct l2cap_conn *conn)
511 struct l2cap_chan_list *l = &conn->chan_list;
514 BT_DBG("conn %p", conn);
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
521 if (sk->sk_type != SOCK_SEQPACKET &&
522 sk->sk_type != SOCK_STREAM) {
523 l2cap_sock_clear_timer(sk);
524 sk->sk_state = BT_CONNECTED;
525 sk->sk_state_change(sk);
526 } else if (sk->sk_state == BT_CONNECT)
532 read_unlock(&l->lock);
535 /* Notify sockets that we can no longer guarantee reliability */
/* Report err on every channel that asked for reliable delivery
 * (force_reliable); action on each socket is elided in this dump. */
536 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
538 struct l2cap_chan_list *l = &conn->chan_list;
541 BT_DBG("conn %p", conn);
545 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
546 if (l2cap_pi(sk)->force_reliable)
550 read_unlock(&l->lock);
/* Information Request timed out: give up waiting for the remote
 * feature mask, mark it done, and let pending channels proceed. */
553 static void l2cap_info_timeout(unsigned long arg)
555 struct l2cap_conn *conn = (void *) arg;
557 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
558 conn->info_ident = 0;
560 l2cap_conn_start(conn);
/* Get or create the L2CAP connection object hanging off an ACL
 * hci_conn.  Returns the existing conn when one is already attached.
 * GFP_ATOMIC because this can run from interrupt context. */
563 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
565 struct l2cap_conn *conn = hcon->l2cap_data;
570 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
574 hcon->l2cap_data = conn;
577 BT_DBG("hcon %p conn %p", hcon, conn);
579 conn->mtu = hcon->hdev->acl_mtu;
580 conn->src = &hcon->hdev->bdaddr;
581 conn->dst = &hcon->dst;
585 spin_lock_init(&conn->lock);
586 rwlock_init(&conn->chan_list.lock);
588 setup_timer(&conn->info_timer, l2cap_info_timeout,
589 (unsigned long) conn);
/* Default disconnect reason -- same 0x13 as __l2cap_chan_add(). */
591 conn->disc_reason = 0x13;
/* Tear down an L2CAP connection: free any partial reassembly skb,
 * close every remaining channel with err, stop the info timer and
 * detach from the hci_conn. */
596 static void l2cap_conn_del(struct hci_conn *hcon, int err)
598 struct l2cap_conn *conn = hcon->l2cap_data;
604 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
606 kfree_skb(conn->rx_skb);
/* Repeatedly kill the list head until the channel list is empty. */
609 while ((sk = conn->chan_list.head)) {
611 l2cap_chan_del(sk, err);
616 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
617 del_timer_sync(&conn->info_timer);
619 hcon->l2cap_data = NULL;
/* Locked wrapper for __l2cap_chan_add(). */
623 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
625 struct l2cap_chan_list *l = &conn->chan_list;
626 write_lock_bh(&l->lock);
627 __l2cap_chan_add(conn, sk, parent);
628 write_unlock_bh(&l->lock);
631 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this PSM and source address.
 * Caller must hold l2cap_sk_list.lock. */
632 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
635 struct hlist_node *node;
636 sk_for_each(sk, node, &l2cap_sk_list.head)
637 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
644 /* Find socket with psm and source bdaddr.
645 * Returns closest match.
/* An exact source-address match wins; a BDADDR_ANY listener is kept
 * in sk1 as the fallback.  Caller holds l2cap_sk_list.lock. */
647 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
649 struct sock *sk = NULL, *sk1 = NULL;
650 struct hlist_node *node;
652 sk_for_each(sk, node, &l2cap_sk_list.head) {
653 if (state && sk->sk_state != state)
656 if (l2cap_pi(sk)->psm == psm) {
658 if (!bacmp(&bt_sk(sk)->src, src))
662 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke early on an exact match. */
666 return node ? sk : sk1;
669 /* Find socket with given address (psm, src).
670 * Returns locked socket */
671 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
674 read_lock(&l2cap_sk_list.lock);
675 s = __l2cap_get_sock_by_psm(state, psm, src);
678 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: drop any queued receive/transmit data. */
682 static void l2cap_sock_destruct(struct sock *sk)
686 skb_queue_purge(&sk->sk_receive_queue);
687 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then
 * mark the listener itself closed and zapped. */
690 static void l2cap_sock_cleanup_listen(struct sock *parent)
694 BT_DBG("parent %p", parent);
696 /* Close not yet accepted channels */
697 while ((sk = bt_accept_dequeue(parent, NULL)))
698 l2cap_sock_close(sk);
700 parent->sk_state = BT_CLOSED;
701 sock_set_flag(parent, SOCK_ZAPPED);
704 /* Kill socket (only if zapped and orphan)
705 * Must be called on unlocked socket.
707 static void l2cap_sock_kill(struct sock *sk)
/* Bail out unless the socket is both zapped and orphaned. */
709 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
712 BT_DBG("sk %p state %d", sk, sk->sk_state);
714 /* Kill poor orphan */
715 bt_sock_unlink(&l2cap_sk_list, sk);
716 sock_set_flag(sk, SOCK_DEAD);
/* State-machine driven close.  Connected channels get a Disconnection
 * Request and wait in BT_DISCONN; half-open incoming channels answer
 * their pending Connection Request with a failure result first. */
720 static void __l2cap_sock_close(struct sock *sk, int reason)
722 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket)
724 switch (sk->sk_state) {
726 l2cap_sock_cleanup_listen(sk);
/* BT_CONNECTED / BT_CONFIG: start an orderly disconnect. */
731 if (sk->sk_type == SOCK_SEQPACKET ||
732 sk->sk_type == SOCK_STREAM) {
733 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
735 sk->sk_state = BT_DISCONN;
736 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
737 l2cap_send_disconn_req(conn, sk);
739 l2cap_chan_del(sk, reason);
/* BT_CONNECT2: refuse the pending incoming connection. */
743 if (sk->sk_type == SOCK_SEQPACKET ||
744 sk->sk_type == SOCK_STREAM) {
745 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
746 struct l2cap_conn_rsp rsp;
/* Deferred-setup rejections are reported as a security block,
 * everything else as an unknown PSM. */
749 if (bt_sk(sk)->defer_setup)
750 result = L2CAP_CR_SEC_BLOCK;
752 result = L2CAP_CR_BAD_PSM;
754 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
755 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
756 rsp.result = cpu_to_le16(result);
757 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
758 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
759 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
761 l2cap_chan_del(sk, reason);
766 l2cap_chan_del(sk, reason);
/* Default: no connection to tear down, just zap the socket. */
770 sock_set_flag(sk, SOCK_ZAPPED);
775 /* Must be called on unlocked socket. */
776 static void l2cap_sock_close(struct sock *sk)
778 l2cap_sock_clear_timer(sk);
780 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP state.  A child socket inherits its
 * configuration from the listening parent; a fresh socket gets the
 * module-wide defaults.  Also primes the ERTM queues and SREJ list. */
785 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
787 struct l2cap_pinfo *pi = l2cap_pi(sk);
792 sk->sk_type = parent->sk_type;
793 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
/* Inherit parent's negotiated options verbatim. */
795 pi->imtu = l2cap_pi(parent)->imtu;
796 pi->omtu = l2cap_pi(parent)->omtu;
797 pi->mode = l2cap_pi(parent)->mode;
798 pi->fcs = l2cap_pi(parent)->fcs;
799 pi->max_tx = l2cap_pi(parent)->max_tx;
800 pi->tx_win = l2cap_pi(parent)->tx_win;
801 pi->sec_level = l2cap_pi(parent)->sec_level;
802 pi->role_switch = l2cap_pi(parent)->role_switch;
803 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: module defaults.  SOCK_STREAM implies ERTM when the
 * module allows it, otherwise basic mode. */
805 pi->imtu = L2CAP_DEFAULT_MTU;
807 if (enable_ertm && sk->sk_type == SOCK_STREAM)
808 pi->mode = L2CAP_MODE_ERTM;
810 pi->mode = L2CAP_MODE_BASIC;
811 pi->max_tx = max_transmit;
812 pi->fcs = L2CAP_FCS_CRC16;
813 pi->tx_win = tx_window;
814 pi->sec_level = BT_SECURITY_LOW;
816 pi->force_reliable = 0;
819 /* Default config options */
821 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
822 skb_queue_head_init(TX_QUEUE(sk));
823 skb_queue_head_init(SREJ_QUEUE(sk));
824 skb_queue_head_init(BUSY_QUEUE(sk));
825 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sock allocations for l2cap_pinfo. */
828 static struct proto l2cap_proto = {
830 .owner = THIS_MODULE,
831 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP socket, arm its timer
 * callback, and link it into the global socket list. */
834 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
838 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
842 sock_init_data(sock, sk);
843 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
845 sk->sk_destruct = l2cap_sock_destruct;
846 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
848 sock_reset_flag(sk, SOCK_ZAPPED);
850 sk->sk_protocol = proto;
851 sk->sk_state = BT_OPEN;
853 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
855 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW
 * for user-created raw sockets, then allocate and initialize. */
859 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
864 BT_DBG("sock %p", sock);
866 sock->state = SS_UNCONNECTED;
868 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
869 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
870 return -ESOCKTNOSUPPORT;
/* kern is set for in-kernel creation, which skips the cap check. */
872 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
875 sock->ops = &l2cap_sock_ops;
877 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
881 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, require
 * CAP_NET_BIND_SERVICE for privileged PSMs, reject duplicate
 * psm/source bindings, and record the source address. */
885 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
887 struct sock *sk = sock->sk;
888 struct sockaddr_l2 la;
893 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Zero then copy min(alen, sizeof la) -- tolerates short sockaddrs. */
896 memset(&la, 0, sizeof(la));
897 len = min_t(unsigned int, sizeof(la), alen);
898 memcpy(&la, addr, len);
905 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; binding them is privileged. */
910 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
911 !capable(CAP_NET_BIND_SERVICE)) {
916 write_lock_bh(&l2cap_sk_list.lock);
918 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
921 /* Save source address */
922 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
923 l2cap_pi(sk)->psm = la.l2_psm;
924 l2cap_pi(sk)->sport = la.l2_psm;
925 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) default to SDP-level security. */
927 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
928 __le16_to_cpu(la.l2_psm) == 0x0003)
929 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
932 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish (or join) the ACL link to the destination and attach this
 * socket as a channel on it.  Picks the HCI authentication type from
 * socket type, PSM and security level, mirroring
 * l2cap_check_security(). */
939 static int l2cap_do_connect(struct sock *sk)
941 bdaddr_t *src = &bt_sk(sk)->src;
942 bdaddr_t *dst = &bt_sk(sk)->dst;
943 struct l2cap_conn *conn;
944 struct hci_conn *hcon;
945 struct hci_dev *hdev;
949 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
952 hdev = hci_get_route(dst, src);
954 return -EHOSTUNREACH;
956 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated-bonding auth types. */
960 if (sk->sk_type == SOCK_RAW) {
961 switch (l2cap_pi(sk)->sec_level) {
962 case BT_SECURITY_HIGH:
963 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
965 case BT_SECURITY_MEDIUM:
966 auth_type = HCI_AT_DEDICATED_BONDING;
969 auth_type = HCI_AT_NO_BONDING;
/* SDP PSM never bonds. */
972 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
973 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
974 auth_type = HCI_AT_NO_BONDING_MITM;
976 auth_type = HCI_AT_NO_BONDING;
978 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
979 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
/* Everything else: general bonding per security level. */
981 switch (l2cap_pi(sk)->sec_level) {
982 case BT_SECURITY_HIGH:
983 auth_type = HCI_AT_GENERAL_BONDING_MITM;
985 case BT_SECURITY_MEDIUM:
986 auth_type = HCI_AT_GENERAL_BONDING;
989 auth_type = HCI_AT_NO_BONDING;
994 hcon = hci_connect(hdev, ACL_LINK, dst,
995 l2cap_pi(sk)->sec_level, auth_type);
999 conn = l2cap_conn_add(hcon, 0);
1007 /* Update source addr of the socket */
1008 bacpy(src, conn->src);
1010 l2cap_chan_add(conn, sk, NULL);
1012 sk->sk_state = BT_CONNECT;
1013 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: non-CO sockets complete immediately; CO sockets
 * continue via l2cap_do_start (continuation elided here). */
1015 if (hcon->state == BT_CONNECTED) {
1016 if (sk->sk_type != SOCK_SEQPACKET &&
1017 sk->sk_type != SOCK_STREAM) {
1018 l2cap_sock_clear_timer(sk);
1019 sk->sk_state = BT_CONNECTED;
1025 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and channel mode, record the
 * destination, kick off l2cap_do_connect() and wait (honouring
 * O_NONBLOCK) for BT_CONNECTED. */
1030 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1032 struct sock *sk = sock->sk;
1033 struct sockaddr_l2 la;
1036 BT_DBG("sk %p", sk);
1038 if (!addr || alen < sizeof(addr->sa_family) ||
1039 addr->sa_family != AF_BLUETOOTH)
1042 memset(&la, 0, sizeof(la));
1043 len = min_t(unsigned int, sizeof(la), alen);
1044 memcpy(&la, addr, len);
1051 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
/* Only basic mode is always allowed; ERTM/streaming availability
 * presumably depends on enable_ertm (check elided in this dump). */
1057 switch (l2cap_pi(sk)->mode) {
1058 case L2CAP_MODE_BASIC:
1060 case L2CAP_MODE_ERTM:
1061 case L2CAP_MODE_STREAMING:
1070 switch (sk->sk_state) {
1074 /* Already connecting */
1078 /* Already connected */
1091 /* Set destination address and psm */
1092 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1093 l2cap_pi(sk)->psm = la.l2_psm;
1095 err = l2cap_do_connect(sk);
1100 err = bt_sock_wait_state(sk, BT_CONNECTED,
1101 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound connection-oriented sockets may listen.  If
 * no PSM was bound, auto-allocate an odd dynamic PSM (0x1001..) that
 * is free for this source address. */
1107 static int l2cap_sock_listen(struct socket *sock, int backlog)
1109 struct sock *sk = sock->sk;
1112 BT_DBG("sk %p backlog %d", sk, backlog);
1116 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1117 || sk->sk_state != BT_BOUND) {
1122 switch (l2cap_pi(sk)->mode) {
1123 case L2CAP_MODE_BASIC:
1125 case L2CAP_MODE_ERTM:
1126 case L2CAP_MODE_STREAMING:
1135 if (!l2cap_pi(sk)->psm) {
1136 bdaddr_t *src = &bt_sk(sk)->src;
1141 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd, hence the += 2 stride. */
1143 for (psm = 0x1001; psm < 0x1100; psm += 2)
1144 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1145 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1146 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1151 write_unlock_bh(&l2cap_sk_list.lock);
1157 sk->sk_max_ack_backlog = backlog;
1158 sk->sk_ack_backlog = 0;
1159 sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one sleep loop on the listener's accept
 * queue.  The socket lock is dropped around schedule_timeout() and
 * re-taken afterwards; state is revalidated each iteration. */
1166 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1168 DECLARE_WAITQUEUE(wait, current);
1169 struct sock *sk = sock->sk, *nsk;
1173 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1175 if (sk->sk_state != BT_LISTEN) {
1180 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1182 BT_DBG("sk %p timeo %ld", sk, timeo);
1184 /* Wait for an incoming connection. (wake-one). */
1185 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1186 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1187 set_current_state(TASK_INTERRUPTIBLE);
1194 timeo = schedule_timeout(timeo);
1195 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Listener may have been closed while we slept. */
1197 if (sk->sk_state != BT_LISTEN) {
1202 if (signal_pending(current)) {
1203 err = sock_intr_errno(timeo);
1207 set_current_state(TASK_RUNNING);
1208 remove_wait_queue(sk_sleep(sk), &wait);
1213 newsock->state = SS_CONNECTED;
1215 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer
 * (psm/dst/dcid) or local (sport/src/scid) endpoint. */
1222 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1224 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1225 struct sock *sk = sock->sk;
1227 BT_DBG("sock %p, sk %p", sock, sk);
1229 addr->sa_family = AF_BLUETOOTH;
1230 *len = sizeof(struct sockaddr_l2);
1233 la->l2_psm = l2cap_pi(sk)->psm;
1234 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1235 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1237 la->l2_psm = l2cap_pi(sk)->sport;
1238 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1239 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: retry the poll until remote_max_tx retries are
 * exhausted, then disconnect the channel. */
1245 static void l2cap_monitor_timeout(unsigned long arg)
1247 struct sock *sk = (void *) arg;
1250 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1251 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1256 l2cap_pi(sk)->retry_count++;
1257 __mod_monitor_timer();
/* Poll the peer with an RR/RNR carrying the P bit. */
1259 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor timer, enter the
 * WAIT_F state, and poll the peer. */
1263 static void l2cap_retrans_timeout(unsigned long arg)
1265 struct sock *sk = (void *) arg;
1268 l2cap_pi(sk)->retry_count = 1;
1269 __mod_monitor_timer();
1271 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1273 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Pop acknowledged I-frames off the transmit queue until we reach
 * expected_ack_seq or run out of unacked frames; stop the
 * retransmission timer once everything is acked. */
1277 static void l2cap_drop_acked_frames(struct sock *sk)
1279 struct sk_buff *skb;
1281 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1282 l2cap_pi(sk)->unacked_frames) {
1283 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1286 skb = skb_dequeue(TX_QUEUE(sk));
1289 l2cap_pi(sk)->unacked_frames--;
1292 if (!l2cap_pi(sk)->unacked_frames)
1293 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the HCI layer for transmission on the ACL link. */
1298 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1300 struct l2cap_pinfo *pi = l2cap_pi(sk);
1302 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1304 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: clone each queued frame, stamp TxSeq into
 * the control field, recompute the FCS over the clone, send it, and
 * drop the original (streaming mode never retransmits). */
1307 static int l2cap_streaming_send(struct sock *sk)
1309 struct sk_buff *skb, *tx_skb;
1310 struct l2cap_pinfo *pi = l2cap_pi(sk);
1313 while ((skb = sk->sk_send_head)) {
1314 tx_skb = skb_clone(skb, GFP_ATOMIC);
1316 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1317 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1318 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1320 if (pi->fcs == L2CAP_FCS_CRC16) {
1321 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1322 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1325 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 per the ERTM control field. */
1327 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1329 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1330 sk->sk_send_head = NULL;
1332 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1334 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame with the given TxSeq: locate it in
 * the transmit queue, enforce remote_max_tx, refresh ReqSeq/TxSeq in
 * the control field of a clone, recompute the FCS and resend. */
1340 static void l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1342 struct l2cap_pinfo *pi = l2cap_pi(sk);
1343 struct sk_buff *skb, *tx_skb;
1346 skb = skb_peek(TX_QUEUE(sk));
1351 if (bt_cb(skb)->tx_seq == tx_seq)
1354 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1357 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
/* Too many retransmissions of this frame: give up on the channel. */
1359 if (pi->remote_max_tx &&
1360 bt_cb(skb)->retries == pi->remote_max_tx) {
1361 l2cap_send_disconn_req(pi->conn, sk);
1365 tx_skb = skb_clone(skb, GFP_ATOMIC);
1366 bt_cb(skb)->retries++;
1367 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1368 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1369 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1370 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1372 if (pi->fcs == L2CAP_FCS_CRC16) {
1373 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1374 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1377 l2cap_do_send(sk, tx_skb);
/* ERTM transmit loop: while there is data, the tx window is open and
 * the remote is not busy, clone the next frame, stamp ReqSeq/TxSeq
 * (and F bit if pending), recompute the FCS, send it, and arm the
 * retransmission timer.  Frames stay queued until acknowledged. */
1380 static int l2cap_ertm_send(struct sock *sk)
1382 struct sk_buff *skb, *tx_skb;
1383 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* While waiting for an F-bit response we must not send new data. */
1387 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1390 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1391 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1393 if (pi->remote_max_tx &&
1394 bt_cb(skb)->retries == pi->remote_max_tx) {
1395 l2cap_send_disconn_req(pi->conn, sk);
1399 tx_skb = skb_clone(skb, GFP_ATOMIC);
1401 bt_cb(skb)->retries++;
1403 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1404 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1405 control |= L2CAP_CTRL_FINAL;
1406 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1408 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1409 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1410 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/written through skb->data while the
 * frame sent is tx_skb.  skb_clone() shares the data buffer, so the
 * bytes are the same, but l2cap_retransmit_frame() uses tx_skb->data
 * here -- consider aligning the two for consistency. */
1413 if (pi->fcs == L2CAP_FCS_CRC16) {
1414 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1415 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1418 l2cap_do_send(sk, tx_skb);
1420 __mod_retrans_timer();
1422 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1423 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1425 pi->unacked_frames++;
1428 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1429 sk->sk_send_head = NULL;
1431 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received I-frames: send RNR while locally busy;
 * otherwise try to piggyback the ack on pending data via
 * l2cap_ertm_send(), falling back to an explicit RR. */
1439 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1441 struct sock *sk = (struct sock *)pi;
1444 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1446 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1447 control |= L2CAP_SUPER_RCV_NOT_READY;
1448 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1449 l2cap_send_sframe(pi, control);
/* ertm_send() == 0 means nothing was piggybacked; ack explicitly. */
1451 } else if (l2cap_ertm_send(sk) == 0) {
1452 control |= L2CAP_SUPER_RCV_READY;
1453 l2cap_send_sframe(pi, control);
/* Send an SREJ S-frame with the F bit for the last entry on the
 * socket's SREJ list. */
1457 static void l2cap_send_srejtail(struct sock *sk)
1459 struct srej_list *tail;
1462 control = L2CAP_SUPER_SELECT_REJECT;
1463 control |= L2CAP_CTRL_FINAL;
1465 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1466 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1468 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user data from msg into skb; any bytes beyond "count" go into
 * an MTU-sized fragment chain hung off skb's frag_list. */
1471 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1473 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1474 struct sk_buff **frag;
1477 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1483 /* Continuation fragments (no L2CAP header) */
1484 frag = &skb_shinfo(skb)->frag_list;
1486 count = min_t(unsigned int, conn->mtu, len);
1488 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1491 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1497 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * then the payload copied from the user iovec. */
1503 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1505 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1506 struct sk_buff *skb;
1507 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1508 struct l2cap_hdr *lh;
1510 BT_DBG("sk %p len %d", sk, (int)len);
1512 count = min_t(unsigned int, (conn->mtu - hlen), len);
1513 skb = bt_skb_send_alloc(sk, count + hlen,
1514 msg->msg_flags & MSG_DONTWAIT, &err);
1516 return ERR_PTR(-ENOMEM);
1518 /* Create L2CAP header */
1519 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1520 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1521 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1522 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1524 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1525 if (unlikely(err < 0)) {
1527 return ERR_PTR(err);
/*
 * Build a basic-mode PDU: plain L2CAP header followed by the user payload,
 * fragmented via l2cap_skbuff_fromiovec() if it exceeds the link MTU.
 */
1532 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1534 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1535 struct sk_buff *skb;
1536 int err, count, hlen = L2CAP_HDR_SIZE;
1537 struct l2cap_hdr *lh;
1539 BT_DBG("sk %p len %d", sk, (int)len);
1541 count = min_t(unsigned int, (conn->mtu - hlen), len);
1542 skb = bt_skb_send_alloc(sk, count + hlen,
1543 msg->msg_flags & MSG_DONTWAIT, &err);
/* NOTE(review): as in the connless variant, 'err' is discarded in favour
 * of a fixed -ENOMEM. */
1545 return ERR_PTR(-ENOMEM);
1547 /* Create L2CAP header */
1548 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1549 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1550 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1552 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1553 if (unlikely(err < 0)) {
1555 return ERR_PTR(err);
/*
 * Build an ERTM/streaming I-frame: L2CAP header, 16-bit control field,
 * optional 2-byte SDU length (only for SAR start frames, signalled by a
 * non-zero @sdulen path — the guarding 'if' is elided in this excerpt),
 * payload, and a 2-byte FCS placeholder when CRC16 is negotiated.
 */
1560 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1562 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1563 struct sk_buff *skb;
/* hlen = L2CAP header + control field; grown below for SAR/FCS. */
1564 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1565 struct l2cap_hdr *lh;
1567 BT_DBG("sk %p len %d", sk, (int)len);
/* No connection object: channel is not connected. */
1570 return ERR_PTR(-ENOTCONN);
/* Reserve room for the trailing FCS when CRC16 is in use. */
1575 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1578 count = min_t(unsigned int, (conn->mtu - hlen), len);
1579 skb = bt_skb_send_alloc(sk, count + hlen,
1580 msg->msg_flags & MSG_DONTWAIT, &err);
1582 return ERR_PTR(-ENOMEM);
1584 /* Create L2CAP header */
1585 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1586 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1587 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1588 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length field — present only on SAR "start" frames. */
1590 put_unaligned_le16(sdulen, skb_put(skb, 2));
1592 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1593 if (unlikely(err < 0)) {
1595 return ERR_PTR(err);
/* FCS placeholder; the real CRC is computed just before transmission. */
1598 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1599 put_unaligned_le16(0, skb_put(skb, 2));
/* Fresh frame: retransmission counter starts at zero. */
1601 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a START frame, zero or
 * more CONTINUE frames, and a final END frame, building them on a local
 * queue first so a mid-stream allocation failure can purge cleanly before
 * splicing onto the socket's TX queue.
 */
1605 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1607 struct l2cap_pinfo *pi = l2cap_pi(sk);
1608 struct sk_buff *skb;
1609 struct sk_buff_head sar_queue;
1613 skb_queue_head_init(&sar_queue);
/* First segment: SAR start, carries the total SDU length. */
1614 control = L2CAP_SDU_START;
1615 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1617 return PTR_ERR(skb);
1619 __skb_queue_tail(&sar_queue, skb);
1620 len -= pi->remote_mps;
1621 size += pi->remote_mps;
/* Middle segments use the full remote MPS; the remainder is the END. */
1626 if (len > pi->remote_mps) {
1627 control = L2CAP_SDU_CONTINUE;
1628 buflen = pi->remote_mps;
1630 control = L2CAP_SDU_END;
1634 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Allocation failed mid-SDU: drop everything queued so far. */
1636 skb_queue_purge(&sar_queue);
1637 return PTR_ERR(skb);
1640 __skb_queue_tail(&sar_queue, skb);
1644 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
/* If nothing was pending, transmission starts from our first segment. */
1645 if (sk->sk_send_head == NULL)
1646 sk->sk_send_head = sar_queue.next;
/*
 * sendmsg() entry point.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM sends a connectionless PDU immediately; basic mode sends a
 * single PDU (after an outgoing-MTU check); ERTM/streaming queue one
 * I-frame or a SAR-segmented train and then kick the mode's transmitter.
 */
1651 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1653 struct sock *sk = sock->sk;
1654 struct l2cap_pinfo *pi = l2cap_pi(sk);
1655 struct sk_buff *skb;
1659 BT_DBG("sock %p, sk %p", sock, sk);
1661 err = sock_error(sk);
/* L2CAP has no out-of-band data. */
1665 if (msg->msg_flags & MSG_OOB)
1670 if (sk->sk_state != BT_CONNECTED) {
1675 /* Connectionless channel */
1676 if (sk->sk_type == SOCK_DGRAM) {
1677 skb = l2cap_create_connless_pdu(sk, msg, len);
1681 l2cap_do_send(sk, skb);
1688 case L2CAP_MODE_BASIC:
1689 /* Check outgoing MTU */
1690 if (len > pi->omtu) {
1695 /* Create a basic PDU */
1696 skb = l2cap_create_basic_pdu(sk, msg, len);
1702 l2cap_do_send(sk, skb);
1706 case L2CAP_MODE_ERTM:
1707 case L2CAP_MODE_STREAMING:
1708 /* Entire SDU fits into one PDU */
1709 if (len <= pi->remote_mps) {
1710 control = L2CAP_SDU_UNSEGMENTED;
1711 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1716 __skb_queue_tail(TX_QUEUE(sk), skb);
1717 if (sk->sk_send_head == NULL)
1718 sk->sk_send_head = skb;
1720 /* Segment SDU into multiples PDUs */
1721 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Streaming fires unreliably; ERTM goes through the retransmit engine. */
1726 if (pi->mode == L2CAP_MODE_STREAMING)
1727 err = l2cap_streaming_send(sk);
1729 err = l2cap_ertm_send(sk);
1736 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() entry point.  With deferred setup, the first read on a
 * BT_CONNECT2 socket is what accepts the pending connection: it sends the
 * success response and moves the channel into configuration.  All actual
 * data delivery is delegated to the generic bt_sock_recvmsg().
 */
1745 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1747 struct sock *sk = sock->sk;
1751 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1752 struct l2cap_conn_rsp rsp;
1754 sk->sk_state = BT_CONFIG;
/* In the response our dcid/scid swap roles: rsp.scid is the remote's cid. */
1756 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1757 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1758 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1759 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1760 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1761 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1769 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Current values are loaded into 'opts' first so a short user buffer
 * leaves unspecified fields at their existing settings.
 */
1772 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1774 struct sock *sk = sock->sk;
1775 struct l2cap_options opts;
1779 BT_DBG("sk %p", sk);
/* Pre-fill with current values: partial copy_from_user keeps the rest. */
1785 opts.imtu = l2cap_pi(sk)->imtu;
1786 opts.omtu = l2cap_pi(sk)->omtu;
1787 opts.flush_to = l2cap_pi(sk)->flush_to;
1788 opts.mode = l2cap_pi(sk)->mode;
1789 opts.fcs = l2cap_pi(sk)->fcs;
1790 opts.max_tx = l2cap_pi(sk)->max_tx;
1791 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1793 len = min_t(unsigned int, sizeof(opts), optlen);
1794 if (copy_from_user((char *) &opts, optval, len)) {
1799 l2cap_pi(sk)->mode = opts.mode;
/* Only the three known modes are accepted (default path elided). */
1800 switch (l2cap_pi(sk)->mode) {
1801 case L2CAP_MODE_BASIC:
1803 case L2CAP_MODE_ERTM:
1804 case L2CAP_MODE_STREAMING:
1813 l2cap_pi(sk)->imtu = opts.imtu;
1814 l2cap_pi(sk)->omtu = opts.omtu;
1815 l2cap_pi(sk)->fcs = opts.fcs;
1816 l2cap_pi(sk)->max_tx = opts.max_tx;
1817 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
/* L2CAP_LM: map legacy link-mode bits onto the sec_level scale. */
1821 if (get_user(opt, (u32 __user *) optval)) {
/* Later checks override earlier ones, so SECURE > ENCRYPT > AUTH. */
1826 if (opt & L2CAP_LM_AUTH)
1827 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1828 if (opt & L2CAP_LM_ENCRYPT)
1829 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1830 if (opt & L2CAP_LM_SECURE)
1831 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1833 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1834 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * SOL_BLUETOOTH setsockopt handler (BT_SECURITY, BT_DEFER_SETUP).
 * SOL_L2CAP requests are routed to the legacy handler for compatibility.
 */
1846 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1848 struct sock *sk = sock->sk;
1849 struct bt_security sec;
1853 BT_DBG("sk %p", sk);
1855 if (level == SOL_L2CAP)
1856 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1858 if (level != SOL_BLUETOOTH)
1859 return -ENOPROTOOPT;
/* BT_SECURITY applies only to connection-oriented and raw sockets. */
1865 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1866 && sk->sk_type != SOCK_RAW) {
/* Default for a short user buffer that omits the level field. */
1871 sec.level = BT_SECURITY_LOW;
1873 len = min_t(unsigned int, sizeof(sec), optlen);
1874 if (copy_from_user((char *) &sec, optval, len)) {
1879 if (sec.level < BT_SECURITY_LOW ||
1880 sec.level > BT_SECURITY_HIGH) {
1885 l2cap_pi(sk)->sec_level = sec.level;
1888 case BT_DEFER_SETUP:
/* Deferred setup only makes sense before/while listening. */
1889 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1894 if (get_user(opt, (u32 __user *) optval)) {
1899 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).  Copies at most min(user len, struct size) bytes out.
 */
1911 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1913 struct sock *sk = sock->sk;
1914 struct l2cap_options opts;
1915 struct l2cap_conninfo cinfo;
1919 BT_DBG("sk %p", sk);
1921 if (get_user(len, optlen))
1928 opts.imtu = l2cap_pi(sk)->imtu;
1929 opts.omtu = l2cap_pi(sk)->omtu;
1930 opts.flush_to = l2cap_pi(sk)->flush_to;
1931 opts.mode = l2cap_pi(sk)->mode;
1932 opts.fcs = l2cap_pi(sk)->fcs;
1933 opts.max_tx = l2cap_pi(sk)->max_tx;
1934 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1936 len = min_t(unsigned int, len, sizeof(opts));
1937 if (copy_to_user(optval, (char *) &opts, len))
/* L2CAP_LM: translate sec_level back into the legacy bitmask. */
1943 switch (l2cap_pi(sk)->sec_level) {
1944 case BT_SECURITY_LOW:
1945 opt = L2CAP_LM_AUTH;
1947 case BT_SECURITY_MEDIUM:
1948 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1950 case BT_SECURITY_HIGH:
1951 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1959 if (l2cap_pi(sk)->role_switch)
1960 opt |= L2CAP_LM_MASTER;
1962 if (l2cap_pi(sk)->force_reliable)
1963 opt |= L2CAP_LM_RELIABLE;
1965 if (put_user(opt, (u32 __user *) optval))
1969 case L2CAP_CONNINFO:
/* Connection info is also readable during deferred setup (CONNECT2). */
1970 if (sk->sk_state != BT_CONNECTED &&
1971 !(sk->sk_state == BT_CONNECT2 &&
1972 bt_sk(sk)->defer_setup)) {
1977 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1978 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1980 len = min_t(unsigned int, len, sizeof(cinfo));
1981 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * SOL_BLUETOOTH getsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP requests fall back to the legacy handler.
 */
1995 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1997 struct sock *sk = sock->sk;
1998 struct bt_security sec;
2001 BT_DBG("sk %p", sk);
2003 if (level == SOL_L2CAP)
2004 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2006 if (level != SOL_BLUETOOTH)
2007 return -ENOPROTOOPT;
2009 if (get_user(len, optlen))
/* BT_SECURITY is restricted to the same socket types as setsockopt. */
2016 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2017 && sk->sk_type != SOCK_RAW) {
2022 sec.level = l2cap_pi(sk)->sec_level;
2024 len = min_t(unsigned int, len, sizeof(sec));
2025 if (copy_to_user(optval, (char *) &sec, len))
2030 case BT_DEFER_SETUP:
2031 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2036 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() handler: marks the socket fully shut down, starts channel
 * teardown, and — when SO_LINGER is set — waits for BT_CLOSED up to the
 * configured linger time.
 */
2050 static int l2cap_sock_shutdown(struct socket *sock, int how)
2052 struct sock *sk = sock->sk;
2055 BT_DBG("sock %p, sk %p", sock, sk);
/* Idempotent: only the first shutdown triggers the close sequence. */
2061 if (!sk->sk_shutdown) {
2062 sk->sk_shutdown = SHUTDOWN_MASK;
2063 l2cap_sock_clear_timer(sk);
2064 __l2cap_sock_close(sk, 0);
2066 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2067 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * release() handler: shut the channel down, then kill the sock.  The
 * shutdown(sock, 2) performs the protocol-level close; l2cap_sock_kill()
 * drops the final reference once the sock is zapped.
 */
2074 static int l2cap_sock_release(struct socket *sock)
2076 struct sock *sk = sock->sk;
2079 BT_DBG("sock %p, sk %p", sock, sk);
2084 err = l2cap_sock_shutdown(sock, 2);
2087 l2cap_sock_kill(sk);
/*
 * Mark a channel as fully configured and wake whoever is waiting on it:
 * the connect()er for outgoing channels (no parent), or the accept()er
 * for incoming ones (parent set).
 */
2091 static void l2cap_chan_ready(struct sock *sk)
2093 struct sock *parent = bt_sk(sk)->parent;
2095 BT_DBG("sk %p, parent %p", sk, parent);
/* Configuration is complete; stop the setup timer. */
2097 l2cap_pi(sk)->conf_state = 0;
2098 l2cap_sock_clear_timer(sk);
2101 /* Outgoing channel.
2102 * Wake up socket sleeping on connect.
2104 sk->sk_state = BT_CONNECTED;
2105 sk->sk_state_change(sk);
2107 /* Incoming channel.
2108 * Wake up socket sleeping on accept.
2110 parent->sk_data_ready(parent, 0);
2114 /* Copy frame to all raw sockets on that connection */
2115 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2117 struct l2cap_chan_list *l = &conn->chan_list;
2118 struct sk_buff *nskb;
2121 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock; clones are
 * GFP_ATOMIC because this runs in softirq context. */
2123 read_lock(&l->lock);
2124 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2125 if (sk->sk_type != SOCK_RAW)
2128 /* Don't send frame to the socket it came from */
2131 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue rejects the clone it must be freed (kfree path
 * elided in this excerpt). */
2135 if (sock_queue_rcv_skb(sk, nskb))
2138 read_unlock(&l->lock);
2141 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill a signalling-channel skb: L2CAP header (CID 0x0001),
 * command header, then @dlen bytes of @data, chaining continuation
 * fragments on frag_list when the command exceeds the link MTU.
 */
2142 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2143 u8 code, u8 ident, u16 dlen, void *data)
2145 struct sk_buff *skb, **frag;
2146 struct l2cap_cmd_hdr *cmd;
2147 struct l2cap_hdr *lh;
2150 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2151 conn, code, ident, dlen);
2153 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2154 count = min_t(unsigned int, conn->mtu, len);
2156 skb = bt_skb_alloc(count, GFP_ATOMIC);
2160 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
/* L2CAP length excludes its own header but includes the command header. */
2161 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2162 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2164 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2167 cmd->len = cpu_to_le16(dlen);
/* Remaining room in the head skb is filled with command payload. */
2170 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2171 memcpy(skb_put(skb, count), data, count);
2177 /* Continuation fragments (no L2CAP header) */
2178 frag = &skb_shinfo(skb)->frag_list;
2180 count = min_t(unsigned int, conn->mtu, len);
2182 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2186 memcpy(skb_put(*frag, count), data, count);
2191 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: returns its total encoded
 * length (header + payload) and stores type/len/value for the caller.
 * 1/2/4-byte values are converted from little-endian; anything larger is
 * returned as a pointer into the buffer.
 */
2201 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2203 struct l2cap_conf_opt *opt = *ptr;
2206 len = L2CAP_CONF_OPT_SIZE + opt->len;
2214 *val = *((u8 *) opt->val);
/* NOTE(review): casting opt->val to __le16*/__le32* dereferences a
 * possibly unaligned address inside the received packet — fine on x86,
 * a trap on strict-alignment architectures; get_unaligned_le16/32 (as
 * used elsewhere in this file) would be safer.  Confirm against callers. */
2218 *val = __le16_to_cpu(*((__le16 *) opt->val));
2222 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer, not a value. */
2226 *val = (unsigned long) opt->val;
2230 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Encode one configuration option at *ptr and advance the cursor past it.
 * For len > 4 the value is treated as a pointer to the payload to copy.
 */
2234 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2236 struct l2cap_conf_opt *opt = *ptr;
2238 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2245 *((u8 *) opt->val) = val;
/* NOTE(review): like the decode side, these stores through casted
 * __le16*/__le32* pointers may be unaligned on strict-alignment CPUs. */
2249 *((__le16 *) opt->val) = cpu_to_le16(val);
2253 *((__le32 *) opt->val) = cpu_to_le32(val);
2257 memcpy(opt->val, (void *) val, len);
/* Move the write cursor past header + payload. */
2261 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack timer callback: when it fires, send any pending acknowledgement
 * for received I-frames (socket locking elided in this excerpt).
 */
2264 static void l2cap_ack_timeout(unsigned long arg)
2266 struct sock *sk = (void *) arg;
2269 l2cap_send_ack(l2cap_pi(sk));
/*
 * Reset all ERTM per-channel state: sequence counters, the three ERTM
 * timers (retransmission, monitor, ack), the SREJ/busy receive queues,
 * and the local-busy work item.
 */
2273 static inline void l2cap_ertm_init(struct sock *sk)
2275 l2cap_pi(sk)->expected_ack_seq = 0;
2276 l2cap_pi(sk)->unacked_frames = 0;
2277 l2cap_pi(sk)->buffer_seq = 0;
2278 l2cap_pi(sk)->num_acked = 0;
2279 l2cap_pi(sk)->frames_sent = 0;
2281 setup_timer(&l2cap_pi(sk)->retrans_timer,
2282 l2cap_retrans_timeout, (unsigned long) sk);
2283 setup_timer(&l2cap_pi(sk)->monitor_timer,
2284 l2cap_monitor_timeout, (unsigned long) sk);
2285 setup_timer(&l2cap_pi(sk)->ack_timer,
2286 l2cap_ack_timeout, (unsigned long) sk);
/* __skb_queue_head_init: lockless init is fine, queues are new here. */
2288 __skb_queue_head_init(SREJ_QUEUE(sk));
2289 __skb_queue_head_init(BUSY_QUEUE(sk));
2291 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/*
 * Return non-zero when @mode is advertised both by the remote feature
 * mask and by our local one (ERTM/streaming bits are added to the local
 * mask conditionally — the guard is elided in this excerpt).
 */
2294 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2296 u32 local_feat_mask = l2cap_feat_mask;
2298 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2301 case L2CAP_MODE_ERTM:
2302 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2303 case L2CAP_MODE_STREAMING:
2304 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the channel mode to request: keep ERTM/streaming if both sides
 * support it, otherwise fall back to basic mode.
 */
2310 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2313 case L2CAP_MODE_STREAMING:
2314 case L2CAP_MODE_ERTM:
2315 if (l2cap_mode_supported(mode, remote_feat_mask))
2319 return L2CAP_MODE_BASIC;
/*
 * Build our outgoing configuration request into @data.  On the first
 * request the desired mode is validated (and possibly downgraded) against
 * the remote feature mask; then mode-specific options (MTU, RFC, FCS) are
 * appended.  Returns the encoded length (return elided in this excerpt).
 */
2323 static int l2cap_build_conf_req(struct sock *sk, void *data)
2325 struct l2cap_pinfo *pi = l2cap_pi(sk);
2326 struct l2cap_conf_req *req = data;
2327 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2328 void *ptr = req->data;
2330 BT_DBG("sk %p", sk);
/* Mode negotiation happens only on the very first config exchange. */
2332 if (pi->num_conf_req || pi->num_conf_rsp)
2336 case L2CAP_MODE_STREAMING:
2337 case L2CAP_MODE_ERTM:
/* STATE2: mode was set explicitly by the user — don't renegotiate. */
2338 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2339 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2340 l2cap_send_disconn_req(pi->conn, sk);
2343 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2349 case L2CAP_MODE_BASIC:
/* MTU option is only needed when differing from the default. */
2350 if (pi->imtu != L2CAP_DEFAULT_MTU)
2351 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2354 case L2CAP_MODE_ERTM:
2355 rfc.mode = L2CAP_MODE_ERTM;
2356 rfc.txwin_size = pi->tx_win;
2357 rfc.max_transmit = pi->max_tx;
/* Timeouts are informational in a request; the responder sets them. */
2358 rfc.retrans_timeout = 0;
2359 rfc.monitor_timeout = 0;
2360 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap MPS so PDU + ERTM overhead (~10 bytes) fits the link MTU. */
2361 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2362 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2364 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2365 sizeof(rfc), (unsigned long) &rfc);
2367 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Propose no-FCS only if we want it and the remote allowed it. */
2370 if (pi->fcs == L2CAP_FCS_NONE ||
2371 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2372 pi->fcs = L2CAP_FCS_NONE;
2373 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2377 case L2CAP_MODE_STREAMING:
2378 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming has no retransmissions, so these are all zero. */
2380 rfc.max_transmit = 0;
2381 rfc.retrans_timeout = 0;
2382 rfc.monitor_timeout = 0;
2383 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2384 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2385 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2388 sizeof(rfc), (unsigned long) &rfc);
2390 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2393 if (pi->fcs == L2CAP_FCS_NONE ||
2394 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2395 pi->fcs = L2CAP_FCS_NONE;
2396 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2401 /* FIXME: Need actual value of the flush timeout */
2402 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2403 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2405 req->dcid = cpu_to_le16(pi->dcid);
2406 req->flags = cpu_to_le16(0);
/*
 * Parse the accumulated remote configuration request (pi->conf_req) and
 * build our response into @data.  Unknown non-hint options are echoed
 * back with CONF_UNKNOWN; mode/MTU/RFC values are validated and the
 * accepted values are written into the response.
 */
2411 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2413 struct l2cap_pinfo *pi = l2cap_pi(sk);
2414 struct l2cap_conf_rsp *rsp = data;
2415 void *ptr = rsp->data;
2416 void *req = pi->conf_req;
2417 int len = pi->conf_len;
2418 int type, hint, olen;
2420 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2421 u16 mtu = L2CAP_DEFAULT_MTU;
2422 u16 result = L2CAP_CONF_SUCCESS;
2424 BT_DBG("sk %p", sk);
2426 while (len >= L2CAP_CONF_OPT_SIZE) {
2427 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be understood. */
2429 hint = type & L2CAP_CONF_HINT;
2430 type &= L2CAP_CONF_MASK;
2433 case L2CAP_CONF_MTU:
2437 case L2CAP_CONF_FLUSH_TO:
2441 case L2CAP_CONF_QOS:
2444 case L2CAP_CONF_RFC:
2445 if (olen == sizeof(rfc))
2446 memcpy(&rfc, (void *) val, olen);
2449 case L2CAP_CONF_FCS:
2450 if (val == L2CAP_FCS_NONE)
2451 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject with the offending type echoed. */
2459 result = L2CAP_CONF_UNKNOWN;
2460 *((u8 *) ptr++) = type;
/* Mode selection only on the first exchange, as on the request side. */
2465 if (pi->num_conf_rsp || pi->num_conf_req)
2469 case L2CAP_MODE_STREAMING:
2470 case L2CAP_MODE_ERTM:
2471 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2472 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2473 return -ECONNREFUSED;
2476 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2481 if (pi->mode != rfc.mode) {
2482 result = L2CAP_CONF_UNACCEPT;
2483 rfc.mode = pi->mode;
/* A second mode mismatch means negotiation failed — refuse. */
2485 if (pi->num_conf_rsp == 1)
2486 return -ECONNREFUSED;
2488 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2489 sizeof(rfc), (unsigned long) &rfc);
2493 if (result == L2CAP_CONF_SUCCESS) {
2494 /* Configure output options and let the other side know
2495 * which ones we don't like. */
2497 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2498 result = L2CAP_CONF_UNACCEPT;
2501 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2503 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2506 case L2CAP_MODE_BASIC:
2507 pi->fcs = L2CAP_FCS_NONE;
2508 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2511 case L2CAP_MODE_ERTM:
2512 pi->remote_tx_win = rfc.txwin_size;
2513 pi->remote_max_tx = rfc.max_transmit;
2514 if (rfc.max_pdu_size > pi->conn->mtu - 10)
/* BUG(review): pi->conn->mtu is host-endian, so this should be
 * cpu_to_le16(), not le16_to_cpu() — a no-op on little-endian but wrong
 * on big-endian hosts (rfc.max_pdu_size is __le16 on the wire).  The
 * same applies to the retrans/monitor timeout constants below and to the
 * streaming case.  Fixed upstream in later kernels. */
2515 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2517 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2519 rfc.retrans_timeout =
2520 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2521 rfc.monitor_timeout =
2522 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2524 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2526 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2527 sizeof(rfc), (unsigned long) &rfc);
2531 case L2CAP_MODE_STREAMING:
2532 if (rfc.max_pdu_size > pi->conn->mtu - 10)
/* BUG(review): same le16_to_cpu/cpu_to_le16 mix-up as above. */
2533 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2535 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2537 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2540 sizeof(rfc), (unsigned long) &rfc);
2545 result = L2CAP_CONF_UNACCEPT;
2547 memset(&rfc, 0, sizeof(rfc));
2548 rfc.mode = pi->mode;
2551 if (result == L2CAP_CONF_SUCCESS)
2552 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2554 rsp->scid = cpu_to_le16(pi->dcid);
2555 rsp->result = cpu_to_le16(result);
2556 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the remote's configuration RESPONSE and rebuild our next request
 * in @data, adopting acceptable values and re-proposing the rest.  On
 * success, commits the negotiated ERTM/streaming parameters.
 */
2561 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2563 struct l2cap_pinfo *pi = l2cap_pi(sk);
2564 struct l2cap_conf_req *req = data;
2565 void *ptr = req->data;
/* NOTE(review): rfc is not initialized; if the response contains no RFC
 * option, the ERTM/STREAMING commit below reads uninitialized stack.
 * Later upstream kernels zero/seed this struct — verify. */
2568 struct l2cap_conf_rfc rfc;
2570 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2572 while (len >= L2CAP_CONF_OPT_SIZE) {
2573 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2576 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the minimum. */
2577 if (val < L2CAP_DEFAULT_MIN_MTU) {
2578 *result = L2CAP_CONF_UNACCEPT;
2579 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2582 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2585 case L2CAP_CONF_FLUSH_TO:
2587 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2591 case L2CAP_CONF_RFC:
2592 if (olen == sizeof(rfc))
2593 memcpy(&rfc, (void *)val, olen);
/* User pinned the mode (STATE2) — a different mode is fatal. */
2595 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2596 rfc.mode != pi->mode)
2597 return -ECONNREFUSED;
2599 pi->mode = rfc.mode;
2602 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2603 sizeof(rfc), (unsigned long) &rfc);
/* Commit negotiated parameters once the response is a full success. */
2608 if (*result == L2CAP_CONF_SUCCESS) {
2610 case L2CAP_MODE_ERTM:
2611 pi->remote_tx_win = rfc.txwin_size;
2612 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2613 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2614 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2616 case L2CAP_MODE_STREAMING:
2617 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2621 req->dcid = cpu_to_le16(pi->dcid);
2622 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal configuration response (scid/result/flags, no options)
 * into @data; returns the encoded length (return elided in this excerpt).
 */
2627 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2629 struct l2cap_conf_rsp *rsp = data;
2630 void *ptr = rsp->data;
2632 BT_DBG("sk %p", sk);
/* scid in the response is the peer's channel id, i.e. our dcid. */
2634 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2635 rsp->result = cpu_to_le16(result);
2636 rsp->flags = cpu_to_le16(flags);
/*
 * Extract the RFC option from a successful configuration response and
 * commit the negotiated ERTM/streaming parameters.  No-op for basic mode.
 */
2641 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2643 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* NOTE(review): rfc is uninitialized; a success response without an RFC
 * option makes the commit below read uninitialized stack.  Verify against
 * the upstream fix that seeds rfc with defaults. */
2646 struct l2cap_conf_rfc rfc;
2648 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2650 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2653 while (len >= L2CAP_CONF_OPT_SIZE) {
2654 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2657 case L2CAP_CONF_RFC:
2658 if (olen == sizeof(rfc))
2659 memcpy(&rfc, (void *)val, olen);
2666 case L2CAP_MODE_ERTM:
2667 pi->remote_tx_win = rfc.txwin_size;
2668 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2669 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2670 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2672 case L2CAP_MODE_STREAMING:
2673 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject.  Only the "command not understood"
 * reason (0x0000) matters here: if it rejects our outstanding info
 * request, treat feature discovery as done and start pending channels.
 */
2677 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2679 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2681 if (rej->reason != 0x0000)
/* Match the reject against our in-flight info request by ident. */
2684 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2685 cmd->ident == conn->info_ident) {
2686 del_timer(&conn->info_timer);
2688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2689 conn->info_ident = 0;
2691 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: locate a listener for the PSM,
 * enforce link security (except for SDP), allocate and register a child
 * socket, and reply with success / pending / refusal.  If the peer's
 * feature mask is still unknown, a pending result triggers an info
 * request first.
 */
2697 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2699 struct l2cap_chan_list *list = &conn->chan_list;
2700 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2701 struct l2cap_conn_rsp rsp;
2702 struct sock *sk, *parent;
2703 int result, status = L2CAP_CS_NO_INFO;
2705 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2706 __le16 psm = req->psm;
/* NOTE(review): psm is __le16 here, printed raw — debug output is
 * byte-swapped on big-endian hosts.  Cosmetic only. */
2708 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2710 /* Check if we have socket listening on psm */
2711 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2713 result = L2CAP_CR_BAD_PSM;
2717 /* Check if the ACL is secure enough (if not SDP) */
2718 if (psm != cpu_to_le16(0x0001) &&
2719 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: disconnect reason "authentication failure". */
2720 conn->disc_reason = 0x05;
2721 result = L2CAP_CR_SEC_BLOCK;
2725 result = L2CAP_CR_NO_MEM;
2727 /* Check for backlog size */
2728 if (sk_acceptq_is_full(parent)) {
2729 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2733 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2737 write_lock_bh(&list->lock);
2739 /* Check if we already have channel with that dcid */
2740 if (__l2cap_get_chan_by_dcid(list, scid)) {
2741 write_unlock_bh(&list->lock);
2742 sock_set_flag(sk, SOCK_ZAPPED);
2743 l2cap_sock_kill(sk);
2747 hci_conn_hold(conn->hcon);
2749 l2cap_sock_init(sk, parent);
2750 bacpy(&bt_sk(sk)->src, conn->src);
2751 bacpy(&bt_sk(sk)->dst, conn->dst);
2752 l2cap_pi(sk)->psm = psm;
2753 l2cap_pi(sk)->dcid = scid;
2755 __l2cap_chan_add(conn, sk, parent);
/* Our local cid (allocated by chan_add) becomes the peer's dcid. */
2756 dcid = l2cap_pi(sk)->scid;
2758 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2760 l2cap_pi(sk)->ident = cmd->ident;
/* Proceed only once the peer's feature mask is known. */
2762 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2763 if (l2cap_check_security(sk)) {
2764 if (bt_sk(sk)->defer_setup) {
2765 sk->sk_state = BT_CONNECT2;
2766 result = L2CAP_CR_PEND;
2767 status = L2CAP_CS_AUTHOR_PEND;
2768 parent->sk_data_ready(parent, 0);
2770 sk->sk_state = BT_CONFIG;
2771 result = L2CAP_CR_SUCCESS;
2772 status = L2CAP_CS_NO_INFO;
2775 sk->sk_state = BT_CONNECT2;
2776 result = L2CAP_CR_PEND;
2777 status = L2CAP_CS_AUTHEN_PEND;
2780 sk->sk_state = BT_CONNECT2;
2781 result = L2CAP_CR_PEND;
2782 status = L2CAP_CS_NO_INFO;
2785 write_unlock_bh(&list->lock);
2788 bh_unlock_sock(parent);
2791 rsp.scid = cpu_to_le16(scid);
2792 rsp.dcid = cpu_to_le16(dcid);
2793 rsp.result = cpu_to_le16(result);
2794 rsp.status = cpu_to_le16(status);
2795 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending for lack of info: kick off feature-mask discovery now. */
2797 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2798 struct l2cap_info_req info;
2799 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2801 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2802 conn->info_ident = l2cap_get_ident(conn);
2804 mod_timer(&conn->info_timer, jiffies +
2805 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2807 l2cap_send_cmd(conn, conn->info_ident,
2808 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle an incoming Connection Response.  On success, record the peer's
 * dcid and immediately start configuration; on pending, just mark the
 * channel; anything else tears the channel down with ECONNREFUSED.
 */
2814 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2816 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2817 u16 scid, dcid, result, status;
2821 scid = __le16_to_cpu(rsp->scid);
2822 dcid = __le16_to_cpu(rsp->dcid);
2823 result = __le16_to_cpu(rsp->result);
2824 status = __le16_to_cpu(rsp->status);
2826 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid may be 0 in error responses; fall back to matching by ident. */
2829 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2833 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2839 case L2CAP_CR_SUCCESS:
2840 sk->sk_state = BT_CONFIG;
2841 l2cap_pi(sk)->ident = 0;
2842 l2cap_pi(sk)->dcid = dcid;
2843 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2845 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Kick off configuration right away with our built request. */
2847 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2848 l2cap_build_conf_req(sk, req), req);
2849 l2cap_pi(sk)->num_conf_req++;
2853 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2857 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle an incoming Configuration Request.  Fragmented requests (flag
 * bit 0 set) are accumulated in pi->conf_req until complete, then parsed;
 * on full success of both directions the channel becomes connected.
 */
2865 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2867 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2873 dcid = __le16_to_cpu(req->dcid);
2874 flags = __le16_to_cpu(req->flags);
2876 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2878 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Late config request while disconnecting: ignore it. */
2882 if (sk->sk_state == BT_DISCONN)
2885 /* Reject if config buffer is too small. */
2886 len = cmd_len - sizeof(*req);
2887 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2888 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2889 l2cap_build_conf_rsp(sk, rsp,
2890 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel config buffer. */
2895 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2896 l2cap_pi(sk)->conf_len += len;
2898 if (flags & 0x0001) {
2899 /* Incomplete config. Send empty response. */
2900 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2901 l2cap_build_conf_rsp(sk, rsp,
2902 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2906 /* Complete config. */
2907 len = l2cap_parse_conf_req(sk, rsp);
2909 l2cap_send_disconn_req(conn, sk);
2913 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2914 l2cap_pi(sk)->num_conf_rsp++;
2916 /* Reset config buffer. */
2917 l2cap_pi(sk)->conf_len = 0;
2919 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: default back to CRC16 unless no-FCS was
 * explicitly negotiated on both sides. */
2922 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2923 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2924 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2925 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2927 sk->sk_state = BT_CONNECTED;
2929 l2cap_pi(sk)->next_tx_seq = 0;
2930 l2cap_pi(sk)->expected_tx_seq = 0;
2931 __skb_queue_head_init(TX_QUEUE(sk));
2932 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2933 l2cap_ertm_init(sk);
2935 l2cap_chan_ready(sk);
/* Peer configured first: we still owe our own config request. */
2939 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2941 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2942 l2cap_build_conf_req(sk, buf), buf);
2943 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle an incoming Configuration Response.  Success commits RFC values;
 * "unacceptable parameters" triggers a renegotiated request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); any other result disconnects the channel.
 */
2951 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2953 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2954 u16 scid, flags, result;
/* BUG(review): cmd->len is __le16 on the wire but is used here without
 * le16_to_cpu() — every other handler converts it (see l2cap_sig_channel).
 * Wrong length on big-endian hosts. */
2956 int len = cmd->len - sizeof(*rsp);
2958 scid = __le16_to_cpu(rsp->scid);
2959 flags = __le16_to_cpu(rsp->flags);
2960 result = __le16_to_cpu(rsp->result);
2962 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2963 scid, flags, result);
2965 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2970 case L2CAP_CONF_SUCCESS:
2971 l2cap_conf_rfc_get(sk, rsp->data, len);
2974 case L2CAP_CONF_UNACCEPT:
/* Allow a bounded number of renegotiation rounds. */
2975 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2978 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2979 l2cap_send_disconn_req(conn, sk);
2983 /* throw out any old stored conf requests */
2984 result = L2CAP_CONF_SUCCESS;
2985 len = l2cap_parse_conf_rsp(sk, rsp->data,
2988 l2cap_send_disconn_req(conn, sk);
2992 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2993 L2CAP_CONF_REQ, len, req);
2994 l2cap_pi(sk)->num_conf_req++;
2995 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: give the peer 5s to answer the disconnect. */
3001 sk->sk_state = BT_DISCONN;
3002 sk->sk_err = ECONNRESET;
3003 l2cap_sock_set_timer(sk, HZ * 5);
3004 l2cap_send_disconn_req(conn, sk);
3011 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3013 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3014 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3015 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3016 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3018 sk->sk_state = BT_CONNECTED;
3019 l2cap_pi(sk)->next_tx_seq = 0;
3020 l2cap_pi(sk)->expected_tx_seq = 0;
3021 __skb_queue_head_init(TX_QUEUE(sk));
3022 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3023 l2cap_ertm_init(sk);
3025 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnection Request: acknowledge it, flush all
 * pending TX/ERTM state, stop the ERTM timers, and tear the channel down
 * with ECONNRESET.
 */
3033 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3035 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3036 struct l2cap_disconn_rsp rsp;
3040 scid = __le16_to_cpu(req->scid);
3041 dcid = __le16_to_cpu(req->dcid);
3043 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local scid, so look the channel up by it. */
3045 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3049 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3050 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3051 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3053 sk->sk_shutdown = SHUTDOWN_MASK;
3055 skb_queue_purge(TX_QUEUE(sk));
/* ERTM channels also carry retransmit state and three live timers. */
3057 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3058 skb_queue_purge(SREJ_QUEUE(sk));
3059 skb_queue_purge(BUSY_QUEUE(sk));
3060 del_timer(&l2cap_pi(sk)->retrans_timer);
3061 del_timer(&l2cap_pi(sk)->monitor_timer);
3062 del_timer(&l2cap_pi(sk)->ack_timer);
3065 l2cap_chan_del(sk, ECONNRESET);
3068 l2cap_sock_kill(sk);
/*
 * Handle an incoming Disconnection Response to our own request: flush
 * TX/ERTM state and finish the teardown (error 0 — clean close).
 */
3072 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3074 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3078 scid = __le16_to_cpu(rsp->scid);
3079 dcid = __le16_to_cpu(rsp->dcid);
3081 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3083 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3087 skb_queue_purge(TX_QUEUE(sk));
/* Same ERTM cleanup as the request path. */
3089 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3090 skb_queue_purge(SREJ_QUEUE(sk));
3091 skb_queue_purge(BUSY_QUEUE(sk));
3092 del_timer(&l2cap_pi(sk)->retrans_timer);
3093 del_timer(&l2cap_pi(sk)->monitor_timer);
3094 del_timer(&l2cap_pi(sk)->ack_timer);
3097 l2cap_chan_del(sk, 0);
3100 l2cap_sock_kill(sk);
/*
 * Handle an incoming Information Request: answer FEAT_MASK with our
 * feature mask (ERTM/streaming bits added conditionally), FIXED_CHAN with
 * the fixed-channel map, anything else with NOTSUPP.
 */
3104 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3106 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3109 type = __le16_to_cpu(req->type);
3111 BT_DBG("type 0x%4.4x", type);
3113 if (type == L2CAP_IT_FEAT_MASK) {
3115 u32 feat_mask = l2cap_feat_mask;
3116 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3117 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3118 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3120 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3122 put_unaligned_le32(feat_mask, rsp->data);
3123 l2cap_send_cmd(conn, cmd->ident,
3124 L2CAP_INFO_RSP, sizeof(buf), buf);
3125 } else if (type == L2CAP_IT_FIXED_CHAN) {
3127 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3128 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3129 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* buf + 4 skips the type/result header; 8 bytes of fixed-channel map.
 * NOTE(review): magic '4' mirrors sizeof(*rsp) — rsp->data would be
 * clearer. */
3130 memcpy(buf + 4, l2cap_fixed_chan, 8);
3131 l2cap_send_cmd(conn, cmd->ident,
3132 L2CAP_INFO_RSP, sizeof(buf), buf);
3134 struct l2cap_info_rsp rsp;
3135 rsp.type = cpu_to_le16(type);
3136 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3137 l2cap_send_cmd(conn, cmd->ident,
3138 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3144 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3146 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3149 type = __le16_to_cpu(rsp->type);
3150 result = __le16_to_cpu(rsp->result);
3152 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3154 del_timer(&conn->info_timer);
3156 if (type == L2CAP_IT_FEAT_MASK) {
3157 conn->feat_mask = get_unaligned_le32(rsp->data);
3159 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3160 struct l2cap_info_req req;
3161 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3163 conn->info_ident = l2cap_get_ident(conn);
3165 l2cap_send_cmd(conn, conn->info_ident,
3166 L2CAP_INFO_REQ, sizeof(req), &req);
3168 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3169 conn->info_ident = 0;
3171 l2cap_conn_start(conn);
3173 } else if (type == L2CAP_IT_FIXED_CHAN) {
3174 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3175 conn->info_ident = 0;
3177 l2cap_conn_start(conn);
3183 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3185 u8 *data = skb->data;
3187 struct l2cap_cmd_hdr cmd;
3190 l2cap_raw_recv(conn, skb);
3192 while (len >= L2CAP_CMD_HDR_SIZE) {
3194 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3195 data += L2CAP_CMD_HDR_SIZE;
3196 len -= L2CAP_CMD_HDR_SIZE;
3198 cmd_len = le16_to_cpu(cmd.len);
3200 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3202 if (cmd_len > len || !cmd.ident) {
3203 BT_DBG("corrupted command");
3208 case L2CAP_COMMAND_REJ:
3209 l2cap_command_rej(conn, &cmd, data);
3212 case L2CAP_CONN_REQ:
3213 err = l2cap_connect_req(conn, &cmd, data);
3216 case L2CAP_CONN_RSP:
3217 err = l2cap_connect_rsp(conn, &cmd, data);
3220 case L2CAP_CONF_REQ:
3221 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3224 case L2CAP_CONF_RSP:
3225 err = l2cap_config_rsp(conn, &cmd, data);
3228 case L2CAP_DISCONN_REQ:
3229 err = l2cap_disconnect_req(conn, &cmd, data);
3232 case L2CAP_DISCONN_RSP:
3233 err = l2cap_disconnect_rsp(conn, &cmd, data);
3236 case L2CAP_ECHO_REQ:
3237 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3240 case L2CAP_ECHO_RSP:
3243 case L2CAP_INFO_REQ:
3244 err = l2cap_information_req(conn, &cmd, data);
3247 case L2CAP_INFO_RSP:
3248 err = l2cap_information_rsp(conn, &cmd, data);
3252 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3258 struct l2cap_cmd_rej rej;
3259 BT_DBG("error %d", err);
3261 /* FIXME: Map err to a valid reason */
3262 rej.reason = cpu_to_le16(0);
3263 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3273 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3275 u16 our_fcs, rcv_fcs;
3276 int hdr_size = L2CAP_HDR_SIZE + 2;
3278 if (pi->fcs == L2CAP_FCS_CRC16) {
3279 skb_trim(skb, skb->len - 2);
3280 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3281 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3283 if (our_fcs != rcv_fcs)
3289 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3291 struct l2cap_pinfo *pi = l2cap_pi(sk);
3294 pi->frames_sent = 0;
3295 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3297 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3299 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3300 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3301 l2cap_send_sframe(pi, control);
3302 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3303 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3306 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3307 __mod_retrans_timer();
3309 l2cap_ertm_send(sk);
3311 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3312 pi->frames_sent == 0) {
3313 control |= L2CAP_SUPER_RCV_READY;
3314 l2cap_send_sframe(pi, control);
3318 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3320 struct sk_buff *next_skb;
3322 bt_cb(skb)->tx_seq = tx_seq;
3323 bt_cb(skb)->sar = sar;
3325 next_skb = skb_peek(SREJ_QUEUE(sk));
3327 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3332 if (bt_cb(next_skb)->tx_seq == tx_seq)
3335 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3336 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3340 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3343 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3345 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3350 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3352 struct l2cap_pinfo *pi = l2cap_pi(sk);
3353 struct sk_buff *_skb;
3356 switch (control & L2CAP_CTRL_SAR) {
3357 case L2CAP_SDU_UNSEGMENTED:
3358 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3361 err = sock_queue_rcv_skb(sk, skb);
3367 case L2CAP_SDU_START:
3368 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3371 pi->sdu_len = get_unaligned_le16(skb->data);
3373 if (pi->sdu_len > pi->imtu)
3376 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3380 /* pull sdu_len bytes only after alloc, because of Local Busy
3381 * condition we have to be sure that this will be executed
3382 * only once, i.e., when alloc does not fail */
3385 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3387 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3388 pi->partial_sdu_len = skb->len;
3391 case L2CAP_SDU_CONTINUE:
3392 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3398 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3400 pi->partial_sdu_len += skb->len;
3401 if (pi->partial_sdu_len > pi->sdu_len)
3407 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3413 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3414 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3416 pi->partial_sdu_len += skb->len;
3418 if (pi->partial_sdu_len > pi->imtu)
3421 if (pi->partial_sdu_len != pi->sdu_len)
3425 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3427 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3431 err = sock_queue_rcv_skb(sk, _skb);
3434 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3438 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3439 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3453 l2cap_send_disconn_req(pi->conn, sk);
3458 static void l2cap_busy_work(struct work_struct *work)
3460 DECLARE_WAITQUEUE(wait, current);
3461 struct l2cap_pinfo *pi =
3462 container_of(work, struct l2cap_pinfo, busy_work);
3463 struct sock *sk = (struct sock *)pi;
3464 int n_tries = 0, timeo = HZ/5, err;
3465 struct sk_buff *skb;
3470 add_wait_queue(sk->sk_sleep, &wait);
3471 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3472 set_current_state(TASK_INTERRUPTIBLE);
3474 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3476 l2cap_send_disconn_req(pi->conn, sk);
3483 if (signal_pending(current)) {
3484 err = sock_intr_errno(timeo);
3489 timeo = schedule_timeout(timeo);
3492 err = sock_error(sk);
3496 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3497 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3498 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3500 skb_queue_head(BUSY_QUEUE(sk), skb);
3504 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3511 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3514 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3515 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3516 l2cap_send_sframe(pi, control);
3517 l2cap_pi(sk)->retry_count = 1;
3519 del_timer(&pi->retrans_timer);
3520 __mod_monitor_timer();
3522 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3525 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3526 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3528 set_current_state(TASK_RUNNING);
3529 remove_wait_queue(sk->sk_sleep, &wait);
3534 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3536 struct l2cap_pinfo *pi = l2cap_pi(sk);
3539 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3540 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3541 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3545 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3547 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3551 /* Busy Condition */
3552 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3553 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3554 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3556 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3557 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3558 l2cap_send_sframe(pi, sctrl);
3560 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3562 queue_work(_busy_wq, &pi->busy_work);
3567 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3569 struct l2cap_pinfo *pi = l2cap_pi(sk);
3570 struct sk_buff *_skb;
3574 * TODO: We have to notify the userland if some data is lost with the
3578 switch (control & L2CAP_CTRL_SAR) {
3579 case L2CAP_SDU_UNSEGMENTED:
3580 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3585 err = sock_queue_rcv_skb(sk, skb);
3591 case L2CAP_SDU_START:
3592 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3597 pi->sdu_len = get_unaligned_le16(skb->data);
3600 if (pi->sdu_len > pi->imtu) {
3605 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3611 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3613 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3614 pi->partial_sdu_len = skb->len;
3618 case L2CAP_SDU_CONTINUE:
3619 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3622 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3624 pi->partial_sdu_len += skb->len;
3625 if (pi->partial_sdu_len > pi->sdu_len)
3633 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3636 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3638 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3639 pi->partial_sdu_len += skb->len;
3641 if (pi->partial_sdu_len > pi->imtu)
3644 if (pi->partial_sdu_len == pi->sdu_len) {
3645 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3646 err = sock_queue_rcv_skb(sk, _skb);
3661 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3663 struct sk_buff *skb;
3666 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3667 if (bt_cb(skb)->tx_seq != tx_seq)
3670 skb = skb_dequeue(SREJ_QUEUE(sk));
3671 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3672 l2cap_ertm_reassembly_sdu(sk, skb, control);
3673 l2cap_pi(sk)->buffer_seq_srej =
3674 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3679 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3681 struct l2cap_pinfo *pi = l2cap_pi(sk);
3682 struct srej_list *l, *tmp;
3685 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3686 if (l->tx_seq == tx_seq) {
3691 control = L2CAP_SUPER_SELECT_REJECT;
3692 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3693 l2cap_send_sframe(pi, control);
3695 list_add_tail(&l->list, SREJ_LIST(sk));
3699 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3701 struct l2cap_pinfo *pi = l2cap_pi(sk);
3702 struct srej_list *new;
3705 while (tx_seq != pi->expected_tx_seq) {
3706 control = L2CAP_SUPER_SELECT_REJECT;
3707 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3708 l2cap_send_sframe(pi, control);
3710 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3711 new->tx_seq = pi->expected_tx_seq++;
3712 list_add_tail(&new->list, SREJ_LIST(sk));
3714 pi->expected_tx_seq++;
3717 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3719 struct l2cap_pinfo *pi = l2cap_pi(sk);
3720 u8 tx_seq = __get_txseq(rx_control);
3721 u8 req_seq = __get_reqseq(rx_control);
3722 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3723 u8 tx_seq_offset, expected_tx_seq_offset;
3724 int num_to_ack = (pi->tx_win/6) + 1;
3727 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3729 if (L2CAP_CTRL_FINAL & rx_control &&
3730 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3731 del_timer(&pi->monitor_timer);
3732 if (pi->unacked_frames > 0)
3733 __mod_retrans_timer();
3734 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3737 pi->expected_ack_seq = req_seq;
3738 l2cap_drop_acked_frames(sk);
3740 if (tx_seq == pi->expected_tx_seq)
3743 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3744 if (tx_seq_offset < 0)
3745 tx_seq_offset += 64;
3747 /* invalid tx_seq */
3748 if (tx_seq_offset >= pi->tx_win) {
3749 l2cap_send_disconn_req(pi->conn, sk);
3753 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3756 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3757 struct srej_list *first;
3759 first = list_first_entry(SREJ_LIST(sk),
3760 struct srej_list, list);
3761 if (tx_seq == first->tx_seq) {
3762 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3763 l2cap_check_srej_gap(sk, tx_seq);
3765 list_del(&first->list);
3768 if (list_empty(SREJ_LIST(sk))) {
3769 pi->buffer_seq = pi->buffer_seq_srej;
3770 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3774 struct srej_list *l;
3776 /* duplicated tx_seq */
3777 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3780 list_for_each_entry(l, SREJ_LIST(sk), list) {
3781 if (l->tx_seq == tx_seq) {
3782 l2cap_resend_srejframe(sk, tx_seq);
3786 l2cap_send_srejframe(sk, tx_seq);
3789 expected_tx_seq_offset =
3790 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3791 if (expected_tx_seq_offset < 0)
3792 expected_tx_seq_offset += 64;
3794 /* duplicated tx_seq */
3795 if (tx_seq_offset < expected_tx_seq_offset)
3798 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3800 INIT_LIST_HEAD(SREJ_LIST(sk));
3801 pi->buffer_seq_srej = pi->buffer_seq;
3803 __skb_queue_head_init(SREJ_QUEUE(sk));
3804 __skb_queue_head_init(BUSY_QUEUE(sk));
3805 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3807 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3809 l2cap_send_srejframe(sk, tx_seq);
3814 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3816 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3817 bt_cb(skb)->tx_seq = tx_seq;
3818 bt_cb(skb)->sar = sar;
3819 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3823 if (rx_control & L2CAP_CTRL_FINAL) {
3824 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3825 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3827 if (!skb_queue_empty(TX_QUEUE(sk)))
3828 sk->sk_send_head = TX_QUEUE(sk)->next;
3829 pi->next_tx_seq = pi->expected_ack_seq;
3830 l2cap_ertm_send(sk);
3834 err = l2cap_push_rx_skb(sk, skb, rx_control);
3840 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3841 if (pi->num_acked == num_to_ack - 1)
3851 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3853 struct l2cap_pinfo *pi = l2cap_pi(sk);
3855 pi->expected_ack_seq = __get_reqseq(rx_control);
3856 l2cap_drop_acked_frames(sk);
3858 if (rx_control & L2CAP_CTRL_POLL) {
3859 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3860 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3861 (pi->unacked_frames > 0))
3862 __mod_retrans_timer();
3864 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3865 l2cap_send_srejtail(sk);
3867 l2cap_send_i_or_rr_or_rnr(sk);
3868 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3871 } else if (rx_control & L2CAP_CTRL_FINAL) {
3872 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3874 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3875 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3877 if (!skb_queue_empty(TX_QUEUE(sk)))
3878 sk->sk_send_head = TX_QUEUE(sk)->next;
3879 pi->next_tx_seq = pi->expected_ack_seq;
3880 l2cap_ertm_send(sk);
3884 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3885 (pi->unacked_frames > 0))
3886 __mod_retrans_timer();
3888 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3889 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3892 l2cap_ertm_send(sk);
3896 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3898 struct l2cap_pinfo *pi = l2cap_pi(sk);
3899 u8 tx_seq = __get_reqseq(rx_control);
3901 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3903 pi->expected_ack_seq = tx_seq;
3904 l2cap_drop_acked_frames(sk);
3906 if (rx_control & L2CAP_CTRL_FINAL) {
3907 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3908 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3910 if (!skb_queue_empty(TX_QUEUE(sk)))
3911 sk->sk_send_head = TX_QUEUE(sk)->next;
3912 pi->next_tx_seq = pi->expected_ack_seq;
3913 l2cap_ertm_send(sk);
3916 if (!skb_queue_empty(TX_QUEUE(sk)))
3917 sk->sk_send_head = TX_QUEUE(sk)->next;
3918 pi->next_tx_seq = pi->expected_ack_seq;
3919 l2cap_ertm_send(sk);
3921 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3922 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3925 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3927 struct l2cap_pinfo *pi = l2cap_pi(sk);
3928 u8 tx_seq = __get_reqseq(rx_control);
3930 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3932 if (rx_control & L2CAP_CTRL_POLL) {
3933 pi->expected_ack_seq = tx_seq;
3934 l2cap_drop_acked_frames(sk);
3935 l2cap_retransmit_frame(sk, tx_seq);
3936 l2cap_ertm_send(sk);
3937 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3938 pi->srej_save_reqseq = tx_seq;
3939 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3941 } else if (rx_control & L2CAP_CTRL_FINAL) {
3942 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3943 pi->srej_save_reqseq == tx_seq)
3944 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3946 l2cap_retransmit_frame(sk, tx_seq);
3948 l2cap_retransmit_frame(sk, tx_seq);
3949 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3950 pi->srej_save_reqseq = tx_seq;
3951 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3956 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3958 struct l2cap_pinfo *pi = l2cap_pi(sk);
3959 u8 tx_seq = __get_reqseq(rx_control);
3961 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3962 pi->expected_ack_seq = tx_seq;
3963 l2cap_drop_acked_frames(sk);
3965 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
3966 del_timer(&pi->retrans_timer);
3967 if (rx_control & L2CAP_CTRL_POLL)
3968 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
3972 if (rx_control & L2CAP_CTRL_POLL)
3973 l2cap_send_srejtail(sk);
3975 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
3978 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3980 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3982 if (L2CAP_CTRL_FINAL & rx_control &&
3983 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3984 del_timer(&l2cap_pi(sk)->monitor_timer);
3985 if (l2cap_pi(sk)->unacked_frames > 0)
3986 __mod_retrans_timer();
3987 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3990 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3991 case L2CAP_SUPER_RCV_READY:
3992 l2cap_data_channel_rrframe(sk, rx_control);
3995 case L2CAP_SUPER_REJECT:
3996 l2cap_data_channel_rejframe(sk, rx_control);
3999 case L2CAP_SUPER_SELECT_REJECT:
4000 l2cap_data_channel_srejframe(sk, rx_control);
4003 case L2CAP_SUPER_RCV_NOT_READY:
4004 l2cap_data_channel_rnrframe(sk, rx_control);
4012 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4015 struct l2cap_pinfo *pi;
4017 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
4019 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4021 BT_DBG("unknown cid 0x%4.4x", cid);
4027 BT_DBG("sk %p, len %d", sk, skb->len);
4029 if (sk->sk_state != BT_CONNECTED)
4033 case L2CAP_MODE_BASIC:
4034 /* If socket recv buffers overflows we drop data here
4035 * which is *bad* because L2CAP has to be reliable.
4036 * But we don't have any other choice. L2CAP doesn't
4037 * provide flow control mechanism. */
4039 if (pi->imtu < skb->len)
4042 if (!sock_queue_rcv_skb(sk, skb))
4046 case L2CAP_MODE_ERTM:
4047 control = get_unaligned_le16(skb->data);
4051 if (__is_sar_start(control))
4054 if (pi->fcs == L2CAP_FCS_CRC16)
4058 * We can just drop the corrupted I-frame here.
4059 * Receiver will miss it and start proper recovery
4060 * procedures and ask retransmission.
4062 if (len > pi->mps) {
4063 l2cap_send_disconn_req(pi->conn, sk);
4067 if (l2cap_check_fcs(pi, skb))
4070 req_seq = __get_reqseq(control);
4071 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4072 if (req_seq_offset < 0)
4073 req_seq_offset += 64;
4075 next_tx_seq_offset =
4076 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4077 if (next_tx_seq_offset < 0)
4078 next_tx_seq_offset += 64;
4080 /* check for invalid req-seq */
4081 if (req_seq_offset > next_tx_seq_offset) {
4082 l2cap_send_disconn_req(pi->conn, sk);
4086 if (__is_iframe(control)) {
4088 l2cap_send_disconn_req(pi->conn, sk);
4092 l2cap_data_channel_iframe(sk, control, skb);
4095 l2cap_send_disconn_req(pi->conn, sk);
4099 l2cap_data_channel_sframe(sk, control, skb);
4104 case L2CAP_MODE_STREAMING:
4105 control = get_unaligned_le16(skb->data);
4109 if (__is_sar_start(control))
4112 if (pi->fcs == L2CAP_FCS_CRC16)
4115 if (len > pi->mps || len < 4 || __is_sframe(control))
4118 if (l2cap_check_fcs(pi, skb))
4121 tx_seq = __get_txseq(control);
4123 if (pi->expected_tx_seq == tx_seq)
4124 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4126 pi->expected_tx_seq = (tx_seq + 1) % 64;
4128 l2cap_streaming_reassembly_sdu(sk, skb, control);
4133 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4147 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4151 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4155 BT_DBG("sk %p, len %d", sk, skb->len);
4157 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4160 if (l2cap_pi(sk)->imtu < skb->len)
4163 if (!sock_queue_rcv_skb(sk, skb))
4175 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4177 struct l2cap_hdr *lh = (void *) skb->data;
4181 skb_pull(skb, L2CAP_HDR_SIZE);
4182 cid = __le16_to_cpu(lh->cid);
4183 len = __le16_to_cpu(lh->len);
4185 if (len != skb->len) {
4190 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4193 case L2CAP_CID_SIGNALING:
4194 l2cap_sig_channel(conn, skb);
4197 case L2CAP_CID_CONN_LESS:
4198 psm = get_unaligned_le16(skb->data);
4200 l2cap_conless_channel(conn, psm, skb);
4204 l2cap_data_channel(conn, cid, skb);
4209 /* ---- L2CAP interface with lower layer (HCI) ---- */
4211 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4213 int exact = 0, lm1 = 0, lm2 = 0;
4214 register struct sock *sk;
4215 struct hlist_node *node;
4217 if (type != ACL_LINK)
4220 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4222 /* Find listening sockets and check their link_mode */
4223 read_lock(&l2cap_sk_list.lock);
4224 sk_for_each(sk, node, &l2cap_sk_list.head) {
4225 if (sk->sk_state != BT_LISTEN)
4228 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4229 lm1 |= HCI_LM_ACCEPT;
4230 if (l2cap_pi(sk)->role_switch)
4231 lm1 |= HCI_LM_MASTER;
4233 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4234 lm2 |= HCI_LM_ACCEPT;
4235 if (l2cap_pi(sk)->role_switch)
4236 lm2 |= HCI_LM_MASTER;
4239 read_unlock(&l2cap_sk_list.lock);
4241 return exact ? lm1 : lm2;
4244 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4246 struct l2cap_conn *conn;
4248 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4250 if (hcon->type != ACL_LINK)
4254 conn = l2cap_conn_add(hcon, status);
4256 l2cap_conn_ready(conn);
4258 l2cap_conn_del(hcon, bt_err(status));
4263 static int l2cap_disconn_ind(struct hci_conn *hcon)
4265 struct l2cap_conn *conn = hcon->l2cap_data;
4267 BT_DBG("hcon %p", hcon);
4269 if (hcon->type != ACL_LINK || !conn)
4272 return conn->disc_reason;
4275 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4277 BT_DBG("hcon %p reason %d", hcon, reason);
4279 if (hcon->type != ACL_LINK)
4282 l2cap_conn_del(hcon, bt_err(reason));
4287 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4289 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4292 if (encrypt == 0x00) {
4293 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4294 l2cap_sock_clear_timer(sk);
4295 l2cap_sock_set_timer(sk, HZ * 5);
4296 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4297 __l2cap_sock_close(sk, ECONNREFUSED);
4299 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4300 l2cap_sock_clear_timer(sk);
4304 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4306 struct l2cap_chan_list *l;
4307 struct l2cap_conn *conn = hcon->l2cap_data;
4313 l = &conn->chan_list;
4315 BT_DBG("conn %p", conn);
4317 read_lock(&l->lock);
4319 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4322 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4327 if (!status && (sk->sk_state == BT_CONNECTED ||
4328 sk->sk_state == BT_CONFIG)) {
4329 l2cap_check_encryption(sk, encrypt);
4334 if (sk->sk_state == BT_CONNECT) {
4336 struct l2cap_conn_req req;
4337 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4338 req.psm = l2cap_pi(sk)->psm;
4340 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4342 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4343 L2CAP_CONN_REQ, sizeof(req), &req);
4345 l2cap_sock_clear_timer(sk);
4346 l2cap_sock_set_timer(sk, HZ / 10);
4348 } else if (sk->sk_state == BT_CONNECT2) {
4349 struct l2cap_conn_rsp rsp;
4353 sk->sk_state = BT_CONFIG;
4354 result = L2CAP_CR_SUCCESS;
4356 sk->sk_state = BT_DISCONN;
4357 l2cap_sock_set_timer(sk, HZ / 10);
4358 result = L2CAP_CR_SEC_BLOCK;
4361 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4362 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4363 rsp.result = cpu_to_le16(result);
4364 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4365 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4366 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4372 read_unlock(&l->lock);
4377 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4379 struct l2cap_conn *conn = hcon->l2cap_data;
4381 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4384 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4386 if (flags & ACL_START) {
4387 struct l2cap_hdr *hdr;
4391 BT_ERR("Unexpected start frame (len %d)", skb->len);
4392 kfree_skb(conn->rx_skb);
4393 conn->rx_skb = NULL;
4395 l2cap_conn_unreliable(conn, ECOMM);
4399 BT_ERR("Frame is too short (len %d)", skb->len);
4400 l2cap_conn_unreliable(conn, ECOMM);
4404 hdr = (struct l2cap_hdr *) skb->data;
4405 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4407 if (len == skb->len) {
4408 /* Complete frame received */
4409 l2cap_recv_frame(conn, skb);
4413 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4415 if (skb->len > len) {
4416 BT_ERR("Frame is too long (len %d, expected len %d)",
4418 l2cap_conn_unreliable(conn, ECOMM);
4422 /* Allocate skb for the complete frame (with header) */
4423 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4427 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4429 conn->rx_len = len - skb->len;
4431 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4433 if (!conn->rx_len) {
4434 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4435 l2cap_conn_unreliable(conn, ECOMM);
4439 if (skb->len > conn->rx_len) {
4440 BT_ERR("Fragment is too long (len %d, expected %d)",
4441 skb->len, conn->rx_len);
4442 kfree_skb(conn->rx_skb);
4443 conn->rx_skb = NULL;
4445 l2cap_conn_unreliable(conn, ECOMM);
4449 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4451 conn->rx_len -= skb->len;
4453 if (!conn->rx_len) {
4454 /* Complete frame received */
4455 l2cap_recv_frame(conn, conn->rx_skb);
4456 conn->rx_skb = NULL;
4465 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4468 struct hlist_node *node;
4470 read_lock_bh(&l2cap_sk_list.lock);
4472 sk_for_each(sk, node, &l2cap_sk_list.head) {
4473 struct l2cap_pinfo *pi = l2cap_pi(sk);
4475 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4476 batostr(&bt_sk(sk)->src),
4477 batostr(&bt_sk(sk)->dst),
4478 sk->sk_state, __le16_to_cpu(pi->psm),
4480 pi->imtu, pi->omtu, pi->sec_level);
4483 read_unlock_bh(&l2cap_sk_list.lock);
4488 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4490 return single_open(file, l2cap_debugfs_show, inode->i_private);
4493 static const struct file_operations l2cap_debugfs_fops = {
4494 .open = l2cap_debugfs_open,
4496 .llseek = seq_lseek,
4497 .release = single_release,
4500 static struct dentry *l2cap_debugfs;
4502 static const struct proto_ops l2cap_sock_ops = {
4503 .family = PF_BLUETOOTH,
4504 .owner = THIS_MODULE,
4505 .release = l2cap_sock_release,
4506 .bind = l2cap_sock_bind,
4507 .connect = l2cap_sock_connect,
4508 .listen = l2cap_sock_listen,
4509 .accept = l2cap_sock_accept,
4510 .getname = l2cap_sock_getname,
4511 .sendmsg = l2cap_sock_sendmsg,
4512 .recvmsg = l2cap_sock_recvmsg,
4513 .poll = bt_sock_poll,
4514 .ioctl = bt_sock_ioctl,
4515 .mmap = sock_no_mmap,
4516 .socketpair = sock_no_socketpair,
4517 .shutdown = l2cap_sock_shutdown,
4518 .setsockopt = l2cap_sock_setsockopt,
4519 .getsockopt = l2cap_sock_getsockopt
4522 static const struct net_proto_family l2cap_sock_family_ops = {
4523 .family = PF_BLUETOOTH,
4524 .owner = THIS_MODULE,
4525 .create = l2cap_sock_create,
4528 static struct hci_proto l2cap_hci_proto = {
4530 .id = HCI_PROTO_L2CAP,
4531 .connect_ind = l2cap_connect_ind,
4532 .connect_cfm = l2cap_connect_cfm,
4533 .disconn_ind = l2cap_disconn_ind,
4534 .disconn_cfm = l2cap_disconn_cfm,
4535 .security_cfm = l2cap_security_cfm,
4536 .recv_acldata = l2cap_recv_acldata
4539 static int __init l2cap_init(void)
4543 err = proto_register(&l2cap_proto, 0);
4547 _busy_wq = create_singlethread_workqueue("l2cap");
4551 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4553 BT_ERR("L2CAP socket registration failed");
4557 err = hci_register_proto(&l2cap_hci_proto);
4559 BT_ERR("L2CAP protocol registration failed");
4560 bt_sock_unregister(BTPROTO_L2CAP);
4565 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4566 bt_debugfs, NULL, &l2cap_debugfs_fops);
4568 BT_ERR("Failed to create L2CAP debug file");
4571 BT_INFO("L2CAP ver %s", VERSION);
4572 BT_INFO("L2CAP socket layer initialized");
4577 proto_unregister(&l2cap_proto);
4581 static void __exit l2cap_exit(void)
4583 debugfs_remove(l2cap_debugfs);
4585 flush_workqueue(_busy_wq);
4586 destroy_workqueue(_busy_wq);
4588 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4589 BT_ERR("L2CAP socket unregistration failed");
4591 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4592 BT_ERR("L2CAP protocol unregistration failed");
4594 proto_unregister(&l2cap_proto);
4597 void l2cap_load(void)
4599 /* Dummy function to trigger automatic L2CAP module loading by
4600 * other modules that use L2CAP sockets but don't use any other
4601 * symbols from it. */
4604 EXPORT_SYMBOL(l2cap_load);
4606 module_init(l2cap_init);
4607 module_exit(l2cap_exit);
4609 module_param(enable_ertm, bool, 0644);
4610 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4612 module_param(max_transmit, uint, 0644);
4613 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4615 module_param(tx_window, uint, 0644);
4616 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4618 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4619 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4620 MODULE_VERSION(VERSION);
4621 MODULE_LICENSE("GPL");
4622 MODULE_ALIAS("bt-proto-0");