2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
60 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void __l2cap_sock_close(struct sock *sk, int reason);
72 static void l2cap_sock_close(struct sock *sk);
73 static void l2cap_sock_kill(struct sock *sk);
75 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
76 u8 code, u8 ident, u16 dlen, void *data);
78 /* ---- L2CAP timers ---- */
/* Socket timer callback: chooses an errno from the socket state and closes
 * the channel.  NOTE(review): this extract elides lines (the declaration of
 * `reason`, locking, and a default branch are not visible) — do not assume
 * the visible conditions are exhaustive. */
79 static void l2cap_sock_timeout(unsigned long arg)
81 struct sock *sk = (struct sock *) arg;
84 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Timed out during connect/config, or during an outgoing connect that has
 * already passed SDP-level security: report connection refused. */
88 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
89 reason = ECONNREFUSED;
90 else if (sk->sk_state == BT_CONNECT &&
91 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
92 reason = ECONNREFUSED;
96 __l2cap_sock_close(sk, reason);
/* Arm the per-socket timer to fire `timeout` jiffies from now. */
104 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
106 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
107 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel the per-socket timer. */
110 static void l2cap_sock_clear_timer(struct sock *sk)
112 BT_DBG("sock %p state %d", sk, sk->sk_state);
113 sk_stop_timer(sk, &sk->sk_timer);
116 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for a channel whose destination CID
 * matches.  Caller must hold the channel-list lock ("__" variant). */
117 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
120 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
121 if (l2cap_pi(s)->dcid == cid)
/* As above, but matches on the source CID.  Lock held by caller. */
127 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
131 if (l2cap_pi(s)->scid == cid)
137 /* Find channel with given SCID.
138 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(); takes the list read
 * lock for the lookup (the bh_lock_sock of the result is elided here). */
139 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 s = __l2cap_get_chan_by_scid(l, cid);
146 read_unlock(&l->lock);
/* Find the channel waiting on a signalling exchange with this ident.
 * Caller must hold the channel-list lock. */
150 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
154 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
160 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
164 s = __l2cap_get_chan_by_ident(l, ident);
167 read_unlock(&l->lock);
/* Allocate the first free dynamic source CID by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
171 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
173 u16 cid = L2CAP_CID_DYN_START;
175 for (; cid < L2CAP_CID_DYN_END; cid++) {
176 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push `sk` onto the head of the doubly-linked channel list.  Lock is
 * expected to be held by the caller ("__" variant). */
183 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
188 l2cap_pi(l->head)->prev_c = sk;
190 l2cap_pi(sk)->next_c = l->head;
191 l2cap_pi(sk)->prev_c = NULL;
/* Remove `sk` from the channel list under the list write lock, fixing up
 * its neighbours' links. */
195 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
197 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
199 write_lock_bh(&l->lock);
204 l2cap_pi(next)->prev_c = prev;
206 l2cap_pi(prev)->next_c = next;
207 write_unlock_bh(&l->lock);
/* Attach `sk` to `conn` and assign its CIDs according to socket type:
 * SEQPACKET gets a fresh dynamic CID, DGRAM uses the connectionless CID,
 * and raw sockets are pinned to the signalling CID.  If `parent` is set
 * the new socket is queued on the parent's accept queue. */
212 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
214 struct l2cap_chan_list *l = &conn->chan_list;
216 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
217 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: "remote entity terminated connection" default disconnect reason. */
219 conn->disc_reason = 0x13;
221 l2cap_pi(sk)->conn = conn;
223 if (sk->sk_type == SOCK_SEQPACKET) {
224 /* Alloc CID for connection-oriented socket */
225 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
226 } else if (sk->sk_type == SOCK_DGRAM) {
227 /* Connectionless socket */
228 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
230 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
232 /* Raw socket can send/recv signalling messages only */
233 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
235 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 __l2cap_chan_link(l, sk);
241 bt_accept_enqueue(parent, sk);
245 * Must be called on the locked socket. */
/* Detach the channel from its connection, drop the hcon reference, mark
 * the socket closed/zapped and notify the parent (if accept-pending) or
 * the socket itself.  Must be called on the locked socket. */
246 static void l2cap_chan_del(struct sock *sk, int err)
248 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
249 struct sock *parent = bt_sk(sk)->parent;
251 l2cap_sock_clear_timer(sk);
253 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
256 /* Unlink from channel list */
257 l2cap_chan_unlink(&conn->chan_list, sk);
258 l2cap_pi(sk)->conn = NULL;
259 hci_conn_put(conn->hcon);
262 sk->sk_state = BT_CLOSED;
263 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: unlink from the accept queue and wake the
 * listening parent so it notices the change. */
269 bt_accept_unlink(sk);
270 parent->sk_data_ready(parent, 0);
272 sk->sk_state_change(sk);
275 /* Service level security */
/* Map the channel's PSM and security level to an HCI authentication
 * requirement, then ask the HCI layer to enforce it.  PSM 0x0001 (SDP)
 * never requires bonding and is demoted from "low" to SDP-only security. */
276 static inline int l2cap_check_security(struct sock *sk)
278 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
281 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
283 auth_type = HCI_AT_NO_BONDING_MITM;
285 auth_type = HCI_AT_NO_BONDING;
287 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
288 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
290 switch (l2cap_pi(sk)->sec_level) {
291 case BT_SECURITY_HIGH:
292 auth_type = HCI_AT_GENERAL_BONDING_MITM;
294 case BT_SECURITY_MEDIUM:
295 auth_type = HCI_AT_GENERAL_BONDING;
298 auth_type = HCI_AT_NO_BONDING;
303 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Return the next signalling command identifier for this connection,
 * cycling within the kernel-reserved range under conn->lock. */
307 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
311 /* Get next available identificator.
312 * 1 - 128 are used by kernel.
313 * 129 - 199 are reserved.
314 * 200 - 254 are used by utilities like l2ping, etc.
317 spin_lock_bh(&conn->lock);
319 if (++conn->tx_ident > 128)
324 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and hand it to the ACL layer. */
329 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
331 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
333 BT_DBG("code 0x%2.2x", code);
338 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM S-frame carrying `control`, folding in pending
 * Final/Poll bits and appending an FCS when CRC16 checking is enabled. */
341 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
344 struct l2cap_hdr *lh;
345 struct l2cap_conn *conn = pi->conn;
346 int count, hlen = L2CAP_HDR_SIZE + 2;
/* FCS adds two trailing bytes (hlen increment elided in this extract). */
348 if (pi->fcs == L2CAP_FCS_CRC16)
351 BT_DBG("pi %p, control 0x%2.2x", pi, control);
353 count = min_t(unsigned int, conn->mtu, hlen);
354 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume one-shot F and P bits queued on the connection state. */
356 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
357 control |= L2CAP_CTRL_FINAL;
358 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
361 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
362 control |= L2CAP_CTRL_POLL;
363 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
366 skb = bt_skb_alloc(count, GFP_ATOMIC);
370 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
371 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
372 lh->cid = cpu_to_le16(pi->dcid);
373 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything except the FCS field itself). */
375 if (pi->fcs == L2CAP_FCS_CRC16) {
376 u16 fcs = crc16(0, (u8 *)lh, count - 2);
377 put_unaligned_le16(fcs, skb_put(skb, 2));
380 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RNR when locally busy, RR otherwise, acknowledging up to
 * buffer_seq via the ReqSeq field. */
383 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
385 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
386 control |= L2CAP_SUPER_RCV_NOT_READY;
388 control |= L2CAP_SUPER_RCV_READY;
390 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
392 return l2cap_send_sframe(pi, control);
/* Kick off connection establishment for one channel.  If the peer's
 * feature mask is already known (or the request was at least sent) and
 * security passes, issue a Connection Request; otherwise first send an
 * Information Request for the feature mask and start the info timer. */
395 static void l2cap_do_start(struct sock *sk)
397 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
399 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for it to finish. */
400 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
403 if (l2cap_check_security(sk)) {
404 struct l2cap_conn_req req;
405 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
406 req.psm = l2cap_pi(sk)->psm;
408 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
410 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
411 L2CAP_CONN_REQ, sizeof(req), &req);
414 struct l2cap_info_req req;
415 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
417 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
418 conn->info_ident = l2cap_get_ident(conn);
420 mod_timer(&conn->info_timer, jiffies +
421 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
423 l2cap_send_cmd(conn, conn->info_ident,
424 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnection Request for this channel's CID pair. */
428 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
430 struct l2cap_disconn_req req;
432 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
433 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
434 l2cap_send_cmd(conn, l2cap_get_ident(conn),
435 L2CAP_DISCONN_REQ, sizeof(req), &req);
438 /* ---- L2CAP connections ---- */
/* After the feature-mask exchange completes, walk every channel on the
 * connection: BT_CONNECT channels that pass security get a Connection
 * Request; BT_CONNECT2 (incoming, pending) channels get a Connection
 * Response whose result/status reflect security and defer_setup state. */
439 static void l2cap_conn_start(struct l2cap_conn *conn)
441 struct l2cap_chan_list *l = &conn->chan_list;
444 BT_DBG("conn %p", conn);
448 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
451 if (sk->sk_type != SOCK_SEQPACKET) {
456 if (sk->sk_state == BT_CONNECT) {
457 if (l2cap_check_security(sk)) {
458 struct l2cap_conn_req req;
459 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
460 req.psm = l2cap_pi(sk)->psm;
462 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
464 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
465 L2CAP_CONN_REQ, sizeof(req), &req);
467 } else if (sk->sk_state == BT_CONNECT2) {
468 struct l2cap_conn_rsp rsp;
/* In the response, our scid becomes the peer's dcid and vice versa. */
469 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
470 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
472 if (l2cap_check_security(sk)) {
473 if (bt_sk(sk)->defer_setup) {
474 struct sock *parent = bt_sk(sk)->parent;
475 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
476 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Let the listener decide whether to accept. */
477 parent->sk_data_ready(parent, 0);
480 sk->sk_state = BT_CONFIG;
481 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
482 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
485 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
486 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
489 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
490 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
496 read_unlock(&l->lock);
/* ACL link came up: mark non-SEQPACKET channels connected immediately;
 * SEQPACKET channels in BT_CONNECT proceed to the L2CAP handshake
 * (the l2cap_do_start call is elided in this extract). */
499 static void l2cap_conn_ready(struct l2cap_conn *conn)
501 struct l2cap_chan_list *l = &conn->chan_list;
504 BT_DBG("conn %p", conn);
508 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
511 if (sk->sk_type != SOCK_SEQPACKET) {
512 l2cap_sock_clear_timer(sk);
513 sk->sk_state = BT_CONNECTED;
514 sk->sk_state_change(sk);
515 } else if (sk->sk_state == BT_CONNECT)
521 read_unlock(&l->lock);
524 /* Notify sockets that we cannot guaranty reliability anymore */
/* Reliability can no longer be guaranteed on this link: walk the channel
 * list and (for force_reliable channels) report the error — the
 * sk_err assignment is elided in this extract. */
525 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
527 struct l2cap_chan_list *l = &conn->chan_list;
530 BT_DBG("conn %p", conn);
534 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
535 if (l2cap_pi(sk)->force_reliable)
539 read_unlock(&l->lock);
/* Information Request timed out: give up on the feature mask, mark the
 * exchange done, and start any channels that were waiting on it. */
542 static void l2cap_info_timeout(unsigned long arg)
544 struct l2cap_conn *conn = (void *) arg;
546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
547 conn->info_ident = 0;
549 l2cap_conn_start(conn);
/* Return the L2CAP connection object for `hcon`, allocating and
 * initialising one (MTU, addresses, locks, info timer) on first use. */
552 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
554 struct l2cap_conn *conn = hcon->l2cap_data;
559 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
563 hcon->l2cap_data = conn;
566 BT_DBG("hcon %p conn %p", hcon, conn);
568 conn->mtu = hcon->hdev->acl_mtu;
569 conn->src = &hcon->hdev->bdaddr;
570 conn->dst = &hcon->dst;
574 spin_lock_init(&conn->lock);
575 rwlock_init(&conn->chan_list.lock);
577 setup_timer(&conn->info_timer, l2cap_info_timeout,
578 (unsigned long) conn);
/* 0x13: default "remote terminated" disconnect reason. */
580 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection: free any partial reassembly skb, close
 * every channel with `err`, stop the info timer and detach from hcon. */
585 static void l2cap_conn_del(struct hci_conn *hcon, int err)
587 struct l2cap_conn *conn = hcon->l2cap_data;
593 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
595 kfree_skb(conn->rx_skb);
598 while ((sk = conn->chan_list.head)) {
600 l2cap_chan_del(sk, err);
605 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
606 del_timer_sync(&conn->info_timer);
608 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
612 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
614 struct l2cap_chan_list *l = &conn->chan_list;
615 write_lock_bh(&l->lock);
616 __l2cap_chan_add(conn, sk, parent);
617 write_unlock_bh(&l->lock);
620 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this (psm, source bdaddr) pair.
 * Caller holds l2cap_sk_list.lock. */
621 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
624 struct hlist_node *node;
625 sk_for_each(sk, node, &l2cap_sk_list.head)
626 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
633 /* Find socket with psm and source bdaddr.
634 * Returns closest match.
/* Closest-match lookup for an incoming connect: prefer a socket bound to
 * the exact source address, fall back to one bound to BDADDR_ANY (sk1).
 * An optional `state` filter narrows to sockets in that state. */
636 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
638 struct sock *sk = NULL, *sk1 = NULL;
639 struct hlist_node *node;
641 sk_for_each(sk, node, &l2cap_sk_list.head) {
642 if (state && sk->sk_state != state)
645 if (l2cap_pi(sk)->psm == psm) {
647 if (!bacmp(&bt_sk(sk)->src, src))
651 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match. */
655 return node ? sk : sk1;
658 /* Find socket with given address (psm, src).
659 * Returns locked socket */
/* Locked wrapper around __l2cap_get_sock_by_psm(). */
660 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
663 read_lock(&l2cap_sk_list.lock);
664 s = __l2cap_get_sock_by_psm(state, psm, src);
667 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop any queued receive/transmit skbs. */
671 static void l2cap_sock_destruct(struct sock *sk)
675 skb_queue_purge(&sk->sk_receive_queue);
676 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark the
 * parent itself closed and zapped. */
679 static void l2cap_sock_cleanup_listen(struct sock *parent)
683 BT_DBG("parent %p", parent);
685 /* Close not yet accepted channels */
686 while ((sk = bt_accept_dequeue(parent, NULL)))
687 l2cap_sock_close(sk);
689 parent->sk_state = BT_CLOSED;
690 sock_set_flag(parent, SOCK_ZAPPED);
693 /* Kill socket (only if zapped and orphan)
694 * Must be called on unlocked socket.
/* Kill socket (only if zapped and orphan).  Must be called on an
 * unlocked socket; the final sock_put is elided in this extract. */
696 static void l2cap_sock_kill(struct sock *sk)
698 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
701 BT_DBG("sk %p state %d", sk, sk->sk_state);
703 /* Kill poor orphan */
704 bt_sock_unlink(&l2cap_sk_list, sk);
705 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners clean their accept queue; connected
 * SEQPACKET channels send a Disconnection Request and wait in BT_DISCONN;
 * pending incoming channels (BT_CONNECT2 path) answer with a negative
 * Connection Response; everything else is simply deleted/zapped.
 * (Several case labels and braces are elided in this extract.) */
709 static void __l2cap_sock_close(struct sock *sk, int reason)
711 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
713 switch (sk->sk_state) {
715 l2cap_sock_cleanup_listen(sk);
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
723 sk->sk_state = BT_DISCONN;
724 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
725 l2cap_send_disconn_req(conn, sk);
727 l2cap_chan_del(sk, reason);
731 if (sk->sk_type == SOCK_SEQPACKET) {
732 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
733 struct l2cap_conn_rsp rsp;
/* Reject reason depends on whether setup was deferred to userspace. */
736 if (bt_sk(sk)->defer_setup)
737 result = L2CAP_CR_SEC_BLOCK;
739 result = L2CAP_CR_BAD_PSM;
741 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
742 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
743 rsp.result = cpu_to_le16(result);
744 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
745 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
746 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
748 l2cap_chan_del(sk, reason);
753 l2cap_chan_del(sk, reason);
757 sock_set_flag(sk, SOCK_ZAPPED);
762 /* Must be called on unlocked socket. */
/* Public close: cancel the timer then close with ECONNRESET.  Must be
 * called on an unlocked socket (locking is elided in this extract). */
763 static void l2cap_sock_close(struct sock *sk)
765 l2cap_sock_clear_timer(sk)
767 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a new socket's L2CAP options: inherit everything from the
 * accepting parent when there is one, otherwise apply module defaults
 * (basic mode, CRC16 FCS, module-parameter tx window / max transmit). */
772 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
774 struct l2cap_pinfo *pi = l2cap_pi(sk);
779 sk->sk_type = parent->sk_type;
780 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
782 pi->imtu = l2cap_pi(parent)->imtu;
783 pi->omtu = l2cap_pi(parent)->omtu;
784 pi->mode = l2cap_pi(parent)->mode;
785 pi->fcs = l2cap_pi(parent)->fcs;
786 pi->max_tx = l2cap_pi(parent)->max_tx;
787 pi->tx_win = l2cap_pi(parent)->tx_win;
788 pi->sec_level = l2cap_pi(parent)->sec_level;
789 pi->role_switch = l2cap_pi(parent)->role_switch;
790 pi->force_reliable = l2cap_pi(parent)->force_reliable;
792 pi->imtu = L2CAP_DEFAULT_MTU;
794 pi->mode = L2CAP_MODE_BASIC;
795 pi->max_tx = max_transmit;
796 pi->fcs = L2CAP_FCS_CRC16;
797 pi->tx_win = tx_window;
798 pi->sec_level = BT_SECURITY_LOW;
800 pi->force_reliable = 0;
803 /* Default config options */
805 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
806 skb_queue_head_init(TX_QUEUE(sk));
807 skb_queue_head_init(SREJ_QUEUE(sk));
808 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk allocation for struct l2cap_pinfo. */
811 static struct proto l2cap_proto = {
813 .owner = THIS_MODULE,
814 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP socket: destructor, connect
 * timeout, state BT_OPEN, per-socket timer, global list membership. */
817 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
821 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
825 sock_init_data(sock, sk);
826 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
828 sk->sk_destruct = l2cap_sock_destruct;
829 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
831 sock_reset_flag(sk, SOCK_ZAPPED);
833 sk->sk_protocol = proto;
834 sk->sk_state = BT_OPEN;
836 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
838 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) entry point: only SEQPACKET/DGRAM/RAW types are supported,
 * and RAW additionally requires CAP_NET_RAW for non-kernel callers. */
842 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
847 BT_DBG("sock %p", sock);
849 sock->state = SS_UNCONNECTED;
851 if (sock->type != SOCK_SEQPACKET &&
852 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
853 return -ESOCKTNOSUPPORT;
855 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
858 sock->ops = &l2cap_sock_ops;
860 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
864 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the sockaddr (tolerating short lengths), require state
 * BT_OPEN, enforce CAP_NET_BIND_SERVICE for reserved PSMs (< 0x1001),
 * reject duplicate (psm, bdaddr) bindings, then record the binding and
 * move to BT_BOUND.  SDP/RFCOMM-mux PSMs default to SDP security. */
868 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
870 struct sock *sk = sock->sk;
871 struct sockaddr_l2 la;
876 if (!addr || addr->sa_family != AF_BLUETOOTH)
879 memset(&la, 0, sizeof(la));
880 len = min_t(unsigned int, sizeof(la), alen);
881 memcpy(&la, addr, len);
888 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved for well-known services. */
893 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
894 !capable(CAP_NET_BIND_SERVICE)) {
899 write_lock_bh(&l2cap_sk_list.lock);
901 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
904 /* Save source address */
905 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
906 l2cap_pi(sk)->psm = la.l2_psm;
907 l2cap_pi(sk)->sport = la.l2_psm;
908 sk->sk_state = BT_BOUND;
910 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
911 __le16_to_cpu(la.l2_psm) == 0x0003)
912 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
915 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing connection: route to an HCI device, derive the
 * auth requirement from socket type / PSM / security level, create the
 * ACL link, attach the channel, and either wait for the link (timer
 * armed) or proceed immediately if the ACL is already up. */
922 static int l2cap_do_connect(struct sock *sk)
924 bdaddr_t *src = &bt_sk(sk)->src;
925 bdaddr_t *dst = &bt_sk(sk)->dst;
926 struct l2cap_conn *conn;
927 struct hci_conn *hcon;
928 struct hci_dev *hdev;
932 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
935 hdev = hci_get_route(dst, src);
937 return -EHOSTUNREACH;
939 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; PSM 0x0001 (SDP) never bonds;
 * everything else maps sec_level to general bonding. */
943 if (sk->sk_type == SOCK_RAW) {
944 switch (l2cap_pi(sk)->sec_level) {
945 case BT_SECURITY_HIGH:
946 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
948 case BT_SECURITY_MEDIUM:
949 auth_type = HCI_AT_DEDICATED_BONDING;
952 auth_type = HCI_AT_NO_BONDING;
955 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
956 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
957 auth_type = HCI_AT_NO_BONDING_MITM;
959 auth_type = HCI_AT_NO_BONDING;
961 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
962 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
964 switch (l2cap_pi(sk)->sec_level) {
965 case BT_SECURITY_HIGH:
966 auth_type = HCI_AT_GENERAL_BONDING_MITM;
968 case BT_SECURITY_MEDIUM:
969 auth_type = HCI_AT_GENERAL_BONDING;
972 auth_type = HCI_AT_NO_BONDING;
977 hcon = hci_connect(hdev, ACL_LINK, dst,
978 l2cap_pi(sk)->sec_level, auth_type);
982 conn = l2cap_conn_add(hcon, 0);
990 /* Update source addr of the socket */
991 bacpy(src, conn->src);
993 l2cap_chan_add(conn, sk, NULL);
995 sk->sk_state = BT_CONNECT;
996 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL already connected: non-SEQPACKET sockets are done immediately. */
998 if (hcon->state == BT_CONNECTED) {
999 if (sk->sk_type != SOCK_SEQPACKET) {
1000 l2cap_sock_clear_timer(sk);
1001 sk->sk_state = BT_CONNECTED;
1007 hci_dev_unlock_bh(hdev);
/* connect(2): validate the sockaddr, require a PSM for SEQPACKET, reject
 * ERTM/streaming modes unless enabled (checks partly elided here),
 * disallow re-connecting, then store the destination, start the connect,
 * and wait for BT_CONNECTED subject to the socket's send timeout. */
1012 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1014 struct sock *sk = sock->sk;
1015 struct sockaddr_l2 la;
1018 BT_DBG("sk %p", sk);
1020 if (!addr || alen < sizeof(addr->sa_family) ||
1021 addr->sa_family != AF_BLUETOOTH)
1024 memset(&la, 0, sizeof(la));
1025 len = min_t(unsigned int, sizeof(la), alen);
1026 memcpy(&la, addr, len);
1033 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1038 switch (l2cap_pi(sk)->mode) {
1039 case L2CAP_MODE_BASIC:
1041 case L2CAP_MODE_ERTM:
1042 case L2CAP_MODE_STREAMING:
1051 switch (sk->sk_state) {
1055 /* Already connecting */
1059 /* Already connected */
1072 /* Set destination address and psm */
1073 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1074 l2cap_pi(sk)->psm = la.l2_psm;
1076 err = l2cap_do_connect(sk);
1081 err = bt_sock_wait_state(sk, BT_CONNECTED,
1082 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen; ERTM/streaming gate
 * is partly elided.  An unbound PSM is auto-assigned from the odd values
 * in [0x1001, 0x1100) under the socket-list lock. */
1088 static int l2cap_sock_listen(struct socket *sock, int backlog)
1090 struct sock *sk = sock->sk;
1093 BT_DBG("sk %p backlog %d", sk, backlog);
1097 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1102 switch (l2cap_pi(sk)->mode) {
1103 case L2CAP_MODE_BASIC:
1105 case L2CAP_MODE_ERTM:
1106 case L2CAP_MODE_STREAMING:
1115 if (!l2cap_pi(sk)->psm) {
1116 bdaddr_t *src = &bt_sk(sk)->src;
1121 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd (LSB of the low octet must be 1). */
1123 for (psm = 0x1001; psm < 0x1100; psm += 2)
1124 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1125 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1126 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1131 write_unlock_bh(&l2cap_sk_list.lock);
1137 sk->sk_max_ack_backlog = backlog;
1138 sk->sk_ack_backlog = 0;
1139 sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one wait loop — sleep on the listener's wait
 * queue until a child is available, the timeout expires, a signal
 * arrives, or the socket leaves BT_LISTEN.  The socket lock is dropped
 * around schedule_timeout (release elided in this extract). */
1146 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1148 DECLARE_WAITQUEUE(wait, current);
1149 struct sock *sk = sock->sk, *nsk;
1153 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1155 if (sk->sk_state != BT_LISTEN) {
1160 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1162 BT_DBG("sk %p timeo %ld", sk, timeo);
1164 /* Wait for an incoming connection. (wake-one). */
1165 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1166 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1167 set_current_state(TASK_INTERRUPTIBLE);
1174 timeo = schedule_timeout(timeo);
1175 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1177 if (sk->sk_state != BT_LISTEN) {
1182 if (signal_pending(current)) {
1183 err = sock_intr_errno(timeo);
1187 set_current_state(TASK_RUNNING);
1188 remove_wait_queue(sk_sleep(sk), &wait);
1193 newsock->state = SS_CONNECTED;
1195 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with the peer's (peer != 0)
 * or our own (peer == 0) psm/bdaddr/cid. */
1202 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1204 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1205 struct sock *sk = sock->sk;
1207 BT_DBG("sock %p, sk %p", sock, sk);
1209 addr->sa_family = AF_BLUETOOTH;
1210 *len = sizeof(struct sockaddr_l2);
1213 la->l2_psm = l2cap_pi(sk)->psm;
1214 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1215 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1217 la->l2_psm = l2cap_pi(sk)->sport;
1218 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1219 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: after remote_max_tx unanswered polls, disconnect;
 * otherwise re-arm the monitor and send another poll (RR with P=1). */
1225 static void l2cap_monitor_timeout(unsigned long arg)
1227 struct sock *sk = (void *) arg;
1231 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1232 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1237 l2cap_pi(sk)->retry_count++;
1238 __mod_monitor_timer();
1240 control = L2CAP_CTRL_POLL;
1241 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: start the monitor sequence — retry_count=1,
 * enter WAIT_F, and poll the peer (RR/RNR with P=1). */
1245 static void l2cap_retrans_timeout(unsigned long arg)
1247 struct sock *sk = (void *) arg;
1251 l2cap_pi(sk)->retry_count = 1;
1252 __mod_monitor_timer();
1254 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1256 control = L2CAP_CTRL_POLL;
1257 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Free transmitted frames from the TX queue up to (but not including)
 * expected_ack_seq; stop the retransmission timer once nothing remains
 * unacknowledged. */
1261 static void l2cap_drop_acked_frames(struct sock *sk)
1263 struct sk_buff *skb;
1265 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1266 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1269 skb = skb_dequeue(TX_QUEUE(sk));
1272 l2cap_pi(sk)->unacked_frames--;
1275 if (!l2cap_pi(sk)->unacked_frames)
1276 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the ACL layer for this channel's connection. */
1281 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1283 struct l2cap_pinfo *pi = l2cap_pi(sk);
1286 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1288 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: clone each queued frame, stamp TxSeq into the
 * control field, recompute the FCS in place if enabled, and send.  No
 * retransmission — sent frames are dequeued and freed immediately. */
1295 static int l2cap_streaming_send(struct sock *sk)
1297 struct sk_buff *skb, *tx_skb;
1298 struct l2cap_pinfo *pi = l2cap_pi(sk);
1302 while ((skb = sk->sk_send_head)) {
1303 tx_skb = skb_clone(skb, GFP_ATOMIC);
1305 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1306 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1307 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS occupies the last two bytes; CRC covers everything before it. */
1309 if (pi->fcs == L2CAP_FCS_CRC16) {
1310 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1311 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1314 err = l2cap_do_send(sk, tx_skb);
1316 l2cap_send_disconn_req(pi->conn, sk);
/* Modulo-64 sequence space per the ERTM/streaming control field. */
1320 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1322 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1323 sk->sk_send_head = NULL;
1325 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1327 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame whose TxSeq equals `tx_seq`: locate it in
 * the TX queue, give up (disconnect) if it already hit remote_max_tx
 * retries, otherwise clone it with refreshed ReqSeq/TxSeq and FCS. */
1333 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1335 struct l2cap_pinfo *pi = l2cap_pi(sk);
1336 struct sk_buff *skb, *tx_skb;
1340 skb = skb_peek(TX_QUEUE(sk));
1342 if (bt_cb(skb)->tx_seq != tx_seq) {
1343 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1345 skb = skb_queue_next(TX_QUEUE(sk), skb);
1349 if (pi->remote_max_tx &&
1350 bt_cb(skb)->retries == pi->remote_max_tx) {
1351 l2cap_send_disconn_req(pi->conn, sk);
1355 tx_skb = skb_clone(skb, GFP_ATOMIC);
1356 bt_cb(skb)->retries++;
1357 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1358 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1359 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1360 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1362 if (pi->fcs == L2CAP_FCS_CRC16) {
1363 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1364 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1367 err = l2cap_do_send(sk, tx_skb);
1369 l2cap_send_disconn_req(pi->conn, sk);
/* ERTM transmit loop: while the TX window has room and the peer is not
 * busy, clone the next frame, stamp F-bit/ReqSeq/TxSeq, update the FCS,
 * send, arm the retransmission timer and account the unacked frame.
 * Suppressed entirely while waiting for an F-bit (WAIT_F). */
1377 static int l2cap_ertm_send(struct sock *sk)
1379 struct sk_buff *skb, *tx_skb;
1380 struct l2cap_pinfo *pi = l2cap_pi(sk);
1384 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1387 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1388 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1390 if (pi->remote_max_tx &&
1391 bt_cb(skb)->retries == pi->remote_max_tx) {
1392 l2cap_send_disconn_req(pi->conn, sk);
1396 tx_skb = skb_clone(skb, GFP_ATOMIC);
1398 bt_cb(skb)->retries++;
1400 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1401 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1402 control |= L2CAP_CTRL_FINAL;
1403 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1405 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1406 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1407 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): the FCS is computed over and written into `skb` (the
 * original) rather than `tx_skb` (the clone being sent, whose control
 * field was just rewritten) — the sibling streaming/retransmit paths use
 * tx_skb here.  Looks like a bug; confirm against upstream history. */
1410 if (pi->fcs == L2CAP_FCS_CRC16) {
1411 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1412 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1415 err = l2cap_do_send(sk, tx_skb);
1417 l2cap_send_disconn_req(pi->conn, sk);
1420 __mod_retrans_timer();
1422 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1423 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1425 pi->unacked_frames++;
1428 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1429 sk->sk_send_head = NULL;
1431 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received frames: send RNR when locally busy; otherwise try
 * to piggyback the ack on pending I-frames via l2cap_ertm_send(), and
 * fall back to an explicit RR only when nothing was sent. */
1439 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1441 struct sock *sk = (struct sock *)pi;
1444 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1446 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1447 control |= L2CAP_SUPER_RCV_NOT_READY;
1448 return l2cap_send_sframe(pi, control);
1449 } else if (l2cap_ertm_send(sk) == 0) {
1450 control |= L2CAP_SUPER_RCV_READY;
1451 return l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame with the F-bit set, requesting the last (tail)
 * entry on the pending SREJ list. */
1456 static int l2cap_send_srejtail(struct sock *sk)
1458 struct srej_list *tail;
1461 control = L2CAP_SUPER_SELECT_REJECT;
1462 control |= L2CAP_CTRL_FINAL;
1464 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1465 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1467 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy `len` bytes of user iovec data into `skb`: `count` bytes into the
 * head, the remainder into MTU-sized continuation fragments chained on
 * frag_list.  Returns negative on copy/alloc failure (paths elided). */
1472 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1474 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1475 struct sk_buff **frag;
1478 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1485 /* Continuation fragments (no L2CAP header) */
1486 frag = &skb_shinfo(skb)->frag_list;
1488 count = min_t(unsigned int, conn->mtu, len);
1490 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1493 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1499 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * then the user payload copied from the message iovec. */
1505 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1507 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1508 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field. */
1509 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1510 struct l2cap_hdr *lh;
1512 BT_DBG("sk %p len %d", sk, (int)len);
1514 count = min_t(unsigned int, (conn->mtu - hlen), len);
1515 skb = bt_skb_send_alloc(sk, count + hlen,
1516 msg->msg_flags & MSG_DONTWAIT, &err);
1518 return ERR_PTR(-ENOMEM);
1520 /* Create L2CAP header */
1521 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1522 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1523 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1524 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1526 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1527 if (unlikely(err < 0)) {
1529 return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: plain L2CAP header + user payload. */
1534 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1536 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1537 struct sk_buff *skb;
1538 int err, count, hlen = L2CAP_HDR_SIZE;
1539 struct l2cap_hdr *lh;
1541 BT_DBG("sk %p len %d", sk, (int)len);
1543 count = min_t(unsigned int, (conn->mtu - hlen), len);
1544 skb = bt_skb_send_alloc(sk, count + hlen,
1545 msg->msg_flags & MSG_DONTWAIT, &err);
1547 return ERR_PTR(-ENOMEM);
1549 /* Create L2CAP header */
1550 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1551 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1552 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1554 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1555 if (unlikely(err < 0)) {
1557 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: header + 16-bit control, optional
 * 16-bit SDU length (segmented SDUs, when `sdulen` != 0), payload, and a
 * zero FCS placeholder that the send path fills in later. */
1562 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1564 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1565 struct sk_buff *skb;
1566 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1567 struct l2cap_hdr *lh;
1569 BT_DBG("sk %p len %d", sk, (int)len);
1572 return ERR_PTR(-ENOTCONN);
/* FCS adds two bytes to the frame (hlen increment elided here). */
1577 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1580 count = min_t(unsigned int, (conn->mtu - hlen), len);
1581 skb = bt_skb_send_alloc(sk, count + hlen,
1582 msg->msg_flags & MSG_DONTWAIT, &err);
1584 return ERR_PTR(-ENOMEM);
1586 /* Create L2CAP header */
1587 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1588 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1589 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1590 put_unaligned_le16(control, skb_put(skb, 2));
1592 put_unaligned_le16(sdulen, skb_put(skb, 2));
1594 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1595 if (unlikely(err < 0)) {
1597 return ERR_PTR(err);
/* Placeholder FCS; real value is computed per transmission attempt. */
1600 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1601 put_unaligned_le16(0, skb_put(skb, 2));
1603 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START I-frame
 * (whose SDU-length field carries the total @len), CONTINUE frames,
 * and a final END frame.  Frames are built on a private queue and
 * spliced onto the socket TX queue only after all allocations
 * succeeded, so a mid-stream failure leaves the TX queue untouched. */
1607 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1609 struct l2cap_pinfo *pi = l2cap_pi(sk);
1610 struct sk_buff *skb;
1611 struct sk_buff_head sar_queue;
1615 __skb_queue_head_init(&sar_queue);
/* First segment: SAR = start, sdulen = total SDU length */
1616 control = L2CAP_SDU_START;
1617 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1619 return PTR_ERR(skb);
1621 __skb_queue_tail(&sar_queue, skb);
1622 len -= pi->remote_mps;
1623 size += pi->remote_mps;
/* Middle segments are full-MPS CONTINUE frames; the last is END */
1629 if (len > pi->remote_mps) {
1630 control |= L2CAP_SDU_CONTINUE;
1631 buflen = pi->remote_mps;
1633 control |= L2CAP_SDU_END;
/* Continuation/end frames carry no SDU-length field (sdulen = 0) */
1637 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Allocation failed part-way: drop everything built so far */
1639 skb_queue_purge(&sar_queue);
1640 return PTR_ERR(skb);
1643 __skb_queue_tail(&sar_queue, skb);
1648 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
/* Point the send head at the first new frame if the queue was idle */
1649 if (sk->sk_send_head == NULL)
1650 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point.  Rejects MSG_OOB and non-connected sockets,
 * then dispatches on socket type / channel mode: connectionless PDU
 * for SOCK_DGRAM, single basic-mode PDU (bounded by omtu), or SAR
 * segmentation plus ERTM/streaming transmission for reliable modes. */
1655 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1657 struct sock *sk = sock->sk;
1658 struct l2cap_pinfo *pi = l2cap_pi(sk);
1659 struct sk_buff *skb;
1663 BT_DBG("sock %p, sk %p", sock, sk);
1665 err = sock_error(sk);
/* L2CAP has no out-of-band data concept */
1669 if (msg->msg_flags & MSG_OOB)
1674 if (sk->sk_state != BT_CONNECTED) {
1679 /* Connectionless channel */
1680 if (sk->sk_type == SOCK_DGRAM) {
1681 skb = l2cap_create_connless_pdu(sk, msg, len);
1685 err = l2cap_do_send(sk, skb);
1690 case L2CAP_MODE_BASIC:
1691 /* Check outgoing MTU */
1692 if (len > pi->omtu) {
1697 /* Create a basic PDU */
1698 skb = l2cap_create_basic_pdu(sk, msg, len);
1704 err = l2cap_do_send(sk, skb);
1709 case L2CAP_MODE_ERTM:
1710 case L2CAP_MODE_STREAMING:
1711 /* Entire SDU fits into one PDU */
1712 if (len <= pi->remote_mps) {
1713 control = L2CAP_SDU_UNSEGMENTED;
/* Single unsegmented frame: no SDU-length field needed */
1714 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1719 __skb_queue_tail(TX_QUEUE(sk), skb);
1720 if (sk->sk_send_head == NULL)
1721 sk->sk_send_head = skb;
1723 /* Segment SDU into multiples PDUs */
1724 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Kick the mode-specific transmit machinery for the queued frames */
1729 if (pi->mode == L2CAP_MODE_STREAMING)
1730 err = l2cap_streaming_send(sk);
1732 err = l2cap_ertm_send(sk);
1739 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point.  The first read on a deferred-setup socket
 * still in BT_CONNECT2 implicitly accepts the pending connection:
 * the stored Connect Response is sent and the channel moves to
 * BT_CONFIG before falling through to the generic receive path. */
1748 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1750 struct sock *sk = sock->sk;
1754 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1755 struct l2cap_conn_rsp rsp;
1757 sk->sk_state = BT_CONFIG;
/* In the response our dcid is the peer's source CID and vice versa */
1759 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1760 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1761 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1762 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident saved from the original Connect Request */
1763 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1764 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Normal datagram receive handled by the common Bluetooth core */
1772 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * L2CAP_OPTIONS is read-modify-write: current values are loaded into
 * @opts first, so a short copy_from_user leaves the tail fields at
 * their present settings. */
1775 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1777 struct sock *sk = sock->sk;
1778 struct l2cap_options opts;
1782 BT_DBG("sk %p", sk);
/* Pre-fill with current channel parameters (partial-write safety) */
1788 opts.imtu = l2cap_pi(sk)->imtu;
1789 opts.omtu = l2cap_pi(sk)->omtu;
1790 opts.flush_to = l2cap_pi(sk)->flush_to;
1791 opts.mode = l2cap_pi(sk)->mode;
1792 opts.fcs = l2cap_pi(sk)->fcs;
1793 opts.max_tx = l2cap_pi(sk)->max_tx;
1794 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1796 len = min_t(unsigned int, sizeof(opts), optlen);
1797 if (copy_from_user((char *) &opts, optval, len)) {
/* Commit the (possibly partially) updated options */
1802 l2cap_pi(sk)->imtu = opts.imtu;
1803 l2cap_pi(sk)->omtu = opts.omtu;
1804 l2cap_pi(sk)->mode = opts.mode;
1805 l2cap_pi(sk)->fcs = opts.fcs;
1806 l2cap_pi(sk)->max_tx = opts.max_tx;
/* tx_win is stored as u8; userspace field is u16, value is truncated */
1807 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1811 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits onto the security-level model; the
 * strongest requested level wins since each test overwrites it */
1816 if (opt & L2CAP_LM_AUTH)
1817 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1818 if (opt & L2CAP_LM_ENCRYPT)
1819 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1820 if (opt & L2CAP_LM_SECURE)
1821 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1823 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1824 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point: SOL_L2CAP options are routed to the legacy
 * handler; SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP. */
1836 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1838 struct sock *sk = sock->sk;
1839 struct bt_security sec;
1843 BT_DBG("sk %p", sk);
1845 if (level == SOL_L2CAP)
1846 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1848 if (level != SOL_BLUETOOTH)
1849 return -ENOPROTOOPT;
/* BT_SECURITY only makes sense on connection-oriented/raw sockets */
1855 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
/* Default if userspace supplies a short structure */
1860 sec.level = BT_SECURITY_LOW;
1862 len = min_t(unsigned int, sizeof(sec), optlen);
1863 if (copy_from_user((char *) &sec, optval, len)) {
1868 if (sec.level < BT_SECURITY_LOW ||
1869 sec.level > BT_SECURITY_HIGH) {
1874 l2cap_pi(sk)->sec_level = sec.level;
1877 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before the socket is active */
1878 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1883 if (get_user(opt, (u32 __user *) optval)) {
1888 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).  Mirrors the legacy setsockopt encoding. */
1900 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1902 struct sock *sk = sock->sk;
1903 struct l2cap_options opts;
1904 struct l2cap_conninfo cinfo;
1908 BT_DBG("sk %p", sk);
1910 if (get_user(len, optlen))
/* Snapshot the current channel parameters */
1917 opts.imtu = l2cap_pi(sk)->imtu;
1918 opts.omtu = l2cap_pi(sk)->omtu;
1919 opts.flush_to = l2cap_pi(sk)->flush_to;
1920 opts.mode = l2cap_pi(sk)->mode;
1921 opts.fcs = l2cap_pi(sk)->fcs;
1922 opts.max_tx = l2cap_pi(sk)->max_tx;
1923 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
/* Copy no more than the caller asked for */
1925 len = min_t(unsigned int, len, sizeof(opts));
1926 if (copy_to_user(optval, (char *) &opts, len))
/* Re-encode sec_level as cumulative legacy link-mode bits */
1932 switch (l2cap_pi(sk)->sec_level) {
1933 case BT_SECURITY_LOW:
1934 opt = L2CAP_LM_AUTH;
1936 case BT_SECURITY_MEDIUM:
1937 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1939 case BT_SECURITY_HIGH:
1940 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1948 if (l2cap_pi(sk)->role_switch)
1949 opt |= L2CAP_LM_MASTER;
1951 if (l2cap_pi(sk)->force_reliable)
1952 opt |= L2CAP_LM_RELIABLE;
1954 if (put_user(opt, (u32 __user *) optval))
1958 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or while a deferred
 * setup is pending in BT_CONNECT2 */
1959 if (sk->sk_state != BT_CONNECTED &&
1960 !(sk->sk_state == BT_CONNECT2 &&
1961 bt_sk(sk)->defer_setup)) {
1966 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1967 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1969 len = min_t(unsigned int, len, sizeof(cinfo));
1970 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH serves BT_SECURITY and BT_DEFER_SETUP. */
1984 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1986 struct sock *sk = sock->sk;
1987 struct bt_security sec;
1990 BT_DBG("sk %p", sk);
1992 if (level == SOL_L2CAP)
1993 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1995 if (level != SOL_BLUETOOTH)
1996 return -ENOPROTOOPT;
1998 if (get_user(len, optlen))
/* BT_SECURITY is restricted to seqpacket/raw sockets, as in set */
2005 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
2010 sec.level = l2cap_pi(sk)->sec_level;
2012 len = min_t(unsigned int, len, sizeof(sec));
2013 if (copy_to_user(optval, (char *) &sec, len))
2018 case BT_DEFER_SETUP:
2019 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2024 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() handler: idempotent — the close sequence runs only on
 * the first call (sk_shutdown not yet set).  With SO_LINGER active,
 * waits up to sk_lingertime for the channel to reach BT_CLOSED. */
2038 static int l2cap_sock_shutdown(struct socket *sock, int how)
2040 struct sock *sk = sock->sk;
2043 BT_DBG("sock %p, sk %p", sock, sk);
2049 if (!sk->sk_shutdown) {
2050 sk->sk_shutdown = SHUTDOWN_MASK;
/* Stop any pending channel timer before tearing the channel down */
2051 l2cap_sock_clear_timer(sk);
2052 __l2cap_sock_close(sk, 0);
2054 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2055 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() handler: performs a full shutdown then kills the socket
 * (frees it unless it is still referenced elsewhere). */
2062 static int l2cap_sock_release(struct socket *sock)
2064 struct sock *sk = sock->sk;
2067 BT_DBG("sock %p, sk %p", sock, sk);
2072 err = l2cap_sock_shutdown(sock, 2);
2075 l2cap_sock_kill(sk);
/* Mark a channel fully configured and wake whoever is waiting on it:
 * the connecting thread for outgoing channels, or the listening
 * parent's accept queue for incoming ones. */
2079 static void l2cap_chan_ready(struct sock *sk)
2081 struct sock *parent = bt_sk(sk)->parent;
2083 BT_DBG("sk %p, parent %p", sk, parent);
/* Configuration is complete; clear state and the config timer */
2085 l2cap_pi(sk)->conf_state = 0;
2086 l2cap_sock_clear_timer(sk);
2089 /* Outgoing channel.
2090 * Wake up socket sleeping on connect.
2092 sk->sk_state = BT_CONNECTED;
2093 sk->sk_state_change(sk);
2095 /* Incoming channel.
2096 * Wake up socket sleeping on accept.
2098 parent->sk_data_ready(parent, 0);
2102 /* Copy frame to all raw sockets on that connection */
2103 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2105 struct l2cap_chan_list *l = &conn->chan_list;
2106 struct sk_buff *nskb;
2109 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock */
2111 read_lock(&l->lock);
2112 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2113 if (sk->sk_type != SOCK_RAW)
2116 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: this runs in a non-sleeping (receive) context */
2119 nskb = skb_clone(skb, GFP_ATOMIC);
/* Queue to the raw socket; on failure the clone is dropped */
2123 if (sock_queue_rcv_skb(sk, nskb))
2126 read_unlock(&l->lock);
2129 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU on the signalling CID: L2CAP
 * header, command header (@code/@ident/@dlen), then @dlen bytes of
 * @data, fragment-chained when it exceeds the HCI MTU. */
2130 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2131 u8 code, u8 ident, u16 dlen, void *data)
2133 struct sk_buff *skb, **frag;
2134 struct l2cap_cmd_hdr *cmd;
2135 struct l2cap_hdr *lh;
2138 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2139 conn, code, ident, dlen);
2141 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2142 count = min_t(unsigned int, conn->mtu, len);
2144 skb = bt_skb_alloc(count, GFP_ATOMIC);
2148 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2149 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2150 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2152 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2155 cmd->len = cpu_to_le16(dlen);
/* Payload bytes that fit in the head skb */
2158 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2159 memcpy(skb_put(skb, count), data, count);
2165 /* Continuation fragments (no L2CAP header) */
2166 frag = &skb_shinfo(skb)->frag_list;
2168 count = min_t(unsigned int, conn->mtu, len);
2170 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2174 memcpy(skb_put(*frag, count), data, count);
/* Advance to the next slot in the fragment chain */
2179 frag = &(*frag)->next;
/* Decode one configuration option TLV at *ptr.  Fills in @type and
 * @olen; @val gets the value for 1/2/4-byte options (converted to
 * host order) or a pointer to the raw bytes for longer ones.
 * Returns the total option size consumed (header + payload). */
2189 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2191 struct l2cap_conf_opt *opt = *ptr;
2194 len = L2CAP_CONF_OPT_SIZE + opt->len;
2202 *val = *((u8 *) opt->val);
2206 *val = __le16_to_cpu(*((__le16 *) opt->val));
2210 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer instead of a value */
2214 *val = (unsigned long) opt->val;
2218 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option TLV at *ptr and advance the cursor.
 * 1/2/4-byte values are stored little-endian; anything else is copied
 * verbatim from the buffer @val points to. */
2222 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2224 struct l2cap_conf_opt *opt = *ptr;
2226 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2233 *((u8 *) opt->val) = val;
2237 *((__le16 *) opt->val) = cpu_to_le16(val);
2241 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Long option: @val is a pointer to @len raw bytes */
2245 memcpy(opt->val, (void *) val, len);
2249 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack-timer callback: sends a pending acknowledgement when the
 * timer set up in l2cap_ertm_init() fires. */
2252 static void l2cap_ack_timeout(unsigned long arg)
2254 struct sock *sk = (void *) arg;
2257 l2cap_send_ack(l2cap_pi(sk));
/* Reset ERTM per-channel state (sequence counters) and arm the three
 * ERTM timers — retransmission, monitor and acknowledgement — plus
 * the selective-reject queue.  Called when a channel enters ERTM. */
2261 static inline void l2cap_ertm_init(struct sock *sk)
2263 l2cap_pi(sk)->expected_ack_seq = 0;
2264 l2cap_pi(sk)->unacked_frames = 0;
2265 l2cap_pi(sk)->buffer_seq = 0;
2266 l2cap_pi(sk)->num_acked = 0;
2267 l2cap_pi(sk)->frames_sent = 0;
/* All three timers get the socket pointer as their callback arg */
2269 setup_timer(&l2cap_pi(sk)->retrans_timer,
2270 l2cap_retrans_timeout, (unsigned long) sk);
2271 setup_timer(&l2cap_pi(sk)->monitor_timer,
2272 l2cap_monitor_timeout, (unsigned long) sk);
2273 setup_timer(&l2cap_pi(sk)->ack_timer,
2274 l2cap_ack_timeout, (unsigned long) sk);
2276 __skb_queue_head_init(SREJ_QUEUE(sk));
/* Return non-zero when @mode is supported both locally and by the
 * peer: the mode's feature bit must be set in the peer's @feat_mask
 * and in our (possibly ERTM/streaming-extended) local mask. */
2279 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2281 u32 local_feat_mask = l2cap_feat_mask;
/* Advertise ERTM/streaming locally; presumably gated on the
 * enable_ertm module parameter — confirm against the full source */
2283 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2286 case L2CAP_MODE_ERTM:
2287 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2288 case L2CAP_MODE_STREAMING:
2289 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Choose the channel mode to use: keep the requested ERTM/streaming
 * mode when the peer supports it, otherwise fall back to basic mode. */
2295 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2298 case L2CAP_MODE_STREAMING:
2299 case L2CAP_MODE_ERTM:
2300 if (l2cap_mode_supported(mode, remote_feat_mask))
2304 return L2CAP_MODE_BASIC;
/* Build our outgoing Configuration Request for @sk into @data,
 * appending MTU and (for ERTM/streaming) RFC and FCS options
 * according to the negotiated channel mode. */
2308 static int l2cap_build_conf_req(struct sock *sk, void *data)
2310 struct l2cap_pinfo *pi = l2cap_pi(sk);
2311 struct l2cap_conf_req *req = data;
2312 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2313 void *ptr = req->data;
2315 BT_DBG("sk %p", sk);
/* Mode selection happens only on the first config exchange */
2317 if (pi->num_conf_req || pi->num_conf_rsp)
2321 case L2CAP_MODE_STREAMING:
2322 case L2CAP_MODE_ERTM:
/* Mode was required by this side; refuse if the peer lacks it */
2323 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2324 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2325 l2cap_send_disconn_req(pi->conn, sk);
2328 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2334 case L2CAP_MODE_BASIC:
/* Only mention the MTU when it differs from the spec default */
2335 if (pi->imtu != L2CAP_DEFAULT_MTU)
2336 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2339 case L2CAP_MODE_ERTM:
2340 rfc.mode = L2CAP_MODE_ERTM;
2341 rfc.txwin_size = pi->tx_win;
2342 rfc.max_transmit = pi->max_tx;
/* Timeouts are filled in by the responder, so request zeros */
2343 rfc.retrans_timeout = 0;
2344 rfc.monitor_timeout = 0;
2345 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp the PDU size so one I-frame (header + control + SDU len
 * + FCS = 10 bytes of overhead) fits in a single HCI frame */
2346 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2347 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2349 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2350 sizeof(rfc), (unsigned long) &rfc);
/* FCS option is only meaningful if the peer advertises FCS */
2352 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2355 if (pi->fcs == L2CAP_FCS_NONE ||
2356 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2357 pi->fcs = L2CAP_FCS_NONE;
2358 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2362 case L2CAP_MODE_STREAMING:
2363 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode: no retransmissions, so these stay zero */
2365 rfc.max_transmit = 0;
2366 rfc.retrans_timeout = 0;
2367 rfc.monitor_timeout = 0;
2368 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2369 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2370 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2372 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2373 sizeof(rfc), (unsigned long) &rfc);
2375 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2378 if (pi->fcs == L2CAP_FCS_NONE ||
2379 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2380 pi->fcs = L2CAP_FCS_NONE;
2381 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2386 /* FIXME: Need actual value of the flush timeout */
2387 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2388 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2390 req->dcid = cpu_to_le16(pi->dcid);
2391 req->flags = cpu_to_le16(0)
2396 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2398 struct l2cap_pinfo *pi = l2cap_pi(sk);
2399 struct l2cap_conf_rsp *rsp = data;
2400 void *ptr = rsp->data;
2401 void *req = pi->conf_req;
2402 int len = pi->conf_len;
2403 int type, hint, olen;
2405 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2406 u16 mtu = L2CAP_DEFAULT_MTU;
2407 u16 result = L2CAP_CONF_SUCCESS;
2409 BT_DBG("sk %p", sk);
2411 while (len >= L2CAP_CONF_OPT_SIZE) {
2412 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2414 hint = type & L2CAP_CONF_HINT;
2415 type &= L2CAP_CONF_MASK;
2418 case L2CAP_CONF_MTU:
2422 case L2CAP_CONF_FLUSH_TO:
2426 case L2CAP_CONF_QOS:
2429 case L2CAP_CONF_RFC:
2430 if (olen == sizeof(rfc))
2431 memcpy(&rfc, (void *) val, olen);
2434 case L2CAP_CONF_FCS:
2435 if (val == L2CAP_FCS_NONE)
2436 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2444 result = L2CAP_CONF_UNKNOWN;
2445 *((u8 *) ptr++) = type;
2450 if (pi->num_conf_rsp || pi->num_conf_req)
2454 case L2CAP_MODE_STREAMING:
2455 case L2CAP_MODE_ERTM:
2456 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2457 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2458 return -ECONNREFUSED;
2461 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2466 if (pi->mode != rfc.mode) {
2467 result = L2CAP_CONF_UNACCEPT;
2468 rfc.mode = pi->mode;
2470 if (pi->num_conf_rsp == 1)
2471 return -ECONNREFUSED;
2473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2474 sizeof(rfc), (unsigned long) &rfc);
2478 if (result == L2CAP_CONF_SUCCESS) {
2479 /* Configure output options and let the other side know
2480 * which ones we don't like. */
2482 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2483 result = L2CAP_CONF_UNACCEPT;
2486 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2488 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2491 case L2CAP_MODE_BASIC:
2492 pi->fcs = L2CAP_FCS_NONE;
2493 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2496 case L2CAP_MODE_ERTM:
2497 pi->remote_tx_win = rfc.txwin_size;
2498 pi->remote_max_tx = rfc.max_transmit;
2499 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2500 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2502 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2504 rfc.retrans_timeout =
2505 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2506 rfc.monitor_timeout =
2507 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2509 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2511 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2512 sizeof(rfc), (unsigned long) &rfc);
2516 case L2CAP_MODE_STREAMING:
2517 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2518 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2520 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2522 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2525 sizeof(rfc), (unsigned long) &rfc);
2530 result = L2CAP_CONF_UNACCEPT;
2532 memset(&rfc, 0, sizeof(rfc));
2533 rfc.mode = pi->mode;
2536 if (result == L2CAP_CONF_SUCCESS)
2537 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2539 rsp->scid = cpu_to_le16(pi->dcid);
2540 rsp->result = cpu_to_le16(result);
2541 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configuration Response and build a follow-up
 * Configuration Request in @data, adjusting options the peer rejected.
 * @result is updated in place when we must counter-propose. */
2546 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2548 struct l2cap_pinfo *pi = l2cap_pi(sk);
2549 struct l2cap_conf_req *req = data;
2550 void *ptr = req->data;
/* NOTE(review): rfc is not initialized; if the response carries no
 * RFC option, the ERTM/streaming branch below reads indeterminate
 * stack data — verify against the full source / upstream fixes. */
2553 struct l2cap_conf_rfc rfc;
2555 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2557 while (len >= L2CAP_CONF_OPT_SIZE) {
2558 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2561 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: counter with it */
2562 if (val < L2CAP_DEFAULT_MIN_MTU) {
2563 *result = L2CAP_CONF_UNACCEPT;
2564 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2567 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2570 case L2CAP_CONF_FLUSH_TO:
2572 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2576 case L2CAP_CONF_RFC:
2577 if (olen == sizeof(rfc))
2578 memcpy(&rfc, (void *)val, olen);
/* A locked-in (STATE2_DEVICE) mode cannot be renegotiated */
2580 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2581 rfc.mode != pi->mode)
2582 return -ECONNREFUSED;
2584 pi->mode = rfc.mode;
2587 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2588 sizeof(rfc), (unsigned long) &rfc);
/* On success, latch the negotiated ERTM/streaming parameters */
2593 if (*result == L2CAP_CONF_SUCCESS) {
2595 case L2CAP_MODE_ERTM:
2596 pi->remote_tx_win = rfc.txwin_size;
2597 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2598 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2599 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2601 case L2CAP_MODE_STREAMING:
2602 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2606 req->dcid = cpu_to_le16(pi->dcid);
2607 req->flags = cpu_to_le16(0x0000);
/* Build a minimal Configuration Response (no options) with the given
 * @result and continuation @flags into @data. */
2612 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2614 struct l2cap_conf_rsp *rsp = data;
2615 void *ptr = rsp->data;
2617 BT_DBG("sk %p", sk);
/* scid in the response identifies the peer's channel (our dcid) */
2619 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2620 rsp->result = cpu_to_le16(result);
2621 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful Configuration Response and
 * latch the negotiated ERTM/streaming parameters; no-op for channels
 * in basic mode. */
2626 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2628 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* NOTE(review): rfc is uninitialized; if the response has no RFC
 * option the assignments below read indeterminate data — verify. */
2631 struct l2cap_conf_rfc rfc;
2633 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2635 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2638 while (len >= L2CAP_CONF_OPT_SIZE) {
2639 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2642 case L2CAP_CONF_RFC:
2643 if (olen == sizeof(rfc))
2644 memcpy(&rfc, (void *)val, olen);
2651 case L2CAP_MODE_ERTM:
2652 pi->remote_tx_win = rfc.txwin_size;
2653 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2654 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2655 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2657 case L2CAP_MODE_STREAMING:
2658 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request, treat the feature exchange as finished and
 * resume starting queued channels. */
2662 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2664 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (0x0000) is acted upon */
2666 if (rej->reason != 0x0000)
2669 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2670 cmd->ident == conn->info_ident) {
2671 del_timer(&conn->info_timer);
2673 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2674 conn->info_ident = 0;
2676 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listener for the PSM,
 * check link security and backlog, allocate and register the child
 * socket, then answer with success / pending / refusal.  When the
 * result is pending because the feature exchange hasn't run yet, an
 * Information Request is kicked off at the end. */
2682 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2684 struct l2cap_chan_list *list = &conn->chan_list;
2685 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2686 struct l2cap_conn_rsp rsp;
2687 struct sock *sk, *parent;
2688 int result, status = L2CAP_CS_NO_INFO;
2690 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2691 __le16 psm = req->psm;
2693 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2695 /* Check if we have socket listening on psm */
2696 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2698 result = L2CAP_CR_BAD_PSM;
2702 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2703 if (psm != cpu_to_le16(0x0001) &&
2704 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = security block disconnect reason */
2705 conn->disc_reason = 0x05;
2706 result = L2CAP_CR_SEC_BLOCK;
2710 result = L2CAP_CR_NO_MEM;
2712 /* Check for backlog size */
2713 if (sk_acceptq_is_full(parent)) {
2714 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2718 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2722 write_lock_bh(&list->lock);
2724 /* Check if we already have channel with that dcid */
2725 if (__l2cap_get_chan_by_dcid(list, scid)) {
2726 write_unlock_bh(&list->lock);
/* Mark zapped so the kill actually releases the fresh socket */
2727 sock_set_flag(sk, SOCK_ZAPPED);
2728 l2cap_sock_kill(sk);
2732 hci_conn_hold(conn->hcon);
/* Child inherits settings from the listening parent */
2734 l2cap_sock_init(sk, parent);
2735 bacpy(&bt_sk(sk)->src, conn->src);
2736 bacpy(&bt_sk(sk)->dst, conn->dst);
2737 l2cap_pi(sk)->psm = psm;
/* The peer's source CID becomes our destination CID */
2738 l2cap_pi(sk)->dcid = scid;
2740 __l2cap_chan_add(conn, sk, parent);
2741 dcid = l2cap_pi(sk)->scid;
2743 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Save the ident so a deferred response can reuse it later */
2745 l2cap_pi(sk)->ident = cmd->ident;
2747 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2748 if (l2cap_check_security(sk)) {
2749 if (bt_sk(sk)->defer_setup) {
2750 sk->sk_state = BT_CONNECT2;
2751 result = L2CAP_CR_PEND;
2752 status = L2CAP_CS_AUTHOR_PEND;
2753 parent->sk_data_ready(parent, 0);
2755 sk->sk_state = BT_CONFIG;
2756 result = L2CAP_CR_SUCCESS;
2757 status = L2CAP_CS_NO_INFO;
2760 sk->sk_state = BT_CONNECT2;
2761 result = L2CAP_CR_PEND;
2762 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not done yet: answer "pending" for now */
2765 sk->sk_state = BT_CONNECT2;
2766 result = L2CAP_CR_PEND;
2767 status = L2CAP_CS_NO_INFO;
2770 write_unlock_bh(&list->lock);
2773 bh_unlock_sock(parent);
2776 rsp.scid = cpu_to_le16(scid);
2777 rsp.dcid = cpu_to_le16(dcid);
2778 rsp.result = cpu_to_le16(result);
2779 rsp.status = cpu_to_le16(status);
2780 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First pending-no-info response triggers the feature exchange */
2782 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2783 struct l2cap_info_req info;
2784 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2786 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2787 conn->info_ident = l2cap_get_ident(conn);
2789 mod_timer(&conn->info_timer, jiffies +
2790 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2792 l2cap_send_cmd(conn, conn->info_ident,
2793 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle an incoming Connect Response: on success start the config
 * exchange; on pending just flag the channel; anything else tears the
 * channel down with ECONNREFUSED. */
2799 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2801 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2802 u16 scid, dcid, result, status;
2806 scid = __le16_to_cpu(rsp->scid);
2807 dcid = __le16_to_cpu(rsp->dcid);
2808 result = __le16_to_cpu(rsp->result);
2809 status = __le16_to_cpu(rsp->status);
2811 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Locate the channel by scid, falling back to the command ident
 * (scid may be 0 in failure responses) */
2814 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2818 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2824 case L2CAP_CR_SUCCESS:
2825 sk->sk_state = BT_CONFIG;
2826 l2cap_pi(sk)->ident = 0;
2827 l2cap_pi(sk)->dcid = dcid;
2828 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2830 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Immediately fire our Configuration Request */
2832 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2833 l2cap_build_conf_req(sk, req), req);
2834 l2cap_pi(sk)->num_conf_req++;
2838 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Refused or unknown result: drop the channel */
2842 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configuration Request.  Option data is
 * accumulated in pi->conf_req across continuation fragments (flag bit
 * 0x0001); once complete it is parsed, a response sent, and — when
 * both config directions are done — the channel becomes connected. */
2850 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2852 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2858 dcid = __le16_to_cpu(req->dcid);
2859 flags = __le16_to_cpu(req->flags);
2861 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2863 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Ignore config for a channel already being torn down */
2867 if (sk->sk_state == BT_DISCONN)
2870 /* Reject if config buffer is too small. */
2871 len = cmd_len - sizeof(*req);
2872 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2873 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2874 l2cap_build_conf_rsp(sk, rsp,
2875 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment's options to the accumulation buffer */
2880 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2881 l2cap_pi(sk)->conf_len += len;
2883 if (flags & 0x0001) {
2884 /* Incomplete config. Send empty response. */
2885 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2886 l2cap_build_conf_rsp(sk, rsp,
2887 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2891 /* Complete config. */
2892 len = l2cap_parse_conf_req(sk, rsp);
/* Negative length means the request was unacceptable: disconnect */
2894 l2cap_send_disconn_req(conn, sk);
2898 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2899 l2cap_pi(sk)->num_conf_rsp++;
2901 /* Reset config buffer. */
2902 l2cap_pi(sk)->conf_len = 0;
2904 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: enable FCS unless both sides opted
 * out, reset sequence state and bring the channel up */
2907 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2908 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2909 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2910 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2912 sk->sk_state = BT_CONNECTED;
2914 l2cap_pi(sk)->next_tx_seq = 0;
2915 l2cap_pi(sk)->expected_tx_seq = 0;
2916 __skb_queue_head_init(TX_QUEUE(sk));
2917 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2918 l2cap_ertm_init(sk);
2920 l2cap_chan_ready(sk);
/* We answered their request but never sent ours: send it now */
2924 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2926 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2927 l2cap_build_conf_req(sk, buf), buf);
2928 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Configuration Response: on success latch the RFC
 * parameters; on "unaccept" re-negotiate (bounded by
 * L2CAP_CONF_MAX_CONF_RSP attempts); otherwise disconnect.  When both
 * config directions complete, the channel is brought up. */
2936 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2938 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2939 u16 scid, flags, result;
/* NOTE(review): cmd->len is a __le16 wire field used here without
 * le16_to_cpu(); wrong on big-endian hosts — verify and fix. */
2941 int len = cmd->len - sizeof(*rsp);
2943 scid = __le16_to_cpu(rsp->scid);
2944 flags = __le16_to_cpu(rsp->flags);
2945 result = __le16_to_cpu(rsp->result);
2947 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2948 scid, flags, result);
2950 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2955 case L2CAP_CONF_SUCCESS:
2956 l2cap_conf_rfc_get(sk, rsp->data, len);
2959 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, up to the negotiation limit */
2960 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit in the local request buffer */
2963 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2964 l2cap_send_disconn_req(conn, sk);
2968 /* throw out any old stored conf requests */
2969 result = L2CAP_CONF_SUCCESS;
2970 len = l2cap_parse_conf_rsp(sk, rsp->data,
2973 l2cap_send_disconn_req(conn, sk);
2977 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2978 L2CAP_CONF_REQ, len, req);
2979 l2cap_pi(sk)->num_conf_req++;
2980 if (result != L2CAP_CONF_SUCCESS)
/* Rejected/unknown result: give the peer 5s then disconnect */
2986 sk->sk_state = BT_DISCONN;
2987 sk->sk_err = ECONNRESET;
2988 l2cap_sock_set_timer(sk, HZ * 5);
2989 l2cap_send_disconn_req(conn, sk);
2996 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Same bring-up sequence as the config-request path */
2998 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2999 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3000 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3001 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3003 sk->sk_state = BT_CONNECTED;
3004 l2cap_pi(sk)->next_tx_seq = 0;
3005 l2cap_pi(sk)->expected_tx_seq = 0;
3006 __skb_queue_head_init(TX_QUEUE(sk));
3007 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3008 l2cap_ertm_init(sk);
3010 l2cap_chan_ready(sk);
/* Handle an incoming Disconnect Request: acknowledge it, purge
 * pending transmissions, stop ERTM timers, remove and kill the
 * channel socket. */
3018 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3020 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3021 struct l2cap_disconn_rsp rsp;
3025 scid = __le16_to_cpu(req->scid);
3026 dcid = __le16_to_cpu(req->dcid);
3028 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look the channel up by it */
3030 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3034 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3035 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3036 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3038 sk->sk_shutdown = SHUTDOWN_MASK;
3040 skb_queue_purge(TX_QUEUE(sk));
/* ERTM state must be fully quiesced before the channel is deleted */
3042 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3043 skb_queue_purge(SREJ_QUEUE(sk));
3044 del_timer(&l2cap_pi(sk)->retrans_timer);
3045 del_timer(&l2cap_pi(sk)->monitor_timer);
3046 del_timer(&l2cap_pi(sk)->ack_timer);
3049 l2cap_chan_del(sk, ECONNRESET);
3052 l2cap_sock_kill(sk);
/* Handle an incoming Disconnect Response (peer confirmed our
 * disconnect): purge queues, stop ERTM timers, delete the channel
 * with no error and kill the socket. */
3056 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3058 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3062 scid = __le16_to_cpu(rsp->scid);
3063 dcid = __le16_to_cpu(rsp->dcid);
3065 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3067 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3071 skb_queue_purge(TX_QUEUE(sk));
3073 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3074 skb_queue_purge(SREJ_QUEUE(sk));
3075 del_timer(&l2cap_pi(sk)->retrans_timer);
3076 del_timer(&l2cap_pi(sk)->monitor_timer);
3077 del_timer(&l2cap_pi(sk)->ack_timer);
/* Graceful disconnect: err 0, unlike the request path's ECONNRESET */
3080 l2cap_chan_del(sk, 0);
3083 l2cap_sock_kill(sk);
/* Handle an incoming Information Request: answer feature-mask and
 * fixed-channel queries, reject everything else as not supported. */
3087 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3089 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3092 type = __le16_to_cpu(req->type);
3094 BT_DBG("type 0x%4.4x", type);
3096 if (type == L2CAP_IT_FEAT_MASK) {
3098 u32 feat_mask = l2cap_feat_mask;
3099 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3100 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3101 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming support in the reported feature mask */
3103 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3105 put_unaligned_le32(feat_mask, rsp->data);
3106 l2cap_send_cmd(conn, cmd->ident,
3107 L2CAP_INFO_RSP, sizeof(buf), buf);
3108 } else if (type == L2CAP_IT_FIXED_CHAN) {
3110 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3111 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3112 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte response header */
3113 memcpy(buf + 4, l2cap_fixed_chan, 8);
3114 l2cap_send_cmd(conn, cmd->ident,
3115 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply NOTSUPP with the type echoed back */
3117 struct l2cap_info_rsp rsp;
3118 rsp.type = cpu_to_le16(type);
3119 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3120 l2cap_send_cmd(conn, cmd->ident,
3121 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response.  After the feature mask
 * arrives, optionally chase the fixed-channel info; once the exchange
 * is complete, resume starting queued channels. */
3127 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3129 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3132 type = __le16_to_cpu(rsp->type);
3133 result = __le16_to_cpu(rsp->result);
3135 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A response arrived: the info timeout no longer applies */
3137 del_timer(&conn->info_timer);
3139 if (type == L2CAP_IT_FEAT_MASK) {
3140 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: query them before finishing */
3142 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3143 struct l2cap_info_req req;
3144 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3146 conn->info_ident = l2cap_get_ident(conn);
3148 l2cap_send_cmd(conn, conn->info_ident,
3149 L2CAP_INFO_REQ, sizeof(req), &req);
3151 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3152 conn->info_ident = 0;
3154 l2cap_conn_start(conn);
3156 } else if (type == L2CAP_IT_FIXED_CHAN) {
3157 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3158 conn->info_ident = 0;
3160 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001).
 * A single C-frame may carry several commands back to back; each is
 * parsed, dispatched to its handler, and any handler error is reported
 * back to the peer with a Command Reject. */
3166 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3168 u8 *data = skb->data;
3170 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
3173 l2cap_raw_recv(conn, skb);
3175 while (len >= L2CAP_CMD_HDR_SIZE) {
3177 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3178 data += L2CAP_CMD_HDR_SIZE;
3179 len -= L2CAP_CMD_HDR_SIZE;
3181 cmd_len = le16_to_cpu(cmd.len);
3183 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated commands and the reserved ident 0. */
3185 if (cmd_len > len || !cmd.ident) {
3186 BT_DBG("corrupted command");
3191 case L2CAP_COMMAND_REJ:
3192 l2cap_command_rej(conn, &cmd, data);
3195 case L2CAP_CONN_REQ:
3196 err = l2cap_connect_req(conn, &cmd, data);
3199 case L2CAP_CONN_RSP:
3200 err = l2cap_connect_rsp(conn, &cmd, data);
3203 case L2CAP_CONF_REQ:
3204 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3207 case L2CAP_CONF_RSP:
3208 err = l2cap_config_rsp(conn, &cmd, data);
3211 case L2CAP_DISCONN_REQ:
3212 err = l2cap_disconnect_req(conn, &cmd, data);
3215 case L2CAP_DISCONN_RSP:
3216 err = l2cap_disconnect_rsp(conn, &cmd, data);
3219 case L2CAP_ECHO_REQ:
/* Echo the payload straight back with the same ident. */
3220 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3223 case L2CAP_ECHO_RSP:
3226 case L2CAP_INFO_REQ:
3227 err = l2cap_information_req(conn, &cmd, data);
3230 case L2CAP_INFO_RSP:
3231 err = l2cap_information_rsp(conn, &cmd, data);
3235 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: tell the peer via Command Reject. */
3241 struct l2cap_cmd_rej rej;
3242 BT_DBG("error %d", err);
3244 /* FIXME: Map err to a valid reason */
3245 rej.reason = cpu_to_le16(0);
3246 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 frame check sequence of an incoming ERTM/streaming
 * frame.  skb_trim() only shrinks skb->len, so after trimming 2 bytes
 * the received FCS can still be read at skb->data + skb->len; our own
 * CRC is computed over the L2CAP header (which sits just before
 * skb->data) plus the control field and payload. */
3256 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3258 u16 our_fcs, rcv_fcs;
/* L2CAP basic header plus the 2-byte control field precede skb->data. */
3259 int hdr_size = L2CAP_HDR_SIZE + 2;
3261 if (pi->fcs == L2CAP_FCS_CRC16) {
3262 skb_trim(skb, skb->len - 2);
3263 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3264 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3266 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer: send pending I-frames, or an
 * RNR S-frame when locally busy, or an RR S-frame when nothing else
 * carried the required F-bit. */
3272 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3274 struct l2cap_pinfo *pi = l2cap_pi(sk);
3277 pi->frames_sent = 0;
/* Request that the next outgoing frame carries the Final bit. */
3278 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3280 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3282 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Locally busy: RNR with F=1 answers the poll immediately. */
3283 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3284 l2cap_send_sframe(pi, control);
3285 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3288 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3289 __mod_retrans_timer();
3291 l2cap_ertm_send(sk);
/* Nothing was sent that could carry the F-bit: send a bare RR. */
3293 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3294 pi->frames_sent == 0) {
3295 control |= L2CAP_SUPER_RCV_READY;
3296 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into SREJ_QUEUE(sk), keeping the
 * queue ordered by tx_seq so reassembly can drain it in order once the
 * missing frames have been retransmitted. */
3300 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3302 struct sk_buff *next_skb;
/* Stash sequence number and SAR bits in the skb control block. */
3304 bt_cb(skb)->tx_seq = tx_seq;
3305 bt_cb(skb)->sar = sar;
3307 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append. */
3309 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Walk the queue until we find the first entry with a larger tx_seq
 * and insert in front of it.
 * NOTE(review): the plain '>' comparison appears not to account for
 * the modulo-64 sequence-number wrap-around — confirm against the
 * sequence space handling used elsewhere in this file. */
3314 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3315 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3319 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3322 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Largest tx_seq seen so far: goes at the tail. */
3324 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble a segmented SDU from its SAR-tagged I-frames.
 * Unsegmented frames are queued to the socket directly; START frames
 * allocate pi->sdu and record the announced SDU length; CONTINUE and
 * END frames append to it, with END delivering the completed SDU to
 * the socket receive queue. */
3327 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3329 struct l2cap_pinfo *pi = l2cap_pi(sk);
3330 struct sk_buff *_skb;
3333 switch (control & L2CAP_CTRL_SAR) {
3334 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame must not arrive mid-reassembly. */
3335 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3340 err = sock_queue_rcv_skb(sk, skb);
3346 case L2CAP_SDU_START:
/* A new START while a reassembly is in progress is a protocol error. */
3347 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* START frames begin with the 2-byte total SDU length. */
3352 pi->sdu_len = get_unaligned_le16(skb->data);
3355 if (pi->sdu_len > pi->imtu) {
3360 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3366 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3368 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3369 pi->partial_sdu_len = skb->len;
3373 case L2CAP_SDU_CONTINUE:
3374 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3377 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3379 pi->partial_sdu_len += skb->len;
/* More data than the START frame announced: abort reassembly. */
3380 if (pi->partial_sdu_len > pi->sdu_len)
/* L2CAP_SDU_END: final segment of the SDU. */
3388 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3391 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3393 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3394 pi->partial_sdu_len += skb->len;
3396 if (pi->partial_sdu_len > pi->imtu)
/* Deliver only if the reassembled size matches the announced one. */
3399 if (pi->partial_sdu_len == pi->sdu_len) {
3400 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3401 err = sock_queue_rcv_skb(sk, _skb);
/* Drain SREJ_QUEUE(sk) starting at tx_seq: every frame that is now in
 * sequence is dequeued, pushed through SAR reassembly, and counted in
 * buffer_seq_srej (modulo-64 sequence space). */
3416 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3418 struct sk_buff *skb;
3421 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first remaining gap in the sequence. */
3422 if (bt_cb(skb)->tx_seq != tx_seq)
3425 skb = skb_dequeue(SREJ_QUEUE(sk));
3426 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3427 l2cap_sar_reassembly_sdu(sk, skb, control);
3428 l2cap_pi(sk)->buffer_seq_srej =
3429 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still outstanding on
 * SREJ_LIST(sk), up to and including tx_seq; the matching entry itself
 * is consumed while the others are re-queued at the tail. */
3434 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3436 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* _safe variant: entries are moved/removed while iterating. */
3437 struct srej_list *l, *tmp;
3440 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3441 if (l->tx_seq == tx_seq) {
3446 control = L2CAP_SUPER_SELECT_REJECT;
3447 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3448 l2cap_send_sframe(pi, control);
/* Keep this entry pending: move it to the list tail. */
3450 list_add_tail(&l->list, SREJ_LIST(sk));
3454 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3456 struct l2cap_pinfo *pi = l2cap_pi(sk);
3457 struct srej_list *new;
3460 while (tx_seq != pi->expected_tx_seq) {
3461 control = L2CAP_SUPER_SELECT_REJECT;
3462 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3463 l2cap_send_sframe(pi, control);
3465 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3466 new->tx_seq = pi->expected_tx_seq++;
3467 list_add_tail(&new->list, SREJ_LIST(sk));
3469 pi->expected_tx_seq++;
/* ERTM receive path for an I-frame.
 * Handles F-bit processing, acknowledgement bookkeeping, in-sequence
 * delivery through SAR reassembly, and the SREJ machinery for frames
 * that arrive out of sequence. */
3472 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3474 struct l2cap_pinfo *pi = l2cap_pi(sk);
3475 u8 tx_seq = __get_txseq(rx_control);
3476 u8 req_seq = __get_reqseq(rx_control);
3477 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* Acknowledge roughly every tx_win/6 frames. */
3478 int num_to_ack = (pi->tx_win/6) + 1;
3481 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3483 if (L2CAP_CTRL_FINAL & rx_control) {
/* F=1 answers our poll: stop the monitor timer and leave WAIT_F. */
3484 del_timer(&pi->monitor_timer);
3485 if (pi->unacked_frames > 0)
3486 __mod_retrans_timer();
3487 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* The piggybacked ReqSeq acknowledges our outstanding I-frames. */
3490 pi->expected_ack_seq = req_seq;
3491 l2cap_drop_acked_frames(sk);
3493 if (tx_seq == pi->expected_tx_seq)
/* Out-of-sequence frame: SREJ handling. */
3496 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3497 struct srej_list *first;
3499 first = list_first_entry(SREJ_LIST(sk),
3500 struct srej_list, list);
3501 if (tx_seq == first->tx_seq) {
/* This is the oldest frame we asked for: buffer it and try to
 * drain the gap that may now be closed. */
3502 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3503 l2cap_check_srej_gap(sk, tx_seq);
3505 list_del(&first->list);
3508 if (list_empty(SREJ_LIST(sk))) {
/* All requested retransmissions arrived: leave SREJ state. */
3509 pi->buffer_seq = pi->buffer_seq_srej;
3510 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3514 struct srej_list *l;
3515 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3517 list_for_each_entry(l, SREJ_LIST(sk), list) {
3518 if (l->tx_seq == tx_seq) {
/* Duplicate of an already-requested frame: re-issue SREJs. */
3519 l2cap_resend_srejframe(sk, tx_seq);
3523 l2cap_send_srejframe(sk, tx_seq);
/* First out-of-sequence frame: enter SREJ state. */
3526 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3528 INIT_LIST_HEAD(SREJ_LIST(sk));
3529 pi->buffer_seq_srej = pi->buffer_seq;
3531 __skb_queue_head_init(SREJ_QUEUE(sk));
3532 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3534 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3536 l2cap_send_srejframe(sk, tx_seq);
/* Expected (in-sequence) frame path. */
3541 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3543 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3544 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3548 if (rx_control & L2CAP_CTRL_FINAL) {
3549 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3550 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* Otherwise retransmit from the acknowledged point. */
3552 if (!skb_queue_empty(TX_QUEUE(sk)))
3553 sk->sk_send_head = TX_QUEUE(sk)->next;
3554 pi->next_tx_seq = pi->expected_ack_seq;
3555 l2cap_ertm_send(sk);
3559 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3561 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Send an acknowledgement once enough frames have accumulated. */
3567 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3568 if (pi->num_acked == num_to_ack - 1)
/* ERTM receive path for an RR (Receiver Ready) S-frame.
 * Always processes the piggybacked acknowledgement, then handles the
 * P-bit (peer polls us), the F-bit (answer to our poll), or a plain
 * acknowledgement. */
3574 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3576 struct l2cap_pinfo *pi = l2cap_pi(sk);
3578 pi->expected_ack_seq = __get_reqseq(rx_control);
3579 l2cap_drop_acked_frames(sk);
3581 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polls us (P=1): we must answer with an F=1 frame. */
3582 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3583 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3584 (pi->unacked_frames > 0))
3585 __mod_retrans_timer();
3587 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* In SREJ recovery: answer the poll with the tail SREJ. */
3588 l2cap_send_srejtail(sk);
3590 l2cap_send_i_or_rr_or_rnr(sk);
3591 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3594 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F=1 answers our poll: retransmit from the acknowledged point
 * unless a REJ exchange already did (REJ_ACT). */
3595 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3597 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3598 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3600 if (!skb_queue_empty(TX_QUEUE(sk)))
3601 sk->sk_send_head = TX_QUEUE(sk)->next;
3602 pi->next_tx_seq = pi->expected_ack_seq;
3603 l2cap_ertm_send(sk);
/* Plain RR: restart retransmission timer if frames are still
 * unacknowledged, and resume sending. */
3607 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3608 (pi->unacked_frames > 0))
3609 __mod_retrans_timer();
3611 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3612 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3615 l2cap_ertm_send(sk);
/* ERTM receive path for a REJ (Reject) S-frame: the peer asks for a
 * go-back-N retransmission starting at the ReqSeq it carries. */
3619 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3621 struct l2cap_pinfo *pi = l2cap_pi(sk);
3622 u8 tx_seq = __get_reqseq(rx_control);
3624 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Everything before the rejected sequence number is acknowledged. */
3626 pi->expected_ack_seq = tx_seq;
3627 l2cap_drop_acked_frames(sk);
3629 if (rx_control & L2CAP_CTRL_FINAL) {
3630 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3631 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* Rewind the send pointer and retransmit from expected_ack_seq. */
3633 if (!skb_queue_empty(TX_QUEUE(sk)))
3634 sk->sk_send_head = TX_QUEUE(sk)->next;
3635 pi->next_tx_seq = pi->expected_ack_seq;
3636 l2cap_ertm_send(sk);
/* REJ without F-bit: same rewind-and-resend, and if a poll of ours
 * is outstanding remember this REJ so the F=1 answer is not acted
 * on twice (REJ_ACT). */
3639 if (!skb_queue_empty(TX_QUEUE(sk)))
3640 sk->sk_send_head = TX_QUEUE(sk)->next;
3641 pi->next_tx_seq = pi->expected_ack_seq;
3642 l2cap_ertm_send(sk);
3644 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3645 pi->srej_save_reqseq = tx_seq;
3646 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3650 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3652 struct l2cap_pinfo *pi = l2cap_pi(sk);
3653 u8 tx_seq = __get_reqseq(rx_control);
3655 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3657 if (rx_control & L2CAP_CTRL_POLL) {
3658 pi->expected_ack_seq = tx_seq;
3659 l2cap_drop_acked_frames(sk);
3660 l2cap_retransmit_frame(sk, tx_seq);
3661 l2cap_ertm_send(sk);
3662 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3663 pi->srej_save_reqseq = tx_seq;
3664 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3666 } else if (rx_control & L2CAP_CTRL_FINAL) {
3667 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3668 pi->srej_save_reqseq == tx_seq)
3669 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3671 l2cap_retransmit_frame(sk, tx_seq);
3673 l2cap_retransmit_frame(sk, tx_seq);
3674 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3675 pi->srej_save_reqseq = tx_seq;
3676 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* ERTM receive path for an RNR (Receiver Not Ready) S-frame: the peer
 * is busy, so mark REMOTE_BUSY, take its acknowledgement, and answer
 * any poll it carries. */
3683 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3683 struct l2cap_pinfo *pi = l2cap_pi(sk);
3684 u8 tx_seq = __get_reqseq(rx_control);
3686 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3687 pi->expected_ack_seq = tx_seq;
3688 l2cap_drop_acked_frames(sk);
3690 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer can't receive: no point running the retransmission timer. */
3691 del_timer(&pi->retrans_timer);
3692 if (rx_control & L2CAP_CTRL_POLL) {
3693 u16 control = L2CAP_CTRL_FINAL;
3694 l2cap_send_rr_or_rnr(pi, control);
/* In SREJ recovery: a poll is answered with the tail SREJ. */
3699 if (rx_control & L2CAP_CTRL_POLL)
3700 l2cap_send_srejtail(sk);
3702 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* ERTM receive path for any S-frame: process a carried F-bit, then
 * dispatch on the supervisory function (RR/REJ/SREJ/RNR). */
3705 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3707 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3709 if (L2CAP_CTRL_FINAL & rx_control) {
/* F=1 answers our poll: stop the monitor timer, leave WAIT_F. */
3710 del_timer(&l2cap_pi(sk)->monitor_timer);
3711 if (l2cap_pi(sk)->unacked_frames > 0)
3712 __mod_retrans_timer();
3713 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3716 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3717 case L2CAP_SUPER_RCV_READY:
3718 l2cap_data_channel_rrframe(sk, rx_control);
3721 case L2CAP_SUPER_REJECT:
3722 l2cap_data_channel_rejframe(sk, rx_control);
3725 case L2CAP_SUPER_SELECT_REJECT:
3726 l2cap_data_channel_srejframe(sk, rx_control);
3729 case L2CAP_SUPER_RCV_NOT_READY:
3730 l2cap_data_channel_rnrframe(sk, rx_control);
/* Deliver an incoming frame to a connection-oriented channel, keyed by
 * CID.  Dispatches on the channel mode: basic (queue directly), ERTM
 * (FCS check, then I-frame/S-frame state machines) or streaming (FCS
 * check, sequence tracking without retransmission). */
3738 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3741 struct l2cap_pinfo *pi;
3745 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3747 BT_DBG("unknown cid 0x%4.4x", cid);
3753 BT_DBG("sk %p, len %d", sk, skb->len);
3755 if (sk->sk_state != BT_CONNECTED)
3759 case L2CAP_MODE_BASIC:
3760 /* If socket recv buffers overflows we drop data here
3761 * which is *bad* because L2CAP has to be reliable.
3762 * But we don't have any other choice. L2CAP doesn't
3763 * provide flow control mechanism. */
3765 if (pi->imtu < skb->len)
3768 if (!sock_queue_rcv_skb(sk, skb))
3772 case L2CAP_MODE_ERTM:
/* First 2 bytes of the payload are the ERTM control field. */
3773 control = get_unaligned_le16(skb->data)
3777 if (__is_sar_start(control))
3780 if (pi->fcs == L2CAP_FCS_CRC16)
3784 * We can just drop the corrupted I-frame here.
3785 * Receiver will miss it and start proper recovery
3786 * procedures and ask retransmission.
3791 if (l2cap_check_fcs(pi, skb))
3794 if (__is_iframe(control)) {
3798 l2cap_data_channel_iframe(sk, control, skb);
3803 l2cap_data_channel_sframe(sk, control, skb);
3808 case L2CAP_MODE_STREAMING:
3809 control = get_unaligned_le16(skb->data);
3813 if (__is_sar_start(control))
3816 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries I-frames only, within MPS. */
3819 if (len > pi->mps || len < 4 || __is_sframe(control))
3822 if (l2cap_check_fcs(pi, skb))
3825 tx_seq = __get_txseq(control);
/* No retransmission in streaming mode: just track (and, on a gap,
 * resynchronize) the expected sequence number. */
3827 if (pi->expected_tx_seq == tx_seq)
3828 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3830 pi->expected_tx_seq = (tx_seq + 1) % 64;
3832 l2cap_sar_reassembly_sdu(sk, skb, control);
3837 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) frame to a socket bound to the
 * given PSM, enforcing the socket's incoming MTU. */
3851 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3855 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3859 BT_DBG("sk %p, len %d", sk, skb->len);
3861 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3864 if (l2cap_pi(sk)->imtu < skb->len)
3867 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header and route by CID to the signalling channel, the
 * connectionless channel, or a connection-oriented data channel. */
3879 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3881 struct l2cap_hdr *lh = (void *) skb->data;
3885 skb_pull(skb, L2CAP_HDR_SIZE);
3886 cid = __le16_to_cpu(lh->cid);
3887 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
3889 if (len != skb->len) {
3894 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3897 case L2CAP_CID_SIGNALING:
3898 l2cap_sig_channel(conn, skb);
3901 case L2CAP_CID_CONN_LESS:
/* Connectionless frames start with a 2-byte PSM. */
3902 psm = get_unaligned_le16(skb->data);
3904 l2cap_conless_channel(conn, psm, skb);
3908 l2cap_data_channel(conn, cid, skb);
3913 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * bdaddr?  Scans listening L2CAP sockets; an exact local-address match
 * (lm1) takes precedence over wildcard BDADDR_ANY listeners (lm2).
 * Returns the accumulated HCI link-mode flags, or 0 to refuse. */
3915 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3917 int exact = 0, lm1 = 0, lm2 = 0;
3918 register struct sock *sk;
3919 struct hlist_node *node;
/* Only ACL links carry L2CAP. */
3921 if (type != ACL_LINK)
3924 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3926 /* Find listening sockets and check their link_mode */
3927 read_lock(&l2cap_sk_list.lock);
3928 sk_for_each(sk, node, &l2cap_sk_list.head) {
3929 if (sk->sk_state != BT_LISTEN)
/* Listener bound to this adapter's own address. */
3932 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3933 lm1 |= HCI_LM_ACCEPT;
3934 if (l2cap_pi(sk)->role_switch)
3935 lm1 |= HCI_LM_MASTER;
/* Wildcard listener. */
3937 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3938 lm2 |= HCI_LM_ACCEPT;
3939 if (l2cap_pi(sk)->role_switch)
3940 lm2 |= HCI_LM_MASTER;
3943 read_unlock(&l2cap_sk_list.lock);
3945 return exact ? lm1 : lm2;
/* HCI callback: an ACL connection attempt completed.  On success set
 * up the L2CAP connection object and start pending channels; on
 * failure tear everything down with the mapped errno. */
3948 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3950 struct l2cap_conn *conn;
3952 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3954 if (hcon->type != ACL_LINK)
3958 conn = l2cap_conn_add(hcon, status);
3960 l2cap_conn_ready(conn);
/* Non-zero HCI status: convert to errno and drop the connection. */
3962 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: supply the disconnect reason recorded for this ACL
 * link's L2CAP connection. */
3967 static int l2cap_disconn_ind(struct hci_conn *hcon)
3969 struct l2cap_conn *conn = hcon->l2cap_data;
3971 BT_DBG("hcon %p", hcon);
3973 if (hcon->type != ACL_LINK || !conn)
3976 return conn->disc_reason;
/* HCI callback: the ACL link went down — tear down the L2CAP
 * connection with the HCI reason mapped to an errno. */
3979 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3981 BT_DBG("hcon %p reason %d", hcon, reason);
3983 if (hcon->type != ACL_LINK)
3986 l2cap_conn_del(hcon, bt_err(reason));
/* React to a link encryption change on a SEQPACKET channel.
 * Losing encryption: MEDIUM security gets a 5 s grace timer, HIGH
 * security closes the socket immediately.  Gaining encryption clears
 * any pending MEDIUM-security timer. */
3991 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3993 if (sk->sk_type != SOCK_SEQPACKET)
3996 if (encrypt == 0x00) {
3997 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3998 l2cap_sock_clear_timer(sk);
3999 l2cap_sock_set_timer(sk, HZ * 5);
4000 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4001 __l2cap_sock_close(sk, ECONNREFUSED);
4003 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4004 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption procedure finished.
 * Walks every channel on the connection: established channels get an
 * encryption-change check; channels waiting on security either send
 * their deferred Connect Request (BT_CONNECT) or answer the pending
 * incoming request with success or a security block (BT_CONNECT2). */
4008 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4010 struct l2cap_chan_list *l;
4011 struct l2cap_conn *conn = hcon->l2cap_data;
4017 l = &conn->chan_list;
4019 BT_DBG("conn %p", conn);
4021 read_lock(&l->lock);
4023 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels whose connect is pending elsewhere. */
4026 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4031 if (!status && (sk->sk_state == BT_CONNECTED ||
4032 sk->sk_state == BT_CONFIG)) {
4033 l2cap_check_encryption(sk, encrypt);
4038 if (sk->sk_state == BT_CONNECT) {
/* Security done: fire the deferred Connect Request now. */
4040 struct l2cap_conn_req req;
4041 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4042 req.psm = l2cap_pi(sk)->psm;
4044 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4046 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4047 L2CAP_CONN_REQ, sizeof(req), &req);
4049 l2cap_sock_clear_timer(sk);
4050 l2cap_sock_set_timer(sk, HZ / 10);
4052 } else if (sk->sk_state == BT_CONNECT2) {
/* Incoming connection was held for security: answer it now. */
4053 struct l2cap_conn_rsp rsp;
4057 sk->sk_state = BT_CONFIG;
4058 result = L2CAP_CR_SUCCESS;
/* Security failed: refuse and schedule teardown. */
4060 sk->sk_state = BT_DISCONN;
4061 l2cap_sock_set_timer(sk, HZ / 10);
4062 result = L2CAP_CR_SEC_BLOCK;
4065 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4066 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4067 rsp.result = cpu_to_le16(result);
4068 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4069 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4070 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4076 read_unlock(&l->lock);
/* HCI callback: ACL data arrived.  Reassembles L2CAP frames that span
 * multiple ACL packets: an ACL_START fragment either contains a
 * complete frame (dispatched immediately) or allocates conn->rx_skb
 * and records how many bytes are still expected in conn->rx_len;
 * continuation fragments are appended until the frame completes. */
4081 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4083 struct l2cap_conn *conn = hcon->l2cap_data;
4085 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4088 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4090 if (flags & ACL_START) {
4091 struct l2cap_hdr *hdr;
/* A start fragment while a reassembly is pending means we lost a
 * continuation somewhere: drop the partial frame. */
4095 BT_ERR("Unexpected start frame (len %d)", skb->len);
4096 kfree_skb(conn->rx_skb);
4097 conn->rx_skb = NULL;
4099 l2cap_conn_unreliable(conn, ECOMM);
/* Start fragment must at least hold the L2CAP basic header. */
4103 BT_ERR("Frame is too short (len %d)", skb->len);
4104 l2cap_conn_unreliable(conn, ECOMM);
4108 hdr = (struct l2cap_hdr *) skb->data;
4109 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4111 if (len == skb->len) {
4112 /* Complete frame received */
4113 l2cap_recv_frame(conn, skb);
4117 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4119 if (skb->len > len) {
4120 BT_ERR("Frame is too long (len %d, expected len %d)",
4122 l2cap_conn_unreliable(conn, ECOMM);
4126 /* Allocate skb for the complete frame (with header) */
4127 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4131 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4133 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4135 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4137 if (!conn->rx_len) {
4138 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4139 l2cap_conn_unreliable(conn, ECOMM);
4143 if (skb->len > conn->rx_len) {
4144 BT_ERR("Fragment is too long (len %d, expected %d)",
4145 skb->len, conn->rx_len);
4146 kfree_skb(conn->rx_skb);
4147 conn->rx_skb = NULL;
4149 l2cap_conn_unreliable(conn, ECOMM);
4153 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4155 conn->rx_len -= skb->len;
4157 if (!conn->rx_len) {
4158 /* Complete frame received */
4159 l2cap_recv_frame(conn, conn->rx_skb);
4160 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per L2CAP socket with
 * addresses, state, PSM, CIDs, MTUs and security level. */
4169 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4172 struct hlist_node *node;
/* _bh: the socket list is also touched from softirq context. */
4174 read_lock_bh(&l2cap_sk_list.lock);
4176 sk_for_each(sk, node, &l2cap_sk_list.head) {
4177 struct l2cap_pinfo *pi = l2cap_pi(sk);
4179 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4180 batostr(&bt_sk(sk)->src),
4181 batostr(&bt_sk(sk)->dst),
4182 sk->sk_state, __le16_to_cpu(pi->psm),
4184 pi->imtu, pi->omtu, pi->sec_level);
4187 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: hook the seq_file single-record show routine. */
4192 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4194 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for /sys/kernel/debug/bluetooth/l2cap. */
4197 static const struct file_operations l2cap_debugfs_fops = {
4198 .open = l2cap_debugfs_open,
4200 .llseek = seq_lseek,
4201 .release = single_release,
/* Dentry of the debugfs file; created in l2cap_init(). */
4204 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
4206 static const struct proto_ops l2cap_sock_ops = {
4207 .family = PF_BLUETOOTH,
4208 .owner = THIS_MODULE,
4209 .release = l2cap_sock_release,
4210 .bind = l2cap_sock_bind,
4211 .connect = l2cap_sock_connect,
4212 .listen = l2cap_sock_listen,
4213 .accept = l2cap_sock_accept,
4214 .getname = l2cap_sock_getname,
4215 .sendmsg = l2cap_sock_sendmsg,
4216 .recvmsg = l2cap_sock_recvmsg,
4217 .poll = bt_sock_poll,
4218 .ioctl = bt_sock_ioctl,
4219 .mmap = sock_no_mmap,
4220 .socketpair = sock_no_socketpair,
4221 .shutdown = l2cap_sock_shutdown,
4222 .setsockopt = l2cap_sock_setsockopt,
4223 .getsockopt = l2cap_sock_getsockopt
/* Registered with the Bluetooth socket layer to create L2CAP sockets. */
4226 static const struct net_proto_family l2cap_sock_family_ops = {
4227 .family = PF_BLUETOOTH,
4228 .owner = THIS_MODULE,
4229 .create = l2cap_sock_create,
/* Callbacks registered with the HCI core — the lower-layer interface
 * through which all ACL events and data reach L2CAP. */
4232 static struct hci_proto l2cap_hci_proto = {
4234 .id = HCI_PROTO_L2CAP,
4235 .connect_ind = l2cap_connect_ind,
4236 .connect_cfm = l2cap_connect_cfm,
4237 .disconn_ind = l2cap_disconn_ind,
4238 .disconn_cfm = l2cap_disconn_cfm,
4239 .security_cfm = l2cap_security_cfm,
4240 .recv_acldata = l2cap_recv_acldata
/* Module init: register the protocol, the socket family and the HCI
 * protocol hooks (unwinding in reverse order on failure), then create
 * the debugfs entry. */
4243 static int __init l2cap_init(void)
4247 err = proto_register(&l2cap_proto, 0);
4251 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4253 BT_ERR("L2CAP socket registration failed");
4257 err = hci_register_proto(&l2cap_hci_proto);
4259 BT_ERR("L2CAP protocol registration failed");
/* Undo the socket registration before bailing out. */
4260 bt_sock_unregister(BTPROTO_L2CAP);
4265 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4266 bt_debugfs, NULL, &l2cap_debugfs_fops);
/* debugfs failure is non-fatal: only log it. */
4268 BT_ERR("Failed to create L2CAP debug file");
4271 BT_INFO("L2CAP ver %s", VERSION);
4272 BT_INFO("L2CAP socket layer initialized");
/* Error path: unregister the proto registered first. */
4277 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse order of l2cap_init(). */
4281 static void __exit l2cap_exit(void)
4283 debugfs_remove(l2cap_debugfs);
4285 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4286 BT_ERR("L2CAP socket unregistration failed");
4288 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4289 BT_ERR("L2CAP protocol unregistration failed");
4291 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol from another module pulls in
 * (and auto-loads) l2cap.ko without needing any other export. */
4294 void l2cap_load(void)
4296 /* Dummy function to trigger automatic L2CAP module loading by
4297 * other modules that use L2CAP sockets but don't use any other
4298 * symbols from it. */
4301 EXPORT_SYMBOL(l2cap_load);
4303 module_init(l2cap_init);
4304 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared as int at the top of the file
 * but exposed as a bool parameter, and max_transmit/tx_window are int
 * but exposed as uint — confirm the declarations match the
 * module_param() types (newer kernels warn on such mismatches). */
4306 module_param(enable_ertm, bool, 0644);
4307 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4309 module_param(max_transmit, uint, 0644);
4310 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4312 module_param(tx_window, uint, 0644);
4313 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4315 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4316 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4317 MODULE_VERSION(VERSION);
4318 MODULE_LICENSE("GPL");
/* Lets the socket layer demand-load this module for BTPROTO_L2CAP (0). */
4319 MODULE_ALIAS("bt-proto-0");