2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* Module version string and tunables. ERTM is disabled by default here. */
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
60 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
/* Features advertised in info responses; fixed-channel map has bit 1 set. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, protected by its rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines referenced before their definitions. */
71 static void __l2cap_sock_close(struct sock *sk, int reason);
72 static void l2cap_sock_close(struct sock *sk);
73 static void l2cap_sock_kill(struct sock *sk);
75 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
76 u8 code, u8 ident, u16 dlen, void *data);
78 /* ---- L2CAP timers ---- */
/* sk_timer callback: pick an errno based on the socket state, then close.
 * Connections that timed out while connected/configuring, or while
 * connecting above SDP security level, report ECONNREFUSED. */
79 static void l2cap_sock_timeout(unsigned long arg)
81 struct sock *sk = (struct sock *) arg;
84 BT_DBG("sock %p state %d", sk, sk->sk_state);
88 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
89 reason = ECONNREFUSED;
90 else if (sk->sk_state == BT_CONNECT &&
91 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
92 reason = ECONNREFUSED;
/* NOTE(review): default 'reason' initialization not visible in this
 * fragment — presumably ETIMEDOUT; confirm against full source. */
96 __l2cap_sock_close(sk, reason);
/* (Re)arm the per-socket timer to fire 'timeout' jiffies from now. */
104 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
106 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
107 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel the per-socket timer. */
110 static void l2cap_sock_clear_timer(struct sock *sk)
112 BT_DBG("sock %p state %d", sk, sk->sk_state);
113 sk_stop_timer(sk, &sk->sk_timer);
116 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for a matching destination CID.
 * Caller must hold the chan_list lock (double-underscore convention). */
117 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
120 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
121 if (l2cap_pi(s)->dcid == cid)
/* As above, but match on the source CID. Caller holds the list lock. */
127 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
131 if (l2cap_pi(s)->scid == cid)
137 /* Find channel with given SCID.
138 * Returns locked socket */
/* Locking wrapper: takes the list read lock around the raw SCID lookup. */
139 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 s = __l2cap_get_chan_by_scid(l, cid);
146 read_unlock(&l->lock);
/* Find the channel whose outstanding signalling request used 'ident'.
 * Caller holds the list lock. */
150 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
154 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around the ident lookup above. */
160 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
164 s = __l2cap_get_chan_by_ident(l, ident);
167 read_unlock(&l->lock);
/* Allocate the first free dynamic CID on this connection by linear scan
 * of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller holds the list lock. */
171 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
173 u16 cid = L2CAP_CID_DYN_START;
175 for (; cid < L2CAP_CID_DYN_END; cid++) {
176 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push 'sk' onto the head of the doubly-linked channel list.
 * Caller must hold the list write lock. */
183 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
188 l2cap_pi(l->head)->prev_c = sk;
190 l2cap_pi(sk)->next_c = l->head;
191 l2cap_pi(sk)->prev_c = NULL;
/* Unlink 'sk' from the channel list, fixing up neighbour pointers.
 * Takes the list write lock itself (bh-safe). */
195 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
197 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
199 write_lock_bh(&l->lock);
204 l2cap_pi(next)->prev_c = prev;
206 l2cap_pi(prev)->next_c = next;
207 write_unlock_bh(&l->lock);
/* Attach 'sk' to 'conn': assign CIDs by socket type, link it into the
 * connection's channel list, and queue on the parent's accept queue if
 * this is an incoming channel. Caller holds the list write lock. */
212 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
214 struct l2cap_chan_list *l = &conn->chan_list;
216 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
217 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason */
219 conn->disc_reason = 0x13;
221 l2cap_pi(sk)->conn = conn;
223 if (sk->sk_type == SOCK_SEQPACKET) {
224 /* Alloc CID for connection-oriented socket */
225 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
226 } else if (sk->sk_type == SOCK_DGRAM) {
227 /* Connectionless socket */
228 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
230 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
232 /* Raw socket can send/recv signalling messages only */
233 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
235 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 __l2cap_chan_link(l, sk);
/* Incoming channel: hand it to the listening parent's accept queue. */
241 bt_accept_enqueue(parent, sk);
245 * Must be called on the locked socket. */
/* Detach the channel from its connection (dropping the hcon reference),
 * mark the socket closed/zapped, and wake up any listening parent. */
246 static void l2cap_chan_del(struct sock *sk, int err)
248 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
249 struct sock *parent = bt_sk(sk)->parent;
251 l2cap_sock_clear_timer(sk);
253 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
256 /* Unlink from channel list */
257 l2cap_chan_unlink(&conn->chan_list, sk);
258 l2cap_pi(sk)->conn = NULL;
/* Each attached channel holds a reference on the ACL link. */
259 hci_conn_put(conn->hcon);
262 sk->sk_state = BT_CLOSED;
263 sock_set_flag(sk, SOCK_ZAPPED);
/* Still on a parent's accept queue: remove and notify the listener. */
269 bt_accept_unlink(sk);
270 parent->sk_data_ready(parent, 0);
272 sk->sk_state_change(sk);
275 /* Service level security */
/* Map the channel's PSM and requested security level to an HCI auth type
 * and ask the HCI layer to enforce it. SDP (PSM 0x0001) never bonds. */
276 static inline int l2cap_check_security(struct sock *sk)
278 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
281 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
283 auth_type = HCI_AT_NO_BONDING_MITM;
285 auth_type = HCI_AT_NO_BONDING;
/* Downgrade LOW to SDP level for the SDP PSM. */
287 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
288 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
290 switch (l2cap_pi(sk)->sec_level) {
291 case BT_SECURITY_HIGH:
292 auth_type = HCI_AT_GENERAL_BONDING_MITM;
294 case BT_SECURITY_MEDIUM:
295 auth_type = HCI_AT_GENERAL_BONDING;
298 auth_type = HCI_AT_NO_BONDING;
303 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling identifier for this connection. */
307 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
311 /* Get next available identificator.
312 * 1 - 128 are used by kernel.
313 * 129 - 199 are reserved.
314 * 200 - 254 are used by utilities like l2ping, etc.
317 spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range after 128. */
319 if (++conn->tx_ident > 128)
324 spin_unlock_bh(&conn->lock);
/* Build a signalling-channel command PDU and transmit it over the ACL link. */
329 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
331 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
333 BT_DBG("code 0x%2.2x", code);
338 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM S-frame carrying 'control', consuming any pending
 * Final/Poll bits, and appending a CRC16 FCS when the channel uses one. */
341 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
344 struct l2cap_hdr *lh;
345 struct l2cap_conn *conn = pi->conn;
346 int count, hlen = L2CAP_HDR_SIZE + 2;
/* FCS adds two trailing bytes to the frame. */
348 if (pi->fcs == L2CAP_FCS_CRC16)
351 BT_DBG("pi %p, control 0x%2.2x", pi, control);
353 count = min_t(unsigned int, conn->mtu, hlen);
354 control |= L2CAP_CTRL_FRAME_TYPE;
/* Pending F-bit/P-bit are one-shot: clear the flag once sent. */
356 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
357 control |= L2CAP_CTRL_FINAL;
358 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
361 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
362 control |= L2CAP_CTRL_POLL;
363 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
366 skb = bt_skb_alloc(count, GFP_ATOMIC);
370 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
371 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
372 lh->cid = cpu_to_le16(pi->dcid);
373 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything except the FCS field itself). */
375 if (pi->fcs == L2CAP_FCS_CRC16) {
376 u16 fcs = crc16(0, (u8 *)lh, count - 2);
377 put_unaligned_le16(fcs, skb_put(skb, 2));
380 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver-Ready, or Receiver-Not-Ready when locally busy,
 * acknowledging up to buffer_seq. */
383 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
385 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
386 control |= L2CAP_SUPER_RCV_NOT_READY;
388 control |= L2CAP_SUPER_RCV_READY;
390 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
392 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment: once the peer's feature mask is known
 * (and security passes) send a Connect Request; otherwise first issue an
 * Information Request for the feature mask and arm its timeout. */
395 static void l2cap_do_start(struct sock *sk)
397 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
399 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange already in flight — wait for it to finish. */
400 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
403 if (l2cap_check_security(sk)) {
404 struct l2cap_conn_req req;
405 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
406 req.psm = l2cap_pi(sk)->psm;
408 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
410 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
411 L2CAP_CONN_REQ, sizeof(req), &req);
414 struct l2cap_info_req req;
415 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
417 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
418 conn->info_ident = l2cap_get_ident(conn);
420 mod_timer(&conn->info_timer, jiffies +
421 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
423 l2cap_send_cmd(conn, conn->info_ident,
424 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnect Request for this channel's CID pair. */
428 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
430 struct l2cap_disconn_req req;
432 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
433 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
434 l2cap_send_cmd(conn, l2cap_get_ident(conn),
435 L2CAP_DISCONN_REQ, sizeof(req), &req);
438 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * BT_CONNECT channels send a Connect Request (if security passes);
 * BT_CONNECT2 channels answer their pending incoming request with a
 * success, authorization-pending, or authentication-pending response. */
439 static void l2cap_conn_start(struct l2cap_conn *conn)
441 struct l2cap_chan_list *l = &conn->chan_list;
444 BT_DBG("conn %p", conn);
448 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented channels take part in signalling here. */
451 if (sk->sk_type != SOCK_SEQPACKET) {
456 if (sk->sk_state == BT_CONNECT) {
457 if (l2cap_check_security(sk)) {
458 struct l2cap_conn_req req;
459 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
460 req.psm = l2cap_pi(sk)->psm;
462 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
464 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
465 L2CAP_CONN_REQ, sizeof(req), &req);
467 } else if (sk->sk_state == BT_CONNECT2) {
468 struct l2cap_conn_rsp rsp;
/* Response carries our CIDs swapped relative to our view. */
469 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
470 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
472 if (l2cap_check_security(sk)) {
473 if (bt_sk(sk)->defer_setup) {
474 struct sock *parent = bt_sk(sk)->parent;
475 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
476 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listener so userspace can accept/authorize. */
477 parent->sk_data_ready(parent, 0);
480 sk->sk_state = BT_CONFIG;
481 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
482 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
485 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
486 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
489 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
490 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
496 read_unlock(&l->lock);
/* ACL link is up: mark non-SEQPACKET channels connected immediately and
 * start establishment for channels waiting in BT_CONNECT. */
499 static void l2cap_conn_ready(struct l2cap_conn *conn)
501 struct l2cap_chan_list *l = &conn->chan_list;
504 BT_DBG("conn %p", conn);
508 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
511 if (sk->sk_type != SOCK_SEQPACKET) {
512 l2cap_sock_clear_timer(sk);
513 sk->sk_state = BT_CONNECTED;
514 sk->sk_state_change(sk);
515 } else if (sk->sk_state == BT_CONNECT)
521 read_unlock(&l->lock);
524 /* Notify sockets that we cannot guaranty reliability anymore */
/* Report 'err' to every channel that asked for force_reliable delivery. */
525 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
527 struct l2cap_chan_list *l = &conn->chan_list;
530 BT_DBG("conn %p", conn);
534 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
535 if (l2cap_pi(sk)->force_reliable)
539 read_unlock(&l->lock);
/* Information-request timer expired: give up on the feature exchange,
 * mark it done, and let pending channels proceed anyway. */
542 static void l2cap_info_timeout(unsigned long arg)
544 struct l2cap_conn *conn = (void *) arg;
546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
547 conn->info_ident = 0;
549 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link, initialising its locks, MTU, addresses, and info timer. */
552 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
554 struct l2cap_conn *conn = hcon->l2cap_data;
559 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
563 hcon->l2cap_data = conn;
566 BT_DBG("hcon %p conn %p", hcon, conn);
568 conn->mtu = hcon->hdev->acl_mtu;
569 conn->src = &hcon->hdev->bdaddr;
570 conn->dst = &hcon->dst;
574 spin_lock_init(&conn->lock);
575 rwlock_init(&conn->chan_list.lock);
577 setup_timer(&conn->info_timer, l2cap_info_timeout,
578 (unsigned long) conn);
/* 0x13 = "remote user terminated connection" default disconnect reason */
580 conn->disc_reason = 0x13;
/* Tear down an L2CAP connection: free any partial reassembly skb, detach
 * and kill every channel, stop the info timer, and drop the back pointer. */
585 static void l2cap_conn_del(struct hci_conn *hcon, int err)
587 struct l2cap_conn *conn = hcon->l2cap_data;
593 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
595 kfree_skb(conn->rx_skb);
/* Detach every remaining channel, propagating 'err' to its socket. */
598 while ((sk = conn->chan_list.head)) {
600 l2cap_chan_del(sk, err);
/* Timer is only armed once a feature-mask request was sent. */
605 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
606 del_timer_sync(&conn->info_timer);
608 hcon->l2cap_data = NULL;
/* Locking wrapper: add a channel under the list write lock. */
612 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
614 struct l2cap_chan_list *l = &conn->chan_list;
615 write_lock_bh(&l->lock);
616 __l2cap_chan_add(conn, sk, parent);
617 write_unlock_bh(&l->lock);
620 /* ---- Socket interface ---- */
/* Exact-match lookup of a bound socket by (source PSM, source bdaddr).
 * Caller must hold l2cap_sk_list.lock. */
621 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
624 struct hlist_node *node;
625 sk_for_each(sk, node, &l2cap_sk_list.head)
626 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
633 /* Find socket with psm and source bdaddr.
634 * Returns closest match.
/* Prefers an exact source-address match; falls back to a BDADDR_ANY
 * listener (kept in sk1). Caller must hold l2cap_sk_list.lock. */
636 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
638 struct sock *sk = NULL, *sk1 = NULL;
639 struct hlist_node *node;
641 sk_for_each(sk, node, &l2cap_sk_list.head) {
642 if (state && sk->sk_state != state)
645 if (l2cap_pi(sk)->psm == psm) {
647 if (!bacmp(&bt_sk(sk)->src, src))
651 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* 'node' non-NULL means the loop broke early on an exact match. */
655 return node ? sk : sk1;
658 /* Find socket with given address (psm, src).
659 * Returns locked socket */
/* Locking wrapper: takes the global socket-list read lock. */
660 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
663 read_lock(&l2cap_sk_list.lock);
664 s = __l2cap_get_sock_by_psm(state, psm, src);
667 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop any queued receive/transmit skbs. */
671 static void l2cap_sock_destruct(struct sock *sk)
675 skb_queue_purge(&sk->sk_receive_queue);
676 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
679 static void l2cap_sock_cleanup_listen(struct sock *parent)
683 BT_DBG("parent %p", parent);
685 /* Close not yet accepted channels */
686 while ((sk = bt_accept_dequeue(parent, NULL)))
687 l2cap_sock_close(sk);
689 parent->sk_state = BT_CLOSED;
690 sock_set_flag(parent, SOCK_ZAPPED);
693 /* Kill socket (only if zapped and orphan)
694 * Must be called on unlocked socket.
696 static void l2cap_sock_kill(struct sock *sk)
/* Only destroy sockets that are zapped and have no attached struct socket. */
698 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
701 BT_DBG("sk %p state %d", sk, sk->sk_state);
703 /* Kill poor orphan */
704 bt_sock_unlink(&l2cap_sk_list, sk);
705 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners reap their accept queue; connected
 * SEQPACKET channels send a Disconnect Request and wait in BT_DISCONN;
 * half-open incoming channels (BT_CONNECT2) answer with a refusal first.
 * Caller must hold the socket lock. */
709 static void __l2cap_sock_close(struct sock *sk, int reason)
711 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
713 switch (sk->sk_state) {
715 l2cap_sock_cleanup_listen(sk);
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Orderly shutdown: disconnect request + timer, stay in DISCONN. */
723 sk->sk_state = BT_DISCONN;
724 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
725 l2cap_send_disconn_req(conn, sk);
727 l2cap_chan_del(sk, reason);
731 if (sk->sk_type == SOCK_SEQPACKET) {
732 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
733 struct l2cap_conn_rsp rsp;
/* Refuse the pending incoming request before tearing down. */
736 if (bt_sk(sk)->defer_setup)
737 result = L2CAP_CR_SEC_BLOCK;
739 result = L2CAP_CR_BAD_PSM;
741 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
742 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
743 rsp.result = cpu_to_le16(result);
744 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
745 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
746 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
748 l2cap_chan_del(sk, reason);
753 l2cap_chan_del(sk, reason);
757 sock_set_flag(sk, SOCK_ZAPPED);
762 /* Must be called on unlocked socket. */
/* Public close: stop the timer, then close with ECONNRESET. */
763 static void l2cap_sock_close(struct sock *sk)
765 l2cap_sock_clear_timer(sk);
767 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a new socket's pinfo: inherit settings from a listening
 * parent when present, otherwise apply module defaults; always reset
 * flush timeout and the ERTM transmit/SREJ queues. */
772 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
774 struct l2cap_pinfo *pi = l2cap_pi(sk);
779 sk->sk_type = parent->sk_type;
780 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
782 pi->imtu = l2cap_pi(parent)->imtu;
783 pi->omtu = l2cap_pi(parent)->omtu;
784 pi->mode = l2cap_pi(parent)->mode;
785 pi->fcs = l2cap_pi(parent)->fcs;
786 pi->tx_win = l2cap_pi(parent)->tx_win;
787 pi->sec_level = l2cap_pi(parent)->sec_level;
788 pi->role_switch = l2cap_pi(parent)->role_switch;
789 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: module defaults (basic mode, CRC16 FCS, tx_window param). */
791 pi->imtu = L2CAP_DEFAULT_MTU;
793 pi->mode = L2CAP_MODE_BASIC;
794 pi->fcs = L2CAP_FCS_CRC16;
795 pi->tx_win = tx_window;
796 pi->sec_level = BT_SECURITY_LOW;
798 pi->force_reliable = 0;
801 /* Default config options */
803 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
804 skb_queue_head_init(TX_QUEUE(sk));
805 skb_queue_head_init(SREJ_QUEUE(sk));
806 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk allocations to hold struct l2cap_pinfo. */
809 static struct proto l2cap_proto = {
811 .owner = THIS_MODULE,
812 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP socket: destructor, send
 * timeout, state, per-socket timer, and registration in the global list. */
815 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
819 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
823 sock_init_data(sock, sk);
824 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
826 sk->sk_destruct = l2cap_sock_destruct;
827 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
829 sock_reset_flag(sk, SOCK_ZAPPED);
831 sk->sk_protocol = proto;
832 sk->sk_state = BT_OPEN;
834 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
836 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the type (SEQPACKET/DGRAM/RAW only; RAW
 * requires CAP_NET_RAW for user sockets), then allocate and init the sock. */
840 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
845 BT_DBG("sock %p", sock);
847 sock->state = SS_UNCONNECTED;
849 if (sock->type != SOCK_SEQPACKET &&
850 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
851 return -ESOCKTNOSUPPORT;
853 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
856 sock->ops = &l2cap_sock_ops;
858 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
862 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the sockaddr, check state and privileged-PSM rights
 * (< 0x1001 needs CAP_NET_BIND_SERVICE), reject duplicate (psm, src)
 * bindings, then record source address/PSM and move to BT_BOUND. */
866 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
868 struct sock *sk = sock->sk;
869 struct sockaddr_l2 la;
874 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy at most sizeof(la); shorter userspace structs are zero-padded. */
877 memset(&la, 0, sizeof(la));
878 len = min_t(unsigned int, sizeof(la), alen);
879 memcpy(&la, addr, len);
886 if (sk->sk_state != BT_OPEN) {
891 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
892 !capable(CAP_NET_BIND_SERVICE)) {
897 write_lock_bh(&l2cap_sk_list.lock);
899 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
902 /* Save source address */
903 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
904 l2cap_pi(sk)->psm = la.l2_psm;
905 l2cap_pi(sk)->sport = la.l2_psm;
906 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) PSMs default to SDP security level. */
908 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
909 __le16_to_cpu(la.l2_psm) == 0x0003)
910 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
913 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing connection: route to an HCI device, derive the
 * HCI auth type from socket type/PSM/security level, create the ACL link
 * and L2CAP connection, attach the channel, and either complete at once
 * (link already up) or arm the connect timer. */
920 static int l2cap_do_connect(struct sock *sk)
922 bdaddr_t *src = &bt_sk(sk)->src;
923 bdaddr_t *dst = &bt_sk(sk)->dst;
924 struct l2cap_conn *conn;
925 struct hci_conn *hcon;
926 struct hci_dev *hdev;
930 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
933 hdev = hci_get_route(dst, src);
935 return -EHOSTUNREACH;
937 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; mirrors l2cap_check_security logic. */
941 if (sk->sk_type == SOCK_RAW) {
942 switch (l2cap_pi(sk)->sec_level) {
943 case BT_SECURITY_HIGH:
944 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
946 case BT_SECURITY_MEDIUM:
947 auth_type = HCI_AT_DEDICATED_BONDING;
950 auth_type = HCI_AT_NO_BONDING;
953 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
954 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
955 auth_type = HCI_AT_NO_BONDING_MITM;
957 auth_type = HCI_AT_NO_BONDING;
959 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
960 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
962 switch (l2cap_pi(sk)->sec_level) {
963 case BT_SECURITY_HIGH:
964 auth_type = HCI_AT_GENERAL_BONDING_MITM;
966 case BT_SECURITY_MEDIUM:
967 auth_type = HCI_AT_GENERAL_BONDING;
970 auth_type = HCI_AT_NO_BONDING;
975 hcon = hci_connect(hdev, ACL_LINK, dst,
976 l2cap_pi(sk)->sec_level, auth_type);
980 conn = l2cap_conn_add(hcon, 0);
988 /* Update source addr of the socket */
989 bacpy(src, conn->src);
991 l2cap_chan_add(conn, sk, NULL);
993 sk->sk_state = BT_CONNECT;
994 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already connected: non-SEQPACKET sockets finish immediately. */
996 if (hcon->state == BT_CONNECTED) {
997 if (sk->sk_type != SOCK_SEQPACKET) {
998 l2cap_sock_clear_timer(sk);
999 sk->sk_state = BT_CONNECTED;
1005 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and PSM (SEQPACKET needs one), check the
 * channel mode, set the destination, start l2cap_do_connect(), then wait
 * for BT_CONNECTED (or return early for non-blocking sockets). */
1010 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1012 struct sock *sk = sock->sk;
1013 struct sockaddr_l2 la;
1016 BT_DBG("sk %p", sk);
1018 if (!addr || alen < sizeof(addr->sa_family) ||
1019 addr->sa_family != AF_BLUETOOTH)
1022 memset(&la, 0, sizeof(la));
1023 len = min_t(unsigned int, sizeof(la), alen);
1024 memcpy(&la, addr, len);
1031 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
/* ERTM/streaming modes are only usable when the module enables them. */
1036 switch (l2cap_pi(sk)->mode) {
1037 case L2CAP_MODE_BASIC:
1039 case L2CAP_MODE_ERTM:
1040 case L2CAP_MODE_STREAMING:
1049 switch (sk->sk_state) {
1053 /* Already connecting */
1057 /* Already connected */
1070 /* Set destination address and psm */
1071 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1072 l2cap_pi(sk)->psm = la.l2_psm;
1074 err = l2cap_do_connect(sk);
1079 err = bt_sock_wait_state(sk, BT_CONNECTED,
1080 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets in a supported mode; sockets
 * bound without a PSM get one auto-assigned from the odd dynamic range
 * 0x1001-0x10ff, then the backlog is set and state moves to BT_LISTEN. */
1086 static int l2cap_sock_listen(struct socket *sock, int backlog)
1088 struct sock *sk = sock->sk;
1091 BT_DBG("sk %p backlog %d", sk, backlog);
1095 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1100 switch (l2cap_pi(sk)->mode) {
1101 case L2CAP_MODE_BASIC:
1103 case L2CAP_MODE_ERTM:
1104 case L2CAP_MODE_STREAMING:
1113 if (!l2cap_pi(sk)->psm) {
1114 bdaddr_t *src = &bt_sk(sk)->src;
1119 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd, hence the += 2 stride. */
1121 for (psm = 0x1001; psm < 0x1100; psm += 2)
1122 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1123 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1124 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1129 write_unlock_bh(&l2cap_sk_list.lock);
1135 sk->sk_max_ack_backlog = backlog;
1136 sk->sk_ack_backlog = 0;
1137 sk->sk_state = BT_LISTEN;
/* accept(2): standard wake-one wait loop on the listener's accept queue,
 * re-taking the socket lock after each schedule and bailing out on state
 * change, timeout expiry, or a pending signal. */
1144 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1146 DECLARE_WAITQUEUE(wait, current);
1147 struct sock *sk = sock->sk, *nsk;
1151 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1153 if (sk->sk_state != BT_LISTEN) {
1158 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1160 BT_DBG("sk %p timeo %ld", sk, timeo);
1162 /* Wait for an incoming connection. (wake-one). */
1163 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1164 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1165 set_current_state(TASK_INTERRUPTIBLE);
1172 timeo = schedule_timeout(timeo);
1173 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Listener may have been closed while we slept. */
1175 if (sk->sk_state != BT_LISTEN) {
1180 if (signal_pending(current)) {
1181 err = sock_intr_errno(timeo);
1185 set_current_state(TASK_RUNNING);
1186 remove_wait_queue(sk_sleep(sk), &wait);
1191 newsock->state = SS_CONNECTED;
1193 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 from either the peer's
 * (psm, dst bdaddr, dcid) or our own (sport, src bdaddr, scid). */
1200 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1202 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1203 struct sock *sk = sock->sk;
1205 BT_DBG("sock %p, sk %p", sock, sk);
1207 addr->sa_family = AF_BLUETOOTH;
1208 *len = sizeof(struct sockaddr_l2);
1211 la->l2_psm = l2cap_pi(sk)->psm;
1212 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1213 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1215 la->l2_psm = l2cap_pi(sk)->sport;
1216 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1217 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: once retries exhaust remote_max_tx the channel is
 * disconnected; otherwise bump the retry count, rearm the monitor, and
 * poll the peer with an S-frame (P-bit set). */
1223 static void l2cap_monitor_timeout(unsigned long arg)
1225 struct sock *sk = (void *) arg;
1229 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1230 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1235 l2cap_pi(sk)->retry_count++;
1236 __mod_monitor_timer();
1238 control = L2CAP_CTRL_POLL;
1239 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: enter the WAIT_F state, start the monitor
 * timer, and poll the peer for an acknowledgement. */
1243 static void l2cap_retrans_timeout(unsigned long arg)
1245 struct sock *sk = (void *) arg;
1249 l2cap_pi(sk)->retry_count = 1;
1250 __mod_monitor_timer();
1252 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1254 control = L2CAP_CTRL_POLL;
1255 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Drop frames from the head of TX_QUEUE up to (not including) the
 * expected_ack_seq; stop the retransmission timer once nothing is
 * left unacknowledged. */
1259 static void l2cap_drop_acked_frames(struct sock *sk)
1261 struct sk_buff *skb;
1263 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1264 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1267 skb = skb_dequeue(TX_QUEUE(sk));
1270 l2cap_pi(sk)->unacked_frames--;
1273 if (!l2cap_pi(sk)->unacked_frames)
1274 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully-built frame to the HCI layer for transmission. */
1279 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1281 struct l2cap_pinfo *pi = l2cap_pi(sk);
1284 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1286 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: for each queued frame, clone it, stamp the
 * TxSeq into the control field, recompute the FCS, send, then drop the
 * original — streaming mode never retransmits. */
1293 static int l2cap_streaming_send(struct sock *sk)
1295 struct sk_buff *skb, *tx_skb;
1296 struct l2cap_pinfo *pi = l2cap_pi(sk);
1300 while ((skb = sk->sk_send_head)) {
1301 tx_skb = skb_clone(skb, GFP_ATOMIC);
1303 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1304 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1305 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS trailer occupies the final two bytes reserved at build time. */
1307 if (pi->fcs == L2CAP_FCS_CRC16) {
1308 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1309 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1312 err = l2cap_do_send(sk, tx_skb);
1314 l2cap_send_disconn_req(pi->conn, sk);
/* TxSeq is modulo-64 per the ERTM control field. */
1318 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1320 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1321 sk->sk_send_head = NULL;
1323 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1325 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame whose TxSeq matches 'tx_seq': locate it
 * in TX_QUEUE, enforce remote_max_tx, clone it, rewrite ReqSeq/TxSeq in
 * the control field, refresh the FCS, and send the clone. */
1331 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1333 struct l2cap_pinfo *pi = l2cap_pi(sk);
1334 struct sk_buff *skb, *tx_skb;
1338 skb = skb_peek(TX_QUEUE(sk));
1340 if (bt_cb(skb)->tx_seq != tx_seq) {
1341 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1343 skb = skb_queue_next(TX_QUEUE(sk), skb);
/* Too many attempts on this frame: give up on the channel. */
1347 if (pi->remote_max_tx &&
1348 bt_cb(skb)->retries == pi->remote_max_tx) {
1349 l2cap_send_disconn_req(pi->conn, sk);
1353 tx_skb = skb_clone(skb, GFP_ATOMIC);
1354 bt_cb(skb)->retries++;
1355 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1356 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1357 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1358 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1360 if (pi->fcs == L2CAP_FCS_CRC16) {
1361 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1362 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1365 err = l2cap_do_send(sk, tx_skb);
1367 l2cap_send_disconn_req(pi->conn, sk);
1375 static int l2cap_ertm_send(struct sock *sk)
1377 struct sk_buff *skb, *tx_skb;
1378 struct l2cap_pinfo *pi = l2cap_pi(sk);
1382 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1385 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1386 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1388 if (pi->remote_max_tx &&
1389 bt_cb(skb)->retries == pi->remote_max_tx) {
1390 l2cap_send_disconn_req(pi->conn, sk);
1394 tx_skb = skb_clone(skb, GFP_ATOMIC);
1396 bt_cb(skb)->retries++;
1398 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1399 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1400 control |= L2CAP_CTRL_FINAL;
1401 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1403 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1404 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1405 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1408 if (pi->fcs == L2CAP_FCS_CRC16) {
1409 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1410 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1413 err = l2cap_do_send(sk, tx_skb);
1415 l2cap_send_disconn_req(pi->conn, sk);
1418 __mod_retrans_timer();
1420 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1421 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1423 pi->unacked_frames++;
1426 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1427 sk->sk_send_head = NULL;
1429 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received frames: send RNR when locally busy; otherwise try
 * to piggyback the ack on pending I-frames via l2cap_ertm_send(), and
 * fall back to an explicit RR S-frame when nothing was sent. */
1437 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1439 struct sock *sk = (struct sock *)pi;
1442 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1444 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1445 control |= L2CAP_SUPER_RCV_NOT_READY;
1446 return l2cap_send_sframe(pi, control);
1447 } else if (l2cap_ertm_send(sk) == 0) {
1448 control |= L2CAP_SUPER_RCV_READY;
1449 return l2cap_send_sframe(pi, control);
/* Send a Select-Reject S-frame (with F-bit) for the last entry on the
 * SREJ list — the most recently detected missing TxSeq. */
1454 static int l2cap_send_srejtail(struct sock *sk)
1456 struct srej_list *tail;
1459 control = L2CAP_SUPER_SELECT_REJECT;
1460 control |= L2CAP_CTRL_FINAL;
1462 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1463 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1465 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user iovec data into 'skb', spilling anything beyond 'count'
 * bytes into a chain of MTU-sized fragment skbs on skb's frag_list. */
1470 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1472 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1473 struct sk_buff **frag;
1476 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1483 /* Continuation fragments (no L2CAP header) */
1484 frag = &skb_shinfo(skb)->frag_list;
1486 count = min_t(unsigned int, conn->mtu, len);
1488 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1491 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1497 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * followed by the user payload copied from the iovec.
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
1503 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1505 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1506 struct sk_buff *skb;
/* hlen = L2CAP header plus the 2-byte PSM field. */
1507 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1508 struct l2cap_hdr *lh;
1510 BT_DBG("sk %p len %d", sk, (int)len);
1512 count = min_t(unsigned int, (conn->mtu - hlen), len);
1513 skb = bt_skb_send_alloc(sk, count + hlen,
1514 msg->msg_flags & MSG_DONTWAIT, &err);
1516 return ERR_PTR(-ENOMEM);
1518 /* Create L2CAP header */
1519 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1520 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1521 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1522 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1524 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1525 if (unlikely(err < 0)) {
1527 return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: plain L2CAP header followed by the
 * user payload. Returns the skb or an ERR_PTR on failure. */
1532 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1534 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1535 struct sk_buff *skb;
1536 int err, count, hlen = L2CAP_HDR_SIZE;
1537 struct l2cap_hdr *lh;
1539 BT_DBG("sk %p len %d", sk, (int)len);
1541 count = min_t(unsigned int, (conn->mtu - hlen), len);
1542 skb = bt_skb_send_alloc(sk, count + hlen,
1543 msg->msg_flags & MSG_DONTWAIT, &err);
1545 return ERR_PTR(-ENOMEM);
1547 /* Create L2CAP header */
1548 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1549 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1550 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1552 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1553 if (unlikely(err < 0)) {
1555 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 2-byte control
 * field, optional 2-byte SDU length (for SAR start frames), payload,
 * and a zeroed 2-byte FCS placeholder filled in at transmit time.
 * Returns the skb or an ERR_PTR on failure. */
1560 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1562 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1563 struct sk_buff *skb;
/* hlen = L2CAP header + control field; grows below for sdulen/FCS. */
1564 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1565 struct l2cap_hdr *lh;
1567 BT_DBG("sk %p len %d", sk, (int)len);
1572 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1575 count = min_t(unsigned int, (conn->mtu - hlen), len);
1576 skb = bt_skb_send_alloc(sk, count + hlen,
1577 msg->msg_flags & MSG_DONTWAIT, &err);
1579 return ERR_PTR(-ENOMEM);
1581 /* Create L2CAP header */
1582 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1583 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1584 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1585 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length is only present on SAR "start" frames (sdulen != 0 path). */
1587 put_unaligned_le16(sdulen, skb_put(skb, 2));
1589 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1590 if (unlikely(err < 0)) {
1592 return ERR_PTR(err);
/* Reserve the FCS trailer; the real value is stamped before sending. */
1595 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1596 put_unaligned_le16(0, skb_put(skb, 2));
1598 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the peer's MPS into a chain of I-frames
 * (SAR start / continue / end), building them on a private queue first
 * so a mid-stream allocation failure can purge cleanly, then splicing
 * the whole chain onto the socket's TX queue at once.
 */
1602 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1604 struct l2cap_pinfo *pi = l2cap_pi(sk);
1605 struct sk_buff *skb;
1606 struct sk_buff_head sar_queue;
1610 __skb_queue_head_init(&sar_queue);
1611 control = L2CAP_SDU_START;
/* Start frame carries the total SDU length ('len') in its SDU-len field. */
1612 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1614 return PTR_ERR(skb);
1616 __skb_queue_tail(&sar_queue, skb);
1617 len -= pi->remote_mps;
1618 size += pi->remote_mps;
/* Middle segments are full MPS-sized; the final one takes the remainder. */
1624 if (len > pi->remote_mps) {
1625 control |= L2CAP_SDU_CONTINUE;
1626 buflen = pi->remote_mps;
1628 control |= L2CAP_SDU_END;
1632 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything queued so far — partial SDUs are never sent. */
1634 skb_queue_purge(&sar_queue);
1635 return PTR_ERR(skb);
1638 __skb_queue_tail(&sar_queue, skb);
1643 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
/* If nothing was pending, transmission resumes from our first segment. */
1644 if (sk->sk_send_head == NULL)
1645 sk->sk_send_head = sar_queue.next;
/*
 * sendmsg() entry point for L2CAP sockets. Rejects MSG_OOB and
 * non-connected sockets, then dispatches on socket type / channel mode:
 * SOCK_DGRAM gets a connectionless PDU sent immediately; basic mode
 * gets a single PDU (bounded by the outgoing MTU); ERTM/streaming
 * queue one I-frame or a SAR-segmented chain and kick the mode-specific
 * transmit engine.
 */
1650 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1652 struct sock *sk = sock->sk;
1653 struct l2cap_pinfo *pi = l2cap_pi(sk);
1654 struct sk_buff *skb;
1658 BT_DBG("sock %p, sk %p", sock, sk);
1660 err = sock_error(sk);
/* L2CAP has no out-of-band data concept. */
1664 if (msg->msg_flags & MSG_OOB)
1669 if (sk->sk_state != BT_CONNECTED) {
1674 /* Connectionless channel */
1675 if (sk->sk_type == SOCK_DGRAM) {
1676 skb = l2cap_create_connless_pdu(sk, msg, len)
1680 err = l2cap_do_send(sk, skb);
1685 case L2CAP_MODE_BASIC:
1686 /* Check outgoing MTU */
1687 if (len > pi->omtu) {
1692 /* Create a basic PDU */
1693 skb = l2cap_create_basic_pdu(sk, msg, len);
1699 err = l2cap_do_send(sk, skb);
1704 case L2CAP_MODE_ERTM:
1705 case L2CAP_MODE_STREAMING:
1706 /* Entire SDU fits into one PDU */
1707 if (len <= pi->remote_mps) {
1708 control = L2CAP_SDU_UNSEGMENTED;
1709 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1714 __skb_queue_tail(TX_QUEUE(sk), skb);
1715 if (sk->sk_send_head == NULL)
1716 sk->sk_send_head = skb;
1718 /* Segment SDU into multiples PDUs */
1719 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Streaming mode fires and forgets; ERTM tracks acks/retransmission. */
1724 if (pi->mode == L2CAP_MODE_STREAMING)
1725 err = l2cap_streaming_send(sk);
1727 err = l2cap_ertm_send(sk);
1734 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() entry point. If the channel is in deferred-setup state
 * (BT_CONNECT2 with defer_setup), the first read acts as the accept
 * trigger: send the pending positive connect response and move to
 * BT_CONFIG. Actual data delivery is delegated to bt_sock_recvmsg().
 */
1743 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1745 struct sock *sk = sock->sk;
1749 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1750 struct l2cap_conn_rsp rsp;
1752 sk->sk_state = BT_CONFIG;
/* In the response our scid is the peer's dcid and vice versa. */
1754 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1755 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1756 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1757 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1758 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1759 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Generic Bluetooth socket receive handles the queued data. */
1767 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (channel MTUs,
 * mode, FCS, tx window) and L2CAP_LM (link-mode bits mapped onto the
 * newer sec_level / role_switch / force_reliable fields).
 */
1770 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1772 struct sock *sk = sock->sk;
1773 struct l2cap_options opts;
1777 BT_DBG("sk %p", sk);
/* Pre-fill with current values so a short user buffer keeps the rest. */
1783 opts.imtu = l2cap_pi(sk)->imtu;
1784 opts.omtu = l2cap_pi(sk)->omtu;
1785 opts.flush_to = l2cap_pi(sk)->flush_to;
1786 opts.mode = l2cap_pi(sk)->mode;
1787 opts.fcs = l2cap_pi(sk)->fcs;
1788 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1790 len = min_t(unsigned int, sizeof(opts), optlen);
1791 if (copy_from_user((char *) &opts, optval, len)) {
1796 l2cap_pi(sk)->imtu = opts.imtu;
1797 l2cap_pi(sk)->omtu = opts.omtu;
1798 l2cap_pi(sk)->mode = opts.mode;
1799 l2cap_pi(sk)->fcs = opts.fcs;
1800 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1804 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode flags to security levels (highest bit wins). */
1809 if (opt & L2CAP_LM_AUTH)
1810 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1811 if (opt & L2CAP_LM_ENCRYPT)
1812 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1813 if (opt & L2CAP_LM_SECURE)
1814 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1816 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1817 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * setsockopt() dispatcher. SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY (seqpacket/raw only, level range
 * checked) and BT_DEFER_SETUP (only before the channel is live).
 */
1829 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1831 struct sock *sk = sock->sk;
1832 struct bt_security sec;
1836 BT_DBG("sk %p", sk);
1838 if (level == SOL_L2CAP)
1839 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1841 if (level != SOL_BLUETOOTH)
1842 return -ENOPROTOOPT;
1848 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
/* Default if the user supplies a short struct. */
1853 sec.level = BT_SECURITY_LOW;
1855 len = min_t(unsigned int, sizeof(sec), optlen);
1856 if (copy_from_user((char *) &sec, optval, len)) {
1861 if (sec.level < BT_SECURITY_LOW ||
1862 sec.level > BT_SECURITY_HIGH) {
1867 l2cap_pi(sk)->sec_level = sec.level;
1870 case BT_DEFER_SETUP:
/* Deferred setup only makes sense before/while listening. */
1871 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1876 if (get_user(opt, (u32 __user *) optval)) {
1881 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler: returns L2CAP_OPTIONS, a
 * reconstructed L2CAP_LM bitmask derived from sec_level and the
 * role/reliability flags, or L2CAP_CONNINFO (handle + device class)
 * for connected channels.
 */
1893 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1895 struct sock *sk = sock->sk;
1896 struct l2cap_options opts;
1897 struct l2cap_conninfo cinfo;
1901 BT_DBG("sk %p", sk);
1903 if (get_user(len, optlen))
1910 opts.imtu = l2cap_pi(sk)->imtu;
1911 opts.omtu = l2cap_pi(sk)->omtu;
1912 opts.flush_to = l2cap_pi(sk)->flush_to;
1913 opts.mode = l2cap_pi(sk)->mode;
1914 opts.fcs = l2cap_pi(sk)->fcs;
1915 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1917 len = min_t(unsigned int, len, sizeof(opts));
1918 if (copy_to_user(optval, (char *) &opts, len))
/* Translate sec_level back into the legacy cumulative LM bits. */
1924 switch (l2cap_pi(sk)->sec_level) {
1925 case BT_SECURITY_LOW:
1926 opt = L2CAP_LM_AUTH;
1928 case BT_SECURITY_MEDIUM:
1929 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1931 case BT_SECURITY_HIGH:
1932 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1940 if (l2cap_pi(sk)->role_switch)
1941 opt |= L2CAP_LM_MASTER;
1943 if (l2cap_pi(sk)->force_reliable)
1944 opt |= L2CAP_LM_RELIABLE;
1946 if (put_user(opt, (u32 __user *) optval))
1950 case L2CAP_CONNINFO:
/* Conninfo is valid while connected, or in deferred-accept state. */
1951 if (sk->sk_state != BT_CONNECTED &&
1952 !(sk->sk_state == BT_CONNECT2 &&
1953 bt_sk(sk)->defer_setup)) {
1958 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1959 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1961 len = min_t(unsigned int, len, sizeof(cinfo));
1962 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * getsockopt() dispatcher, mirror of l2cap_sock_setsockopt():
 * SOL_L2CAP goes to the legacy handler; SOL_BLUETOOTH returns
 * BT_SECURITY or the BT_DEFER_SETUP flag.
 */
1976 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1978 struct sock *sk = sock->sk;
1979 struct bt_security sec;
1982 BT_DBG("sk %p", sk);
1984 if (level == SOL_L2CAP)
1985 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1987 if (level != SOL_BLUETOOTH)
1988 return -ENOPROTOOPT;
1990 if (get_user(len, optlen))
1997 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
2002 sec.level = l2cap_pi(sk)->sec_level;
2004 len = min_t(unsigned int, len, sizeof(sec));
2005 if (copy_to_user(optval, (char *) &sec, len))
2010 case BT_DEFER_SETUP:
2011 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2016 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() handler: on first call, mark both directions shut,
 * cancel the channel timer and start closing the channel; honour
 * SO_LINGER by waiting for BT_CLOSED within the linger time.
 */
2030 static int l2cap_sock_shutdown(struct socket *sock, int how)
2032 struct sock *sk = sock->sk;
2035 BT_DBG("sock %p, sk %p", sock, sk);
/* Idempotent: only act on the first shutdown request. */
2041 if (!sk->sk_shutdown) {
2042 sk->sk_shutdown = SHUTDOWN_MASK;
2043 l2cap_sock_clear_timer(sk);
2044 __l2cap_sock_close(sk, 0);
2046 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2047 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * release() handler: full shutdown of both directions, then kill the
 * socket (frees it if it is already zapped / has no remaining users).
 */
2054 static int l2cap_sock_release(struct socket *sock)
2056 struct sock *sk = sock->sk;
2059 BT_DBG("sock %p, sk %p", sock, sk);
2064 err = l2cap_sock_shutdown(sock, 2);
2067 l2cap_sock_kill(sk);
/*
 * Mark a channel fully configured and wake whoever is waiting on it:
 * the connecting thread (no parent) or the listening parent socket's
 * accept queue (incoming channel).
 */
2071 static void l2cap_chan_ready(struct sock *sk)
2073 struct sock *parent = bt_sk(sk)->parent;
2075 BT_DBG("sk %p, parent %p", sk, parent);
/* Configuration finished: clear per-channel config state and timer. */
2077 l2cap_pi(sk)->conf_state = 0;
2078 l2cap_sock_clear_timer(sk);
2081 /* Outgoing channel.
2082 * Wake up socket sleeping on connect.
2084 sk->sk_state = BT_CONNECTED;
2085 sk->sk_state_change(sk);
2087 /* Incoming channel.
2088 * Wake up socket sleeping on accept.
2090 parent->sk_data_ready(parent, 0);
2094 /* Copy frame to all raw sockets on that connection */
2095 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2097 struct l2cap_chan_list *l = &conn->chan_list;
2098 struct sk_buff *nskb;
2101 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock and hand a
 * clone of the frame to every SOCK_RAW socket (sniffers). */
2103 read_lock(&l->lock);
2104 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2105 if (sk->sk_type != SOCK_RAW)
2108 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: we are in a locked/softirq receive path. */
2111 nskb = skb_clone(skb, GFP_ATOMIC);
2115 if (sock_queue_rcv_skb(sk, nskb))
2118 read_unlock(&l->lock);
2121 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel command skb: L2CAP header on CID 0x0001,
 * command header (code/ident/len), then 'dlen' bytes of payload.
 * Payload exceeding the ACL MTU is chained as header-less fragments.
 */
2122 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2123 u8 code, u8 ident, u16 dlen, void *data)
2125 struct sk_buff *skb, **frag;
2126 struct l2cap_cmd_hdr *cmd;
2127 struct l2cap_hdr *lh;
2130 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2131 conn, code, ident, dlen);
2133 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2134 count = min_t(unsigned int, conn->mtu, len);
2136 skb = bt_skb_alloc(count, GFP_ATOMIC);
2140 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2141 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2142 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2144 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2147 cmd->len = cpu_to_le16(dlen);
/* First skb: as much payload as fits after the two headers. */
2150 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2151 memcpy(skb_put(skb, count), data, count);
2157 /* Continuation fragments (no L2CAP header) */
2158 frag = &skb_shinfo(skb)->frag_list;
2160 count = min_t(unsigned int, conn->mtu, len);
2162 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2166 memcpy(skb_put(*frag, count), data, count);
2171 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: returns its total size
 * (header + value) via the return value and fills type/olen/val.
 * 1/2/4-byte values are converted to host order; larger values are
 * returned as a pointer cast into 'val'.
 */
2181 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2183 struct l2cap_conf_opt *opt = *ptr;
2186 len = L2CAP_CONF_OPT_SIZE + opt->len;
2194 *val = *((u8 *) opt->val);
2198 *val = __le16_to_cpu(*((__le16 *) opt->val));
2202 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer, not a value. */
2206 *val = (unsigned long) opt->val;
2210 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr and advance the cursor.
 * Inverse of l2cap_get_conf_opt(): 1/2/4-byte values are stored in
 * little-endian wire order; other lengths are memcpy'd from the
 * pointer passed in 'val'.
 */
2214 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2216 struct l2cap_conf_opt *opt = *ptr;
2218 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2225 *((u8 *) opt->val) = val;
2229 *((__le16 *) opt->val) = cpu_to_le16(val);
2233 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Variable-length option: 'val' is really a pointer to the data. */
2237 memcpy(opt->val, (void *) val, len);
2241 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: send a pending acknowledgement frame. */
2244 static void l2cap_ack_timeout(unsigned long arg)
2246 struct sock *sk = (void *) arg;
2249 l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialise per-channel ERTM state: zero all sequence/ack counters
 * and set up the three ERTM timers (retransmission, monitor, ack)
 * plus the selective-reject receive queue.
 */
2253 static inline void l2cap_ertm_init(struct sock *sk)
2255 l2cap_pi(sk)->expected_ack_seq = 0;
2256 l2cap_pi(sk)->unacked_frames = 0;
2257 l2cap_pi(sk)->buffer_seq = 0;
2258 l2cap_pi(sk)->num_acked = 0;
2259 l2cap_pi(sk)->frames_sent = 0;
2261 setup_timer(&l2cap_pi(sk)->retrans_timer,
2262 l2cap_retrans_timeout, (unsigned long) sk);
2263 setup_timer(&l2cap_pi(sk)->monitor_timer,
2264 l2cap_monitor_timeout, (unsigned long) sk);
2265 setup_timer(&l2cap_pi(sk)->ack_timer,
2266 l2cap_ack_timeout, (unsigned long) sk);
2268 __skb_queue_head_init(SREJ_QUEUE(sk));
/*
 * Return non-zero if 'mode' is supported both locally (feature mask,
 * optionally extended with ERTM/streaming when enable_ertm is set)
 * and by the remote 'feat_mask'.
 */
2271 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2273 u32 local_feat_mask = l2cap_feat_mask;
2275 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2278 case L2CAP_MODE_ERTM:
2279 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2280 case L2CAP_MODE_STREAMING:
2281 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the channel mode to actually use: keep the requested
 * ERTM/streaming mode when both sides support it, otherwise fall
 * back to basic mode.
 */
2287 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2290 case L2CAP_MODE_STREAMING:
2291 case L2CAP_MODE_ERTM:
2292 if (l2cap_mode_supported(mode, remote_feat_mask))
2296 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing configuration request in 'data' for this channel:
 * on the first request, settle the channel mode (disconnecting if an
 * unsupported ERTM/streaming mode was mandated); then emit MTU, RFC
 * and FCS options appropriate to the chosen mode. Returns the total
 * request length (return statement outside this excerpt).
 */
2300 static int l2cap_build_conf_req(struct sock *sk, void *data)
2302 struct l2cap_pinfo *pi = l2cap_pi(sk);
2303 struct l2cap_conf_req *req = data;
2304 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2305 void *ptr = req->data;
2307 BT_DBG("sk %p", sk);
/* Mode selection happens only before any config exchange took place. */
2309 if (pi->num_conf_req || pi->num_conf_rsp)
2313 case L2CAP_MODE_STREAMING:
2314 case L2CAP_MODE_ERTM:
2315 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2316 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2317 l2cap_send_disconn_req(pi->conn, sk);
2320 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2326 case L2CAP_MODE_BASIC:
/* Only send an MTU option when it differs from the spec default. */
2327 if (pi->imtu != L2CAP_DEFAULT_MTU)
2328 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2331 case L2CAP_MODE_ERTM:
2332 rfc.mode = L2CAP_MODE_ERTM;
2333 rfc.txwin_size = pi->tx_win;
2334 rfc.max_transmit = max_transmit;
/* Timeouts are informational in a request; responder fills real values. */
2335 rfc.retrans_timeout = 0;
2336 rfc.monitor_timeout = 0;
2337 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap MPS so one I-frame (headers + FCS ~= 10 bytes) fits the ACL MTU. */
2338 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2339 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2341 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2342 sizeof(rfc), (unsigned long) &rfc);
2344 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2347 if (pi->fcs == L2CAP_FCS_NONE ||
2348 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2349 pi->fcs = L2CAP_FCS_NONE;
2350 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2354 case L2CAP_MODE_STREAMING:
2355 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode never retransmits, so max_transmit/timeouts are 0. */
2357 rfc.max_transmit = 0;
2358 rfc.retrans_timeout = 0;
2359 rfc.monitor_timeout = 0;
2360 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2361 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2362 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2364 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2365 sizeof(rfc), (unsigned long) &rfc);
2367 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2370 if (pi->fcs == L2CAP_FCS_NONE ||
2371 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2372 pi->fcs = L2CAP_FCS_NONE;
2373 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2378 /* FIXME: Need actual value of the flush timeout */
2379 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2380 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2382 req->dcid = cpu_to_le16(pi->dcid);
2383 req->flags = cpu_to_le16(0);
/*
 * Parse the accumulated configuration request from the peer (stored in
 * pi->conf_req) and build our response in 'data': walk the options,
 * record MTU/RFC/FCS proposals, reject unknown non-hint options, settle
 * the channel mode, then emit the agreed (or counter-proposed) values.
 *
 * Fix vs. original: the 'rfc' structure is sent back on the wire, so
 * its multi-byte fields must be written with cpu_to_le16(), not read
 * with le16_to_cpu() — the original used the wrong-direction helper on
 * host-order values (pi->conn->mtu - 10, L2CAP_DEFAULT_RETRANS_TO,
 * L2CAP_DEFAULT_MONITOR_TO), which corrupts the values on big-endian
 * hosts (no-op on little-endian, hence it went unnoticed). The MPS
 * comparison likewise must convert the wire-order field to host order
 * before comparing against the host-order ACL MTU.
 */
2388 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2390 struct l2cap_pinfo *pi = l2cap_pi(sk);
2391 struct l2cap_conf_rsp *rsp = data;
2392 void *ptr = rsp->data;
2393 void *req = pi->conf_req;
2394 int len = pi->conf_len;
2395 int type, hint, olen;
2397 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2398 u16 mtu = L2CAP_DEFAULT_MTU;
2399 u16 result = L2CAP_CONF_SUCCESS;
2401 BT_DBG("sk %p", sk);
/* First pass: decode every option the peer sent. */
2403 while (len >= L2CAP_CONF_OPT_SIZE) {
2404 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2406 hint = type & L2CAP_CONF_HINT;
2407 type &= L2CAP_CONF_MASK;
2410 case L2CAP_CONF_MTU:
2414 case L2CAP_CONF_FLUSH_TO:
2418 case L2CAP_CONF_QOS:
2421 case L2CAP_CONF_RFC:
2422 if (olen == sizeof(rfc))
2423 memcpy(&rfc, (void *) val, olen);
2426 case L2CAP_CONF_FCS:
2427 if (val == L2CAP_FCS_NONE)
2428 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: list its type in an UNKNOWN response. */
2436 result = L2CAP_CONF_UNKNOWN;
2437 *((u8 *) ptr++) = type;
/* Mode negotiation only on the first config round. */
2442 if (pi->num_conf_rsp || pi->num_conf_req)
2446 case L2CAP_MODE_STREAMING:
2447 case L2CAP_MODE_ERTM:
2448 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2449 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2450 return -ECONNREFUSED;
2453 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Peer proposed a different mode: counter-propose ours once. */
2458 if (pi->mode != rfc.mode) {
2459 result = L2CAP_CONF_UNACCEPT;
2460 rfc.mode = pi->mode;
2462 if (pi->num_conf_rsp == 1)
2463 return -ECONNREFUSED;
2465 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2466 sizeof(rfc), (unsigned long) &rfc);
2470 if (result == L2CAP_CONF_SUCCESS) {
2471 /* Configure output options and let the other side know
2472 * which ones we don't like. */
2474 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2475 result = L2CAP_CONF_UNACCEPT;
2478 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2483 case L2CAP_MODE_BASIC:
2484 pi->fcs = L2CAP_FCS_NONE;
2485 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2488 case L2CAP_MODE_ERTM:
2489 pi->remote_tx_win = rfc.txwin_size;
2490 pi->remote_max_tx = rfc.max_transmit;
/* Compare in host order; clamp MPS to what fits one ACL frame. */
2491 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2492 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2494 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* Responder supplies the real timeout values, in wire byte order. */
2496 rfc.retrans_timeout =
2497 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2498 rfc.monitor_timeout =
2499 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2501 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2503 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2504 sizeof(rfc), (unsigned long) &rfc);
2508 case L2CAP_MODE_STREAMING:
2509 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2510 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2512 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2514 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2517 sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode: answer with our mode in an UNACCEPT response. */
2522 result = L2CAP_CONF_UNACCEPT;
2524 memset(&rfc, 0, sizeof(rfc));
2525 rfc.mode = pi->mode;
2528 if (result == L2CAP_CONF_SUCCESS)
2529 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2531 rsp->scid = cpu_to_le16(pi->dcid);
2532 rsp->result = cpu_to_le16(result);
2533 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's configuration response and build our next request
 * in 'data': adopt or counter-propose MTU/flush-timeout, and on RFC
 * refuse any mode change once this side has locked its mode
 * (STATE2_DEVICE). On overall success, latch the negotiated ERTM/
 * streaming parameters into the channel.
 */
2538 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2540 struct l2cap_pinfo *pi = l2cap_pi(sk);
2541 struct l2cap_conf_req *req = data;
2542 void *ptr = req->data;
2545 struct l2cap_conf_rfc rfc;
2547 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2549 while (len >= L2CAP_CONF_OPT_SIZE) {
2550 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2553 case L2CAP_CONF_MTU:
/* Peer's MTU below the spec minimum: unacceptable, counter with min. */
2554 if (val < L2CAP_DEFAULT_MIN_MTU) {
2555 *result = L2CAP_CONF_UNACCEPT;
2556 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2559 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2562 case L2CAP_CONF_FLUSH_TO:
2564 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2568 case L2CAP_CONF_RFC:
2569 if (olen == sizeof(rfc))
2570 memcpy(&rfc, (void *)val, olen);
/* Once our mode is locked, a differing peer mode is fatal. */
2572 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2573 rfc.mode != pi->mode)
2574 return -ECONNREFUSED;
2576 pi->mode = rfc.mode;
2579 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2580 sizeof(rfc), (unsigned long) &rfc);
/* Negotiation succeeded: record the agreed ERTM/streaming parameters. */
2585 if (*result == L2CAP_CONF_SUCCESS) {
2587 case L2CAP_MODE_ERTM:
2588 pi->remote_tx_win = rfc.txwin_size;
2589 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2590 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2591 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2593 case L2CAP_MODE_STREAMING:
2594 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2598 req->dcid = cpu_to_le16(pi->dcid);
2599 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal configuration response header (scid/result/flags)
 * in 'data'; the total length is returned (outside this excerpt).
 */
2604 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2606 struct l2cap_conf_rsp *rsp = data;
2607 void *ptr = rsp->data;
2609 BT_DBG("sk %p", sk);
2611 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2612 rsp->result = cpu_to_le16(result);
2613 rsp->flags = cpu_to_le16(flags);
/*
 * Extract the RFC option from a successful configuration response and
 * latch the negotiated ERTM/streaming parameters into the channel.
 * No-op for basic-mode channels.
 */
2618 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2620 struct l2cap_pinfo *pi = l2cap_pi(sk);
2623 struct l2cap_conf_rfc rfc;
2625 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2627 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2630 while (len >= L2CAP_CONF_OPT_SIZE) {
2631 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2634 case L2CAP_CONF_RFC:
2635 if (olen == sizeof(rfc))
2636 memcpy(&rfc, (void *)val, olen);
2643 case L2CAP_MODE_ERTM:
2644 pi->remote_tx_win = rfc.txwin_size;
2645 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2646 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2647 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2649 case L2CAP_MODE_STREAMING:
2650 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject on the signalling channel. If it
 * rejects our outstanding information request, treat the feature-mask
 * exchange as done and kick off pending channel connections anyway.
 */
2654 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2656 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here. */
2658 if (rej->reason != 0x0000)
2661 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2662 cmd->ident == conn->info_ident) {
2663 del_timer(&conn->info_timer);
2665 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2666 conn->info_ident = 0;
2668 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listening socket for
 * the PSM, enforce link security (except for SDP), allocate and
 * initialise a child socket, add it to the connection's channel list,
 * and reply with success / pending / error depending on security and
 * defer_setup state. If the feature-mask exchange hasn't happened yet,
 * answer "pending" and start an information request first.
 */
2674 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2676 struct l2cap_chan_list *list = &conn->chan_list;
2677 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2678 struct l2cap_conn_rsp rsp;
2679 struct sock *sk, *parent;
2680 int result, status = L2CAP_CS_NO_INFO;
2682 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2683 __le16 psm = req->psm;
2685 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2687 /* Check if we have socket listening on psm */
2688 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2690 result = L2CAP_CR_BAD_PSM;
2694 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2695 if (psm != cpu_to_le16(0x0001) &&
2696 !hci_conn_check_link_mode(conn->hcon)) {
2697 conn->disc_reason = 0x05;
2698 result = L2CAP_CR_SEC_BLOCK;
2702 result = L2CAP_CR_NO_MEM;
2704 /* Check for backlog size */
2705 if (sk_acceptq_is_full(parent)) {
2706 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2710 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2714 write_lock_bh(&list->lock);
2716 /* Check if we already have channel with that dcid */
2717 if (__l2cap_get_chan_by_dcid(list, scid)) {
2718 write_unlock_bh(&list->lock);
2719 sock_set_flag(sk, SOCK_ZAPPED);
2720 l2cap_sock_kill(sk);
2724 hci_conn_hold(conn->hcon);
2726 l2cap_sock_init(sk, parent);
2727 bacpy(&bt_sk(sk)->src, conn->src);
2728 bacpy(&bt_sk(sk)->dst, conn->dst);
2729 l2cap_pi(sk)->psm = psm;
/* Peer's source CID becomes our destination CID and vice versa. */
2730 l2cap_pi(sk)->dcid = scid;
2732 __l2cap_chan_add(conn, sk, parent);
2733 dcid = l2cap_pi(sk)->scid;
2735 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2737 l2cap_pi(sk)->ident = cmd->ident;
2739 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2740 if (l2cap_check_security(sk)) {
/* defer_setup: stay pending until userspace reads/accepts. */
2741 if (bt_sk(sk)->defer_setup) {
2742 sk->sk_state = BT_CONNECT2;
2743 result = L2CAP_CR_PEND;
2744 status = L2CAP_CS_AUTHOR_PEND;
2745 parent->sk_data_ready(parent, 0);
2747 sk->sk_state = BT_CONFIG;
2748 result = L2CAP_CR_SUCCESS;
2749 status = L2CAP_CS_NO_INFO;
2752 sk->sk_state = BT_CONNECT2;
2753 result = L2CAP_CR_PEND;
2754 status = L2CAP_CS_AUTHEN_PEND;
2757 sk->sk_state = BT_CONNECT2;
2758 result = L2CAP_CR_PEND;
2759 status = L2CAP_CS_NO_INFO;
2762 write_unlock_bh(&list->lock);
2765 bh_unlock_sock(parent);
2768 rsp.scid = cpu_to_le16(scid);
2769 rsp.dcid = cpu_to_le16(dcid);
2770 rsp.result = cpu_to_le16(result);
2771 rsp.status = cpu_to_le16(status);
2772 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: query it before configuration starts. */
2774 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2775 struct l2cap_info_req info;
2776 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2778 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2779 conn->info_ident = l2cap_get_ident(conn);
2781 mod_timer(&conn->info_timer, jiffies +
2782 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2784 l2cap_send_cmd(conn, conn->info_ident,
2785 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle a Connection Response for a channel we initiated: on success
 * move to BT_CONFIG and immediately send our configuration request;
 * on "pending" just mark the channel; anything else tears it down.
 */
2791 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2793 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2794 u16 scid, dcid, result, status;
2798 scid = __le16_to_cpu(rsp->scid);
2799 dcid = __le16_to_cpu(rsp->dcid);
2800 result = __le16_to_cpu(rsp->result);
2801 status = __le16_to_cpu(rsp->status);
2803 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid may be 0 in error responses; fall back to matching by ident. */
2806 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2810 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2816 case L2CAP_CR_SUCCESS:
2817 sk->sk_state = BT_CONFIG;
2818 l2cap_pi(sk)->ident = 0;
2819 l2cap_pi(sk)->dcid = dcid;
2820 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2822 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2824 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2825 l2cap_build_conf_req(sk, req), req);
2826 l2cap_pi(sk)->num_conf_req++;
2830 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Refused (or other failure): remove the channel. */
2834 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle a Configuration Request: accumulate option data across
 * continuation packets (flag 0x0001) in pi->conf_req, rejecting
 * requests that would overflow the buffer; once complete, parse it,
 * send our response, and — when both directions are configured —
 * finalise FCS choice, reset sequence state and mark the channel
 * ready. Also sends our own config request if not yet sent.
 */
2842 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2844 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2850 dcid = __le16_to_cpu(req->dcid);
2851 flags = __le16_to_cpu(req->flags);
2853 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2855 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2859 if (sk->sk_state == BT_DISCONN)
2862 /* Reject if config buffer is too small. */
2863 len = cmd_len - sizeof(*req);
2864 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2865 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2866 l2cap_build_conf_rsp(sk, rsp,
2867 L2CAP_CONF_REJECT, flags), rsp);
2872 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2873 l2cap_pi(sk)->conf_len += len;
2875 if (flags & 0x0001) {
2876 /* Incomplete config. Send empty response. */
2877 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2878 l2cap_build_conf_rsp(sk, rsp,
2879 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2883 /* Complete config. */
2884 len = l2cap_parse_conf_req(sk, rsp);
2886 l2cap_send_disconn_req(conn, sk);
2890 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2891 l2cap_pi(sk)->num_conf_rsp++;
2893 /* Reset config buffer. */
2894 l2cap_pi(sk)->conf_len = 0;
2896 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: decide FCS and bring the channel up. */
2899 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2900 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2901 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2902 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2904 sk->sk_state = BT_CONNECTED;
2906 l2cap_pi(sk)->next_tx_seq = 0;
2907 l2cap_pi(sk)->expected_tx_seq = 0;
2908 __skb_queue_head_init(TX_QUEUE(sk));
2909 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2910 l2cap_ertm_init(sk);
2912 l2cap_chan_ready(sk);
2916 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2918 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2919 l2cap_build_conf_req(sk, buf), buf);
2920 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a Configuration Response: on success extract the negotiated
 * RFC parameters; on "unaccepted", re-parse the peer's counter-options
 * and retry (bounded by L2CAP_CONF_MAX_CONF_RSP); anything else tears
 * the channel down. When both directions are done, finalise FCS,
 * reset sequence state and mark the channel ready.
 *
 * Fix vs. original: cmd->len is a __le16 wire field; the payload
 * length must be computed with le16_to_cpu(cmd->len), matching how
 * l2cap_sig_channel() derives cmd_len and how the sibling
 * l2cap_config_req() consumes an already-converted length. Using the
 * raw field yields a garbage length on big-endian hosts.
 */
2928 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2930 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2931 u16 scid, flags, result;
2933 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2935 scid = __le16_to_cpu(rsp->scid);
2936 flags = __le16_to_cpu(rsp->flags);
2937 result = __le16_to_cpu(rsp->result);
2939 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2940 scid, flags, result);
2942 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2947 case L2CAP_CONF_SUCCESS:
2948 l2cap_conf_rfc_get(sk, rsp->data, len);
2951 case L2CAP_CONF_UNACCEPT:
/* Retry with the peer's counter-proposal, a bounded number of times. */
2952 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against option data overflowing the request buffer. */
2955 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2956 l2cap_send_disconn_req(conn, sk);
2960 /* throw out any old stored conf requests */
2961 result = L2CAP_CONF_SUCCESS;
2962 len = l2cap_parse_conf_rsp(sk, rsp->data,
2965 l2cap_send_disconn_req(conn, sk);
2969 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2970 L2CAP_CONF_REQ, len, req);
2971 l2cap_pi(sk)->num_conf_req++;
2972 if (result != L2CAP_CONF_SUCCESS)
/* Rejection or retry budget exhausted: disconnect the channel. */
2978 sk->sk_state = BT_DISCONN;
2979 sk->sk_err = ECONNRESET;
2980 l2cap_sock_set_timer(sk, HZ * 5);
2981 l2cap_send_disconn_req(conn, sk);
2988 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions configured: decide FCS and bring the channel up. */
2990 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2991 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2992 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2993 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2995 sk->sk_state = BT_CONNECTED;
2996 l2cap_pi(sk)->next_tx_seq = 0;
2997 l2cap_pi(sk)->expected_tx_seq = 0;
2998 __skb_queue_head_init(TX_QUEUE(sk));
2999 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3000 l2cap_ertm_init(sk);
3002 l2cap_chan_ready(sk);
/*
 * Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, drop all queued TX data (plus ERTM queues/timers), remove
 * the channel and kill the socket.
 */
3010 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3012 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3013 struct l2cap_disconn_rsp rsp;
3017 scid = __le16_to_cpu(req->scid);
3018 dcid = __le16_to_cpu(req->dcid);
3020 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid — look the channel up by it. */
3022 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3026 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3027 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3028 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3030 sk->sk_shutdown = SHUTDOWN_MASK;
3032 skb_queue_purge(TX_QUEUE(sk));
/* ERTM channels also carry SREJ state and three live timers. */
3034 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3035 skb_queue_purge(SREJ_QUEUE(sk));
3036 del_timer(&l2cap_pi(sk)->retrans_timer);
3037 del_timer(&l2cap_pi(sk)->monitor_timer);
3038 del_timer(&l2cap_pi(sk)->ack_timer);
3041 l2cap_chan_del(sk, ECONNRESET);
3044 l2cap_sock_kill(sk);
/*
 * Handle a Disconnection Response to our own request: same teardown
 * as the request path (purge queues, stop ERTM timers, delete the
 * channel) but with no error reported to the socket.
 */
3048 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3050 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3054 scid = __le16_to_cpu(rsp->scid);
3055 dcid = __le16_to_cpu(rsp->dcid);
3057 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3059 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3063 skb_queue_purge(TX_QUEUE(sk));
3065 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3066 skb_queue_purge(SREJ_QUEUE(sk));
3067 del_timer(&l2cap_pi(sk)->retrans_timer);
3068 del_timer(&l2cap_pi(sk)->monitor_timer);
3069 del_timer(&l2cap_pi(sk)->ack_timer);
/* err == 0: this was our own requested disconnect, not a failure. */
3072 l2cap_chan_del(sk, 0);
3075 l2cap_sock_kill(sk);
/*
 * Handle an Information Request: answer feature-mask queries (adding
 * ERTM/streaming/FCS bits when enable_ertm is set) and fixed-channel
 * queries; anything else gets a "not supported" response.
 */
3079 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3081 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3084 type = __le16_to_cpu(req->type);
3086 BT_DBG("type 0x%4.4x", type);
3088 if (type == L2CAP_IT_FEAT_MASK) {
3090 u32 feat_mask = l2cap_feat_mask;
3091 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3092 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3093 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3095 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Feature mask payload is a little-endian u32 after the rsp header. */
3097 put_unaligned_le32(feat_mask, rsp->data);
3098 l2cap_send_cmd(conn, cmd->ident,
3099 L2CAP_INFO_RSP, sizeof(buf), buf);
3100 } else if (type == L2CAP_IT_FIXED_CHAN) {
3102 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3103 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3104 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte rsp header. */
3105 memcpy(buf + 4, l2cap_fixed_chan, 8);
3106 l2cap_send_cmd(conn, cmd->ident,
3107 L2CAP_INFO_RSP, sizeof(buf), buf);
3109 struct l2cap_info_rsp rsp;
3110 rsp.type = cpu_to_le16(type);
3111 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3112 l2cap_send_cmd(conn, cmd->ident,
3113 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response: store the peer's feature mask, and
 * if it advertises fixed channels, chain a fixed-channel query before
 * declaring the info exchange done and starting pending channels.
 */
3119 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3121 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3124 type = __le16_to_cpu(rsp->type);
3125 result = __le16_to_cpu(rsp->result);
3127 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A response arrived, so the info-request timeout is no longer needed. */
3129 del_timer(&conn->info_timer);
3131 if (type == L2CAP_IT_FEAT_MASK) {
3132 conn->feat_mask = get_unaligned_le32(rsp->data);
3134 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3135 struct l2cap_info_req req;
3136 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3138 conn->info_ident = l2cap_get_ident(conn);
3140 l2cap_send_cmd(conn, conn->info_ident,
3141 L2CAP_INFO_REQ, sizeof(req), &req);
3143 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3144 conn->info_ident = 0;
3146 l2cap_conn_start(conn);
3148 } else if (type == L2CAP_IT_FIXED_CHAN) {
3149 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3150 conn->info_ident = 0;
3152 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001): walk every
 * command header in the skb, dispatch to the matching handler, and send
 * a Command Reject back for any handler that fails. */
3158 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3160 u8 *data = skb->data;
3162 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
3165 l2cap_raw_recv(conn, skb);
/* A single ACL frame may carry several signalling commands. */
3167 while (len >= L2CAP_CMD_HDR_SIZE) {
3169 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3170 data += L2CAP_CMD_HDR_SIZE;
3171 len -= L2CAP_CMD_HDR_SIZE;
3173 cmd_len = le16_to_cpu(cmd.len);
3175 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Sanity: claimed length must fit, and ident 0 is reserved. */
3177 if (cmd_len > len || !cmd.ident) {
3178 BT_DBG("corrupted command");
3183 case L2CAP_COMMAND_REJ:
3184 l2cap_command_rej(conn, &cmd, data);
3187 case L2CAP_CONN_REQ:
3188 err = l2cap_connect_req(conn, &cmd, data);
3191 case L2CAP_CONN_RSP:
3192 err = l2cap_connect_rsp(conn, &cmd, data);
3195 case L2CAP_CONF_REQ:
3196 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3199 case L2CAP_CONF_RSP:
3200 err = l2cap_config_rsp(conn, &cmd, data);
3203 case L2CAP_DISCONN_REQ:
3204 err = l2cap_disconnect_req(conn, &cmd, data);
3207 case L2CAP_DISCONN_RSP:
3208 err = l2cap_disconnect_rsp(conn, &cmd, data);
/* Echo requests are answered inline by mirroring the payload. */
3211 case L2CAP_ECHO_REQ:
3212 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3215 case L2CAP_ECHO_RSP:
3218 case L2CAP_INFO_REQ:
3219 err = l2cap_information_req(conn, &cmd, data);
3222 case L2CAP_INFO_RSP:
3223 err = l2cap_information_rsp(conn, &cmd, data);
3227 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Any handler error turns into a Command Reject to the peer. */
3233 struct l2cap_cmd_rej rej;
3234 BT_DBG("error %d", err);
3236 /* FIXME: Map err to a valid reason */
3237 rej.reason = cpu_to_le16(0);
3238 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * Trims the 2 FCS bytes off the skb first; get_unaligned_le16() then
 * reads them from just past the new skb->len (the data is still there,
 * skb_trim only shrinks the length).  The CRC covers the L2CAP header
 * (hdr_size bytes before skb->data) plus the remaining payload. */
3248 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3250 u16 our_fcs, rcv_fcs;
3251 int hdr_size = L2CAP_HDR_SIZE + 2;
/* FCS is only present when CRC16 mode was negotiated. */
3253 if (pi->fcs == L2CAP_FCS_CRC16) {
3254 skb_trim(skb, skb->len - 2);
3255 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3256 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3258 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: respond with an RNR if we are
 * locally busy, otherwise flush pending I-frames, and fall back to an
 * RR if nothing was actually transmitted.  Exactly one frame carries
 * the F-bit (tracked via L2CAP_CONN_SEND_FBIT). */
3264 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3266 struct l2cap_pinfo *pi = l2cap_pi(sk);
3269 pi->frames_sent = 0;
3270 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
/* Every frame we send acknowledges up to buffer_seq. */
3272 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3274 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3275 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3276 l2cap_send_sframe(pi, control);
3277 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3280 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3281 __mod_retrans_timer();
/* Try to carry the F-bit on outstanding I-frames first. */
3283 l2cap_ertm_send(sk);
/* No I-frame went out and we are not busy: send a plain RR. */
3285 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3286 pi->frames_sent == 0) {
3287 control |= L2CAP_SUPER_RCV_READY;
3288 l2cap_send_sframe(pi, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq so reassembly can later drain it in order.  The
 * frame's sequence number and SAR bits are stashed in the skb cb. */
3292 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3294 struct sk_buff *next_skb;
3296 bt_cb(skb)->tx_seq = tx_seq;
3297 bt_cb(skb)->sar = sar;
/* Empty queue: just append. */
3299 next_skb = skb_peek(SREJ_QUEUE(sk));
3301 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Walk the queue and insert before the first larger tx_seq. */
3306 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3307 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3311 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3314 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Larger than everything queued: append at the tail. */
3316 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble a segmented SDU from ERTM/streaming I-frames according to
 * the SAR bits in the control field, delivering complete SDUs to the
 * socket receive queue.  CONN_SAR_SDU tracks "reassembly in progress". */
3319 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3321 struct l2cap_pinfo *pi = l2cap_pi(sk);
3322 struct sk_buff *_skb;
3325 switch (control & L2CAP_CTRL_SAR) {
/* Unsegmented SDU: must not arrive mid-reassembly; queue directly. */
3326 case L2CAP_SDU_UNSEGMENTED:
3327 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3332 err = sock_queue_rcv_skb(sk, skb);
3338 case L2CAP_SDU_START:
3339 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* Start segment carries the total SDU length in its first 2 bytes. */
3344 pi->sdu_len = get_unaligned_le16(skb->data);
3347 if (pi->sdu_len > pi->imtu) {
3352 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3358 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3360 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3361 pi->partial_sdu_len = skb->len;
3365 case L2CAP_SDU_CONTINUE:
3366 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3369 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* Guard against the peer sending more than it announced. */
3371 pi->partial_sdu_len += skb->len;
3372 if (pi->partial_sdu_len > pi->sdu_len)
/* END segment: finish reassembly and hand the SDU up. */
3380 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3383 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3385 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3386 pi->partial_sdu_len += skb->len;
3388 if (pi->partial_sdu_len > pi->imtu)
3391 if (pi->partial_sdu_len == pi->sdu_len) {
/* NOTE(review): skb_clone(GFP_ATOMIC) can return NULL and the result
 * is passed straight to sock_queue_rcv_skb() on the very next source
 * line — no NULL check exists between them.  Potential NULL deref
 * under memory pressure; should be fixed. */
3392 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3393 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame was retransmitted, drain the head of the SREJ
 * queue for as long as it continues the expected sequence, pushing each
 * frame into SDU reassembly and advancing buffer_seq_srej (mod 64). */
3408 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3410 struct sk_buff *skb;
3413 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first gap in the sequence. */
3414 if (bt_cb(skb)->tx_seq != tx_seq)
3417 skb = skb_dequeue(SREJ_QUEUE(sk));
3418 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3419 l2cap_sar_reassembly_sdu(sk, skb, control);
3420 l2cap_pi(sk)->buffer_seq_srej =
3421 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-send a pending SREJ for tx_seq: walk the SREJ list, emit a fresh
 * SREJ S-frame for the matching entry, and move that entry to the tail
 * of the list (most recently requested last). */
3426 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3428 struct l2cap_pinfo *pi = l2cap_pi(sk);
3429 struct srej_list *l, *tmp;
/* _safe variant: the matching entry is unlinked/re-linked inside. */
3432 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3433 if (l->tx_seq == tx_seq) {
3438 control = L2CAP_SUPER_SELECT_REJECT;
3439 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3440 l2cap_send_sframe(pi, control);
3442 list_add_tail(&l->list, SREJ_LIST(sk));
3446 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3448 struct l2cap_pinfo *pi = l2cap_pi(sk);
3449 struct srej_list *new;
3452 while (tx_seq != pi->expected_tx_seq) {
3453 control = L2CAP_SUPER_SELECT_REJECT;
3454 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3455 l2cap_send_sframe(pi, control);
3457 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3458 new->tx_seq = pi->expected_tx_seq++;
3459 list_add_tail(&new->list, SREJ_LIST(sk));
3461 pi->expected_tx_seq++;
/* ERTM receive path for I-frames: process the piggybacked ack (ReqSeq),
 * handle in-order vs out-of-order TxSeq (starting/continuing SREJ
 * recovery for gaps), push in-order data into SDU reassembly, and ack
 * every num_to_ack frames.  This is the core ERTM receiver state
 * machine — statement order here is significant. */
3465 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3466 struct l2cap_pinfo *pi = l2cap_pi(sk);
3467 u8 tx_seq = __get_txseq(rx_control);
3468 u8 req_seq = __get_reqseq(rx_control);
3469 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* Ack threshold: roughly one ack per sixth of the TX window. */
3470 int num_to_ack = (pi->tx_win/6) + 1;
3473 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer, resume retransmit. */
3475 if (L2CAP_CTRL_FINAL & rx_control) {
3476 del_timer(&pi->monitor_timer);
3477 if (pi->unacked_frames > 0)
3478 __mod_retrans_timer();
3479 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* ReqSeq acknowledges our outstanding I-frames. */
3482 pi->expected_ack_seq = req_seq;
3483 l2cap_drop_acked_frames(sk);
3485 if (tx_seq == pi->expected_tx_seq)
/* Out-of-sequence frame while SREJ recovery is already running. */
3488 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3489 struct srej_list *first;
3491 first = list_first_entry(SREJ_LIST(sk),
3492 struct srej_list, list);
/* This is the retransmission we asked for first: close the gap. */
3493 if (tx_seq == first->tx_seq) {
3494 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3495 l2cap_check_srej_gap(sk, tx_seq);
3497 list_del(&first->list);
/* All requested frames recovered: leave SREJ state. */
3500 if (list_empty(SREJ_LIST(sk))) {
3501 pi->buffer_seq = pi->buffer_seq_srej;
3502 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3506 struct srej_list *l;
3507 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3509 list_for_each_entry(l, SREJ_LIST(sk), list) {
3510 if (l->tx_seq == tx_seq) {
3511 l2cap_resend_srejframe(sk, tx_seq);
3515 l2cap_send_srejframe(sk, tx_seq);
/* First gap detected: enter SREJ recovery. */
3518 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3520 INIT_LIST_HEAD(SREJ_LIST(sk));
3521 pi->buffer_seq_srej = pi->buffer_seq;
3523 __skb_queue_head_init(SREJ_QUEUE(sk));
3524 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3526 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3528 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence frame: advance the 6-bit sequence space (mod 64). */
3533 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3535 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3536 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* F-bit clears a pending REJ and restarts transmission from the ack. */
3540 if (rx_control & L2CAP_CTRL_FINAL) {
3541 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3542 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3544 sk->sk_send_head = TX_QUEUE(sk)->next;
3545 pi->next_tx_seq = pi->expected_ack_seq;
3546 l2cap_ertm_send(sk);
3550 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3552 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Periodic acknowledgement to keep the peer's window open. */
3558 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3559 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready S-frame: ack outstanding I-frames, then react
 * to the P-bit (peer is polling us) or F-bit (answer to our poll), and
 * otherwise resume normal transmission. */
3565 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3567 struct l2cap_pinfo *pi = l2cap_pi(sk);
3569 pi->expected_ack_seq = __get_reqseq(rx_control);
3570 l2cap_drop_acked_frames(sk);
/* Peer polls us: answer with F-bit set. */
3572 if (rx_control & L2CAP_CTRL_POLL) {
3573 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3574 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3575 (pi->unacked_frames > 0))
3576 __mod_retrans_timer();
3578 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* In SREJ recovery, the F-bit reply rides on the tail SREJ. */
3579 l2cap_send_srejtail(sk);
3581 l2cap_send_i_or_rr_or_rnr(sk);
3582 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* F-bit answers our poll: retransmit from the acknowledged point. */
3585 } else if (rx_control & L2CAP_CTRL_FINAL) {
3586 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3588 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3589 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3591 sk->sk_send_head = TX_QUEUE(sk)->next;
3592 pi->next_tx_seq = pi->expected_ack_seq;
3593 l2cap_ertm_send(sk);
/* Plain RR: peer is ready again, keep sending. */
3597 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3598 (pi->unacked_frames > 0))
3599 __mod_retrans_timer();
3601 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3602 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3605 l2cap_ertm_send(sk);
/* Handle a Reject S-frame: the peer asks for a go-back-N retransmission
 * starting at ReqSeq.  Drop acked frames, rewind next_tx_seq, and
 * resend; REJ_ACT suppresses duplicate reactions until the F-bit. */
3609 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3611 struct l2cap_pinfo *pi = l2cap_pi(sk);
3612 u8 tx_seq = __get_reqseq(rx_control);
3614 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3616 pi->expected_ack_seq = tx_seq;
3617 l2cap_drop_acked_frames(sk);
3619 if (rx_control & L2CAP_CTRL_FINAL) {
3620 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3621 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* Rewind the send queue to the first unacknowledged frame. */
3623 sk->sk_send_head = TX_QUEUE(sk)->next;
3624 pi->next_tx_seq = pi->expected_ack_seq;
3625 l2cap_ertm_send(sk);
3628 sk->sk_send_head = TX_QUEUE(sk)->next;
3629 pi->next_tx_seq = pi->expected_ack_seq;
3630 l2cap_ertm_send(sk);
/* While waiting for an F-bit, remember this REJ was acted upon. */
3632 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3633 pi->srej_save_reqseq = tx_seq;
3634 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject S-frame: retransmit exactly the frame with
 * sequence number ReqSeq.  SREJ_ACT mirrors REJ_ACT — it remembers the
 * SREJ we already acted on until the matching F-bit arrives. */
3638 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3640 struct l2cap_pinfo *pi = l2cap_pi(sk);
3641 u8 tx_seq = __get_reqseq(rx_control);
3643 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* SREJ with P-bit: also acknowledges everything before tx_seq. */
3645 if (rx_control & L2CAP_CTRL_POLL) {
3646 pi->expected_ack_seq = tx_seq;
3647 l2cap_drop_acked_frames(sk);
3648 l2cap_retransmit_frame(sk, tx_seq);
3649 l2cap_ertm_send(sk);
3650 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3651 pi->srej_save_reqseq = tx_seq;
3652 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* SREJ with F-bit: skip the retransmit if we already did it. */
3654 } else if (rx_control & L2CAP_CTRL_FINAL) {
3655 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3656 pi->srej_save_reqseq == tx_seq)
3657 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3659 l2cap_retransmit_frame(sk, tx_seq);
/* Plain SREJ: just retransmit the requested frame. */
3661 l2cap_retransmit_frame(sk, tx_seq);
3662 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3663 pi->srej_save_reqseq = tx_seq;
3664 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready S-frame: mark the peer busy (which stops
 * new transmissions elsewhere), ack what it has received, and answer a
 * P-bit poll appropriately. */
3669 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3671 struct l2cap_pinfo *pi = l2cap_pi(sk);
3672 u8 tx_seq = __get_reqseq(rx_control);
3674 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3675 pi->expected_ack_seq = tx_seq;
3676 l2cap_drop_acked_frames(sk);
3678 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer can't take more: no point running the retransmit timer. */
3679 del_timer(&pi->retrans_timer);
3680 if (rx_control & L2CAP_CTRL_POLL) {
3681 u16 control = L2CAP_CTRL_FINAL;
3682 l2cap_send_rr_or_rnr(pi, control);
/* In SREJ recovery, answer the poll with the pending SREJ tail. */
3687 if (rx_control & L2CAP_CTRL_POLL)
3688 l2cap_send_srejtail(sk);
3690 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received supervisory (S) frame to the handler matching its
 * supervise bits (RR / REJ / SREJ / RNR).  The F-bit is processed once
 * here for all S-frame types. */
3693 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3695 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our poll: stop monitoring, resume retransmit timer. */
3697 if (L2CAP_CTRL_FINAL & rx_control) {
3698 del_timer(&l2cap_pi(sk)->monitor_timer);
3699 if (l2cap_pi(sk)->unacked_frames > 0)
3700 __mod_retrans_timer();
3701 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3704 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3705 case L2CAP_SUPER_RCV_READY:
3706 l2cap_data_channel_rrframe(sk, rx_control);
3709 case L2CAP_SUPER_REJECT:
3710 l2cap_data_channel_rejframe(sk, rx_control);
3713 case L2CAP_SUPER_SELECT_REJECT:
3714 l2cap_data_channel_srejframe(sk, rx_control);
3717 case L2CAP_SUPER_RCV_NOT_READY:
3718 l2cap_data_channel_rnrframe(sk, rx_control);
/* Deliver an incoming data frame to the channel identified by CID,
 * handling it according to the channel's negotiated mode: basic
 * (queue as-is), ERTM (control field + FCS + I/S-frame dispatch), or
 * streaming (sequence tracking without retransmission). */
3726 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3729 struct l2cap_pinfo *pi;
3733 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3735 BT_DBG("unknown cid 0x%4.4x", cid);
3741 BT_DBG("sk %p, len %d", sk, skb->len);
3743 if (sk->sk_state != BT_CONNECTED)
3747 case L2CAP_MODE_BASIC:
3748 /* If socket recv buffers overflows we drop data here
3749 * which is *bad* because L2CAP has to be reliable.
3750 * But we don't have any other choice. L2CAP doesn't
3751 * provide flow control mechanism. */
/* Drop frames larger than the negotiated incoming MTU. */
3753 if (pi->imtu < skb->len)
3756 if (!sock_queue_rcv_skb(sk, skb))
3760 case L2CAP_MODE_ERTM:
/* First two bytes after the L2CAP header are the control field. */
3761 control = get_unaligned_le16(skb->data)
3765 if (__is_sar_start(control))
3768 if (pi->fcs == L2CAP_FCS_CRC16)
3772 * We can just drop the corrupted I-frame here.
3773 * Receiver will miss it and start proper recovery
3774 * procedures and ask retransmission.
3779 if (l2cap_check_fcs(pi, skb))
3782 if (__is_iframe(control)) {
3786 l2cap_data_channel_iframe(sk, control, skb);
3791 l2cap_data_channel_sframe(sk, control, skb);
3796 case L2CAP_MODE_STREAMING:
3797 control = get_unaligned_le16(skb->data);
3801 if (__is_sar_start(control))
3804 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries I-frames only; enforce MPS and min size. */
3807 if (len > pi->mps || len < 4 || __is_sframe(control))
3810 if (l2cap_check_fcs(pi, skb))
3813 tx_seq = __get_txseq(control);
/* No retransmission in streaming mode: just resync on gaps. */
3815 if (pi->expected_tx_seq == tx_seq)
3816 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3818 pi->expected_tx_seq = (tx_seq + 1) % 64;
3820 l2cap_sar_reassembly_sdu(sk, skb, control);
3825 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless-channel (CID 0x0002) frame to the socket
 * bound to the given PSM, applying the same MTU check as basic mode. */
3839 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3843 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3847 BT_DBG("sk %p, len %d", sk, skb->len);
3849 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3852 if (l2cap_pi(sk)->imtu < skb->len)
3855 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level demultiplexer for a complete L2CAP frame: parse the basic
 * header, validate the length, and route by CID to the signalling,
 * connectionless, or connection-oriented data path. */
3867 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3869 struct l2cap_hdr *lh = (void *) skb->data;
3873 skb_pull(skb, L2CAP_HDR_SIZE);
3874 cid = __le16_to_cpu(lh->cid);
3875 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length exactly. */
3877 if (len != skb->len) {
3882 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3885 case L2CAP_CID_SIGNALING:
3886 l2cap_sig_channel(conn, skb);
3889 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the target PSM as the first 2 bytes. */
3890 psm = get_unaligned_le16(skb->data);
3892 l2cap_conless_channel(conn, psm, skb);
3896 l2cap_data_channel(conn, cid, skb);
3901 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening L2CAP sockets; an exact local-address match (lm1)
 * takes precedence over wildcard BDADDR_ANY listeners (lm2).  Returns
 * the accept/master link-mode flags, or 0 to reject. */
3903 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3905 int exact = 0, lm1 = 0, lm2 = 0;
3906 register struct sock *sk;
3907 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
3909 if (type != ACL_LINK)
3912 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3914 /* Find listening sockets and check their link_mode */
3915 read_lock(&l2cap_sk_list.lock);
3916 sk_for_each(sk, node, &l2cap_sk_list.head) {
3917 if (sk->sk_state != BT_LISTEN)
3920 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3921 lm1 |= HCI_LM_ACCEPT;
3922 if (l2cap_pi(sk)->role_switch)
3923 lm1 |= HCI_LM_MASTER;
3925 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3926 lm2 |= HCI_LM_ACCEPT;
3927 if (l2cap_pi(sk)->role_switch)
3928 lm2 |= HCI_LM_MASTER;
3931 read_unlock(&l2cap_sk_list.lock);
3933 return exact ? lm1 : lm2;
/* HCI callback: an ACL connection attempt completed.  On success attach
 * (or create) the L2CAP connection object and mark it ready; on failure
 * tear it down with the mapped errno. */
3936 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3938 struct l2cap_conn *conn;
3940 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3942 if (hcon->type != ACL_LINK)
3946 conn = l2cap_conn_add(hcon, status);
3948 l2cap_conn_ready(conn);
3950 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the disconnect reason recorded for this
 * connection (used by the core when the link goes down). */
3955 static int l2cap_disconn_ind(struct hci_conn *hcon)
3957 struct l2cap_conn *conn = hcon->l2cap_data;
3959 BT_DBG("hcon %p", hcon);
3961 if (hcon->type != ACL_LINK || !conn)
3964 return conn->disc_reason;
/* HCI callback: the ACL link was disconnected — tear down the whole
 * L2CAP connection and all of its channels. */
3967 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3969 BT_DBG("hcon %p reason %d", hcon, reason);
3971 if (hcon->type != ACL_LINK)
3974 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel: if encryption
 * was dropped, give MEDIUM-security channels a 5 s grace timer and
 * close HIGH-security ones immediately; if restored, clear the timer. */
3979 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
/* Only connection-oriented (SEQPACKET) channels enforce this. */
3981 if (sk->sk_type != SOCK_SEQPACKET)
3984 if (encrypt == 0x00) {
3985 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3986 l2cap_sock_clear_timer(sk);
3987 l2cap_sock_set_timer(sk, HZ * 5);
3988 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3989 __l2cap_sock_close(sk, ECONNREFUSED);
3991 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3992 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure finished with
 * `status` (0 = success).  Walk every channel on the connection and
 * advance the ones that were waiting on security: established channels
 * get an encryption check, BT_CONNECT channels send their deferred
 * Connection Request, and BT_CONNECT2 channels answer the pending
 * incoming request with success or a security block. */
3996 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3998 struct l2cap_chan_list *l;
3999 struct l2cap_conn *conn = hcon->l2cap_data;
4005 l = &conn->chan_list;
4007 BT_DBG("conn %p", conn);
4009 read_lock(&l->lock);
4011 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels whose connect is still pending elsewhere. */
4014 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4019 if (!status && (sk->sk_state == BT_CONNECTED ||
4020 sk->sk_state == BT_CONFIG)) {
4021 l2cap_check_encryption(sk, encrypt);
/* Security now satisfied: fire the Connection Request we held back. */
4026 if (sk->sk_state == BT_CONNECT) {
4028 struct l2cap_conn_req req;
4029 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4030 req.psm = l2cap_pi(sk)->psm;
4032 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4034 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4035 L2CAP_CONN_REQ, sizeof(req), &req);
4037 l2cap_sock_clear_timer(sk);
4038 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming request was parked pending security: answer it now. */
4040 } else if (sk->sk_state == BT_CONNECT2) {
4041 struct l2cap_conn_rsp rsp;
4045 sk->sk_state = BT_CONFIG;
4046 result = L2CAP_CR_SUCCESS;
4048 sk->sk_state = BT_DISCONN;
4049 l2cap_sock_set_timer(sk, HZ / 10);
4050 result = L2CAP_CR_SEC_BLOCK;
4053 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4054 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4055 rsp.result = cpu_to_le16(result);
4056 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4057 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4058 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4064 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived.  Reassembles fragmented L2CAP
 * frames across ACL packets: an ACL_START fragment parses the L2CAP
 * header to learn the total length and allocates conn->rx_skb; later
 * continuation fragments are appended until rx_len reaches zero, at
 * which point the complete frame goes to l2cap_recv_frame().  Any
 * inconsistency marks the connection unreliable (ECOMM). */
4069 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4071 struct l2cap_conn *conn = hcon->l2cap_data;
4073 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4076 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4078 if (flags & ACL_START) {
4079 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is in progress: drop the old one. */
4083 BT_ERR("Unexpected start frame (len %d)", skb->len);
4084 kfree_skb(conn->rx_skb);
4085 conn->rx_skb = NULL;
4087 l2cap_conn_unreliable(conn, ECOMM);
4091 BT_ERR("Frame is too short (len %d)", skb->len);
4092 l2cap_conn_unreliable(conn, ECOMM);
4096 hdr = (struct l2cap_hdr *) skb->data;
4097 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4099 if (len == skb->len) {
4100 /* Complete frame received */
4101 l2cap_recv_frame(conn, skb);
4105 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4107 if (skb->len > len) {
4108 BT_ERR("Frame is too long (len %d, expected len %d)",
4110 l2cap_conn_unreliable(conn, ECOMM);
4114 /* Allocate skb for the complete frame (with header) */
4115 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4119 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len counts the bytes still missing from the frame. */
4121 conn->rx_len = len - skb->len;
4123 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4125 if (!conn->rx_len) {
4126 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4127 l2cap_conn_unreliable(conn, ECOMM);
4131 if (skb->len > conn->rx_len) {
4132 BT_ERR("Fragment is too long (len %d, expected %d)",
4133 skb->len, conn->rx_len);
4134 kfree_skb(conn->rx_skb);
4135 conn->rx_skb = NULL;
4137 l2cap_conn_unreliable(conn, ECOMM);
4141 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4143 conn->rx_len -= skb->len;
4145 if (!conn->rx_len) {
4146 /* Complete frame received */
4147 l2cap_recv_frame(conn, conn->rx_skb);
4148 conn->rx_skb = NULL;
/* debugfs seq_file show handler: dump one line per L2CAP socket with
 * addresses, state, PSM, CIDs, MTUs and security level. */
4157 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4160 struct hlist_node *node;
/* _bh variant: the list is also touched from softirq context. */
4162 read_lock_bh(&l2cap_sk_list.lock);
4164 sk_for_each(sk, node, &l2cap_sk_list.head) {
4165 struct l2cap_pinfo *pi = l2cap_pi(sk);
4167 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4168 batostr(&bt_sk(sk)->src),
4169 batostr(&bt_sk(sk)->dst),
4170 sk->sk_state, __le16_to_cpu(pi->psm),
4172 pi->imtu, pi->omtu, pi->sec_level);
4175 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open handler: standard single_open() seq_file boilerplate. */
4180 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4182 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/.../l2cap entry. */
4185 static const struct file_operations l2cap_debugfs_fops = {
4186 .open = l2cap_debugfs_open,
4188 .llseek = seq_lseek,
4189 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
4192 static struct dentry *l2cap_debugfs;
/* Socket operations exposed to userspace for PF_BLUETOOTH/BTPROTO_L2CAP
 * sockets; generic bt_/sock_no_ helpers fill the slots L2CAP does not
 * implement itself. */
4194 static const struct proto_ops l2cap_sock_ops = {
4195 .family = PF_BLUETOOTH,
4196 .owner = THIS_MODULE,
4197 .release = l2cap_sock_release,
4198 .bind = l2cap_sock_bind,
4199 .connect = l2cap_sock_connect,
4200 .listen = l2cap_sock_listen,
4201 .accept = l2cap_sock_accept,
4202 .getname = l2cap_sock_getname,
4203 .sendmsg = l2cap_sock_sendmsg,
4204 .recvmsg = l2cap_sock_recvmsg,
4205 .poll = bt_sock_poll,
4206 .ioctl = bt_sock_ioctl,
4207 .mmap = sock_no_mmap,
4208 .socketpair = sock_no_socketpair,
4209 .shutdown = l2cap_sock_shutdown,
4210 .setsockopt = l2cap_sock_setsockopt,
4211 .getsockopt = l2cap_sock_getsockopt
/* Registered with the Bluetooth socket layer so socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) creates L2CAP sockets. */
4214 static const struct net_proto_family l2cap_sock_family_ops = {
4215 .family = PF_BLUETOOTH,
4216 .owner = THIS_MODULE,
4217 .create = l2cap_sock_create,
/* Callbacks registered with the HCI core: connection lifecycle,
 * security results, and inbound ACL data delivery. */
4220 static struct hci_proto l2cap_hci_proto = {
4222 .id = HCI_PROTO_L2CAP,
4223 .connect_ind = l2cap_connect_ind,
4224 .connect_cfm = l2cap_connect_cfm,
4225 .disconn_ind = l2cap_disconn_ind,
4226 .disconn_cfm = l2cap_disconn_cfm,
4227 .security_cfm = l2cap_security_cfm,
4228 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the BT socket family and the HCI
 * protocol in order, unwinding on failure; the debugfs file is
 * best-effort (only logged on error). */
4231 static int __init l2cap_init(void)
4235 err = proto_register(&l2cap_proto, 0);
4239 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4241 BT_ERR("L2CAP socket registration failed");
4245 err = hci_register_proto(&l2cap_hci_proto);
4247 BT_ERR("L2CAP protocol registration failed");
/* Roll back the socket registration before bailing out. */
4248 bt_sock_unregister(BTPROTO_L2CAP);
4253 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4254 bt_debugfs, NULL, &l2cap_debugfs_fops);
4256 BT_ERR("Failed to create L2CAP debug file");
4259 BT_INFO("L2CAP ver %s", VERSION);
4260 BT_INFO("L2CAP socket layer initialized");
4265 proto_unregister(&l2cap_proto);
/* Module exit: undo l2cap_init() registrations in reverse order. */
4269 static void __exit l2cap_exit(void)
4271 debugfs_remove(l2cap_debugfs);
4273 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4274 BT_ERR("L2CAP socket unregistration failed");
4276 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4277 BT_ERR("L2CAP protocol unregistration failed");
4279 proto_unregister(&l2cap_proto);
/* Empty exported symbol: referencing it from another module makes the
 * kernel module loader pull this module in automatically. */
4282 void l2cap_load(void)
4284 /* Dummy function to trigger automatic L2CAP module loading by
4285 * other modules that use L2CAP sockets but don't use any other
4286 * symbols from it. */
4289 EXPORT_SYMBOL(l2cap_load);
4291 module_init(l2cap_init);
4292 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared `static int` at the top of the
 * file but registered here with the `bool` param type; likewise
 * max_transmit/tx_window are `int` but registered as `uint`.  Older
 * kernels tolerated this, newer ones enforce exact type matching —
 * confirm against the target kernel and align the variable types. */
4294 module_param(enable_ertm, bool, 0644);
4295 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4297 module_param(max_transmit, uint, 0644);
4298 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4300 module_param(tx_window, uint, 0644);
4301 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4303 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4304 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4305 MODULE_VERSION(VERSION);
4306 MODULE_LICENSE("GPL");
4307 MODULE_ALIAS("bt-proto-0");