2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
77 /* ---- L2CAP timers ---- */
/* sk_timer callback: the socket timed out while connecting/configuring.
 * Picks an errno based on the current state, then tears the socket down.
 * NOTE(review): lines are missing from this view (locking, default reason,
 * socket kill) — do not infer the full teardown sequence from here. */
78 static void l2cap_sock_timeout(unsigned long arg)
80 struct sock *sk = (struct sock *) arg;
83 BT_DBG("sock %p state %d", sk, sk->sk_state);
87 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
88 reason = ECONNREFUSED;
89 else if (sk->sk_state == BT_CONNECT &&
90 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
91 reason = ECONNREFUSED;
95 __l2cap_sock_close(sk, reason);
/* Arm sk->sk_timer to fire 'timeout' jiffies from now (handled by
 * l2cap_sock_timeout). Rearming an already-pending timer is safe:
 * sk_reset_timer re-queues it. */
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timeout (counterpart of l2cap_sock_set_timer). */
109 static void l2cap_sock_clear_timer(struct sock *sk)
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
115 /* ---- L2CAP channels ---- */
/* Linear search of the connection's channel list for a channel whose
 * destination CID matches. Caller must hold l->lock. */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
/* Same as __l2cap_get_chan_by_dcid but matches on the source CID.
 * Caller must hold l->lock. */
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
136 /* Find channel with given SCID.
137 * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid: takes the list read lock
 * for the lookup and (per the comment above) returns the socket locked. */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
142 s = __l2cap_get_chan_by_scid(l, cid);
145 read_unlock(&l->lock);
/* Find the channel waiting on the signalling command with this identifier.
 * Caller must hold l->lock. */
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident (read lock around lookup). */
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
163 s = __l2cap_get_chan_by_ident(l, ident);
166 read_unlock(&l->lock);
/* Allocate a free dynamic source CID by scanning upward from
 * L2CAP_CID_DYN_START until an unused one is found. O(n) per probe;
 * caller must hold the channel-list lock to keep the result valid. */
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's doubly-linked channel list.
 * Caller must hold l->lock for writing. */
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
187 l2cap_pi(l->head)->prev_c = sk;
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the connection's channel list, fixing up neighbours'
 * prev_c/next_c pointers under the list write lock. */
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
198 write_lock_bh(&l->lock);
203 l2cap_pi(next)->prev_c = prev;
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
/* Attach a channel socket to a connection: assign CIDs according to the
 * socket type, link it into the channel list and, for incoming channels,
 * queue it on the listening parent's accept queue.
 * Caller must hold the channel-list write lock (see l2cap_chan_add). */
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
213 struct l2cap_chan_list *l = &conn->chan_list;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
218 conn->disc_reason = 0x13;
220 l2cap_pi(sk)->conn = conn;
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 __l2cap_chan_link(l, sk);
/* Incoming connection: hand the new child socket to the listener. */
240 bt_accept_enqueue(parent, sk);
244 * Must be called on the locked socket. */
/* Detach a channel from its connection and mark the socket closed/zapped.
 * Drops the hci_conn reference taken when the channel was added, unlinks
 * any pending accept-queue entry, and wakes the parent listener (if any)
 * so it can reap the dead child. Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
250 l2cap_sock_clear_timer(sk);
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
258 hci_conn_put(conn->hcon);
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
268 bt_accept_unlink(sk);
/* Wake the listening parent so accept() notices the state change. */
269 parent->sk_data_ready(parent, 0);
271 sk->sk_state_change(sk);
274 /* Service level security */
/* Map the channel's security level to an HCI authentication type and ask
 * the HCI layer to enforce it on the ACL link. PSM 0x0001 (SDP) never
 * requires bonding; its LOW level is downgraded to the special SDP level. */
275 static inline int l2cap_check_security(struct sock *sk)
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
284 auth_type = HCI_AT_NO_BONDING;
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
/* default: no bonding required. */
297 auth_type = HCI_AT_NO_BONDING;
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range (1-128); 0 is never used. */
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
310 /* Get next available identificator.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn->lock);
318 if (++conn->tx_ident > 128)
323 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command PDU and queue it on the ACL link.
 * Returns hci_send_acl()'s result; the skb is consumed. */
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
332 BT_DBG("code 0x%2.2x", code);
337 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM S-frame (supervisory frame): L2CAP header +
 * 16-bit control field, plus an optional CRC16 FCS covering everything
 * before it. The control argument carries ReqSeq/supervisory bits. */
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
/* Reserve two extra bytes for the FCS when CRC16 is negotiated. */
347 if (pi->fcs == L2CAP_FCS_CRC16)
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
355 skb = bt_skb_alloc(count, GFP_ATOMIC);
359 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
360 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
361 lh->cid = cpu_to_le16(pi->dcid);
362 put_unaligned_le16(control, skb_put(skb, 2));
364 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS covers header+control, i.e. the frame minus the 2 FCS bytes. */
365 u16 fcs = crc16(0, (u8 *)lh, count - 2);
366 put_unaligned_le16(fcs, skb_put(skb, 2));
369 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver-Ready or Receiver-Not-Ready S-frame depending on whether
 * our receive side is busy, acknowledging up to buffer_seq. */
372 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
374 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
375 control |= L2CAP_SUPER_RCV_NOT_READY;
377 control |= L2CAP_SUPER_RCV_READY;
379 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
381 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment. If the peer's feature mask is already
 * known (or being fetched), send a Connection Request once security is
 * satisfied; otherwise first issue an Information Request for the feature
 * mask, guarded by the info timer. */
384 static void l2cap_do_start(struct sock *sk)
386 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
388 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — l2cap_conn_start resumes us. */
389 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
392 if (l2cap_check_security(sk)) {
393 struct l2cap_conn_req req;
394 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
395 req.psm = l2cap_pi(sk)->psm;
397 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
399 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
400 L2CAP_CONN_REQ, sizeof(req), &req);
403 struct l2cap_info_req req;
404 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
407 conn->info_ident = l2cap_get_ident(conn);
409 mod_timer(&conn->info_timer, jiffies +
410 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
412 l2cap_send_cmd(conn, conn->info_ident,
413 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP Disconnection Request for this channel (dcid/scid pair). */
417 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
419 struct l2cap_disconn_req req;
421 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
422 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
423 l2cap_send_cmd(conn, l2cap_get_ident(conn),
424 L2CAP_DISCONN_REQ, sizeof(req), &req);
427 /* ---- L2CAP connections ---- */
/* Walk all channels on a connection once the feature exchange completed:
 * issue pending Connection Requests (BT_CONNECT) and answer pending
 * incoming requests (BT_CONNECT2) with success / pending / authorization-
 * pending responses depending on security and defer_setup. */
428 static void l2cap_conn_start(struct l2cap_conn *conn)
430 struct l2cap_chan_list *l = &conn->chan_list;
433 BT_DBG("conn %p", conn);
437 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented channels take part in the handshake. */
440 if (sk->sk_type != SOCK_SEQPACKET) {
445 if (sk->sk_state == BT_CONNECT) {
446 if (l2cap_check_security(sk)) {
447 struct l2cap_conn_req req;
448 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
449 req.psm = l2cap_pi(sk)->psm;
451 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
453 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
454 L2CAP_CONN_REQ, sizeof(req), &req);
456 } else if (sk->sk_state == BT_CONNECT2) {
457 struct l2cap_conn_rsp rsp;
458 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
459 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
461 if (l2cap_check_security(sk)) {
462 if (bt_sk(sk)->defer_setup) {
463 struct sock *parent = bt_sk(sk)->parent;
464 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
465 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Let userspace decide via deferred accept(). */
466 parent->sk_data_ready(parent, 0);
469 sk->sk_state = BT_CONFIG;
470 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
471 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report authentication pending. */
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
478 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
479 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
485 read_unlock(&l->lock);
/* ACL link came up: mark raw/dgram channels connected immediately and
 * start the L2CAP handshake for channels waiting in BT_CONNECT. */
488 static void l2cap_conn_ready(struct l2cap_conn *conn)
490 struct l2cap_chan_list *l = &conn->chan_list;
493 BT_DBG("conn %p", conn);
497 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
500 if (sk->sk_type != SOCK_SEQPACKET) {
501 l2cap_sock_clear_timer(sk);
502 sk->sk_state = BT_CONNECTED;
503 sk->sk_state_change(sk);
504 } else if (sk->sk_state == BT_CONNECT)
510 read_unlock(&l->lock);
513 /* Notify sockets that we cannot guaranty reliability anymore */
/* Notify sockets that we cannot guaranty reliability anymore: report err
 * on every channel that asked for force_reliable semantics. */
514 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
516 struct l2cap_chan_list *l = &conn->chan_list;
519 BT_DBG("conn %p", conn);
523 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
524 if (l2cap_pi(sk)->force_reliable)
528 read_unlock(&l->lock);
/* Information-request timer expired: give up on the feature exchange,
 * mark it done and proceed with whatever channels are pending. */
531 static void l2cap_info_timeout(unsigned long arg)
533 struct l2cap_conn *conn = (void *) arg;
535 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
536 conn->info_ident = 0;
538 l2cap_conn_start(conn);
/* Get-or-create the per-ACL-link L2CAP connection object. Returns the
 * existing hcon->l2cap_data if present, otherwise allocates (GFP_ATOMIC),
 * initializes locks/timer and attaches it to the hci_conn. */
541 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
543 struct l2cap_conn *conn = hcon->l2cap_data;
548 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
552 hcon->l2cap_data = conn;
555 BT_DBG("hcon %p conn %p", hcon, conn);
557 conn->mtu = hcon->hdev->acl_mtu;
558 conn->src = &hcon->hdev->bdaddr;
559 conn->dst = &hcon->dst;
563 spin_lock_init(&conn->lock);
564 rwlock_init(&conn->chan_list.lock);
566 setup_timer(&conn->info_timer, l2cap_info_timeout,
567 (unsigned long) conn);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
569 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection object: free any partial reassembly skb,
 * delete every remaining channel with 'err', stop the info timer if it was
 * armed, and detach from the hci_conn. */
574 static void l2cap_conn_del(struct hci_conn *hcon, int err)
576 struct l2cap_conn *conn = hcon->l2cap_data;
582 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
584 kfree_skb(conn->rx_skb);
/* Close channels: each iteration unlinks the list head via l2cap_chan_del.
 * NOTE(review): per-socket locking around this loop is not visible here. */
587 while ((sk = conn->chan_list.head)) {
589 l2cap_chan_del(sk, err);
594 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
595 del_timer_sync(&conn->info_timer);
597 hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel to the connection under the channel-list
 * write lock (see __l2cap_chan_add). */
601 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
603 struct l2cap_chan_list *l = &conn->chan_list;
604 write_lock_bh(&l->lock);
605 __l2cap_chan_add(conn, sk, parent);
606 write_unlock_bh(&l->lock);
609 /* ---- Socket interface ---- */
/* Exact-match lookup in the global socket list by (source PSM, source
 * bdaddr) — used to detect bind conflicts. Caller holds l2cap_sk_list.lock. */
610 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
613 struct hlist_node *node;
614 sk_for_each(sk, node, &l2cap_sk_list.head)
615 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
622 /* Find socket with psm and source bdaddr.
623 * Returns closest match.
/* Find a socket in 'state' listening on 'psm': prefer an exact source-
 * address match, falling back to a BDADDR_ANY wildcard (kept in sk1).
 * 'node' being non-NULL means the loop exited early on an exact match. */
625 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
627 struct sock *sk = NULL, *sk1 = NULL;
628 struct hlist_node *node;
630 sk_for_each(sk, node, &l2cap_sk_list.head) {
631 if (state && sk->sk_state != state)
634 if (l2cap_pi(sk)->psm == psm) {
636 if (!bacmp(&bt_sk(sk)->src, src))
640 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
644 return node ? sk : sk1;
647 /* Find socket with given address (psm, src).
648 * Returns locked socket */
/* Locked wrapper around __l2cap_get_sock_by_psm; per the comment above it
 * returns the socket locked. */
649 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
652 read_lock(&l2cap_sk_list.lock);
653 s = __l2cap_get_sock_by_psm(state, psm, src);
656 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: drop any skbs still sitting on the receive and write
 * queues when the socket is finally freed. */
660 static void l2cap_sock_destruct(struct sock *sk)
664 skb_queue_purge(&sk->sk_receive_queue);
665 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark the
 * listener itself closed and zapped. */
668 static void l2cap_sock_cleanup_listen(struct sock *parent)
672 BT_DBG("parent %p", parent);
674 /* Close not yet accepted channels */
675 while ((sk = bt_accept_dequeue(parent, NULL)))
676 l2cap_sock_close(sk);
678 parent->sk_state = BT_CLOSED;
679 sock_set_flag(parent, SOCK_ZAPPED);
682 /* Kill socket (only if zapped and orphan)
683 * Must be called on unlocked socket.
/* Kill socket (only if zapped and orphan): a socket still attached to a
 * struct socket, or not yet zapped, is left alone. Must be called on an
 * unlocked socket. */
685 static void l2cap_sock_kill(struct sock *sk)
687 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
690 BT_DBG("sk %p state %d", sk, sk->sk_state);
692 /* Kill poor orphan */
693 bt_sock_unlink(&l2cap_sk_list, sk);
694 sock_set_flag(sk, SOCK_DEAD);
/* State-machine driven close. LISTEN: reap children. Connected/config
 * SEQPACKET: start a graceful disconnect (DISCONN_REQ + timer). CONNECT2:
 * refuse the pending incoming request with SEC_BLOCK or BAD_PSM before
 * deleting the channel. Everything else: just zap. Caller handles locking
 * (see l2cap_sock_close). */
698 static void __l2cap_sock_close(struct sock *sk, int reason)
700 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
702 switch (sk->sk_state) {
704 l2cap_sock_cleanup_listen(sk);
709 if (sk->sk_type == SOCK_SEQPACKET) {
710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Graceful shutdown: wait for the Disconnection Response. */
712 sk->sk_state = BT_DISCONN;
713 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
714 l2cap_send_disconn_req(conn, sk);
716 l2cap_chan_del(sk, reason);
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
722 struct l2cap_conn_rsp rsp;
725 if (bt_sk(sk)->defer_setup)
726 result = L2CAP_CR_SEC_BLOCK;
728 result = L2CAP_CR_BAD_PSM;
730 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
731 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
732 rsp.result = cpu_to_le16(result);
733 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
734 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
735 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
737 l2cap_chan_del(sk, reason);
742 l2cap_chan_del(sk, reason);
746 sock_set_flag(sk, SOCK_ZAPPED);
751 /* Must be called on unlocked socket. */
/* Must be called on unlocked socket: cancels the timer and closes with
 * ECONNRESET as the reported reason. */
752 static void l2cap_sock_close(struct sock *sk)
754 l2cap_sock_clear_timer(sk);
756 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a fresh L2CAP socket. With a parent (incoming connection) the
 * child inherits the listener's type, defer_setup and channel options;
 * otherwise module-wide defaults are applied. Also sets default config
 * options and initializes the ERTM TX/SREJ queues. */
761 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
763 struct l2cap_pinfo *pi = l2cap_pi(sk);
768 sk->sk_type = parent->sk_type;
769 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
771 pi->imtu = l2cap_pi(parent)->imtu;
772 pi->omtu = l2cap_pi(parent)->omtu;
773 pi->mode = l2cap_pi(parent)->mode;
774 pi->fcs = l2cap_pi(parent)->fcs;
775 pi->sec_level = l2cap_pi(parent)->sec_level;
776 pi->role_switch = l2cap_pi(parent)->role_switch;
777 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: module defaults. */
779 pi->imtu = L2CAP_DEFAULT_MTU;
781 pi->mode = L2CAP_MODE_BASIC;
782 pi->fcs = L2CAP_FCS_CRC16;
783 pi->sec_level = BT_SECURITY_LOW;
785 pi->force_reliable = 0;
788 /* Default config options */
790 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
791 skb_queue_head_init(TX_QUEUE(sk));
792 skb_queue_head_init(SREJ_QUEUE(sk));
793 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor registered with the socket layer; obj_size makes
 * sk_alloc() reserve room for the l2cap_pinfo tail. */
796 static struct proto l2cap_proto = {
798 .owner = THIS_MODULE,
799 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP struct sock: destructor,
 * send timeout, state BT_OPEN, per-socket timer, and linkage into the
 * global l2cap_sk_list. */
802 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
806 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
810 sock_init_data(sock, sk);
811 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
813 sk->sk_destruct = l2cap_sock_destruct;
814 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
816 sock_reset_flag(sk, SOCK_ZAPPED);
818 sk->sk_protocol = proto;
819 sk->sk_state = BT_OPEN;
821 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
823 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP. Accepts SEQPACKET,
 * DGRAM and RAW types; RAW additionally requires CAP_NET_RAW unless the
 * socket is created from kernel context. */
827 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
832 BT_DBG("sock %p", sock);
834 sock->state = SS_UNCONNECTED;
836 if (sock->type != SOCK_SEQPACKET &&
837 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
838 return -ESOCKTNOSUPPORT;
840 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
843 sock->ops = &l2cap_sock_ops;
845 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
849 l2cap_sock_init(sk, NULL);
/* bind(2): validate the sockaddr, require CAP_NET_BIND_SERVICE for
 * privileged PSMs (< 0x1001), reject duplicate (psm, bdaddr) bindings,
 * then record the source address/PSM and move to BT_BOUND. PSMs 0x0001
 * (SDP) and 0x0003 (RFCOMM) get the relaxed SDP security level. */
853 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
855 struct sock *sk = sock->sk;
856 struct sockaddr_l2 la;
861 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy at most sizeof(la); shorter sockaddrs are zero-padded. */
864 memset(&la, 0, sizeof(la));
865 len = min_t(unsigned int, sizeof(la), alen);
866 memcpy(&la, addr, len);
873 if (sk->sk_state != BT_OPEN) {
878 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
879 !capable(CAP_NET_BIND_SERVICE)) {
884 write_lock_bh(&l2cap_sk_list.lock);
886 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
889 /* Save source address */
890 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
891 l2cap_pi(sk)->psm = la.l2_psm;
892 l2cap_pi(sk)->sport = la.l2_psm;
893 sk->sk_state = BT_BOUND;
895 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
896 __le16_to_cpu(la.l2_psm) == 0x0003)
897 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
900 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing ACL link and attach the channel to it. Chooses
 * the HCI auth type from socket type / PSM / security level (RAW gets
 * dedicated bonding; SDP never bonds), creates/reuses the l2cap_conn,
 * adds the channel and arms the connect timer. If the ACL is already up,
 * non-SEQPACKET sockets go straight to BT_CONNECTED. */
907 static int l2cap_do_connect(struct sock *sk)
909 bdaddr_t *src = &bt_sk(sk)->src;
910 bdaddr_t *dst = &bt_sk(sk)->dst;
911 struct l2cap_conn *conn;
912 struct hci_conn *hcon;
913 struct hci_dev *hdev;
917 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
920 hdev = hci_get_route(dst, src);
922 return -EHOSTUNREACH;
924 hci_dev_lock_bh(hdev);
928 if (sk->sk_type == SOCK_RAW) {
929 switch (l2cap_pi(sk)->sec_level) {
930 case BT_SECURITY_HIGH:
931 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
933 case BT_SECURITY_MEDIUM:
934 auth_type = HCI_AT_DEDICATED_BONDING;
937 auth_type = HCI_AT_NO_BONDING;
940 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
941 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
942 auth_type = HCI_AT_NO_BONDING_MITM;
944 auth_type = HCI_AT_NO_BONDING;
946 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
947 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
949 switch (l2cap_pi(sk)->sec_level) {
950 case BT_SECURITY_HIGH:
951 auth_type = HCI_AT_GENERAL_BONDING_MITM;
953 case BT_SECURITY_MEDIUM:
954 auth_type = HCI_AT_GENERAL_BONDING;
957 auth_type = HCI_AT_NO_BONDING;
962 hcon = hci_connect(hdev, ACL_LINK, dst,
963 l2cap_pi(sk)->sec_level, auth_type);
967 conn = l2cap_conn_add(hcon, 0);
975 /* Update source addr of the socket */
976 bacpy(src, conn->src);
978 l2cap_chan_add(conn, sk, NULL);
980 sk->sk_state = BT_CONNECT;
981 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
983 if (hcon->state == BT_CONNECTED) {
984 if (sk->sk_type != SOCK_SEQPACKET) {
985 l2cap_sock_clear_timer(sk);
986 sk->sk_state = BT_CONNECTED;
992 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and PSM (SEQPACKET needs one), reject
 * ERTM/STREAMING modes unless enabled, record destination, start the
 * connection and optionally block until BT_CONNECTED. */
997 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
999 struct sock *sk = sock->sk;
1000 struct sockaddr_l2 la;
1003 BT_DBG("sk %p", sk);
1005 if (!addr || alen < sizeof(addr->sa_family) ||
1006 addr->sa_family != AF_BLUETOOTH)
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
1018 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
/* Basic mode is always allowed; ERTM/streaming are gated (enable_ertm). */
1023 switch (l2cap_pi(sk)->mode) {
1024 case L2CAP_MODE_BASIC:
1026 case L2CAP_MODE_ERTM:
1027 case L2CAP_MODE_STREAMING:
1036 switch (sk->sk_state) {
1040 /* Already connecting */
1044 /* Already connected */
1057 /* Set destination address and psm */
1058 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1059 l2cap_pi(sk)->psm = la.l2_psm;
1061 err = l2cap_do_connect(sk);
1066 err = bt_sock_wait_state(sk, BT_CONNECTED,
1067 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen; mode is validated
 * like in connect(). If the socket was bound without a PSM, auto-assign
 * the first free odd PSM in the dynamic range 0x1001-0x10ff. */
1073 static int l2cap_sock_listen(struct socket *sock, int backlog)
1075 struct sock *sk = sock->sk;
1078 BT_DBG("sk %p backlog %d", sk, backlog);
1082 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1087 switch (l2cap_pi(sk)->mode) {
1088 case L2CAP_MODE_BASIC:
1090 case L2CAP_MODE_ERTM:
1091 case L2CAP_MODE_STREAMING:
1100 if (!l2cap_pi(sk)->psm) {
1101 bdaddr_t *src = &bt_sk(sk)->src;
1106 write_lock_bh(&l2cap_sk_list.lock);
/* Valid connection-oriented PSMs are odd, hence the += 2 stride. */
1108 for (psm = 0x1001; psm < 0x1100; psm += 2)
1109 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1110 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1111 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1116 write_unlock_bh(&l2cap_sk_list.lock);
1122 sk->sk_max_ack_backlog = backlog;
1123 sk->sk_ack_backlog = 0;
1124 sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one wait loop on the listener's accept queue.
 * Drops the socket lock across schedule_timeout() and re-checks the
 * LISTEN state after re-acquiring it (the listener may have been closed). */
1131 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1133 DECLARE_WAITQUEUE(wait, current);
1134 struct sock *sk = sock->sk, *nsk;
1138 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1140 if (sk->sk_state != BT_LISTEN) {
1145 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1147 BT_DBG("sk %p timeo %ld", sk, timeo);
1149 /* Wait for an incoming connection. (wake-one). */
1150 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1152 set_current_state(TASK_INTERRUPTIBLE);
1159 timeo = schedule_timeout(timeo);
1160 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1162 if (sk->sk_state != BT_LISTEN) {
1167 if (signal_pending(current)) {
1168 err = sock_intr_errno(timeo);
1172 set_current_state(TASK_RUNNING);
1173 remove_wait_queue(sk_sleep(sk), &wait);
1178 newsock->state = SS_CONNECTED;
1180 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer
 * (psm/dst/dcid) or the local (sport/src/scid) identity. */
1187 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1189 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1190 struct sock *sk = sock->sk;
1192 BT_DBG("sock %p, sk %p", sock, sk);
1194 addr->sa_family = AF_BLUETOOTH;
1195 *len = sizeof(struct sockaddr_l2);
1198 la->l2_psm = l2cap_pi(sk)->psm;
1199 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1200 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1202 la->l2_psm = l2cap_pi(sk)->sport;
1203 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1204 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: peer stayed silent after a poll. Disconnect once
 * remote_max_tx retries are exhausted, otherwise re-poll with an RR/RNR
 * carrying the P-bit and rearm the monitor timer. */
1210 static void l2cap_monitor_timeout(unsigned long arg)
1212 struct sock *sk = (void *) arg;
1216 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1217 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1222 l2cap_pi(sk)->retry_count++;
1223 __mod_monitor_timer();
1225 control = L2CAP_CTRL_POLL;
1226 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: switch to the monitor/poll phase — set
 * WAIT_F (blocks new I-frame transmission) and poll the peer with the
 * P-bit set. */
1230 static void l2cap_retrans_timeout(unsigned long arg)
1232 struct sock *sk = (void *) arg;
1236 l2cap_pi(sk)->retry_count = 1;
1237 __mod_monitor_timer();
1239 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1241 control = L2CAP_CTRL_POLL;
1242 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Drop frames from the head of the TX queue that the peer has acknowledged
 * (everything before expected_ack_seq), and stop the retransmission timer
 * once no unacked frames remain. */
1246 static void l2cap_drop_acked_frames(struct sock *sk)
1248 struct sk_buff *skb;
1250 while ((skb = skb_peek(TX_QUEUE(sk)))) {
/* Stop at the first frame that is not yet acknowledged. */
1251 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1254 skb = skb_dequeue(TX_QUEUE(sk));
1257 l2cap_pi(sk)->unacked_frames--;
1260 if (!l2cap_pi(sk)->unacked_frames)
1261 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the ACL layer for this channel's connection; the skb is
 * consumed on success. */
1266 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1268 struct l2cap_pinfo *pi = l2cap_pi(sk);
1271 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1273 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming-mode transmit: for each queued PDU, clone it, stamp TxSeq into
 * the control field, recompute the FCS over the clone, and send. TxSeq
 * wraps mod 64; sent PDUs are dequeued (streaming mode never retransmits). */
1280 static int l2cap_streaming_send(struct sock *sk)
1282 struct sk_buff *skb, *tx_skb;
1283 struct l2cap_pinfo *pi = l2cap_pi(sk);
1287 while ((skb = sk->sk_send_head)) {
1288 tx_skb = skb_clone(skb, GFP_ATOMIC);
1290 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1291 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1292 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS over the whole frame minus the trailing 2 FCS bytes. */
1294 if (pi->fcs == L2CAP_FCS_CRC16) {
1295 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1296 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1299 err = l2cap_do_send(sk, tx_skb);
1301 l2cap_send_disconn_req(pi->conn, sk);
1305 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1307 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1308 sk->sk_send_head = NULL;
1310 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Original PDU is dropped after sending — no retransmission in streaming.
 * NOTE(review): the kfree of the dequeued skb is outside this view. */
1312 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame whose TxSeq matches 'tx_seq': walk the TX
 * queue to find it, give up (disconnect) if remote_max_tx retries were
 * already spent, otherwise clone it, refresh ReqSeq/TxSeq in the control
 * field, recompute the FCS on the clone and resend. */
1318 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1320 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 struct sk_buff *skb, *tx_skb;
1325 skb = skb_peek(TX_QUEUE(sk));
1327 if (bt_cb(skb)->tx_seq != tx_seq) {
1328 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1330 skb = skb_queue_next(TX_QUEUE(sk), skb);
/* remote_max_tx == 0 means "infinite retransmissions" per the spec. */
1334 if (pi->remote_max_tx &&
1335 bt_cb(skb)->retries == pi->remote_max_tx) {
1336 l2cap_send_disconn_req(pi->conn, sk);
1340 tx_skb = skb_clone(skb, GFP_ATOMIC);
1341 bt_cb(skb)->retries++;
1342 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1343 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1344 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1345 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1347 if (pi->fcs == L2CAP_FCS_CRC16) {
1348 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1349 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1352 err = l2cap_do_send(sk, tx_skb);
1354 l2cap_send_disconn_req(pi->conn, sk);
1362 static int l2cap_ertm_send(struct sock *sk)
1364 struct sk_buff *skb, *tx_skb;
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1369 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1372 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1373 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1375 if (pi->remote_max_tx &&
1376 bt_cb(skb)->retries == pi->remote_max_tx) {
1377 l2cap_send_disconn_req(pi->conn, sk);
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1383 bt_cb(skb)->retries++;
1385 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1386 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1387 control |= L2CAP_CTRL_FINAL;
1388 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1390 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1391 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1392 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1395 if (pi->fcs == L2CAP_FCS_CRC16) {
1396 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1397 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1400 err = l2cap_do_send(sk, tx_skb);
1402 l2cap_send_disconn_req(pi->conn, sk);
1405 __mod_retrans_timer();
1407 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1408 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1410 pi->unacked_frames++;
1413 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1414 sk->sk_send_head = NULL;
1416 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy a user iovec into an skb: the first 'count' bytes go into the head
 * skb (which already has the L2CAP header), the remainder is chained as
 * fragment skbs of at most conn->mtu bytes each via frag_list. Returns a
 * negative errno on copy or allocation failure. */
1422 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1424 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1425 struct sk_buff **frag;
1428 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1435 /* Continuation fragments (no L2CAP header) */
1436 frag = &skb_shinfo(skb)->frag_list;
1438 count = min_t(unsigned int, conn->mtu, len);
1440 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1443 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1449 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header + 2-byte PSM,
 * followed by the user payload (fragmented to conn->mtu as needed).
 * Returns the skb or an ERR_PTR. */
1455 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1457 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1458 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field. */
1459 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1460 struct l2cap_hdr *lh;
1462 BT_DBG("sk %p len %d", sk, (int)len);
1464 count = min_t(unsigned int, (conn->mtu - hlen), len);
1465 skb = bt_skb_send_alloc(sk, count + hlen,
1466 msg->msg_flags & MSG_DONTWAIT, &err);
1468 return ERR_PTR(-ENOMEM);
1470 /* Create L2CAP header */
1471 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1472 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1473 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1474 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1476 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1477 if (unlikely(err < 0)) {
1479 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user payload
 * (fragmented to conn->mtu as needed). Returns the skb or an ERR_PTR. */
1484 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1486 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1487 struct sk_buff *skb;
1488 int err, count, hlen = L2CAP_HDR_SIZE;
1489 struct l2cap_hdr *lh;
1491 BT_DBG("sk %p len %d", sk, (int)len);
1493 count = min_t(unsigned int, (conn->mtu - hlen), len);
1494 skb = bt_skb_send_alloc(sk, count + hlen,
1495 msg->msg_flags & MSG_DONTWAIT, &err);
1497 return ERR_PTR(-ENOMEM);
1499 /* Create L2CAP header */
1500 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1501 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1502 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1504 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1505 if (unlikely(err < 0)) {
1507 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header + 16-bit control field
 * + optional 2-byte SAR SDU-length (when sdulen != 0) + payload + optional
 * 2-byte FCS placeholder. The control field's sequence numbers and the
 * real FCS are filled in at transmit time (l2cap_ertm_send et al.).
 * Returns the skb or an ERR_PTR. */
1512 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1514 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1515 struct sk_buff *skb;
/* Base hlen = header + control; SAR and FCS bytes are added below. */
1516 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1517 struct l2cap_hdr *lh;
1519 BT_DBG("sk %p len %d", sk, (int)len);
1524 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1527 count = min_t(unsigned int, (conn->mtu - hlen), len);
1528 skb = bt_skb_send_alloc(sk, count + hlen,
1529 msg->msg_flags & MSG_DONTWAIT, &err);
1531 return ERR_PTR(-ENOMEM);
1533 /* Create L2CAP header */
1534 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1535 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1536 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1537 put_unaligned_le16(control, skb_put(skb, 2));
/* SAR start frames carry the total SDU length after the control field. */
1539 put_unaligned_le16(sdulen, skb_put(skb, 2));
1541 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1542 if (unlikely(err < 0)) {
1544 return ERR_PTR(err);
/* Reserve FCS space with a zero placeholder; real value set on send. */
1547 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1548 put_unaligned_le16(0, skb_put(skb, 2));
1550 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than max_pdu_size into a START PDU (carrying the
 * total SDU length), CONTINUE PDUs, and a final END PDU, building them on a
 * private queue first so a mid-sequence allocation failure leaves TX_QUEUE
 * untouched. On success the segments are spliced onto TX_QUEUE(sk). */
1554 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1556 struct l2cap_pinfo *pi = l2cap_pi(sk);
1557 struct sk_buff *skb;
1558 struct sk_buff_head sar_queue;
1562 __skb_queue_head_init(&sar_queue);
1563 control = L2CAP_SDU_START;
/* START segment: sdulen argument carries the full SDU length. */
1564 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1566 return PTR_ERR(skb);
1568 __skb_queue_tail(&sar_queue, skb);
1569 len -= pi->max_pdu_size;
1570 size +=pi->max_pdu_size;
1576 if (len > pi->max_pdu_size) {
1577 control |= L2CAP_SDU_CONTINUE;
1578 buflen = pi->max_pdu_size;
1580 control |= L2CAP_SDU_END;
1584 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Allocation failed mid-SDU: discard all segments built so far. */
1586 skb_queue_purge(&sar_queue);
1587 return PTR_ERR(skb);
1590 __skb_queue_tail(&sar_queue, skb);
1595 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1596 if (sk->sk_send_head == NULL)
1597 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point for L2CAP sockets.
 * Dispatches on socket type and channel mode:
 *   SOCK_DGRAM            -> connectionless PDU, sent immediately;
 *   L2CAP_MODE_BASIC      -> single basic PDU (bounded by omtu);
 *   ERTM / STREAMING      -> queue one I-frame or SAR-segment the SDU,
 *                            then kick the mode-specific transmitter.
 * MSG_OOB and sends on a non-connected socket are rejected (error
 * lines elided in this listing). */
1602 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1604 struct sock *sk = sock->sk;
1605 struct l2cap_pinfo *pi = l2cap_pi(sk);
1606 struct sk_buff *skb;
1610 BT_DBG("sock %p, sk %p", sock, sk);
/* Report any pending asynchronous socket error first */
1612 err = sock_error(sk);
/* L2CAP has no out-of-band data concept */
1616 if (msg->msg_flags & MSG_OOB)
1621 if (sk->sk_state != BT_CONNECTED) {
1626 /* Connectionless channel */
1627 if (sk->sk_type == SOCK_DGRAM) {
1628 skb = l2cap_create_connless_pdu(sk, msg, len)_
1632 err = l2cap_do_send(sk, skb);
1637 case L2CAP_MODE_BASIC:
1638 /* Check outgoing MTU */
1639 if (len > pi->omtu) {
1644 /* Create a basic PDU */
1645 skb = l2cap_create_basic_pdu(sk, msg, len);
1651 err = l2cap_do_send(sk, skb);
1656 case L2CAP_MODE_ERTM:
1657 case L2CAP_MODE_STREAMING:
1658 /* Entire SDU fits into one PDU */
1659 if (len <= pi->max_pdu_size) {
1660 control = L2CAP_SDU_UNSEGMENTED;
1661 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1666 __skb_queue_tail(TX_QUEUE(sk), skb);
1667 if (sk->sk_send_head == NULL)
1668 sk->sk_send_head = skb;
1670 /* Segment SDU into multiples PDUs */
1671 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Streaming mode sends unconditionally; ERTM respects the TX window */
1676 if (pi->mode == L2CAP_MODE_STREAMING)
1677 err = l2cap_streaming_send(sk);
1679 err = l2cap_ertm_send(sk);
1686 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point. For a deferred-setup channel still in
 * BT_CONNECT2, the first read acts as the "accept" trigger: move the
 * channel to BT_CONFIG and send the pending success CONN_RSP before
 * delegating to the generic Bluetooth receive path. */
1695 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1697 struct sock *sk = sock->sk;
1701 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1702 struct l2cap_conn_rsp rsp;
1704 sk->sk_state = BT_CONFIG;
/* In the response, our source CID is the peer's destination CID */
1706 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1707 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1708 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1709 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply using the ident saved from the original CONN_REQ */
1710 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1711 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Normal data reception is handled by the common bt_sock layer */
1719 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler.
 * L2CAP_OPTIONS: partially overwrite the current options with whatever
 * the user supplied (current values are loaded first so a short copy
 * keeps the rest). Note flush_to is read into opts but not written
 * back — presumably intentional here; confirm against full source.
 * L2CAP_LM: translate link-mode bits into sec_level / role flags. */
1722 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1724 struct sock *sk = sock->sk;
1725 struct l2cap_options opts;
1729 BT_DBG("sk %p", sk);
/* Seed opts with current values so a partial user copy is merged */
1735 opts.imtu = l2cap_pi(sk)->imtu;
1736 opts.omtu = l2cap_pi(sk)->omtu;
1737 opts.flush_to = l2cap_pi(sk)->flush_to;
1738 opts.mode = l2cap_pi(sk)->mode;
1739 opts.fcs = l2cap_pi(sk)->fcs;
1741 len = min_t(unsigned int, sizeof(opts), optlen);
1742 if (copy_from_user((char *) &opts, optval, len)) {
1747 l2cap_pi(sk)->imtu = opts.imtu;
1748 l2cap_pi(sk)->omtu = opts.omtu;
1749 l2cap_pi(sk)->mode = opts.mode;
1750 l2cap_pi(sk)->fcs = opts.fcs;
/* L2CAP_LM branch: map legacy link-mode flags onto BT security levels */
1754 if (get_user(opt, (u32 __user *) optval)) {
/* Highest requested mode wins: AUTH < ENCRYPT < SECURE */
1759 if (opt & L2CAP_LM_AUTH)
1760 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1761 if (opt & L2CAP_LM_ENCRYPT)
1762 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1763 if (opt & L2CAP_LM_SECURE)
1764 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1766 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1767 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* Modern setsockopt handler. SOL_L2CAP is routed to the legacy
 * handler for backward compatibility; only SOL_BLUETOOTH options
 * (BT_SECURITY, BT_DEFER_SETUP) are handled here. */
1779 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1781 struct sock *sk = sock->sk;
1782 struct bt_security sec;
1786 BT_DBG("sk %p", sk);
1788 if (level == SOL_L2CAP)
1789 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1791 if (level != SOL_BLUETOOTH)
1792 return -ENOPROTOOPT;
/* BT_SECURITY is only meaningful for SEQPACKET and RAW sockets */
1798 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
/* Default in case the user copy is shorter than the struct */
1803 sec.level = BT_SECURITY_LOW;
1805 len = min_t(unsigned int, sizeof(sec), optlen);
1806 if (copy_from_user((char *) &sec, optval, len)) {
/* Reject out-of-range security levels */
1811 if (sec.level < BT_SECURITY_LOW ||
1812 sec.level > BT_SECURITY_HIGH) {
1817 l2cap_pi(sk)->sec_level = sec.level;
/* BT_DEFER_SETUP may only be changed before the channel is active */
1820 case BT_DEFER_SETUP:
1821 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1826 if (get_user(opt, (u32 __user *) optval)) {
1831 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler.
 * L2CAP_OPTIONS: return current channel options (truncated to the
 * user's buffer length).
 * L2CAP_LM: reconstruct link-mode flag bits from sec_level and the
 * role/reliability flags.
 * L2CAP_CONNINFO: return HCI handle + device class; only valid once
 * connected (or deferred-setup pending). */
1843 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1845 struct sock *sk = sock->sk;
1846 struct l2cap_options opts;
1847 struct l2cap_conninfo cinfo;
1851 BT_DBG("sk %p", sk);
1853 if (get_user(len, optlen))
1860 opts.imtu = l2cap_pi(sk)->imtu;
1861 opts.omtu = l2cap_pi(sk)->omtu;
1862 opts.flush_to = l2cap_pi(sk)->flush_to;
1863 opts.mode = l2cap_pi(sk)->mode;
1864 opts.fcs = l2cap_pi(sk)->fcs;
/* Copy at most what the caller asked for */
1866 len = min_t(unsigned int, len, sizeof(opts));
1867 if (copy_to_user(optval, (char *) &opts, len))
/* L2CAP_LM branch: inverse of the mapping in setsockopt_old */
1873 switch (l2cap_pi(sk)->sec_level) {
1874 case BT_SECURITY_LOW:
1875 opt = L2CAP_LM_AUTH;
1877 case BT_SECURITY_MEDIUM:
1878 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1880 case BT_SECURITY_HIGH:
1881 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1889 if (l2cap_pi(sk)->role_switch)
1890 opt |= L2CAP_LM_MASTER;
1892 if (l2cap_pi(sk)->force_reliable)
1893 opt |= L2CAP_LM_RELIABLE;
1895 if (put_user(opt, (u32 __user *) optval))
1899 case L2CAP_CONNINFO:
/* Connection info requires an established (or deferred) link */
1900 if (sk->sk_state != BT_CONNECTED &&
1901 !(sk->sk_state == BT_CONNECT2 &&
1902 bt_sk(sk)->defer_setup)) {
1907 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1908 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1910 len = min_t(unsigned int, len, sizeof(cinfo));
1911 if (copy_to_user(optval, (char *) &cinfo, len))
/* Modern getsockopt handler; SOL_L2CAP falls through to the legacy
 * handler, SOL_BLUETOOTH serves BT_SECURITY and BT_DEFER_SETUP. */
1925 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1927 struct sock *sk = sock->sk;
1928 struct bt_security sec;
1931 BT_DBG("sk %p", sk);
1933 if (level == SOL_L2CAP)
1934 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1936 if (level != SOL_BLUETOOTH)
1937 return -ENOPROTOOPT;
1939 if (get_user(len, optlen))
/* BT_SECURITY only applies to SEQPACKET and RAW sockets */
1946 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1951 sec.level = l2cap_pi(sk)->sec_level;
1953 len = min_t(unsigned int, len, sizeof(sec));
1954 if (copy_to_user(optval, (char *) &sec, len))
/* BT_DEFER_SETUP readable only in BOUND/LISTEN, mirroring setsockopt */
1959 case BT_DEFER_SETUP:
1960 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1965 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() for L2CAP sockets: mark both directions closed, cancel
 * the channel timer and start channel teardown; if SO_LINGER is set
 * with a nonzero timeout, wait for the channel to reach BT_CLOSED. */
1979 static int l2cap_sock_shutdown(struct socket *sock, int how)
1981 struct sock *sk = sock->sk;
1984 BT_DBG("sock %p, sk %p", sock, sk);
/* Only act on the first shutdown call */
1990 if (!sk->sk_shutdown) {
1991 sk->sk_shutdown = SHUTDOWN_MASK;
1992 l2cap_sock_clear_timer(sk);
1993 __l2cap_sock_close(sk, 0);
/* Honour SO_LINGER: block until the close completes or times out */
1995 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1996 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() for L2CAP sockets: shut the channel down (both directions)
 * and then kill the socket. */
2003 static int l2cap_sock_release(struct socket *sock)
2005 struct sock *sk = sock->sk;
2008 BT_DBG("sock %p, sk %p", sock, sk);
2013 err = l2cap_sock_shutdown(sock, 2);
2016 l2cap_sock_kill(sk);
/* Called when channel configuration completes. Clears the config state
 * and timer, then wakes whoever is waiting: the connecting thread for
 * outgoing channels, or the listening parent for incoming ones. */
2020 static void l2cap_chan_ready(struct sock *sk)
2022 struct sock *parent = bt_sk(sk)->parent;
2024 BT_DBG("sk %p, parent %p", sk, parent);
2026 l2cap_pi(sk)->conf_state = 0;
2027 l2cap_sock_clear_timer(sk);
2030 /* Outgoing channel.
2031 * Wake up socket sleeping on connect.
2033 sk->sk_state = BT_CONNECTED;
2034 sk->sk_state_change(sk);
2036 /* Incoming channel.
2037 * Wake up socket sleeping on accept.
2039 parent->sk_data_ready(parent, 0);
2043 /* Copy frame to all raw sockets on that connection */
2044 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2046 struct l2cap_chan_list *l = &conn->chan_list;
2047 struct sk_buff *nskb;
2050 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock */
2052 read_lock(&l->lock);
2053 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2054 if (sk->sk_type != SOCK_RAW)
2057 /* Don't send frame to the socket it came from */
/* Clone per recipient; GFP_ATOMIC because we hold a spinlock */
2060 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue is full the clone is dropped (cleanup elided) */
2064 if (sock_queue_rcv_skb(sk, nskb))
2067 read_unlock(&l->lock);
2070 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID 0x0001) +
 * command header + payload. Payloads larger than the connection MTU
 * are continued in frag_list fragments that carry raw data only.
 * Returns the skb, or NULL on allocation failure (error paths elided
 * in this listing). */
2071 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2072 u8 code, u8 ident, u16 dlen, void *data)
2074 struct sk_buff *skb, **frag;
2075 struct l2cap_cmd_hdr *cmd;
2076 struct l2cap_hdr *lh;
2079 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2080 conn, code, ident, dlen);
2082 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2083 count = min_t(unsigned int, conn->mtu, len);
2085 skb = bt_skb_alloc(count, GFP_ATOMIC);
/* L2CAP header: length excludes the basic header itself */
2089 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2090 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2091 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
/* Command header: code/ident assignments elided in this listing */
2093 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2096 cmd->len = cpu_to_le16(dlen);
/* First fragment carries whatever payload fits after the headers */
2099 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2100 memcpy(skb_put(skb, count), data, count);
2106 /* Continuation fragments (no L2CAP header) */
2107 frag = &skb_shinfo(skb)->frag_list;
2109 count = min_t(unsigned int, conn->mtu, len);
2111 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2115 memcpy(skb_put(*frag, count), data, count);
2120 frag = &(*frag)->next;
/* Decode one configuration option at *ptr. Scalar options (1/2/4
 * bytes) are converted from little-endian into *val; anything else is
 * returned as a pointer to the raw value bytes. Returns the total
 * option length so the caller can advance through the option list. */
2130 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2132 struct l2cap_conf_opt *opt = *ptr;
2135 len = L2CAP_CONF_OPT_SIZE + opt->len;
/* switch on opt->len (header elided): 1-, 2- and 4-byte scalars... */
2143 *val = *((u8 *) opt->val);
2147 *val = __le16_to_cpu(*((__le16 *) opt->val));
2151 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* ...and variable-length options are passed back by pointer */
2155 *val = (unsigned long) opt->val;
2159 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr and advance
 * the pointer. Scalar values are stored little-endian; other lengths
 * treat val as a pointer to raw bytes to copy. The caller must ensure
 * the buffer has room. */
2163 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2165 struct l2cap_conf_opt *opt = *ptr;
2167 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
/* switch on len (header elided): encode scalars little-endian */
2174 *((u8 *) opt->val) = val;
2178 *((__le16 *) opt->val) = cpu_to_le16(val);
2182 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Non-scalar: val is a pointer to len bytes (e.g. an RFC struct) */
2186 memcpy(opt->val, (void *) val, len);
2190 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Initialize per-channel ERTM state: reset sequence counters, arm the
 * retransmission and monitor timers (not started here), and prepare
 * the selective-reject queue. Called when a channel enters ERTM mode. */
2193 static inline void l2cap_ertm_init(struct sock *sk)
2195 l2cap_pi(sk)->expected_ack_seq = 0;
2196 l2cap_pi(sk)->unacked_frames = 0;
2197 l2cap_pi(sk)->buffer_seq = 0;
2198 l2cap_pi(sk)->num_to_ack = 0;
2199 l2cap_pi(sk)->frames_sent = 0;
/* Timers fire with the socket pointer as their argument */
2201 setup_timer(&l2cap_pi(sk)->retrans_timer,
2202 l2cap_retrans_timeout, (unsigned long) sk);
2203 setup_timer(&l2cap_pi(sk)->monitor_timer,
2204 l2cap_monitor_timeout, (unsigned long) sk);
2206 __skb_queue_head_init(SREJ_QUEUE(sk));
/* Return nonzero if the given channel mode is supported both locally
 * and by the remote feature mask. ERTM/streaming bits are only added
 * to the local mask when the module enables them (the enable_ertm
 * guard line is elided in this listing). */
2209 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2211 u32 local_feat_mask = l2cap_feat_mask;
2213 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2216 case L2CAP_MODE_ERTM:
2217 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2218 case L2CAP_MODE_STREAMING:
2219 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to negotiate: keep the requested ERTM or
 * streaming mode if the remote supports it, otherwise fall back to
 * basic mode. */
2225 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2228 case L2CAP_MODE_STREAMING:
2229 case L2CAP_MODE_ERTM:
2230 if (l2cap_mode_supported(mode, remote_feat_mask))
2234 return L2CAP_MODE_BASIC;
/* Build an outgoing configuration request into 'data'.
 * On the first request the desired mode is validated/downgraded
 * against the remote feature mask; then mode-specific options (MTU,
 * RFC, FCS) are appended. Returns the total request length (return
 * line elided in this listing). */
2238 static int l2cap_build_conf_req(struct sock *sk, void *data)
2240 struct l2cap_pinfo *pi = l2cap_pi(sk);
2241 struct l2cap_conf_req *req = data;
2242 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2243 void *ptr = req->data;
2245 BT_DBG("sk %p", sk);
/* Only (re)select the mode on the very first config exchange */
2247 if (pi->num_conf_req || pi->num_conf_rsp)
2251 case L2CAP_MODE_STREAMING:
2252 case L2CAP_MODE_ERTM:
/* STATE2_DEVICE: the application explicitly demanded this mode */
2253 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2254 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2255 l2cap_send_disconn_req(pi->conn, sk);
/* Otherwise fall back to whatever both sides support */
2258 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2264 case L2CAP_MODE_BASIC:
/* Only mention the MTU when it differs from the default */
2265 if (pi->imtu != L2CAP_DEFAULT_MTU)
2266 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2269 case L2CAP_MODE_ERTM:
2270 rfc.mode = L2CAP_MODE_ERTM;
2271 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2272 rfc.max_transmit = max_transmit;
/* Timeouts are filled in by the acceptor, so request zeros */
2273 rfc.retrans_timeout = 0;
2274 rfc.monitor_timeout = 0;
2275 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU size so an I-frame (hdr+ctrl+sdulen+FCS) fits the MTU */
2276 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2277 rfc.max_pdu_size = pi->conn->mtu - 10;
2279 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2280 sizeof(rfc), (unsigned long) &rfc);
/* Offer "no FCS" only if the remote supports the FCS option */
2282 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2285 if (pi->fcs == L2CAP_FCS_NONE ||
2286 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2287 pi->fcs = L2CAP_FCS_NONE;
2288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2292 case L2CAP_MODE_STREAMING:
2293 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode uses no retransmission parameters */
2295 rfc.max_transmit = 0;
2296 rfc.retrans_timeout = 0;
2297 rfc.monitor_timeout = 0;
2298 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2299 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2300 rfc.max_pdu_size = pi->conn->mtu - 10;
2302 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2303 sizeof(rfc), (unsigned long) &rfc);
2305 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2308 if (pi->fcs == L2CAP_FCS_NONE ||
2309 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2310 pi->fcs = L2CAP_FCS_NONE;
2311 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2316 /* FIXME: Need actual value of the flush timeout */
2317 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2318 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2320 req->dcid = cpu_to_le16(pi->dcid);
2321 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configuration request (pi->conf_req)
 * and build our response into 'data'. Walks the option list, resolves
 * the channel mode (refusing if an app-mandated mode is unsupported),
 * then accepts/overrides MTU and RFC parameters. Returns the response
 * length (return line elided in this listing). */
2326 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2328 struct l2cap_pinfo *pi = l2cap_pi(sk);
2329 struct l2cap_conf_rsp *rsp = data;
2330 void *ptr = rsp->data;
2331 void *req = pi->conf_req;
2332 int len = pi->conf_len;
2333 int type, hint, olen;
2335 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2336 u16 mtu = L2CAP_DEFAULT_MTU;
2337 u16 result = L2CAP_CONF_SUCCESS;
2339 BT_DBG("sk %p", sk);
2341 while (len >= L2CAP_CONF_OPT_SIZE) {
2342 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; mandatory ones may not */
2344 hint = type & L2CAP_CONF_HINT;
2345 type &= L2CAP_CONF_MASK;
2348 case L2CAP_CONF_MTU:
2352 case L2CAP_CONF_FLUSH_TO:
2356 case L2CAP_CONF_QOS:
2359 case L2CAP_CONF_RFC:
2360 if (olen == sizeof(rfc))
2361 memcpy(&rfc, (void *) val, olen);
2364 case L2CAP_CONF_FCS:
/* Remember that the peer is willing to drop the FCS */
2365 if (val == L2CAP_FCS_NONE)
2366 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject and list the offending type */
2374 result = L2CAP_CONF_UNKNOWN;
2375 *((u8 *) ptr++) = type;
/* Mode resolution happens only on the first config exchange */
2380 if (pi->num_conf_rsp || pi->num_conf_req)
2384 case L2CAP_MODE_STREAMING:
2385 case L2CAP_MODE_ERTM:
2386 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
/* App demanded a mode neither side can do: refuse the connection */
2387 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2388 return -ECONNREFUSED;
2391 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Peer asked for a different mode than we settled on */
2396 if (pi->mode != rfc.mode) {
2397 result = L2CAP_CONF_UNACCEPT;
2398 rfc.mode = pi->mode;
/* Second disagreement in a row means negotiation failed */
2400 if (pi->num_conf_rsp == 1)
2401 return -ECONNREFUSED;
2403 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2404 sizeof(rfc), (unsigned long) &rfc);
2408 if (result == L2CAP_CONF_SUCCESS) {
2409 /* Configure output options and let the other side know
2410 * which ones we don't like. */
2412 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2413 result = L2CAP_CONF_UNACCEPT;
2416 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2418 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2421 case L2CAP_MODE_BASIC:
/* Basic mode never uses an FCS */
2422 pi->fcs = L2CAP_FCS_NONE;
2423 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2426 case L2CAP_MODE_ERTM:
/* Adopt the peer's transmit parameters... */
2427 pi->remote_tx_win = rfc.txwin_size;
2428 pi->remote_max_tx = rfc.max_transmit;
2429 pi->max_pdu_size = rfc.max_pdu_size;
/* ...but we, as acceptor, dictate the timeout values */
2431 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2432 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2434 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2436 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2437 sizeof(rfc), (unsigned long) &rfc);
2441 case L2CAP_MODE_STREAMING:
2442 pi->remote_tx_win = rfc.txwin_size;
2443 pi->max_pdu_size = rfc.max_pdu_size;
2445 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2447 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2448 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: reject, echoing our mode back */
2453 result = L2CAP_CONF_UNACCEPT;
2455 memset(&rfc, 0, sizeof(rfc));
2456 rfc.mode = pi->mode;
2459 if (result == L2CAP_CONF_SUCCESS)
2460 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2462 rsp->scid = cpu_to_le16(pi->dcid);
2463 rsp->result = cpu_to_le16(result);
2464 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration *response* and build a follow-up
 * request into 'data'. Adjusts our view of MTU/flush-timeout/RFC per
 * the peer's counter-proposals; refuses if the peer tries to change
 * an app-mandated mode. On success, records the negotiated ERTM or
 * streaming parameters. Returns the new request length (return line
 * elided in this listing). */
2469 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2471 struct l2cap_pinfo *pi = l2cap_pi(sk);
2472 struct l2cap_conf_req *req = data;
2473 void *ptr = req->data;
/* NOTE(review): rfc is not zero-initialized here; if the response
 * carries no RFC option its fields are read uninitialized below —
 * verify against the full source. */
2476 struct l2cap_conf_rfc rfc;
2478 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2480 while (len >= L2CAP_CONF_OPT_SIZE) {
2481 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2484 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the minimum */
2485 if (val < L2CAP_DEFAULT_MIN_MTU) {
2486 *result = L2CAP_CONF_UNACCEPT;
2487 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2490 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2493 case L2CAP_CONF_FLUSH_TO:
2495 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2499 case L2CAP_CONF_RFC:
2500 if (olen == sizeof(rfc))
2501 memcpy(&rfc, (void *)val, olen);
/* The application mandated this mode: don't let the peer change it */
2503 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2504 rfc.mode != pi->mode)
2505 return -ECONNREFUSED;
2507 pi->mode = rfc.mode;
2510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2511 sizeof(rfc), (unsigned long) &rfc);
/* Config accepted so far: latch the negotiated mode parameters */
2516 if (*result == L2CAP_CONF_SUCCESS) {
2518 case L2CAP_MODE_ERTM:
2519 pi->remote_tx_win = rfc.txwin_size;
2520 pi->retrans_timeout = rfc.retrans_timeout;
2521 pi->monitor_timeout = rfc.monitor_timeout;
2522 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2524 case L2CAP_MODE_STREAMING:
2525 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2530 req->dcid = cpu_to_le16(pi->dcid);
2531 req->flags = cpu_to_le16(0x0000);
/* Build a minimal configuration response (no options) with the given
 * result and continuation flags. Returns the response length (return
 * line elided in this listing). */
2536 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2538 struct l2cap_conf_rsp *rsp = data;
2539 void *ptr = rsp->data;
2541 BT_DBG("sk %p", sk);
/* scid in the response is the peer's source CID, i.e. our dcid */
2543 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2544 rsp->result = cpu_to_le16(result);
2545 rsp->flags = cpu_to_le16(flags);
/* Handle an incoming Command Reject. If it answers our outstanding
 * information request (matching ident), treat the feature exchange as
 * done and kick off any channels waiting on it. */
2550 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2552 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only handle "command not understood" (reason 0x0000) */
2554 if (rej->reason != 0x0000)
2557 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2558 cmd->ident == conn->info_ident) {
2559 del_timer(&conn->info_timer);
2561 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2562 conn->info_ident = 0;
/* Resume connection setup for channels blocked on the info exchange */
2564 l2cap_conn_start(conn);
/* Handle an incoming Connection Request.
 * Finds a listening socket for the PSM, enforces link security (SDP,
 * PSM 0x0001, is exempt), allocates and registers a child channel,
 * and replies with success/pending/refused. If the feature-mask
 * exchange hasn't happened yet, the reply is "pending" and an info
 * request is sent first. */
2570 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2572 struct l2cap_chan_list *list = &conn->chan_list;
2573 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2574 struct l2cap_conn_rsp rsp;
2575 struct sock *sk, *parent;
2576 int result, status = L2CAP_CS_NO_INFO;
2578 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2579 __le16 psm = req->psm;
2581 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2583 /* Check if we have socket listening on psm */
2584 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2586 result = L2CAP_CR_BAD_PSM;
2590 /* Check if the ACL is secure enough (if not SDP) */
2591 if (psm != cpu_to_le16(0x0001) &&
2592 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure disconnect reason */
2593 conn->disc_reason = 0x05;
2594 result = L2CAP_CR_SEC_BLOCK;
2598 result = L2CAP_CR_NO_MEM;
2600 /* Check for backlog size */
2601 if (sk_acceptq_is_full(parent)) {
2602 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2606 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2610 write_lock_bh(&list->lock);
2612 /* Check if we already have channel with that dcid */
2613 if (__l2cap_get_chan_by_dcid(list, scid)) {
2614 write_unlock_bh(&list->lock);
2615 sock_set_flag(sk, SOCK_ZAPPED);
2616 l2cap_sock_kill(sk);
/* Hold the ACL as long as this channel exists */
2620 hci_conn_hold(conn->hcon);
2622 l2cap_sock_init(sk, parent);
2623 bacpy(&bt_sk(sk)->src, conn->src);
2624 bacpy(&bt_sk(sk)->dst, conn->dst);
2625 l2cap_pi(sk)->psm = psm;
/* Peer's source CID becomes our destination CID */
2626 l2cap_pi(sk)->dcid = scid;
2628 __l2cap_chan_add(conn, sk, parent);
2629 dcid = l2cap_pi(sk)->scid;
2631 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Save the ident so a deferred response can reuse it later */
2633 l2cap_pi(sk)->ident = cmd->ident;
2635 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2636 if (l2cap_check_security(sk)) {
/* Application wants to decide on accept(): report pending */
2637 if (bt_sk(sk)->defer_setup) {
2638 sk->sk_state = BT_CONNECT2;
2639 result = L2CAP_CR_PEND;
2640 status = L2CAP_CS_AUTHOR_PEND;
2641 parent->sk_data_ready(parent, 0);
2643 sk->sk_state = BT_CONFIG;
2644 result = L2CAP_CR_SUCCESS;
2645 status = L2CAP_CS_NO_INFO;
/* Security procedure still in progress */
2648 sk->sk_state = BT_CONNECT2;
2649 result = L2CAP_CR_PEND;
2650 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not finished yet: answer pending for now */
2653 sk->sk_state = BT_CONNECT2;
2654 result = L2CAP_CR_PEND;
2655 status = L2CAP_CS_NO_INFO;
2658 write_unlock_bh(&list->lock);
2661 bh_unlock_sock(parent);
2664 rsp.scid = cpu_to_le16(scid);
2665 rsp.dcid = cpu_to_le16(dcid);
2666 rsp.result = cpu_to_le16(result);
2667 rsp.status = cpu_to_le16(status);
2668 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this ACL: trigger the feature-mask exchange */
2670 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2671 struct l2cap_info_req info;
2672 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2674 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2675 conn->info_ident = l2cap_get_ident(conn);
2677 mod_timer(&conn->info_timer, jiffies +
2678 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2680 l2cap_send_cmd(conn, conn->info_ident,
2681 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle an incoming Connection Response. Locate the channel by scid
 * (or, for a pending response without a valid scid, by ident). On
 * success, store the peer's CID and immediately start configuration;
 * on pending, just mark the channel; otherwise tear it down. */
2687 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2689 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2690 u16 scid, dcid, result, status;
2694 scid = __le16_to_cpu(rsp->scid);
2695 dcid = __le16_to_cpu(rsp->dcid);
2696 result = __le16_to_cpu(rsp->result);
2697 status = __le16_to_cpu(rsp->status);
2699 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2702 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* Fallback lookup by command ident (scid may be 0 on failure/pending) */
2706 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2712 case L2CAP_CR_SUCCESS:
2713 sk->sk_state = BT_CONFIG;
2714 l2cap_pi(sk)->ident = 0;
2715 l2cap_pi(sk)->dcid = dcid;
2716 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2718 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Kick off configuration right away */
2720 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2721 l2cap_build_conf_req(sk, req), req);
2722 l2cap_pi(sk)->num_conf_req++;
/* L2CAP_CR_PEND: remember we are still waiting (case label elided) */
2726 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any other result: connection refused, delete the channel */
2730 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configuration Request. Options may span several
 * requests (continuation flag 0x0001), so they are accumulated in
 * pi->conf_req until complete, then parsed and answered. When both
 * input and output configuration are done, the channel becomes
 * connected; otherwise we send our own config request if we haven't. */
2738 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2740 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2746 dcid = __le16_to_cpu(req->dcid);
2747 flags = __le16_to_cpu(req->flags);
2749 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2751 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Ignore config requests on a channel already being disconnected */
2755 if (sk->sk_state == BT_DISCONN)
2758 /* Reject if config buffer is too small. */
2759 len = cmd_len - sizeof(*req);
2760 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2761 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2762 l2cap_build_conf_rsp(sk, rsp,
2763 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option list */
2768 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2769 l2cap_pi(sk)->conf_len += len;
2771 if (flags & 0x0001) {
2772 /* Incomplete config. Send empty response. */
2773 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2774 l2cap_build_conf_rsp(sk, rsp,
2775 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2779 /* Complete config. */
2780 len = l2cap_parse_conf_req(sk, rsp);
/* Parse error (e.g. mode refused): drop the connection */
2782 l2cap_send_disconn_req(conn, sk);
2786 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2787 l2cap_pi(sk)->num_conf_rsp++;
2789 /* Reset config buffer. */
2790 l2cap_pi(sk)->conf_len = 0;
2792 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: the channel is now usable */
2795 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Use CRC16 FCS unless both sides agreed to drop it */
2796 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2797 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2798 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2800 sk->sk_state = BT_CONNECTED;
2802 l2cap_pi(sk)->next_tx_seq = 0;
2803 l2cap_pi(sk)->expected_tx_seq = 0;
2804 __skb_queue_head_init(TX_QUEUE(sk));
2805 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2806 l2cap_ertm_init(sk);
2808 l2cap_chan_ready(sk);
/* We haven't sent our own config request yet: do it now */
2812 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2814 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2815 l2cap_build_conf_req(sk, buf), buf);
2816 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Configuration Response. On "unaccept", re-parse
 * the peer's counter-proposals (bounded by L2CAP_CONF_MAX_CONF_RSP
 * attempts) and send an adjusted request; on hard failure, start
 * disconnecting. When our side's config is also done, bring the
 * channel up. */
2824 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2826 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2827 u16 scid, flags, result;
2830 scid = __le16_to_cpu(rsp->scid);
2831 flags = __le16_to_cpu(rsp->flags);
2832 result = __le16_to_cpu(rsp->result);
2834 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2835 scid, flags, result);
2837 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2842 case L2CAP_CONF_SUCCESS:
2845 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, but only a bounded number of times */
2846 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2847 int len = cmd->len - sizeof(*rsp);
/* Peer's option list must fit in our request buffer */
2850 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2851 l2cap_send_disconn_req(conn, sk);
2855 /* throw out any old stored conf requests */
2856 result = L2CAP_CONF_SUCCESS;
2857 len = l2cap_parse_conf_rsp(sk, rsp->data,
2860 l2cap_send_disconn_req(conn, sk);
2864 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2865 L2CAP_CONF_REQ, len, req);
2866 l2cap_pi(sk)->num_conf_req++;
2867 if (result != L2CAP_CONF_SUCCESS)
/* Default case (label elided): unrecoverable config failure */
2873 sk->sk_state = BT_DISCONN;
2874 sk->sk_err = ECONNRESET;
/* Give the disconnect 5 seconds to complete */
2875 l2cap_sock_set_timer(sk, HZ * 5);
2876 l2cap_send_disconn_req(conn, sk);
2883 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Mirror of the completion logic in l2cap_config_req */
2885 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2886 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2887 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2888 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2890 sk->sk_state = BT_CONNECTED;
2891 l2cap_pi(sk)->next_tx_seq = 0;
2892 l2cap_pi(sk)->expected_tx_seq = 0;
2893 __skb_queue_head_init(TX_QUEUE(sk));
2894 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2895 l2cap_ertm_init(sk);
2897 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge it, flush all
 * pending TX (and ERTM retransmission state), then delete and kill
 * the channel. */
2905 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2907 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2908 struct l2cap_disconn_rsp rsp;
2912 scid = __le16_to_cpu(req->scid);
2913 dcid = __le16_to_cpu(req->dcid);
2915 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid refers to our local (source) CID */
2917 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2921 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2922 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2923 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2925 sk->sk_shutdown = SHUTDOWN_MASK;
2927 skb_queue_purge(TX_QUEUE(sk));
/* ERTM channels also carry SREJ state and running timers */
2929 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2930 skb_queue_purge(SREJ_QUEUE(sk));
2931 del_timer(&l2cap_pi(sk)->retrans_timer);
2932 del_timer(&l2cap_pi(sk)->monitor_timer);
2935 l2cap_chan_del(sk, ECONNRESET);
2938 l2cap_sock_kill(sk);
/* Handle an incoming Disconnection Response (peer confirmed our
 * disconnect request): flush TX/ERTM state and tear the channel down
 * with no error. */
2942 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2944 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2948 scid = __le16_to_cpu(rsp->scid);
2949 dcid = __le16_to_cpu(rsp->dcid);
2951 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2953 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2957 skb_queue_purge(TX_QUEUE(sk));
/* Same ERTM cleanup as on an incoming disconnect request */
2959 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2960 skb_queue_purge(SREJ_QUEUE(sk));
2961 del_timer(&l2cap_pi(sk)->retrans_timer);
2962 del_timer(&l2cap_pi(sk)->monitor_timer);
/* Error 0: this is a clean, locally-initiated disconnect */
2965 l2cap_chan_del(sk, 0);
2968 l2cap_sock_kill(sk);
/* Handle an incoming Information Request. Answers feature-mask and
 * fixed-channels queries; anything else gets a "not supported"
 * response. */
2972 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2974 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2977 type = __le16_to_cpu(req->type);
2979 BT_DBG("type 0x%4.4x", type);
2981 if (type == L2CAP_IT_FEAT_MASK) {
2983 u32 feat_mask = l2cap_feat_mask;
2984 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2985 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2986 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (guarded by enable_ertm; guard elided) */
2988 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2990 put_unaligned_le32(feat_mask, rsp->data);
2991 l2cap_send_cmd(conn, cmd->ident,
2992 L2CAP_INFO_RSP, sizeof(buf), buf);
2993 } else if (type == L2CAP_IT_FIXED_CHAN) {
2995 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2996 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2997 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte response header */
2998 memcpy(buf + 4, l2cap_fixed_chan, 8);
2999 l2cap_send_cmd(conn, cmd->ident,
3000 L2CAP_INFO_RSP, sizeof(buf), buf);
3002 struct l2cap_info_rsp rsp;
3003 rsp.type = cpu_to_le16(type);
3004 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3005 l2cap_send_cmd(conn, cmd->ident,
3006 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response. After receiving the
 * feature mask, optionally chain a fixed-channels query; once the
 * exchange is complete, mark it done and resume pending channel
 * setups. */
3012 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3014 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3017 type = __le16_to_cpu(rsp->type);
3018 result = __le16_to_cpu(rsp->result);
3020 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A response arrived, so the info-request timeout is no longer needed */
3022 del_timer(&conn->info_timer);
3024 if (type == L2CAP_IT_FEAT_MASK) {
3025 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Remote supports fixed channels: ask which ones */
3027 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3028 struct l2cap_info_req req;
3029 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3031 conn->info_ident = l2cap_get_ident(conn);
3033 l2cap_send_cmd(conn, conn->info_ident,
3034 L2CAP_INFO_REQ, sizeof(req), &req);
3036 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3037 conn->info_ident = 0;
3039 l2cap_conn_start(conn);
3041 } else if (type == L2CAP_IT_FIXED_CHAN) {
3042 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3043 conn->info_ident = 0;
3045 l2cap_conn_start(conn);
/* Demultiplex the signalling channel: mirror the frame to raw sockets,
 * then iterate over each command header in the skb, validate its
 * length/ident, and dispatch to the per-command handler. A handler
 * error produces a Command Reject back to the peer. */
3051 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3053 u8 *data = skb->data;
3055 struct l2cap_cmd_hdr cmd;
/* Raw (sniffer) sockets see every signalling frame */
3058 l2cap_raw_recv(conn, skb);
3060 while (len >= L2CAP_CMD_HDR_SIZE) {
3062 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3063 data += L2CAP_CMD_HDR_SIZE;
3064 len -= L2CAP_CMD_HDR_SIZE;
3066 cmd_len = le16_to_cpu(cmd.len);
3068 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Truncated payload or reserved ident 0: stop parsing this frame */
3070 if (cmd_len > len || !cmd.ident) {
3071 BT_DBG("corrupted command");
3076 case L2CAP_COMMAND_REJ:
3077 l2cap_command_rej(conn, &cmd, data);
3080 case L2CAP_CONN_REQ:
3081 err = l2cap_connect_req(conn, &cmd, data);
3084 case L2CAP_CONN_RSP:
3085 err = l2cap_connect_rsp(conn, &cmd, data);
3088 case L2CAP_CONF_REQ:
/* Config requests also need the raw length for option accumulation */
3089 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3092 case L2CAP_CONF_RSP:
3093 err = l2cap_config_rsp(conn, &cmd, data);
3096 case L2CAP_DISCONN_REQ:
3097 err = l2cap_disconnect_req(conn, &cmd, data);
3100 case L2CAP_DISCONN_RSP:
3101 err = l2cap_disconnect_rsp(conn, &cmd, data);
3104 case L2CAP_ECHO_REQ:
/* Echo simply reflects the payload back with the same ident */
3105 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3108 case L2CAP_ECHO_RSP:
3111 case L2CAP_INFO_REQ:
3112 err = l2cap_information_req(conn, &cmd, data);
3115 case L2CAP_INFO_RSP:
3116 err = l2cap_information_rsp(conn, &cmd, data);
3120 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: tell the peer we rejected the command */
3126 struct l2cap_cmd_rej rej;
3127 BT_DBG("error %d", err);
3129 /* FIXME: Map err to a valid reason */
3130 rej.reason = cpu_to_le16(0);
3131 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received frame when the channel
 * uses L2CAP_FCS_CRC16; a mismatch fails the check (error return line
 * elided in this listing). The CRC covers the L2CAP header, control
 * field and payload. */
3141 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3143 u16 our_fcs, rcv_fcs;
/* CRC input starts 2 bytes of control field before the payload, plus
 * the L2CAP header in front of that */
3144 int hdr_size = L2CAP_HDR_SIZE + 2;
3146 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim off the 2 FCS bytes; the bytes stay readable past skb->len */
3147 skb_trim(skb, skb->len - 2);
3148 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3149 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3151 if (our_fcs != rcv_fcs)
/* ERTM: after (re)gaining the right to send, transmit either pending
 * I-frames, or an RR/RNR supervisory frame carrying the F-bit.
 * If the local side is busy, send RNR+F immediately; otherwise try to
 * flush I-frames, and if none went out, acknowledge with RR (+F if
 * still owed). */
3157 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3159 struct l2cap_pinfo *pi = l2cap_pi(sk);
3162 pi->frames_sent = 0;
/* We owe the peer a final (F=1) frame */
3163 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3165 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3167 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Locally busy: RNR with the F-bit satisfies the obligation */
3168 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3169 l2cap_send_sframe(pi, control);
3170 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Restart retransmission timer if unacked frames are outstanding */
3173 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3174 __mod_retrans_timer();
/* Try to carry the F-bit on outgoing I-frames */
3176 l2cap_ertm_send(sk);
3178 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3179 pi->frames_sent == 0) {
/* Nothing was sent: fall back to an explicit RR */
3180 control |= L2CAP_SUPER_RCV_READY;
3181 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
3182 control |= L2CAP_CTRL_FINAL;
3183 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3185 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ hold queue, keeping
 * the queue ordered by tx_seq so l2cap_check_srej_gap() can release
 * frames in sequence once the gaps are filled.
 * NOTE(review): ordering compares raw tx_seq values; behavior across
 * the modulo-64 sequence wraparound should be confirmed upstream. */
3189 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3191 struct sk_buff *next_skb;
/* Stash reassembly metadata in the skb control block. */
3193 bt_cb(skb)->tx_seq = tx_seq;
3194 bt_cb(skb)->sar = sar;
3196 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: trivial append (guard lines elided from this view). */
3198 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Scan for the first queued frame with a larger sequence number and
 * insert just before it. */
3203 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3204 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3208 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3211 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Largest sequence seen so far: append at the tail. */
3213 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble a (possibly segmented) SDU from I-frames according to the
 * SAR bits of the control field, delivering the complete SDU to the
 * socket receive queue.  CONN_SAR_SDU tracks "reassembly in progress".
 * NOTE(review): body is a sampled excerpt — error paths and some
 * cleanup lines are elided from this view. */
3216 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3218 struct l2cap_pinfo *pi = l2cap_pi(sk);
3219 struct sk_buff *_skb;
3222 switch (control & L2CAP_CTRL_SAR) {
/* Unsegmented SDU: must not arrive mid-reassembly; queue directly. */
3223 case L2CAP_SDU_UNSEGMENTED:
3224 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3229 err = sock_queue_rcv_skb(sk, skb);
/* Start-of-SDU: payload begins with the 2-byte total SDU length. */
3235 case L2CAP_SDU_START:
3236 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3241 pi->sdu_len = get_unaligned_le16(skb->data);
/* Allocate the reassembly buffer for the full SDU.
 * NOTE(review): no NULL check visible before the memcpy below —
 * confirm the elided lines handle allocation failure. */
3244 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3250 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3252 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3253 pi->partial_sdu_len = skb->len;
/* Continuation: append; only valid while reassembly is in progress. */
3257 case L2CAP_SDU_CONTINUE:
3258 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3261 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3263 pi->partial_sdu_len += skb->len;
/* More data than the advertised SDU length is a protocol error. */
3264 if (pi->partial_sdu_len > pi->sdu_len)
/* End-of-SDU (case label elided): append the final fragment and, if
 * the accumulated length matches, deliver a clone to the socket. */
3272 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3275 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3277 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3278 pi->partial_sdu_len += skb->len;
3280 if (pi->partial_sdu_len == pi->sdu_len) {
3281 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3282 err = sock_queue_rcv_skb(sk, _skb);
/* Release frames from the SREJ hold queue that are now in sequence:
 * starting at tx_seq, dequeue consecutive frames, push them through
 * SAR reassembly, and advance buffer_seq_srej (mod 64) for each. */
3296 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3298 struct sk_buff *skb;
3301 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first remaining gap in the sequence. */
3302 if (bt_cb(skb)->tx_seq != tx_seq)
3305 skb = skb_dequeue(SREJ_QUEUE(sk));
/* Rebuild the SAR bits so reassembly sees the original segmentation. */
3306 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3307 l2cap_sar_reassembly_sdu(sk, skb, control);
3308 l2cap_pi(sk)->buffer_seq_srej =
3309 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still outstanding in
 * the SREJ list, stopping once the entry for tx_seq is reached (its
 * removal lines are elided from this view).  Each resent entry is moved
 * to the tail of the list to preserve request order. */
3314 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3316 struct l2cap_pinfo *pi = l2cap_pi(sk);
3317 struct srej_list *l, *tmp;
3320 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3321 if (l->tx_seq == tx_seq) {
3326 control = L2CAP_SUPER_SELECT_REJECT;
3327 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3328 l2cap_send_sframe(pi, control);
3330 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send one SREJ S-frame for every missing sequence number between
 * expected_tx_seq and the just-received tx_seq, recording each request
 * in the SREJ list.  The first SREJ may carry the P-bit if SEND_PBIT
 * was armed by the caller. */
3334 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3336 struct l2cap_pinfo *pi = l2cap_pi(sk);
3337 struct srej_list *new;
3340 while (tx_seq != pi->expected_tx_seq) {
3341 control = L2CAP_SUPER_SELECT_REJECT;
3342 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3343 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3344 control |= L2CAP_CTRL_POLL;
3345 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3347 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc() result is dereferenced without a NULL
 * check — GFP_ATOMIC can fail under memory pressure, giving a NULL
 * pointer dereference here.  Needs an allocation-failure path. */
3349 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3350 new->tx_seq = pi->expected_tx_seq++;
3351 list_add_tail(&new->list, SREJ_LIST(sk));
/* Finally skip past the frame that triggered the SREJ burst. */
3353 pi->expected_tx_seq++;
/* ERTM receive path for I-frames: acknowledge the peer's ReqSeq,
 * detect out-of-sequence TxSeq (entering/continuing SREJ recovery),
 * deliver in-sequence payload through SAR reassembly, and send an RR
 * acknowledgment every NUM_TO_ACK frames.
 * NOTE(review): sampled excerpt — several branch/exit lines elided. */
3356 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3358 struct l2cap_pinfo *pi = l2cap_pi(sk);
3359 u8 tx_seq = __get_txseq(rx_control);
3360 u8 req_seq = __get_reqseq(rx_control);
3362 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3365 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* The piggybacked ReqSeq acknowledges our transmitted frames. */
3367 pi->expected_ack_seq = req_seq;
3368 l2cap_drop_acked_frames(sk);
3370 if (tx_seq == pi->expected_tx_seq)
/* Out-of-sequence frame while SREJ recovery is already active. */
3373 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3374 struct srej_list *first;
3376 first = list_first_entry(SREJ_LIST(sk),
3377 struct srej_list, list);
/* The oldest requested frame arrived: queue it, flush any now
 * in-sequence frames, and retire its SREJ-list entry. */
3378 if (tx_seq == first->tx_seq) {
3379 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3380 l2cap_check_srej_gap(sk, tx_seq);
3382 list_del(&first->list);
/* All gaps filled: leave SREJ recovery. */
3385 if (list_empty(SREJ_LIST(sk))) {
3386 pi->buffer_seq = pi->buffer_seq_srej;
3387 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
/* A different out-of-order frame: hold it; if it matches a pending
 * SREJ entry, resend outstanding SREJs, else request newly missing
 * frames. */
3390 struct srej_list *l;
3391 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3393 list_for_each_entry(l, SREJ_LIST(sk), list) {
3394 if (l->tx_seq == tx_seq) {
3395 l2cap_resend_srejframe(sk, tx_seq);
3399 l2cap_send_srejframe(sk, tx_seq);
/* First out-of-sequence frame: initialize SREJ recovery state and
 * request every missing frame, polling the peer. */
3402 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3404 INIT_LIST_HEAD(SREJ_LIST(sk));
3405 pi->buffer_seq_srej = pi->buffer_seq;
3407 __skb_queue_head_init(SREJ_QUEUE(sk));
3408 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3410 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3412 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence frame (expected_tx_seq path): advance mod 64. */
3417 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3419 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3420 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* F-bit answers our earlier poll: clear REJ state and retransmit
 * from the acknowledged point. */
3424 if (rx_control & L2CAP_CTRL_FINAL) {
3425 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3426 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3428 sk->sk_send_head = TX_QUEUE(sk)->next;
3429 pi->next_tx_seq = pi->expected_ack_seq;
3430 l2cap_ertm_send(sk);
3434 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3436 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Send an RR acknowledgment once per NUM_TO_ACK received frames. */
3440 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3441 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3442 tx_control |= L2CAP_SUPER_RCV_READY;
3443 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3444 l2cap_send_sframe(pi, tx_control);
/* ERTM receive path for supervisory frames (RR/REJ/SREJ/RNR): update
 * acknowledgment state, honor P/F bits, and trigger retransmission or
 * resume transmission as the supervisory function dictates.
 * NOTE(review): sampled excerpt — braces/breaks and some else-arms are
 * elided; control flow annotated from the visible structure only. */
3449 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3451 struct l2cap_pinfo *pi = l2cap_pi(sk);
3452 u8 tx_seq = __get_reqseq(rx_control);
3454 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3456 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
/* RR: peer is ready to receive. */
3457 case L2CAP_SUPER_RCV_READY:
/* RR with P-bit: answer the poll with I/RR/RNR carrying F. */
3458 if (rx_control & L2CAP_CTRL_POLL) {
3459 l2cap_send_i_or_rr_or_rnr(sk);
3460 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* RR with F-bit: answers our poll; ack frames, and if a REJ was
 * outstanding, retransmit from the acknowledged point. */
3462 } else if (rx_control & L2CAP_CTRL_FINAL) {
3463 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3464 pi->expected_ack_seq = tx_seq;
3465 l2cap_drop_acked_frames(sk);
3467 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3468 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3470 sk->sk_send_head = TX_QUEUE(sk)->next;
3471 pi->next_tx_seq = pi->expected_ack_seq;
3472 l2cap_ertm_send(sk);
/* F-bit also terminates the WAIT_F monitor state. */
3475 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3478 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3479 del_timer(&pi->monitor_timer);
3481 if (pi->unacked_frames > 0)
3482 __mod_retrans_timer();
/* Plain RR: acknowledge and resume sending. */
3484 pi->expected_ack_seq = tx_seq;
3485 l2cap_drop_acked_frames(sk);
3487 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3488 (pi->unacked_frames > 0))
3489 __mod_retrans_timer();
3491 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3492 l2cap_ertm_send(sk);
/* REJ: peer rejects everything from ReqSeq onward — go back N. */
3496 case L2CAP_SUPER_REJECT:
3497 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3499 pi->expected_ack_seq = __get_reqseq(rx_control);
3500 l2cap_drop_acked_frames(sk);
3502 if (rx_control & L2CAP_CTRL_FINAL) {
3503 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3504 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3506 sk->sk_send_head = TX_QUEUE(sk)->next;
3507 pi->next_tx_seq = pi->expected_ack_seq;
3508 l2cap_ertm_send(sk);
3511 sk->sk_send_head = TX_QUEUE(sk)->next;
3512 pi->next_tx_seq = pi->expected_ack_seq;
3513 l2cap_ertm_send(sk);
3515 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3516 pi->srej_save_reqseq = tx_seq;
3517 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* SREJ: peer requests retransmission of a single frame. */
3523 case L2CAP_SUPER_SELECT_REJECT:
3524 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3526 if (rx_control & L2CAP_CTRL_POLL) {
3527 pi->expected_ack_seq = tx_seq;
3528 l2cap_drop_acked_frames(sk);
3529 l2cap_retransmit_frame(sk, tx_seq);
3530 l2cap_ertm_send(sk);
3531 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3532 pi->srej_save_reqseq = tx_seq;
3533 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* SREJ with F-bit: retransmit only if this isn't the echo of a
 * request we already acted upon (srej_save_reqseq match). */
3535 } else if (rx_control & L2CAP_CTRL_FINAL) {
3536 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3537 pi->srej_save_reqseq == tx_seq)
3538 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3540 l2cap_retransmit_frame(sk, tx_seq);
3543 l2cap_retransmit_frame(sk, tx_seq);
3544 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3545 pi->srej_save_reqseq = tx_seq;
3546 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* RNR: peer is busy — stop the retransmission timer; if polled,
 * acknowledge with an F-bit RR/RNR. */
3551 case L2CAP_SUPER_RCV_NOT_READY:
3552 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3553 pi->expected_ack_seq = tx_seq;
3554 l2cap_drop_acked_frames(sk);
3556 del_timer(&pi->retrans_timer);
3557 if (rx_control & L2CAP_CTRL_POLL) {
3558 u16 control = L2CAP_CTRL_FINAL;
3559 l2cap_send_rr_or_rnr(pi, control);
/* Deliver an inbound PDU to the connection-oriented channel identified
 * by cid, handling it per the channel mode: basic (queue directly),
 * ERTM (control field + FCS + I/S-frame dispatch), or streaming
 * (no retransmission; resync expected_tx_seq on loss).
 * NOTE(review): sampled excerpt — "len" computation, drops, and the
 * unlock/free exit path are elided from this view. */
3568 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3571 struct l2cap_pinfo *pi;
3575 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3577 BT_DBG("unknown cid 0x%4.4x", cid);
3583 BT_DBG("sk %p, len %d", sk, skb->len);
3585 if (sk->sk_state != BT_CONNECTED)
3589 case L2CAP_MODE_BASIC:
3590 /* If socket recv buffers overflows we drop data here
3591 * which is *bad* because L2CAP has to be reliable.
3592 * But we don't have any other choice. L2CAP doesn't
3593 * provide flow control mechanism. */
/* Drop frames larger than the negotiated incoming MTU. */
3595 if (pi->imtu < skb->len)
3598 if (!sock_queue_rcv_skb(sk, skb))
3602 case L2CAP_MODE_ERTM:
/* First two bytes of the payload are the ERTM control field. */
3603 control = get_unaligned_le16(skb->data)
3607 if (__is_sar_start(control))
3610 if (pi->fcs == L2CAP_FCS_CRC16)
3614 * We can just drop the corrupted I-frame here.
3615 * Receiver will miss it and start proper recovery
3616 * procedures and ask retransmission.
3618 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3621 if (l2cap_check_fcs(pi, skb))
3624 if (__is_iframe(control))
3625 l2cap_data_channel_iframe(sk, control, skb);
3627 l2cap_data_channel_sframe(sk, control, skb);
3631 case L2CAP_MODE_STREAMING:
3632 control = get_unaligned_le16(skb->data);
3636 if (__is_sar_start(control))
3639 if (pi->fcs == L2CAP_FCS_CRC16)
/* S-frames are invalid in streaming mode; oversize PDUs dropped. */
3642 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3645 if (l2cap_check_fcs(pi, skb))
3648 tx_seq = __get_txseq(control);
/* Streaming mode never retransmits: on a sequence gap simply
 * resynchronize to the received frame. */
3650 if (pi->expected_tx_seq == tx_seq)
3651 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3653 pi->expected_tx_seq = (tx_seq + 1) % 64;
3655 l2cap_sar_reassembly_sdu(sk, skb, control);
3660 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) datagram to the socket bound
 * to the given PSM on the local address, subject to state and MTU
 * checks.  (Drop/exit lines elided from this view.) */
3674 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3678 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3682 BT_DBG("sk %p, len %d", sk, skb->len);
3684 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3687 if (l2cap_pi(sk)->imtu < skb->len)
3690 if (!sock_queue_rcv_skb(sk, skb))
/* Route one complete, reassembled L2CAP frame by destination CID:
 * signaling, connectionless (PSM-prefixed), or a data channel. */
3702 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3704 struct l2cap_hdr *lh = (void *) skb->data;
3708 skb_pull(skb, L2CAP_HDR_SIZE);
3709 cid = __le16_to_cpu(lh->cid);
3710 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload; otherwise drop. */
3712 if (len != skb->len) {
3717 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3720 case L2CAP_CID_SIGNALING:
3721 l2cap_sig_channel(conn, skb);
/* Connectionless data carries a 2-byte PSM before the payload. */
3724 case L2CAP_CID_CONN_LESS:
3725 psm = get_unaligned_le16(skb->data);
3727 l2cap_conless_channel(conn, psm, skb);
3731 l2cap_data_channel(conn, cid, skb);
3736 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening L2CAP sockets; an exact local-address match (lm1)
 * takes precedence over a wildcard BDADDR_ANY match (lm2).  Returns a
 * link-mode mask (HCI_LM_ACCEPT, optionally HCI_LM_MASTER), or the
 * elided reject value for non-ACL links. */
3738 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3740 int exact = 0, lm1 = 0, lm2 = 0;
3742 register struct sock *sk;
3742 struct hlist_node *node;
3744 if (type != ACL_LINK)
3747 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3749 /* Find listening sockets and check their link_mode */
3750 read_lock(&l2cap_sk_list.lock);
3751 sk_for_each(sk, node, &l2cap_sk_list.head) {
3752 if (sk->sk_state != BT_LISTEN)
/* Listener bound to this adapter's own address: exact match. */
3755 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3756 lm1 |= HCI_LM_ACCEPT;
3757 if (l2cap_pi(sk)->role_switch)
3758 lm1 |= HCI_LM_MASTER;
/* Listener bound to any address: fallback match. */
3760 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3761 lm2 |= HCI_LM_ACCEPT;
3762 if (l2cap_pi(sk)->role_switch)
3763 lm2 |= HCI_LM_MASTER;
3766 read_unlock(&l2cap_sk_list.lock);
3768 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success attach
 * an l2cap_conn and signal readiness; on failure tear down any pending
 * L2CAP state with the mapped errno. */
3771 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3773 struct l2cap_conn *conn;
3775 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3777 if (hcon->type != ACL_LINK)
3781 conn = l2cap_conn_add(hcon, status);
3783 l2cap_conn_ready(conn);
/* Failure path: propagate the HCI status as an errno. */
3785 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the disconnect reason L2CAP recorded for this
 * ACL link (non-ACL or connection-less links take the elided path). */
3790 static int l2cap_disconn_ind(struct hci_conn *hcon)
3792 struct l2cap_conn *conn = hcon->l2cap_data;
3794 BT_DBG("hcon %p", hcon);
3796 if (hcon->type != ACL_LINK || !conn)
3799 return conn->disc_reason;
/* HCI callback: ACL link went down — tear down the whole l2cap_conn,
 * translating the HCI reason code to an errno for the sockets. */
3802 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3804 BT_DBG("hcon %p reason %d", hcon, reason);
3806 if (hcon->type != ACL_LINK)
3809 l2cap_conn_del(hcon, bt_err(reason));
/* React to a change of link encryption for one SEQPACKET channel:
 * losing encryption on a MEDIUM-security channel starts a 5 s grace
 * timer, on a HIGH-security channel closes it immediately; regaining
 * encryption on a MEDIUM channel cancels the grace timer. */
3814 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3816 if (sk->sk_type != SOCK_SEQPACKET)
3819 if (encrypt == 0x00) {
3820 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3821 l2cap_sock_clear_timer(sk);
3822 l2cap_sock_set_timer(sk, HZ * 5);
3823 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3824 __l2cap_sock_close(sk, ECONNREFUSED);
/* Encryption (re)enabled. */
3826 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3827 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption request finished with
 * "status" (0 = success).  Walk every channel on the connection and
 * advance those that were waiting on security: BT_CONNECT channels
 * send their deferred Connect Request, BT_CONNECT2 channels answer the
 * peer's pending Connect Request with success or a security block.
 * NOTE(review): sampled excerpt — per-socket locking and the
 * !CONNECT_PEND continue path are elided from this view. */
3831 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3833 struct l2cap_chan_list *l;
3834 struct l2cap_conn *conn = hcon->l2cap_data;
3840 l = &conn->chan_list;
3842 BT_DBG("conn %p", conn);
3844 read_lock(&l->lock);
3846 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3849 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
/* Already-established channels only need their encryption state
 * re-validated. */
3854 if (!status && (sk->sk_state == BT_CONNECTED ||
3855 sk->sk_state == BT_CONFIG)) {
3856 l2cap_check_encryption(sk, encrypt);
/* Security completed for an outgoing connect: send the L2CAP
 * Connect Request that was waiting on it. */
3861 if (sk->sk_state == BT_CONNECT) {
3863 struct l2cap_conn_req req;
3864 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3865 req.psm = l2cap_pi(sk)->psm;
3867 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3869 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3870 L2CAP_CONN_REQ, sizeof(req), &req);
3872 l2cap_sock_clear_timer(sk);
3873 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming connect was deferred pending security: answer it now. */
3875 } else if (sk->sk_state == BT_CONNECT2) {
3876 struct l2cap_conn_rsp rsp;
3880 sk->sk_state = BT_CONFIG;
3881 result = L2CAP_CR_SUCCESS;
/* Security failed: refuse and schedule local teardown. */
3883 sk->sk_state = BT_DISCONN;
3884 l2cap_sock_set_timer(sk, HZ / 10);
3885 result = L2CAP_CR_SEC_BLOCK;
3888 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3889 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3890 rsp.result = cpu_to_le16(result);
3891 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3892 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3893 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3899 read_unlock(&l->lock);
/* HCI callback: inbound ACL data.  Reassembles L2CAP frames that span
 * multiple ACL packets: an ACL_START packet carries the L2CAP header
 * (giving the total length); continuation packets are appended into
 * conn->rx_skb until rx_len reaches zero, then the complete frame is
 * handed to l2cap_recv_frame().  Any inconsistency marks the
 * connection unreliable (ECOMM) and drops the partial frame.
 * NOTE(review): sampled excerpt — guard conditions, frees of the
 * inbound skb, and the drop/exit labels are elided from this view. */
3904 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3906 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the l2cap_conn on first data from this link. */
3908 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3911 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3913 if (flags & ACL_START) {
3914 struct l2cap_hdr *hdr;
/* A new start frame while reassembly was in progress means the
 * previous frame was truncated: discard it. */
3918 BT_ERR("Unexpected start frame (len %d)", skb->len);
3919 kfree_skb(conn->rx_skb);
3920 conn->rx_skb = NULL;
3922 l2cap_conn_unreliable(conn, ECOMM);
/* Start frame must at least contain the L2CAP basic header. */
3926 BT_ERR("Frame is too short (len %d)", skb->len);
3927 l2cap_conn_unreliable(conn, ECOMM);
3931 hdr = (struct l2cap_hdr *) skb->data;
3932 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3934 if (len == skb->len) {
3935 /* Complete frame received */
3936 l2cap_recv_frame(conn, skb);
3940 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3942 if (skb->len > len) {
3943 BT_ERR("Frame is too long (len %d, expected len %d)",
3945 l2cap_conn_unreliable(conn, ECOMM);
3949 /* Allocate skb for the complete frame (with header) */
3950 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3954 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still expected. */
3956 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
3958 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3960 if (!conn->rx_len) {
3961 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3962 l2cap_conn_unreliable(conn, ECOMM);
3966 if (skb->len > conn->rx_len) {
3967 BT_ERR("Fragment is too long (len %d, expected %d)",
3968 skb->len, conn->rx_len);
3969 kfree_skb(conn->rx_skb);
3970 conn->rx_skb = NULL;
3972 l2cap_conn_unreliable(conn, ECOMM);
3976 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3978 conn->rx_len -= skb->len;
3980 if (!conn->rx_len) {
3981 /* Complete frame received */
3982 l2cap_recv_frame(conn, conn->rx_skb);
3983 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per L2CAP socket — addresses,
 * state, PSM, CIDs (elided columns), MTUs and security level. */
3992 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3995 struct hlist_node *node;
3997 read_lock_bh(&l2cap_sk_list.lock);
3999 sk_for_each(sk, node, &l2cap_sk_list.head) {
4000 struct l2cap_pinfo *pi = l2cap_pi(sk);
4002 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4003 batostr(&bt_sk(sk)->src),
4004 batostr(&bt_sk(sk)->dst),
4005 sk->sk_state, __le16_to_cpu(pi->psm),
4007 pi->imtu, pi->omtu, pi->sec_level);
4010 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the single-record seq_file to the show routine. */
4015 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4017 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/bluetooth/l2cap entry. */
4020 static const struct file_operations l2cap_debugfs_fops = {
4021 .open = l2cap_debugfs_open,
4023 .llseek = seq_lseek,
4024 .release = single_release,
4027 static struct dentry *l2cap_debugfs;
/* BSD-socket operations backing PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
4029 static const struct proto_ops l2cap_sock_ops = {
4030 .family = PF_BLUETOOTH,
4031 .owner = THIS_MODULE,
4032 .release = l2cap_sock_release,
4033 .bind = l2cap_sock_bind,
4034 .connect = l2cap_sock_connect,
4035 .listen = l2cap_sock_listen,
4036 .accept = l2cap_sock_accept,
4037 .getname = l2cap_sock_getname,
4038 .sendmsg = l2cap_sock_sendmsg,
4039 .recvmsg = l2cap_sock_recvmsg,
4040 .poll = bt_sock_poll,
4041 .ioctl = bt_sock_ioctl,
4042 .mmap = sock_no_mmap,
4043 .socketpair = sock_no_socketpair,
4044 .shutdown = l2cap_sock_shutdown,
4045 .setsockopt = l2cap_sock_setsockopt,
4046 .getsockopt = l2cap_sock_getsockopt
/* Socket-family creation hook registered with the Bluetooth core. */
4049 static const struct net_proto_family l2cap_sock_family_ops = {
4050 .family = PF_BLUETOOTH,
4051 .owner = THIS_MODULE,
4052 .create = l2cap_sock_create,
/* Registration with the HCI layer: L2CAP's lower-layer callbacks. */
4055 static struct hci_proto l2cap_hci_proto = {
4057 .id = HCI_PROTO_L2CAP,
4058 .connect_ind = l2cap_connect_ind,
4059 .connect_cfm = l2cap_connect_cfm,
4060 .disconn_ind = l2cap_disconn_ind,
4061 .disconn_cfm = l2cap_disconn_cfm,
4062 .security_cfm = l2cap_security_cfm,
4063 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the socket family, then the HCI
 * protocol — unwinding in reverse on failure (error labels elided).
 * The debugfs entry is best-effort: failure only logs an error. */
4066 static int __init l2cap_init(void)
4070 err = proto_register(&l2cap_proto, 0);
4074 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4076 BT_ERR("L2CAP socket registration failed");
4080 err = hci_register_proto(&l2cap_hci_proto);
4082 BT_ERR("L2CAP protocol registration failed");
4083 bt_sock_unregister(BTPROTO_L2CAP);
4088 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4089 bt_debugfs, NULL, &l2cap_debugfs_fops);
4091 BT_ERR("Failed to create L2CAP debug file");
4094 BT_INFO("L2CAP ver %s", VERSION);
4095 BT_INFO("L2CAP socket layer initialized");
/* Error path: undo proto_register(). */
4100 proto_unregister(&l2cap_proto);
/* Module exit: tear down debugfs, socket family, HCI protocol and the
 * proto, roughly reversing l2cap_init().
 * NOTE(review): sockets are unregistered before the HCI protocol —
 * the exact reverse of init order would unregister the HCI protocol
 * first; confirm this ordering is intentional. */
4104 static void __exit l2cap_exit(void)
4106 debugfs_remove(l2cap_debugfs);
4108 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4109 BT_ERR("L2CAP socket unregistration failed");
4111 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4112 BT_ERR("L2CAP protocol unregistration failed");
4114 proto_unregister(&l2cap_proto);
/* Exported no-op: callers reference this symbol so that modprobe's
 * dependency resolution pulls in the L2CAP module for them. */
4117 void l2cap_load(void)
4119 /* Dummy function to trigger automatic L2CAP module loading by
4120 * other modules that use L2CAP sockets but don't use any other
4121 * symbols from it. */
4124 EXPORT_SYMBOL(l2cap_load);
4126 module_init(l2cap_init);
4127 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared "static int" at the top of the
 * file but registered here as a "bool" module_param, and max_transmit
 * is "static int" registered as "uint" — both are type mismatches that
 * newer kernels reject at build time; confirm against the variable
 * declarations and fix the declared types or the param types. */
4129 module_param(enable_ertm, bool, 0644);
4130 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4132 module_param(max_transmit, uint, 0644);
4133 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4135 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4136 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4137 MODULE_VERSION(VERSION);
4138 MODULE_LICENSE("GPL");
/* Alias so the module auto-loads for Bluetooth protocol number 0. */
4139 MODULE_ALIAS("bt-proto-0");