2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
77 /* ---- L2CAP timers ---- */
/* sk->sk_timer callback: choose an errno describing why the channel timed
 * out and tear it down.  Connected/config and security-pending connect
 * states report ECONNREFUSED; other states presumably fall back to a
 * default set on an elided line — TODO confirm against full source. */
78 static void l2cap_sock_timeout(unsigned long arg)
80 struct sock *sk = (struct sock *) arg;
83 BT_DBG("sock %p state %d", sk, sk->sk_state);
87 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
88 reason = ECONNREFUSED;
89 else if (sk->sk_state == BT_CONNECT &&
90 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
91 reason = ECONNREFUSED;
95 __l2cap_sock_close(sk, reason);
/* Arm sk->sk_timer to fire 'timeout' jiffies from now (see
 * l2cap_sock_timeout above). */
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk->sk_timer, if any. */
109 static void l2cap_sock_clear_timer(struct sock *sk)
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
115 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list for a destination CID.
 * Caller must hold the list lock. */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
/* Same scan keyed on the source CID.  Caller must hold the list lock. */
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
136 /* Find channel with given SCID.
137 * Returns locked socket */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
142 s = __l2cap_get_chan_by_scid(l, cid)
145 read_unlock(&l->lock);
/* Scan keyed on the signalling command identifier currently associated
 * with the channel.  Caller must hold the list lock. */
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(); presumably also
 * locks the returned socket on an elided line, as the SCID variant does. */
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
163 s = __l2cap_get_chan_by_ident(l, ident);
166 read_unlock(&l->lock);
/* Pick the first unused source CID in the dynamic range
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk at the head of the doubly-linked channel list.
 * Caller must hold the list write lock. */
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
187 l2cap_pi(l->head)->prev_c = sk;
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
/* Unsplice sk from the channel list, taking the list write lock itself
 * (note: the elided branches must also update l->head when sk is first). */
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
198 write_lock_bh(&l->lock);
203 l2cap_pi(next)->prev_c = prev;
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
/* Attach sk to conn: assign CIDs according to the socket type and link it
 * into the connection's channel list.  When a listening parent is given,
 * the new child is queued on its accept queue.  Caller holds the channel
 * list write lock (see l2cap_chan_add). */
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
213 struct l2cap_chan_list *l = &conn->chan_list;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = remote-initiated disconnect; the default HCI disconnect reason
 * until something more specific is known. */
218 conn->disc_reason = 0x13;
220 l2cap_pi(sk)->conn = conn;
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 __l2cap_chan_link(l, sk);
240 bt_accept_enqueue(parent, sk);
/* Delete channel.
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
250 l2cap_sock_clear_timer(sk);
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
/* Drop the reference the channel held on the ACL link. */
258 hci_conn_put(conn->hcon);
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a parent's accept queue, remove and wake the listener. */
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
271 sk->sk_state_change(sk);
274 /* Service level security */
/* Map the channel's PSM and requested sec_level onto an HCI auth_type and
 * ask the HCI layer to enforce it.  PSM 0x0001 (SDP) never bonds and is
 * demoted from LOW to the dedicated SDP security level. */
275 static inline int l2cap_check_security(struct sock *sk)
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
284 auth_type = HCI_AT_NO_BONDING;
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
297 auth_type = HCI_AT_NO_BONDING;
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-owned range under conn->lock. */
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
310 /* Get next available identificator.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn->lock);
318 if (++conn->tx_ident > 128)
323 spin_unlock_bh(&conn->lock);
/* Build a signalling command (code/ident/payload) and push it out on the
 * ACL link. */
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
332 BT_DBG("code 0x%2.2x", code);
337 return hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame carrying 'control'.  Pending F/P bits
 * owed to the peer are folded in and cleared here.  hlen covers the L2CAP
 * header + 16-bit control word, plus 2 FCS bytes when CRC16 is enabled. */
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
347 if (pi->fcs == L2CAP_FCS_CRC16)
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
/* S-frames are never fragmented, so clamp to the ACL MTU. */
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
355 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
356 control |= L2CAP_CTRL_FINAL;
357 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
360 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
361 control |= L2CAP_CTRL_POLL;
362 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
365 skb = bt_skb_alloc(count, GFP_ATOMIC);
369 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
370 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
371 lh->cid = cpu_to_le16(pi->dcid);
372 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the trailing 2 FCS bytes. */
374 if (pi->fcs == L2CAP_FCS_CRC16) {
375 u16 fcs = crc16(0, (u8 *)lh, count - 2);
376 put_unaligned_le16(fcs, skb_put(skb, 2));
379 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RNR if we are locally busy, RR otherwise, always acknowledging up
 * to buffer_seq in the ReqSeq field. */
382 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
384 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
385 control |= L2CAP_SUPER_RCV_NOT_READY;
387 control |= L2CAP_SUPER_RCV_READY;
389 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
391 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment.  If the remote feature mask is already
 * known (or being fetched) send a Connect Request once security clears;
 * otherwise first issue an Information Request for the feature mask and
 * arm the info timer. */
394 static void l2cap_do_start(struct sock *sk)
396 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
398 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for it to finish. */
399 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
402 if (l2cap_check_security(sk)) {
403 struct l2cap_conn_req req;
404 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
405 req.psm = l2cap_pi(sk)->psm;
407 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
409 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
410 L2CAP_CONN_REQ, sizeof(req), &req);
413 struct l2cap_info_req req;
414 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
416 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
417 conn->info_ident = l2cap_get_ident(conn);
419 mod_timer(&conn->info_timer, jiffies +
420 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
422 l2cap_send_cmd(conn, conn->info_ident,
423 L2CAP_INFO_REQ, sizeof(req), &req);
/* Issue a Disconnect Request for sk's channel (dcid/scid pair). */
427 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
429 struct l2cap_disconn_req req;
431 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
432 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
433 l2cap_send_cmd(conn, l2cap_get_ident(conn),
434 L2CAP_DISCONN_REQ, sizeof(req), &req);
437 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection once the feature exchange is done:
 * outgoing channels in BT_CONNECT get their Connect Request sent (security
 * permitting); incoming channels in BT_CONNECT2 get their deferred Connect
 * Response (success, authorization-pending, or authentication-pending). */
438 static void l2cap_conn_start(struct l2cap_conn *conn)
440 struct l2cap_chan_list *l = &conn->chan_list;
443 BT_DBG("conn %p", conn);
447 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Connection-oriented channels only. */
450 if (sk->sk_type != SOCK_SEQPACKET) {
455 if (sk->sk_state == BT_CONNECT) {
456 if (l2cap_check_security(sk)) {
457 struct l2cap_conn_req req;
458 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
459 req.psm = l2cap_pi(sk)->psm;
461 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
463 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
464 L2CAP_CONN_REQ, sizeof(req), &req);
466 } else if (sk->sk_state == BT_CONNECT2) {
467 struct l2cap_conn_rsp rsp;
468 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
469 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
471 if (l2cap_check_security(sk)) {
472 if (bt_sk(sk)->defer_setup) {
/* DEFER_SETUP: hold at "authorization pending" until userspace accepts. */
473 struct sock *parent = bt_sk(sk)->parent;
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
476 parent->sk_data_ready(parent, 0);
479 sk->sk_state = BT_CONFIG;
480 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
481 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
484 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
485 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
488 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
489 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
495 read_unlock(&l->lock);
/* ACL link came up: raw/dgram channels become connected immediately;
 * connection-oriented channels in BT_CONNECT start the L2CAP handshake. */
498 static void l2cap_conn_ready(struct l2cap_conn *conn)
500 struct l2cap_chan_list *l = &conn->chan_list;
503 BT_DBG("conn %p", conn);
507 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
510 if (sk->sk_type != SOCK_SEQPACKET) {
511 l2cap_sock_clear_timer(sk);
512 sk->sk_state = BT_CONNECTED;
513 sk->sk_state_change(sk);
514 } else if (sk->sk_state == BT_CONNECT)
520 read_unlock(&l->lock);
523 /* Notify sockets that we cannot guaranty reliability anymore */
/* Baseband errors: report err on every channel that asked for
 * force_reliable semantics. */
524 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
526 struct l2cap_chan_list *l = &conn->chan_list;
529 BT_DBG("conn %p", conn);
533 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
534 if (l2cap_pi(sk)->force_reliable)
538 read_unlock(&l->lock);
/* Information Request timed out: mark the feature exchange done (with
 * whatever we have) and let pending channels proceed. */
541 static void l2cap_info_timeout(unsigned long arg)
543 struct l2cap_conn *conn = (void *) arg;
545 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
546 conn->info_ident = 0;
548 l2cap_conn_start(conn);
/* Return the L2CAP state attached to an ACL link, allocating and
 * initializing it on first use (returns the existing one otherwise). */
551 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
553 struct l2cap_conn *conn = hcon->l2cap_data;
558 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
562 hcon->l2cap_data = conn;
565 BT_DBG("hcon %p conn %p", hcon, conn);
567 conn->mtu = hcon->hdev->acl_mtu;
568 conn->src = &hcon->hdev->bdaddr;
569 conn->dst = &hcon->dst;
573 spin_lock_init(&conn->lock);
574 rwlock_init(&conn->chan_list.lock);
576 setup_timer(&conn->info_timer, l2cap_info_timeout,
577 (unsigned long) conn);
/* Default disconnect reason: remote user terminated connection. */
579 conn->disc_reason = 0x13;
/* Tear down all L2CAP state on an ACL link: drop any partial reassembly
 * buffer, delete every channel with 'err', cancel the info timer, and
 * detach from the hci_conn. */
584 static void l2cap_conn_del(struct hci_conn *hcon, int err)
586 struct l2cap_conn *conn = hcon->l2cap_data;
592 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
594 kfree_skb(conn->rx_skb);
597 while ((sk = conn->chan_list.head)) {
599 l2cap_chan_del(sk, err);
604 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
605 del_timer_sync(&conn->info_timer);
607 hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel under the chan_list write lock. */
611 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
613 struct l2cap_chan_list *l = &conn->chan_list;
614 write_lock_bh(&l->lock);
615 __l2cap_chan_add(conn, sk, parent);
616 write_unlock_bh(&l->lock);
619 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this PSM (sport) and source address.
 * Caller holds l2cap_sk_list.lock. */
620 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
623 struct hlist_node *node;
624 sk_for_each(sk, node, &l2cap_sk_list.head)
625 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
632 /* Find socket with psm and source bdaddr.
633 * Returns closest match.
/* Prefer an exact source-address match; fall back to a BDADDR_ANY
 * listener (sk1).  'state', when non-zero, filters on sk_state. */
635 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
637 struct sock *sk = NULL, *sk1 = NULL;
638 struct hlist_node *node;
640 sk_for_each(sk, node, &l2cap_sk_list.head) {
641 if (state && sk->sk_state != state)
644 if (l2cap_pi(sk)->psm == psm) {
646 if (!bacmp(&bt_sk(sk)->src, src))
650 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
654 return node ? sk : sk1;
657 /* Find socket with given address (psm, src).
658 * Returns locked socket */
659 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
662 read_lock(&l2cap_sk_list.lock);
663 s = __l2cap_get_sock_by_psm(state, psm, src);
666 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: drop any queued receive/transmit skbs. */
670 static void l2cap_sock_destruct(struct sock *sk)
674 skb_queue_purge(&sk->sk_receive_queue);
675 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then zap the
 * parent itself. */
678 static void l2cap_sock_cleanup_listen(struct sock *parent)
682 BT_DBG("parent %p", parent);
684 /* Close not yet accepted channels */
685 while ((sk = bt_accept_dequeue(parent, NULL)))
686 l2cap_sock_close(sk);
688 parent->sk_state = BT_CLOSED;
689 sock_set_flag(parent, SOCK_ZAPPED);
692 /* Kill socket (only if zapped and orphan)
693 * Must be called on unlocked socket.
695 static void l2cap_sock_kill(struct sock *sk)
/* Bail unless the socket is both zapped and orphaned. */
697 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
700 BT_DBG("sk %p state %d", sk, sk->sk_state);
702 /* Kill poor orphan */
703 bt_sock_unlink(&l2cap_sk_list, sk);
704 sock_set_flag(sk, SOCK_DEAD);
/* State-machine driven close.  Listening sockets clean their accept queue;
 * established SEQPACKET channels send a Disconnect Request and wait in
 * BT_DISCONN; half-open incoming channels answer the pending Connect
 * Request with a rejection; everything else is deleted outright.
 * Caller holds the socket lock. */
708 static void __l2cap_sock_close(struct sock *sk, int reason)
710 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
712 switch (sk->sk_state) {
714 l2cap_sock_cleanup_listen(sk);
719 if (sk->sk_type == SOCK_SEQPACKET) {
720 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Graceful close: request disconnect, bounded by sk_sndtimeo. */
722 sk->sk_state = BT_DISCONN;
723 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
724 l2cap_send_disconn_req(conn, sk);
726 l2cap_chan_del(sk, reason);
730 if (sk->sk_type == SOCK_SEQPACKET) {
731 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
732 struct l2cap_conn_rsp rsp;
/* Reject the pending incoming connection: security block when the
 * app deferred setup, bad PSM otherwise. */
735 if (bt_sk(sk)->defer_setup)
736 result = L2CAP_CR_SEC_BLOCK;
738 result = L2CAP_CR_BAD_PSM;
740 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
741 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
742 rsp.result = cpu_to_le16(result);
743 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
744 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
745 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
747 l2cap_chan_del(sk, reason);
752 l2cap_chan_del(sk, reason);
756 sock_set_flag(sk, SOCK_ZAPPED);
761 /* Must be called on unlocked socket. */
762 static void l2cap_sock_close(struct sock *sk)
764 l2cap_sock_clear_timer(sk);
766 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a fresh l2cap socket.  A child created by accept() inherits
 * its tunables from the listening parent; a standalone socket gets the
 * compiled-in defaults. */
771 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
773 struct l2cap_pinfo *pi = l2cap_pi(sk);
778 sk->sk_type = parent->sk_type;
779 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
781 pi->imtu = l2cap_pi(parent)->imtu;
782 pi->omtu = l2cap_pi(parent)->omtu;
783 pi->mode = l2cap_pi(parent)->mode;
784 pi->fcs = l2cap_pi(parent)->fcs;
785 pi->tx_win = l2cap_pi(parent)->tx_win;
786 pi->sec_level = l2cap_pi(parent)->sec_level;
787 pi->role_switch = l2cap_pi(parent)->role_switch;
788 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: compiled-in defaults. */
790 pi->imtu = L2CAP_DEFAULT_MTU;
792 pi->mode = L2CAP_MODE_BASIC;
793 pi->fcs = L2CAP_FCS_CRC16;
794 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
795 pi->sec_level = BT_SECURITY_LOW;
797 pi->force_reliable = 0;
800 /* Default config options */
802 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* ERTM queues/lists start empty. */
803 skb_queue_head_init(TX_QUEUE(sk));
804 skb_queue_head_init(SREJ_QUEUE(sk));
805 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk allocations to hold l2cap_pinfo. */
808 static struct proto l2cap_proto = {
810 .owner = THIS_MODULE,
811 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally set up an l2cap sock: destructor, connect
 * timeout, state timer, and registration on the global socket list. */
814 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
818 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
822 sock_init_data(sock, sk);
823 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
825 sk->sk_destruct = l2cap_sock_destruct;
826 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
828 sock_reset_flag(sk, SOCK_ZAPPED);
830 sk->sk_protocol = proto;
831 sk->sk_state = BT_OPEN;
833 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
835 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type (SEQPACKET/DGRAM/RAW only;
 * RAW additionally needs CAP_NET_RAW), then allocate and init the sock. */
839 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
844 BT_DBG("sock %p", sock);
846 sock->state = SS_UNCONNECTED;
848 if (sock->type != SOCK_SEQPACKET &&
849 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
850 return -ESOCKTNOSUPPORT;
852 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
855 sock->ops = &l2cap_sock_ops;
857 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
861 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, require BT_OPEN
 * state, privileged PSMs (< 0x1001) need CAP_NET_BIND_SERVICE, reject a
 * duplicate psm/src binding, then record source address and PSM. */
865 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
867 struct sock *sk = sock->sk;
868 struct sockaddr_l2 la;
873 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs: zero-fill, then copy what the caller gave. */
876 memset(&la, 0, sizeof(la));
877 len = min_t(unsigned int, sizeof(la), alen);
878 memcpy(&la, addr, len);
885 if (sk->sk_state != BT_OPEN) {
890 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
891 !capable(CAP_NET_BIND_SERVICE)) {
896 write_lock_bh(&l2cap_sk_list.lock);
898 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
901 /* Save source address */
902 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
903 l2cap_pi(sk)->psm = la.l2_psm;
904 l2cap_pi(sk)->sport = la.l2_psm;
905 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) are exempt from normal security. */
907 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
908 __le16_to_cpu(la.l2_psm) == 0x0003)
909 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
912 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve the route to dst, pick an HCI auth_type from socket type, PSM
 * and sec_level, create (or reuse) the ACL link, attach the channel and
 * enter BT_CONNECT.  If the ACL is already up, non-SEQPACKET sockets are
 * connected immediately.  Returns 0 or a negative errno. */
919 static int l2cap_do_connect(struct sock *sk)
921 bdaddr_t *src = &bt_sk(sk)->src;
922 bdaddr_t *dst = &bt_sk(sk)->dst;
923 struct l2cap_conn *conn;
924 struct hci_conn *hcon;
925 struct hci_dev *hdev;
929 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
932 hdev = hci_get_route(dst, src);
934 return -EHOSTUNREACH;
936 hci_dev_lock_bh(hdev);
/* Raw sockets: dedicated bonding (used e.g. for pairing utilities). */
940 if (sk->sk_type == SOCK_RAW) {
941 switch (l2cap_pi(sk)->sec_level) {
942 case BT_SECURITY_HIGH:
943 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
945 case BT_SECURITY_MEDIUM:
946 auth_type = HCI_AT_DEDICATED_BONDING;
949 auth_type = HCI_AT_NO_BONDING;
/* SDP PSM: never bond, demote LOW to the SDP security level. */
952 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
953 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
954 auth_type = HCI_AT_NO_BONDING_MITM;
956 auth_type = HCI_AT_NO_BONDING;
958 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
959 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
961 switch (l2cap_pi(sk)->sec_level) {
962 case BT_SECURITY_HIGH:
963 auth_type = HCI_AT_GENERAL_BONDING_MITM;
965 case BT_SECURITY_MEDIUM:
966 auth_type = HCI_AT_GENERAL_BONDING;
969 auth_type = HCI_AT_NO_BONDING;
974 hcon = hci_connect(hdev, ACL_LINK, dst,
975 l2cap_pi(sk)->sec_level, auth_type);
979 conn = l2cap_conn_add(hcon, 0);
987 /* Update source addr of the socket */
988 bacpy(src, conn->src);
990 l2cap_chan_add(conn, sk, NULL);
992 sk->sk_state = BT_CONNECT;
993 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
995 if (hcon->state == BT_CONNECTED) {
996 if (sk->sk_type != SOCK_SEQPACKET) {
997 l2cap_sock_clear_timer(sk);
998 sk->sk_state = BT_CONNECTED;
1004 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address (SEQPACKET requires a PSM) and the
 * configured mode (ERTM/streaming presumably gated on enable_ertm in an
 * elided branch — TODO confirm), start the connect, then optionally wait
 * for BT_CONNECTED subject to O_NONBLOCK / sndtimeo. */
1009 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1011 struct sock *sk = sock->sk;
1012 struct sockaddr_l2 la;
1015 BT_DBG("sk %p", sk);
1017 if (!addr || alen < sizeof(addr->sa_family) ||
1018 addr->sa_family != AF_BLUETOOTH)
1021 memset(&la, 0, sizeof(la));
1022 len = min_t(unsigned int, sizeof(la), alen);
1023 memcpy(&la, addr, len);
1030 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1035 switch (l2cap_pi(sk)->mode) {
1036 case L2CAP_MODE_BASIC:
1038 case L2CAP_MODE_ERTM:
1039 case L2CAP_MODE_STREAMING:
1048 switch (sk->sk_state) {
1052 /* Already connecting */
1056 /* Already connected */
1069 /* Set destination address and psm */
1070 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1071 l2cap_pi(sk)->psm = la.l2_psm;
1073 err = l2cap_do_connect(sk);
1078 err = bt_sock_wait_state(sk, BT_CONNECTED,
1079 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): SEQPACKET only, from BT_BOUND.  If the socket was bound
 * without a PSM, autobind the first free odd dynamic PSM (0x1001..).
 * Mode check mirrors connect(). */
1085 static int l2cap_sock_listen(struct socket *sock, int backlog)
1087 struct sock *sk = sock->sk;
1090 BT_DBG("sk %p backlog %d", sk, backlog);
1094 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1099 switch (l2cap_pi(sk)->mode) {
1100 case L2CAP_MODE_BASIC:
1102 case L2CAP_MODE_ERTM:
1103 case L2CAP_MODE_STREAMING:
1112 if (!l2cap_pi(sk)->psm) {
1113 bdaddr_t *src = &bt_sk(sk)->src;
1118 write_lock_bh(&l2cap_sk_list.lock);
/* Dynamic PSMs are odd-valued; step by 2. */
1120 for (psm = 0x1001; psm < 0x1100; psm += 2)
1121 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1122 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1123 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1128 write_unlock_bh(&l2cap_sk_list.lock);
1134 sk->sk_max_ack_backlog = backlog;
1135 sk->sk_ack_backlog = 0;
1136 sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one wait loop on the listener's accept queue,
 * honoring O_NONBLOCK (rcvtimeo) and signals; re-checks BT_LISTEN after
 * every wakeup since the listener may have been closed. */
1143 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1145 DECLARE_WAITQUEUE(wait, current);
1146 struct sock *sk = sock->sk, *nsk;
1150 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1152 if (sk->sk_state != BT_LISTEN) {
1157 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1159 BT_DBG("sk %p timeo %ld", sk, timeo);
1161 /* Wait for an incoming connection. (wake-one). */
1162 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1163 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1164 set_current_state(TASK_INTERRUPTIBLE);
/* Sleep with the socket unlocked (release is on an elided line). */
1171 timeo = schedule_timeout(timeo);
1172 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1174 if (sk->sk_state != BT_LISTEN) {
1179 if (signal_pending(current)) {
1180 err = sock_intr_errno(timeo);
1184 set_current_state(TASK_RUNNING);
1185 remove_wait_queue(sk_sleep(sk), &wait);
1190 newsock->state = SS_CONNECTED;
1192 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: peer != 0 reports dst/psm/dcid, otherwise the
 * local sport/src/scid. */
1199 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1201 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1202 struct sock *sk = sock->sk;
1204 BT_DBG("sock %p, sk %p", sock, sk);
1206 addr->sa_family = AF_BLUETOOTH;
1207 *len = sizeof(struct sockaddr_l2);
1210 la->l2_psm = l2cap_pi(sk)->psm;
1211 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1212 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1214 la->l2_psm = l2cap_pi(sk)->sport;
1215 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1216 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: the peer has not answered our poll.  Give up and
 * disconnect after remote_max_tx retries, otherwise re-poll (RR/RNR with
 * the P bit) and re-arm the monitor timer. */
1222 static void l2cap_monitor_timeout(unsigned long arg)
1224 struct sock *sk = (void *) arg;
1228 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1229 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1234 l2cap_pi(sk)->retry_count++;
1235 __mod_monitor_timer();
1237 control = L2CAP_CTRL_POLL;
1238 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: an I-frame went unacknowledged.  Switch to
 * the monitor timer, enter WAIT_F and poll the peer. */
1242 static void l2cap_retrans_timeout(unsigned long arg)
1244 struct sock *sk = (void *) arg;
1248 l2cap_pi(sk)->retry_count = 1;
1249 __mod_monitor_timer();
1251 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1253 control = L2CAP_CTRL_POLL;
1254 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Pop acknowledged I-frames (up to expected_ack_seq) off TX_QUEUE and
 * stop the retransmission timer once nothing is outstanding. */
1258 static void l2cap_drop_acked_frames(struct sock *sk)
1260 struct sk_buff *skb;
1262 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1263 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1266 skb = skb_dequeue(TX_QUEUE(sk));
1269 l2cap_pi(sk)->unacked_frames--;
1272 if (!l2cap_pi(sk)->unacked_frames)
1273 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the HCI layer on this channel's ACL link. */
1278 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1280 struct l2cap_pinfo *pi = l2cap_pi(sk);
1283 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1285 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: clone each queued frame, stamp TxSeq into the
 * clone's control field, recompute the FCS over the clone, send it, and
 * drop the original (streaming mode never retransmits). */
1292 static int l2cap_streaming_send(struct sock *sk)
1294 struct sk_buff *skb, *tx_skb;
1295 struct l2cap_pinfo *pi = l2cap_pi(sk);
1299 while ((skb = sk->sk_send_head)) {
1300 tx_skb = skb_clone(skb, GFP_ATOMIC);
1302 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1303 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1304 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS trails the frame; compute over everything before it. */
1306 if (pi->fcs == L2CAP_FCS_CRC16) {
1307 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1308 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1311 err = l2cap_do_send(sk, tx_skb);
1313 l2cap_send_disconn_req(pi->conn, sk);
/* TxSeq is modulo-64 per the ERTM/streaming control field. */
1317 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1319 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1320 sk->sk_send_head = NULL;
1322 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1324 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the I-frame with the given TxSeq: locate it on TX_QUEUE,
 * clone it, refresh ReqSeq/TxSeq and FCS, and resend.  Disconnects when
 * remote_max_tx retransmissions are exhausted. */
1330 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1332 struct l2cap_pinfo *pi = l2cap_pi(sk);
1333 struct sk_buff *skb, *tx_skb;
1337 skb = skb_peek(TX_QUEUE(sk));
1339 if (bt_cb(skb)->tx_seq != tx_seq) {
1340 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1342 skb = skb_queue_next(TX_QUEUE(sk), skb);
1346 if (pi->remote_max_tx &&
1347 bt_cb(skb)->retries == pi->remote_max_tx) {
1348 l2cap_send_disconn_req(pi->conn, sk);
1352 tx_skb = skb_clone(skb, GFP_ATOMIC);
1353 bt_cb(skb)->retries++;
1354 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1355 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1356 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1357 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1359 if (pi->fcs == L2CAP_FCS_CRC16) {
1360 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1361 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1364 err = l2cap_do_send(sk, tx_skb);
1366 l2cap_send_disconn_req(pi->conn, sk);
/* ERTM transmit path: send new I-frames from TX_QUEUE(sk) while the
 * transmit window has room and the peer is not busy.  Each frame is
 * cloned, the clone gets fresh ReqSeq/TxSeq (and the F bit when owed)
 * plus a recomputed FCS, and the original stays queued for possible
 * retransmission until acknowledged. */
1374 static int l2cap_ertm_send(struct sock *sk)
1376 struct sk_buff *skb, *tx_skb;
1377 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* While a poll is outstanding (WAIT_F) no new I-frames may be sent. */
1381 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1384 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1385 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
/* Too many (re)transmissions of this frame: give up on the channel. */
1387 if (pi->remote_max_tx &&
1388 bt_cb(skb)->retries == pi->remote_max_tx) {
1389 l2cap_send_disconn_req(pi->conn, sk);
1393 tx_skb = skb_clone(skb, GFP_ATOMIC);
1395 bt_cb(skb)->retries++;
/* Patch the control field of the clone that goes on the air. */
1397 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1398 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1399 control |= L2CAP_CTRL_FINAL;
1400 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1402 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1403 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1404 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the clone we just patched.  Use tx_skb->data
 * (not skb->data) to match l2cap_streaming_send() and
 * l2cap_retransmit_frame(): the control word above was written through
 * tx_skb, and the FCS must cover and trail the transmitted frame.
 * (skb_clone() shares the data buffer, so referencing skb here was a
 * latent inconsistency rather than an on-air difference.) */
1407 if (pi->fcs == L2CAP_FCS_CRC16) {
1408 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1409 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1412 err = l2cap_do_send(sk, tx_skb);
1414 l2cap_send_disconn_req(pi->conn, sk);
/* Frame is in flight: start/refresh the retransmission timer. */
1417 __mod_retrans_timer();
1419 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1420 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1422 pi->unacked_frames++;
/* Advance sk_send_head; the original skb stays on TX_QUEUE until
 * acknowledged (see l2cap_drop_acked_frames). */
1425 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1426 sk->sk_send_head = NULL;
1428 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received I-frames: send RNR while locally busy; otherwise
 * try to piggyback the ack on pending I-frames (l2cap_ertm_send) and fall
 * back to an explicit RR when nothing was sent. */
1436 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1438 struct sock *sk = (struct sock *)pi;
1441 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1443 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1444 control |= L2CAP_SUPER_RCV_NOT_READY;
1445 return l2cap_send_sframe(pi, control);
1446 } else if (l2cap_ertm_send(sk) == 0) {
1447 control |= L2CAP_SUPER_RCV_READY;
1448 return l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame with the F bit for the last entry on the SREJ
 * list (the most recently detected missing TxSeq). */
1453 static int l2cap_send_srejtail(struct sock *sk)
1455 struct srej_list *tail;
1458 control = L2CAP_SUPER_SELECT_REJECT;
1459 control |= L2CAP_CTRL_FINAL;
1461 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1462 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1464 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy a user iovec into skb: 'count' bytes into the headed skb, the
 * remainder as ACL-MTU-sized fragments chained on frag_list. */
1469 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1471 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1472 struct sk_buff **frag;
1475 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1482 /* Continuation fragments (no L2CAP header) */
1483 frag = &skb_shinfo(skb)->frag_list;
1485 count = min_t(unsigned int, conn->mtu, len);
1487 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1490 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1496 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 16-bit PSM, then
 * the user payload copied from the iovec.  Returns skb or ERR_PTR. */
1502 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1504 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1505 struct sk_buff *skb;
1506 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1507 struct l2cap_hdr *lh;
1509 BT_DBG("sk %p len %d", sk, (int)len);
1511 count = min_t(unsigned int, (conn->mtu - hlen), len);
1512 skb = bt_skb_send_alloc(sk, count + hlen,
1513 msg->msg_flags & MSG_DONTWAIT, &err);
1515 return ERR_PTR(-ENOMEM);
1517 /* Create L2CAP header */
1518 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1519 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1520 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1521 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1523 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1524 if (unlikely(err < 0)) {
1526 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload. */
1531 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1533 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1534 struct sk_buff *skb;
1535 int err, count, hlen = L2CAP_HDR_SIZE;
1536 struct l2cap_hdr *lh;
1538 BT_DBG("sk %p len %d", sk, (int)len);
1540 count = min_t(unsigned int, (conn->mtu - hlen), len);
1541 skb = bt_skb_send_alloc(sk, count + hlen,
1542 msg->msg_flags & MSG_DONTWAIT, &err);
1544 return ERR_PTR(-ENOMEM);
1546 /* Create L2CAP header */
1547 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1548 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1549 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1551 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1552 if (unlikely(err < 0)) {
1554 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: header + control word, optional SDU
 * length (SAR start segments, sdulen != 0), payload, and a 2-byte FCS
 * placeholder filled in at transmit time. */
1559 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1561 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1562 struct sk_buff *skb;
1563 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1564 struct l2cap_hdr *lh;
1566 BT_DBG("sk %p len %d", sk, (int)len);
/* hlen grows by 2 for the SDU length (elided branch) and 2 for FCS. */
1571 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1574 count = min_t(unsigned int, (conn->mtu - hlen), len);
1575 skb = bt_skb_send_alloc(sk, count + hlen,
1576 msg->msg_flags & MSG_DONTWAIT, &err);
1578 return ERR_PTR(-ENOMEM);
1580 /* Create L2CAP header */
1581 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1582 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1583 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1584 put_unaligned_le16(control, skb_put(skb, 2));
1586 put_unaligned_le16(sdulen, skb_put(skb, 2));
1588 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1589 if (unlikely(err < 0)) {
1591 return ERR_PTR(err);
/* Reserve space for the FCS; the real value is written at send time. */
1594 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1595 put_unaligned_le16(0, skb_put(skb, 2));
1597 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START / CONTINUE* / END
 * I-frame sequence. Frames are staged on a local queue so a mid-sequence
 * allocation failure can purge everything; on success the whole run is
 * spliced onto the socket TX queue atomically. */
1601 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1603 struct l2cap_pinfo *pi = l2cap_pi(sk);
1604 struct sk_buff *skb;
1605 struct sk_buff_head sar_queue;
1609 __skb_queue_head_init(&sar_queue);
1610 control = L2CAP_SDU_START;
/* START frame carries the total SDU length (passed via sdulen) */
1611 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1613 return PTR_ERR(skb);
1615 __skb_queue_tail(&sar_queue, skb);
1616 len -= pi->remote_mps;
1617 size += pi->remote_mps;
1623 if (len > pi->remote_mps) {
1624 control |= L2CAP_SDU_CONTINUE;
1625 buflen = pi->remote_mps;
1627 control |= L2CAP_SDU_END;
1631 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Drop all frames built so far; partial SDUs must not be queued */
1633 skb_queue_purge(&sar_queue);
1634 return PTR_ERR(skb);
1637 __skb_queue_tail(&sar_queue, skb);
1642 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1643 if (sk->sk_send_head == NULL)
/* sar_queue.next still points at the first spliced skb */
1644 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point. Dispatches on socket type and channel mode:
 * SOCK_DGRAM -> connectionless PDU; BASIC -> single PDU bounded by omtu;
 * ERTM/STREAMING -> unsegmented I-frame or SAR segmentation, then kick the
 * mode-specific transmit engine. */
1649 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1651 struct sock *sk = sock->sk;
1652 struct l2cap_pinfo *pi = l2cap_pi(sk);
1653 struct sk_buff *skb;
1657 BT_DBG("sock %p, sk %p", sock, sk);
1659 err = sock_error(sk);
/* L2CAP has no out-of-band data */
1663 if (msg->msg_flags & MSG_OOB)
1668 if (sk->sk_state != BT_CONNECTED) {
1673 /* Connectionless channel */
1674 if (sk->sk_type == SOCK_DGRAM) {
1675 skb = l2cap_create_connless_pdu(sk, msg, len);
1679 err = l2cap_do_send(sk, skb);
1684 case L2CAP_MODE_BASIC:
1685 /* Check outgoing MTU */
1686 if (len > pi->omtu) {
1691 /* Create a basic PDU */
1692 skb = l2cap_create_basic_pdu(sk, msg, len);
1698 err = l2cap_do_send(sk, skb);
1703 case L2CAP_MODE_ERTM:
1704 case L2CAP_MODE_STREAMING:
1705 /* Entire SDU fits into one PDU */
1706 if (len <= pi->remote_mps) {
1707 control = L2CAP_SDU_UNSEGMENTED;
1708 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1713 __skb_queue_tail(TX_QUEUE(sk), skb);
1714 if (sk->sk_send_head == NULL)
1715 sk->sk_send_head = skb;
1717 /* Segment SDU into multiples PDUs */
1718 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Queued frames are pushed by the mode-specific engine */
1723 if (pi->mode == L2CAP_MODE_STREAMING)
1724 err = l2cap_streaming_send(sk);
1726 err = l2cap_ertm_send(sk);
1733 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point. For a deferred-setup channel still in BT_CONNECT2,
 * the first read completes the connection: send the pending (success)
 * connect response, then fall through to the generic bt_sock receive path. */
1742 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1744 struct sock *sk = sock->sk;
1748 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1749 struct l2cap_conn_rsp rsp;
1751 sk->sk_state = BT_CONFIG;
/* In the response, our dcid/scid map to the peer's scid/dcid */
1753 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1754 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1755 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1756 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1757 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1758 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1766 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS / L2CAP_LM).
 * Options are pre-filled with current values so a short copy_from_user
 * leaves unspecified fields unchanged. */
1769 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1771 struct sock *sk = sock->sk;
1772 struct l2cap_options opts;
1776 BT_DBG("sk %p", sk);
/* Seed with current settings; a partial user struct keeps the rest */
1782 opts.imtu = l2cap_pi(sk)->imtu;
1783 opts.omtu = l2cap_pi(sk)->omtu;
1784 opts.flush_to = l2cap_pi(sk)->flush_to;
1785 opts.mode = l2cap_pi(sk)->mode;
1786 opts.fcs = l2cap_pi(sk)->fcs;
1787 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1789 len = min_t(unsigned int, sizeof(opts), optlen);
1790 if (copy_from_user((char *) &opts, optval, len)) {
1795 l2cap_pi(sk)->imtu = opts.imtu;
1796 l2cap_pi(sk)->omtu = opts.omtu;
1797 l2cap_pi(sk)->mode = opts.mode;
1798 l2cap_pi(sk)->fcs = opts.fcs;
1799 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1803 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits onto sec_level; strongest bit wins since
 * the assignments below overwrite in increasing order */
1808 if (opt & L2CAP_LM_AUTH)
1809 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1810 if (opt & L2CAP_LM_ENCRYPT)
1811 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1812 if (opt & L2CAP_LM_SECURE)
1813 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1815 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1816 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt (BT_SECURITY, BT_DEFER_SETUP); SOL_L2CAP is
 * routed to the legacy handler above. */
1828 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1830 struct sock *sk = sock->sk;
1831 struct bt_security sec;
1835 BT_DBG("sk %p", sk);
1837 if (level == SOL_L2CAP)
1838 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1840 if (level != SOL_BLUETOOTH)
1841 return -ENOPROTOOPT;
/* BT_SECURITY applies to SEQPACKET and RAW sockets only */
1847 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1852 sec.level = BT_SECURITY_LOW;
1854 len = min_t(unsigned int, sizeof(sec), optlen);
1855 if (copy_from_user((char *) &sec, optval, len)) {
1860 if (sec.level < BT_SECURITY_LOW ||
1861 sec.level > BT_SECURITY_HIGH) {
1866 l2cap_pi(sk)->sec_level = sec.level;
1869 case BT_DEFER_SETUP:
/* Deferred setup only makes sense before/while listening */
1870 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1875 if (get_user(opt, (u32 __user *) optval)) {
1880 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS / L2CAP_LM /
 * L2CAP_CONNINFO). */
1892 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1894 struct sock *sk = sock->sk;
1895 struct l2cap_options opts;
1896 struct l2cap_conninfo cinfo;
1900 BT_DBG("sk %p", sk);
1902 if (get_user(len, optlen))
1909 opts.imtu = l2cap_pi(sk)->imtu;
1910 opts.omtu = l2cap_pi(sk)->omtu;
1911 opts.flush_to = l2cap_pi(sk)->flush_to;
1912 opts.mode = l2cap_pi(sk)->mode;
1913 opts.fcs = l2cap_pi(sk)->fcs;
1914 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1916 len = min_t(unsigned int, len, sizeof(opts));
1917 if (copy_to_user(optval, (char *) &opts, len))
/* Translate sec_level back into cumulative legacy link-mode bits;
 * each higher level includes the bits of the one below */
1923 switch (l2cap_pi(sk)->sec_level) {
1924 case BT_SECURITY_LOW:
1925 opt = L2CAP_LM_AUTH;
1927 case BT_SECURITY_MEDIUM:
1928 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1930 case BT_SECURITY_HIGH:
1931 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1939 if (l2cap_pi(sk)->role_switch)
1940 opt |= L2CAP_LM_MASTER;
1942 if (l2cap_pi(sk)->force_reliable)
1943 opt |= L2CAP_LM_RELIABLE;
1945 if (put_user(opt, (u32 __user *) optval))
1949 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or in deferred setup */
1950 if (sk->sk_state != BT_CONNECTED &&
1951 !(sk->sk_state == BT_CONNECT2 &&
1952 bt_sk(sk)->defer_setup)) {
1957 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1958 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1960 len = min_t(unsigned int, len, sizeof(cinfo));
1961 if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt (BT_SECURITY, BT_DEFER_SETUP); SOL_L2CAP goes
 * to the legacy handler. */
1975 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1977 struct sock *sk = sock->sk;
1978 struct bt_security sec;
1981 BT_DBG("sk %p", sk);
1983 if (level == SOL_L2CAP)
1984 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1986 if (level != SOL_BLUETOOTH)
1987 return -ENOPROTOOPT;
1989 if (get_user(len, optlen))
1996 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
2001 sec.level = l2cap_pi(sk)->sec_level;
2003 len = min_t(unsigned int, len, sizeof(sec));
2004 if (copy_to_user(optval, (char *) &sec, len))
2009 case BT_DEFER_SETUP:
2010 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2015 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() entry point: close the channel once (idempotent via the
 * sk_shutdown check) and, with SO_LINGER set, wait up to sk_lingertime
 * for the socket to reach BT_CLOSED. */
2029 static int l2cap_sock_shutdown(struct socket *sock, int how)
2031 struct sock *sk = sock->sk;
2034 BT_DBG("sock %p, sk %p", sock, sk);
2040 if (!sk->sk_shutdown) {
2041 sk->sk_shutdown = SHUTDOWN_MASK;
2042 l2cap_sock_clear_timer(sk);
2043 __l2cap_sock_close(sk, 0);
2045 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2046 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() entry point: full shutdown (both directions) then kill the
 * socket. */
2053 static int l2cap_sock_release(struct socket *sock)
2055 struct sock *sk = sock->sk;
2058 BT_DBG("sock %p, sk %p", sock, sk);
2063 err = l2cap_sock_shutdown(sock, 2);
2066 l2cap_sock_kill(sk);
/* Mark a channel fully configured: clear config state and the setup timer,
 * then wake whichever side is waiting — the connecting socket itself
 * (outgoing) or the listening parent (incoming). */
2070 static void l2cap_chan_ready(struct sock *sk)
2072 struct sock *parent = bt_sk(sk)->parent;
2074 BT_DBG("sk %p, parent %p", sk, parent);
2076 l2cap_pi(sk)->conf_state = 0;
2077 l2cap_sock_clear_timer(sk);
2080 /* Outgoing channel.
2081 * Wake up socket sleeping on connect.
2083 sk->sk_state = BT_CONNECTED;
2084 sk->sk_state_change(sk);
2086 /* Incoming channel.
2087 * Wake up socket sleeping on accept.
2089 parent->sk_data_ready(parent, 0);
2093 /* Copy frame to all raw sockets on that connection */
2094 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2096 struct l2cap_chan_list *l = &conn->chan_list;
2097 struct sk_buff *nskb;
2100 BT_DBG("conn %p", conn);
/* Walk the per-connection channel list under its read lock */
2102 read_lock(&l->lock);
2103 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2104 if (sk->sk_type != SOCK_RAW)
2107 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in softirq context; a failed clone is skipped */
2110 nskb = skb_clone(skb, GFP_ATOMIC);
2114 if (sock_queue_rcv_skb(sk, nskb))
2117 read_unlock(&l->lock);
2120 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling PDU (L2CAP header + command header +
 * payload) on the signalling CID; payload beyond the ACL MTU goes into
 * frag_list continuation skbs without further L2CAP headers. */
2121 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2122 u8 code, u8 ident, u16 dlen, void *data)
2124 struct sk_buff *skb, **frag;
2125 struct l2cap_cmd_hdr *cmd;
2126 struct l2cap_hdr *lh;
2129 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2130 conn, code, ident, dlen);
2132 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2133 count = min_t(unsigned int, conn->mtu, len);
2135 skb = bt_skb_alloc(count, GFP_ATOMIC);
2139 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2140 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2141 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2143 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2146 cmd->len = cpu_to_le16(dlen);
/* Remaining room in the first skb after the two headers */
2149 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2150 memcpy(skb_put(skb, count), data, count);
2156 /* Continuation fragments (no L2CAP header) */
2157 frag = &skb_shinfo(skb)->frag_list;
2159 count = min_t(unsigned int, conn->mtu, len);
2161 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2165 memcpy(skb_put(*frag, count), data, count);
2170 frag = &(*frag)->next;
/* Parse one TLV option from a configuration request/response buffer.
 * Advances *ptr past the option and returns its total size; the value is
 * widened into *val (or, for odd lengths, *val holds a pointer to the raw
 * bytes). */
2180 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2182 struct l2cap_conf_opt *opt = *ptr;
2185 len = L2CAP_CONF_OPT_SIZE + opt->len;
2193 *val = *((u8 *) opt->val);
2197 *val = __le16_to_cpu(*((__le16 *) opt->val));
2201 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Unknown/variable length: hand back a pointer to the payload */
2205 *val = (unsigned long) opt->val;
2209 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one TLV option to a configuration buffer, converting scalar
 * values to little-endian wire order; lengths other than 1/2/4 are copied
 * verbatim from the pointer passed in val. Advances *ptr. */
2213 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2215 struct l2cap_conf_opt *opt = *ptr;
2217 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2224 *((u8 *) opt->val) = val;
2228 *((__le16 *) opt->val) = cpu_to_le16(val);
2232 *((__le32 *) opt->val) = cpu_to_le32(val);
/* val is a pointer for structured options (e.g. RFC) */
2236 memcpy(opt->val, (void *) val, len);
2240 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: send a pending acknowledgement for received
 * I-frames. Runs in timer (softirq) context; arg is the socket pointer. */
2243 static void l2cap_ack_timeout(unsigned long arg)
2245 struct sock *sk = (void *) arg;
2248 l2cap_send_ack(l2cap_pi(sk));
/* Reset ERTM per-channel state: zero the sequence/ack counters, arm the
 * retransmission, monitor and ack timers, and initialise the SREJ queue. */
2252 static inline void l2cap_ertm_init(struct sock *sk)
2254 l2cap_pi(sk)->expected_ack_seq = 0;
2255 l2cap_pi(sk)->unacked_frames = 0;
2256 l2cap_pi(sk)->buffer_seq = 0;
2257 l2cap_pi(sk)->num_to_ack = 0;
2258 l2cap_pi(sk)->frames_sent = 0;
2260 setup_timer(&l2cap_pi(sk)->retrans_timer,
2261 l2cap_retrans_timeout, (unsigned long) sk);
2262 setup_timer(&l2cap_pi(sk)->monitor_timer,
2263 l2cap_monitor_timeout, (unsigned long) sk);
2264 setup_timer(&l2cap_pi(sk)->ack_timer,
2265 l2cap_ack_timeout, (unsigned long) sk);
2267 __skb_queue_head_init(SREJ_QUEUE(sk));
/* Return non-zero when the given channel mode is supported by both our
 * local feature mask (extended with ERTM/streaming when enable_ertm is
 * set) and the peer's advertised feature mask. */
2270 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2272 u32 local_feat_mask = l2cap_feat_mask;
2274 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2277 case L2CAP_MODE_ERTM:
2278 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2279 case L2CAP_MODE_STREAMING:
2280 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to use: keep the requested ERTM/streaming mode if
 * the remote supports it, otherwise fall back to basic mode. */
2286 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2289 case L2CAP_MODE_STREAMING:
2290 case L2CAP_MODE_ERTM:
2291 if (l2cap_mode_supported(mode, remote_feat_mask))
2295 return L2CAP_MODE_BASIC;
/* Build the outgoing configuration request for this channel into 'data':
 * MTU option for basic mode, or an RFC (retransmission-and-flow-control)
 * option plus optional FCS option for ERTM/streaming. Returns the total
 * request length (pointer arithmetic on ptr, tail not visible here). */
2299 static int l2cap_build_conf_req(struct sock *sk, void *data)
2301 struct l2cap_pinfo *pi = l2cap_pi(sk);
2302 struct l2cap_conf_req *req = data;
2303 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2304 void *ptr = req->data;
2306 BT_DBG("sk %p", sk);
/* Only (re)select the mode on the very first config exchange */
2308 if (pi->num_conf_req || pi->num_conf_rsp)
2312 case L2CAP_MODE_STREAMING:
2313 case L2CAP_MODE_ERTM:
2314 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2315 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2316 l2cap_send_disconn_req(pi->conn, sk);
2319 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2325 case L2CAP_MODE_BASIC:
/* MTU option only needed when differing from the spec default */
2326 if (pi->imtu != L2CAP_DEFAULT_MTU)
2327 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2330 case L2CAP_MODE_ERTM:
2331 rfc.mode = L2CAP_MODE_ERTM;
2332 rfc.txwin_size = pi->tx_win;
2333 rfc.max_transmit = max_transmit;
2334 rfc.retrans_timeout = 0;
2335 rfc.monitor_timeout = 0;
2336 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap MPS so a full PDU (headers + FCS ~ 10 bytes) fits the ACL MTU */
2337 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2338 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2340 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2341 sizeof(rfc), (unsigned long) &rfc);
2343 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer FCS off when we want none or the peer already asked for none */
2346 if (pi->fcs == L2CAP_FCS_NONE ||
2347 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2348 pi->fcs = L2CAP_FCS_NONE;
2349 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2353 case L2CAP_MODE_STREAMING:
2354 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming mode uses no retransmissions or timers */
2356 rfc.max_transmit = 0;
2357 rfc.retrans_timeout = 0;
2358 rfc.monitor_timeout = 0;
2359 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2360 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2361 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2363 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2364 sizeof(rfc), (unsigned long) &rfc);
2366 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2369 if (pi->fcs == L2CAP_FCS_NONE ||
2370 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2371 pi->fcs = L2CAP_FCS_NONE;
2372 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2377 /* FIXME: Need actual value of the flush timeout */
2378 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2379 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2381 req->dcid = cpu_to_le16(pi->dcid);
2382 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configuration request (pi->conf_req) and
 * build our response into 'data': walk the TLV options, negotiate mode /
 * MTU / RFC / FCS, and echo back adjusted values. Returns the response
 * length; -ECONNREFUSED when the mode cannot be agreed on. */
2387 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2389 struct l2cap_pinfo *pi = l2cap_pi(sk);
2390 struct l2cap_conf_rsp *rsp = data;
2391 void *ptr = rsp->data;
2392 void *req = pi->conf_req;
2393 int len = pi->conf_len;
2394 int type, hint, olen;
2396 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2397 u16 mtu = L2CAP_DEFAULT_MTU;
2398 u16 result = L2CAP_CONF_SUCCESS;
2400 BT_DBG("sk %p", sk);
2402 while (len >= L2CAP_CONF_OPT_SIZE) {
2403 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory unknown ones are rejected */
2405 hint = type & L2CAP_CONF_HINT;
2406 type &= L2CAP_CONF_MASK;
2409 case L2CAP_CONF_MTU:
2413 case L2CAP_CONF_FLUSH_TO:
2417 case L2CAP_CONF_QOS:
2420 case L2CAP_CONF_RFC:
2421 if (olen == sizeof(rfc))
2422 memcpy(&rfc, (void *) val, olen);
2425 case L2CAP_CONF_FCS:
2426 if (val == L2CAP_FCS_NONE)
2427 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown mandatory option: collect its type for the UNKNOWN response */
2435 result = L2CAP_CONF_UNKNOWN;
2436 *((u8 *) ptr++) = type;
/* Mode selection happens only on the first request/response pair */
2441 if (pi->num_conf_rsp || pi->num_conf_req)
2445 case L2CAP_MODE_STREAMING:
2446 case L2CAP_MODE_ERTM:
2447 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2448 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2449 return -ECONNREFUSED;
2452 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2457 if (pi->mode != rfc.mode) {
2458 result = L2CAP_CONF_UNACCEPT;
2459 rfc.mode = pi->mode;
/* Peer already rejected our counter-proposal once: give up */
2461 if (pi->num_conf_rsp == 1)
2462 return -ECONNREFUSED;
2464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2465 sizeof(rfc), (unsigned long) &rfc);
2469 if (result == L2CAP_CONF_SUCCESS) {
2470 /* Configure output options and let the other side know
2471 * which ones we don't like. */
2473 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2474 result = L2CAP_CONF_UNACCEPT;
2477 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2479 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2482 case L2CAP_MODE_BASIC:
2483 pi->fcs = L2CAP_FCS_NONE;
2484 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2487 case L2CAP_MODE_ERTM:
2488 pi->remote_tx_win = rfc.txwin_size;
2489 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is little-endian wire data being
 * compared against a host-order value, and the host expression
 * (pi->conn->mtu - 10) is converted with le16_to_cpu before being
 * stored into the wire field — both look inverted; cpu_to_le16 on
 * the store (as at line 2338 above) and le16_to_cpu on the compare
 * would be consistent. Confirm against upstream endian fixes. */
2490 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2491 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2493 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): assigning le16_to_cpu(constant) into wire fields —
 * should presumably be cpu_to_le16; verify byte order on the wire. */
2495 rfc.retrans_timeout =
2496 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2497 rfc.monitor_timeout =
2498 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2500 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2502 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2503 sizeof(rfc), (unsigned long) &rfc);
2507 case L2CAP_MODE_STREAMING:
/* NOTE(review): same endianness concern as the ERTM branch above */
2508 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2509 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2511 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2513 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2516 sizeof(rfc), (unsigned long) &rfc);
2521 result = L2CAP_CONF_UNACCEPT;
2523 memset(&rfc, 0, sizeof(rfc));
2524 rfc.mode = pi->mode;
2527 if (result == L2CAP_CONF_SUCCESS)
2528 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2530 rsp->scid = cpu_to_le16(pi->dcid);
2531 rsp->result = cpu_to_le16(result);
2532 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response and build an adjusted follow-up
 * request into 'data'. Accepted/adjusted values are adopted into the
 * channel state; an RFC mode conflict after mode lock-in refuses the
 * connection. Returns the new request length via pointer arithmetic. */
2537 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2539 struct l2cap_pinfo *pi = l2cap_pi(sk);
2540 struct l2cap_conf_req *req = data;
2541 void *ptr = req->data;
2544 struct l2cap_conf_rfc rfc;
2546 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2548 while (len >= L2CAP_CONF_OPT_SIZE) {
2549 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2552 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: unacceptable; clamp */
2553 if (val < L2CAP_DEFAULT_MIN_MTU) {
2554 *result = L2CAP_CONF_UNACCEPT;
2555 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2558 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2561 case L2CAP_CONF_FLUSH_TO:
2563 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2567 case L2CAP_CONF_RFC:
2568 if (olen == sizeof(rfc))
2569 memcpy(&rfc, (void *)val, olen);
/* Once STATE2_DEVICE locked the mode, the peer may not change it */
2571 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2572 rfc.mode != pi->mode)
2573 return -ECONNREFUSED;
2575 pi->mode = rfc.mode;
2578 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2579 sizeof(rfc), (unsigned long) &rfc);
/* On success, adopt the negotiated ERTM/streaming parameters */
2584 if (*result == L2CAP_CONF_SUCCESS) {
2586 case L2CAP_MODE_ERTM:
2587 pi->remote_tx_win = rfc.txwin_size;
2588 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2589 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2590 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2592 case L2CAP_MODE_STREAMING:
2593 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2597 req->dcid = cpu_to_le16(pi->dcid);
2598 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal configuration response header (no options) with the
 * given result and continuation flags. */
2603 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2605 struct l2cap_conf_rsp *rsp = data;
2606 void *ptr = rsp->data;
2608 BT_DBG("sk %p", sk);
/* Our dcid is the peer's source CID, hence it goes into rsp->scid */
2610 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2611 rsp->result = cpu_to_le16(result);
2612 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful config response and adopt the
 * negotiated ERTM/streaming timers and MPS. No-op for basic mode. */
2617 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2619 struct l2cap_pinfo *pi = l2cap_pi(sk);
2622 struct l2cap_conf_rfc rfc;
2624 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2626 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2629 while (len >= L2CAP_CONF_OPT_SIZE) {
2630 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2633 case L2CAP_CONF_RFC:
2634 if (olen == sizeof(rfc))
2635 memcpy(&rfc, (void *)val, olen);
2642 case L2CAP_MODE_ERTM:
2643 pi->remote_tx_win = rfc.txwin_size;
2644 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2645 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2646 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2648 case L2CAP_MODE_STREAMING:
2649 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it answers our outstanding
 * feature-mask info request, treat the probe as done (peer doesn't
 * support it) and kick pending connections. */
2653 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2655 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 == "command not understood"; anything else is ignored here */
2657 if (rej->reason != 0x0000)
2660 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2661 cmd->ident == conn->info_ident) {
2662 del_timer(&conn->info_timer);
2664 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2665 conn->info_ident = 0;
2667 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener on the PSM,
 * enforce link security (except SDP), allocate and register a child
 * socket, decide PEND vs SUCCESS (security / deferred setup), send the
 * Connection Response, and start a feature-mask probe if still needed. */
2673 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2675 struct l2cap_chan_list *list = &conn->chan_list;
2676 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2677 struct l2cap_conn_rsp rsp;
2678 struct sock *sk, *parent;
2679 int result, status = L2CAP_CS_NO_INFO;
2681 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2682 __le16 psm = req->psm;
2684 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2686 /* Check if we have socket listening on psm */
2687 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2689 result = L2CAP_CR_BAD_PSM;
2693 /* Check if the ACL is secure enough (if not SDP) */
2694 if (psm != cpu_to_le16(0x0001) &&
2695 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: authentication failure — remembered for the disconnect reason */
2696 conn->disc_reason = 0x05;
2697 result = L2CAP_CR_SEC_BLOCK;
2701 result = L2CAP_CR_NO_MEM;
2703 /* Check for backlog size */
2704 if (sk_acceptq_is_full(parent)) {
2705 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2709 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2713 write_lock_bh(&list->lock);
2715 /* Check if we already have channel with that dcid */
2716 if (__l2cap_get_chan_by_dcid(list, scid)) {
2717 write_unlock_bh(&list->lock);
2718 sock_set_flag(sk, SOCK_ZAPPED);
2719 l2cap_sock_kill(sk);
/* Child channel holds a reference on the ACL connection */
2723 hci_conn_hold(conn->hcon);
2725 l2cap_sock_init(sk, parent);
2726 bacpy(&bt_sk(sk)->src, conn->src);
2727 bacpy(&bt_sk(sk)->dst, conn->dst);
2728 l2cap_pi(sk)->psm = psm;
2729 l2cap_pi(sk)->dcid = scid;
2731 __l2cap_chan_add(conn, sk, parent);
2732 dcid = l2cap_pi(sk)->scid;
2734 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it */
2736 l2cap_pi(sk)->ident = cmd->ident;
2738 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2739 if (l2cap_check_security(sk)) {
2740 if (bt_sk(sk)->defer_setup) {
2741 sk->sk_state = BT_CONNECT2;
2742 result = L2CAP_CR_PEND;
2743 status = L2CAP_CS_AUTHOR_PEND;
2744 parent->sk_data_ready(parent, 0);
2746 sk->sk_state = BT_CONFIG;
2747 result = L2CAP_CR_SUCCESS;
2748 status = L2CAP_CS_NO_INFO;
2751 sk->sk_state = BT_CONNECT2;
2752 result = L2CAP_CR_PEND;
2753 status = L2CAP_CS_AUTHEN_PEND;
/* Feature probe still outstanding: answer PEND for now */
2756 sk->sk_state = BT_CONNECT2;
2757 result = L2CAP_CR_PEND;
2758 status = L2CAP_CS_NO_INFO;
2761 write_unlock_bh(&list->lock);
2764 bh_unlock_sock(parent);
2767 rsp.scid = cpu_to_le16(scid);
2768 rsp.dcid = cpu_to_le16(dcid);
2769 rsp.result = cpu_to_le16(result);
2770 rsp.status = cpu_to_le16(status);
2771 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2773 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2774 struct l2cap_info_req info;
2775 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2777 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2778 conn->info_ident = l2cap_get_ident(conn);
2780 mod_timer(&conn->info_timer, jiffies +
2781 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2783 l2cap_send_cmd(conn, conn->info_ident,
2784 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle an incoming Connection Response: on success move to BT_CONFIG
 * and fire the first configuration request; on PEND just record it; on
 * any other result tear the channel down. */
2790 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2792 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2793 u16 scid, dcid, result, status;
2797 scid = __le16_to_cpu(rsp->scid);
2798 dcid = __le16_to_cpu(rsp->dcid);
2799 result = __le16_to_cpu(rsp->result);
2800 status = __le16_to_cpu(rsp->status);
2802 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid may be 0 on a rejection; fall back to matching by ident */
2805 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2809 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2815 case L2CAP_CR_SUCCESS:
2816 sk->sk_state = BT_CONFIG;
2817 l2cap_pi(sk)->ident = 0;
2818 l2cap_pi(sk)->dcid = dcid;
2819 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2821 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2823 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2824 l2cap_build_conf_req(sk, req), req);
2825 l2cap_pi(sk)->num_conf_req++;
2829 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2833 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configuration Request. Options may arrive split
 * across several requests (continuation flag 0x0001): buffer them in
 * pi->conf_req until complete, then parse, respond, and — once both
 * directions are configured — bring the channel up. */
2841 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2843 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2849 dcid = __le16_to_cpu(req->dcid);
2850 flags = __le16_to_cpu(req->flags);
2852 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2854 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Ignore config while we are already tearing the channel down */
2858 if (sk->sk_state == BT_DISCONN)
2861 /* Reject if config buffer is too small. */
2862 len = cmd_len - sizeof(*req);
2863 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2864 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2865 l2cap_build_conf_rsp(sk, rsp,
2866 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment's options */
2871 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2872 l2cap_pi(sk)->conf_len += len;
2874 if (flags & 0x0001) {
2875 /* Incomplete config. Send empty response. */
2876 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2877 l2cap_build_conf_rsp(sk, rsp,
2878 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2882 /* Complete config. */
2883 len = l2cap_parse_conf_req(sk, rsp);
2885 l2cap_send_disconn_req(conn, sk);
2889 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2890 l2cap_pi(sk)->num_conf_rsp++;
2892 /* Reset config buffer. */
2893 l2cap_pi(sk)->conf_len = 0;
2895 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2898 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Default to CRC16 unless both sides asked for no FCS */
2899 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2900 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2901 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2903 sk->sk_state = BT_CONNECTED;
2905 l2cap_pi(sk)->next_tx_seq = 0;
2906 l2cap_pi(sk)->expected_tx_seq = 0;
2907 __skb_queue_head_init(TX_QUEUE(sk));
2908 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2909 l2cap_ertm_init(sk);
2911 l2cap_chan_ready(sk);
/* We haven't sent our own config request yet: do it now */
2915 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2917 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2918 l2cap_build_conf_req(sk, buf), buf);
2919 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Configuration Response: adopt accepted parameters,
 * renegotiate on UNACCEPT (bounded by L2CAP_CONF_MAX_CONF_RSP), otherwise
 * disconnect; bring the channel up once both directions are done. */
2927 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2929 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2930 u16 scid, flags, result;
/* NOTE(review): cmd->len is a __le16 read without le16_to_cpu here,
 * unlike the le16_to_cpu(cmd.len) done by the signalling dispatcher —
 * looks like a missing byte-order conversion on big-endian hosts. */
2932 int len = cmd->len - sizeof(*rsp);
2934 scid = __le16_to_cpu(rsp->scid);
2935 flags = __le16_to_cpu(rsp->flags);
2936 result = __le16_to_cpu(rsp->result);
2938 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2939 scid, flags, result);
2941 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2946 case L2CAP_CONF_SUCCESS:
2947 l2cap_conf_rfc_get(sk, rsp->data, len);
2950 case L2CAP_CONF_UNACCEPT:
2951 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Adjusted options must fit our request buffer */
2954 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2955 l2cap_send_disconn_req(conn, sk);
2959 /* throw out any old stored conf requests */
2960 result = L2CAP_CONF_SUCCESS;
2961 len = l2cap_parse_conf_rsp(sk, rsp->data,
2964 l2cap_send_disconn_req(conn, sk);
2968 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2969 L2CAP_CONF_REQ, len, req);
2970 l2cap_pi(sk)->num_conf_req++;
2971 if (result != L2CAP_CONF_SUCCESS)
/* Any other result: abort configuration and disconnect */
2977 sk->sk_state = BT_DISCONN;
2978 sk->sk_err = ECONNRESET;
2979 l2cap_sock_set_timer(sk, HZ * 5);
2980 l2cap_send_disconn_req(conn, sk);
2987 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2989 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* Default to CRC16 unless both sides asked for no FCS */
2990 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2991 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2992 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2994 sk->sk_state = BT_CONNECTED;
2995 l2cap_pi(sk)->next_tx_seq = 0;
2996 l2cap_pi(sk)->expected_tx_seq = 0;
2997 __skb_queue_head_init(TX_QUEUE(sk));
2998 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2999 l2cap_ertm_init(sk);
3001 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a response,
 * purge pending transmissions (and ERTM timers/queues), then delete and
 * kill the channel with ECONNRESET. */
3009 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3011 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3012 struct l2cap_disconn_rsp rsp;
3016 scid = __le16_to_cpu(req->scid);
3017 dcid = __le16_to_cpu(req->dcid);
3019 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look up by our local CID */
3021 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3025 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3026 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3027 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3029 sk->sk_shutdown = SHUTDOWN_MASK;
3031 skb_queue_purge(TX_QUEUE(sk));
3033 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3034 skb_queue_purge(SREJ_QUEUE(sk));
3035 del_timer(&l2cap_pi(sk)->retrans_timer);
3036 del_timer(&l2cap_pi(sk)->monitor_timer);
3037 del_timer(&l2cap_pi(sk)->ack_timer);
3040 l2cap_chan_del(sk, ECONNRESET);
3043 l2cap_sock_kill(sk);
/* Handle an incoming Disconnection Response (to our own request):
 * purge queues and ERTM timers, then delete and kill the channel
 * with no error (clean close). */
3047 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3049 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3053 scid = __le16_to_cpu(rsp->scid);
3054 dcid = __le16_to_cpu(rsp->dcid);
3056 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3058 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3062 skb_queue_purge(TX_QUEUE(sk));
3064 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3065 skb_queue_purge(SREJ_QUEUE(sk));
3066 del_timer(&l2cap_pi(sk)->retrans_timer);
3067 del_timer(&l2cap_pi(sk)->monitor_timer);
3068 del_timer(&l2cap_pi(sk)->ack_timer);
3071 l2cap_chan_del(sk, 0);
3074 l2cap_sock_kill(sk);
/* Handle an incoming Information Request: answer FEAT_MASK with our
 * feature bits (ERTM/streaming added when enabled), FIXED_CHAN with the
 * fixed-channel map, and anything else with NOTSUPP. */
3078 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3080 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3083 type = __le16_to_cpu(req->type);
3085 BT_DBG("type 0x%4.4x", type);
3087 if (type == L2CAP_IT_FEAT_MASK) {
3089 u32 feat_mask = l2cap_feat_mask;
3090 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3091 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3092 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3094 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* 4-byte feature mask follows the info_rsp header, little-endian */
3096 put_unaligned_le32(feat_mask, rsp->data);
3097 l2cap_send_cmd(conn, cmd->ident,
3098 L2CAP_INFO_RSP, sizeof(buf), buf);
3099 } else if (type == L2CAP_IT_FIXED_CHAN) {
3101 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3102 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3103 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap at offset 4 (after type + result) */
3104 memcpy(buf + 4, l2cap_fixed_chan, 8);
3105 l2cap_send_cmd(conn, cmd->ident,
3106 L2CAP_INFO_RSP, sizeof(buf), buf);
3108 struct l2cap_info_rsp rsp;
3109 rsp.type = cpu_to_le16(type);
3110 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3111 l2cap_send_cmd(conn, cmd->ident,
3112 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response: record the peer feature mask,
 * chain a FIXED_CHAN query when the peer advertises fixed channels,
 * and once the probe sequence is finished kick pending connections. */
3118 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3120 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3123 type = __le16_to_cpu(rsp->type);
3124 result = __le16_to_cpu(rsp->result);
3126 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3128 del_timer(&conn->info_timer);
3130 if (type == L2CAP_IT_FEAT_MASK) {
3131 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for the channel map next */
3133 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3134 struct l2cap_info_req req;
3135 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3137 conn->info_ident = l2cap_get_ident(conn);
3139 l2cap_send_cmd(conn, conn->info_ident,
3140 L2CAP_INFO_REQ, sizeof(req), &req);
3142 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3143 conn->info_ident = 0;
3145 l2cap_conn_start(conn);
3147 } else if (type == L2CAP_IT_FIXED_CHAN) {
3148 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3149 conn->info_ident = 0;
3151 l2cap_conn_start(conn);
/*
 * Parse and dispatch every L2CAP signalling command contained in @skb.
 * Each command is a fixed-size header followed by cmd.len payload bytes;
 * malformed commands (length overrun or zero ident) abort parsing, and a
 * handler error triggers an L2CAP Command Reject back to the peer.
 */
3157 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3159 u8 *data = skb->data;
3161 struct l2cap_cmd_hdr cmd;
/* give raw (SOCK_RAW) listeners a copy of the signalling traffic */
3164 l2cap_raw_recv(conn, skb);
3166 while (len >= L2CAP_CMD_HDR_SIZE) {
3168 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3169 data += L2CAP_CMD_HDR_SIZE;
3170 len -= L2CAP_CMD_HDR_SIZE;
3172 cmd_len = le16_to_cpu(cmd.len);
3174 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* reject commands that claim more payload than remains in the frame */
3176 if (cmd_len > len || !cmd.ident) {
3177 BT_DBG("corrupted command");
3182 case L2CAP_COMMAND_REJ:
3183 l2cap_command_rej(conn, &cmd, data);
3186 case L2CAP_CONN_REQ:
3187 err = l2cap_connect_req(conn, &cmd, data);
3190 case L2CAP_CONN_RSP:
3191 err = l2cap_connect_rsp(conn, &cmd, data);
3194 case L2CAP_CONF_REQ:
3195 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3198 case L2CAP_CONF_RSP:
3199 err = l2cap_config_rsp(conn, &cmd, data);
3202 case L2CAP_DISCONN_REQ:
3203 err = l2cap_disconnect_req(conn, &cmd, data);
3206 case L2CAP_DISCONN_RSP:
3207 err = l2cap_disconnect_rsp(conn, &cmd, data);
3210 case L2CAP_ECHO_REQ:
/* echo request: reflect the payload straight back */
3211 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3214 case L2CAP_ECHO_RSP:
3217 case L2CAP_INFO_REQ:
3218 err = l2cap_information_req(conn, &cmd, data);
3221 case L2CAP_INFO_RSP:
3222 err = l2cap_information_rsp(conn, &cmd, data);
3226 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* handler failed: tell the peer we rejected this command */
3232 struct l2cap_cmd_rej rej;
3233 BT_DBG("error %d", err);
3235 /* FIXME: Map err to a valid reason */
3236 rej.reason = cpu_to_le16(0);
3237 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the CRC16 FCS trailing an ERTM/streaming frame.
 * The skb is trimmed by the 2 FCS octets first; the received FCS is then
 * read just past the new length (the bytes are still present in the
 * buffer — skb_trim only adjusts skb->len) and compared against a CRC
 * computed over the L2CAP header plus control field and payload.
 * When FCS is disabled the frame passes unchecked.
 */
3247 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3249 u16 our_fcs, rcv_fcs;
/* CRC also covers the 4-byte L2CAP header and 2-byte control field */
3250 int hdr_size = L2CAP_HDR_SIZE + 2;
3252 if (pi->fcs == L2CAP_FCS_CRC16) {
3253 skb_trim(skb, skb->len - 2);
3254 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3255 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3257 if (our_fcs != rcv_fcs)
/*
 * Answer a remote poll (P-bit): send an RNR when we are locally busy,
 * otherwise flush pending I-frames, and fall back to an RR if nothing
 * at all was transmitted — in every case the peer gets the F-bit.
 */
3263 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3265 struct l2cap_pinfo *pi = l2cap_pi(sk);
3268 pi->frames_sent = 0;
3269 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
/* every frame we send here acknowledges up to buffer_seq */
3271 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3273 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3274 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3275 l2cap_send_sframe(pi, control);
3276 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3279 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3280 __mod_retrans_timer();
/* try to carry the F-bit on a pending I-frame */
3282 l2cap_ertm_send(sk);
3284 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3285 pi->frames_sent == 0) {
/* nothing was sent above, so answer the poll with a bare RR */
3286 control |= L2CAP_SUPER_RCV_READY;
3287 l2cap_send_sframe(pi, control);
/*
 * Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq so reassembly can drain it in order later.
 * NOTE(review): the `>` comparison does not account for modulo-64
 * sequence-number wrap-around — frames that straddle a wrap may be
 * inserted out of order; confirm against the ERTM sequence space.
 */
3291 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3293 struct sk_buff *next_skb;
/* stash sequence number and SAR bits in the skb control block */
3295 bt_cb(skb)->tx_seq = tx_seq;
3296 bt_cb(skb)->sar = sar;
3298 next_skb = skb_peek(SREJ_QUEUE(sk));
/* empty queue: just append */
3300 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* walk until we find the first queued frame with a larger tx_seq */
3305 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3306 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3310 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3313 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* largest tx_seq seen so far: goes at the tail */
3315 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble a (possibly segmented) SDU from incoming I-frames according
 * to the SAR bits in @control, and deliver complete SDUs to the socket
 * receive queue. Tracks the in-progress SDU in pi->sdu and the running
 * length in pi->partial_sdu_len.
 */
3318 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3320 struct l2cap_pinfo *pi = l2cap_pi(sk);
3321 struct sk_buff *_skb;
3324 switch (control & L2CAP_CTRL_SAR) {
3325 case L2CAP_SDU_UNSEGMENTED:
/* an unsegmented SDU must not arrive mid-reassembly */
3326 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3331 err = sock_queue_rcv_skb(sk, skb);
3337 case L2CAP_SDU_START:
/* a new START while one is in progress is a protocol violation */
3338 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* START segment carries the total SDU length in its first 2 bytes */
3343 pi->sdu_len = get_unaligned_le16(skb->data);
3346 if (pi->sdu_len > pi->imtu) {
3351 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3357 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3359 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3360 pi->partial_sdu_len = skb->len;
3364 case L2CAP_SDU_CONTINUE:
/* CONTINUE is only valid while a reassembly is in progress */
3365 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3368 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3370 pi->partial_sdu_len += skb->len;
3371 if (pi->partial_sdu_len > pi->sdu_len)
/* END segment (default-style arm in the elided original switch) */
3379 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3382 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3384 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3385 pi->partial_sdu_len += skb->len;
3387 if (pi->partial_sdu_len > pi->imtu)
3390 if (pi->partial_sdu_len == pi->sdu_len) {
/* SDU complete: hand a clone to the socket receive queue */
3391 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3392 err = sock_queue_rcv_skb(sk, _skb);
/*
 * Drain the SREJ queue after a missing frame arrived: deliver queued
 * frames to reassembly as long as they continue the expected tx_seq
 * run, advancing buffer_seq_srej modulo 64 for each one delivered.
 */
3407 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3409 struct sk_buff *skb;
3412 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* stop at the first gap in the sequence run */
3413 if (bt_cb(skb)->tx_seq != tx_seq)
3416 skb = skb_dequeue(SREJ_QUEUE(sk));
3417 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3418 l2cap_sar_reassembly_sdu(sk, skb, control);
3419 l2cap_pi(sk)->buffer_seq_srej =
3420 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/*
 * Re-send the SREJ S-frame for @tx_seq and rotate the matching entry
 * to the tail of the SREJ list (visible lines suggest entries up to the
 * match are removed/re-queued; exact handling of the elided lines is
 * not visible here).
 */
3425 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3427 struct l2cap_pinfo *pi = l2cap_pi(sk);
3428 struct srej_list *l, *tmp;
3431 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3432 if (l->tx_seq == tx_seq) {
3437 control = L2CAP_SUPER_SELECT_REJECT;
3438 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3439 l2cap_send_sframe(pi, control);
/* keep the entry pending by moving it to the tail */
3441 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * Send one SREJ S-frame per missing sequence number between
 * expected_tx_seq and the received @tx_seq, recording each requested
 * frame in the SREJ list so the retransmissions can be matched later.
 * NOTE(review): the kzalloc(GFP_ATOMIC) result is used without a NULL
 * check — an atomic allocation failure here would dereference NULL.
 */
3445 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3447 struct l2cap_pinfo *pi = l2cap_pi(sk);
3448 struct srej_list *new;
3451 while (tx_seq != pi->expected_tx_seq) {
3452 control = L2CAP_SUPER_SELECT_REJECT;
3453 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3454 l2cap_send_sframe(pi, control);
3456 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3457 new->tx_seq = pi->expected_tx_seq++;
3458 list_add_tail(&new->list, SREJ_LIST(sk));
/* step past the frame that triggered the SREJ run */
3460 pi->expected_tx_seq++;
/*
 * Process one received ERTM I-frame: handle the F-bit, acknowledge the
 * peer's ReqSeq, and either deliver an in-sequence frame to SAR
 * reassembly or park/request out-of-sequence frames via the SREJ
 * machinery. Acks are batched via num_to_ack.
 * NOTE(review): extraction gaps — several original lines (braces,
 * returns, labels) are elided; comments cover visible statements only.
 */
3463 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3465 struct l2cap_pinfo *pi = l2cap_pi(sk);
3466 u8 tx_seq = __get_txseq(rx_control);
3467 u8 req_seq = __get_reqseq(rx_control);
3468 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3471 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer, leave WAIT_F */
3473 if (L2CAP_CTRL_FINAL & rx_control) {
3474 del_timer(&pi->monitor_timer);
3475 if (pi->unacked_frames > 0)
3476 __mod_retrans_timer();
3477 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* piggybacked ReqSeq acknowledges our transmitted frames */
3480 pi->expected_ack_seq = req_seq;
3481 l2cap_drop_acked_frames(sk);
3483 if (tx_seq == pi->expected_tx_seq)
/* out-of-sequence frame while SREJs are outstanding */
3486 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3487 struct srej_list *first;
3489 first = list_first_entry(SREJ_LIST(sk),
3490 struct srej_list, list);
3491 if (tx_seq == first->tx_seq) {
/* this is the oldest missing frame: queue it and drain the gap */
3492 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3493 l2cap_check_srej_gap(sk, tx_seq);
3495 list_del(&first->list);
3498 if (list_empty(SREJ_LIST(sk))) {
/* recovery complete: resume normal sequencing */
3499 pi->buffer_seq = pi->buffer_seq_srej;
3500 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3504 struct srej_list *l;
3505 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3507 list_for_each_entry(l, SREJ_LIST(sk), list) {
3508 if (l->tx_seq == tx_seq) {
/* duplicate of an already-SREJed frame: re-request it */
3509 l2cap_resend_srejframe(sk, tx_seq);
3513 l2cap_send_srejframe(sk, tx_seq);
/* first out-of-sequence frame: enter SREJ recovery */
3516 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3518 INIT_LIST_HEAD(SREJ_LIST(sk));
3519 pi->buffer_seq_srej = pi->buffer_seq;
3521 __skb_queue_head_init(SREJ_QUEUE(sk));
3522 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3524 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3526 l2cap_send_srejframe(sk, tx_seq);
/* in-sequence path: advance the receive window */
3531 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3533 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3534 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3538 if (rx_control & L2CAP_CTRL_FINAL) {
3539 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3540 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* F-bit after REJ: retransmit from the acknowledged point */
3542 sk->sk_send_head = TX_QUEUE(sk)->next;
3543 pi->next_tx_seq = pi->expected_ack_seq;
3544 l2cap_ertm_send(sk);
3548 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3550 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* batch acknowledgements: ack once per NUM_TO_ACK frames */
3556 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3557 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1)
/*
 * Handle a Receiver Ready (RR) S-frame: ack our transmitted frames and
 * react to the P-bit (answer the poll) or the F-bit (resume
 * transmission after a REJ exchange or WAIT_F).
 */
3563 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3565 struct l2cap_pinfo *pi = l2cap_pi(sk);
3567 pi->expected_ack_seq = __get_reqseq(rx_control);
3568 l2cap_drop_acked_frames(sk);
3570 if (rx_control & L2CAP_CTRL_POLL) {
3571 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3572 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3573 (pi->unacked_frames > 0))
3574 __mod_retrans_timer();
3576 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* in SREJ recovery: answer the poll by re-sending the tail SREJ */
3577 l2cap_send_srejtail(sk);
3579 l2cap_send_i_or_rr_or_rnr(sk);
3580 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3583 } else if (rx_control & L2CAP_CTRL_FINAL) {
3584 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3586 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3587 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* F-bit closes the REJ exchange: rewind and retransmit */
3589 sk->sk_send_head = TX_QUEUE(sk)->next;
3590 pi->next_tx_seq = pi->expected_ack_seq;
3591 l2cap_ertm_send(sk);
/* plain RR: restart retransmission timing and keep sending */
3595 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3596 (pi->unacked_frames > 0))
3597 __mod_retrans_timer();
3599 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3600 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3603 l2cap_ertm_send(sk);
/*
 * Handle a Reject (REJ) S-frame: the peer asks for a go-back-N
 * retransmission starting at ReqSeq. Acked frames are dropped first,
 * then the send pointer is rewound to the rejected sequence number.
 */
3607 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3609 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* for REJ the ReqSeq field names the first frame to retransmit */
3610 u8 tx_seq = __get_reqseq(rx_control);
3612 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3614 pi->expected_ack_seq = tx_seq;
3615 l2cap_drop_acked_frames(sk);
3617 if (rx_control & L2CAP_CTRL_FINAL) {
3618 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3619 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* rewind transmission to the rejected frame */
3621 sk->sk_send_head = TX_QUEUE(sk)->next;
3622 pi->next_tx_seq = pi->expected_ack_seq;
3623 l2cap_ertm_send(sk);
3626 sk->sk_send_head = TX_QUEUE(sk)->next;
3627 pi->next_tx_seq = pi->expected_ack_seq;
3628 l2cap_ertm_send(sk);
/* while awaiting an F-bit, remember this REJ so a duplicate is ignored */
3630 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3631 pi->srej_save_reqseq = tx_seq;
3632 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * frame named by ReqSeq, with P/F-bit bookkeeping mirroring the REJ
 * handler (srej_save_reqseq suppresses duplicate retransmissions while
 * WAIT_F is pending).
 */
3636 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3638 struct l2cap_pinfo *pi = l2cap_pi(sk);
3639 u8 tx_seq = __get_reqseq(rx_control);
3641 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3643 if (rx_control & L2CAP_CTRL_POLL) {
/* SREJ with P-bit also acknowledges everything before tx_seq */
3644 pi->expected_ack_seq = tx_seq;
3645 l2cap_drop_acked_frames(sk);
3646 l2cap_retransmit_frame(sk, tx_seq);
3647 l2cap_ertm_send(sk);
3648 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3649 pi->srej_save_reqseq = tx_seq;
3650 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3652 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit: skip the retransmit if this SREJ was already acted on */
3653 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3654 pi->srej_save_reqseq == tx_seq)
3655 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3657 l2cap_retransmit_frame(sk, tx_seq);
3659 l2cap_retransmit_frame(sk, tx_seq);
3660 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3661 pi->srej_save_reqseq = tx_seq;
3662 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack
 * frames up to ReqSeq, and answer a P-bit appropriately for the current
 * (normal vs SREJ-recovery) state.
 */
3667 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3669 struct l2cap_pinfo *pi = l2cap_pi(sk);
3670 u8 tx_seq = __get_reqseq(rx_control);
/* remote can't receive: stop pushing I-frames until it recovers */
3672 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3673 pi->expected_ack_seq = tx_seq;
3674 l2cap_drop_acked_frames(sk);
3676 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
3677 del_timer(&pi->retrans_timer);
3678 if (rx_control & L2CAP_CTRL_POLL) {
/* answer the poll with an F-bit response */
3679 u16 control = L2CAP_CTRL_FINAL;
3680 l2cap_send_rr_or_rnr(pi, control);
/* in SREJ recovery: re-assert the outstanding SREJ / receiver state */
3685 if (rx_control & L2CAP_CTRL_POLL)
3686 l2cap_send_srejtail(sk);
3688 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Dispatch a received ERTM S-frame to the RR/REJ/SREJ/RNR handler.
 * An F-bit on any supervisory frame ends the WAIT_F poll state and
 * stops the monitor timer first.
 */
3691 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3693 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3695 if (L2CAP_CTRL_FINAL & rx_control) {
3696 del_timer(&l2cap_pi(sk)->monitor_timer);
3697 if (l2cap_pi(sk)->unacked_frames > 0)
3698 __mod_retrans_timer();
3699 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3702 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3703 case L2CAP_SUPER_RCV_READY:
3704 l2cap_data_channel_rrframe(sk, rx_control);
3707 case L2CAP_SUPER_REJECT:
3708 l2cap_data_channel_rejframe(sk, rx_control);
3711 case L2CAP_SUPER_SELECT_REJECT:
3712 l2cap_data_channel_srejframe(sk, rx_control);
3715 case L2CAP_SUPER_RCV_NOT_READY:
3716 l2cap_data_channel_rnrframe(sk, rx_control);
/*
 * Entry point for data on a connection-oriented channel: look up the
 * socket by CID and deliver according to the channel mode (basic,
 * ERTM, or streaming). Frames failing MTU, FCS, or length checks are
 * dropped (drop paths are elided in this extraction).
 */
3724 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3727 struct l2cap_pinfo *pi;
3731 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3733 BT_DBG("unknown cid 0x%4.4x", cid);
3739 BT_DBG("sk %p, len %d", sk, skb->len);
3741 if (sk->sk_state != BT_CONNECTED)
3745 case L2CAP_MODE_BASIC:
3746 /* If socket recv buffers overflows we drop data here
3747 * which is *bad* because L2CAP has to be reliable.
3748 * But we don't have any other choice. L2CAP doesn't
3749 * provide flow control mechanism. */
3751 if (pi->imtu < skb->len)
3754 if (!sock_queue_rcv_skb(sk, skb))
3758 case L2CAP_MODE_ERTM:
/* first two bytes of the payload are the ERTM control field */
3759 control = get_unaligned_le16(skb->data)
3763 if (__is_sar_start(control))
3766 if (pi->fcs == L2CAP_FCS_CRC16)
3770 * We can just drop the corrupted I-frame here.
3771 * Receiver will miss it and start proper recovery
3772 * procedures and ask retransmission.
3777 if (l2cap_check_fcs(pi, skb))
3780 if (__is_iframe(control)) {
3784 l2cap_data_channel_iframe(sk, control, skb);
3789 l2cap_data_channel_sframe(sk, control, skb);
3794 case L2CAP_MODE_STREAMING:
3795 control = get_unaligned_le16(skb->data);
3799 if (__is_sar_start(control))
3802 if (pi->fcs == L2CAP_FCS_CRC16)
/* streaming mode carries only I-frames within the MPS */
3805 if (len > pi->mps || len < 4 || __is_sframe(control))
3808 if (l2cap_check_fcs(pi, skb))
3811 tx_seq = __get_txseq(control);
/* streaming mode never retransmits: just resync on sequence gaps */
3813 if (pi->expected_tx_seq == tx_seq)
3814 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3816 pi->expected_tx_seq = (tx_seq + 1) % 64;
3818 l2cap_sar_reassembly_sdu(sk, skb, control);
3823 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/*
 * Deliver a connectionless (G-frame) payload to a socket bound to the
 * given PSM, subject to socket state and incoming-MTU checks.
 */
3837 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3841 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3845 BT_DBG("sk %p, len %d", sk, skb->len);
3847 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3850 if (l2cap_pi(sk)->imtu < skb->len)
3853 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Demultiplex one complete L2CAP frame by CID: signalling channel,
 * connectionless channel, or a connection-oriented data channel.
 * Frames whose header length disagrees with the skb length are
 * discarded (the drop line is elided in this extraction).
 */
3865 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3867 struct l2cap_hdr *lh = (void *) skb->data;
/* strip the 4-byte L2CAP header; skb->data now points at the payload */
3871 skb_pull(skb, L2CAP_HDR_SIZE);
3872 cid = __le16_to_cpu(lh->cid);
3873 len = __le16_to_cpu(lh->len);
3875 if (len != skb->len) {
3880 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3883 case L2CAP_CID_SIGNALING:
3884 l2cap_sig_channel(conn, skb);
3887 case L2CAP_CID_CONN_LESS:
/* connectionless frames carry the PSM in the first payload bytes */
3888 psm = get_unaligned_le16(skb->data);
3890 l2cap_conless_channel(conn, psm, skb);
3894 l2cap_data_channel(conn, cid, skb);
3899 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: an incoming ACL connection is being indicated.
 * Scan listening L2CAP sockets and build the accept/role link-mode
 * mask; an exact local-address match (lm1) takes precedence over
 * wildcard (BDADDR_ANY) listeners (lm2).
 */
3901 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3903 int exact = 0, lm1 = 0, lm2 = 0;
3904 register struct sock *sk;
3905 struct hlist_node *node;
/* L2CAP only runs over ACL links */
3907 if (type != ACL_LINK)
3910 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3912 /* Find listening sockets and check their link_mode */
3913 read_lock(&l2cap_sk_list.lock);
3914 sk_for_each(sk, node, &l2cap_sk_list.head) {
3915 if (sk->sk_state != BT_LISTEN)
3918 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3919 lm1 |= HCI_LM_ACCEPT;
3920 if (l2cap_pi(sk)->role_switch)
3921 lm1 |= HCI_LM_MASTER;
3923 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3924 lm2 |= HCI_LM_ACCEPT;
3925 if (l2cap_pi(sk)->role_switch)
3926 lm2 |= HCI_LM_MASTER;
3929 read_unlock(&l2cap_sk_list.lock);
/* prefer the exact-address listener's mode over the wildcard's */
3931 return exact ? lm1 : lm2;
/*
 * HCI callback: ACL connection attempt completed. On success attach an
 * L2CAP connection object and mark it ready; on failure tear down any
 * L2CAP state with the mapped error.
 */
3934 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3936 struct l2cap_conn *conn;
3938 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3940 if (hcon->type != ACL_LINK)
3944 conn = l2cap_conn_add(hcon, status);
3946 l2cap_conn_ready(conn);
3948 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: report the L2CAP-level disconnect reason for this ACL
 * link (stored on the connection by earlier signalling).
 */
3953 static int l2cap_disconn_ind(struct hci_conn *hcon)
3955 struct l2cap_conn *conn = hcon->l2cap_data;
3957 BT_DBG("hcon %p", hcon);
3959 if (hcon->type != ACL_LINK || !conn)
3962 return conn->disc_reason;
/*
 * HCI callback: the ACL link went down — tear down all L2CAP state for
 * it, converting the HCI reason into an errno via bt_err().
 */
3965 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3967 BT_DBG("hcon %p reason %d", hcon, reason);
3969 if (hcon->type != ACL_LINK)
3972 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connected channel according to its
 * security level: HIGH channels are closed when encryption drops,
 * MEDIUM channels get a 5 s grace timer (cleared again when encryption
 * comes back). Only SOCK_SEQPACKET channels are affected.
 */
3977 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3979 if (sk->sk_type != SOCK_SEQPACKET)
3982 if (encrypt == 0x00) {
3983 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
/* give the link 5 seconds to re-encrypt before timing out */
3984 l2cap_sock_clear_timer(sk);
3985 l2cap_sock_set_timer(sk, HZ * 5);
3986 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3987 __l2cap_sock_close(sk, ECONNREFUSED);
3989 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3990 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: authentication/encryption completed for the link.
 * Walk every channel on the connection and advance those that were
 * waiting on security: connected channels get an encryption check,
 * BT_CONNECT channels send their Connection Request, and BT_CONNECT2
 * (incoming, pending) channels send a Connection Response whose result
 * depends on @status.
 */
3994 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3996 struct l2cap_chan_list *l;
3997 struct l2cap_conn *conn = hcon->l2cap_data;
4003 l = &conn->chan_list;
4005 BT_DBG("conn %p", conn);
4007 read_lock(&l->lock);
4009 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* channels still mid security-procedure are skipped here */
4012 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4017 if (!status && (sk->sk_state == BT_CONNECTED ||
4018 sk->sk_state == BT_CONFIG)) {
4019 l2cap_check_encryption(sk, encrypt);
4024 if (sk->sk_state == BT_CONNECT) {
/* security done on an outgoing channel: issue the Connect Request */
4026 struct l2cap_conn_req req;
4027 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4028 req.psm = l2cap_pi(sk)->psm;
4030 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4032 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4033 L2CAP_CONN_REQ, sizeof(req), &req);
4035 l2cap_sock_clear_timer(sk);
4036 l2cap_sock_set_timer(sk, HZ / 10);
4038 } else if (sk->sk_state == BT_CONNECT2) {
/* incoming channel that was held pending security */
4039 struct l2cap_conn_rsp rsp;
4043 sk->sk_state = BT_CONFIG;
4044 result = L2CAP_CR_SUCCESS;
/* security failed: refuse the connection and schedule teardown */
4046 sk->sk_state = BT_DISCONN;
4047 l2cap_sock_set_timer(sk, HZ / 10);
4048 result = L2CAP_CR_SEC_BLOCK;
4051 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4052 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4053 rsp.result = cpu_to_le16(result);
4054 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4055 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4056 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4062 read_unlock(&l->lock);
/*
 * HCI callback: reassemble L2CAP frames from ACL fragments.
 * ACL_START fragments carry the L2CAP header and establish the expected
 * total length; continuation fragments are appended to conn->rx_skb
 * until rx_len reaches zero, at which point the complete frame is
 * handed to l2cap_recv_frame(). Any framing inconsistency marks the
 * connection unreliable (ECOMM).
 */
4067 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4069 struct l2cap_conn *conn = hcon->l2cap_data;
4071 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4074 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4076 if (flags & ACL_START) {
4077 struct l2cap_hdr *hdr;
/* a START while a reassembly is pending means we lost fragments */
4081 BT_ERR("Unexpected start frame (len %d)", skb->len);
4082 kfree_skb(conn->rx_skb);
4083 conn->rx_skb = NULL;
4085 l2cap_conn_unreliable(conn, ECOMM);
4089 BT_ERR("Frame is too short (len %d)", skb->len);
4090 l2cap_conn_unreliable(conn, ECOMM);
4094 hdr = (struct l2cap_hdr *) skb->data;
4095 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4097 if (len == skb->len) {
4098 /* Complete frame received */
4099 l2cap_recv_frame(conn, skb);
4103 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4105 if (skb->len > len) {
4106 BT_ERR("Frame is too long (len %d, expected len %d)",
4108 l2cap_conn_unreliable(conn, ECOMM);
4112 /* Allocate skb for the complete frame (with header) */
4113 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4117 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* remember how many payload bytes are still outstanding */
4119 conn->rx_len = len - skb->len;
4121 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* continuation with nothing outstanding: protocol violation */
4123 if (!conn->rx_len) {
4124 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4125 l2cap_conn_unreliable(conn, ECOMM);
4129 if (skb->len > conn->rx_len) {
4130 BT_ERR("Fragment is too long (len %d, expected %d)",
4131 skb->len, conn->rx_len);
4132 kfree_skb(conn->rx_skb);
4133 conn->rx_skb = NULL;
4135 l2cap_conn_unreliable(conn, ECOMM);
4139 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4141 conn->rx_len -= skb->len;
4143 if (!conn->rx_len) {
4144 /* Complete frame received */
4145 l2cap_recv_frame(conn, conn->rx_skb);
4146 conn->rx_skb = NULL;
/*
 * debugfs seq_file show handler: dump one line per L2CAP socket with
 * addresses, state, PSM, CIDs (elided columns), MTUs and security level.
 */
4155 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4158 struct hlist_node *node;
4160 read_lock_bh(&l2cap_sk_list.lock);
4162 sk_for_each(sk, node, &l2cap_sk_list.head) {
4163 struct l2cap_pinfo *pi = l2cap_pi(sk);
4165 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4166 batostr(&bt_sk(sk)->src),
4167 batostr(&bt_sk(sk)->dst),
4168 sk->sk_state, __le16_to_cpu(pi->psm),
4170 pi->imtu, pi->omtu, pi->sec_level);
4173 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open handler: wire the seq_file single-show callback */
4178 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4180 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* file_operations for the /sys/kernel/debug/bluetooth/l2cap entry */
4183 static const struct file_operations l2cap_debugfs_fops = {
4184 .open = l2cap_debugfs_open,
4186 .llseek = seq_lseek,
4187 .release = single_release,
/* dentry of the debugfs file, created in l2cap_init() */
4190 static struct dentry *l2cap_debugfs;
/* proto_ops table exposing L2CAP sockets to the BSD socket layer */
4192 static const struct proto_ops l2cap_sock_ops = {
4193 .family = PF_BLUETOOTH,
4194 .owner = THIS_MODULE,
4195 .release = l2cap_sock_release,
4196 .bind = l2cap_sock_bind,
4197 .connect = l2cap_sock_connect,
4198 .listen = l2cap_sock_listen,
4199 .accept = l2cap_sock_accept,
4200 .getname = l2cap_sock_getname,
4201 .sendmsg = l2cap_sock_sendmsg,
4202 .recvmsg = l2cap_sock_recvmsg,
4203 .poll = bt_sock_poll,
4204 .ioctl = bt_sock_ioctl,
4205 .mmap = sock_no_mmap,
4206 .socketpair = sock_no_socketpair,
4207 .shutdown = l2cap_sock_shutdown,
4208 .setsockopt = l2cap_sock_setsockopt,
4209 .getsockopt = l2cap_sock_getsockopt
/* socket-family creator registered with the Bluetooth socket layer */
4212 static const struct net_proto_family l2cap_sock_family_ops = {
4213 .family = PF_BLUETOOTH,
4214 .owner = THIS_MODULE,
4215 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core calls up into L2CAP */
4218 static struct hci_proto l2cap_hci_proto = {
4220 .id = HCI_PROTO_L2CAP,
4221 .connect_ind = l2cap_connect_ind,
4222 .connect_cfm = l2cap_connect_cfm,
4223 .disconn_ind = l2cap_disconn_ind,
4224 .disconn_cfm = l2cap_disconn_cfm,
4225 .security_cfm = l2cap_security_cfm,
4226 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the proto, the Bluetooth socket family and the
 * HCI protocol hooks, then create the debugfs entry. Registrations are
 * unwound in reverse order on failure (error paths partially elided).
 */
4229 static int __init l2cap_init(void)
4233 err = proto_register(&l2cap_proto, 0);
4237 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4239 BT_ERR("L2CAP socket registration failed");
4243 err = hci_register_proto(&l2cap_hci_proto);
4245 BT_ERR("L2CAP protocol registration failed");
4246 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs is best-effort: failure only logs, init still succeeds */
4251 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4252 bt_debugfs, NULL, &l2cap_debugfs_fops);
4254 BT_ERR("Failed to create L2CAP debug file");
4257 BT_INFO("L2CAP ver %s", VERSION);
4258 BT_INFO("L2CAP socket layer initialized");
/* error unwind label (elided): undo proto_register */
4263 proto_unregister(&l2cap_proto);
/* Module exit: tear down debugfs and all registrations from l2cap_init(). */
4267 static void __exit l2cap_exit(void)
4269 debugfs_remove(l2cap_debugfs);
4271 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4272 BT_ERR("L2CAP socket unregistration failed");
4274 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4275 BT_ERR("L2CAP protocol unregistration failed");
4277 proto_unregister(&l2cap_proto);
/*
 * Exported no-op: referencing this symbol from another module pulls in
 * (and auto-loads) the L2CAP module without needing any other symbol.
 */
4280 void l2cap_load(void)
4282 /* Dummy function to trigger automatic L2CAP module loading by
4283 * other modules that use L2CAP sockets but don't use any other
4284 * symbols from it. */
4287 EXPORT_SYMBOL(l2cap_load);
4289 module_init(l2cap_init);
4290 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared `static int` but registered as a
 * `bool` module param — later kernels require the variable type to match;
 * confirm against the target kernel's module_param rules. */
4292 module_param(enable_ertm, bool, 0644);
4293 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4295 module_param(max_transmit, uint, 0644);
4296 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4298 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4299 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4300 MODULE_VERSION(VERSION);
4301 MODULE_LICENSE("GPL");
/* allow auto-loading via the bt-proto-0 (BTPROTO_L2CAP) alias */
4302 MODULE_ALIAS("bt-proto-0");