2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* Module version, build-time tunables and forward declarations.
 * NOTE(review): the #ifdef CONFIG_BT_L2CAP_EXT_FEATURES block and the
 * bt_sock_list initializer appear to be missing lines (#else/#endif, closing
 * brace) in this excerpt — verify against the full file. */
56 #define VERSION "2.14"
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
61 static int enable_ertm = 0;
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops;
71 static struct bt_sock_list l2cap_sk_list = {
72 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
80 u8 code, u8 ident, u16 dlen, void *data);
82 /* ---- L2CAP timers ---- */
/* sk_timer expiry callback: choose an errno from the socket state, then
 * close the channel via __l2cap_sock_close().
 * NOTE(review): the declaration/default of 'reason' and the socket
 * lock/unlock lines are not visible in this excerpt. */
83 static void l2cap_sock_timeout(unsigned long arg)
85 struct sock *sk = (struct sock *) arg;
88 BT_DBG("sock %p state %d", sk, sk->sk_state);
92 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
93 reason = ECONNREFUSED;
94 else if (sk->sk_state == BT_CONNECT &&
95 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
96 reason = ECONNREFUSED;
100 __l2cap_sock_close(sk, reason);
/* Arm sk_timer to fire 'timeout' jiffies from now. */
108 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
110 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
111 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
114 static void l2cap_sock_clear_timer(struct sock *sk)
116 BT_DBG("sock %p state %d", sk, sk->sk_state);
117 sk_stop_timer(sk, &sk->sk_timer);
120 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for a matching destination CID.
 * Caller must hold the list lock; returns NULL on miss (return lines
 * not visible in this excerpt). */
121 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
124 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
125 if (l2cap_pi(s)->dcid == cid)
/* As above, but match on the source CID. Caller holds the list lock. */
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
135 if (l2cap_pi(s)->scid == cid)
141 /* Find channel with given SCID.
142 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(): takes the list read
 * lock for the lookup (bh_lock_sock on the hit is not visible here). */
143 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
147 s = __l2cap_get_chan_by_scid(l, cid);
150 read_unlock(&l->lock);
154 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
158 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
164 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168 s = __l2cap_get_chan_by_ident(l, ident);
171 read_unlock(&l->lock);
/* Allocate the first free dynamic channel ID by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller holds the list lock.
 * NOTE(review): the exhaustion return path is not visible in this excerpt. */
175 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
177 u16 cid = L2CAP_CID_DYN_START;
179 for (; cid < L2CAP_CID_DYN_END; cid++) {
180 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk at the head of the doubly-linked channel list.
 * Caller holds the list write lock. */
187 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
192 l2cap_pi(l->head)->prev_c = sk;
194 l2cap_pi(sk)->next_c = l->head;
195 l2cap_pi(sk)->prev_c = NULL;
/* Unlink sk from the channel list under the list write lock,
 * fixing up neighbours' prev_c/next_c pointers. */
199 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
201 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
203 write_lock_bh(&l->lock);
208 l2cap_pi(next)->prev_c = prev;
210 l2cap_pi(prev)->next_c = next;
211 write_unlock_bh(&l->lock);
/* Attach sk to 'conn': assign CIDs by socket type and link it into the
 * connection's channel list; enqueue on 'parent' for accept() if given.
 * - SOCK_SEQPACKET/SOCK_STREAM: dynamic SCID via l2cap_alloc_cid()
 * - SOCK_DGRAM: fixed connectionless CID
 * - otherwise (raw): signalling CID only
 * Caller holds the channel list write lock (see l2cap_chan_add()). */
216 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
218 struct l2cap_chan_list *l = &conn->chan_list;
220 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
221 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: "remote user terminated connection" default disconnect reason */
223 conn->disc_reason = 0x13;
225 l2cap_pi(sk)->conn = conn;
227 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
228 /* Alloc CID for connection-oriented socket */
229 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
230 } else if (sk->sk_type == SOCK_DGRAM) {
231 /* Connectionless socket */
232 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
233 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
236 /* Raw socket can send/recv signalling messages only */
237 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
238 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
239 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
242 __l2cap_chan_link(l, sk);
245 bt_accept_enqueue(parent, sk);
/* Delete channel from its connection.
249 * Must be called on the locked socket. */
/* Unlinks sk from the connection, drops the hcon reference, marks the
 * socket closed/zapped and notifies the listening parent (if any). */
250 static void l2cap_chan_del(struct sock *sk, int err)
252 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
253 struct sock *parent = bt_sk(sk)->parent;
255 l2cap_sock_clear_timer(sk);
257 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
260 /* Unlink from channel list */
261 l2cap_chan_unlink(&conn->chan_list, sk);
262 l2cap_pi(sk)->conn = NULL;
263 hci_conn_put(conn->hcon);
266 sk->sk_state = BT_CLOSED;
267 sock_set_flag(sk, SOCK_ZAPPED);
/* Orphan from the accept queue and wake the listener. */
273 bt_accept_unlink(sk);
274 parent->sk_data_ready(parent, 0);
276 sk->sk_state_change(sk);
279 /* Service level security */
/* Map the channel's PSM and sec_level to an HCI authentication type and
 * ask the baseband for it. PSM 0x0001 (SDP) never requires bonding; a
 * LOW sec_level on SDP is downgraded to BT_SECURITY_SDP. Returns the
 * result of hci_conn_security() (non-zero when security is satisfied). */
280 static inline int l2cap_check_security(struct sock *sk)
282 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
285 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
287 auth_type = HCI_AT_NO_BONDING_MITM;
289 auth_type = HCI_AT_NO_BONDING;
291 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
292 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
294 switch (l2cap_pi(sk)->sec_level) {
295 case BT_SECURITY_HIGH:
296 auth_type = HCI_AT_GENERAL_BONDING_MITM;
298 case BT_SECURITY_MEDIUM:
299 auth_type = HCI_AT_GENERAL_BONDING;
/* default: no bonding required */
302 auth_type = HCI_AT_NO_BONDING;
307 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier under conn->lock,
 * wrapping within the kernel-reserved range 1..128. */
311 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
315 /* Get next available identificator.
316 * 1 - 128 are used by kernel.
317 * 129 - 199 are reserved.
318 * 200 - 254 are used by utilities like l2ping, etc.
321 spin_lock_bh(&conn->lock);
323 if (++conn->tx_ident > 128)
328 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and push it out on the ACL link.
 * NOTE(review): the NULL check on the built skb is not visible here. */
333 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
335 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
337 BT_DBG("code 0x%2.2x", code);
342 return hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame: L2CAP header + 16-bit control word,
 * plus a trailing CRC16 FCS when the channel negotiated it. Pending F/P
 * bits latched in conn_state are merged into the control field and
 * cleared. */
345 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
348 struct l2cap_hdr *lh;
349 struct l2cap_conn *conn = pi->conn;
350 int count, hlen = L2CAP_HDR_SIZE + 2;
/* FCS adds 2 more bytes to the PDU (hlen += 2 line not visible here). */
352 if (pi->fcs == L2CAP_FCS_CRC16)
355 BT_DBG("pi %p, control 0x%2.2x", pi, control);
357 count = min_t(unsigned int, conn->mtu, hlen);
358 control |= L2CAP_CTRL_FRAME_TYPE;
360 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
361 control |= L2CAP_CTRL_FINAL;
362 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
365 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
366 control |= L2CAP_CTRL_POLL;
367 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
370 skb = bt_skb_alloc(count, GFP_ATOMIC);
374 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
375 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
376 lh->cid = cpu_to_le16(pi->dcid);
377 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the 2 trailing FCS bytes. */
379 if (pi->fcs == L2CAP_FCS_CRC16) {
380 u16 fcs = crc16(0, (u8 *)lh, count - 2);
381 put_unaligned_le16(fcs, skb_put(skb, 2));
384 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RNR when locally busy, otherwise RR, acknowledging buffer_seq. */
387 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
389 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
390 control |= L2CAP_SUPER_RCV_NOT_READY;
392 control |= L2CAP_SUPER_RCV_READY;
394 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
396 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment: if the feature-mask exchange is done,
 * send a Connection Request (once security allows); otherwise start the
 * Information Request handshake with its timeout. */
399 static void l2cap_do_start(struct sock *sk)
401 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
403 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: wait for it to complete. */
404 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
407 if (l2cap_check_security(sk)) {
408 struct l2cap_conn_req req;
409 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
410 req.psm = l2cap_pi(sk)->psm;
412 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
414 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
415 L2CAP_CONN_REQ, sizeof(req), &req);
/* else branch: first user on this conn — query remote features. */
418 struct l2cap_info_req req;
419 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
421 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
422 conn->info_ident = l2cap_get_ident(conn);
424 mod_timer(&conn->info_timer, jiffies +
425 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
427 l2cap_send_cmd(conn, conn->info_ident,
428 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP Disconnection Request for this channel's CID pair. */
432 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
434 struct l2cap_disconn_req req;
436 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
437 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
438 l2cap_send_cmd(conn, l2cap_get_ident(conn),
439 L2CAP_DISCONN_REQ, sizeof(req), &req);
442 /* ---- L2CAP connections ---- */
/* After the feature exchange (or security change), walk all channels on
 * the connection: send Connection Requests for channels in BT_CONNECT,
 * and Connection Responses (success/pending) for BT_CONNECT2.
 * Holds the channel-list read lock for the walk. */
443 static void l2cap_conn_start(struct l2cap_conn *conn)
445 struct l2cap_chan_list *l = &conn->chan_list;
448 BT_DBG("conn %p", conn);
452 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in the handshake. */
455 if (sk->sk_type != SOCK_SEQPACKET &&
456 sk->sk_type != SOCK_STREAM) {
461 if (sk->sk_state == BT_CONNECT) {
462 if (l2cap_check_security(sk)) {
463 struct l2cap_conn_req req;
464 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
465 req.psm = l2cap_pi(sk)->psm;
467 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
469 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
470 L2CAP_CONN_REQ, sizeof(req), &req);
472 } else if (sk->sk_state == BT_CONNECT2) {
473 struct l2cap_conn_rsp rsp;
474 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
477 if (l2cap_check_security(sk)) {
/* defer_setup: keep the peer pending until userspace accepts. */
478 if (bt_sk(sk)->defer_setup) {
479 struct sock *parent = bt_sk(sk)->parent;
480 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
481 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
482 parent->sk_data_ready(parent, 0);
485 sk->sk_state = BT_CONFIG;
486 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
487 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* security not yet satisfied: report authentication pending. */
490 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
491 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
494 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
495 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
501 read_unlock(&l->lock);
/* ACL link is up: raw/dgram sockets become connected immediately;
 * connection-oriented sockets in BT_CONNECT proceed with l2cap_do_start()
 * (call not visible in this excerpt). */
504 static void l2cap_conn_ready(struct l2cap_conn *conn)
506 struct l2cap_chan_list *l = &conn->chan_list;
509 BT_DBG("conn %p", conn);
513 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
516 if (sk->sk_type != SOCK_SEQPACKET &&
517 sk->sk_type != SOCK_STREAM) {
518 l2cap_sock_clear_timer(sk);
519 sk->sk_state = BT_CONNECTED;
520 sk->sk_state_change(sk);
521 } else if (sk->sk_state == BT_CONNECT)
527 read_unlock(&l->lock);
530 /* Notify sockets that we cannot guaranty reliability anymore */
/* Walk the channel list and report 'err' on channels that requested
 * force_reliable (sk_err assignment not visible in this excerpt). */
531 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
533 struct l2cap_chan_list *l = &conn->chan_list;
536 BT_DBG("conn %p", conn);
540 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
541 if (l2cap_pi(sk)->force_reliable)
545 read_unlock(&l->lock);
/* Information Request timed out: give up on the feature exchange and
 * proceed with channel setup anyway. */
548 static void l2cap_info_timeout(unsigned long arg)
550 struct l2cap_conn *conn = (void *) arg;
552 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
553 conn->info_ident = 0;
555 l2cap_conn_start(conn);
/* Get-or-create the l2cap_conn attached to an hcon. Allocates with
 * GFP_ATOMIC, initializes locks and the info timer, and records the
 * ACL MTU and endpoint addresses. Returns the conn (existing-conn early
 * return not visible in this excerpt). */
558 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
560 struct l2cap_conn *conn = hcon->l2cap_data;
565 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
569 hcon->l2cap_data = conn;
572 BT_DBG("hcon %p conn %p", hcon, conn);
574 conn->mtu = hcon->hdev->acl_mtu;
575 conn->src = &hcon->hdev->bdaddr;
576 conn->dst = &hcon->dst;
580 spin_lock_init(&conn->lock);
581 rwlock_init(&conn->chan_list.lock);
583 setup_timer(&conn->info_timer, l2cap_info_timeout,
584 (unsigned long) conn);
/* 0x13: default "remote user terminated" disconnect reason. */
586 conn->disc_reason = 0x13;
/* Tear down an l2cap_conn: free any partial reassembly skb, delete every
 * channel with 'err', cancel the info timer if armed, and detach from
 * the hcon (final kfree not visible in this excerpt). */
591 static void l2cap_conn_del(struct hci_conn *hcon, int err)
593 struct l2cap_conn *conn = hcon->l2cap_data;
599 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
601 kfree_skb(conn->rx_skb);
/* Kill channels one by one off the list head. */
604 while ((sk = conn->chan_list.head)) {
606 l2cap_chan_del(sk, err);
611 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
612 del_timer_sync(&conn->info_timer);
614 hcon->l2cap_data = NULL;
/* Locked wrapper: add sk to conn under the channel-list write lock. */
618 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
620 struct l2cap_chan_list *l = &conn->chan_list;
621 write_lock_bh(&l->lock);
622 __l2cap_chan_add(conn, sk, parent);
623 write_unlock_bh(&l->lock);
626 /* ---- Socket interface ---- */
/* Exact-match lookup: bound socket with this source PSM and bdaddr.
 * Caller holds l2cap_sk_list.lock. */
627 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
630 struct hlist_node *node;
631 sk_for_each(sk, node, &l2cap_sk_list.head)
632 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
639 /* Find socket with psm and source bdaddr.
640 * Returns closest match.
/* Exact source-address match wins; a BDADDR_ANY wildcard listener is
 * remembered in sk1 as the fallback. Caller holds l2cap_sk_list.lock. */
642 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
644 struct sock *sk = NULL, *sk1 = NULL;
645 struct hlist_node *node;
647 sk_for_each(sk, node, &l2cap_sk_list.head) {
648 if (state && sk->sk_state != state)
651 if (l2cap_pi(sk)->psm == psm) {
/* exact match: break out with sk (break not visible here). */
653 if (!bacmp(&bt_sk(sk)->src, src))
657 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node still set => loop broke on an exact match. */
661 return node ? sk : sk1;
664 /* Find socket with given address (psm, src).
665 * Returns locked socket */
/* Locking wrapper over __l2cap_get_sock_by_psm() (bh_lock_sock on the
 * hit not visible in this excerpt). */
666 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
669 read_lock(&l2cap_sk_list.lock);
670 s = __l2cap_get_sock_by_psm(state, psm, src);
673 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct: drop any skbs still queued for receive or transmit. */
677 static void l2cap_sock_destruct(struct sock *sk)
681 skb_queue_purge(&sk->sk_receive_queue);
682 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
685 static void l2cap_sock_cleanup_listen(struct sock *parent)
689 BT_DBG("parent %p", parent);
691 /* Close not yet accepted channels */
692 while ((sk = bt_accept_dequeue(parent, NULL)))
693 l2cap_sock_close(sk);
695 parent->sk_state = BT_CLOSED;
696 sock_set_flag(parent, SOCK_ZAPPED);
699 /* Kill socket (only if zapped and orphan)
700 * Must be called on unlocked socket.
/* No-op unless the socket is both zapped and detached from a struct
 * socket; then unlink it from the global list and mark it dead. */
702 static void l2cap_sock_kill(struct sock *sk)
704 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
707 BT_DBG("sk %p state %d", sk, sk->sk_state);
709 /* Kill poor orphan */
710 bt_sock_unlink(&l2cap_sk_list, sk);
711 sock_set_flag(sk, SOCK_DEAD);
/* State-machine close. BT_LISTEN drains the accept queue; a connected
 * connection-oriented channel sends a Disconnection Request and waits in
 * BT_DISCONN; a half-open BT_CONNECT2 channel answers the pending
 * Connection Request with a refusal before deletion; other states just
 * delete the channel or zap the socket. Caller holds the socket lock. */
715 static void __l2cap_sock_close(struct sock *sk, int reason)
717 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
719 switch (sk->sk_state) {
/* case BT_LISTEN: */
721 l2cap_sock_cleanup_listen(sk);
/* case BT_CONNECTED / BT_CONFIG: */
726 if (sk->sk_type == SOCK_SEQPACKET ||
727 sk->sk_type == SOCK_STREAM) {
728 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
730 sk->sk_state = BT_DISCONN;
731 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
732 l2cap_send_disconn_req(conn, sk);
734 l2cap_chan_del(sk, reason);
/* case BT_CONNECT2: refuse the pending connection request. */
738 if (sk->sk_type == SOCK_SEQPACKET ||
739 sk->sk_type == SOCK_STREAM) {
740 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
741 struct l2cap_conn_rsp rsp;
744 if (bt_sk(sk)->defer_setup)
745 result = L2CAP_CR_SEC_BLOCK;
747 result = L2CAP_CR_BAD_PSM;
749 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
750 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
751 rsp.result = cpu_to_le16(result);
752 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
753 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
754 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
756 l2cap_chan_del(sk, reason);
/* case BT_CONNECT / BT_DISCONN: */
761 l2cap_chan_del(sk, reason);
/* default: */
765 sock_set_flag(sk, SOCK_ZAPPED);
770 /* Must be called on unlocked socket. */
/* Stop the timer and close with ECONNRESET (lock/unlock and
 * l2cap_sock_kill not visible in this excerpt). */
771 static void l2cap_sock_close(struct sock *sk)
773 l2cap_sock_clear_timer(sk);
775 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP state. With a parent (incoming accept),
 * inherit its type, MTUs, mode, FCS, window and security settings;
 * otherwise apply module defaults (ERTM for SOCK_STREAM only when
 * enable_ertm is set). */
780 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
782 struct l2cap_pinfo *pi = l2cap_pi(sk);
787 sk->sk_type = parent->sk_type;
788 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
790 pi->imtu = l2cap_pi(parent)->imtu;
791 pi->omtu = l2cap_pi(parent)->omtu;
792 pi->mode = l2cap_pi(parent)->mode;
793 pi->fcs = l2cap_pi(parent)->fcs;
794 pi->max_tx = l2cap_pi(parent)->max_tx;
795 pi->tx_win = l2cap_pi(parent)->tx_win;
796 pi->sec_level = l2cap_pi(parent)->sec_level;
797 pi->role_switch = l2cap_pi(parent)->role_switch;
798 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: fresh socket, module defaults. */
800 pi->imtu = L2CAP_DEFAULT_MTU;
802 if (enable_ertm && sk->sk_type == SOCK_STREAM)
803 pi->mode = L2CAP_MODE_ERTM;
805 pi->mode = L2CAP_MODE_BASIC;
806 pi->max_tx = max_transmit;
807 pi->fcs = L2CAP_FCS_CRC16;
808 pi->tx_win = tx_window;
809 pi->sec_level = BT_SECURITY_LOW;
811 pi->force_reliable = 0;
814 /* Default config options */
816 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
817 skb_queue_head_init(TX_QUEUE(sk));
818 skb_queue_head_init(SREJ_QUEUE(sk));
819 INIT_LIST_HEAD(SREJ_LIST(sk));
/* struct proto registration: sizes sk_alloc for l2cap_pinfo. */
822 static struct proto l2cap_proto = {
824 .owner = THIS_MODULE,
825 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and initialize a bare L2CAP sock: destructor, send timeout,
 * state timer, and linkage into l2cap_sk_list. */
828 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
832 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
836 sock_init_data(sock, sk);
837 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
839 sk->sk_destruct = l2cap_sock_destruct;
840 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
842 sock_reset_flag(sk, SOCK_ZAPPED);
844 sk->sk_protocol = proto;
845 sk->sk_state = BT_OPEN;
847 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
849 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) entry: validate the socket type, require CAP_NET_RAW for
 * user-created raw sockets, then allocate and init the sock. */
853 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
858 BT_DBG("sock %p", sock);
860 sock->state = SS_UNCONNECTED;
862 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
863 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
864 return -ESOCKTNOSUPPORT;
866 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
869 sock->ops = &l2cap_sock_ops;
871 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
875 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, require BT_OPEN,
 * gate privileged PSMs (< 0x1001) behind CAP_NET_BIND_SERVICE, reject a
 * duplicate (psm, src) binding, then record the address and move to
 * BT_BOUND. SDP/RFCOMM-mgmt PSMs (0x0001/0x0003) default to
 * BT_SECURITY_SDP. */
879 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
881 struct sock *sk = sock->sk;
882 struct sockaddr_l2 la;
887 if (!addr || addr->sa_family != AF_BLUETOOTH)
890 memset(&la, 0, sizeof(la));
891 len = min_t(unsigned int, sizeof(la), alen);
892 memcpy(&la, addr, len);
899 if (sk->sk_state != BT_OPEN) {
904 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
905 !capable(CAP_NET_BIND_SERVICE)) {
910 write_lock_bh(&l2cap_sk_list.lock);
912 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
915 /* Save source address */
916 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
917 l2cap_pi(sk)->psm = la.l2_psm;
918 l2cap_pi(sk)->sport = la.l2_psm;
919 sk->sk_state = BT_BOUND;
921 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
922 __le16_to_cpu(la.l2_psm) == 0x0003)
923 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
926 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to dst, pick the HCI auth type from socket type / PSM /
 * sec_level (mirrors l2cap_check_security's mapping), create the ACL via
 * hci_connect(), attach the channel, and either finish immediately if the
 * link is already up or arm the connect timer. */
933 static int l2cap_do_connect(struct sock *sk)
935 bdaddr_t *src = &bt_sk(sk)->src;
936 bdaddr_t *dst = &bt_sk(sk)->dst;
937 struct l2cap_conn *conn;
938 struct hci_conn *hcon;
939 struct hci_dev *hdev;
943 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
946 hdev = hci_get_route(dst, src);
948 return -EHOSTUNREACH;
950 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; SDP never bonds; others general. */
954 if (sk->sk_type == SOCK_RAW) {
955 switch (l2cap_pi(sk)->sec_level) {
956 case BT_SECURITY_HIGH:
957 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
959 case BT_SECURITY_MEDIUM:
960 auth_type = HCI_AT_DEDICATED_BONDING;
963 auth_type = HCI_AT_NO_BONDING;
966 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
967 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
968 auth_type = HCI_AT_NO_BONDING_MITM;
970 auth_type = HCI_AT_NO_BONDING;
972 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
973 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
975 switch (l2cap_pi(sk)->sec_level) {
976 case BT_SECURITY_HIGH:
977 auth_type = HCI_AT_GENERAL_BONDING_MITM;
979 case BT_SECURITY_MEDIUM:
980 auth_type = HCI_AT_GENERAL_BONDING;
983 auth_type = HCI_AT_NO_BONDING;
988 hcon = hci_connect(hdev, ACL_LINK, dst,
989 l2cap_pi(sk)->sec_level, auth_type);
993 conn = l2cap_conn_add(hcon, 0);
1001 /* Update source addr of the socket */
1002 bacpy(src, conn->src);
1004 l2cap_chan_add(conn, sk, NULL);
1006 sk->sk_state = BT_CONNECT;
1007 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1009 if (hcon->state == BT_CONNECTED) {
/* Link already up: raw/dgram sockets connect instantly. */
1010 if (sk->sk_type != SOCK_SEQPACKET &&
1011 sk->sk_type != SOCK_STREAM) {
1012 l2cap_sock_clear_timer(sk);
1013 sk->sk_state = BT_CONNECTED;
1019 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and channel mode (only BASIC or, with
 * ERTM enabled, ERTM/STREAMING for connection-oriented sockets), check
 * socket state, store the destination, start the connect and optionally
 * wait for BT_CONNECTED. */
1024 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1026 struct sock *sk = sock->sk;
1027 struct sockaddr_l2 la;
1030 BT_DBG("sk %p", sk);
1032 if (!addr || alen < sizeof(addr->sa_family) ||
1033 addr->sa_family != AF_BLUETOOTH)
1036 memset(&la, 0, sizeof(la));
1037 len = min_t(unsigned int, sizeof(la), alen);
1038 memcpy(&la, addr, len);
/* Connection-oriented sockets need a PSM (full condition not visible). */
1045 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1051 switch (l2cap_pi(sk)->mode) {
1052 case L2CAP_MODE_BASIC:
1054 case L2CAP_MODE_ERTM:
1055 case L2CAP_MODE_STREAMING:
1064 switch (sk->sk_state) {
1068 /* Already connecting */
1072 /* Already connected */
1085 /* Set destination address and psm */
1086 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1087 l2cap_pi(sk)->psm = la.l2_psm;
1089 err = l2cap_do_connect(sk);
1094 err = bt_sock_wait_state(sk, BT_CONNECTED,
1095 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets in a supported mode.
 * If no PSM was bound, auto-assign the first free odd PSM in
 * [0x1001, 0x1100) for this source address, then enter BT_LISTEN. */
1101 static int l2cap_sock_listen(struct socket *sock, int backlog)
1103 struct sock *sk = sock->sk;
1106 BT_DBG("sk %p backlog %d", sk, backlog);
1110 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1111 || sk->sk_state != BT_BOUND) {
1116 switch (l2cap_pi(sk)->mode) {
1117 case L2CAP_MODE_BASIC:
1119 case L2CAP_MODE_ERTM:
1120 case L2CAP_MODE_STREAMING:
1129 if (!l2cap_pi(sk)->psm) {
1130 bdaddr_t *src = &bt_sk(sk)->src;
1135 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd (LSB of low octet set), hence += 2. */
1137 for (psm = 0x1001; psm < 0x1100; psm += 2)
1138 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1139 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1140 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1145 write_unlock_bh(&l2cap_sk_list.lock);
1151 sk->sk_max_ack_backlog = backlog;
1152 sk->sk_ack_backlog = 0;
1153 sk->sk_state = BT_LISTEN;
/* accept(2): standard wake-one wait loop on the listener's accept queue,
 * releasing the socket lock across schedule_timeout() and re-checking
 * state/signals/timeout each iteration. */
1160 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1162 DECLARE_WAITQUEUE(wait, current);
1163 struct sock *sk = sock->sk, *nsk;
1167 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1169 if (sk->sk_state != BT_LISTEN) {
1174 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1176 BT_DBG("sk %p timeo %ld", sk, timeo);
1178 /* Wait for an incoming connection. (wake-one). */
1179 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1180 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1181 set_current_state(TASK_INTERRUPTIBLE);
/* release_sock before sleeping (not visible in this excerpt). */
1188 timeo = schedule_timeout(timeo);
1189 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1191 if (sk->sk_state != BT_LISTEN) {
1196 if (signal_pending(current)) {
1197 err = sock_intr_errno(timeo);
1201 set_current_state(TASK_RUNNING);
1202 remove_wait_queue(sk_sleep(sk), &wait);
1207 newsock->state = SS_CONNECTED;
1209 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill sockaddr_l2 with either the peer's
 * (psm, dst bdaddr, dcid) or the local (sport, src bdaddr, scid). */
1216 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1218 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1219 struct sock *sk = sock->sk;
1221 BT_DBG("sock %p, sk %p", sock, sk);
1223 addr->sa_family = AF_BLUETOOTH;
1224 *len = sizeof(struct sockaddr_l2);
/* peer branch: */
1227 la->l2_psm = l2cap_pi(sk)->psm;
1228 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1229 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* local branch: */
1231 la->l2_psm = l2cap_pi(sk)->sport;
1232 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1233 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: give up and disconnect after remote_max_tx
 * retries, otherwise re-arm the monitor and poll the peer with P=1. */
1239 static void l2cap_monitor_timeout(unsigned long arg)
1241 struct sock *sk = (void *) arg;
1245 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1246 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1251 l2cap_pi(sk)->retry_count++;
1252 __mod_monitor_timer();
1254 control = L2CAP_CTRL_POLL;
1255 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: start the monitor sequence — set
 * retry_count, enter the WAIT_F state and poll the peer with P=1. */
1259 static void l2cap_retrans_timeout(unsigned long arg)
1261 struct sock *sk = (void *) arg;
1265 l2cap_pi(sk)->retry_count = 1;
1266 __mod_monitor_timer();
1268 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1270 control = L2CAP_CTRL_POLL;
1271 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Pop acknowledged I-frames off the head of TX_QUEUE until the frame
 * with expected_ack_seq is reached or unacked_frames hits zero; stop the
 * retransmission timer once everything is acked. */
1275 static void l2cap_drop_acked_frames(struct sock *sk)
1277 struct sk_buff *skb;
1279 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1280 l2cap_pi(sk)->unacked_frames) {
1281 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1284 skb = skb_dequeue(TX_QUEUE(sk));
1287 l2cap_pi(sk)->unacked_frames--;
1290 if (!l2cap_pi(sk)->unacked_frames)
1291 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Thin wrapper: push one skb to the ACL link and return the result. */
1296 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1298 struct l2cap_pinfo *pi = l2cap_pi(sk);
1301 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1303 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode TX: for each queued frame, clone it, stamp the next
 * TxSeq into the control word, recompute the FCS in place, send, and
 * drop the original (no retransmission in streaming mode). TxSeq wraps
 * modulo 64. */
1310 static int l2cap_streaming_send(struct sock *sk)
1312 struct sk_buff *skb, *tx_skb;
1313 struct l2cap_pinfo *pi = l2cap_pi(sk);
1317 while ((skb = sk->sk_send_head)) {
1318 tx_skb = skb_clone(skb, GFP_ATOMIC);
1320 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1321 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1322 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1324 if (pi->fcs == L2CAP_FCS_CRC16) {
1325 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1326 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1329 err = l2cap_do_send(sk, tx_skb);
/* send failure tears the channel down. */
1331 l2cap_send_disconn_req(pi->conn, sk);
1335 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1337 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1338 sk->sk_send_head = NULL;
1340 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1342 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame with the given TxSeq (e.g. on SREJ):
 * locate it in TX_QUEUE, disconnect if it already hit remote_max_tx
 * retries, otherwise clone it, refresh ReqSeq/TxSeq in the control word,
 * recompute the FCS and resend. */
1348 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1350 struct l2cap_pinfo *pi = l2cap_pi(sk);
1351 struct sk_buff *skb, *tx_skb;
1355 skb = skb_peek(TX_QUEUE(sk));
/* Walk forward until the frame with tx_seq is found. */
1357 if (bt_cb(skb)->tx_seq != tx_seq) {
1358 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1360 skb = skb_queue_next(TX_QUEUE(sk), skb);
1364 if (pi->remote_max_tx &&
1365 bt_cb(skb)->retries == pi->remote_max_tx) {
1366 l2cap_send_disconn_req(pi->conn, sk);
1370 tx_skb = skb_clone(skb, GFP_ATOMIC);
1371 bt_cb(skb)->retries++;
1372 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1373 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1374 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1375 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1377 if (pi->fcs == L2CAP_FCS_CRC16) {
1378 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1379 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1382 err = l2cap_do_send(sk, tx_skb);
1384 l2cap_send_disconn_req(pi->conn, sk);
/* ERTM TX path: while the window is open and the remote is not busy,
 * clone the next frame from sk_send_head, stamp F-bit/ReqSeq/TxSeq,
 * recompute the FCS, send, arm the retransmission timer and account the
 * frame as unacked. Does nothing while waiting for an F-bit (WAIT_F). */
1392 static int l2cap_ertm_send(struct sock *sk)
1394 struct sk_buff *skb, *tx_skb;
1395 struct l2cap_pinfo *pi = l2cap_pi(sk);
1399 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1402 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1403 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1405 if (pi->remote_max_tx &&
1406 bt_cb(skb)->retries == pi->remote_max_tx) {
1407 l2cap_send_disconn_req(pi->conn, sk);
1411 tx_skb = skb_clone(skb, GFP_ATOMIC);
1413 bt_cb(skb)->retries++;
1415 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1416 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1417 control |= L2CAP_CTRL_FINAL;
1418 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1420 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1421 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1422 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/stored via skb->data here, unlike the
 * streaming/retransmit paths which use tx_skb->data. skb_clone() shares
 * the data buffer so the bytes are the same, but upstream later changed
 * this to tx_skb->data for clarity — consider aligning. */
1425 if (pi->fcs == L2CAP_FCS_CRC16) {
1426 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1427 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1430 err = l2cap_do_send(sk, tx_skb);
1432 l2cap_send_disconn_req(pi->conn, sk);
1435 __mod_retrans_timer();
1437 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1438 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1440 pi->unacked_frames++;
1443 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1444 sk->sk_send_head = NULL;
1446 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received frames: send RNR when locally busy; otherwise try
 * to piggyback the ack on pending I-frames via l2cap_ertm_send(), and
 * fall back to an explicit RR only when nothing was sent. */
1454 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1456 struct sock *sk = (struct sock *)pi;
1459 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1461 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1462 control |= L2CAP_SUPER_RCV_NOT_READY;
1463 return l2cap_send_sframe(pi, control);
1464 } else if (l2cap_ertm_send(sk) == 0) {
1465 control |= L2CAP_SUPER_RCV_READY;
1466 return l2cap_send_sframe(pi, control);
/* Send an SREJ S-frame with F=1 for the last entry on the SREJ list. */
1471 static int l2cap_send_srejtail(struct sock *sk)
1473 struct srej_list *tail;
1476 control = L2CAP_SUPER_SELECT_REJECT;
1477 control |= L2CAP_CTRL_FINAL;
1479 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1480 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1482 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user iovec data into skb: first 'count' bytes into the head skb,
 * the remainder into a chain of MTU-sized continuation fragments hung
 * off frag_list (len/count bookkeeping lines not visible here). */
1487 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1489 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1490 struct sk_buff **frag;
1493 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1500 /* Continuation fragments (no L2CAP header) */
1501 frag = &skb_shinfo(skb)->frag_list;
1503 count = min_t(unsigned int, conn->mtu, len);
1505 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1508 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1514 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * payload copied from the user iovec. Returns ERR_PTR on failure. */
1520 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1522 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1523 struct sk_buff *skb;
1524 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1525 struct l2cap_hdr *lh;
1527 BT_DBG("sk %p len %d", sk, (int)len);
1529 count = min_t(unsigned int, (conn->mtu - hlen), len);
1530 skb = bt_skb_send_alloc(sk, count + hlen,
1531 msg->msg_flags & MSG_DONTWAIT, &err);
1533 return ERR_PTR(-ENOMEM);
1535 /* Create L2CAP header */
1536 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1537 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1538 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1539 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1541 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1542 if (unlikely(err < 0)) {
1544 return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: plain L2CAP header, payload from the
 * user iovec. Returns ERR_PTR on failure. */
1549 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1551 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1552 struct sk_buff *skb;
1553 int err, count, hlen = L2CAP_HDR_SIZE;
1554 struct l2cap_hdr *lh;
1556 BT_DBG("sk %p len %d", sk, (int)len);
1558 count = min_t(unsigned int, (conn->mtu - hlen), len);
1559 skb = bt_skb_send_alloc(sk, count + hlen,
1560 msg->msg_flags & MSG_DONTWAIT, &err);
1562 return ERR_PTR(-ENOMEM);
1564 /* Create L2CAP header */
1565 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1566 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1567 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1569 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1570 if (unlikely(err < 0)) {
1572 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 16-bit SDU-length field (sdulen != 0 for SDU start
 * frames), payload, and a zeroed 2-byte FCS placeholder when CRC16 is
 * configured (filled in later at transmit time — presumably; the
 * transmit path is not visible here).
 */
1577 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1579 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1580 struct sk_buff *skb;
/* hlen = L2CAP header + 2-byte control field; grown below for
 * sdulen/FCS (the adjustment lines are elided in this listing). */
1581 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1582 struct l2cap_hdr *lh;
1584 BT_DBG("sk %p len %d", sk, (int)len);
1587 return ERR_PTR(-ENOTCONN);
1592 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1595 count = min_t(unsigned int, (conn->mtu - hlen), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen,
1597 msg->msg_flags & MSG_DONTWAIT, &err);
1599 return ERR_PTR(-ENOMEM);
1601 /* Create L2CAP header */
1602 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1603 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1604 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1605 put_unaligned_le16(control, skb_put(skb, 2));
1607 put_unaligned_le16(sdulen, skb_put(skb, 2));
1609 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1610 if (unlikely(err < 0)) {
1612 return ERR_PTR(err);
/* Reserve space for the CRC16 FCS; value written later. */
1615 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1616 put_unaligned_le16(0, skb_put(skb, 2));
1618 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame
 * (carrying the total SDU length), zero or more CONTINUE frames, and a
 * final END frame. Frames are staged on a local queue and spliced onto
 * the socket TX queue only once all segments were built, so a mid-way
 * allocation failure leaves the TX queue untouched.
 */
1622 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1624 struct l2cap_pinfo *pi = l2cap_pi(sk);
1625 struct sk_buff *skb;
1626 struct sk_buff_head sar_queue;
1630 __skb_queue_head_init(&sar_queue);
1631 control = L2CAP_SDU_START;
/* START frame carries the full SDU length (`len`) as sdulen. */
1632 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1634 return PTR_ERR(skb);
1636 __skb_queue_tail(&sar_queue, skb);
1637 len -= pi->remote_mps;
1638 size += pi->remote_mps;
/* Loop body (elided framing): CONTINUE while more than one MPS
 * remains, END for the last segment. */
1644 if (len > pi->remote_mps) {
1645 control |= L2CAP_SDU_CONTINUE;
1646 buflen = pi->remote_mps;
1648 control |= L2CAP_SDU_END;
1652 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything built so far — all-or-nothing. */
1654 skb_queue_purge(&sar_queue);
1655 return PTR_ERR(skb);
1658 __skb_queue_tail(&sar_queue, skb);
1663 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1664 if (sk->sk_send_head == NULL)
1665 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point. Rejects MSG_OOB and non-connected sockets,
 * then dispatches on socket type / channel mode:
 *  - SOCK_DGRAM: one connectionless PDU, sent immediately.
 *  - BASIC mode: single PDU after an outgoing-MTU check.
 *  - ERTM/STREAMING: queue one I-frame (or SAR-segment the SDU), then
 *    kick the mode-specific transmit routine.
 * NOTE(review): locking and several error/exit paths are elided in this
 * listing.
 */
1670 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1672 struct sock *sk = sock->sk;
1673 struct l2cap_pinfo *pi = l2cap_pi(sk);
1674 struct sk_buff *skb;
1678 BT_DBG("sock %p, sk %p", sock, sk);
1680 err = sock_error(sk);
1684 if (msg->msg_flags & MSG_OOB)
1689 if (sk->sk_state != BT_CONNECTED) {
1694 /* Connectionless channel */
1695 if (sk->sk_type == SOCK_DGRAM) {
1696 skb = l2cap_create_connless_pdu(sk, msg, len);
1700 err = l2cap_do_send(sk, skb);
1705 case L2CAP_MODE_BASIC:
1706 /* Check outgoing MTU */
1707 if (len > pi->omtu) {
1712 /* Create a basic PDU */
1713 skb = l2cap_create_basic_pdu(sk, msg, len);
1719 err = l2cap_do_send(sk, skb);
1724 case L2CAP_MODE_ERTM:
1725 case L2CAP_MODE_STREAMING:
1726 /* Entire SDU fits into one PDU */
1727 if (len <= pi->remote_mps) {
1728 control = L2CAP_SDU_UNSEGMENTED;
1729 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1734 __skb_queue_tail(TX_QUEUE(sk), skb);
1735 if (sk->sk_send_head == NULL)
1736 sk->sk_send_head = skb;
1738 /* Segment SDU into multiples PDUs */
1739 err = l2cap_sar_segment_sdu(sk, msg, len);
1744 if (pi->mode == L2CAP_MODE_STREAMING)
1745 err = l2cap_streaming_send(sk);
1747 err = l2cap_ertm_send(sk);
1754 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point. If the channel is parked in BT_CONNECT2 with
 * deferred setup, the first read acts as the acceptance: send the
 * pending connect response and move to BT_CONFIG. Actual data delivery
 * is delegated to the generic bt_sock_recvmsg().
 */
1763 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1765 struct sock *sk = sock->sk;
1769 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1770 struct l2cap_conn_rsp rsp;
1772 sk->sk_state = BT_CONFIG;
/* In the response, our dcid/scid are the peer's scid/dcid. */
1774 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1775 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1776 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1777 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1778 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1779 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1787 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS / L2CAP_LM).
 * For L2CAP_OPTIONS, current values pre-fill `opts` so a short user
 * buffer only overrides the leading fields. For link-mode flags the
 * highest requested security level wins.
 * NOTE(review): the switch statement and validation lines are elided in
 * this listing.
 */
1790 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1792 struct sock *sk = sock->sk;
1793 struct l2cap_options opts;
1797 BT_DBG("sk %p", sk);
/* Seed with current settings; a partial copy_from_user keeps the rest. */
1803 opts.imtu = l2cap_pi(sk)->imtu;
1804 opts.omtu = l2cap_pi(sk)->omtu;
1805 opts.flush_to = l2cap_pi(sk)->flush_to;
1806 opts.mode = l2cap_pi(sk)->mode;
1807 opts.fcs = l2cap_pi(sk)->fcs;
1808 opts.max_tx = l2cap_pi(sk)->max_tx;
1809 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1811 len = min_t(unsigned int, sizeof(opts), optlen);
1812 if (copy_from_user((char *) &opts, optval, len)) {
1817 l2cap_pi(sk)->imtu = opts.imtu;
1818 l2cap_pi(sk)->omtu = opts.omtu;
1819 l2cap_pi(sk)->mode = opts.mode;
1820 l2cap_pi(sk)->fcs = opts.fcs;
1821 l2cap_pi(sk)->max_tx = opts.max_tx;
1822 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1826 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode flags to the new security levels; later
 * assignments override earlier ones, so the strongest flag wins. */
1831 if (opt & L2CAP_LM_AUTH)
1832 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1833 if (opt & L2CAP_LM_ENCRYPT)
1834 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1835 if (opt & L2CAP_LM_SECURE)
1836 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1838 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1839 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP falls back to the legacy handler. BT_SECURITY is limited
 * to seqpacket/stream/raw sockets and levels LOW..HIGH; BT_DEFER_SETUP
 * only applies before the channel is connected (BOUND/LISTEN).
 */
1851 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1853 struct sock *sk = sock->sk;
1854 struct bt_security sec;
1858 BT_DBG("sk %p", sk);
1860 if (level == SOL_L2CAP)
1861 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1863 if (level != SOL_BLUETOOTH)
1864 return -ENOPROTOOPT;
1870 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1871 && sk->sk_type != SOCK_RAW) {
/* Default if the user buffer is shorter than struct bt_security. */
1876 sec.level = BT_SECURITY_LOW;
1878 len = min_t(unsigned int, sizeof(sec), optlen);
1879 if (copy_from_user((char *) &sec, optval, len)) {
1884 if (sec.level < BT_SECURITY_LOW ||
1885 sec.level > BT_SECURITY_HIGH) {
1890 l2cap_pi(sk)->sec_level = sec.level;
1893 case BT_DEFER_SETUP:
1894 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1899 if (get_user(opt, (u32 __user *) optval)) {
1904 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS (channel
 * parameters), L2CAP_LM (security level mapped back to legacy flags),
 * and L2CAP_CONNINFO (HCI handle + device class, valid only once
 * connected or in deferred-setup CONNECT2).
 * NOTE(review): switch labels and some error paths are elided here.
 */
1916 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1918 struct sock *sk = sock->sk;
1919 struct l2cap_options opts;
1920 struct l2cap_conninfo cinfo;
1924 BT_DBG("sk %p", sk);
1926 if (get_user(len, optlen))
1933 opts.imtu = l2cap_pi(sk)->imtu;
1934 opts.omtu = l2cap_pi(sk)->omtu;
1935 opts.flush_to = l2cap_pi(sk)->flush_to;
1936 opts.mode = l2cap_pi(sk)->mode;
1937 opts.fcs = l2cap_pi(sk)->fcs;
1938 opts.max_tx = l2cap_pi(sk)->max_tx;
1939 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1941 len = min_t(unsigned int, len, sizeof(opts));
1942 if (copy_to_user(optval, (char *) &opts, len))
/* Translate sec_level back into cumulative legacy LM flags. */
1948 switch (l2cap_pi(sk)->sec_level) {
1949 case BT_SECURITY_LOW:
1950 opt = L2CAP_LM_AUTH;
1952 case BT_SECURITY_MEDIUM:
1953 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1955 case BT_SECURITY_HIGH:
1956 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1964 if (l2cap_pi(sk)->role_switch)
1965 opt |= L2CAP_LM_MASTER;
1967 if (l2cap_pi(sk)->force_reliable)
1968 opt |= L2CAP_LM_RELIABLE;
1970 if (put_user(opt, (u32 __user *) optval))
1974 case L2CAP_CONNINFO:
1975 if (sk->sk_state != BT_CONNECTED &&
1976 !(sk->sk_state == BT_CONNECT2 &&
1977 bt_sk(sk)->defer_setup)) {
1982 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1983 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1985 len = min_t(unsigned int, len, sizeof(cinfo));
1986 if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt handler (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP falls back to the legacy handler. Mirrors the setsockopt
 * restrictions on socket type and state.
 */
2000 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2002 struct sock *sk = sock->sk;
2003 struct bt_security sec;
2006 BT_DBG("sk %p", sk);
2008 if (level == SOL_L2CAP)
2009 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2011 if (level != SOL_BLUETOOTH)
2012 return -ENOPROTOOPT;
2014 if (get_user(len, optlen))
2021 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2022 && sk->sk_type != SOCK_RAW) {
2027 sec.level = l2cap_pi(sk)->sec_level;
2029 len = min_t(unsigned int, len, sizeof(sec));
2030 if (copy_to_user(optval, (char *) &sec, len))
2035 case BT_DEFER_SETUP:
2036 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2041 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() entry point: on first call, mark both directions shut,
 * cancel the channel timer and start closing the channel; with
 * SO_LINGER set, wait (up to sk_lingertime) for the BT_CLOSED state.
 * Subsequent calls are no-ops on sk_shutdown.
 */
2055 static int l2cap_sock_shutdown(struct socket *sock, int how)
2057 struct sock *sk = sock->sk;
2060 BT_DBG("sock %p, sk %p", sock, sk);
2066 if (!sk->sk_shutdown) {
2067 sk->sk_shutdown = SHUTDOWN_MASK;
2068 l2cap_sock_clear_timer(sk);
2069 __l2cap_sock_close(sk, 0);
2071 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2072 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() entry point: full shutdown (both directions), then free
 * the socket via l2cap_sock_kill().
 */
2079 static int l2cap_sock_release(struct socket *sock)
2081 struct sock *sk = sock->sk;
2084 BT_DBG("sock %p, sk %p", sock, sk);
2089 err = l2cap_sock_shutdown(sock, 2);
2092 l2cap_sock_kill(sk);
/* Mark a channel fully configured: clear config state and timer, then
 * wake whoever is waiting — the connecting task (outgoing channel, no
 * parent) or the accepting listener (incoming channel, has parent).
 */
2096 static void l2cap_chan_ready(struct sock *sk)
2098 struct sock *parent = bt_sk(sk)->parent;
2100 BT_DBG("sk %p, parent %p", sk, parent);
2102 l2cap_pi(sk)->conf_state = 0;
2103 l2cap_sock_clear_timer(sk);
2106 /* Outgoing channel.
2107 * Wake up socket sleeping on connect.
2109 sk->sk_state = BT_CONNECTED;
2110 sk->sk_state_change(sk);
2112 /* Incoming channel.
2113 * Wake up socket sleeping on accept.
2115 parent->sk_data_ready(parent, 0);
2119 /* Copy frame to all raw sockets on that connection.
 * Walks the connection's channel list under the list read lock and
 * clones the skb to every SOCK_RAW socket (except, per the comment
 * below, the originating one — that check is elided in this listing).
 */
2120 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2122 struct l2cap_chan_list *l = &conn->chan_list;
2123 struct sk_buff *nskb;
2126 BT_DBG("conn %p", conn);
2128 read_lock(&l->lock);
2129 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2130 if (sk->sk_type != SOCK_RAW)
2133 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: called in a context where sleeping is not allowed. */
2136 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue rejects the clone, it must be freed —
 * NOTE(review): the kfree_skb line is elided here; confirm. */
2140 if (sock_queue_rcv_skb(sk, nskb))
2143 read_unlock(&l->lock);
2146 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID 0x0001), command
 * header (code/ident/len), then `dlen` bytes of payload, fragmented
 * into frag_list skbs when it exceeds the connection MTU.
 * Returns NULL on allocation failure (elided error paths free partial
 * chains — confirm against full source).
 */
2147 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2148 u8 code, u8 ident, u16 dlen, void *data)
2150 struct sk_buff *skb, **frag;
2151 struct l2cap_cmd_hdr *cmd;
2152 struct l2cap_hdr *lh;
2155 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2156 conn, code, ident, dlen);
2158 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2159 count = min_t(unsigned int, conn->mtu, len);
2161 skb = bt_skb_alloc(count, GFP_ATOMIC);
2165 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2166 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2167 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2169 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2172 cmd->len = cpu_to_le16(dlen);
/* First skb carries headers; only the remaining space takes payload. */
2175 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2176 memcpy(skb_put(skb, count), data, count);
2182 /* Continuation fragments (no L2CAP header) */
2183 frag = &skb_shinfo(skb)->frag_list;
2185 count = min_t(unsigned int, conn->mtu, len);
2187 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2191 memcpy(skb_put(*frag, count), data, count);
2196 frag = &(*frag)->next;
/* Decode one TLV configuration option at *ptr: emit its type and
 * length, and its value widened to unsigned long (1/2/4-byte values by
 * size; anything else as a pointer to the raw bytes). Returns the total
 * option size consumed so the caller can advance through the buffer.
 */
2206 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2208 struct l2cap_conf_opt *opt = *ptr;
2211 len = L2CAP_CONF_OPT_SIZE + opt->len;
2219 *val = *((u8 *) opt->val);
2223 *val = __le16_to_cpu(*((__le16 *) opt->val));
2227 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options: hand back a pointer instead of an inline value. */
2231 *val = (unsigned long) opt->val;
2235 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Encode one TLV configuration option at *ptr (value stored
 * little-endian for 1/2/4-byte sizes, memcpy'd from a pointer for
 * larger ones) and advance *ptr past it.
 */
2239 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2241 struct l2cap_conf_opt *opt = *ptr;
2243 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2250 *((u8 *) opt->val) = val;
2254 *((__le16 *) opt->val) = cpu_to_le16(val);
2258 *((__le32 *) opt->val) = cpu_to_le32(val);
/* For len > 4, `val` is actually a pointer to the option payload. */
2262 memcpy(opt->val, (void *) val, len);
2266 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: send a pending acknowledgement for the
 * channel whose socket pointer was stashed in the timer argument.
 */
2269 static void l2cap_ack_timeout(unsigned long arg)
2271 struct sock *sk = (void *) arg;
2274 l2cap_send_ack(l2cap_pi(sk));
/* Reset ERTM per-channel state (sequence counters) and set up the
 * retransmission, monitor and ack timers plus the SREJ queue. Called
 * when an ERTM channel completes configuration.
 */
2278 static inline void l2cap_ertm_init(struct sock *sk)
2280 l2cap_pi(sk)->expected_ack_seq = 0;
2281 l2cap_pi(sk)->unacked_frames = 0;
2282 l2cap_pi(sk)->buffer_seq = 0;
2283 l2cap_pi(sk)->num_acked = 0;
2284 l2cap_pi(sk)->frames_sent = 0;
2286 setup_timer(&l2cap_pi(sk)->retrans_timer,
2287 l2cap_retrans_timeout, (unsigned long) sk);
2288 setup_timer(&l2cap_pi(sk)->monitor_timer,
2289 l2cap_monitor_timeout, (unsigned long) sk);
2290 setup_timer(&l2cap_pi(sk)->ack_timer,
2291 l2cap_ack_timeout, (unsigned long) sk);
2293 __skb_queue_head_init(SREJ_QUEUE(sk));
/* Check whether `mode` (ERTM/streaming) is supported by both the local
 * feature mask (extended when enable_ertm is set — the conditional line
 * is elided here) and the remote `feat_mask`. Non-zero iff supported.
 */
2296 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2298 u32 local_feat_mask = l2cap_feat_mask;
2300 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2303 case L2CAP_MODE_ERTM:
2304 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2305 case L2CAP_MODE_STREAMING:
2306 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode: keep the requested ERTM/streaming mode when
 * the remote supports it, otherwise fall back to basic mode.
 */
2312 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2315 case L2CAP_MODE_STREAMING:
2316 case L2CAP_MODE_ERTM:
2317 if (l2cap_mode_supported(mode, remote_feat_mask))
2321 return L2CAP_MODE_BASIC;
/* Build an outgoing Configure Request for this channel into `data`.
 * On the first request, validate/downgrade the desired mode against the
 * remote feature mask; then append mode-specific options: MTU for
 * basic mode, RFC (+ optional FCS) for ERTM and streaming. Returns the
 * total request length (return statement elided in this listing).
 */
2325 static int l2cap_build_conf_req(struct sock *sk, void *data)
2327 struct l2cap_pinfo *pi = l2cap_pi(sk);
2328 struct l2cap_conf_req *req = data;
2329 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2330 void *ptr = req->data;
2332 BT_DBG("sk %p", sk);
/* Mode negotiation only on the very first config round. */
2334 if (pi->num_conf_req || pi->num_conf_rsp)
2338 case L2CAP_MODE_STREAMING:
2339 case L2CAP_MODE_ERTM:
2340 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2341 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2342 l2cap_send_disconn_req(pi->conn, sk);
2345 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2351 case L2CAP_MODE_BASIC:
/* Only advertise MTU when it differs from the spec default. */
2352 if (pi->imtu != L2CAP_DEFAULT_MTU)
2353 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2356 case L2CAP_MODE_ERTM:
2357 rfc.mode = L2CAP_MODE_ERTM;
2358 rfc.txwin_size = pi->tx_win;
2359 rfc.max_transmit = pi->max_tx;
/* Timeouts are set by the acceptor, so request 0 here. */
2360 rfc.retrans_timeout = 0;
2361 rfc.monitor_timeout = 0;
2362 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU size so a max-size I-frame (headers + FCS ≈ 10
 * bytes of overhead) still fits in the ACL MTU. */
2363 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2364 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2366 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2367 sizeof(rfc), (unsigned long) &rfc);
2369 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2372 if (pi->fcs == L2CAP_FCS_NONE ||
2373 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2374 pi->fcs = L2CAP_FCS_NONE;
2375 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2379 case L2CAP_MODE_STREAMING:
2380 rfc.mode = L2CAP_MODE_STREAMING;
2382 rfc.max_transmit = 0;
2383 rfc.retrans_timeout = 0;
2384 rfc.monitor_timeout = 0;
2385 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2386 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2387 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2389 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2390 sizeof(rfc), (unsigned long) &rfc);
2392 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2395 if (pi->fcs == L2CAP_FCS_NONE ||
2396 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2397 pi->fcs = L2CAP_FCS_NONE;
2398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2403 /* FIXME: Need actual value of the flush timeout */
2404 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2405 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2407 req->dcid = cpu_to_le16(pi->dcid);
2408 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (pi->conf_req) and
 * build our Configure Response into `data`. Walks the TLV options,
 * negotiates mode/MTU/RFC/FCS, stores accepted remote parameters
 * (remote_tx_win, remote_max_tx, remote_mps) and echoes back adjusted
 * options. Returns the response length (return elided), or
 * -ECONNREFUSED when the requested mode cannot be agreed on.
 */
2413 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2415 struct l2cap_pinfo *pi = l2cap_pi(sk);
2416 struct l2cap_conf_rsp *rsp = data;
2417 void *ptr = rsp->data;
2418 void *req = pi->conf_req;
2419 int len = pi->conf_len;
2420 int type, hint, olen;
2422 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2423 u16 mtu = L2CAP_DEFAULT_MTU;
2424 u16 result = L2CAP_CONF_SUCCESS;
2426 BT_DBG("sk %p", sk);
/* --- option walk --- */
2428 while (len >= L2CAP_CONF_OPT_SIZE) {
2429 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; others must be understood. */
2431 hint = type & L2CAP_CONF_HINT;
2432 type &= L2CAP_CONF_MASK;
2435 case L2CAP_CONF_MTU:
2439 case L2CAP_CONF_FLUSH_TO:
2443 case L2CAP_CONF_QOS:
2446 case L2CAP_CONF_RFC:
2447 if (olen == sizeof(rfc))
2448 memcpy(&rfc, (void *) val, olen);
2451 case L2CAP_CONF_FCS:
2452 if (val == L2CAP_FCS_NONE)
2453 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject it by echoing its type. */
2461 result = L2CAP_CONF_UNKNOWN;
2462 *((u8 *) ptr++) = type;
/* --- mode negotiation (first round only) --- */
2467 if (pi->num_conf_rsp || pi->num_conf_req)
2471 case L2CAP_MODE_STREAMING:
2472 case L2CAP_MODE_ERTM:
2473 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2474 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2475 return -ECONNREFUSED;
2478 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2483 if (pi->mode != rfc.mode) {
2484 result = L2CAP_CONF_UNACCEPT;
2485 rfc.mode = pi->mode;
/* Peer insisted twice on an unacceptable mode: give up. */
2487 if (pi->num_conf_rsp == 1)
2488 return -ECONNREFUSED;
2490 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2491 sizeof(rfc), (unsigned long) &rfc);
2495 if (result == L2CAP_CONF_SUCCESS) {
2496 /* Configure output options and let the other side know
2497 * which ones we don't like. */
2499 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2500 result = L2CAP_CONF_UNACCEPT;
2503 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2505 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2508 case L2CAP_MODE_BASIC:
2509 pi->fcs = L2CAP_FCS_NONE;
2510 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2513 case L2CAP_MODE_ERTM:
2514 pi->remote_tx_win = rfc.txwin_size;
2515 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): comparing/converting max_pdu_size here mixes
 * le16 and cpu byte order oddly — looks suspicious but is kept
 * byte-identical; verify against upstream history. */
2516 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2517 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2519 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2521 rfc.retrans_timeout =
2522 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2523 rfc.monitor_timeout =
2524 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2526 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2529 sizeof(rfc), (unsigned long) &rfc);
2533 case L2CAP_MODE_STREAMING:
2534 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2535 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2537 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2539 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2541 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2542 sizeof(rfc), (unsigned long) &rfc);
2547 result = L2CAP_CONF_UNACCEPT;
2549 memset(&rfc, 0, sizeof(rfc));
2550 rfc.mode = pi->mode;
2553 if (result == L2CAP_CONF_SUCCESS)
2554 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2556 rsp->scid = cpu_to_le16(pi->dcid);
2557 rsp->result = cpu_to_le16(result);
2558 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response (`rsp`, `len` bytes of options)
 * and build a follow-up Configure Request into `data`. Adjusts our
 * omtu/flush_to/RFC to the peer's counter-proposals; refuses a mode
 * switch once STATE2_DEVICE is set. On success, latches the negotiated
 * ERTM/streaming parameters. Returns the new request length (return
 * statement elided in this listing); `*result` carries the outcome.
 */
2563 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2565 struct l2cap_pinfo *pi = l2cap_pi(sk);
2566 struct l2cap_conf_req *req = data;
2567 void *ptr = req->data;
2570 struct l2cap_conf_rfc rfc;
2572 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2574 while (len >= L2CAP_CONF_OPT_SIZE) {
2575 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2578 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: clamp and flag. */
2579 if (val < L2CAP_DEFAULT_MIN_MTU) {
2580 *result = L2CAP_CONF_UNACCEPT;
2581 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2584 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2587 case L2CAP_CONF_FLUSH_TO:
2589 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2593 case L2CAP_CONF_RFC:
2594 if (olen == sizeof(rfc))
2595 memcpy(&rfc, (void *)val, olen);
/* After a state-2 mode decision, a different mode is fatal. */
2597 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2598 rfc.mode != pi->mode)
2599 return -ECONNREFUSED;
2601 pi->mode = rfc.mode;
2604 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2605 sizeof(rfc), (unsigned long) &rfc);
2610 if (*result == L2CAP_CONF_SUCCESS) {
2612 case L2CAP_MODE_ERTM:
2613 pi->remote_tx_win = rfc.txwin_size;
2614 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2615 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2616 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2618 case L2CAP_MODE_STREAMING:
2619 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2623 req->dcid = cpu_to_le16(pi->dcid);
2624 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid/result/flags) into
 * `data`; returns the response length (return statement elided).
 */
2629 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2631 struct l2cap_conf_rsp *rsp = data;
2632 void *ptr = rsp->data;
2634 BT_DBG("sk %p", sk);
2636 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2637 rsp->result = cpu_to_le16(result);
2638 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful Configure Response and
 * latch the negotiated ERTM/streaming parameters (tx window, timeouts,
 * MPS). No-op for basic-mode channels.
 */
2643 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2645 struct l2cap_pinfo *pi = l2cap_pi(sk);
2648 struct l2cap_conf_rfc rfc;
2650 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2652 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2655 while (len >= L2CAP_CONF_OPT_SIZE) {
2656 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2659 case L2CAP_CONF_RFC:
2660 if (olen == sizeof(rfc))
2661 memcpy(&rfc, (void *)val, olen);
2668 case L2CAP_MODE_ERTM:
2669 pi->remote_tx_win = rfc.txwin_size;
2670 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2671 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2672 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2674 case L2CAP_MODE_STREAMING:
2675 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject on the signalling channel. If it answers our
 * outstanding feature-mask Information Request (matching ident), treat
 * the probe as done and resume starting queued channels.
 */
2679 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2681 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (0x0000) is acted upon here. */
2683 if (rej->reason != 0x0000)
2686 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2687 cmd->ident == conn->info_ident) {
2688 del_timer(&conn->info_timer);
2690 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2691 conn->info_ident = 0;
2693 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener on the PSM,
 * enforce link security (unless SDP, PSM 0x0001), allocate and attach
 * a child socket, and answer with success/pending/refusal. If the
 * remote feature mask is still unknown, respond "pending" and fire an
 * Information Request first.
 */
2699 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2701 struct l2cap_chan_list *list = &conn->chan_list;
2702 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2703 struct l2cap_conn_rsp rsp;
2704 struct sock *sk, *parent;
2705 int result, status = L2CAP_CS_NO_INFO;
2707 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2708 __le16 psm = req->psm;
2710 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2712 /* Check if we have socket listening on psm */
2713 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2715 result = L2CAP_CR_BAD_PSM;
2719 /* Check if the ACL is secure enough (if not SDP) */
2720 if (psm != cpu_to_le16(0x0001) &&
2721 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: security-block disconnect reason for the eventual HCI
 * disconnect. */
2722 conn->disc_reason = 0x05;
2723 result = L2CAP_CR_SEC_BLOCK;
2727 result = L2CAP_CR_NO_MEM;
2729 /* Check for backlog size */
2730 if (sk_acceptq_is_full(parent)) {
2731 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2735 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2739 write_lock_bh(&list->lock);
2741 /* Check if we already have channel with that dcid */
2742 if (__l2cap_get_chan_by_dcid(list, scid)) {
2743 write_unlock_bh(&list->lock);
2744 sock_set_flag(sk, SOCK_ZAPPED);
2745 l2cap_sock_kill(sk);
2749 hci_conn_hold(conn->hcon);
2751 l2cap_sock_init(sk, parent);
2752 bacpy(&bt_sk(sk)->src, conn->src);
2753 bacpy(&bt_sk(sk)->dst, conn->dst);
2754 l2cap_pi(sk)->psm = psm;
/* Our dcid is the peer's source CID; our scid is allocated below. */
2755 l2cap_pi(sk)->dcid = scid;
2757 __l2cap_chan_add(conn, sk, parent);
2758 dcid = l2cap_pi(sk)->scid;
2760 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2762 l2cap_pi(sk)->ident = cmd->ident;
2764 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2765 if (l2cap_check_security(sk)) {
2766 if (bt_sk(sk)->defer_setup) {
2767 sk->sk_state = BT_CONNECT2;
2768 result = L2CAP_CR_PEND;
2769 status = L2CAP_CS_AUTHOR_PEND;
2770 parent->sk_data_ready(parent, 0);
2772 sk->sk_state = BT_CONFIG;
2773 result = L2CAP_CR_SUCCESS;
2774 status = L2CAP_CS_NO_INFO;
2777 sk->sk_state = BT_CONNECT2;
2778 result = L2CAP_CR_PEND;
2779 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not known yet: hold in CONNECT2, answer PEND. */
2782 sk->sk_state = BT_CONNECT2;
2783 result = L2CAP_CR_PEND;
2784 status = L2CAP_CS_NO_INFO;
2787 write_unlock_bh(&list->lock);
2790 bh_unlock_sock(parent);
2793 rsp.scid = cpu_to_le16(scid);
2794 rsp.dcid = cpu_to_le16(dcid);
2795 rsp.result = cpu_to_le16(result);
2796 rsp.status = cpu_to_le16(status);
2797 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2799 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2800 struct l2cap_info_req info;
2801 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2803 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2804 conn->info_ident = l2cap_get_ident(conn);
2806 mod_timer(&conn->info_timer, jiffies +
2807 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2809 l2cap_send_cmd(conn, conn->info_ident,
2810 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response. Locate our channel by scid (or by
 * ident when scid is 0, e.g. a pending response), then: on SUCCESS,
 * store the peer's dcid and kick off configuration; on PEND, just mark
 * connect-pending; otherwise tear the channel down with ECONNREFUSED.
 */
2816 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2818 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2819 u16 scid, dcid, result, status;
2823 scid = __le16_to_cpu(rsp->scid);
2824 dcid = __le16_to_cpu(rsp->dcid);
2825 result = __le16_to_cpu(rsp->result);
2826 status = __le16_to_cpu(rsp->status);
2828 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2831 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* Fallback lookup by command ident (scid lookup failed). */
2835 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2841 case L2CAP_CR_SUCCESS:
2842 sk->sk_state = BT_CONFIG;
2843 l2cap_pi(sk)->ident = 0;
2844 l2cap_pi(sk)->dcid = dcid;
2845 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2847 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2849 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2850 l2cap_build_conf_req(sk, req), req);
2851 l2cap_pi(sk)->num_conf_req++;
2855 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2859 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configure Request. Accumulates fragmented option data in
 * pi->conf_req (rejecting overflow), sends an empty response for
 * incomplete (continuation-flagged) requests, and on the final
 * fragment parses the options, answers, and — once both directions are
 * configured — transitions the channel to BT_CONNECTED (initialising
 * ERTM state if needed) or sends our own Configure Request if we have
 * not yet.
 */
2867 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2869 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2875 dcid = __le16_to_cpu(req->dcid);
2876 flags = __le16_to_cpu(req->flags);
2878 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2880 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2884 if (sk->sk_state == BT_DISCONN)
2887 /* Reject if config buffer is too small. */
2888 len = cmd_len - sizeof(*req);
2889 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2890 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2891 l2cap_build_conf_rsp(sk, rsp,
2892 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the per-channel config buffer. */
2897 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2898 l2cap_pi(sk)->conf_len += len;
2900 if (flags & 0x0001) {
2901 /* Incomplete config. Send empty response. */
2902 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2903 l2cap_build_conf_rsp(sk, rsp,
2904 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2908 /* Complete config. */
2909 len = l2cap_parse_conf_req(sk, rsp);
2911 l2cap_send_disconn_req(conn, sk);
2915 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2916 l2cap_pi(sk)->num_conf_rsp++;
2918 /* Reset config buffer. */
2919 l2cap_pi(sk)->conf_len = 0;
2921 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2924 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Default to CRC16 FCS unless both sides asked for none. */
2925 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2926 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2927 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2929 sk->sk_state = BT_CONNECTED;
2931 l2cap_pi(sk)->next_tx_seq = 0;
2932 l2cap_pi(sk)->expected_tx_seq = 0;
2933 __skb_queue_head_init(TX_QUEUE(sk));
2934 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2935 l2cap_ertm_init(sk);
2937 l2cap_chan_ready(sk);
2941 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2943 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2944 l2cap_build_conf_req(sk, buf), buf);
2945 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configure Response. SUCCESS latches negotiated RFC values;
 * UNACCEPT (within the retry limit) re-parses the counter-proposals and
 * sends a revised Configure Request; anything else disconnects. When
 * both directions finish, move the channel to BT_CONNECTED.
 */
2953 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2955 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2956 u16 scid, flags, result;
2958 int len = cmd->len - sizeof(*rsp);
2960 scid = __le16_to_cpu(rsp->scid);
2961 flags = __le16_to_cpu(rsp->flags);
2962 result = __le16_to_cpu(rsp->result);
2964 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2965 scid, flags, result);
2967 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2972 case L2CAP_CONF_SUCCESS:
2973 l2cap_conf_rfc_get(sk, rsp->data, len);
2976 case L2CAP_CONF_UNACCEPT:
/* Bounded renegotiation: give up past the retry limit. */
2977 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2980 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2981 l2cap_send_disconn_req(conn, sk);
2985 /* throw out any old stored conf requests */
2986 result = L2CAP_CONF_SUCCESS;
2987 len = l2cap_parse_conf_rsp(sk, rsp->data,
2990 l2cap_send_disconn_req(conn, sk);
2994 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2995 L2CAP_CONF_REQ, len, req);
2996 l2cap_pi(sk)->num_conf_req++;
2997 if (result != L2CAP_CONF_SUCCESS)
/* Rejected outright: schedule disconnect with a 5s guard timer. */
3003 sk->sk_state = BT_DISCONN;
3004 sk->sk_err = ECONNRESET;
3005 l2cap_sock_set_timer(sk, HZ * 5);
3006 l2cap_send_disconn_req(conn, sk);
3013 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3015 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3016 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3017 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3018 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3020 sk->sk_state = BT_CONNECTED;
3021 l2cap_pi(sk)->next_tx_seq = 0;
3022 l2cap_pi(sk)->expected_tx_seq = 0;
3023 __skb_queue_head_init(TX_QUEUE(sk));
3024 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3025 l2cap_ertm_init(sk);
3027 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: acknowledge it, shut both directions,
 * purge queued TX (and ERTM SREJ queue + timers), then remove and kill
 * the channel with ECONNRESET.
 */
3035 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3037 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3038 struct l2cap_disconn_rsp rsp;
3042 scid = __le16_to_cpu(req->scid);
3043 dcid = __le16_to_cpu(req->dcid);
3045 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look the channel up by it. */
3047 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3051 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3052 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3053 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3055 sk->sk_shutdown = SHUTDOWN_MASK;
3057 skb_queue_purge(TX_QUEUE(sk));
3059 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3060 skb_queue_purge(SREJ_QUEUE(sk));
3061 del_timer(&l2cap_pi(sk)->retrans_timer);
3062 del_timer(&l2cap_pi(sk)->monitor_timer);
3063 del_timer(&l2cap_pi(sk)->ack_timer);
3066 l2cap_chan_del(sk, ECONNRESET);
3069 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our own request: purge queued TX
 * (and ERTM state), then remove and kill the channel without an error.
 */
3073 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3075 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3079 scid = __le16_to_cpu(rsp->scid);
3080 dcid = __le16_to_cpu(rsp->dcid);
3082 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3084 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3088 skb_queue_purge(TX_QUEUE(sk));
3090 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3091 skb_queue_purge(SREJ_QUEUE(sk));
3092 del_timer(&l2cap_pi(sk)->retrans_timer);
3093 del_timer(&l2cap_pi(sk)->monitor_timer);
3094 del_timer(&l2cap_pi(sk)->ack_timer);
/* err = 0: this is a clean, locally-initiated disconnect. */
3097 l2cap_chan_del(sk, 0);
3100 l2cap_sock_kill(sk);
/* Handle an Information Request: answer FEAT_MASK with our feature
 * mask (extended with ERTM/streaming when enabled — the conditional is
 * elided here), FIXED_CHAN with the fixed-channel bitmap, and anything
 * else with NOTSUPP.
 */
3104 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3106 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3109 type = __le16_to_cpu(req->type);
3111 BT_DBG("type 0x%4.4x", type);
3113 if (type == L2CAP_IT_FEAT_MASK) {
3115 u32 feat_mask = l2cap_feat_mask;
3116 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3117 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3118 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3120 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3122 put_unaligned_le32(feat_mask, rsp->data);
3123 l2cap_send_cmd(conn, cmd->ident,
3124 L2CAP_INFO_RSP, sizeof(buf), buf);
3125 } else if (type == L2CAP_IT_FIXED_CHAN) {
3127 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3128 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3129 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel map follows the 4-byte response header. */
3130 memcpy(buf + 4, l2cap_fixed_chan, 8);
3131 l2cap_send_cmd(conn, cmd->ident,
3132 L2CAP_INFO_RSP, sizeof(buf), buf);
3134 struct l2cap_info_rsp rsp;
3135 rsp.type = cpu_to_le16(type);
3136 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3137 l2cap_send_cmd(conn, cmd->ident,
3138 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Process the peer's Information Response. After the feature mask arrives
 * we optionally chain a fixed-channel query; once the exchange is complete
 * the deferred connection setup is kicked via l2cap_conn_start(). */
3144 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3146 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3149 type = __le16_to_cpu(rsp->type);
3150 result = __le16_to_cpu(rsp->result);
3152 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A response arrived, so the info-request timeout is no longer needed. */
3154 del_timer(&conn->info_timer);
3156 if (type == L2CAP_IT_FEAT_MASK) {
3157 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask for its bitmap before
 * declaring the information exchange finished. */
3159 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3160 struct l2cap_info_req req;
3161 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3163 conn->info_ident = l2cap_get_ident(conn);
3165 l2cap_send_cmd(conn, conn->info_ident,
3166 L2CAP_INFO_REQ, sizeof(req), &req);
/* No fixed-channel support: exchange done, resume pending connects. */
3168 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3169 conn->info_ident = 0;
3171 l2cap_conn_start(conn);
3173 } else if (type == L2CAP_IT_FIXED_CHAN) {
3174 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3175 conn->info_ident = 0;
3177 l2cap_conn_start(conn);
/* Parse the L2CAP signalling channel (CID 0x0001): iterate over the
 * command headers packed into one skb, dispatch each to its handler, and
 * send a Command Reject if a handler fails. */
3183 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3185 u8 *data = skb->data;
3187 struct l2cap_cmd_hdr cmd;
/* Mirror the raw signalling traffic to any raw L2CAP sockets first. */
3190 l2cap_raw_recv(conn, skb);
3192 while (len >= L2CAP_CMD_HDR_SIZE) {
3194 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3195 data += L2CAP_CMD_HDR_SIZE;
3196 len -= L2CAP_CMD_HDR_SIZE;
3198 cmd_len = le16_to_cpu(cmd.len);
3200 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Bounds-check the claimed command length and reject ident 0, which
 * is reserved and never valid in a request. */
3202 if (cmd_len > len || !cmd.ident) {
3203 BT_DBG("corrupted command");
/* Dispatch on cmd.code. */
3208 case L2CAP_COMMAND_REJ:
3209 l2cap_command_rej(conn, &cmd, data);
3212 case L2CAP_CONN_REQ:
3213 err = l2cap_connect_req(conn, &cmd, data);
3216 case L2CAP_CONN_RSP:
3217 err = l2cap_connect_rsp(conn, &cmd, data);
3220 case L2CAP_CONF_REQ:
3221 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3224 case L2CAP_CONF_RSP:
3225 err = l2cap_config_rsp(conn, &cmd, data);
3228 case L2CAP_DISCONN_REQ:
3229 err = l2cap_disconnect_req(conn, &cmd, data);
3232 case L2CAP_DISCONN_RSP:
3233 err = l2cap_disconnect_rsp(conn, &cmd, data);
3236 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back to the sender. */
3237 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3240 case L2CAP_ECHO_RSP:
3243 case L2CAP_INFO_REQ:
3244 err = l2cap_information_req(conn, &cmd, data);
3247 case L2CAP_INFO_RSP:
3248 err = l2cap_information_rsp(conn, &cmd, data);
3252 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Any handler failure turns into a Command Reject back to the peer. */
3258 struct l2cap_cmd_rej rej;
3259 BT_DBG("error %d", err);
3261 /* FIXME: Map err to a valid reason */
3262 rej.reason = cpu_to_le16(0);
3263 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS trailing an ERTM/streaming frame.
 * The skb is trimmed by 2 first; the received FCS is then read from the
 * bytes just past the new end (still present in the linear buffer), and
 * the CRC is computed over the L2CAP header + control field + payload
 * (skb->data - hdr_size reaches back over the already-pulled header). */
3273 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3275 u16 our_fcs, rcv_fcs;
3276 int hdr_size = L2CAP_HDR_SIZE + 2;
3278 if (pi->fcs == L2CAP_FCS_CRC16) {
3279 skb_trim(skb, skb->len - 2);
3280 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3281 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3283 if (our_fcs != rcv_fcs)
/* Reply to a poll (P-bit) from the peer: send an RNR if we are locally
 * busy, otherwise flush pending I-frames, and fall back to an RR if no
 * I-frame went out to carry the F-bit acknowledgement. */
3289 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3291 struct l2cap_pinfo *pi = l2cap_pi(sk);
3294 pi->frames_sent = 0;
/* The next outgoing frame must carry the Final bit. */
3295 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3297 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3299 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Locally busy: answer with RNR + F-bit immediately. */
3300 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3301 l2cap_send_sframe(pi, control);
3302 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3305 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3306 __mod_retrans_timer();
/* Try to carry the F-bit on pending I-frames. */
3308 l2cap_ertm_send(sk);
/* Nothing was sent and we are not busy: an explicit RR must carry
 * the acknowledgement instead. */
3310 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3311 pi->frames_sent == 0) {
3312 control |= L2CAP_SUPER_RCV_READY;
3313 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq so reassembly can drain it in order.
 * NOTE(review): ordering compares raw tx_seq values; with the modulo-64
 * sequence space, wrap-around ordering depends on the elided surrounding
 * logic — confirm against the full source. */
3317 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3319 struct sk_buff *next_skb;
/* Stash sequence number and SAR bits in the skb control block. */
3321 bt_cb(skb)->tx_seq = tx_seq;
3322 bt_cb(skb)->sar = sar;
3324 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: trivial append. */
3326 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Walk until we find the first entry with a larger tx_seq and insert
 * in front of it. */
3331 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3332 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3336 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3339 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Larger than everything queued: append at the tail. */
3341 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble a segmented SDU from the SAR bits of each I-frame.
 * Unsegmented frames are queued directly; START allocates the
 * reassembly buffer, CONTINUE appends, and END delivers the complete
 * SDU to the socket receive queue. */
3344 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3346 struct l2cap_pinfo *pi = l2cap_pi(sk);
3347 struct sk_buff *_skb;
3350 switch (control & L2CAP_CTRL_SAR) {
3351 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of a reassembly is a
 * protocol violation. */
3352 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3357 err = sock_queue_rcv_skb(sk, skb);
3363 case L2CAP_SDU_START:
/* A new START while a reassembly is in flight is invalid. */
3364 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* START frames carry the total SDU length in the first 2 bytes. */
3369 pi->sdu_len = get_unaligned_le16(skb->data);
/* Reject SDUs larger than our advertised incoming MTU. */
3372 if (pi->sdu_len > pi->imtu) {
3377 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3383 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3385 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3386 pi->partial_sdu_len = skb->len;
3390 case L2CAP_SDU_CONTINUE:
3391 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3394 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* Guard against the peer sending more than it announced. */
3396 pi->partial_sdu_len += skb->len;
3397 if (pi->partial_sdu_len > pi->sdu_len)
/* L2CAP_SDU_END: final fragment of the SDU. */
3405 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3408 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3410 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3411 pi->partial_sdu_len += skb->len;
3413 if (pi->partial_sdu_len > pi->imtu)
/* Deliver only if the received total matches the announced length. */
3416 if (pi->partial_sdu_len == pi->sdu_len) {
3417 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3418 err = sock_queue_rcv_skb(sk, _skb);
/* Drain the SREJ queue: deliver queued frames that now form a contiguous
 * run starting at tx_seq, advancing buffer_seq_srej (mod 64) for each. */
3433 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3435 struct sk_buff *skb;
3438 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first gap in the sequence. */
3439 if (bt_cb(skb)->tx_seq != tx_seq)
3442 skb = skb_dequeue(SREJ_QUEUE(sk));
/* Rebuild the SAR bits stored when the frame was queued. */
3443 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3444 l2cap_sar_reassembly_sdu(sk, skb, control);
3445 l2cap_pi(sk)->buffer_seq_srej =
3446 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-send SREJ S-frames for every outstanding sequence number up to and
 * including tx_seq; matched entries are re-queued at the list tail
 * (list_add_tail) so the SREJ list stays in request order. */
3451 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3453 struct l2cap_pinfo *pi = l2cap_pi(sk);
3454 struct srej_list *l, *tmp;
/* _safe variant: entries are moved while iterating. */
3457 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3458 if (l->tx_seq == tx_seq) {
3463 control = L2CAP_SUPER_SELECT_REJECT;
3464 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3465 l2cap_send_sframe(pi, control);
3467 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send one SREJ S-frame per missing sequence number between
 * expected_tx_seq and the just-received tx_seq, recording each requested
 * number in the SREJ list so the retransmissions can be matched later. */
3471 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3473 struct l2cap_pinfo *pi = l2cap_pi(sk);
3474 struct srej_list *new;
3477 while (tx_seq != pi->expected_tx_seq) {
3478 control = L2CAP_SUPER_SELECT_REJECT;
3479 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3480 l2cap_send_sframe(pi, control);
/* NOTE(review): GFP_ATOMIC allocation is not checked for NULL here;
 * the following dereference would oops under memory pressure. */
3482 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3483 new->tx_seq = pi->expected_tx_seq++;
3484 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that actually arrived. */
3486 pi->expected_tx_seq++;
/* Core ERTM I-frame receive path: acknowledge the peer's ReqSeq, detect
 * out-of-sequence frames (starting/continuing SREJ recovery as needed),
 * deliver in-sequence frames to SAR reassembly, and schedule periodic
 * acknowledgements every num_to_ack frames. */
3489 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3491 struct l2cap_pinfo *pi = l2cap_pi(sk);
3492 u8 tx_seq = __get_txseq(rx_control);
3493 u8 req_seq = __get_reqseq(rx_control);
3494 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* Ack threshold derived from the TX window (roughly every sixth frame). */
3495 int num_to_ack = (pi->tx_win/6) + 1;
3498 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer and leave WAIT_F. */
3500 if (L2CAP_CTRL_FINAL & rx_control) {
3501 del_timer(&pi->monitor_timer);
3502 if (pi->unacked_frames > 0)
3503 __mod_retrans_timer();
3504 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* Every I-frame piggybacks an acknowledgement in ReqSeq. */
3507 pi->expected_ack_seq = req_seq;
3508 l2cap_drop_acked_frames(sk);
3510 if (tx_seq == pi->expected_tx_seq)
/* Out-of-sequence frame while SREJ recovery is already running. */
3513 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3514 struct srej_list *first;
3516 first = list_first_entry(SREJ_LIST(sk),
3517 struct srej_list, list);
/* This is the oldest frame we asked for: queue it and try to
 * close the gap. */
3518 if (tx_seq == first->tx_seq) {
3519 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3520 l2cap_check_srej_gap(sk, tx_seq);
3522 list_del(&first->list);
/* All requested retransmissions arrived: leave SREJ state and
 * resynchronise buffer_seq. */
3525 if (list_empty(SREJ_LIST(sk))) {
3526 pi->buffer_seq = pi->buffer_seq_srej;
3527 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3531 struct srej_list *l;
3532 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* Duplicate of an already-requested frame: repeat the SREJs. */
3534 list_for_each_entry(l, SREJ_LIST(sk), list) {
3535 if (l->tx_seq == tx_seq) {
3536 l2cap_resend_srejframe(sk, tx_seq);
/* A fresh gap beyond the requested ones: request it too. */
3540 l2cap_send_srejframe(sk, tx_seq);
/* First out-of-sequence frame: enter SREJ recovery. */
3543 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3545 INIT_LIST_HEAD(SREJ_LIST(sk));
3546 pi->buffer_seq_srej = pi->buffer_seq;
3548 __skb_queue_head_init(SREJ_QUEUE(sk));
3549 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3551 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3553 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence ("expected") path. */
3558 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* While recovering, even in-sequence frames are parked on the SREJ
 * queue so delivery order is preserved. */
3560 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3561 bt_cb(skb)->tx_seq = tx_seq;
3562 bt_cb(skb)->sar = sar;
3563 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* F-bit clears a pending REJ recovery: rewind to the acked point
 * and retransmit from there. */
3567 if (rx_control & L2CAP_CTRL_FINAL) {
3568 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3569 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3571 if (!skb_queue_empty(TX_QUEUE(sk)))
3572 sk->sk_send_head = TX_QUEUE(sk)->next;
3573 pi->next_tx_seq = pi->expected_ack_seq;
3574 l2cap_ertm_send(sk);
3578 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3580 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Send an explicit acknowledgement every num_to_ack frames. */
3586 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3587 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready S-frame: credit the acknowledgement, then act
 * on the P/F bits — a poll demands an immediate response, a Final bit
 * resumes a stalled retransmission, otherwise just keep sending. */
3593 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3595 struct l2cap_pinfo *pi = l2cap_pi(sk);
3597 pi->expected_ack_seq = __get_reqseq(rx_control);
3598 l2cap_drop_acked_frames(sk);
3600 if (rx_control & L2CAP_CTRL_POLL) {
/* While in SREJ recovery, answer the poll with the tail SREJ. */
3601 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3602 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3603 (pi->unacked_frames > 0))
3604 __mod_retrans_timer();
3606 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3607 l2cap_send_srejtail(sk);
/* Normal poll: respond with I-frame, RR or RNR as appropriate. */
3609 l2cap_send_i_or_rr_or_rnr(sk);
3610 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3613 } else if (rx_control & L2CAP_CTRL_FINAL) {
3614 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* A Final RR closes REJ recovery: rewind next_tx_seq to the acked
 * frame and retransmit the remainder of the TX queue. */
3616 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3617 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3619 if (!skb_queue_empty(TX_QUEUE(sk)))
3620 sk->sk_send_head = TX_QUEUE(sk)->next;
3621 pi->next_tx_seq = pi->expected_ack_seq;
3622 l2cap_ertm_send(sk);
/* Plain RR: restart the retransmission timer if frames are still
 * unacked, and push more data unless SREJ recovery is running. */
3626 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3627 (pi->unacked_frames > 0))
3628 __mod_retrans_timer();
3630 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3631 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3634 l2cap_ertm_send(sk);
/* Handle a Reject S-frame: the peer asks for a go-back-N retransmission
 * starting at ReqSeq. Rewind the send state and retransmit; track
 * REJ_ACT while a poll/final exchange is outstanding. */
3638 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3640 struct l2cap_pinfo *pi = l2cap_pi(sk);
3641 u8 tx_seq = __get_reqseq(rx_control);
3643 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Everything before ReqSeq is implicitly acknowledged. */
3645 pi->expected_ack_seq = tx_seq;
3646 l2cap_drop_acked_frames(sk);
3648 if (rx_control & L2CAP_CTRL_FINAL) {
3649 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3650 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* Rewind to the rejected frame and retransmit from there. */
3652 if (!skb_queue_empty(TX_QUEUE(sk)))
3653 sk->sk_send_head = TX_QUEUE(sk)->next;
3654 pi->next_tx_seq = pi->expected_ack_seq;
3655 l2cap_ertm_send(sk);
3658 if (!skb_queue_empty(TX_QUEUE(sk)))
3659 sk->sk_send_head = TX_QUEUE(sk)->next;
3660 pi->next_tx_seq = pi->expected_ack_seq;
3661 l2cap_ertm_send(sk);
/* If we are waiting for an F-bit, remember this REJ so a duplicate
 * after the poll round-trip is not acted on twice. */
3663 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3664 pi->srej_save_reqseq = tx_seq;
3665 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject S-frame: retransmit exactly the requested
 * frame. P/F bits determine acknowledgement handling and whether the
 * SREJ must be remembered (SREJ_ACT) across a poll/final exchange. */
3669 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3671 struct l2cap_pinfo *pi = l2cap_pi(sk);
3672 u8 tx_seq = __get_reqseq(rx_control);
3674 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3676 if (rx_control & L2CAP_CTRL_POLL) {
/* SREJ with P-bit also acknowledges frames before ReqSeq. */
3677 pi->expected_ack_seq = tx_seq;
3678 l2cap_drop_acked_frames(sk);
3679 l2cap_retransmit_frame(sk, tx_seq);
3680 l2cap_ertm_send(sk);
3681 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3682 pi->srej_save_reqseq = tx_seq;
3683 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3685 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Retransmit only if this F-bit SREJ was not already handled as
 * the saved SREJ from our own poll. */
3686 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3687 pi->srej_save_reqseq == tx_seq)
3688 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3690 l2cap_retransmit_frame(sk, tx_seq);
/* Plain SREJ: retransmit the requested frame. */
3692 l2cap_retransmit_frame(sk, tx_seq);
3693 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3694 pi->srej_save_reqseq = tx_seq;
3695 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready S-frame: mark the peer busy, credit the
 * acknowledgement, stop retransmitting, and answer a poll with an
 * F-bit response (or the tail SREJ if recovery is in progress). */
3700 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3702 struct l2cap_pinfo *pi = l2cap_pi(sk);
3703 u8 tx_seq = __get_reqseq(rx_control);
3705 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3706 pi->expected_ack_seq = tx_seq;
3707 l2cap_drop_acked_frames(sk);
3709 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer cannot receive: no point running the retransmit timer. */
3710 del_timer(&pi->retrans_timer);
3711 if (rx_control & L2CAP_CTRL_POLL) {
3712 u16 control = L2CAP_CTRL_FINAL;
3713 l2cap_send_rr_or_rnr(pi, control);
/* In SREJ recovery a poll is answered with the outstanding SREJs. */
3718 if (rx_control & L2CAP_CTRL_POLL)
3719 l2cap_send_srejtail(sk);
3721 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Demultiplex an incoming ERTM S-frame to the RR/REJ/SREJ/RNR handler
 * after processing a Final bit (stop the monitor timer, leave WAIT_F). */
3724 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3726 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3728 if (L2CAP_CTRL_FINAL & rx_control) {
3729 del_timer(&l2cap_pi(sk)->monitor_timer);
3730 if (l2cap_pi(sk)->unacked_frames > 0)
3731 __mod_retrans_timer();
3732 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
/* Dispatch on the supervisory function bits. */
3735 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3736 case L2CAP_SUPER_RCV_READY:
3737 l2cap_data_channel_rrframe(sk, rx_control);
3740 case L2CAP_SUPER_REJECT:
3741 l2cap_data_channel_rejframe(sk, rx_control);
3744 case L2CAP_SUPER_SELECT_REJECT:
3745 l2cap_data_channel_srejframe(sk, rx_control);
3748 case L2CAP_SUPER_RCV_NOT_READY:
3749 l2cap_data_channel_rnrframe(sk, rx_control);
/* Deliver a data frame on a connection-oriented channel, dispatching on
 * the channel mode: basic (straight to the socket), ERTM (control field,
 * FCS check, I/S-frame handling) or streaming (no retransmission —
 * missing frames simply advance expected_tx_seq). */
3757 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3760 struct l2cap_pinfo *pi;
3764 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3766 BT_DBG("unknown cid 0x%4.4x", cid);
3772 BT_DBG("sk %p, len %d", sk, skb->len);
3774 if (sk->sk_state != BT_CONNECTED)
3778 case L2CAP_MODE_BASIC:
3779 /* If socket recv buffers overflows we drop data here
3780 * which is *bad* because L2CAP has to be reliable.
3781 * But we don't have any other choice. L2CAP doesn't
3782 * provide flow control mechanism. */
/* Frames larger than our incoming MTU are dropped. */
3784 if (pi->imtu < skb->len)
3787 if (!sock_queue_rcv_skb(sk, skb))
3791 case L2CAP_MODE_ERTM:
/* ERTM payload starts with a 16-bit little-endian control field. */
3792 control = get_unaligned_le16(skb->data)
3796 if (__is_sar_start(control))
3799 if (pi->fcs == L2CAP_FCS_CRC16)
3803 * We can just drop the corrupted I-frame here.
3804 * Receiver will miss it and start proper recovery
3805 * procedures and ask retransmission.
/* Bad FCS: silently discard; ERTM recovery will ask again. */
3810 if (l2cap_check_fcs(pi, skb))
3813 if (__is_iframe(control)) {
3817 l2cap_data_channel_iframe(sk, control, skb);
3822 l2cap_data_channel_sframe(sk, control, skb);
3827 case L2CAP_MODE_STREAMING:
3828 control = get_unaligned_le16(skb->data)
3832 if (__is_sar_start(control))
3835 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries I-frames only; length is sanity-checked
 * against the negotiated MPS. */
3838 if (len > pi->mps || len < 4 || __is_sframe(control))
3841 if (l2cap_check_fcs(pi, skb))
3844 tx_seq = __get_txseq(control);
/* No retransmission in streaming mode: on a sequence gap just
 * resynchronise to the frame that actually arrived. */
3846 if (pi->expected_tx_seq == tx_seq)
3847 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3849 pi->expected_tx_seq = (tx_seq + 1) % 64;
3851 l2cap_sar_reassembly_sdu(sk, skb, control);
3856 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) frame to the socket bound to the
 * given PSM, subject to state and incoming-MTU checks. */
3870 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3874 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3878 BT_DBG("sk %p, len %d", sk, skb->len);
3880 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3883 if (l2cap_pi(sk)->imtu < skb->len)
3886 if (!sock_queue_rcv_skb(sk, skb))
/* Entry point for a complete reassembled L2CAP frame: strip the basic
 * header, validate the length, and route by CID to the signalling,
 * connectionless or connection-oriented data path. */
3898 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3900 struct l2cap_hdr *lh = (void *) skb->data;
3904 skb_pull(skb, L2CAP_HDR_SIZE);
3905 cid = __le16_to_cpu(lh->cid);
3906 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
3908 if (len != skb->len) {
3913 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3916 case L2CAP_CID_SIGNALING:
3917 l2cap_sig_channel(conn, skb);
3920 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM first in the payload. */
3921 psm = get_unaligned_le16(skb->data);
3923 l2cap_conless_channel(conn, psm, skb);
3927 l2cap_data_channel(conn, cid, skb);
3932 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from bdaddr?
 * Scans listening L2CAP sockets; an exact local-address match (lm1)
 * takes precedence over wildcard BDADDR_ANY listeners (lm2). Returns the
 * accumulated HCI link-mode flags, or 0 to refuse. */
3934 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3936 int exact = 0, lm1 = 0, lm2 = 0;
3937 register struct sock *sk;
3938 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
3940 if (type != ACL_LINK)
3943 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3945 /* Find listening sockets and check their link_mode */
3946 read_lock(&l2cap_sk_list.lock);
3947 sk_for_each(sk, node, &l2cap_sk_list.head) {
3948 if (sk->sk_state != BT_LISTEN)
/* Socket bound to this adapter's own address: exact match. */
3951 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3952 lm1 |= HCI_LM_ACCEPT;
3953 if (l2cap_pi(sk)->role_switch)
3954 lm1 |= HCI_LM_MASTER;
/* Wildcard-bound socket: fallback acceptance. */
3956 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3957 lm2 |= HCI_LM_ACCEPT;
3958 if (l2cap_pi(sk)->role_switch)
3959 lm2 |= HCI_LM_MASTER;
3962 read_unlock(&l2cap_sk_list.lock);
3964 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming ACL connect completed. On success
 * attach an l2cap_conn and mark it ready; on failure tear down with the
 * HCI status mapped to an errno via bt_err(). */
3967 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3969 struct l2cap_conn *conn;
3971 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3973 if (hcon->type != ACL_LINK)
3977 conn = l2cap_conn_add(hcon, status);
3979 l2cap_conn_ready(conn);
3981 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the HCI reason code to use when this ACL link is
 * being disconnected (stored on the l2cap_conn during setup). */
3986 static int l2cap_disconn_ind(struct hci_conn *hcon)
3988 struct l2cap_conn *conn = hcon->l2cap_data;
3990 BT_DBG("hcon %p", hcon);
3992 if (hcon->type != ACL_LINK || !conn)
3995 return conn->disc_reason;
/* HCI callback: the ACL link went down — destroy the whole l2cap_conn,
 * propagating the HCI reason as an errno to every channel. */
3998 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4000 BT_DBG("hcon %p reason %d", hcon, reason);
4002 if (hcon->type != ACL_LINK)
4005 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel. Losing
 * encryption on a MEDIUM-security channel arms a 5 s grace timer
 * (re-encryption may follow); on a HIGH-security channel the socket is
 * closed immediately. Raw sockets are exempt. */
4010 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4012 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4015 if (encrypt == 0x00) {
4016 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4017 l2cap_sock_clear_timer(sk);
4018 l2cap_sock_set_timer(sk, HZ * 5);
4019 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4020 __l2cap_sock_close(sk, ECONNREFUSED);
/* Encryption (re-)enabled: cancel the grace timer. */
4022 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4023 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption request completed. Walk the
 * connection's channel list and move each pending channel forward: send
 * the deferred Connect Request (BT_CONNECT) or the deferred Connect
 * Response (BT_CONNECT2), or just re-evaluate encryption state for
 * already-established channels. */
4027 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4029 struct l2cap_chan_list *l;
4030 struct l2cap_conn *conn = hcon->l2cap_data;
4036 l = &conn->chan_list;
4038 BT_DBG("conn %p", conn);
4040 read_lock(&l->lock);
4042 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels still waiting on link-level setup are skipped here. */
4045 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4050 if (!status && (sk->sk_state == BT_CONNECTED ||
4051 sk->sk_state == BT_CONFIG)) {
4052 l2cap_check_encryption(sk, encrypt);
4057 if (sk->sk_state == BT_CONNECT) {
/* Security now satisfied: send the deferred Connect Request. */
4059 struct l2cap_conn_req req;
4060 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4061 req.psm = l2cap_pi(sk)->psm;
4063 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4065 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4066 L2CAP_CONN_REQ, sizeof(req), &req);
4068 l2cap_sock_clear_timer(sk);
4069 l2cap_sock_set_timer(sk, HZ / 10);
4071 } else if (sk->sk_state == BT_CONNECT2) {
/* Incoming connection held for security: accept or refuse. */
4072 struct l2cap_conn_rsp rsp;
4076 sk->sk_state = BT_CONFIG;
4077 result = L2CAP_CR_SUCCESS;
4079 sk->sk_state = BT_DISCONN;
4080 l2cap_sock_set_timer(sk, HZ / 10);
4081 result = L2CAP_CR_SEC_BLOCK;
4084 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4085 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4086 rsp.result = cpu_to_le16(result);
4087 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4088 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4089 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4095 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived. Reassemble fragmented L2CAP frames
 * across ACL_START / continuation packets using conn->rx_skb/rx_len,
 * handing each complete frame to l2cap_recv_frame(). Malformed fragment
 * sequences mark the connection unreliable (ECOMM). */
4100 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4102 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the l2cap_conn on first data. */
4104 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4107 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4109 if (flags & ACL_START) {
4110 struct l2cap_hdr *hdr;
/* A new start while a reassembly is pending: drop the stale one. */
4114 BT_ERR("Unexpected start frame (len %d)", skb->len);
4115 kfree_skb(conn->rx_skb);
4116 conn->rx_skb = NULL;
4118 l2cap_conn_unreliable(conn, ECOMM);
/* Start fragment must at least contain the L2CAP basic header. */
4122 BT_ERR("Frame is too short (len %d)", skb->len);
4123 l2cap_conn_unreliable(conn, ECOMM);
4127 hdr = (struct l2cap_hdr *) skb->data;
4128 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4130 if (len == skb->len) {
4131 /* Complete frame received */
4132 l2cap_recv_frame(conn, skb);
4136 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4138 if (skb->len > len) {
4139 BT_ERR("Frame is too long (len %d, expected len %d)",
4141 l2cap_conn_unreliable(conn, ECOMM);
4145 /* Allocate skb for the complete frame (with header) */
4146 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4150 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remember how many bytes are still outstanding. */
4152 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4154 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4156 if (!conn->rx_len) {
4157 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4158 l2cap_conn_unreliable(conn, ECOMM);
4162 if (skb->len > conn->rx_len) {
4163 BT_ERR("Fragment is too long (len %d, expected %d)",
4164 skb->len, conn->rx_len);
4165 kfree_skb(conn->rx_skb);
4166 conn->rx_skb = NULL;
4168 l2cap_conn_unreliable(conn, ECOMM);
4172 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4174 conn->rx_len -= skb->len;
4176 if (!conn->rx_len) {
4177 /* Complete frame received */
4178 l2cap_recv_frame(conn, conn->rx_skb);
4179 conn->rx_skb = NULL;
/* debugfs seq_file callback: dump one line per L2CAP socket
 * (addresses, state, PSM, CIDs, MTUs, security level). */
4188 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4191 struct hlist_node *node;
/* _bh: the socket list is also touched from softirq context. */
4193 read_lock_bh(&l2cap_sk_list.lock);
4195 sk_for_each(sk, node, &l2cap_sk_list.head) {
4196 struct l2cap_pinfo *pi = l2cap_pi(sk);
4198 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4199 batostr(&bt_sk(sk)->src),
4200 batostr(&bt_sk(sk)->dst),
4201 sk->sk_state, __le16_to_cpu(pi->psm),
4203 pi->imtu, pi->omtu, pi->sec_level);
4206 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file show callback. */
4211 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4213 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for /sys/kernel/debug/bluetooth/l2cap (single_open
 * based read-only seq_file). */
4216 static const struct file_operations l2cap_debugfs_fops = {
4217 .open = l2cap_debugfs_open,
4219 .llseek = seq_lseek,
4220 .release = single_release,
/* proto_ops vtable for PF_BLUETOOTH/BTPROTO_L2CAP sockets; unsupported
 * operations fall back to the sock_no_* / generic bt_sock_* helpers. */
4225 static const struct proto_ops l2cap_sock_ops = {
4226 .family = PF_BLUETOOTH,
4227 .owner = THIS_MODULE,
4228 .release = l2cap_sock_release,
4229 .bind = l2cap_sock_bind,
4230 .connect = l2cap_sock_connect,
4231 .listen = l2cap_sock_listen,
4232 .accept = l2cap_sock_accept,
4233 .getname = l2cap_sock_getname,
4234 .sendmsg = l2cap_sock_sendmsg,
4235 .recvmsg = l2cap_sock_recvmsg,
4236 .poll = bt_sock_poll,
4237 .ioctl = bt_sock_ioctl,
4238 .mmap = sock_no_mmap,
4239 .socketpair = sock_no_socketpair,
4240 .shutdown = l2cap_sock_shutdown,
4241 .setsockopt = l2cap_sock_setsockopt,
4242 .getsockopt = l2cap_sock_getsockopt
/* Socket-family hook: creates L2CAP sockets for PF_BLUETOOTH. */
4245 static const struct net_proto_family l2cap_sock_family_ops = {
4246 .family = PF_BLUETOOTH,
4247 .owner = THIS_MODULE,
4248 .create = l2cap_sock_create,
/* Registration with the HCI core: the callbacks through which the lower
 * layer notifies L2CAP of link events and delivers ACL data. */
4251 static struct hci_proto l2cap_hci_proto = {
4253 .id = HCI_PROTO_L2CAP,
4254 .connect_ind = l2cap_connect_ind,
4255 .connect_cfm = l2cap_connect_cfm,
4256 .disconn_ind = l2cap_disconn_ind,
4257 .disconn_cfm = l2cap_disconn_cfm,
4258 .security_cfm = l2cap_security_cfm,
4259 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the BT socket family and the HCI
 * protocol (unwinding in reverse on failure), then create the debugfs
 * entry — debugfs failure is only logged, not fatal. */
4262 static int __init l2cap_init(void)
4266 err = proto_register(&l2cap_proto, 0);
4270 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4272 BT_ERR("L2CAP socket registration failed");
4276 err = hci_register_proto(&l2cap_hci_proto);
4278 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket-family registration on HCI failure. */
4279 bt_sock_unregister(BTPROTO_L2CAP);
4284 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4285 bt_debugfs, NULL, &l2cap_debugfs_fops);
4287 BT_ERR("Failed to create L2CAP debug file");
4290 BT_INFO("L2CAP ver %s", VERSION);
4291 BT_INFO("L2CAP socket layer initialized");
/* Error path label: undo proto_register. */
4296 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse order of l2cap_init. */
4300 static void __exit l2cap_exit(void)
4302 debugfs_remove(l2cap_debugfs);
4304 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4305 BT_ERR("L2CAP socket unregistration failed");
4307 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4308 BT_ERR("L2CAP protocol unregistration failed");
4310 proto_unregister(&l2cap_proto);
/* Exported no-op so dependent modules can pull this module in via the
 * symbol dependency alone. */
4313 void l2cap_load(void)
4315 /* Dummy function to trigger automatic L2CAP module loading by
4316 * other modules that use L2CAP sockets but don't use any other
4317 * symbols from it. */
4320 EXPORT_SYMBOL(l2cap_load);
/* Module entry points, tunable parameters and metadata. */
4322 module_init(l2cap_init);
4323 module_exit(l2cap_exit);
/* enable_ertm default depends on CONFIG_BT_L2CAP_EXT_FEATURES (see top
 * of file). */
4325 module_param(enable_ertm, bool, 0644);
4326 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4328 module_param(max_transmit, uint, 0644);
4329 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4331 module_param(tx_window, uint, 0644);
4332 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4334 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4335 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4336 MODULE_VERSION(VERSION);
4337 MODULE_LICENSE("GPL");
/* Auto-load alias: protocol 0 of the Bluetooth proto family. */
4338 MODULE_ALIAS("bt-proto-0");