2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #ifndef CONFIG_BT_L2CAP_DEBUG
60 static const struct proto_ops l2cap_sock_ops;
/* Global registry of all L2CAP sockets; entries are added/removed via
 * bt_sock_link()/bt_sock_unlink() and the list is guarded by .lock. */
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = RW_LOCK_UNLOCKED
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
/* sk_timer expiry callback: force-close the socket with ETIMEDOUT.
 * NOTE(review): runs in timer (softirq) context; the socket locking
 * around __l2cap_sock_close() is not visible here — confirm bh_lock_sock
 * is taken in the elided lines. */
74 static void l2cap_sock_timeout(unsigned long arg)
76 	struct sock *sk = (struct sock *) arg;
78 	BT_DBG("sock %p state %d", sk, sk->sk_state);
81 	__l2cap_sock_close(sk, ETIMEDOUT);
/* (Re)arm the socket timer to fire `timeout` jiffies from now. */
88 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
90 	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
91 	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
94 static void l2cap_sock_clear_timer(struct sock *sk)
96 	BT_DBG("sock %p state %d", sk, sk->sk_state);
97 	sk_stop_timer(sk, &sk->sk_timer);
/* One-time setup of sk_timer: wire it to l2cap_sock_timeout() with the
 * socket pointer as the callback argument. */
100 static void l2cap_sock_init_timer(struct sock *sk)
102 	init_timer(&sk->sk_timer);
103 	sk->sk_timer.function = l2cap_sock_timeout;
104 	sk->sk_timer.data = (unsigned long)sk;
107 /* ---- L2CAP channels ---- */
/* Linear search of the connection's channel list for a channel whose
 * destination CID matches. The "__" prefix indicates the caller must
 * already hold l->lock. */
108 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
111 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
112 		if (l2cap_pi(s)->dcid == cid)
/* As __l2cap_get_chan_by_dcid(), but matches the local (source) CID.
 * Caller must hold l->lock. */
118 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
121 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
122 		if (l2cap_pi(s)->scid == cid)
128 /* Find channel with given SCID.
129  * Returns locked socket */
/* Locking wrapper: takes the list read lock (acquisition elided in this
 * view), looks up by SCID, and returns with bh_lock_sock() held on the
 * result so the caller owns the socket. */
130 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 	s = __l2cap_get_chan_by_scid(l, cid);
135 	if (s) bh_lock_sock(s);
136 	read_unlock(&l->lock);
/* Find the channel whose outstanding signalling command used `ident`.
 * Caller must hold l->lock. */
140 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
143 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
144 		if (l2cap_pi(s)->ident == ident)
/* Locked lookup by command identifier; on success the returned socket is
 * bh-locked for the caller. Read-lock acquisition is elided in this view. */
150 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
154 	s = __l2cap_get_chan_by_ident(l, ident);
155 	if (s) bh_lock_sock(s);
156 	read_unlock(&l->lock);
/* Allocate a free source CID by scanning for one not already in use on
 * this connection. NOTE(review): the initialisation of `cid` (the start
 * of the dynamic CID range) is elided here — confirm it begins at the
 * first dynamic CID, not 0. */
160 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
164 	for (; cid < 0xffff; cid++) {
165 		if(!__l2cap_get_chan_by_scid(l, cid))
/* Insert sk at the head of the connection's doubly-linked channel list.
 * Caller is expected to hold the list write lock (locking elided here). */
172 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
177 		l2cap_pi(l->head)->prev_c = sk;
179 	l2cap_pi(sk)->next_c = l->head;
180 	l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the connection's channel list under the write lock,
 * stitching its neighbours together. */
186 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
188 	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
190 	write_lock_bh(&l->lock);
/* Fix up whichever neighbours exist (head-update branch elided in view). */
193 		l2cap_pi(next)->prev_c = prev;
195 		l2cap_pi(prev)->next_c = next;
196 	write_unlock_bh(&l->lock);
/* Attach a socket to an L2CAP connection: assign CIDs by socket type,
 * link it into the channel list, and queue it on the listening parent if
 * this is an incoming channel. Caller holds the channel-list write lock. */
201 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
203 	struct l2cap_chan_list *l = &conn->chan_list;
205 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
207 	l2cap_pi(sk)->conn = conn;
209 	if (sk->sk_type == SOCK_SEQPACKET) {
210 		/* Alloc CID for connection-oriented socket */
211 		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
212 	} else if (sk->sk_type == SOCK_DGRAM) {
213 		/* Connectionless socket */
214 		l2cap_pi(sk)->scid = 0x0002;
215 		l2cap_pi(sk)->dcid = 0x0002;
216 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
218 		/* Raw socket can send/recv signalling messages only */
219 		l2cap_pi(sk)->scid = 0x0001;
220 		l2cap_pi(sk)->dcid = 0x0001;
221 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
224 	__l2cap_chan_link(l, sk);
/* Incoming connection: make it visible to accept() on the parent. */
227 		bt_accept_enqueue(parent, sk);
231  * Must be called on the locked socket. */
/* Tear a channel down: stop its timer, unlink it from the connection
 * (dropping the hci_conn reference), mark it closed/zapped, and notify
 * either the accepting parent or the socket's own waiters. */
232 static void l2cap_chan_del(struct sock *sk, int err)
234 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
235 	struct sock *parent = bt_sk(sk)->parent;
237 	l2cap_sock_clear_timer(sk);
239 	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
242 		/* Unlink from channel list */
243 		l2cap_chan_unlink(&conn->chan_list, sk);
244 		l2cap_pi(sk)->conn = NULL;
/* Release the ACL link reference taken when the channel was created. */
245 		hci_conn_put(conn->hcon);
248 	sk->sk_state = BT_CLOSED;
249 	sock_set_flag(sk, SOCK_ZAPPED);
/* Still pending on a listener: detach and wake the accept()er ... */
255 		bt_accept_unlink(sk);
256 		parent->sk_data_ready(parent, 0);
/* ... otherwise wake anyone blocked on this socket's state. */
258 		sk->sk_state_change(sk);
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range under conn->lock. */
261 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
265 	/* Get next available identificator.
266 	 * 1 - 128 are used by kernel.
267 	 * 129 - 199 are reserved.
268 	 * 200 - 254 are used by utilities like l2ping, etc.
271 	spin_lock_bh(&conn->lock);
/* Wrap-around handling for > 128 is elided in this view. */
273 	if (++conn->tx_ident > 128)
278 	spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and push it out over the ACL
 * link. NOTE(review): the NULL check on the built skb is elided here —
 * confirm allocation failure is handled before hci_send_acl(). */
283 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
285 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
287 	BT_DBG("code 0x%2.2x", code);
292 	return hci_send_acl(conn->hcon, skb, 0);
295 /* ---- L2CAP connections ---- */
/* Kick off every channel parked on this connection: non-SEQPACKET
 * sockets become connected immediately; SEQPACKET channels in BT_CONNECT
 * get an L2CAP Connection Request sent on their behalf. */
296 static void l2cap_conn_start(struct l2cap_conn *conn)
298 	struct l2cap_chan_list *l = &conn->chan_list;
301 	BT_DBG("conn %p", conn);
/* Read-lock acquisition for the walk is elided in this view. */
305 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
308 		if (sk->sk_type != SOCK_SEQPACKET) {
309 			l2cap_sock_clear_timer(sk);
310 			sk->sk_state = BT_CONNECTED;
311 			sk->sk_state_change(sk);
312 		} else if (sk->sk_state == BT_CONNECT) {
313 			struct l2cap_conn_req req;
314 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
315 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
316 			req.psm = l2cap_pi(sk)->psm;
317 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
318 					L2CAP_CONN_REQ, sizeof(req), &req);
324 	read_unlock(&l->lock);
/* Called when the underlying ACL link comes up: if any L2CAP user exists,
 * query the remote feature mask (Information Request) and arm the info
 * timer so channel setup can proceed once the answer (or timeout) lands. */
327 static void l2cap_conn_ready(struct l2cap_conn *conn)
329 	BT_DBG("conn %p", conn);
331 	if (conn->chan_list.head || !hlist_empty(&l2cap_sk_list.head)) {
332 		struct l2cap_info_req req;
334 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
336 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
337 		conn->info_ident = l2cap_get_ident(conn);
339 		mod_timer(&conn->info_timer,
340 				jiffies + msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
342 		l2cap_send_cmd(conn, conn->info_ident,
343 					L2CAP_INFO_REQ, sizeof(req), &req);
347 /* Notify sockets that we cannot guaranty reliability anymore */
/* Walk the channel list and flag the error on every channel that asked
 * for reliable links (the per-socket error assignment is elided here). */
348 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
350 	struct l2cap_chan_list *l = &conn->chan_list;
353 	BT_DBG("conn %p", conn);
357 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
358 		if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
362 	read_unlock(&l->lock);
/* Info-request timer expiry: give up waiting for the remote feature
 * mask and start pending channels anyway. */
365 static void l2cap_info_timeout(unsigned long arg)
367 	struct l2cap_conn *conn = (void *) arg;
369 	conn->info_ident = 0;
371 	l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link: allocate it atomically, hook it onto the hci_conn, and initialise
 * its MTU, addresses, info timer, and locks. */
374 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
376 	struct l2cap_conn *conn = hcon->l2cap_data;
/* GFP_ATOMIC: may be called from non-sleeping (HCI event) context. */
381 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
385 	hcon->l2cap_data = conn;
388 	BT_DBG("hcon %p conn %p", hcon, conn);
390 	conn->mtu = hcon->hdev->acl_mtu;
391 	conn->src = &hcon->hdev->bdaddr;
392 	conn->dst = &hcon->dst;
396 	init_timer(&conn->info_timer);
397 	conn->info_timer.function = l2cap_info_timeout;
398 	conn->info_timer.data = (unsigned long) conn;
400 	spin_lock_init(&conn->lock);
401 	rwlock_init(&conn->chan_list.lock);
/* Destroy an L2CAP connection: free any partial reassembly skb, tear
 * down every remaining channel with `err`, and detach from the hci_conn.
 * Per-socket locking inside the loop is elided in this view. */
406 static void l2cap_conn_del(struct hci_conn *hcon, int err)
408 	struct l2cap_conn *conn = hcon->l2cap_data;
414 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
417 	kfree_skb(conn->rx_skb);
420 	while ((sk = conn->chan_list.head)) {
422 		l2cap_chan_del(sk, err);
427 	hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(): serialises channel insertion
 * with the channel-list write lock. */
431 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
433 	struct l2cap_chan_list *l = &conn->chan_list;
434 	write_lock_bh(&l->lock);
435 	__l2cap_chan_add(conn, sk, parent);
436 	write_unlock_bh(&l->lock);
439 /* ---- Socket interface ---- */
/* Exact-match lookup of a bound socket by (source PSM, source bdaddr).
 * Caller must hold l2cap_sk_list.lock. */
440 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
443 	struct hlist_node *node;
444 	sk_for_each(sk, node, &l2cap_sk_list.head)
445 		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
452 /* Find socket with psm and source bdaddr.
453  * Returns closest match.
/* Prefer an exact source-address match; remember a BDADDR_ANY wildcard
 * match in sk1 as the fallback. Caller must hold l2cap_sk_list.lock. */
455 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
457 	struct sock *sk = NULL, *sk1 = NULL;
458 	struct hlist_node *node;
460 	sk_for_each(sk, node, &l2cap_sk_list.head) {
461 		if (state && sk->sk_state != state)
464 		if (l2cap_pi(sk)->psm == psm) {
/* Exact match wins immediately ... */
466 			if (!bacmp(&bt_sk(sk)->src, src))
/* ... wildcard bind is kept as the closest match. */
470 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* `node` non-NULL means the loop broke early on an exact match. */
474 	return node ? sk : sk1;
477 /* Find socket with given address (psm, src).
478  * Returns locked socket */
/* Locked lookup wrapper: the returned socket (if any) is bh-locked. */
479 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
482 	read_lock(&l2cap_sk_list.lock);
483 	s = __l2cap_get_sock_by_psm(state, psm, src);
484 	if (s) bh_lock_sock(s);
485 	read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued when the last
 * reference to the socket goes away. */
489 static void l2cap_sock_destruct(struct sock *sk)
493 	skb_queue_purge(&sk->sk_receive_queue);
494 	skb_queue_purge(&sk->sk_write_queue);
/* Shut down a listening socket: close every not-yet-accepted child, then
 * mark the parent closed and zapped. */
497 static void l2cap_sock_cleanup_listen(struct sock *parent)
501 	BT_DBG("parent %p", parent);
503 	/* Close not yet accepted channels */
504 	while ((sk = bt_accept_dequeue(parent, NULL)))
505 		l2cap_sock_close(sk);
507 	parent->sk_state = BT_CLOSED;
508 	sock_set_flag(parent, SOCK_ZAPPED);
511 /* Kill socket (only if zapped and orphan)
512  * Must be called on unlocked socket.
/* Final release: only proceeds when the socket is both zapped and no
 * longer attached to a struct socket (orphan); unlinks it from the
 * global list and marks it dead. The final sock_put is elided here. */
514 static void l2cap_sock_kill(struct sock *sk)
516 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
519 	BT_DBG("sk %p state %d", sk, sk->sk_state);
521 	/* Kill poor orphan */
522 	bt_sock_unlink(&l2cap_sk_list, sk);
523 	sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close, called with the socket locked:
 *  - listening sockets tear down their accept queue;
 *  - connected SEQPACKET channels send a Disconnection Request and wait
 *    (BT_DISCONN) for the response under the send timeout;
 *  - everything else is deleted from the connection immediately. */
527 static void __l2cap_sock_close(struct sock *sk, int reason)
529 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
531 	switch (sk->sk_state) {
533 		l2cap_sock_cleanup_listen(sk);
539 		if (sk->sk_type == SOCK_SEQPACKET) {
540 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
541 			struct l2cap_disconn_req req;
543 			sk->sk_state = BT_DISCONN;
544 			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
546 			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
547 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
549 					L2CAP_DISCONN_REQ, sizeof(req), &req);
551 			l2cap_chan_del(sk, reason);
/* Non-connected states: just remove the channel. */
557 		l2cap_chan_del(sk, reason);
/* Default case: nothing to undo, only mark the socket zapped. */
561 		sock_set_flag(sk, SOCK_ZAPPED);
566 /* Must be called on unlocked socket. */
/* Public close path: stop the timer and close with ECONNRESET; the
 * lock/unlock and final kill around the call are elided in this view. */
567 static void l2cap_sock_close(struct sock *sk)
569 	l2cap_sock_clear_timer(sk);
571 	__l2cap_sock_close(sk, ECONNRESET);
/* Initialise per-socket L2CAP state. A child of an accepting socket
 * inherits type, MTUs and link mode from its parent; otherwise defaults
 * are applied. */
576 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
578 	struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Inherit from the listening parent (branch condition elided). */
583 		sk->sk_type = parent->sk_type;
584 		pi->imtu = l2cap_pi(parent)->imtu;
585 		pi->omtu = l2cap_pi(parent)->omtu;
586 		pi->link_mode = l2cap_pi(parent)->link_mode;
588 		pi->imtu = L2CAP_DEFAULT_MTU;
593 	/* Default config options */
595 	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor registered with the socket layer; obj_size makes
 * sk_alloc() reserve room for the L2CAP-specific pinfo. */
598 static struct proto l2cap_proto = {
600 	.owner		= THIS_MODULE,
601 	.obj_size	= sizeof(struct l2cap_pinfo)
/* Allocate and initialise a new L2CAP sock: generic socket data, accept
 * queue, destructor, default connect timeout, state/timer setup, and
 * registration on the global socket list. */
604 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
608 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, 1);
612 	sock_init_data(sock, sk);
613 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
615 	sk->sk_destruct = l2cap_sock_destruct;
616 	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
618 	sock_reset_flag(sk, SOCK_ZAPPED);
620 	sk->sk_protocol = proto;
621 	sk->sk_state = BT_OPEN;
623 	l2cap_sock_init_timer(sk);
625 	bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend for BTPROTO_L2CAP: validates the socket type,
 * restricts SOCK_RAW to CAP_NET_RAW holders, then allocates and
 * initialises the sock. */
629 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
633 	BT_DBG("sock %p", sock);
635 	sock->state = SS_UNCONNECTED;
637 	if (sock->type != SOCK_SEQPACKET &&
638 			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
639 		return -ESOCKTNOSUPPORT;
/* Raw L2CAP sockets carry signalling traffic; require privilege. */
641 	if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
644 	sock->ops = &l2cap_sock_ops;
646 	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
650 	l2cap_sock_init(sk, NULL);
/* bind(2): validate the address, require CAP_NET_BIND_SERVICE for
 * privileged PSMs (< 0x1001), reject an already-taken (psm, bdaddr)
 * pair under the socket-list write lock, then record source address and
 * PSM and move to BT_BOUND. */
654 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
656 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
657 	struct sock *sk = sock->sk;
660 	BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
662 	if (!addr || addr->sa_family != AF_BLUETOOTH)
667 	if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved/well-known: privileged bind only. */
672 	if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
673 				!capable(CAP_NET_BIND_SERVICE)) {
678 	write_lock_bh(&l2cap_sk_list.lock);
680 	if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
683 		/* Save source address */
684 		bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
685 		l2cap_pi(sk)->psm   = la->l2_psm;
686 		l2cap_pi(sk)->sport = la->l2_psm;
687 		sk->sk_state = BT_BOUND;
690 	write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing path for a connecting socket: route to an HCI
 * device, create/reuse the ACL link and L2CAP connection, attach the
 * channel, then either send the Connection Request now (link already up)
 * or leave it for l2cap_conn_ready()/conn_start() when the link comes up. */
697 static int l2cap_do_connect(struct sock *sk)
699 	bdaddr_t *src = &bt_sk(sk)->src;
700 	bdaddr_t *dst = &bt_sk(sk)->dst;
701 	struct l2cap_conn *conn;
702 	struct hci_conn *hcon;
703 	struct hci_dev *hdev;
706 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
708 	if (!(hdev = hci_get_route(dst, src)))
709 		return -EHOSTUNREACH;
711 	hci_dev_lock_bh(hdev);
715 	hcon = hci_connect(hdev, ACL_LINK, dst);
719 	conn = l2cap_conn_add(hcon, 0);
727 	/* Update source addr of the socket */
728 	bacpy(src, conn->src);
730 	l2cap_chan_add(conn, sk, NULL);
732 	sk->sk_state = BT_CONNECT;
733 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
735 	if (hcon->state == BT_CONNECTED) {
/* Feature-mask exchange not yet started: kick it off first. */
736 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
737 			l2cap_conn_ready(conn);
741 		if (sk->sk_type == SOCK_SEQPACKET) {
742 			struct l2cap_conn_req req;
743 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
744 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
745 			req.psm = l2cap_pi(sk)->psm;
746 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
747 					L2CAP_CONN_REQ, sizeof(req), &req);
/* Non-SEQPACKET sockets are connected as soon as the link is up. */
749 			l2cap_sock_clear_timer(sk);
750 			sk->sk_state = BT_CONNECTED;
755 	hci_dev_unlock_bh(hdev);
/* connect(2): validate the address (SEQPACKET requires a PSM), reject
 * sockets already connecting/connected, record the destination, start
 * l2cap_do_connect(), then block until BT_CONNECTED or timeout. */
760 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
762 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
763 	struct sock *sk = sock->sk;
770 	if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
775 	if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
780 	switch(sk->sk_state) {
784 		/* Already connecting */
788 		/* Already connected */
801 	/* Set destination address and psm */
802 	bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
803 	l2cap_pi(sk)->psm = la->l2_psm;
805 	if ((err = l2cap_do_connect(sk)))
/* Wait (honouring O_NONBLOCK) for the connection to complete. */
809 	err = bt_sock_wait_state(sk, BT_CONNECTED,
810 			sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SOCK_SEQPACKET sockets may listen. If no PSM was
 * bound, auto-assign the first free odd dynamic PSM (0x1001..0x10ff)
 * under the socket-list lock, then set the backlog and enter BT_LISTEN. */
816 static int l2cap_sock_listen(struct socket *sock, int backlog)
818 	struct sock *sk = sock->sk;
821 	BT_DBG("sk %p backlog %d", sk, backlog);
825 	if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
830 	if (!l2cap_pi(sk)->psm) {
831 		bdaddr_t *src = &bt_sk(sk)->src;
836 		write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd, hence the += 2 stride. */
838 		for (psm = 0x1001; psm < 0x1100; psm += 2)
839 			if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
840 				l2cap_pi(sk)->psm   = htobs(psm);
841 				l2cap_pi(sk)->sport = htobs(psm);
846 		write_unlock_bh(&l2cap_sk_list.lock);
852 	sk->sk_max_ack_backlog = backlog;
853 	sk->sk_ack_backlog = 0;
854 	sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listening socket's
 * wait queue until a fully-established child can be dequeued, honouring
 * the receive timeout, signals, and a concurrent state change away from
 * BT_LISTEN. The socket lock is dropped around schedule_timeout(). */
861 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
863 	DECLARE_WAITQUEUE(wait, current);
864 	struct sock *sk = sock->sk, *nsk;
868 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
870 	if (sk->sk_state != BT_LISTEN) {
875 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
877 	BT_DBG("sk %p timeo %ld", sk, timeo);
879 	/* Wait for an incoming connection. (wake-one). */
880 	add_wait_queue_exclusive(sk->sk_sleep, &wait);
881 	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
882 		set_current_state(TASK_INTERRUPTIBLE);
/* Release the sock lock while sleeping (unlock elided in this view). */
889 		timeo = schedule_timeout(timeo);
890 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
892 		if (sk->sk_state != BT_LISTEN) {
897 		if (signal_pending(current)) {
898 			err = sock_intr_errno(timeo);
902 	set_current_state(TASK_RUNNING);
903 	remove_wait_queue(sk->sk_sleep, &wait);
908 	newsock->state = SS_CONNECTED;
910 	BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with the peer address
 * (peer != 0) or the local address, plus the channel PSM. */
917 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
919 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
920 	struct sock *sk = sock->sk;
922 	BT_DBG("sock %p, sk %p", sock, sk);
924 	addr->sa_family = AF_BLUETOOTH;
925 	*len = sizeof(struct sockaddr_l2);
928 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
930 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
932 	la->l2_psm = l2cap_pi(sk)->psm;
/* Build an outgoing L2CAP PDU from user iovec data and hand it to the
 * ACL layer. The first skb carries the L2CAP header (plus the 2-byte PSM
 * for connectionless SOCK_DGRAM traffic); data beyond the ACL MTU goes
 * into frag_list continuation skbs with no L2CAP header. Returns bytes
 * sent or a negative errno. */
936 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
938 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
939 	struct sk_buff *skb, **frag;
940 	int err, hlen, count, sent=0;
941 	struct l2cap_hdr *lh;
943 	BT_DBG("sk %p len %d", sk, len);
945 	/* First fragment (with L2CAP header) */
946 	if (sk->sk_type == SOCK_DGRAM)
947 		hlen = L2CAP_HDR_SIZE + 2;
949 		hlen = L2CAP_HDR_SIZE;
/* Each fragment is capped at the ACL link MTU. */
951 	count = min_t(unsigned int, (conn->mtu - hlen), len);
953 	skb = bt_skb_send_alloc(sk, hlen + count,
954 			msg->msg_flags & MSG_DONTWAIT, &err);
958 	/* Create L2CAP header */
959 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
960 	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
961 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
963 	if (sk->sk_type == SOCK_DGRAM)
964 		put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
966 	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
974 	/* Continuation fragments (no L2CAP header) */
975 	frag = &skb_shinfo(skb)->frag_list;
977 		count = min_t(unsigned int, conn->mtu, len);
979 		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
983 		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
991 		frag = &(*frag)->next;
/* Hand the whole fragment chain to HCI in one call. */
994 	if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
/* sendmsg(2): reject pending socket errors, MSG_OOB, and payloads over
 * the negotiated outgoing MTU (raw sockets are exempt from the MTU
 * check), then transmit via l2cap_do_send() if connected. */
1004 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1006 	struct sock *sk = sock->sk;
1009 	BT_DBG("sock %p, sk %p", sock, sk);
1011 	err = sock_error(sk);
1015 	if (msg->msg_flags & MSG_OOB)
1018 	/* Check outgoing MTU */
1019 	if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1024 	if (sk->sk_state == BT_CONNECTED)
1025 		err = l2cap_do_send(sk, msg, len);
/* setsockopt(2) for SOL_L2CAP: L2CAP_OPTIONS updates imtu/omtu (current
 * values are loaded first so a short copy leaves the rest untouched);
 * L2CAP_LM replaces the link-mode bitmask. Option dispatch (switch) is
 * elided in this view. */
1033 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1035 	struct sock *sk = sock->sk;
1036 	struct l2cap_options opts;
1040 	BT_DBG("sk %p", sk);
/* Pre-load current values; user may supply a shorter struct. */
1046 		opts.imtu     = l2cap_pi(sk)->imtu;
1047 		opts.omtu     = l2cap_pi(sk)->omtu;
1048 		opts.flush_to = l2cap_pi(sk)->flush_to;
1051 		len = min_t(unsigned int, sizeof(opts), optlen);
1052 		if (copy_from_user((char *) &opts, optval, len)) {
1057 		l2cap_pi(sk)->imtu = opts.imtu;
1058 		l2cap_pi(sk)->omtu = opts.omtu;
1062 		if (get_user(opt, (u32 __user *) optval)) {
1067 		l2cap_pi(sk)->link_mode = opt;
/* getsockopt(2) for SOL_L2CAP: returns L2CAP_OPTIONS (imtu/omtu/
 * flush_to), L2CAP_LM (link mode bitmask), or L2CAP_CONNINFO (HCI handle
 * and remote device class; connected sockets only). */
1079 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1081 	struct sock *sk = sock->sk;
1082 	struct l2cap_options opts;
1083 	struct l2cap_conninfo cinfo;
1086 	BT_DBG("sk %p", sk);
1088 	if (get_user(len, optlen))
1095 		opts.imtu     = l2cap_pi(sk)->imtu;
1096 		opts.omtu     = l2cap_pi(sk)->omtu;
1097 		opts.flush_to = l2cap_pi(sk)->flush_to;
/* Copy no more than the caller asked for. */
1100 		len = min_t(unsigned int, len, sizeof(opts));
1101 		if (copy_to_user(optval, (char *) &opts, len))
1107 		if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1111 	case L2CAP_CONNINFO:
1112 		if (sk->sk_state != BT_CONNECTED) {
1117 		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1118 		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1120 		len = min_t(unsigned int, len, sizeof(cinfo));
1121 		if (copy_to_user(optval, (char *) &cinfo, len))
/* shutdown(2): first call shuts both directions and starts the close
 * handshake; with SO_LINGER set, block until BT_CLOSED or the linger
 * time expires. */
1135 static int l2cap_sock_shutdown(struct socket *sock, int how)
1137 	struct sock *sk = sock->sk;
1140 	BT_DBG("sock %p, sk %p", sock, sk);
1146 	if (!sk->sk_shutdown) {
1147 		sk->sk_shutdown = SHUTDOWN_MASK;
1148 		l2cap_sock_clear_timer(sk);
1149 		__l2cap_sock_close(sk, 0);
1151 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1152 			err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime);
/* close(2)/release: shut the socket down in both directions, then free
 * it via l2cap_sock_kill() (the sock_orphan step is elided here). */
1158 static int l2cap_sock_release(struct socket *sock)
1160 	struct sock *sk = sock->sk;
1163 	BT_DBG("sock %p, sk %p", sock, sk);
1168 	err = l2cap_sock_shutdown(sock, 2);
1171 	l2cap_sock_kill(sk);
/* Configuration complete: clear config state and the setup timer, then
 * wake the right party — the connecting thread for an outgoing channel,
 * or the accept()ing parent for an incoming one. */
1175 static void l2cap_chan_ready(struct sock *sk)
1177 	struct sock *parent = bt_sk(sk)->parent;
1179 	BT_DBG("sk %p, parent %p", sk, parent);
1181 	l2cap_pi(sk)->conf_state = 0;
1182 	l2cap_sock_clear_timer(sk);
1185 		/* Outgoing channel.
1186 		 * Wake up socket sleeping on connect.
1188 		sk->sk_state = BT_CONNECTED;
1189 		sk->sk_state_change(sk);
1191 		/* Incoming channel.
1192 		 * Wake up socket sleeping on accept.
1194 		parent->sk_data_ready(parent, 0);
1198 /* Copy frame to all raw sockets on that connection */
/* Clone the incoming skb into every SOCK_RAW channel's receive queue,
 * skipping the socket the frame originated from; clones that cannot be
 * queued are dropped (kfree path elided in this view). */
1199 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1201 	struct l2cap_chan_list *l = &conn->chan_list;
1202 	struct sk_buff *nskb;
1205 	BT_DBG("conn %p", conn);
1207 	read_lock(&l->lock);
1208 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1209 		if (sk->sk_type != SOCK_RAW)
1212 		/* Don't send frame to the socket it came from */
1216 		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1219 		if (sock_queue_rcv_skb(sk, nskb))
1222 	read_unlock(&l->lock);
1225 /* ---- L2CAP signalling commands ---- */
/* Assemble a signalling PDU on CID 0x0001: L2CAP header + command header
 * + payload, fragmented across frag_list skbs when larger than the ACL
 * MTU. Returns the head skb (error/NULL paths elided in this view). */
1226 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1227 					u8 code, u8 ident, u16 dlen, void *data)
1229 	struct sk_buff *skb, **frag;
1230 	struct l2cap_cmd_hdr *cmd;
1231 	struct l2cap_hdr *lh;
1234 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1236 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1237 	count = min_t(unsigned int, conn->mtu, len);
1239 	skb = bt_skb_alloc(count, GFP_ATOMIC);
1243 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1244 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* All signalling travels on the fixed signalling channel, CID 0x0001. */
1245 	lh->cid = cpu_to_le16(0x0001);
1247 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1250 	cmd->len = cpu_to_le16(dlen);
1253 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1254 		memcpy(skb_put(skb, count), data, count);
1260 	/* Continuation fragments (no L2CAP header) */
1261 	frag = &skb_shinfo(skb)->frag_list;
1263 		count = min_t(unsigned int, conn->mtu, len);
1265 		*frag = bt_skb_alloc(count, GFP_ATOMIC);
1269 		memcpy(skb_put(*frag, count), data, count);
1274 		frag = &(*frag)->next;
/* Decode one configuration option at *ptr: reports its type and length,
 * converts 1/2/4-byte values from little-endian into *val, and for other
 * lengths returns a pointer to the raw bytes. Returns the total bytes
 * consumed so the caller can advance through the option list. */
1284 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1286 	struct l2cap_conf_opt *opt = *ptr;
1289 	len = L2CAP_CONF_OPT_SIZE + opt->len;
1297 		*val = *((u8 *) opt->val);
1301 		*val = __le16_to_cpu(*((__le16 *) opt->val));
1305 		*val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer, not a value. */
1309 		*val = (unsigned long) opt->val;
1313 	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr, encoding 1/2/4-byte values
 * little-endian and copying larger values verbatim, then advance *ptr
 * past the option. */
1317 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1319 	struct l2cap_conf_opt *opt = *ptr;
1321 	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1328 		*((u8 *) opt->val)  = val;
1332 		*((__le16 *) opt->val) = cpu_to_le16(val);
1336 		*((__le32 *) opt->val) = cpu_to_le32(val);
/* For other lengths `val` is a pointer to the raw option bytes. */
1340 		memcpy(opt->val, (void *) val, len);
1344 	*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build our Configuration Request into `data`: only a non-default MTU
 * option is included. Returns the total request length (final return
 * elided in this view). */
1347 static int l2cap_build_conf_req(struct sock *sk, void *data)
1349 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1350 	struct l2cap_conf_req *req = data;
1351 	void *ptr = req->data;
1353 	BT_DBG("sk %p", sk);
1355 	if (pi->imtu != L2CAP_DEFAULT_MTU)
1356 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1358 	/* FIXME: Need actual value of the flush timeout */
1359 	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1360 	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1362 	req->dcid  = cpu_to_le16(pi->dcid);
1363 	req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configuration Request (pi->conf_req/
 * conf_len) and build our Configuration Response into `data`. Unknown
 * non-hint options produce L2CAP_CONF_UNKNOWN with the offending types
 * echoed back; an unacceptable MTU produces L2CAP_CONF_UNACCEPT. The
 * per-option handling bodies are partially elided in this view. */
1368 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1370 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1371 	struct l2cap_conf_rsp *rsp = data;
1372 	void *ptr = rsp->data;
1373 	void *req = pi->conf_req;
1374 	int len = pi->conf_len;
1375 	int type, hint, olen;
1377 	u16 mtu = L2CAP_DEFAULT_MTU;
1378 	u16 result = L2CAP_CONF_SUCCESS;
1380 	BT_DBG("sk %p", sk);
1382 	while (len >= L2CAP_CONF_OPT_SIZE) {
1383 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1389 		case L2CAP_CONF_MTU:
1393 		case L2CAP_CONF_FLUSH_TO:
1397 		case L2CAP_CONF_QOS:
/* Unknown mandatory option: reject and echo its type to the peer. */
1404 			result = L2CAP_CONF_UNKNOWN;
1405 			*((u8 *) ptr++) = type;
1410 	if (result == L2CAP_CONF_SUCCESS) {
1411 		/* Configure output options and let the other side know
1412 		 * which ones we don't like. */
1415 			result = L2CAP_CONF_UNACCEPT;
1418 			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1421 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1424 	rsp->scid   = cpu_to_le16(pi->dcid);
1425 	rsp->result = cpu_to_le16(result);
1426 	rsp->flags  = cpu_to_le16(0x0000);
/* Build a bare Configuration Response header (no options) with the given
 * result and flags; returns the response length (return elided here). */
1431 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1433 	struct l2cap_conf_rsp *rsp = data;
1434 	void *ptr = rsp->data;
1436 	BT_DBG("sk %p", sk);
1438 	rsp->scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
1439 	rsp->result = cpu_to_le16(result);
1440 	rsp->flags  = cpu_to_le16(flags);
/* Handle an incoming Command Reject: if it answers our outstanding
 * Information Request (command-not-understood, matching ident), abandon
 * the feature query and start pending channels. */
1445 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1447 	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 == "command not understood". */
1449 	if (rej->reason != 0x0000)
1452 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1453 			cmd->ident == conn->info_ident) {
1454 		conn->info_ident = 0;
1455 		del_timer(&conn->info_timer);
1456 		l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listener for the PSM,
 * allocate a child socket, guard against a duplicate remote CID, attach
 * the channel, and answer with a Connection Response — pending
 * (authentication/encryption required by link mode) or success. Replies
 * with BAD_PSM / NO_MEM on the failure paths. */
1462 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1464 	struct l2cap_chan_list *list = &conn->chan_list;
1465 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1466 	struct l2cap_conn_rsp rsp;
1467 	struct sock *sk, *parent;
1468 	int result = 0, status = 0;
1470 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1471 	__le16 psm = req->psm;
1473 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1475 	/* Check if we have socket listening on psm */
1476 	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1478 		result = L2CAP_CR_BAD_PSM;
1482 	result = L2CAP_CR_NO_MEM;
1484 	/* Check for backlog size */
1485 	if (sk_acceptq_is_full(parent)) {
1486 		BT_DBG("backlog full %d", parent->sk_ack_backlog);
1490 	sk = l2cap_sock_alloc(parent->sk_net, NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1494 	write_lock_bh(&list->lock);
1496 	/* Check if we already have channel with that dcid */
1497 	if (__l2cap_get_chan_by_dcid(list, scid)) {
1498 		write_unlock_bh(&list->lock);
1499 		sock_set_flag(sk, SOCK_ZAPPED);
1500 		l2cap_sock_kill(sk);
/* Pin the ACL link while this channel exists (released in chan_del). */
1504 	hci_conn_hold(conn->hcon);
1506 	l2cap_sock_init(sk, parent);
1507 	bacpy(&bt_sk(sk)->src, conn->src);
1508 	bacpy(&bt_sk(sk)->dst, conn->dst);
1509 	l2cap_pi(sk)->psm  = psm;
1510 	l2cap_pi(sk)->dcid = scid;
1512 	__l2cap_chan_add(conn, sk, parent);
1513 	dcid = l2cap_pi(sk)->scid;
1515 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1517 	/* Service level security */
1518 	result = L2CAP_CR_PEND;
1519 	status = L2CAP_CS_AUTHEN_PEND;
1520 	sk->sk_state = BT_CONNECT2;
1521 	l2cap_pi(sk)->ident = cmd->ident;
1523 	if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
1524 			(l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
1525 		if (!hci_conn_encrypt(conn->hcon))
1527 	} else if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH) {
1528 		if (!hci_conn_auth(conn->hcon))
/* No security requirement outstanding: accept immediately. */
1532 	sk->sk_state = BT_CONFIG;
1533 	result = status = 0;
1536 	write_unlock_bh(&list->lock);
1539 	bh_unlock_sock(parent);
1542 	rsp.scid   = cpu_to_le16(scid);
1543 	rsp.dcid   = cpu_to_le16(dcid);
1544 	rsp.result = cpu_to_le16(result);
1545 	rsp.status = cpu_to_le16(status);
1546 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Handle a Connection Response: find the channel by our SCID (falling
 * back to the command ident while the CID is still unknown). On success,
 * record the remote CID, enter BT_CONFIG, and immediately send our
 * Configuration Request; on refusal, delete the channel with
 * ECONNREFUSED. */
1550 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1552 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1553 	u16 scid, dcid, result, status;
1557 	scid   = __le16_to_cpu(rsp->scid);
1558 	dcid   = __le16_to_cpu(rsp->dcid);
1559 	result = __le16_to_cpu(rsp->result);
1560 	status = __le16_to_cpu(rsp->status);
1562 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1565 		if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1568 		if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1573 	case L2CAP_CR_SUCCESS:
1574 		sk->sk_state = BT_CONFIG;
1575 		l2cap_pi(sk)->ident = 0;
1576 		l2cap_pi(sk)->dcid = dcid;
1577 		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1579 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1580 					l2cap_build_conf_req(sk, req), req);
/* Rejection path (non-pending failure results). */
1587 		l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request: buffer option data across continuation
 * packets (flag 0x0001), reject requests that overflow the per-channel
 * buffer, then once complete parse the options, send our response, and —
 * when both directions are configured — bring the channel up. If we have
 * not yet sent our own Configuration Request, send it now. */
1595 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1597 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1603 	dcid  = __le16_to_cpu(req->dcid);
1604 	flags = __le16_to_cpu(req->flags);
1606 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1608 	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1611 	if (sk->sk_state == BT_DISCONN)
1614 	/* Reject if config buffer is too small. */
1615 	len = cmd_len - sizeof(*req);
1616 	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1617 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1618 				l2cap_build_conf_rsp(sk, rsp,
1619 					L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel config buffer. */
1624 	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1625 	l2cap_pi(sk)->conf_len += len;
1627 	if (flags & 0x0001) {
1628 		/* Incomplete config. Send empty response. */
1629 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1630 				l2cap_build_conf_rsp(sk, rsp,
1631 					L2CAP_CONF_SUCCESS, 0x0001), rsp);
1635 	/* Complete config. */
1636 	len = l2cap_parse_conf_req(sk, rsp);
1640 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1642 	/* Reset config buffer. */
1643 	l2cap_pi(sk)->conf_len = 0;
1645 	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both sides configured: the channel is fully established. */
1648 	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1649 		sk->sk_state = BT_CONNECTED;
1650 		l2cap_chan_ready(sk);
1654 	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1656 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1657 					l2cap_build_conf_req(sk, req), req);
/* Handle a Configuration Response. UNACCEPT triggers a bounded resend of
 * our (unchanged) request — a qualification-testing workaround; any other
 * failure, or retry exhaustion, moves the channel to BT_DISCONN with
 * ECONNRESET and issues a Disconnection Request under a 5s timer. On
 * success, mark input done and bring the channel up when output is too. */
1665 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1667 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1668 	u16 scid, flags, result;
1671 	scid   = __le16_to_cpu(rsp->scid);
1672 	flags  = __le16_to_cpu(rsp->flags);
1673 	result = __le16_to_cpu(rsp->result);
1675 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1677 	if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1681 	case L2CAP_CONF_SUCCESS:
1684 	case L2CAP_CONF_UNACCEPT:
1685 		if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1687 			/* It does not make sense to adjust L2CAP parameters
1688 			 * that are currently defined in the spec. We simply
1689 			 * resend config request that we sent earlier. It is
1690 			 * stupid, but it helps qualification testing which
1691 			 * expects at least some response from us. */
1692 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1693 						l2cap_build_conf_req(sk, req), req);
/* Retries exhausted or hard failure: tear the channel down. */
1698 		sk->sk_state = BT_DISCONN;
1699 		sk->sk_err = ECONNRESET;
1700 		l2cap_sock_set_timer(sk, HZ * 5);
1702 			struct l2cap_disconn_req req;
1703 			req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1704 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1705 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
1706 					L2CAP_DISCONN_REQ, sizeof(req), &req);
1714 	l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1716 	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1717 		sk->sk_state = BT_CONNECTED;
1718 		l2cap_chan_ready(sk);
/* Handle a peer-initiated Disconnect Request: acknowledge with a
 * Disconnect Response (CIDs echoed back from our channel's view),
 * then tear down the channel and kill the socket. */
1726 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1728 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1729 struct l2cap_disconn_rsp rsp;
1733 scid = __le16_to_cpu(req->scid);
1734 dcid = __le16_to_cpu(req->dcid);
1736 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1738 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
/* In the response, dcid/scid are from our perspective (swapped). */
1741 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1742 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1743 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1745 sk->sk_shutdown = SHUTDOWN_MASK;
1747 l2cap_chan_del(sk, ECONNRESET);
1750 l2cap_sock_kill(sk);
/* Handle the response to a Disconnect Request we sent: the peer has
 * confirmed, so delete the channel (no error) and kill the socket. */
1754 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1756 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1760 scid = __le16_to_cpu(rsp->scid);
1761 dcid = __le16_to_cpu(rsp->dcid);
1763 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1765 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1768 l2cap_chan_del(sk, 0);
1771 l2cap_sock_kill(sk);
/* Handle an Information Request from the peer.  This implementation
 * supports none of the queryable features, so every request is
 * answered with result L2CAP_IR_NOTSUPP. */
1775 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1777 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1778 struct l2cap_info_rsp rsp;
1781 type = __le16_to_cpu(req->type);
1783 BT_DBG("type 0x%4.4x", type);
1785 rsp.type = cpu_to_le16(type);
1786 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1787 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle the peer's Information Response: stop the info timer, cache
 * the reported feature mask (if that is what we asked for), then kick
 * off any connections that were waiting on this exchange. */
1792 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1794 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1797 type = __le16_to_cpu(rsp->type);
1798 result = __le16_to_cpu(rsp->result);
1800 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* The exchange is over: clear the pending ident and its timeout. */
1802 conn->info_ident = 0;
1804 del_timer(&conn->info_timer);
/* Feature mask is a 32-bit LE value that may be unaligned in the skb. */
1806 if (type == L2CAP_IT_FEAT_MASK)
1807 conn->feat_mask = __le32_to_cpu(get_unaligned((__le32 *) rsp->data));
1809 l2cap_conn_start(conn);
/* Process one frame received on the L2CAP signalling channel.  A frame
 * may carry several commands back-to-back; each is copied into a local
 * header, validated, and dispatched to its handler.  The raw frame is
 * also delivered to any raw sockets first. */
1814 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1816 u8 *data = skb->data;
1818 struct l2cap_cmd_hdr cmd;
1821 l2cap_raw_recv(conn, skb);
1823 while (len >= L2CAP_CMD_HDR_SIZE) {
1825 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1826 data += L2CAP_CMD_HDR_SIZE;
1827 len -= L2CAP_CMD_HDR_SIZE;
1829 cmd_len = le16_to_cpu(cmd.len);
1831 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command must fit in the remaining frame and carry a non-zero ident. */
1833 if (cmd_len > len || !cmd.ident) {
1834 BT_DBG("corrupted command");
1839 case L2CAP_COMMAND_REJ:
1840 l2cap_command_rej(conn, &cmd, data);
1843 case L2CAP_CONN_REQ:
1844 err = l2cap_connect_req(conn, &cmd, data);
1847 case L2CAP_CONN_RSP:
1848 err = l2cap_connect_rsp(conn, &cmd, data);
1851 case L2CAP_CONF_REQ:
1852 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1855 case L2CAP_CONF_RSP:
1856 err = l2cap_config_rsp(conn, &cmd, data);
1859 case L2CAP_DISCONN_REQ:
1860 err = l2cap_disconnect_req(conn, &cmd, data);
1863 case L2CAP_DISCONN_RSP:
1864 err = l2cap_disconnect_rsp(conn, &cmd, data);
1867 case L2CAP_ECHO_REQ:
/* Echo payload is simply reflected back to the sender. */
1868 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
1871 case L2CAP_ECHO_RSP:
1874 case L2CAP_INFO_REQ:
1875 err = l2cap_information_req(conn, &cmd, data);
1878 case L2CAP_INFO_RSP:
1879 err = l2cap_information_rsp(conn, &cmd, data);
1883 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Any handler failure is answered with a Command Reject. */
1889 struct l2cap_cmd_rej rej;
1890 BT_DBG("error %d", err);
1892 /* FIXME: Map err to a valid reason */
1893 rej.reason = cpu_to_le16(0);
1894 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Deliver a data frame to the connection-oriented channel identified by
 * cid.  Frames for unknown channels, channels not in BT_CONNECTED, or
 * frames larger than the negotiated incoming MTU are dropped. */
1904 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
1908 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
1910 BT_DBG("unknown cid 0x%4.4x", cid);
1914 BT_DBG("sk %p, len %d", sk, skb->len);
1916 if (sk->sk_state != BT_CONNECTED)
1919 if (l2cap_pi(sk)->imtu < skb->len)
1922 /* If socket recv buffers overflows we drop data here
1923 * which is *bad* because L2CAP has to be reliable.
1924 * But we don't have any other choice. L2CAP doesn't
1925 * provide flow control mechanism. */
1927 if (!sock_queue_rcv_skb(sk, skb))
/* Deliver a connectionless (group) frame to the socket bound to the
 * given PSM on our local address.  Accepted in BT_BOUND as well as
 * BT_CONNECTED state; oversized frames (> imtu) are dropped. */
1940 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
1944 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
1948 BT_DBG("sk %p, len %d", sk, skb->len);
1950 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
1953 if (l2cap_pi(sk)->imtu < skb->len)
1956 if (!sock_queue_rcv_skb(sk, skb))
/* sk may be NULL if no socket matched the PSM lookup above. */
1963 if (sk) bh_unlock_sock(sk);
/* Demultiplex one complete, reassembled L2CAP frame by its channel id:
 * signalling channel, connectionless channel (PSM read unaligned from
 * the payload), or a connection-oriented data channel. */
1967 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
1969 struct l2cap_hdr *lh = (void *) skb->data;
1973 skb_pull(skb, L2CAP_HDR_SIZE);
1974 cid = __le16_to_cpu(lh->cid);
1975 len = __le16_to_cpu(lh->len);
1977 BT_DBG("len %d, cid 0x%4.4x", len, cid);
1981 l2cap_sig_channel(conn, skb);
1985 psm = get_unaligned((__le16 *) skb->data);
1987 l2cap_conless_channel(conn, psm, skb);
1991 l2cap_data_channel(conn, cid, skb);
1996 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up.  Scan the
 * listening L2CAP sockets and return the accept/link-mode mask.  A
 * socket bound to the exact local bdaddr takes precedence (lm1) over
 * wildcard BDADDR_ANY listeners (lm2). */
1998 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2000 int exact = 0, lm1 = 0, lm2 = 0;
2001 register struct sock *sk;
2002 struct hlist_node *node;
2004 if (type != ACL_LINK)
2007 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2009 /* Find listening sockets and check their link_mode */
2010 read_lock(&l2cap_sk_list.lock);
2011 sk_for_each(sk, node, &l2cap_sk_list.head) {
2012 if (sk->sk_state != BT_LISTEN)
2015 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2016 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2018 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2019 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2021 read_unlock(&l2cap_sk_list.lock);
2023 return exact ? lm1 : lm2;
/* HCI callback: ACL connection setup finished.  On success, attach an
 * L2CAP connection object and mark it ready; on failure, tear it down
 * with the HCI status mapped to an errno. */
2026 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2028 struct l2cap_conn *conn;
2030 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2032 if (hcon->type != ACL_LINK)
2036 conn = l2cap_conn_add(hcon, status);
2038 l2cap_conn_ready(conn);
2040 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the underlying ACL link dropped.  Tear down the whole
 * L2CAP connection with the HCI reason mapped to an errno. */
2045 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2047 BT_DBG("hcon %p reason %d", hcon, reason);
2049 if (hcon->type != ACL_LINK)
2052 l2cap_conn_del(hcon, bt_err(reason));
/* HCI callback: authentication completed for this ACL link.  Walk the
 * connection's channel list and answer pending incoming connections
 * (BT_CONNECT2) that were only waiting for authentication.  Channels
 * that additionally require encryption or a secure link are skipped
 * here — they are finished in l2cap_encrypt_cfm() instead. */
2057 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2059 struct l2cap_chan_list *l;
2060 struct l2cap_conn *conn = hcon->l2cap_data;
2061 struct l2cap_conn_rsp rsp;
2068 l = &conn->chan_list;
2070 BT_DBG("conn %p", conn);
2072 read_lock(&l->lock);
2074 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2077 if (sk->sk_state != BT_CONNECT2 ||
2078 (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
2079 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)) {
/* Auth succeeded: proceed to configuration. */
2085 sk->sk_state = BT_CONFIG;
/* Auth failed: reject with "security block" and schedule teardown. */
2088 sk->sk_state = BT_DISCONN;
2089 l2cap_sock_set_timer(sk, HZ/10);
2090 result = L2CAP_CR_SEC_BLOCK;
2093 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2094 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2095 rsp.result = cpu_to_le16(result);
2096 rsp.status = cpu_to_le16(0);
2097 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2098 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2103 read_unlock(&l->lock);
/* HCI callback: encryption state changed on this ACL link.  Answer all
 * channels still pending in BT_CONNECT2 with a Connect Response, and
 * for channels requiring L2CAP_LM_SECURE also request a link key
 * change on the underlying connection. */
2107 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status)
2109 struct l2cap_chan_list *l;
2110 struct l2cap_conn *conn = hcon->l2cap_data;
2111 struct l2cap_conn_rsp rsp;
2118 l = &conn->chan_list;
2120 BT_DBG("conn %p", conn);
2122 read_lock(&l->lock);
2124 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2127 if (sk->sk_state != BT_CONNECT2) {
/* Encryption up: proceed to configuration. */
2133 sk->sk_state = BT_CONFIG;
/* Encryption failed: reject with "security block" and schedule teardown. */
2136 sk->sk_state = BT_DISCONN;
2137 l2cap_sock_set_timer(sk, HZ/10);
2138 result = L2CAP_CR_SEC_BLOCK;
2141 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2142 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2143 rsp.result = cpu_to_le16(result);
2144 rsp.status = cpu_to_le16(0);
2145 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2146 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2148 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE)
2149 hci_conn_change_link_key(hcon);
2154 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived.  Reassemble L2CAP frames that may
 * be fragmented across multiple ACL packets: an ACL_START packet carries
 * the L2CAP header (giving the total expected length); continuation
 * packets are appended to conn->rx_skb until rx_len reaches zero, at
 * which point the complete frame goes to l2cap_recv_frame().  Any
 * inconsistency marks the connection unreliable (ECOMM). */
2158 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2160 struct l2cap_conn *conn = hcon->l2cap_data;
2162 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2165 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2167 if (flags & ACL_START) {
2168 struct l2cap_hdr *hdr;
/* A start frame while reassembly is in progress: drop the old one. */
2172 BT_ERR("Unexpected start frame (len %d)", skb->len);
2173 kfree_skb(conn->rx_skb);
2174 conn->rx_skb = NULL;
2176 l2cap_conn_unreliable(conn, ECOMM);
2180 BT_ERR("Frame is too short (len %d)", skb->len);
2181 l2cap_conn_unreliable(conn, ECOMM);
2185 hdr = (struct l2cap_hdr *) skb->data;
2186 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2188 if (len == skb->len) {
2189 /* Complete frame received */
2190 l2cap_recv_frame(conn, skb);
2194 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2196 if (skb->len > len) {
2197 BT_ERR("Frame is too long (len %d, expected len %d)",
2199 l2cap_conn_unreliable(conn, ECOMM);
2203 /* Allocate skb for the complete frame (with header) */
2204 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2207 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len counts the bytes still expected from continuation frames. */
2209 conn->rx_len = len - skb->len;
2211 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2213 if (!conn->rx_len) {
2214 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2215 l2cap_conn_unreliable(conn, ECOMM);
2219 if (skb->len > conn->rx_len) {
2220 BT_ERR("Fragment is too long (len %d, expected %d)",
2221 skb->len, conn->rx_len);
2222 kfree_skb(conn->rx_skb);
2223 conn->rx_skb = NULL;
2225 l2cap_conn_unreliable(conn, ECOMM);
2229 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2231 conn->rx_len -= skb->len;
2233 if (!conn->rx_len) {
2234 /* Complete frame received */
2235 l2cap_recv_frame(conn, conn->rx_skb);
2236 conn->rx_skb = NULL;
/* sysfs read handler: dump one line per L2CAP socket (addresses, state,
 * PSM, CIDs, MTUs, link mode) into the caller-supplied buffer. */
2245 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2248 struct hlist_node *node;
2251 read_lock_bh(&l2cap_sk_list.lock);
2253 sk_for_each(sk, node, &l2cap_sk_list.head) {
2254 struct l2cap_pinfo *pi = l2cap_pi(sk);
2256 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2257 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2258 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2259 pi->imtu, pi->omtu, pi->link_mode);
2262 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only class attribute /sys/class/bluetooth/l2cap. */
2267 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * Generic Bluetooth helpers (bt_sock_*) are used for recvmsg/poll;
 * mmap, socketpair and ioctl are unsupported. */
2269 static const struct proto_ops l2cap_sock_ops = {
2270 .family = PF_BLUETOOTH,
2271 .owner = THIS_MODULE,
2272 .release = l2cap_sock_release,
2273 .bind = l2cap_sock_bind,
2274 .connect = l2cap_sock_connect,
2275 .listen = l2cap_sock_listen,
2276 .accept = l2cap_sock_accept,
2277 .getname = l2cap_sock_getname,
2278 .sendmsg = l2cap_sock_sendmsg,
2279 .recvmsg = bt_sock_recvmsg,
2280 .poll = bt_sock_poll,
2281 .mmap = sock_no_mmap,
2282 .socketpair = sock_no_socketpair,
2283 .ioctl = sock_no_ioctl,
2284 .shutdown = l2cap_sock_shutdown,
2285 .setsockopt = l2cap_sock_setsockopt,
2286 .getsockopt = l2cap_sock_getsockopt
/* Protocol-family hook: creates L2CAP sockets for PF_BLUETOOTH. */
2289 static struct net_proto_family l2cap_sock_family_ops = {
2290 .family = PF_BLUETOOTH,
2291 .owner = THIS_MODULE,
2292 .create = l2cap_sock_create,
/* Registration with the HCI core: callbacks invoked for connection
 * setup/teardown, security events and incoming ACL data. */
2295 static struct hci_proto l2cap_hci_proto = {
2297 .id = HCI_PROTO_L2CAP,
2298 .connect_ind = l2cap_connect_ind,
2299 .connect_cfm = l2cap_connect_cfm,
2300 .disconn_ind = l2cap_disconn_ind,
2301 .auth_cfm = l2cap_auth_cfm,
2302 .encrypt_cfm = l2cap_encrypt_cfm,
2303 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family and the
 * HCI protocol handler, then create the sysfs info file.  Registrations
 * are unwound in reverse order on failure; the sysfs file is optional
 * (failure only logs an error). */
2306 static int __init l2cap_init(void)
2310 err = proto_register(&l2cap_proto, 0);
2314 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2316 BT_ERR("L2CAP socket registration failed");
2320 err = hci_register_proto(&l2cap_hci_proto);
2322 BT_ERR("L2CAP protocol registration failed");
2323 bt_sock_unregister(BTPROTO_L2CAP);
2327 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2328 BT_ERR("Failed to create L2CAP info file");
2330 BT_INFO("L2CAP ver %s", VERSION);
2331 BT_INFO("L2CAP socket layer initialized");
2336 proto_unregister(&l2cap_proto);
/* Module exit: tear everything down in reverse order of l2cap_init(). */
2340 static void __exit l2cap_exit(void)
2342 class_remove_file(bt_class, &class_attr_l2cap);
2344 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2345 BT_ERR("L2CAP socket unregistration failed");
2347 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2348 BT_ERR("L2CAP protocol unregistration failed");
2350 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules pull
 * in the L2CAP module via the module loader without using any of its
 * other exports. */
2353 void l2cap_load(void)
2355 /* Dummy function to trigger automatic L2CAP module loading by
2356 * other modules that use L2CAP sockets but don't use any other
2357 * symbols from it. */
2360 EXPORT_SYMBOL(l2cap_load);
/* Module entry points and metadata.  The "bt-proto-0" alias maps the
 * BTPROTO_L2CAP protocol number to this module for autoloading. */
2362 module_init(l2cap_init);
2363 module_exit(l2cap_exit);
2365 MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
2366 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2367 MODULE_VERSION(VERSION);
2368 MODULE_LICENSE("GPL");
2369 MODULE_ALIAS("bt-proto-0");