/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31))
#include <linux/rfkill.h>
#else
#include <linux/rfkill_backport.h>
#endif

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
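
/*
 * Example: a minimal sketch of how another module could hook these
 * notifications. The callback and notifier_block below are hypothetical
 * and not part of this file; the API is the standard one from
 * <linux/notifier.h>.
 */
#if 0
static int example_hci_notify(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct hci_dev *hdev = ptr;

	if (event == HCI_DEV_REG)
		BT_DBG("device %s registered", hdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_hci_notify,
};

/* module init: hci_register_notifier(&example_nb);
 * module exit: hci_unregister_notifier(&example_nb); */
#endif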
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
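
/*
 * Example: a minimal sketch of a caller-defined request run through
 * hci_request(). The callback only queues HCI commands; the matching
 * command complete event ends up in hci_req_complete(), which wakes the
 * waiter in __hci_request(). example_write_name_req() is hypothetical.
 */
#if 0
static void example_write_name_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_write_local_name cp;

	memset(&cp, 0, sizeof(cp));
	strncpy((char *) cp.name, (char *) opt, sizeof(cp.name) - 1);
	hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

/* err = hci_request(hdev, example_write_name_req, (unsigned long) "bluez",
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT)); */
#endif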
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs (0x8000 = 32768 baseband slots * 0.625 ms) */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs (0x7d00 = 32000 slots * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
}
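
/*
 * Example: the hold/put pattern callers of hci_dev_get() are expected to
 * follow. A sketch; hci_dev_put() is the release counterpart from
 * hci_core.h.
 */
#if 0
	struct hci_dev *hdev = hci_dev_get(0);	/* hci0, if present */

	if (hdev) {
		BT_DBG("found %s", hdev->name);
		/* ... use the device ... */
		hci_dev_put(hdev);
	}
#endif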
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
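
/*
 * Example: how user space reaches hci_inquiry() above, via the HCIINQUIRY
 * ioctl on a raw HCI socket. A userspace sketch (not kernel code), built
 * against <bluetooth/bluetooth.h> and <bluetooth/hci.h>; error handling
 * omitted. The kernel writes the updated request header back, followed by
 * up to num_rsp inquiry_info entries.
 */
#if 0
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[8];
	} buf = { .ir = { .dev_id = 0, .flags = IREQ_CACHE_FLUSH,
			.lap = { 0x33, 0x8b, 0x9e },	/* GIAC */
			.length = 4, .num_rsp = 8 } };
	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (ioctl(sk, HCIINQUIRY, (unsigned long) &buf) == 0)
		printf("%d responses\n", buf.ir.num_rsp);
#endif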
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);
	hci_dev_put(hdev);

	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
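
/*
 * Example: hci_dev_open() and hci_dev_close() are normally driven from user
 * space through the HCIDEVUP/HCIDEVDOWN ioctls on a raw HCI socket. A
 * userspace sketch (not kernel code); error handling omitted.
 */
#if 0
	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	ioctl(sk, HCIDEVUP, 0);		/* bring up hci0; fails with EALREADY if up */
	ioctl(sk, HCIDEVDOWN, 0);	/* take it back down */
#endif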
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err < 0)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
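
/*
 * Example: the user-space counterpart of hci_get_dev_list(). The caller
 * passes the buffer capacity in dev_num and reads back the actual count.
 * A userspace sketch; error handling omitted.
 */
#if 0
	struct {
		struct hci_dev_list_req dl;
		struct hci_dev_req dr[HCI_MAX_DEV];
	} req = { .dl = { .dev_num = HCI_MAX_DEV } };
	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
	int i;

	if (ioctl(sk, HCIGETDEVLIST, (unsigned long) &req) == 0)
		for (i = 0; i < req.dl.dev_num; i++)
			printf("hci%d flags 0x%x\n", req.dl.dev_req[i].dev_id,
						req.dl.dev_req[i].dev_opt);
#endif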
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
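
/*
 * Example: the registration sequence a transport driver follows. A minimal
 * sketch; my_open/my_close/my_send/my_destruct are hypothetical driver
 * callbacks (open, close and destruct are checked by hci_register_dev()
 * above, and send is needed by hci_send_frame()).
 */
#if 0
static int my_probe(void)
{
	struct hci_dev *hdev = hci_alloc_dev();

	if (!hdev)
		return -ENOMEM;

	hdev->open     = my_open;
	hdev->close    = my_close;
	hdev->send     = my_send;
	hdev->destruct = my_destruct;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return -EBUSY;
	}

	return 0;
}

/* teardown: hci_unregister_dev(hdev); hci_free_dev(hdev); */
#endif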
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
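
/*
 * Example: how a driver hands a complete packet to the core. A minimal
 * sketch; my_dev_rx() and its arguments are hypothetical.
 */
#if 0
static void my_dev_rx(struct hci_dev *hdev, void *packet, int len, int type)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return;

	memcpy(skb_put(skb, len), packet, len);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = type;	/* HCI_EVENT_PKT, HCI_ACLDATA_PKT, ... */

	hci_recv_frame(skb);
}
#endif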
/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
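
/*
 * Example: a byte-stream transport (e.g. a UART) can let the core reassemble
 * packets instead of tracking boundaries itself. A minimal sketch; buf/count
 * and the surrounding receive loop are hypothetical.
 */
#if 0
	/* one H4-style chunk: packet type indicator followed by payload */
	__u8 type = buf[0];

	if (hci_recv_fragment(hdev, type, buf + 1, count - 1) < 0)
		BT_ERR("%s corrupted packet", hdev->name);
#endif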
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
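
/*
 * Example: what a protocol module passes in. A minimal sketch showing only
 * the fields this file uses; l2cap_recv_acldata is a hypothetical stand-in
 * for the real L2CAP receive hook.
 */
#if 0
static struct hci_proto example_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.recv_acldata	= l2cap_recv_acldata,	/* fed from hci_acldata_packet() */
};

/* module init/exit: hci_register_proto(&example_proto);
 *		     hci_unregister_proto(&example_proto); */
#endif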
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		/* Divide the controller's free buffer credits evenly among
		 * the active connections, with a minimum quota of one. */
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: remote user terminated connection */
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}