2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <asm/unaligned.h>
34 #include <net/bluetooth/bluetooth.h>
35 #include <net/bluetooth/hci_core.h>
37 static void hci_rx_work(struct work_struct *work);
38 static void hci_cmd_work(struct work_struct *work);
39 static void hci_tx_work(struct work_struct *work);
42 LIST_HEAD(hci_dev_list);
43 DEFINE_RWLOCK(hci_dev_list_lock);
45 /* HCI callback list */
46 LIST_HEAD(hci_cb_list);
47 DEFINE_RWLOCK(hci_cb_list_lock);
49 /* HCI ID Numbering */
50 static DEFINE_IDA(hci_index_ida);
52 /* ---- HCI notifications ---- */
/* Forward an HCI device event to the HCI socket layer notification hook. */
54 static void hci_notify(struct hci_dev *hdev, int event)
56 hci_sock_dev_event(hdev, event);
59 /* ---- HCI debugfs entries ---- */
/* debugfs "dut_mode" read: report 'Y'/'N' for the HCI_DUT_MODE flag. */
61 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62 size_t count, loff_t *ppos)
64 struct hci_dev *hdev = file->private_data;
67 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
/* Two bytes: the flag character plus a trailing newline (set off-screen). */
70 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* debugfs "dut_mode" write: parse a boolean from userspace and toggle
 * Device Under Test mode by sending the matching HCI command
 * synchronously.  Requires the adapter to be up.
 */
73 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
74 size_t count, loff_t *ppos)
76 struct hci_dev *hdev = file->private_data;
79 size_t buf_size = min(count, (sizeof(buf)-1));
/* DUT mode can only be entered/left while the controller is powered. */
83 if (!test_bit(HCI_UP, &hdev->flags))
86 if (copy_from_user(buf, user_buf, buf_size))
90 if (strtobool(buf, &enable))
/* No-op if the requested state matches the current flag. */
93 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
/* Enable uses HCI_OP_ENABLE_DUT_MODE; disable falls back to HCI_OP_RESET. */
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
103 hci_req_unlock(hdev);
/* First byte of the command-complete payload is the HCI status code. */
108 err = -bt_to_errno(skb->data[0]);
114 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
/* File operations for the "dut_mode" debugfs entry. */
119 static const struct file_operations dut_mode_fops = {
121 .read = dut_mode_read,
122 .write = dut_mode_write,
123 .llseek = default_llseek,
/* debugfs "features" show: dump every valid LMP features page as hex
 * bytes, plus the LE feature page when the controller is LE capable.
 */
126 static int features_show(struct seq_file *f, void *ptr)
128 struct hci_dev *hdev = f->private;
/* Walk only pages the controller actually reported (<= max_page). */
132 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
133 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
134 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
135 hdev->features[p][0], hdev->features[p][1],
136 hdev->features[p][2], hdev->features[p][3],
137 hdev->features[p][4], hdev->features[p][5],
138 hdev->features[p][6], hdev->features[p][7]);
140 if (lmp_le_capable(hdev))
141 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
142 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
143 hdev->le_features[0], hdev->le_features[1],
144 hdev->le_features[2], hdev->le_features[3],
145 hdev->le_features[4], hdev->le_features[5],
146 hdev->le_features[6], hdev->le_features[7]);
147 hci_dev_unlock(hdev);
/* debugfs open: bind features_show to this hdev via single_open(). */
152 static int features_open(struct inode *inode, struct file *file)
154 return single_open(file, features_show, inode->i_private);
/* File operations for the "features" debugfs entry (seq_file based). */
157 static const struct file_operations features_fops = {
158 .open = features_open,
161 .release = single_release,
/* debugfs "blacklist" show: one "bdaddr (type N)" line per entry. */
164 static int blacklist_show(struct seq_file *f, void *p)
166 struct hci_dev *hdev = f->private;
167 struct bdaddr_list *b;
170 list_for_each_entry(b, &hdev->blacklist, list)
171 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
172 hci_dev_unlock(hdev);
/* debugfs open: bind blacklist_show to this hdev via single_open(). */
177 static int blacklist_open(struct inode *inode, struct file *file)
179 return single_open(file, blacklist_show, inode->i_private);
/* File operations for the "blacklist" debugfs entry. */
182 static const struct file_operations blacklist_fops = {
183 .open = blacklist_open,
186 .release = single_release,
/* debugfs "uuids" show: print each registered service UUID in standard
 * big-endian textual form.
 */
189 static int uuids_show(struct seq_file *f, void *p)
191 struct hci_dev *hdev = f->private;
192 struct bt_uuid *uuid;
195 list_for_each_entry(uuid, &hdev->uuids, list) {
198 /* The Bluetooth UUID values are stored in big endian,
199 * but with reversed byte order. So convert them into
200 * the right order for the %pUb modifier.
/* Byte-reverse the stored 16-byte UUID into val[]. */
202 for (i = 0; i < 16; i++)
203 val[i] = uuid->uuid[15 - i];
205 seq_printf(f, "%pUb\n", val);
207 hci_dev_unlock(hdev);
/* debugfs open: bind uuids_show to this hdev via single_open(). */
212 static int uuids_open(struct inode *inode, struct file *file)
214 return single_open(file, uuids_show, inode->i_private);
/* File operations for the "uuids" debugfs entry. */
217 static const struct file_operations uuids_fops = {
221 .release = single_release,
/* debugfs "inquiry_cache" show: dump every cached inquiry result with
 * its page-scan parameters, class of device, clock offset, RSSI, SSP
 * mode and discovery timestamp.
 */
224 static int inquiry_cache_show(struct seq_file *f, void *p)
226 struct hci_dev *hdev = f->private;
227 struct discovery_state *cache = &hdev->discovery;
228 struct inquiry_entry *e;
232 list_for_each_entry(e, &cache->all, all) {
233 struct inquiry_data *data = &e->data;
234 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
236 data->pscan_rep_mode, data->pscan_period_mode,
/* dev_class is stored little-endian; print bytes [2][1][0]. */
237 data->pscan_mode, data->dev_class[2],
238 data->dev_class[1], data->dev_class[0],
239 __le16_to_cpu(data->clock_offset),
240 data->rssi, data->ssp_mode, e->timestamp);
243 hci_dev_unlock(hdev);
/* debugfs open: bind inquiry_cache_show to this hdev via single_open(). */
248 static int inquiry_cache_open(struct inode *inode, struct file *file)
250 return single_open(file, inquiry_cache_show, inode->i_private);
/* File operations for the "inquiry_cache" debugfs entry. */
253 static const struct file_operations inquiry_cache_fops = {
254 .open = inquiry_cache_open,
257 .release = single_release,
/* debugfs "link_keys" show: dump each stored BR/EDR link key as
 * "bdaddr type keybytes pin_len".
 */
260 static int link_keys_show(struct seq_file *f, void *ptr)
262 struct hci_dev *hdev = f->private;
263 struct list_head *p, *n;
/* _safe variant tolerates entries being removed while iterating. */
266 list_for_each_safe(p, n, &hdev->link_keys) {
267 struct link_key *key = list_entry(p, struct link_key, list);
268 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
269 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
271 hci_dev_unlock(hdev);
/* debugfs open: bind link_keys_show to this hdev via single_open(). */
276 static int link_keys_open(struct inode *inode, struct file *file)
278 return single_open(file, link_keys_show, inode->i_private);
/* File operations for the "link_keys" debugfs entry. */
281 static const struct file_operations link_keys_fops = {
282 .open = link_keys_open,
285 .release = single_release,
/* debugfs "use_debug_keys" read: report 'Y'/'N' for HCI_DEBUG_KEYS. */
288 static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
289 size_t count, loff_t *ppos)
291 struct hci_dev *hdev = file->private_data;
294 buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
297 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
/* File operations for the read-only "use_debug_keys" debugfs entry. */
300 static const struct file_operations use_debug_keys_fops = {
302 .read = use_debug_keys_read,
303 .llseek = default_llseek,
/* debugfs "dev_class" show: print class of device as 0xXXXXXX
 * (bytes in [2][1][0] order, matching its little-endian storage).
 */
306 static int dev_class_show(struct seq_file *f, void *ptr)
308 struct hci_dev *hdev = f->private;
311 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
312 hdev->dev_class[1], hdev->dev_class[0]);
313 hci_dev_unlock(hdev);
/* debugfs open: bind dev_class_show to this hdev via single_open(). */
318 static int dev_class_open(struct inode *inode, struct file *file)
320 return single_open(file, dev_class_show, inode->i_private);
/* File operations for the "dev_class" debugfs entry. */
323 static const struct file_operations dev_class_fops = {
324 .open = dev_class_open,
327 .release = single_release,
/* debugfs attribute getter: current voice setting, under hdev lock. */
330 static int voice_setting_get(void *data, u64 *val)
332 struct hci_dev *hdev = data;
335 *val = hdev->voice_setting;
336 hci_dev_unlock(hdev);
/* Read-only attribute (no setter), shown as a 4-digit hex value. */
341 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
342 NULL, "0x%4.4llx\n");
/* debugfs attribute setter: store the auto-accept delay (unvalidated). */
344 static int auto_accept_delay_set(void *data, u64 val)
346 struct hci_dev *hdev = data;
349 hdev->auto_accept_delay = val;
350 hci_dev_unlock(hdev);
/* debugfs attribute getter: current auto-accept delay. */
355 static int auto_accept_delay_get(void *data, u64 *val)
357 struct hci_dev *hdev = data;
360 *val = hdev->auto_accept_delay;
361 hci_dev_unlock(hdev);
/* Read/write attribute, printed as an unsigned decimal. */
366 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
367 auto_accept_delay_set, "%llu\n");
/* debugfs attribute setter: toggle SSP debug mode (0/1 only) by sending
 * HCI_OP_WRITE_SSP_DEBUG_MODE synchronously; requires adapter up.
 */
369 static int ssp_debug_mode_set(void *data, u64 val)
371 struct hci_dev *hdev = data;
376 if (val != 0 && val != 1)
379 if (!test_bit(HCI_UP, &hdev->flags))
384 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
385 &mode, HCI_CMD_TIMEOUT);
386 hci_req_unlock(hdev);
/* First byte of the command-complete payload is the HCI status code. */
391 err = -bt_to_errno(skb->data[0]);
/* Only cache the new mode once the controller accepted the command. */
398 hdev->ssp_debug_mode = val;
399 hci_dev_unlock(hdev);
/* debugfs attribute getter: cached SSP debug mode. */
404 static int ssp_debug_mode_get(void *data, u64 *val)
406 struct hci_dev *hdev = data;
409 *val = hdev->ssp_debug_mode;
410 hci_dev_unlock(hdev);
/* Read/write attribute, printed as an unsigned decimal. */
415 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
416 ssp_debug_mode_set, "%llu\n");
/* debugfs attribute setter: idle timeout; 0 disables, otherwise must be
 * within 500..3600000 (presumably milliseconds — confirm against users).
 */
418 static int idle_timeout_set(void *data, u64 val)
420 struct hci_dev *hdev = data;
422 if (val != 0 && (val < 500 || val > 3600000))
426 hdev->idle_timeout = val;
427 hci_dev_unlock(hdev);
/* debugfs attribute getter: current idle timeout. */
432 static int idle_timeout_get(void *data, u64 *val)
434 struct hci_dev *hdev = data;
437 *val = hdev->idle_timeout;
438 hci_dev_unlock(hdev);
/* Read/write attribute, printed as an unsigned decimal. */
443 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
444 idle_timeout_set, "%llu\n");
/* debugfs attribute setter: sniff min interval; must be non-zero, even,
 * and not exceed the current max interval.
 */
446 static int sniff_min_interval_set(void *data, u64 val)
448 struct hci_dev *hdev = data;
450 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
454 hdev->sniff_min_interval = val;
455 hci_dev_unlock(hdev);
/* debugfs attribute getter: current sniff min interval. */
460 static int sniff_min_interval_get(void *data, u64 *val)
462 struct hci_dev *hdev = data;
465 *val = hdev->sniff_min_interval;
466 hci_dev_unlock(hdev);
/* Read/write attribute, printed as an unsigned decimal. */
471 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
472 sniff_min_interval_set, "%llu\n");
/* debugfs attribute setter: sniff max interval; must be non-zero, even,
 * and not fall below the current min interval.
 */
474 static int sniff_max_interval_set(void *data, u64 val)
476 struct hci_dev *hdev = data;
478 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
482 hdev->sniff_max_interval = val;
483 hci_dev_unlock(hdev);
/* debugfs attribute getter: current sniff max interval. */
488 static int sniff_max_interval_get(void *data, u64 *val)
490 struct hci_dev *hdev = data;
493 *val = hdev->sniff_max_interval;
494 hci_dev_unlock(hdev);
/* Read/write attribute, printed as an unsigned decimal. */
499 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
500 sniff_max_interval_set, "%llu\n");
/* debugfs "static_address" show: print the LE static address. */
502 static int static_address_show(struct seq_file *f, void *p)
504 struct hci_dev *hdev = f->private;
507 seq_printf(f, "%pMR\n", &hdev->static_addr);
508 hci_dev_unlock(hdev);
/* debugfs open: bind static_address_show to this hdev via single_open(). */
513 static int static_address_open(struct inode *inode, struct file *file)
515 return single_open(file, static_address_show, inode->i_private);
/* File operations for the "static_address" debugfs entry. */
518 static const struct file_operations static_address_fops = {
519 .open = static_address_open,
522 .release = single_release,
/* debugfs attribute setter: own LE address type; only 0 (public) or
 * 1 (random) is accepted.
 */
525 static int own_address_type_set(void *data, u64 val)
527 struct hci_dev *hdev = data;
529 if (val != 0 && val != 1)
533 hdev->own_addr_type = val;
534 hci_dev_unlock(hdev);
/* debugfs attribute getter: current own LE address type. */
539 static int own_address_type_get(void *data, u64 *val)
541 struct hci_dev *hdev = data;
544 *val = hdev->own_addr_type;
545 hci_dev_unlock(hdev);
/* Read/write attribute, printed as an unsigned decimal. */
550 DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
551 own_address_type_set, "%llu\n");
553 static int long_term_keys_show(struct seq_file *f, void *ptr)
555 struct hci_dev *hdev = f->private;
556 struct list_head *p, *n;
559 list_for_each_safe(p, n, &hdev->link_keys) {
560 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
561 seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\\n",
562 <k->bdaddr, ltk->bdaddr_type, ltk->authenticated,
563 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
564 8, ltk->rand, 16, ltk->val);
566 hci_dev_unlock(hdev);
/* debugfs open: bind long_term_keys_show to this hdev via single_open(). */
571 static int long_term_keys_open(struct inode *inode, struct file *file)
573 return single_open(file, long_term_keys_show, inode->i_private);
/* File operations for the "long_term_keys" debugfs entry. */
576 static const struct file_operations long_term_keys_fops = {
577 .open = long_term_keys_open,
580 .release = single_release,
/* debugfs attribute setter: LE connection min interval; must be within
 * the spec range 0x0006..0x0c80 and not exceed the current max.
 */
583 static int conn_min_interval_set(void *data, u64 val)
585 struct hci_dev *hdev = data;
587 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
591 hdev->le_conn_min_interval = val;
592 hci_dev_unlock(hdev);
/* debugfs attribute getter: current LE connection min interval. */
597 static int conn_min_interval_get(void *data, u64 *val)
599 struct hci_dev *hdev = data;
602 *val = hdev->le_conn_min_interval;
603 hci_dev_unlock(hdev);
/* Read/write attribute, printed as an unsigned decimal. */
608 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
609 conn_min_interval_set, "%llu\n");
/* debugfs attribute setter: LE connection max interval; must be within
 * the spec range 0x0006..0x0c80 and not fall below the current min.
 */
611 static int conn_max_interval_set(void *data, u64 val)
613 struct hci_dev *hdev = data;
615 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
619 hdev->le_conn_max_interval = val;
620 hci_dev_unlock(hdev);
/* debugfs attribute getter: current LE connection max interval. */
625 static int conn_max_interval_get(void *data, u64 *val)
627 struct hci_dev *hdev = data;
630 *val = hdev->le_conn_max_interval;
631 hci_dev_unlock(hdev);
/* Read/write attribute, printed as an unsigned decimal. */
636 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
637 conn_max_interval_set, "%llu\n");
639 /* ---- HCI requests ---- */
/* Completion callback for synchronous requests: record the result and
 * wake the waiter sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
641 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
643 BT_DBG("%s result 0x%2.2x", hdev->name, result);
645 if (hdev->req_status == HCI_REQ_PEND) {
646 hdev->req_result = result;
647 hdev->req_status = HCI_REQ_DONE;
648 wake_up_interruptible(&hdev->req_wait_q);
/* Cancel a pending synchronous request with the given errno value and
 * wake its waiter; no-op when nothing is pending.
 */
652 static void hci_req_cancel(struct hci_dev *hdev, int err)
654 BT_DBG("%s err 0x%2.2x", hdev->name, err);
656 if (hdev->req_status == HCI_REQ_PEND) {
657 hdev->req_result = err;
658 hdev->req_status = HCI_REQ_CANCELED;
659 wake_up_interruptible(&hdev->req_wait_q);
/* Take ownership of the last received event (hdev->recv_evt) and, after
 * validating it, return it positioned at the command-complete payload
 * for the given opcode.  Returns ERR_PTR(-ENODATA) when no matching
 * event is available.  Several error-path lines are missing from this
 * excerpt — the skipped branches presumably free skb; verify upstream.
 */
663 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
666 struct hci_ev_cmd_complete *ev;
667 struct hci_event_hdr *hdr;
/* Detach the buffered event under the lock so nobody else consumes it. */
672 skb = hdev->recv_evt;
673 hdev->recv_evt = NULL;
675 hci_dev_unlock(hdev);
678 return ERR_PTR(-ENODATA);
680 if (skb->len < sizeof(*hdr)) {
681 BT_ERR("Too short HCI event");
/* Strip the event header before inspecting the payload. */
685 hdr = (void *) skb->data;
686 skb_pull(skb, HCI_EVENT_HDR_SIZE);
/* When a specific event was requested, anything else is a mismatch. */
689 if (hdr->evt != event)
694 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
695 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
699 if (skb->len < sizeof(*ev)) {
700 BT_ERR("Too short cmd_complete event");
704 ev = (void *) skb->data;
705 skb_pull(skb, sizeof(*ev));
/* Success: skb->data now points at the command's return parameters. */
707 if (opcode == __le16_to_cpu(ev->opcode))
710 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
711 __le16_to_cpu(ev->opcode));
715 return ERR_PTR(-ENODATA);
/* Send a single HCI command and sleep (interruptibly, bounded by
 * timeout) until the matching event arrives; returns the resulting
 * skb from hci_get_cmd_complete() or an ERR_PTR.  Caller must hold
 * the request lock (hci_req_lock) — see __hci_cmd_sync() users above.
 */
718 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
719 const void *param, u8 event, u32 timeout)
721 DECLARE_WAITQUEUE(wait, current);
722 struct hci_request req;
725 BT_DBG("%s", hdev->name);
727 hci_req_init(&req, hdev);
729 hci_req_add_ev(&req, opcode, plen, param, event);
/* Mark pending before running so the completion can't be missed. */
731 hdev->req_status = HCI_REQ_PEND;
733 err = hci_req_run(&req, hci_req_sync_complete);
737 add_wait_queue(&hdev->req_wait_q, &wait);
738 set_current_state(TASK_INTERRUPTIBLE);
740 schedule_timeout(timeout);
742 remove_wait_queue(&hdev->req_wait_q, &wait);
/* Interrupted by a signal before completion. */
744 if (signal_pending(current))
745 return ERR_PTR(-EINTR);
747 switch (hdev->req_status) {
/* HCI_REQ_DONE: translate the controller status into an errno. */
749 err = -bt_to_errno(hdev->req_result);
752 case HCI_REQ_CANCELED:
753 err = -hdev->req_result;
/* Reset request bookkeeping for the next synchronous caller. */
761 hdev->req_status = hdev->req_result = 0;
763 BT_DBG("%s end: err %d", hdev->name, err);
768 return hci_get_cmd_complete(hdev, opcode, event);
770 EXPORT_SYMBOL(__hci_cmd_sync_ev);
/* Convenience wrapper: synchronous command waiting for the default
 * (command-complete) event rather than a specific one.
 */
772 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
773 const void *param, u32 timeout)
775 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
777 EXPORT_SYMBOL(__hci_cmd_sync);
779 /* Execute request and wait for completion. */
/* Build a request via the supplied func(req, opt) callback, run it, and
 * sleep (interruptibly, bounded by timeout) until it completes, is
 * cancelled, or times out.  Returns 0 or a negative errno.  Caller must
 * hold the request lock (see hci_req_sync()).
 */
780 static int __hci_req_sync(struct hci_dev *hdev,
781 void (*func)(struct hci_request *req,
783 unsigned long opt, __u32 timeout)
785 struct hci_request req;
786 DECLARE_WAITQUEUE(wait, current);
789 BT_DBG("%s start", hdev->name);
791 hci_req_init(&req, hdev);
/* Mark pending before running so the completion can't be missed. */
793 hdev->req_status = HCI_REQ_PEND;
797 err = hci_req_run(&req, hci_req_sync_complete);
799 hdev->req_status = 0;
801 /* ENODATA means the HCI request command queue is empty.
802 * This can happen when a request with conditionals doesn't
803 * trigger any commands to be sent. This is normal behavior
804 * and should not trigger an error return.
812 add_wait_queue(&hdev->req_wait_q, &wait);
813 set_current_state(TASK_INTERRUPTIBLE);
815 schedule_timeout(timeout);
817 remove_wait_queue(&hdev->req_wait_q, &wait);
819 if (signal_pending(current))
822 switch (hdev->req_status) {
/* HCI_REQ_DONE: translate the controller status into an errno. */
824 err = -bt_to_errno(hdev->req_result);
827 case HCI_REQ_CANCELED:
828 err = -hdev->req_result;
/* Reset request bookkeeping for the next synchronous caller. */
836 hdev->req_status = hdev->req_result = 0;
838 BT_DBG("%s end: err %d", hdev->name, err);
/* Public-facing synchronous request helper: refuse when the device is
 * down, then serialize against other requests via the request lock and
 * delegate to __hci_req_sync().
 */
843 static int hci_req_sync(struct hci_dev *hdev,
844 void (*req)(struct hci_request *req,
846 unsigned long opt, __u32 timeout)
850 if (!test_bit(HCI_UP, &hdev->flags))
853 /* Serialize all requests */
855 ret = __hci_req_sync(hdev, req, opt, timeout)
856 hci_req_unlock(hdev);
/* Request builder: flag HCI_RESET and queue the HCI Reset command. */
861 static void hci_reset_req(struct hci_request *req, unsigned long opt)
863 BT_DBG("%s %ld", req->hdev->name, opt)
866 set_bit(HCI_RESET, &req->hdev->flags);
867 hci_req_add(req, HCI_OP_RESET, 0, NULL);
/* Stage-1 init for BR/EDR controllers: packet-based flow control plus
 * the basic identity reads (features, version, BD address).
 */
870 static void bredr_init(struct hci_request *req)
872 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
874 /* Read Local Supported Features */
875 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
877 /* Read Local Version */
878 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
880 /* Read BD Address */
881 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific discovery reads.
 */
884 static void amp_init(struct hci_request *req)
886 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
888 /* Read Local Version */
889 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
891 /* Read Local Supported Commands */
892 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
894 /* Read Local Supported Features */
895 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
897 /* Read Local AMP Info */
898 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
900 /* Read Data Blk size */
901 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
903 /* Read Flow Control Mode */
904 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
906 /* Read Location Data */
907 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
/* Init stage 1: optionally reset the controller, then dispatch on
 * device type (the per-type bredr_init/amp_init calls fall in lines
 * elided from this excerpt).
 */
910 static void hci_init1_req(struct hci_request *req, unsigned long opt)
912 struct hci_dev *hdev = req->hdev;
914 BT_DBG("%s %ld", hdev->name, opt);
/* Skip the reset only when the driver asked to reset on close instead. */
917 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
918 hci_reset_req(req, 0);
920 switch (hdev->dev_type) {
930 BT_ERR("Unknown device type %d", hdev->dev_type);
935 static void bredr_setup(struct hci_request *req)
937 struct hci_dev *hdev = req->hdev;
942 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
943 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
945 /* Read Class of Device */
946 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
948 /* Read Local Name */
949 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
951 /* Read Voice Setting */
952 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
954 /* Read Number of Supported IAC */
955 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
957 /* Read Current IAC LAP */
958 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
960 /* Clear Event Filters */
961 flt_type = HCI_FLT_CLEAR_ALL;
962 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
964 /* Connection accept timeout ~20 secs */
965 param = __constant_cpu_to_le16(0x7d00);
966 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
968 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
969 * but it does not support page scan related HCI commands.
971 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
972 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
973 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
/* Stage-2 setup for LE capable controllers: queue the LE parameter
 * reads and auto-enable LE on single-mode (LE-only) controllers.
 */
977 static void le_setup(struct hci_request *req)
979 struct hci_dev *hdev = req->hdev;
981 /* Read LE Buffer Size */
982 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
984 /* Read LE Local Supported Features */
985 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
987 /* Read LE Advertising Channel TX Power */
988 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
990 /* Read LE White List Size */
991 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
993 /* Read LE Supported States */
994 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
996 /* LE-only controllers have LE implicitly enabled */
997 if (!lmp_bredr_capable(hdev))
998 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
/* Pick the best supported inquiry mode, with quirks for specific
 * manufacturer/revision combinations known to misreport RSSI support.
 * The returned values fall on lines elided from this excerpt.
 */
1001 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1003 if (lmp_ext_inq_capable(hdev))
1006 if (lmp_inq_rssi_capable(hdev))
/* Known-broken controllers follow, keyed on manufacturer/rev/subver. */
1009 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1010 hdev->lmp_subver == 0x0757)
1013 if (hdev->manufacturer == 15) {
1014 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1016 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1018 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1022 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1023 hdev->lmp_subver == 0x1805)
/* Queue HCI_OP_WRITE_INQUIRY_MODE with the best supported mode. */
1029 static void hci_setup_inquiry_mode(struct hci_request *req)
1033 mode = hci_get_inquiry_mode(req->hdev);
1035 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
/* Build and queue the HCI event mask based on controller capabilities,
 * starting from a BR/EDR default (or a minimal LE-only default) and
 * adding per-feature bits.  Also queues the LE event mask when LE
 * capable.
 */
1038 static void hci_setup_event_mask(struct hci_request *req)
1040 struct hci_dev *hdev = req->hdev;
1042 /* The second byte is 0xff instead of 0x9f (two reserved bits
1043 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1044 * command otherwise.
1046 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1048 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1049 * any event mask for pre 1.2 devices.
1051 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1054 if (lmp_bredr_capable(hdev)) {
1055 events[4] |= 0x01; /* Flow Specification Complete */
1056 events[4] |= 0x02; /* Inquiry Result with RSSI */
1057 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1058 events[5] |= 0x08; /* Synchronous Connection Complete */
1059 events[5] |= 0x10; /* Synchronous Connection Changed */
1061 /* Use a different default for LE-only devices */
1062 memset(events, 0, sizeof(events));
1063 events[0] |= 0x10; /* Disconnection Complete */
1064 events[0] |= 0x80; /* Encryption Change */
1065 events[1] |= 0x08; /* Read Remote Version Information Complete */
1066 events[1] |= 0x20; /* Command Complete */
1067 events[1] |= 0x40; /* Command Status */
1068 events[1] |= 0x80; /* Hardware Error */
1069 events[2] |= 0x04; /* Number of Completed Packets */
1070 events[3] |= 0x02; /* Data Buffer Overflow */
1071 events[5] |= 0x80; /* Encryption Key Refresh Complete */
/* Feature-conditional additions follow. */
1074 if (lmp_inq_rssi_capable(hdev))
1075 events[4] |= 0x02; /* Inquiry Result with RSSI */
1077 if (lmp_sniffsubr_capable(hdev))
1078 events[5] |= 0x20; /* Sniff Subrating */
1080 if (lmp_pause_enc_capable(hdev))
1081 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1083 if (lmp_ext_inq_capable(hdev))
1084 events[5] |= 0x40; /* Extended Inquiry Result */
1086 if (lmp_no_flush_capable(hdev))
1087 events[7] |= 0x01; /* Enhanced Flush Complete */
1089 if (lmp_lsto_capable(hdev))
1090 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1092 if (lmp_ssp_capable(hdev)) {
1093 events[6] |= 0x01; /* IO Capability Request */
1094 events[6] |= 0x02; /* IO Capability Response */
1095 events[6] |= 0x04; /* User Confirmation Request */
1096 events[6] |= 0x08; /* User Passkey Request */
1097 events[6] |= 0x10; /* Remote OOB Data Request */
1098 events[6] |= 0x20; /* Simple Pairing Complete */
1099 events[7] |= 0x04; /* User Passkey Notification */
1100 events[7] |= 0x08; /* Keypress Notification */
1101 events[7] |= 0x10; /* Remote Host Supported
1102 * Features Notification
1106 if (lmp_le_capable(hdev))
1107 events[7] |= 0x20; /* LE Meta-Event */
1109 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
/* LE event mask; the bits set between these lines are elided here. */
1111 if (lmp_le_capable(hdev)) {
1112 memset(events, 0, sizeof(events));
1114 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1115 sizeof(events), events);
/* Init stage 2: per-transport setup (bredr_setup/le_setup calls fall in
 * elided lines), event mask, capability-dependent commands, SSP/EIR
 * configuration and link-security enable.
 */
1119 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1121 struct hci_dev *hdev = req->hdev;
1123 if (lmp_bredr_capable(hdev))
1126 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1128 if (lmp_le_capable(hdev))
1131 hci_setup_event_mask(req);
1133 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1134 * local supported commands HCI command.
1136 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1137 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1139 if (lmp_ssp_capable(hdev)) {
1140 /* When SSP is available, then the host features page
1141 * should also be available as well. However some
1142 * controllers list the max_page as 0 as long as SSP
1143 * has not been enabled. To achieve proper debugging
1144 * output, force the minimum max_page to 1 at least.
1146 hdev->max_page = 0x01;
1148 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1150 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1151 sizeof(mode), &mode);
/* SSP disabled: clear the cached EIR and write an empty one. */
1153 struct hci_cp_write_eir cp;
1155 memset(hdev->eir, 0, sizeof(hdev->eir));
1156 memset(&cp, 0, sizeof(cp));
1158 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1162 if (lmp_inq_rssi_capable(hdev))
1163 hci_setup_inquiry_mode(req);
1165 if (lmp_inq_tx_pwr_capable(hdev))
1166 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1168 if (lmp_ext_feat_capable(hdev)) {
1169 struct hci_cp_read_local_ext_features cp;
1172 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1176 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1178 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
/* Build the default link policy from the controller's LMP capabilities
 * (role switch / hold / sniff / park) and queue the write command.
 */
1183 static void hci_setup_link_policy(struct hci_request *req)
1185 struct hci_dev *hdev = req->hdev;
1186 struct hci_cp_write_def_link_policy cp;
1187 u16 link_policy = 0;
1189 if (lmp_rswitch_capable(hdev))
1190 link_policy |= HCI_LP_RSWITCH;
1191 if (lmp_hold_capable(hdev))
1192 link_policy |= HCI_LP_HOLD;
1193 if (lmp_sniff_capable(hdev))
1194 link_policy |= HCI_LP_SNIFF;
1195 if (lmp_park_capable(hdev))
1196 link_policy |= HCI_LP_PARK;
1198 cp.policy = cpu_to_le16(link_policy);
1199 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
/* Sync the LE host-supported setting with the HCI_LE_ENABLED flag,
 * writing it only when it differs from what the controller reports.
 * LE-only devices are skipped (no explicit enablement).
 */
1202 static void hci_set_le_support(struct hci_request *req)
1204 struct hci_dev *hdev = req->hdev;
1205 struct hci_cp_write_le_host_supported cp;
1207 /* LE-only devices do not support explicit enablement */
1208 if (!lmp_bredr_capable(hdev))
1211 memset(&cp, 0, sizeof(cp));
1213 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1215 cp.simul = lmp_le_br_capable(hdev);
1218 if (cp.le != lmp_host_le_capable(hdev))
1219 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
/* Build and queue event-mask page 2, enabling Connectionless Slave
 * Broadcast events according to the master/slave role feature bits.
 */
1223 static void hci_set_event_mask_page_2(struct hci_request *req)
1225 struct hci_dev *hdev = req->hdev;
1226 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1228 /* If Connectionless Slave Broadcast master role is supported
1229 * enable all necessary events for it.
1231 if (hdev->features[2][0] & 0x01) {
1232 events[1] |= 0x40; /* Triggered Clock Capture */
1233 events[1] |= 0x80; /* Synchronization Train Complete */
1234 events[2] |= 0x10; /* Slave Page Response Timeout */
1235 events[2] |= 0x20; /* CSB Channel Map Change */
1238 /* If Connectionless Slave Broadcast slave role is supported
1239 * enable all necessary events for it.
1241 if (hdev->features[2][0] & 0x02) {
1242 events[2] |= 0x01; /* Synchronization Train Received */
1243 events[2] |= 0x02; /* CSB Receive */
1244 events[2] |= 0x04; /* CSB Timeout */
1245 events[2] |= 0x08; /* Truncated Page Complete */
1248 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
/* Init stage 3: delete stored link keys (when the command is actually
 * supported), set the link policy, choose the LE own-address type
 * during initial setup, and read the remaining feature pages.
 */
1251 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1253 struct hci_dev *hdev = req->hdev;
1256 /* Some Broadcom based Bluetooth controllers do not support the
1257 * Delete Stored Link Key command. They are clearly indicating its
1258 * absence in the bit mask of supported commands.
1260 * Check the supported commands and only if the the command is marked
1261 * as supported send it. If not supported assume that the controller
1262 * does not have actual support for stored link keys which makes this
1263 * command redundant anyway.
1265 if (hdev->commands[6] & 0x80) {
1266 struct hci_cp_delete_stored_link_key cp;
1268 bacpy(&cp.bdaddr, BDADDR_ANY);
1269 cp.delete_all = 0x01;
1270 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
/* Write Default Link Policy is bit 4 of command byte 5. */
1274 if (hdev->commands[5] & 0x10)
1275 hci_setup_link_policy(req);
1277 if (lmp_le_capable(hdev)) {
1278 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1279 /* If the controller has a public BD_ADDR, then
1280 * by default use that one. If this is a LE only
1281 * controller without a public address, default
1282 * to the random address.
1284 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1285 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1287 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1290 hci_set_le_support(req);
1293 /* Read features beyond page 1 if available */
1294 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1295 struct hci_cp_read_local_ext_features cp;
1298 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
/* Init stage 4: event-mask page 2 and Synchronization Train parameters,
 * each gated on the corresponding supported-command/feature bit.
 */
1303 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1305 struct hci_dev *hdev = req->hdev;
1307 /* Set event mask page 2 if the HCI command for it is supported */
1308 if (hdev->commands[22] & 0x04)
1309 hci_set_event_mask_page_2(req);
1311 /* Check for Synchronization Train support */
1312 if (hdev->features[2][0] & 0x04)
1313 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
/* Run all four synchronous init stages in order, then (only during the
 * initial HCI_SETUP phase) create the per-capability debugfs entries.
 * AMP controllers stop after stage 1; stages 2-4 are BR/EDR/LE only.
 */
1316 static int __hci_init(struct hci_dev *hdev)
1320 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1324 /* The Device Under Test (DUT) mode is special and available for
1325 * all controller types. So just create it early on.
1327 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1328 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1332 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1333 * BR/EDR/LE type controllers. AMP controllers only need the
1336 if (hdev->dev_type != HCI_BREDR)
1339 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1343 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1347 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1351 /* Only create debugfs entries during the initial setup
1352 * phase and not every time the controller gets powered on.
1354 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
/* Entries valid for every controller type. */
1357 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1359 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1360 &hdev->manufacturer);
1361 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1362 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1363 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1365 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
/* BR/EDR-only entries. */
1367 if (lmp_bredr_capable(hdev)) {
1368 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1369 hdev, &inquiry_cache_fops);
1370 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1371 hdev, &link_keys_fops);
1372 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1373 hdev, &use_debug_keys_fops);
1374 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1375 hdev, &dev_class_fops);
1376 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1377 hdev, &voice_setting_fops);
1380 if (lmp_ssp_capable(hdev)) {
1381 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1382 hdev, &auto_accept_delay_fops);
1383 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1384 hdev, &ssp_debug_mode_fops);
1387 if (lmp_sniff_capable(hdev)) {
1388 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1389 hdev, &idle_timeout_fops);
1390 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1391 hdev, &sniff_min_interval_fops);
1392 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1393 hdev, &sniff_max_interval_fops);
/* LE-only entries. */
1396 if (lmp_le_capable(hdev)) {
1397 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1398 &hdev->le_white_list_size);
1399 debugfs_create_file("static_address", 0444, hdev->debugfs,
1400 hdev, &static_address_fops);
1401 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1402 hdev, &own_address_type_fops);
1403 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1404 hdev, &long_term_keys_fops);
1405 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1406 hdev, &conn_min_interval_fops);
1407 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1408 hdev, &conn_max_interval_fops);
1414 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1418 BT_DBG("%s %x", req->hdev->name, scan);
1420 /* Inquiry and Page scans */
1421 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1424 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1428 BT_DBG("%s %x", req->hdev->name, auth);
1430 /* Authentication */
1431 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1434 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1438 BT_DBG("%s %x", req->hdev->name, encrypt);
1441 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1444 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1446 __le16 policy = cpu_to_le16(opt);
1448 BT_DBG("%s %x", req->hdev->name, policy);
1450 /* Default link policy */
1451 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1454 /* Get HCI device by index.
1455  * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
1458 	struct hci_dev *hdev = NULL, *d;
1460 	BT_DBG("%d", index);
/* Walk the global device list under the read lock; on an id match the
 * device's refcount is taken via hci_dev_hold() before returning it.
 * NOTE(review): listing is elided here (negative-index guard, loop break
 * and final return are among the missing original lines). */
1465 	read_lock(&hci_dev_list_lock);
1466 	list_for_each_entry(d, &hci_dev_list, list) {
1467 		if (d->id == index) {
1468 			hdev = hci_dev_hold(d);
1472 	read_unlock(&hci_dev_list_lock);
1476 /* ---- Inquiry support ---- */
/* Report whether device discovery is currently in progress, i.e. the
 * discovery state machine is in FINDING or RESOLVING. */
1478 bool hci_discovery_active(struct hci_dev *hdev)
1480 	struct discovery_state *discov = &hdev->discovery;
1482 	switch (discov->state) {
1483 	case DISCOVERY_FINDING:
1484 	case DISCOVERY_RESOLVING:
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the edges that user space cares about: entering FINDING
 * reports discovering=1; entering STOPPED (except from STARTING, where
 * discovery never actually began) reports discovering=0. */
1492 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1494 	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
/* No-op if the state is unchanged. */
1496 	if (hdev->discovery.state == state)
1500 	case DISCOVERY_STOPPED:
1501 		if (hdev->discovery.state != DISCOVERY_STARTING)
1502 			mgmt_discovering(hdev, 0);
1504 	case DISCOVERY_STARTING:
1506 	case DISCOVERY_FINDING:
1507 		mgmt_discovering(hdev, 1);
1509 	case DISCOVERY_RESOLVING:
1511 	case DISCOVERY_STOPPING:
1515 	hdev->discovery.state = state;
/* Drop every entry from the inquiry cache and reset the auxiliary
 * "unknown" and "resolve" lists. Caller is expected to hold hdev->lock
 * (all callers in this file invoke it between hci_dev_lock/unlock). */
1518 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1520 	struct discovery_state *cache = &hdev->discovery;
1521 	struct inquiry_entry *p, *n;
/* Safe iteration: entries are unlinked/freed while walking (freeing
 * lines are elided in this listing). */
1523 	list_for_each_entry_safe(p, n, &cache->all, all) {
1528 	INIT_LIST_HEAD(&cache->unknown);
1529 	INIT_LIST_HEAD(&cache->resolve);
/* Find the inquiry-cache entry for @bdaddr by scanning the full "all"
 * list; returns the entry or (in elided lines) NULL when not cached. */
1532 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1535 	struct discovery_state *cache = &hdev->discovery;
1536 	struct inquiry_entry *e;
1538 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1540 	list_for_each_entry(e, &cache->all, all) {
1541 		if (!bacmp(&e->data.bdaddr, bdaddr))
/* Like hci_inquiry_cache_lookup(), but restricted to the "unknown"
 * list, i.e. entries whose remote name has not been resolved yet. */
1548 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1551 	struct discovery_state *cache = &hdev->discovery;
1552 	struct inquiry_entry *e;
1554 	BT_DBG("cache %p, %pMR", cache, bdaddr);
1556 	list_for_each_entry(e, &cache->unknown, list) {
1557 		if (!bacmp(&e->data.bdaddr, bdaddr))
/* Search the "resolve" list. Passing BDADDR_ANY matches the first entry
 * whose name_state equals @state; otherwise match on the exact address. */
1564 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1568 	struct discovery_state *cache = &hdev->discovery;
1569 	struct inquiry_entry *e;
1571 	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1573 	list_for_each_entry(e, &cache->resolve, list) {
1574 		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1576 		if (!bacmp(&e->data.bdaddr, bdaddr))
/* Re-insert @ie into the "resolve" list keeping it ordered by RSSI
 * magnitude, so that name resolution is attempted for the strongest
 * signals first; entries already NAME_PENDING keep their position. */
1583 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1584 				      struct inquiry_entry *ie)
1586 	struct discovery_state *cache = &hdev->discovery;
1587 	struct list_head *pos = &cache->resolve;
1588 	struct inquiry_entry *p;
/* Unlink, then find the first non-pending entry with a weaker (or equal)
 * signal and insert in front of it. */
1590 	list_del(&ie->list);
1592 	list_for_each_entry(p, &cache->resolve, list) {
1593 		if (p->name_state != NAME_PENDING &&
1594 		    abs(p->data.rssi) >= abs(ie->data.rssi))
1599 	list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the cache.
 *
 * @data:       the newly received inquiry data
 * @name_known: whether the remote name is already known to the caller
 * @ssp:        out-parameter; set when the cached entry had SSP enabled
 *
 * Return value (in elided lines) indicates whether a name request is
 * still needed for this device.
 */
1602 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1603 			      bool name_known, bool *ssp)
1605 	struct discovery_state *cache = &hdev->discovery;
1606 	struct inquiry_entry *ie;
1608 	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
/* Fresh inquiry data invalidates any stored OOB data for the device. */
1610 	hci_remove_remote_oob_data(hdev, &data->bdaddr);
1613 		*ssp = data->ssp_mode;
1615 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1617 		if (ie->data.ssp_mode && ssp)
/* Existing entry awaiting a name: if the RSSI changed, re-sort its
 * position in the resolve list. */
1620 		if (ie->name_state == NAME_NEEDED &&
1621 		    data->rssi != ie->data.rssi) {
1622 			ie->data.rssi = data->rssi;
1623 			hci_inquiry_cache_update_resolve(hdev, ie);
1629 	/* Entry not in the cache. Add new one. */
1630 	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1634 	list_add(&ie->all, &cache->all);
1637 		ie->name_state = NAME_KNOWN;
1639 		ie->name_state = NAME_NOT_KNOWN;
1640 		list_add(&ie->list, &cache->unknown);
/* Name became known for an entry previously queued for resolution:
 * promote it and drop it from the unknown/resolve bookkeeping. */
1644 	if (name_known && ie->name_state != NAME_KNOWN &&
1645 	    ie->name_state != NAME_PENDING) {
1646 		ie->name_state = NAME_KNOWN;
1647 		list_del(&ie->list);
1650 	memcpy(&ie->data, data, sizeof(*data));
1651 	ie->timestamp = jiffies;
1652 	cache->timestamp = jiffies;
1654 	if (ie->name_state == NAME_NOT_KNOWN)
/* Serialize up to @num cached inquiry entries into @buf as an array of
 * struct inquiry_info (the wire format of the HCIINQUIRY ioctl).
 * Returns (in elided lines) the number of entries copied. */
1660 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1662 	struct discovery_state *cache = &hdev->discovery;
1663 	struct inquiry_info *info = (struct inquiry_info *) buf;
1664 	struct inquiry_entry *e;
1667 	list_for_each_entry(e, &cache->all, all) {
1668 		struct inquiry_data *data = &e->data;
/* Field-by-field copy from the cache entry to the ioctl record. */
1673 		bacpy(&info->bdaddr, &data->bdaddr);
1674 		info->pscan_rep_mode = data->pscan_rep_mode;
1675 		info->pscan_period_mode = data->pscan_period_mode;
1676 		info->pscan_mode = data->pscan_mode;
1677 		memcpy(info->dev_class, data->dev_class, 3);
1678 		info->clock_offset = data->clock_offset;
1684 	BT_DBG("cache %p, copied %d", cache, copied);
/* HCI request builder: start an inquiry using the LAP/length/num_rsp
 * parameters packed in @opt (a struct hci_inquiry_req pointer). Does
 * nothing if an inquiry is already running. */
1688 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1690 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1691 	struct hci_dev *hdev = req->hdev;
1692 	struct hci_cp_inquiry cp;
1694 	BT_DBG("%s", hdev->name);
1696 	if (test_bit(HCI_INQUIRY, &hdev->flags))
1700 	memcpy(&cp.lap, &ir->lap, 3);
1701 	cp.length = ir->length;
1702 	cp.num_rsp = ir->num_rsp;
1703 	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1706 static int wait_inquiry(void *word)
1709 return signal_pending(current);
/* HCIINQUIRY ioctl backend: run (or reuse a fresh cache for) an inquiry
 * and copy the results back to user space.
 *
 * Rejects devices bound to a user channel, non-BR/EDR devices, and
 * devices with BR/EDR disabled. Returns 0 on success or a negative
 * errno (-EFAULT, -ENODEV, -EBUSY, -EOPNOTSUPP, -EINTR, ...).
 */
1712 int hci_inquiry(void __user *arg)
1714 	__u8 __user *ptr = arg;
1715 	struct hci_inquiry_req ir;
1716 	struct hci_dev *hdev;
1717 	int err = 0, do_inquiry = 0, max_rsp;
1721 	if (copy_from_user(&ir, ptr, sizeof(ir)))
1724 	hdev = hci_dev_get(ir.dev_id);
1728 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1733 	if (hdev->dev_type != HCI_BREDR) {
1738 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* A stale or empty cache (or an explicit flush request) forces a new
 * inquiry; otherwise cached results are served. */
1744 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1745 	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1746 		hci_inquiry_cache_flush(hdev);
1749 	hci_dev_unlock(hdev);
/* ir.length is in units of 1.28s per spec; approximated as 2000ms here. */
1751 	timeo = ir.length * msecs_to_jiffies(2000);
1754 		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1759 	/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1760 	 * cleared). If it is interrupted by a signal, return -EINTR.
1762 	if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1763 			TASK_INTERRUPTIBLE))
1767 	/* for unlimited number of responses we will use buffer with
1770 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1772 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
1773 	 * copy it to the user space.
1775 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1782 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1783 	hci_dev_unlock(hdev);
1785 	BT_DBG("num_rsp %d", ir.num_rsp);
/* Two-stage copy-out: the request header first, then the result array. */
1787 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1789 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
/* Core power-on path shared by the ioctl and the mgmt/power_on work:
 * validates preconditions, opens the driver, runs vendor setup and the
 * HCI init sequence, then marks the device up. On init failure all work
 * is flushed and the queues are purged before returning the error. */
1802 static int hci_dev_do_open(struct hci_dev *hdev)
1806 	BT_DBG("%s %p", hdev->name, hdev);
1810 	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1815 	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1816 		/* Check for rfkill but allow the HCI setup stage to
1817 		 * proceed (which in itself doesn't cause any RF activity).
1819 		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1824 		/* Check for valid public address or a configured static
1825 		 * random adddress, but let the HCI setup proceed to
1826 		 * be able to determine if there is a public address
1829 		 * This check is only valid for BR/EDR controllers
1830 		 * since AMP controllers do not have an address.
1832 		if (hdev->dev_type == HCI_BREDR &&
1833 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1834 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1835 			ret = -EADDRNOTAVAIL;
/* Already up: nothing to do (success path in elided lines). */
1840 	if (test_bit(HCI_UP, &hdev->flags)) {
1845 	if (hdev->open(hdev)) {
1850 	atomic_set(&hdev->cmd_cnt, 1);
1851 	set_bit(HCI_INIT, &hdev->flags);
/* Vendor-specific setup runs only once, during the SETUP phase. */
1853 	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1854 		ret = hdev->setup(hdev);
1857 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1858 		set_bit(HCI_RAW, &hdev->flags);
/* Full HCI init sequence is skipped for raw and user-channel devices. */
1860 	if (!test_bit(HCI_RAW, &hdev->flags) &&
1861 	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1862 		ret = __hci_init(hdev);
1865 	clear_bit(HCI_INIT, &hdev->flags);
1869 		set_bit(HCI_UP, &hdev->flags);
1870 		hci_notify(hdev, HCI_DEV_UP);
1871 		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1872 		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1873 		    hdev->dev_type == HCI_BREDR) {
1875 			mgmt_powered(hdev, 1);
1876 			hci_dev_unlock(hdev);
1879 		/* Init failed, cleanup */
1880 		flush_work(&hdev->tx_work);
1881 		flush_work(&hdev->cmd_work);
1882 		flush_work(&hdev->rx_work);
1884 		skb_queue_purge(&hdev->cmd_q);
1885 		skb_queue_purge(&hdev->rx_q);
1890 		if (hdev->sent_cmd) {
1891 			kfree_skb(hdev->sent_cmd);
1892 			hdev->sent_cmd = NULL;
1900 	hci_req_unlock(hdev);
1904 /* ---- HCI ioctl helpers ---- */
/* HCIDEVUP ioctl backend: cancel any pending auto-power-off, wait for
 * the setup procedure to finish, then call hci_dev_do_open(). */
1906 int hci_dev_open(__u16 dev)
1908 	struct hci_dev *hdev;
1911 	hdev = hci_dev_get(dev);
1915 	/* We need to ensure that no other power on/off work is pending
1916 	 * before proceeding to call hci_dev_do_open. This is
1917 	 * particularly important if the setup procedure has not yet
1920 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1921 		cancel_delayed_work(&hdev->power_off);
1923 	/* After this call it is guaranteed that the setup procedure
1924 	 * has finished. This means that error conditions like RFKILL
1925 	 * or no valid public or static random address apply.
1927 	flush_workqueue(hdev->req_workqueue);
1929 	err = hci_dev_do_open(hdev);
/* Core power-off path: cancel pending work, flush the data paths, tear
 * down discovery/connection state, optionally issue HCI_Reset, drop all
 * queued frames and the last sent command, and finally close the driver.
 * Safe to call on an already-down device (early return in that case). */
1936 static int hci_dev_do_close(struct hci_dev *hdev)
1938 	BT_DBG("%s %p", hdev->name, hdev);
1940 	cancel_delayed_work(&hdev->power_off);
1942 	hci_req_cancel(hdev, ENODEV);
/* Not up: just stop the command timer and bail out. */
1945 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1946 		del_timer_sync(&hdev->cmd_timer);
1947 		hci_req_unlock(hdev);
1951 	/* Flush RX and TX works */
1952 	flush_work(&hdev->tx_work);
1953 	flush_work(&hdev->rx_work);
1955 	if (hdev->discov_timeout > 0) {
1956 		cancel_delayed_work(&hdev->discov_off);
1957 		hdev->discov_timeout = 0;
1958 		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1959 		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1962 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1963 		cancel_delayed_work(&hdev->service_cache);
1965 	cancel_delayed_work_sync(&hdev->le_scan_disable);
1968 	hci_inquiry_cache_flush(hdev);
1969 	hci_conn_hash_flush(hdev);
1970 	hci_dev_unlock(hdev);
1972 	hci_notify(hdev, HCI_DEV_DOWN);
/* Optional clean shutdown: send HCI_Reset unless raw/auto-off, gated by
 * the RESET_ON_CLOSE quirk. */
1978 	skb_queue_purge(&hdev->cmd_q);
1979 	atomic_set(&hdev->cmd_cnt, 1);
1980 	if (!test_bit(HCI_RAW, &hdev->flags) &&
1981 	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1982 	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1983 		set_bit(HCI_INIT, &hdev->flags);
1984 		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1985 		clear_bit(HCI_INIT, &hdev->flags);
1988 	/* flush cmd  work */
1989 	flush_work(&hdev->cmd_work);
1992 	skb_queue_purge(&hdev->rx_q);
1993 	skb_queue_purge(&hdev->cmd_q);
1994 	skb_queue_purge(&hdev->raw_q);
1996 	/* Drop last sent command */
1997 	if (hdev->sent_cmd) {
1998 		del_timer_sync(&hdev->cmd_timer);
1999 		kfree_skb(hdev->sent_cmd);
2000 		hdev->sent_cmd = NULL;
2003 	kfree_skb(hdev->recv_evt);
2004 	hdev->recv_evt = NULL;
2006 	/* After this point our queues are empty
2007 	 * and no tasks are scheduled. */
/* Clear every flag that should not survive a power cycle. */
2012 	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2014 	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2015 		if (hdev->dev_type == HCI_BREDR) {
2017 			mgmt_powered(hdev, 0);
2018 			hci_dev_unlock(hdev);
2022 	/* Controller radio is available but is currently powered down */
2023 	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2025 	memset(hdev->eir, 0, sizeof(hdev->eir));
2026 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2028 	hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl backend: refuse user-channel devices, cancel pending
 * auto-power-off, then run the shared close path. */
2034 int hci_dev_close(__u16 dev)
2036 	struct hci_dev *hdev;
2039 	hdev = hci_dev_get(dev);
2043 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2048 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2049 		cancel_delayed_work(&hdev->power_off);
2051 	err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl backend: drop all queued traffic and cached state,
 * reset the flow-control counters, and (for non-raw devices) issue an
 * HCI_Reset synchronously. Device must be up and not user-channel. */
2058 int hci_dev_reset(__u16 dev)
2060 	struct hci_dev *hdev;
2063 	hdev = hci_dev_get(dev);
2069 	if (!test_bit(HCI_UP, &hdev->flags)) {
2074 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2080 	skb_queue_purge(&hdev->rx_q);
2081 	skb_queue_purge(&hdev->cmd_q);
2084 	hci_inquiry_cache_flush(hdev);
2085 	hci_conn_hash_flush(hdev);
2086 	hci_dev_unlock(hdev);
/* Reset outstanding-packet accounting for all three link types. */
2091 	atomic_set(&hdev->cmd_cnt, 1);
2092 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2094 	if (!test_bit(HCI_RAW, &hdev->flags))
2095 		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2098 	hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl backend: zero the per-device traffic statistics
 * (refused for user-channel devices). */
2103 int hci_dev_reset_stat(__u16 dev)
2105 	struct hci_dev *hdev;
2108 	hdev = hci_dev_get(dev);
2112 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2117 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* device-configuration ioctls. Copies the
 * hci_dev_req from user space, validates the device (BR/EDR, enabled,
 * not user-channel), then either runs a synchronous HCI request (auth,
 * encrypt, scan, link policy) or updates fields on hdev directly. */
2124 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2126 	struct hci_dev *hdev;
2127 	struct hci_dev_req dr;
2130 	if (copy_from_user(&dr, arg, sizeof(dr)))
2133 	hdev = hci_dev_get(dr.dev_id);
2137 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2142 	if (hdev->dev_type != HCI_BREDR) {
2147 	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2154 		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2159 		if (!lmp_encrypt_capable(hdev)) {
/* Encryption requires authentication to be enabled first. */
2164 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
2165 			/* Auth must be enabled first */
2166 			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2172 		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2177 		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2182 		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2186 	case HCISETLINKMODE:
2187 		hdev->link_mode = ((__u16) dr.dev_opt) &
2188 				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
2192 		hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high half-word and packet count in the low. */
2196 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2197 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2201 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2202 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl backend: return the (id, flags) pair for every
 * registered device, up to the count requested by user space (capped so
 * the kernel buffer stays within two pages). */
2215 int hci_get_dev_list(void __user *arg)
2217 	struct hci_dev *hdev;
2218 	struct hci_dev_list_req *dl;
2219 	struct hci_dev_req *dr;
2220 	int n = 0, size, err;
2223 	if (get_user(dev_num, (__u16 __user *) arg))
2226 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2229 	size = sizeof(*dl) + dev_num * sizeof(*dr);
2231 	dl = kzalloc(size, GFP_KERNEL);
2237 	read_lock(&hci_dev_list_lock);
2238 	list_for_each_entry(hdev, &hci_dev_list, list) {
/* Legacy ioctl access cancels auto-power-off and, without mgmt in use,
 * marks the device pairable. */
2239 		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2240 			cancel_delayed_work(&hdev->power_off);
2242 		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2243 			set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2245 		(dr + n)->dev_id  = hdev->id;
2246 		(dr + n)->dev_opt = hdev->flags;
2251 	read_unlock(&hci_dev_list_lock);
/* Shrink the copy-out to the number of devices actually found. */
2254 	size = sizeof(*dl) + n * sizeof(*dr);
2256 	err = copy_to_user(arg, dl, size);
2259 	return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot of
 * one device (addresses, flags, MTUs, link policy/mode, stats, features)
 * and copy it to user space. */
2262 int hci_get_dev_info(void __user *arg)
2264 	struct hci_dev *hdev;
2265 	struct hci_dev_info di;
2268 	if (copy_from_user(&di, arg, sizeof(di)))
2271 	hdev = hci_dev_get(di.dev_id);
2275 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2276 		cancel_delayed_work_sync(&hdev->power_off);
2278 	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2279 		set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2281 	strcpy(di.name, hdev->name);
2282 	di.bdaddr   = hdev->bdaddr;
/* type encodes bus in the low nibble and dev_type in bits 4-5. */
2283 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2284 	di.flags    = hdev->flags;
2285 	di.pkt_type = hdev->pkt_type;
/* LE-only controllers report their LE buffer settings in the ACL slots. */
2286 	if (lmp_bredr_capable(hdev)) {
2287 		di.acl_mtu  = hdev->acl_mtu;
2288 		di.acl_pkts = hdev->acl_pkts;
2289 		di.sco_mtu  = hdev->sco_mtu;
2290 		di.sco_pkts = hdev->sco_pkts;
2292 		di.acl_mtu  = hdev->le_mtu;
2293 		di.acl_pkts = hdev->le_pkts;
2297 	di.link_policy = hdev->link_policy;
2298 	di.link_mode   = hdev->link_mode;
2300 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2301 	memcpy(&di.features, &hdev->features, sizeof(di.features));
2303 	if (copy_to_user(arg, &di, sizeof(di)))
2311 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: track the block state in HCI_RFKILLED and power the
 * device down when blocked (unless it is still in the setup phase,
 * where power-down is deferred to hci_power_on's re-check). Devices
 * bound to a user channel reject the block request. */
2313 static int hci_rfkill_set_block(void *data, bool blocked)
2315 	struct hci_dev *hdev = data;
2317 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2319 	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2323 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
2324 		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2325 			hci_dev_do_close(hdev);
2327 		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
/* rfkill operations table registered per device in hci_register_dev(). */
2333 static const struct rfkill_ops hci_rfkill_ops = {
2334 	.set_block = hci_rfkill_set_block,
/* Deferred power-on work (queued on the req_workqueue): open the device,
 * then re-validate conditions that were tolerated during setup (rfkill,
 * missing public/static address) and power back off if they still hold.
 * Also arms the auto-off timer and, at the end of the setup phase,
 * announces the new index to the mgmt interface. */
2337 static void hci_power_on(struct work_struct *work)
2339 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2342 	BT_DBG("%s", hdev->name);
2344 	err = hci_dev_do_open(hdev);
2346 		mgmt_set_powered_failed(hdev, err);
2350 	/* During the HCI setup phase, a few error conditions are
2351 	 * ignored and they need to be checked now. If they are still
2352 	 * valid, it is important to turn the device back off.
2354 	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2355 	    (hdev->dev_type == HCI_BREDR &&
2356 	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2357 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2358 		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2359 		hci_dev_do_close(hdev);
2360 	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2361 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2362 				   HCI_AUTO_OFF_TIMEOUT);
2365 	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2366 		mgmt_index_added(hdev);
/* Deferred power-off work (auto-off timer / mgmt): close the device. */
2369 static void hci_power_off(struct work_struct *work)
2371 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2374 	BT_DBG("%s", hdev->name);
2376 	hci_dev_do_close(hdev);
/* Delayed work fired when the discoverable timeout expires; delegates
 * the state change to the mgmt layer. */
2379 static void hci_discov_off(struct work_struct *work)
2381 	struct hci_dev *hdev;
2383 	hdev = container_of(work, struct hci_dev, discov_off.work);
2385 	BT_DBG("%s", hdev->name);
2387 	mgmt_discoverable_timeout(hdev);
/* Remove and free every registered service UUID on the device. */
2390 int hci_uuids_clear(struct hci_dev *hdev)
2392 	struct bt_uuid *uuid, *tmp;
2394 	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2395 		list_del(&uuid->list);
/* Remove and free every stored BR/EDR link key on the device. */
2402 int hci_link_keys_clear(struct hci_dev *hdev)
2404 	struct list_head *p, *n;
2406 	list_for_each_safe(p, n, &hdev->link_keys) {
2407 		struct link_key *key;
2409 		key = list_entry(p, struct link_key, list);
/* Remove and free every stored SMP long-term key on the device. */
2418 int hci_smp_ltks_clear(struct hci_dev *hdev)
2420 	struct smp_ltk *k, *tmp;
2422 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Look up the stored BR/EDR link key for @bdaddr (NULL if none, per the
 * elided fallthrough). */
2430 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2434 	list_for_each_entry(k, &hdev->link_keys, list)
2435 		if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and the authentication requirements both sides
 * declared during pairing. Legacy key types (< 0x03) and debug keys are
 * never persistent. */
2441 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2442 			       u8 key_type, u8 old_key_type)
/* Legacy pairing key types */
2445 	if (key_type < 0x03)
2448 	/* Debug keys are insecure so don't store them persistently */
2449 	if (key_type == HCI_LK_DEBUG_COMBINATION)
2452 	/* Changed combination key and there's no previous one */
2453 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2456 	/* Security mode 3 case */
2460 	/* Neither local nor remote side had no-bonding as requirement */
2461 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2464 	/* Local side had dedicated bonding as requirement */
2465 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2468 	/* Remote side had dedicated bonding as requirement */
2469 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2472 	/* If none of the above criteria match, then don't store the key
/* Look up an LTK by its EDiv/Rand pair, which identify the key a master
 * asks a slave to use during LE encryption setup. */
2477 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2481 	list_for_each_entry(k, &hdev->long_term_keys, list) {
2482 		if (k->ediv != ediv ||
2483 		    memcmp(rand, k->rand, sizeof(k->rand)))
/* Look up an LTK by remote address and address type. */
2492 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2497 	list_for_each_entry(k, &hdev->long_term_keys, list)
2498 		if (addr_type == k->bdaddr_type &&
2499 		    bacmp(bdaddr, &k->bdaddr) == 0)
/* Store (or update) a BR/EDR link key.
 *
 * Reuses an existing entry for the address when present; otherwise
 * allocates a new one with GFP_ATOMIC (called from event context).
 * Works around controllers that report a "changed combination" key when
 * no previous key existed, decides persistence via hci_persistent_key(),
 * and notifies mgmt when @new_key is set.
 */
2505 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2506 		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2508 	struct link_key *key, *old_key;
2512 	old_key = hci_find_link_key(hdev, bdaddr);
2514 		old_key_type = old_key->type;
2517 		old_key_type = conn ? conn->key_type : 0xff;
2518 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
2521 		list_add(&key->list, &hdev->link_keys);
2524 	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2526 	/* Some buggy controller combinations generate a changed
2527 	 * combination key for legacy pairing even when there's no
2529 	if (type == HCI_LK_CHANGED_COMBINATION &&
2530 	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2531 		type = HCI_LK_COMBINATION;
2533 			conn->key_type = type;
2536 	bacpy(&key->bdaddr, bdaddr);
2537 	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2538 	key->pin_len = pin_len;
/* A changed-combination key keeps the previous key's recorded type. */
2540 	if (type == HCI_LK_CHANGED_COMBINATION)
2541 		key->type = old_key_type;
2548 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2550 		mgmt_new_link_key(hdev, key, persistent);
/* Non-persistent keys are flushed when the connection drops. */
2553 		conn->flush_key = !persistent;
/* Store (or update) an SMP long-term key (STK or LTK). Reuses an
 * existing entry for the address when present; otherwise allocates with
 * GFP_ATOMIC. mgmt is notified only for real LTKs with @new_key set. */
2558 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2559 		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2562 	struct smp_ltk *key, *old_key;
2564 	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2567 	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2571 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
2574 		list_add(&key->list, &hdev->long_term_keys);
2577 	bacpy(&key->bdaddr, bdaddr);
2578 	key->bdaddr_type = addr_type;
2579 	memcpy(key->val, tk, sizeof(key->val));
2580 	key->authenticated = authenticated;
2582 	key->enc_size = enc_size;
2584 	memcpy(key->rand, rand, sizeof(key->rand));
2589 	if (type & HCI_SMP_LTK)
2590 		mgmt_new_ltk(hdev, key, 1);
/* Delete the stored BR/EDR link key for @bdaddr, if any. */
2595 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2597 	struct link_key *key;
2599 	key = hci_find_link_key(hdev, bdaddr);
2603 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2605 	list_del(&key->list);
/* Delete every stored long-term key for @bdaddr (a device may have more
 * than one, hence the safe full-list walk). */
2611 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2613 	struct smp_ltk *k, *tmp;
2615 	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2616 		if (bacmp(bdaddr, &k->bdaddr))
2619 		BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2628 /* HCI command timer function */
/* Fires when the controller fails to answer a command in time: log the
 * opcode of the outstanding command (if known), then release the command
 * credit and kick the command work so the queue does not stall. */
2629 static void hci_cmd_timeout(unsigned long arg)
2631 	struct hci_dev *hdev = (void *) arg;
2633 	if (hdev->sent_cmd) {
2634 		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2635 		u16 opcode = __le16_to_cpu(sent->opcode);
2637 		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2639 		BT_ERR("%s command tx timeout", hdev->name);
2642 	atomic_set(&hdev->cmd_cnt, 1);
2643 	queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored out-of-band pairing data for @bdaddr. */
2646 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2649 	struct oob_data *data;
2651 	list_for_each_entry(data, &hdev->remote_oob_data, list)
2652 		if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete the stored OOB data for @bdaddr, if any. */
2658 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2660 	struct oob_data *data;
2662 	data = hci_find_remote_oob_data(hdev, bdaddr);
2666 	BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2668 	list_del(&data->list);
/* Remove and free all stored remote OOB data entries. */
2674 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2676 	struct oob_data *data, *n;
2678 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2679 		list_del(&data->list);
/* Store (or update) the OOB hash/randomizer for @bdaddr; a new entry is
 * allocated with GFP_ATOMIC when none exists for the address. */
2686 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2689 	struct oob_data *data;
2691 	data = hci_find_remote_oob_data(hdev, bdaddr);
2694 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
2698 		bacpy(&data->bdaddr, bdaddr);
2699 		list_add(&data->list, &hdev->remote_oob_data);
2702 	memcpy(data->hash, hash, sizeof(data->hash));
2703 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2705 	BT_DBG("%s for %pMR", hdev->name, bdaddr);
/* Find a blacklist entry matching both address and address type. */
2710 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2711 					 bdaddr_t *bdaddr, u8 type)
2713 	struct bdaddr_list *b;
2715 	list_for_each_entry(b, &hdev->blacklist, list) {
2716 		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
/* Remove and free every blacklist entry on the device. */
2723 int hci_blacklist_clear(struct hci_dev *hdev)
2725 	struct list_head *p, *n;
2727 	list_for_each_safe(p, n, &hdev->blacklist) {
2728 		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
/* Add @bdaddr/@type to the blacklist. BDADDR_ANY and duplicates are
 * rejected (error paths in elided lines); on success mgmt is told the
 * device is now blocked. */
2737 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2739 	struct bdaddr_list *entry;
2741 	if (!bacmp(bdaddr, BDADDR_ANY))
2744 	if (hci_blacklist_lookup(hdev, bdaddr, type))
2747 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2751 	bacpy(&entry->bdaddr, bdaddr);
2752 	entry->bdaddr_type = type;
2754 	list_add(&entry->list, &hdev->blacklist);
2756 	return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove @bdaddr/@type from the blacklist; BDADDR_ANY clears the whole
 * list. On success mgmt is told the device is unblocked. */
2759 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2761 	struct bdaddr_list *entry;
2763 	if (!bacmp(bdaddr, BDADDR_ANY))
2764 		return hci_blacklist_clear(hdev);
2766 	entry = hci_blacklist_lookup(hdev, bdaddr, type);
2770 	list_del(&entry->list);
2773 	return mgmt_device_unblocked(hdev, bdaddr, type);
/* Request-complete callback for the interleaved-discovery inquiry: on
 * failure, log and move discovery back to STOPPED. */
2776 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2779 		BT_ERR("Failed to start inquiry: status %d", status);
2782 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2783 	hci_dev_unlock(hdev);
/* Completion handler after LE scanning was disabled: for LE-only
 * discovery the procedure is finished (state -> STOPPED); for
 * interleaved discovery a BR/EDR inquiry with the GIAC is started next. */
2788 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2790 	/* General inquiry access code (GIAC) */
2791 	u8 lap[3] = { 0x33, 0x8b, 0x9e };
2792 	struct hci_request req;
2793 	struct hci_cp_inquiry cp;
2797 		BT_ERR("Failed to disable LE scanning: status %d", status);
2801 	switch (hdev->discovery.type) {
2802 	case DISCOV_TYPE_LE:
2804 		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2805 		hci_dev_unlock(hdev);
2808 	case DISCOV_TYPE_INTERLEAVED:
2809 		hci_req_init(&req, hdev);
2811 		memset(&cp, 0, sizeof(cp));
2812 		memcpy(&cp.lap, lap, sizeof(cp.lap));
2813 		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2814 		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Fresh inquiry phase: drop stale cache entries first. */
2818 		hci_inquiry_cache_flush(hdev);
2820 		err = hci_req_run(&req, inquiry_complete);
2822 			BT_ERR("Inquiry request failed: err %d", err);
2823 			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2826 		hci_dev_unlock(hdev);
/* Delayed work that ends an LE scan: submits LE Set Scan Enable
 * (disable) and chains into le_scan_disable_work_complete(). */
2831 static void le_scan_disable_work(struct work_struct *work)
2833 	struct hci_dev *hdev = container_of(work, struct hci_dev,
2834 					    le_scan_disable.work);
2835 	struct hci_cp_le_set_scan_enable cp;
2836 	struct hci_request req;
2839 	BT_DBG("%s", hdev->name);
2841 	hci_req_init(&req, hdev);
2843 	memset(&cp, 0, sizeof(cp));
2844 	cp.enable = LE_SCAN_DISABLE;
2845 	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2847 	err = hci_req_run(&req, le_scan_disable_work_complete);
2849 		BT_ERR("Disable LE scanning request failed: err %d", err);
2852 /* Alloc HCI device */
/* Allocate and initialize a struct hci_dev with conservative defaults
 * (packet types, sniff/scan/connection interval parameters), empty state
 * lists, the rx/tx/cmd work items, the delayed power/discovery/LE-scan
 * works, the packet queues and the command timer. Driver fills in ops
 * and bus info before hci_register_dev(). */
2853 struct hci_dev *hci_alloc_dev(void)
2855 	struct hci_dev *hdev;
2857 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2861 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2862 	hdev->esco_type = (ESCO_HV1);
2863 	hdev->link_mode = (HCI_LM_ACCEPT);
2864 	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
2865 	hdev->io_capability = 0x03;	/* No Input No Output */
2866 	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2867 	hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2869 	hdev->sniff_max_interval = 800;
2870 	hdev->sniff_min_interval = 80;
2872 	hdev->le_scan_interval = 0x0060;
2873 	hdev->le_scan_window = 0x0030;
2874 	hdev->le_conn_min_interval = 0x0028;
2875 	hdev->le_conn_max_interval = 0x0038;
2877 	mutex_init(&hdev->lock);
2878 	mutex_init(&hdev->req_lock);
2880 	INIT_LIST_HEAD(&hdev->mgmt_pending);
2881 	INIT_LIST_HEAD(&hdev->blacklist);
2882 	INIT_LIST_HEAD(&hdev->uuids);
2883 	INIT_LIST_HEAD(&hdev->link_keys);
2884 	INIT_LIST_HEAD(&hdev->long_term_keys);
2885 	INIT_LIST_HEAD(&hdev->remote_oob_data);
2886 	INIT_LIST_HEAD(&hdev->conn_hash.list);
2888 	INIT_WORK(&hdev->rx_work, hci_rx_work);
2889 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2890 	INIT_WORK(&hdev->tx_work, hci_tx_work);
2891 	INIT_WORK(&hdev->power_on, hci_power_on);
2893 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2894 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2895 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2897 	skb_queue_head_init(&hdev->rx_q);
2898 	skb_queue_head_init(&hdev->cmd_q);
2899 	skb_queue_head_init(&hdev->raw_q);
2901 	init_waitqueue_head(&hdev->req_wait_q);
2903 	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2905 	hci_init_sysfs(hdev);
2906 	discovery_init(hdev);
2910 EXPORT_SYMBOL(hci_alloc_dev);
2912 /* Free HCI device */
/* Drop the embedded device reference; the actual kfree happens in the
 * device release callback once the last reference is gone. */
2913 void hci_free_dev(struct hci_dev *hdev)
2915 	/* will free via device release */
2916 	put_device(&hdev->dev);
2918 EXPORT_SYMBOL(hci_free_dev);
2920 /* Register HCI device */
/* Register a driver-allocated hci_dev: assign an index from the IDA,
 * create the two per-device workqueues, the debugfs directory, the
 * sysfs device and the rfkill switch, add it to the global list, and
 * queue the initial power-on work. Returns 0 or a negative errno; on
 * error all partially created resources are unwound. */
2921 int hci_register_dev(struct hci_dev *hdev)
2925 	if (!hdev->open || !hdev->close)
2928 	/* Do not allow HCI_AMP devices to register at index 0,
2929 	 * so the index can be used as the AMP controller ID.
2931 	switch (hdev->dev_type) {
2933 		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2936 		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2945 	sprintf(hdev->name, "hci%d", id);
2948 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* Single-threaded, high-priority workqueues: one for rx/tx/cmd work,
 * one for serialized request processing (power on/off). */
2950 	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2951 					  WQ_MEM_RECLAIM, 1, hdev->name);
2952 	if (!hdev->workqueue) {
2957 	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2958 					      WQ_MEM_RECLAIM, 1, hdev->name);
2959 	if (!hdev->req_workqueue) {
2960 		destroy_workqueue(hdev->workqueue);
2965 	if (!IS_ERR_OR_NULL(bt_debugfs))
2966 		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2968 	dev_set_name(&hdev->dev, "%s", hdev->name);
2970 	error = device_add(&hdev->dev);
/* rfkill registration is best-effort: failure just leaves rfkill NULL. */
2974 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2975 				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2978 		if (rfkill_register(hdev->rfkill) < 0) {
2979 			rfkill_destroy(hdev->rfkill);
2980 			hdev->rfkill = NULL;
2984 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2985 		set_bit(HCI_RFKILLED, &hdev->dev_flags);
2987 	set_bit(HCI_SETUP, &hdev->dev_flags);
2988 	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2990 	if (hdev->dev_type == HCI_BREDR) {
2991 		/* Assume BR/EDR support until proven otherwise (such as
2992 		 * through reading supported features during init.
2994 		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2997 	write_lock(&hci_dev_list_lock);
2998 	list_add(&hdev->list, &hci_dev_list);
2999 	write_unlock(&hci_dev_list_lock);
3001 	hci_notify(hdev, HCI_DEV_REG);
3004 	queue_work(hdev->req_workqueue, &hdev->power_on);
/* Error unwind labels (elided): free workqueues and release the id. */
3009 	destroy_workqueue(hdev->workqueue);
3010 	destroy_workqueue(hdev->req_workqueue);
3012 	ida_simple_remove(&hci_index_ida, hdev->id);
3016 EXPORT_SYMBOL(hci_register_dev);
3018 /* Unregister HCI device */
/* Tear down a registered device: mark it unregistering, unlink it from
 * the global list, close it, free reassembly buffers, notify mgmt (when
 * applicable), unregister rfkill/sysfs/debugfs, destroy the workqueues,
 * clear all persistent key/OOB/blacklist state and release the index. */
3019 void hci_unregister_dev(struct hci_dev *hdev)
3023 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3025 	set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3029 	write_lock(&hci_dev_list_lock);
3030 	list_del(&hdev->list);
3031 	write_unlock(&hci_dev_list_lock);
3033 	hci_dev_do_close(hdev);
3035 	for (i = 0; i < NUM_REASSEMBLY; i++)
3036 		kfree_skb(hdev->reassembly[i]);
3038 	cancel_work_sync(&hdev->power_on);
/* Only announce removal to mgmt when setup/init had completed. */
3040 	if (!test_bit(HCI_INIT, &hdev->flags) &&
3041 	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3043 		mgmt_index_removed(hdev);
3044 		hci_dev_unlock(hdev);
3047 	/* mgmt_index_removed should take care of emptying the
3049 	BUG_ON(!list_empty(&hdev->mgmt_pending));
3051 	hci_notify(hdev, HCI_DEV_UNREG);
3054 		rfkill_unregister(hdev->rfkill);
3055 		rfkill_destroy(hdev->rfkill);
3058 	device_del(&hdev->dev);
3060 	debugfs_remove_recursive(hdev->debugfs);
3062 	destroy_workqueue(hdev->workqueue);
3063 	destroy_workqueue(hdev->req_workqueue);
3066 	hci_blacklist_clear(hdev);
3067 	hci_uuids_clear(hdev);
3068 	hci_link_keys_clear(hdev);
3069 	hci_smp_ltks_clear(hdev);
3070 	hci_remote_oob_data_clear(hdev);
3071 	hci_dev_unlock(hdev);
3075 	ida_simple_remove(&hci_index_ida, id);
3079 /* Suspend HCI device */
/* Notifies listeners (via hci_sock_dev_event) that the device suspended. */
3080 int hci_suspend_dev(struct hci_dev *hdev)
3082 hci_notify(hdev, HCI_DEV_SUSPEND);
3085 EXPORT_SYMBOL(hci_suspend_dev);
3087 /* Resume HCI device */
/* Notifies listeners (via hci_sock_dev_event) that the device resumed. */
3088 int hci_resume_dev(struct hci_dev *hdev)
3090 hci_notify(hdev, HCI_DEV_RESUME);
3093 EXPORT_SYMBOL(hci_resume_dev);
3095 /* Receive frame from HCI drivers */
/* Entry point for transport drivers to hand a complete HCI packet to the
 * core. Frames are only accepted while the device is up or initializing;
 * accepted skbs are timestamped, queued on rx_q and processed later by
 * hci_rx_work on the device workqueue.
 */
3096 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* Reject frames when there is no device or it is neither UP nor in INIT. */
3098 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
3099 && !test_bit(HCI_INIT, &hdev->flags))) {
/* Mark direction for the socket/monitor copies. */
3105 bt_cb(skb)->incoming = 1;
3108 __net_timestamp(skb);
3110 skb_queue_tail(&hdev->rx_q, skb);
3111 queue_work(hdev->workqueue, &hdev->rx_work);
3115 EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble one HCI packet of the given type from a byte
 * stream. State is kept in hdev->reassembly[index]; once the expected
 * length (header + payload) has been accumulated the completed skb is
 * handed to hci_recv_frame() and the slot is cleared.
 * NOTE(review): interior lines (loop structure, return paths) are elided
 * in this view — the remaining-byte-count return value is presumed; confirm
 * against the full source.
 */
3117 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3118 int count, __u8 index)
3123 struct sk_buff *skb;
3124 struct bt_skb_cb *scb;
/* Validate packet type range and the reassembly slot index. */
3126 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3127 index >= NUM_REASSEMBLY)
3130 skb = hdev->reassembly[index];
/* Pick the maximum frame size and header length for this packet type. */
3134 case HCI_ACLDATA_PKT:
3135 len = HCI_MAX_FRAME_SIZE;
3136 hlen = HCI_ACL_HDR_SIZE;
3139 len = HCI_MAX_EVENT_SIZE;
3140 hlen = HCI_EVENT_HDR_SIZE;
3142 case HCI_SCODATA_PKT:
3143 len = HCI_MAX_SCO_SIZE;
3144 hlen = HCI_SCO_HDR_SIZE;
/* No reassembly in progress for this slot: start a fresh skb. */
3148 skb = bt_skb_alloc(len, GFP_ATOMIC);
3152 scb = (void *) skb->cb;
3154 scb->pkt_type = type;
3156 hdev->reassembly[index] = skb;
/* Copy as much as is available, bounded by what is still expected. */
3160 scb = (void *) skb->cb;
3161 len = min_t(uint, scb->expect, count);
3163 memcpy(skb_put(skb, len), data, len);
/* Header complete: learn the payload length from the event header. */
3172 if (skb->len == HCI_EVENT_HDR_SIZE) {
3173 struct hci_event_hdr *h = hci_event_hdr(skb);
3174 scb->expect = h->plen;
/* Payload would overflow the allocation — drop this reassembly. */
3176 if (skb_tailroom(skb) < scb->expect) {
3178 hdev->reassembly[index] = NULL;
3184 case HCI_ACLDATA_PKT:
3185 if (skb->len == HCI_ACL_HDR_SIZE) {
3186 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3187 scb->expect = __le16_to_cpu(h->dlen);
3189 if (skb_tailroom(skb) < scb->expect) {
3191 hdev->reassembly[index] = NULL;
3197 case HCI_SCODATA_PKT:
3198 if (skb->len == HCI_SCO_HDR_SIZE) {
3199 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3200 scb->expect = h->dlen;
3202 if (skb_tailroom(skb) < scb->expect) {
3204 hdev->reassembly[index] = NULL;
/* Nothing left to receive: deliver the completed frame to the core. */
3211 if (scb->expect == 0) {
3212 /* Complete frame */
3214 bt_cb(skb)->pkt_type = type;
3215 hci_recv_frame(hdev, skb);
3217 hdev->reassembly[index] = NULL;
/* Feed a fragment of a typed HCI packet into the reassembler. The slot
 * index is derived from the packet type (type - 1).
 * NOTE(review): surrounding loop/return lines are elided in this view.
 */
3225 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3229 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3233 rem = hci_reassembly(hdev, type, data, count, type - 1);
/* Advance past the bytes the reassembler consumed. */
3237 data += (count - rem);
3243 EXPORT_SYMBOL(hci_recv_fragment);
/* Dedicated reassembly slot for type-prefixed byte streams (e.g. UART). */
3245 #define STREAM_REASSEMBLY 0
/* Reassemble packets from a raw stream where each frame is prefixed by a
 * one-byte packet type indicator. Uses the single STREAM_REASSEMBLY slot.
 * NOTE(review): the loop and the type-extraction path are partially elided
 * in this view.
 */
3247 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3253 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3256 struct { char type; } *pkt;
3258 /* Start of the frame */
/* A reassembly is already in progress: reuse its recorded packet type. */
3265 type = bt_cb(skb)->pkt_type;
3267 rem = hci_reassembly(hdev, type, data, count,
3272 data += (count - rem);
3278 EXPORT_SYMBOL(hci_recv_stream_fragment);
3280 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback set (L2CAP/SCO) on the global
 * hci_cb_list, protected by hci_cb_list_lock.
 */
3282 int hci_register_cb(struct hci_cb *cb)
3284 BT_DBG("%p name %s", cb, cb->name);
3286 write_lock(&hci_cb_list_lock);
3287 list_add(&cb->list, &hci_cb_list);
3288 write_unlock(&hci_cb_list_lock);
3292 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback set from the
 * global hci_cb_list, protected by hci_cb_list_lock.
 */
3294 int hci_unregister_cb(struct hci_cb *cb)
3296 BT_DBG("%p name %s", cb, cb->name);
3298 write_lock(&hci_cb_list_lock);
3299 list_del(&cb->list);
3300 write_unlock(&hci_cb_list_lock);
3304 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing skb to the transport driver. A copy always goes to
 * the HCI monitor, and to raw sockets when promiscuous listeners exist.
 * Errors from the driver's send hook are logged, not propagated.
 */
3306 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3308 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3311 __net_timestamp(skb);
3313 /* Send copy to monitor */
3314 hci_send_to_monitor(hdev, skb);
3316 if (atomic_read(&hdev->promisc)) {
3317 /* Send copy to the sockets */
3318 hci_send_to_sock(hdev, skb);
3321 /* Get rid of skb owner, prior to sending to the driver. */
3324 if (hdev->send(hdev, skb) < 0)
3325 BT_ERR("%s sending frame failed", hdev->name);
/* Initialize an asynchronous HCI request: empty command queue, bound to
 * the given device.
 */
3328 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3330 skb_queue_head_init(&req->cmd_q);
/* Submit a built request: tag the last queued command with the completion
 * callback, splice the request's commands onto the device command queue
 * and kick the command worker. Empty or error-tainted requests are
 * rejected (the queue is purged on a build error).
 */
3335 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3337 struct hci_dev *hdev = req->hdev;
3338 struct sk_buff *skb;
3339 unsigned long flags;
3341 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3343 /* If an error occurred during request building, remove all HCI
3344 * commands queued on the HCI request queue.
3347 skb_queue_purge(&req->cmd_q);
3351 /* Do not allow empty requests */
3352 if (skb_queue_empty(&req->cmd_q))
/* The completion callback lives on the last command of the request. */
3355 skb = skb_peek_tail(&req->cmd_q);
3356 bt_cb(skb)->req.complete = complete;
3358 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3359 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3360 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3362 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Allocate and fill an skb containing a single HCI command: command
 * header (opcode little-endian) followed by plen parameter bytes, tagged
 * as HCI_COMMAND_PKT. Returns the skb (NULL paths elided in this view).
 */
3367 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3368 u32 plen, const void *param)
3370 int len = HCI_COMMAND_HDR_SIZE + plen;
3371 struct hci_command_hdr *hdr;
3372 struct sk_buff *skb;
3374 skb = bt_skb_alloc(len, GFP_ATOMIC);
3378 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3379 hdr->opcode = cpu_to_le16(opcode);
3383 memcpy(skb_put(skb, plen), param, plen);
3385 BT_DBG("skb len %d", skb->len);
3387 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3392 /* Send HCI command */
/* Build a stand-alone HCI command, mark it as the start of its own
 * single-command request, queue it on cmd_q and kick the command worker.
 */
3393 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3396 struct sk_buff *skb;
3398 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3400 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3402 BT_ERR("%s no memory for command", hdev->name);
3406 /* Stand-alone HCI commands must be flagged as
3407 * single-command requests.
3409 bt_cb(skb)->req.start = true;
3411 skb_queue_tail(&hdev->cmd_q, skb);
3412 queue_work(hdev->workqueue, &hdev->cmd_work);
3417 /* Queue a command to an asynchronous HCI request */
/* Append one command to a request being built. The first command of the
 * request is marked as its start; 'event' optionally names the specific
 * HCI event expected to complete this command.
 */
3418 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3419 const void *param, u8 event)
3421 struct hci_dev *hdev = req->hdev;
3422 struct sk_buff *skb;
3424 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3426 /* If an error occurred during request building, there is no point in
3427 * queueing the HCI command. We can simply return.
3432 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3434 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3435 hdev->name, opcode);
/* An empty request queue means this is the request's first command. */
3440 if (skb_queue_empty(&req->cmd_q))
3441 bt_cb(skb)->req.start = true;
3443 bt_cb(skb)->req.event = event;
3445 skb_queue_tail(&req->cmd_q, skb);
/* Convenience wrapper around hci_req_add_ev() with no specific completion
 * event (event = 0).
 */
3448 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3451 hci_req_add_ev(req, opcode, plen, param, 0);
3454 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last sent command, or an
 * early exit when nothing was sent or the opcode does not match.
 */
3455 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3457 struct hci_command_hdr *hdr;
3459 if (!hdev->sent_cmd)
3462 hdr = (void *) hdev->sent_cmd->data;
3464 if (hdr->opcode != cpu_to_le16(opcode))
3467 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
/* Parameters start right after the command header. */
3469 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to an skb: packed handle+flags and the data
 * length, both little-endian.
 */
3473 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3475 struct hci_acl_hdr *hdr;
3478 skb_push(skb, HCI_ACL_HDR_SIZE);
3479 skb_reset_transport_header(skb);
3480 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3481 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3482 hdr->dlen = cpu_to_le16(len);
/* Queue an (optionally fragmented) ACL packet on a channel queue. The
 * head skb gets an ACL header built from the connection or channel handle
 * depending on controller type; any frag_list members are flattened into
 * the queue atomically with ACL_START cleared on continuation fragments.
 */
3485 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3486 struct sk_buff *skb, __u16 flags)
3488 struct hci_conn *conn = chan->conn;
3489 struct hci_dev *hdev = conn->hdev;
3490 struct sk_buff *list;
3492 skb->len = skb_headlen(skb);
3495 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
/* BR/EDR uses the connection handle; AMP uses the channel handle. */
3497 switch (hdev->dev_type) {
3499 hci_add_acl_hdr(skb, conn->handle, flags);
3502 hci_add_acl_hdr(skb, chan->handle, flags);
3505 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3509 list = skb_shinfo(skb)->frag_list;
3511 /* Non fragmented */
3512 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3514 skb_queue_tail(queue, skb);
3517 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; each fragment is queued individually. */
3519 skb_shinfo(skb)->frag_list = NULL;
3521 /* Queue all fragments atomically */
3522 spin_lock(&queue->lock);
3524 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
3526 flags &= ~ACL_START;
3529 skb = list; list = list->next;
3531 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3532 hci_add_acl_hdr(skb, conn->handle, flags);
3534 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3536 __skb_queue_tail(queue, skb);
3539 spin_unlock(&queue->lock);
/* Queue ACL data on a channel and schedule the TX worker to drain it. */
3543 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3545 struct hci_dev *hdev = chan->conn->hdev;
3547 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3549 hci_queue_acl(chan, &chan->data_q, skb, flags);
3551 queue_work(hdev->workqueue, &hdev->tx_work);
/* Prepend a SCO header (handle + length) to the skb, queue it on the
 * connection and schedule the TX worker.
 */
3555 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3557 struct hci_dev *hdev = conn->hdev;
3558 struct hci_sco_hdr hdr;
3560 BT_DBG("%s len %d", hdev->name, skb->len);
3562 hdr.handle = cpu_to_le16(conn->handle);
3563 hdr.dlen = skb->len;
3565 skb_push(skb, HCI_SCO_HDR_SIZE);
3566 skb_reset_transport_header(skb);
3567 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3569 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3571 skb_queue_tail(&conn->data_q, skb);
3572 queue_work(hdev->workqueue, &hdev->tx_work);
3575 /* ---- HCI TX task (outgoing data) ---- */
3577 /* HCI Connection scheduler */
/* Pick the connection of the given link type with pending data and the
 * fewest in-flight packets (fairness), and compute its send quota from the
 * controller's available packet credits.
 * NOTE(review): the quota computation lines are partially elided here.
 */
3578 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3581 struct hci_conn_hash *h = &hdev->conn_hash;
3582 struct hci_conn *conn = NULL, *c;
3583 unsigned int num = 0, min = ~0;
3585 /* We don't have to lock device here. Connections are always
3586 * added and removed with TX task disabled. */
3590 list_for_each_entry_rcu(c, &h->list, list) {
3591 if (c->type != type || skb_queue_empty(&c->data_q))
3594 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest unacked packets. */
3599 if (c->sent < min) {
3604 if (hci_conn_num(hdev, type) == num)
/* Select the credit pool matching the chosen connection's link type. */
3613 switch (conn->type) {
3615 cnt = hdev->acl_cnt;
3619 cnt = hdev->sco_cnt;
3622 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3626 BT_ERR("Unknown link type");
3634 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler for a link type: disconnect every connection of that
 * type that still has unacknowledged packets outstanding.
 */
3638 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3640 struct hci_conn_hash *h = &hdev->conn_hash;
3643 BT_ERR("%s link tx timeout", hdev->name);
3647 /* Kill stalled connections */
3648 list_for_each_entry_rcu(c, &h->list, list) {
3649 if (c->type == type && c->sent) {
3650 BT_ERR("%s killing stalled connection %pMR",
3651 hdev->name, &c->dst);
3652 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
/* Channel-level scheduler: among connections of the given type, choose
 * the channel whose head skb has the highest priority, breaking ties by
 * the fewest packets in flight on the owning connection. Computes a send
 * quota from the matching controller credit pool.
 * NOTE(review): quota arithmetic lines are partially elided in this view.
 */
3659 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3662 struct hci_conn_hash *h = &hdev->conn_hash;
3663 struct hci_chan *chan = NULL;
3664 unsigned int num = 0, min = ~0, cur_prio = 0;
3665 struct hci_conn *conn;
3666 int cnt, q, conn_num = 0;
3668 BT_DBG("%s", hdev->name);
3672 list_for_each_entry_rcu(conn, &h->list, list) {
3673 struct hci_chan *tmp;
3675 if (conn->type != type)
3678 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3683 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3684 struct sk_buff *skb;
3686 if (skb_queue_empty(&tmp->data_q))
/* Priority is taken from the skb at the head of the channel queue. */
3689 skb = skb_peek(&tmp->data_q);
3690 if (skb->priority < cur_prio)
3693 if (skb->priority > cur_prio) {
3696 cur_prio = skb->priority;
/* Same priority: prefer the connection with fewer unacked packets. */
3701 if (conn->sent < min) {
3707 if (hci_conn_num(hdev, type) == conn_num)
3716 switch (chan->conn->type) {
3718 cnt = hdev->acl_cnt;
3721 cnt = hdev->block_cnt;
3725 cnt = hdev->sco_cnt;
3728 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3732 BT_ERR("Unknown link type");
3737 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a scheduling round, promote the head skb of
 * waiting channels toward HCI_PRIO_MAX - 1 so low-priority traffic is not
 * starved indefinitely by higher-priority channels.
 */
3741 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3743 struct hci_conn_hash *h = &hdev->conn_hash;
3744 struct hci_conn *conn;
3747 BT_DBG("%s", hdev->name);
3751 list_for_each_entry_rcu(conn, &h->list, list) {
3752 struct hci_chan *chan;
3754 if (conn->type != type)
3757 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3762 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3763 struct sk_buff *skb;
3770 if (skb_queue_empty(&chan->data_q))
3773 skb = skb_peek(&chan->data_q);
/* Already at (or above) the promotion ceiling — nothing to do. */
3774 if (skb->priority >= HCI_PRIO_MAX - 1)
3777 skb->priority = HCI_PRIO_MAX - 1;
3779 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3783 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by one ACL packet (payload
 * only, rounded up to whole blocks).
 */
3791 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3793 /* Calculate count of blocks used by this packet */
3794 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller has no free credits and the last ACL transmit is older
 * than HCI_ACL_TX_TIMEOUT, treat the ACL link as stalled. Skipped in raw
 * mode where the core does not track flow control.
 */
3797 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3799 if (!test_bit(HCI_RAW, &hdev->flags)) {
3800 /* ACL tx timeout must be longer than maximum
3801 * link supervision timeout (40.9 seconds) */
3802 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3803 HCI_ACL_TX_TIMEOUT))
3804 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while controller credits remain, drain the
 * best channel (per hci_chan_sent) up to its quota, stopping early if the
 * head skb's priority drops. Recalculates priorities afterwards if any
 * packets were sent.
 */
3808 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3810 unsigned int cnt = hdev->acl_cnt;
3811 struct hci_chan *chan;
3812 struct sk_buff *skb;
3815 __check_timeout(hdev, cnt);
3817 while (hdev->acl_cnt &&
3818 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
3819 u32 priority = (skb_peek(&chan->data_q))->priority;
3820 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3821 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3822 skb->len, skb->priority);
3824 /* Stop if priority has changed */
3825 if (skb->priority < priority)
3828 skb = skb_dequeue(&chan->data_q);
3830 hci_conn_enter_active_mode(chan->conn,
3831 bt_cb(skb)->force_active);
3833 hci_send_frame(hdev, skb);
3834 hdev->acl_last_tx = jiffies;
/* Credits were consumed this round — rebalance channel priorities. */
3842 if (cnt != hdev->acl_cnt)
3843 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control counted in data blocks rather
 * than packets). Same structure as hci_sched_acl_pkt, but each packet
 * consumes __get_blocks() credits and oversized packets are deferred when
 * not enough blocks remain. On AMP controllers the link type scheduled is
 * adjusted (line elided in this view).
 */
3846 static void hci_sched_acl_blk(struct hci_dev *hdev)
3848 unsigned int cnt = hdev->block_cnt;
3849 struct hci_chan *chan;
3850 struct sk_buff *skb;
3854 __check_timeout(hdev, cnt);
3856 BT_DBG("%s", hdev->name);
3858 if (hdev->dev_type == HCI_AMP)
3863 while (hdev->block_cnt > 0 &&
3864 (chan = hci_chan_sent(hdev, type, "e))) {
3865 u32 priority = (skb_peek(&chan->data_q))->priority;
3866 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3869 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3870 skb->len, skb->priority);
3872 /* Stop if priority has changed */
3873 if (skb->priority < priority)
3876 skb = skb_dequeue(&chan->data_q);
/* Defer packets that need more blocks than are available. */
3878 blocks = __get_blocks(hdev, skb);
3879 if (blocks > hdev->block_cnt)
3882 hci_conn_enter_active_mode(chan->conn,
3883 bt_cb(skb)->force_active);
3885 hci_send_frame(hdev, skb);
3886 hdev->acl_last_tx = jiffies;
3888 hdev->block_cnt -= blocks;
/* Account in blocks, not packets, on this flow-control mode. */
3891 chan->sent += blocks;
3892 chan->conn->sent += blocks;
3896 if (cnt != hdev->block_cnt)
3897 hci_prio_recalculate(hdev, type);
/* Dispatch ACL scheduling to the packet- or block-based variant according
 * to the controller's flow control mode, after filtering out link/device
 * type combinations with nothing to schedule.
 */
3900 static void hci_sched_acl(struct hci_dev *hdev)
3902 BT_DBG("%s", hdev->name);
3904 /* No ACL link over BR/EDR controller */
3905 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3908 /* No AMP link over AMP controller */
3909 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3912 switch (hdev->flow_ctl_mode) {
3913 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3914 hci_sched_acl_pkt(hdev);
3917 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3918 hci_sched_acl_blk(hdev);
/* SCO scheduler: while SCO credits remain, pick the least-loaded SCO
 * connection (hci_low_sent) and drain its queue up to the quota.
 */
3924 static void hci_sched_sco(struct hci_dev *hdev)
3926 struct hci_conn *conn;
3927 struct sk_buff *skb;
3930 BT_DBG("%s", hdev->name);
3932 if (!hci_conn_num(hdev, SCO_LINK))
3935 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
3936 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3937 BT_DBG("skb %p len %d", skb, skb->len);
3938 hci_send_frame(hdev, skb);
/* Guard against the unsigned in-flight counter wrapping. */
3941 if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco but for ESCO_LINK
 * connections; shares the SCO credit pool (sco_cnt).
 */
3947 static void hci_sched_esco(struct hci_dev *hdev)
3949 struct hci_conn *conn;
3950 struct sk_buff *skb;
3953 BT_DBG("%s", hdev->name);
3955 if (!hci_conn_num(hdev, ESCO_LINK))
3958 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3960 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3961 BT_DBG("skb %p len %d", skb, skb->len);
3962 hci_send_frame(hdev, skb);
/* Guard against the unsigned in-flight counter wrapping. */
3965 if (conn->sent == ~0)
/* LE scheduler: drains LE channels like the ACL packet scheduler, but the
 * credit pool is le_cnt when the controller has dedicated LE buffers
 * (le_pkts set) and falls back to the shared ACL pool otherwise. Applies
 * its own 45-second stall timeout in non-raw mode.
 */
3971 static void hci_sched_le(struct hci_dev *hdev)
3973 struct hci_chan *chan;
3974 struct sk_buff *skb;
3975 int quote, cnt, tmp;
3977 BT_DBG("%s", hdev->name);
3979 if (!hci_conn_num(hdev, LE_LINK))
3982 if (!test_bit(HCI_RAW, &hdev->flags)) {
3983 /* LE tx timeout must be longer than maximum
3984 * link supervision timeout (40.9 seconds) */
3985 if (!hdev->le_cnt && hdev->le_pkts &&
3986 time_after(jiffies, hdev->le_last_tx + HZ * 45))
3987 hci_link_tx_to(hdev, LE_LINK);
/* Use the dedicated LE credit pool when one exists, else share ACL's. */
3990 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3992 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
3993 u32 priority = (skb_peek(&chan->data_q))->priority;
3994 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3995 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3996 skb->len, skb->priority);
3998 /* Stop if priority has changed */
3999 if (skb->priority < priority)
4002 skb = skb_dequeue(&chan->data_q);
4004 hci_send_frame(hdev, skb);
4005 hdev->le_last_tx = jiffies;
/* Write the remaining credits back to the shared ACL pool when LE has
 * no buffers of its own (surrounding branch lines elided in this view).
 */
4016 hdev->acl_cnt = cnt;
4019 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: runs all per-link-type schedulers (unless the device is in
 * user-channel mode, where the kernel does no scheduling) and then flushes
 * any raw packets queued directly on raw_q.
 */
4022 static void hci_tx_work(struct work_struct *work)
4024 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4025 struct sk_buff *skb;
4027 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4028 hdev->sco_cnt, hdev->le_cnt);
4030 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4031 /* Schedule queues and send stuff to HCI driver */
4032 hci_sched_acl(hdev);
4033 hci_sched_sco(hdev);
4034 hci_sched_esco(hdev);
4038 /* Send next queued raw (unknown type) packet */
4039 while ((skb = skb_dequeue(&hdev->raw_q)))
4040 hci_send_frame(hdev, skb);
4043 /* ----- HCI RX task (incoming data processing) ----- */
4045 /* ACL data packet */
/* Process an inbound ACL data packet: strip the header, split the 16-bit
 * field into handle and flags, look up the connection and pass the payload
 * to L2CAP. Unknown handles are logged and the skb dropped (elided path).
 */
4046 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4048 struct hci_acl_hdr *hdr = (void *) skb->data;
4049 struct hci_conn *conn;
4050 __u16 handle, flags;
4052 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* The on-wire handle field packs both the handle and the PB/BC flags. */
4054 handle = __le16_to_cpu(hdr->handle);
4055 flags = hci_flags(handle);
4056 handle = hci_handle(handle);
4058 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4061 hdev->stat.acl_rx++;
4064 conn = hci_conn_hash_lookup_handle(hdev, handle);
4065 hci_dev_unlock(hdev);
4068 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4070 /* Send to upper protocol */
4071 l2cap_recv_acldata(conn, skb, flags);
4074 BT_ERR("%s ACL packet for unknown connection handle %d",
4075 hdev->name, handle);
4081 /* SCO data packet */
/* Process an inbound SCO data packet: strip the header, look up the
 * connection by handle and pass the payload to the SCO layer. Unknown
 * handles are logged and the skb dropped (elided path).
 */
4082 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4084 struct hci_sco_hdr *hdr = (void *) skb->data;
4085 struct hci_conn *conn;
4088 skb_pull(skb, HCI_SCO_HDR_SIZE);
4090 handle = __le16_to_cpu(hdr->handle);
4092 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4094 hdev->stat.sco_rx++;
4097 conn = hci_conn_hash_lookup_handle(hdev, handle);
4098 hci_dev_unlock(hdev);
4101 /* Send to upper protocol */
4102 sco_recv_scodata(conn, skb);
4105 BT_ERR("%s SCO packet for unknown connection handle %d",
4106 hdev->name, handle);
/* A request is complete when the next command on cmd_q starts a new
 * request (req.start set on the head skb).
 */
4112 static bool hci_req_is_complete(struct hci_dev *hdev)
4114 struct sk_buff *skb;
4116 skb = skb_peek(&hdev->cmd_q);
4120 return bt_cb(skb)->req.start;
/* Re-queue a clone of the last sent command at the head of cmd_q. Used
 * when a spontaneous controller reset swallowed the pending command; an
 * HCI_Reset itself is never resent.
 */
4123 static void hci_resend_last(struct hci_dev *hdev)
4125 struct hci_command_hdr *sent;
4126 struct sk_buff *skb;
4129 if (!hdev->sent_cmd)
4132 sent = (void *) hdev->sent_cmd->data;
4133 opcode = __le16_to_cpu(sent->opcode);
4134 if (opcode == HCI_OP_RESET)
4137 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
/* Head of the queue so it is retried before anything newer. */
4141 skb_queue_head(&hdev->cmd_q, skb);
4142 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Called when a command completes (or fails). Decides whether the whole
 * request this command belonged to is finished; on failure or at the end
 * of a request, locates the request's completion callback (on sent_cmd or
 * further down cmd_q), purges the request's remaining queued commands, and
 * invokes the callback with the status.
 */
4145 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4147 hci_req_complete_t req_complete = NULL;
4148 struct sk_buff *skb;
4149 unsigned long flags;
4151 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4153 /* If the completed command doesn't match the last one that was
4154 * sent we need to do special handling of it.
4156 if (!hci_sent_cmd_data(hdev, opcode)) {
4157 /* Some CSR based controllers generate a spontaneous
4158 * reset complete event during init and any pending
4159 * command will never be completed. In such a case we
4160 * need to resend whatever was the last sent
4163 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4164 hci_resend_last(hdev);
4169 /* If the command succeeded and there's still more commands in
4170 * this request the request is not yet complete.
4172 if (!status && !hci_req_is_complete(hdev))
4175 /* If this was the last command in a request the complete
4176 * callback would be found in hdev->sent_cmd instead of the
4177 * command queue (hdev->cmd_q).
4179 if (hdev->sent_cmd) {
4180 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4183 /* We must set the complete callback to NULL to
4184 * avoid calling the callback more than once if
4185 * this function gets called again.
4187 bt_cb(hdev->sent_cmd)->req.complete = NULL;
4193 /* Remove all pending commands belonging to this request */
4194 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4195 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
/* Stop at the first command of the NEXT request and put it back. */
4196 if (bt_cb(skb)->req.start) {
4197 __skb_queue_head(&hdev->cmd_q, skb);
4201 req_complete = bt_cb(skb)->req.complete;
4204 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4208 req_complete(hdev, status);
/* RX worker: drains rx_q, mirrors each frame to the monitor (and raw
 * sockets when promiscuous listeners exist), then dispatches by packet
 * type to the event/ACL/SCO handlers. Data packets are not processed
 * while the device is still initializing, and raw/user-channel devices
 * bypass kernel processing entirely (drop paths elided in this view).
 */
4211 static void hci_rx_work(struct work_struct *work)
4213 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4214 struct sk_buff *skb;
4216 BT_DBG("%s", hdev->name);
4218 while ((skb = skb_dequeue(&hdev->rx_q))) {
4219 /* Send copy to monitor */
4220 hci_send_to_monitor(hdev, skb);
4222 if (atomic_read(&hdev->promisc)) {
4223 /* Send copy to the sockets */
4224 hci_send_to_sock(hdev, skb);
/* Raw or user-channel mode: the kernel does not consume the frame. */
4227 if (test_bit(HCI_RAW, &hdev->flags) ||
4228 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4233 if (test_bit(HCI_INIT, &hdev->flags)) {
4234 /* Don't process data packets in this states. */
4235 switch (bt_cb(skb)->pkt_type) {
4236 case HCI_ACLDATA_PKT:
4237 case HCI_SCODATA_PKT:
4244 switch (bt_cb(skb)->pkt_type) {
4246 BT_DBG("%s Event packet", hdev->name);
4247 hci_event_packet(hdev, skb);
4250 case HCI_ACLDATA_PKT:
4251 BT_DBG("%s ACL data packet", hdev->name);
4252 hci_acldata_packet(hdev, skb);
4255 case HCI_SCODATA_PKT:
4256 BT_DBG("%s SCO data packet", hdev->name);
4257 hci_scodata_packet(hdev, skb);
4267 static void hci_cmd_work(struct work_struct *work)
4269 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4270 struct sk_buff *skb;
4272 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4273 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4275 /* Send queued commands */
4276 if (atomic_read(&hdev->cmd_cnt)) {
4277 skb = skb_dequeue(&hdev->cmd_q);
4281 kfree_skb(hdev->sent_cmd);
4283 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4284 if (hdev->sent_cmd) {
4285 atomic_dec(&hdev->cmd_cnt);
4286 hci_send_frame(hdev, skb);
4287 if (test_bit(HCI_RESET, &hdev->flags))
4288 del_timer(&hdev->cmd_timer);
4290 mod_timer(&hdev->cmd_timer,
4291 jiffies + HCI_CMD_TIMEOUT);
4293 skb_queue_head(&hdev->cmd_q, skb);
4294 queue_work(hdev->workqueue, &hdev->cmd_work);