/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
	hci_sock_dev_event(hdev, event);

/* ---- HCI debugfs entries ---- */
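/* The read-only entries below end up in the per-controller debugfs
 * directory, typically /sys/kernel/debug/bluetooth/hciX (assuming
 * debugfs is mounted at the usual location), e.g.:
 *
 *	cat /sys/kernel/debug/bluetooth/hci0/features
 */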
static int features_show(struct seq_file *f, void *ptr)
	struct hci_dev *hdev = f->private;
	u8 p;

	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "Page %u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
static int features_open(struct inode *inode, struct file *file)
	return single_open(file, features_show, inode->i_private);

static const struct file_operations features_fops = {
	.open		= features_open,
	.release	= single_release,
};
static int blacklist_show(struct seq_file *f, void *p)
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

static int blacklist_open(struct inode *inode, struct file *file)
	return single_open(file, blacklist_show, inode->i_private);

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.release	= single_release,
};
static int uuids_show(struct seq_file *f, void *p)
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u32 data0, data5;
		u16 data1, data2, data3, data4;

		data5 = get_unaligned_le32(uuid);
		data4 = get_unaligned_le16(uuid + 4);
		data3 = get_unaligned_le16(uuid + 6);
		data2 = get_unaligned_le16(uuid + 8);
		data1 = get_unaligned_le16(uuid + 10);
		data0 = get_unaligned_le32(uuid + 12);

		seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.4x%.8x\n",
			   data0, data1, data2, data3, data4, data5);
	}
	hci_dev_unlock(hdev);

static int uuids_open(struct inode *inode, struct file *file)
	return single_open(file, uuids_show, inode->i_private);

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.release	= single_release,
};
static int inquiry_cache_show(struct seq_file *f, void *p)
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}
	hci_dev_unlock(hdev);

static int inquiry_cache_open(struct inode *inode, struct file *file)
	return single_open(file, inquiry_cache_show, inode->i_private);

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.release	= single_release,
};
static int voice_setting_get(void *data, u64 *val)
	struct hci_dev *hdev = data;

	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
	struct hci_dev *hdev = data;

	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

static int auto_accept_delay_get(void *data, u64 *val)
	struct hci_dev *hdev = data;

	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
static int idle_timeout_set(void *data, u64 val)
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

static int idle_timeout_get(void *data, u64 *val)
	struct hci_dev *hdev = data;

	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
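/* Note on units (inferred from the range check in the setter above,
 * not stated elsewhere in this excerpt): idle_timeout appears to be in
 * milliseconds, so the accepted non-zero range 500..3600000 spans
 * 0.5 seconds up to one hour.
 */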
static int sniff_min_interval_set(void *data, u64 val)
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

static int sniff_min_interval_get(void *data, u64 *val)
	struct hci_dev *hdev = data;

	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
static int sniff_max_interval_set(void *data, u64 val)
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

static int sniff_max_interval_get(void *data, u64 *val)
	struct hci_dev *hdev = data;

	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
static int static_address_show(struct seq_file *f, void *p)
	struct hci_dev *hdev = f->private;

	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

static int static_address_open(struct inode *inode, struct file *file)
	return single_open(file, static_address_show, inode->i_private);

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.release	= single_release,
};
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}

static void hci_req_cancel(struct hci_dev *hdev, int err)
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (hdr->evt != event)
		goto failed;

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return hci_get_cmd_complete(hdev, opcode, event);

EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);

EXPORT_SYMBOL(__hci_cmd_sync);
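/* A minimal usage sketch for the synchronous command helpers above
 * (illustrative only; the chosen opcode and error handling are
 * assumptions, not taken from this file). The caller owns and must
 * free the returned skb:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	(skb->data now holds the command complete parameters)
 *	kfree_skb(skb);
 */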
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;

static void hci_reset_req(struct hci_request *req, unsigned long opt)
	BT_DBG("%s %ld", req->hdev->name, opt);

	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
static void bredr_init(struct hci_request *req)
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);

static void amp_init(struct hci_request *req)
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
static void hci_init1_req(struct hci_request *req, unsigned long opt)
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
static void bredr_setup(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
static void le_setup(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;

static void hci_setup_inquiry_mode(struct hci_request *req)
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
static void hci_setup_event_mask(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
static void hci_init2_req(struct hci_request *req, unsigned long opt)
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * also be available. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
static void hci_setup_link_policy(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
static void hci_set_le_support(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
static void hci_set_event_mask_page_2(struct hci_request *req)
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported,
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported,
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
static void hci_init3_req(struct hci_request *req, unsigned long opt)
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
static void hci_init4_req(struct hci_request *req, unsigned long opt)
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
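/* __hci_init() below drives controller bring-up as four synchronous
 * request stages: init1 (reset plus basic capability reads), init2
 * (event mask, SSP and EIR setup), init3 (link policy, LE support and
 * extended feature pages) and init4 (second event mask page and
 * synchronization train parameters). Each stage only runs if the
 * previous one succeeded.
 */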
static int __hci_init(struct hci_dev *hdev)
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev))
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev))
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

	return 0;
static void hci_scan_req(struct hci_request *req, unsigned long opt)
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

static void hci_auth_req(struct hci_request *req, unsigned long opt)
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
/* Get HCI device by index.
 * Device is held on return; callers must drop the reference with
 * hci_dev_put() when done.
 */
struct hci_dev *hci_dev_get(int index)
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	return hdev;
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
void hci_inquiry_cache_flush(struct hci_dev *hdev)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
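/* The resolve list is kept sorted by descending absolute RSSI (see the
 * comparison above), so name resolution is attempted for the strongest,
 * presumably closest, devices first.
 */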
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
static void hci_inq_req(struct hci_request *req, unsigned long opt)
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

static int wait_inquiry(void *word)
	schedule();
	return signal_pending(current);
int hci_inquiry(void __user *arg)
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
static int hci_dev_do_open(struct hci_dev *hdev)
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);
	return err;
static int hci_dev_do_close(struct hci_dev *hdev)
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
int hci_dev_close(__u16 dev)
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;

int hci_dev_reset(__u16 dev)
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;

int hci_dev_reset_stat(__u16 dev)
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
int hci_dev_cmd(unsigned int cmd, void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
int hci_get_dev_list(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
int hci_get_dev_info(void __user *arg)
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
static void hci_power_off(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);

static void hci_discov_off(struct work_struct *work)
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
int hci_uuids_clear(struct hci_dev *hdev)
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;

int hci_link_keys_clear(struct hci_dev *hdev)
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;

int hci_smp_ltks_clear(struct hci_dev *hdev)
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
2133 list_for_each_entry(k, &hdev->long_term_keys, list) {
2134 if (k->ediv != ediv ||
2135 memcmp(rand, k->rand, sizeof(k->rand)))
2144 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2149 list_for_each_entry(k, &hdev->long_term_keys, list)
2150 if (addr_type == k->bdaddr_type &&
2151 bacmp(bdaddr, &k->bdaddr) == 0)
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->enc_size = enc_size;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;

int hci_remote_oob_data_clear(struct hci_dev *hdev)
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;

int hci_blacklist_clear(struct hci_dev *hdev)
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
static void inquiry_complete(struct hci_dev *hdev, u8 status)
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
static void le_scan_disable_work(struct work_struct *work)
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;

EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
	/* will free via device release */
	put_device(&hdev->dev);

EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;

EXPORT_SYMBOL(hci_register_dev);
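/* Rough driver-side sketch of the expected registration sequence
 * (the my_drv_* callbacks are hypothetical placeholders, not part of
 * this file):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->open  = my_drv_open;
 *	hdev->close = my_drv_close;
 *	hdev->send  = my_drv_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */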
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);

EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
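/* Drivers hand completed frames to the core here; the skb is merely
 * queued on rx_q, and the actual protocol processing happens later in
 * hci_rx_work() on the per-device workqueue.
 */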
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
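/* Reassembly state machine: a per-type skb in hdev->reassembly[] grows
 * as fragments arrive. scb->expect tracks how many bytes are still
 * missing; once the packet header is complete it is inspected to learn
 * the real payload length, and when expect reaches zero the finished
 * frame is fed back into hci_recv_frame().
 */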
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];
	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
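/* Stream transports (e.g. UART based drivers) do not preserve packet
 * boundaries, so the first byte of every frame carries the packet type
 * indicator and a single reassembly slot is reserved for the stream.
 */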
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
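/* Every outgoing frame is duplicated to the monitor socket, and to raw
 * HCI sockets when a promiscuous listener exists, before it is handed
 * to the driver's send callback.
 */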
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
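/* A minimal usage sketch of the request API (read_local_name_complete
 * is a hypothetical callback; HCI_OP_READ_LOCAL_NAME is just an
 * example opcode):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, read_local_name_complete);
 *
 * Commands queued on req.cmd_q are spliced onto hdev->cmd_q as one
 * unit, with the complete callback attached to the last command.
 */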
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
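/* ACL framing: hci_handle_pack() folds the 12-bit connection handle
 * and the packet boundary/broadcast flags into one 16-bit field, which
 * is followed by the little-endian payload length.
 */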
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
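/* hci_low_sent() picks, per link type, the connection with the fewest
 * packets in flight (c->sent) among those with queued data, and grants
 * it a quota derived from the free controller buffer count divided by
 * the number of competing connections.
 */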
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
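/* Channel-level scheduling adds skb priorities on top of the
 * connection scheduler: only channels whose head-of-queue skb carries
 * the highest priority seen so far compete, and among those the least
 * busy connection wins.
 */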
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
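/* ACL flow control comes in two flavours: packet based (quota counted
 * in packets, hdev->acl_cnt) and block based (quota counted in
 * controller buffer blocks, hdev->block_cnt). hci_sched_acl() below
 * selects the variant announced by the controller in flow_ctl_mode.
 */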
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
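/* hci_tx_work() is the single TX entry point: it runs the per-type
 * schedulers and then flushes raw_q, which bypasses scheduling
 * entirely.
 */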
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
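/* Command completion is matched against hdev->sent_cmd. For a
 * multi-command request, intermediate completions just let the request
 * continue; only the final command, or a failure, invokes the
 * request's complete callback and drops the remaining queued commands
 * of that request.
 */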
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
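/* RX processing: each frame is mirrored to the monitor (and to raw
 * sockets when a promiscuous listener exists) and then dispatched by
 * packet type. Frames are discarded in raw or user channel mode, and
 * data packets are dropped while HCI_INIT is still set.
 */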
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}