/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
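/* Example (an illustrative sketch, not an API in this file): a typical
 * caller defines a request builder and hands it to hci_req_sync(). The
 * example_* names below are hypothetical; the pattern mirrors
 * hci_reset_req() and friends. The builder only queues commands;
 * hci_req_sync() runs them and sleeps on req_wait_q until
 * hci_req_sync_complete() fires or the timeout expires.
 *
 *	static void example_voice_req(struct hci_request *req,
 *				      unsigned long opt)
 *	{
 *		__le16 setting = cpu_to_le16(opt);
 *
 *		hci_req_add(req, HCI_OP_WRITE_VOICE_SETTING,
 *			    sizeof(setting), &setting);
 *	}
 *
 *	err = hci_req_sync(hdev, example_voice_req, 0x0060,
 *			   HCI_INIT_TIMEOUT);
 */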
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}

	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 2;

	if (lmp_inq_rssi_capable(hdev))
		return 1;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 1;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 1;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 1;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 1;

	return 0;
}
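/* The value returned above is written verbatim into the Write Inquiry
 * Mode command: 0x00 = standard inquiry results, 0x01 = results with
 * RSSI, 0x02 = extended inquiry results. The manufacturer/revision
 * checks appear to whitelist controllers that support RSSI results
 * without advertising the corresponding LMP feature bit.
 */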
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
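/* A worked example of how the bitmap above maps onto the HCI Set Event
 * Mask command: event mask bit N lives in events[N / 8] under mask
 * 1 << (N % 8). So events[4] |= 0x01 enables bit 32 (Flow
 * Specification Complete) and events[7] |= 0x20 enables bit 61
 * (LE Meta-Event), matching the per-line comments.
 */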
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);

	return copied;
}
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
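/* Advertising data is a sequence of length-prefixed structures: one
 * octet of length (covering type + payload), one octet of AD type,
 * then the payload, which is why every field above advances ad_len by
 * payload + 2. A worked example for TX power 4 and the name "hci0":
 *
 *	02 0a 04		len=2, EIR_TX_POWER, +4 dBm
 *	05 09 68 63 69 30	len=5, EIR_NAME_COMPLETE, "hci0"
 */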
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
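/* A note on the MTU ioctls above: they pack a packet count in the low
 * 16 bits of dev_opt and an MTU in the high 16 bits, which is what the
 * pointer arithmetic unpacks (as written it assumes a little-endian
 * host). E.g. a made-up dev_opt of 0x04000008 yields
 * acl_pkts = 0x0008 and acl_mtu = 0x0400.
 */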
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
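/* For reference, the numeric comparisons above encode the HCI
 * authentication requirements values: 0x00/0x01 no bonding (without/
 * with MITM protection), 0x02/0x03 dedicated bonding, 0x04/0x05
 * general bonding.
 */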
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
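/* Example (a hypothetical transport driver registering with the core;
 * my_open/my_close/my_send/my_data are assumptions, not part of this
 * file). hci_register_dev() requires open and close to be set:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_data);
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */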
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
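/* Example (illustrative): a byte-stream transport with the packet
 * indicator already stripped can feed a received chunk for one packet
 * type straight into the core:
 *
 *	rem = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (rem < 0)
 *		BT_ERR("Frame reassembly failed (%d)", rem);
 *
 * hci_reassembly() accumulates header and payload per packet type and
 * passes each complete frame to hci_recv_frame().
 */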
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
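/* Example (illustrative): a one-off command sent outside of a request,
 * e.g. enabling page and inquiry scan:
 *
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 */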
/* Queue a command to an asynchronous HCI request */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);
}
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
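/* hci_handle_pack() folds the 2-bit packet boundary and 2-bit broadcast
 * flags into the top nibble of the 16-bit handle field:
 * handle | (flags << 12). A worked example: handle 0x002a with
 * ACL_START (0x02) becomes 0x202a, sent little-endian as 2a 20.
 */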
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
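/* Worked example: with block_len = 64, a 340-byte ACL packet (4-byte
 * ACL header + 336 bytes of payload) costs
 * DIV_ROUND_UP(340 - 4, 64) = 6 blocks against block_cnt.
 */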
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
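/* Illustrative note: the inner loop stops as soon as the head of the
 * queue carries a lower priority than the one the quota was granted
 * at; the next hci_chan_sent() call then re-arbitrates all channels
 * at the new priority level instead of letting this channel drain
 * low-priority data on a high-priority quota.
 */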
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
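/* Illustrative note: in block mode a single packet may consume
 * several buffer blocks, so quota and counters move in block units.
 * Assuming the 6-block example above with only 4 blocks left, the
 * scheduling round is abandoned outright rather than sending a
 * partial packet.
 */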
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
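/* Illustrative note: nothing in this loop decrements hdev->sco_cnt,
 * so the outer condition only checks that the controller reported
 * SCO buffers at all. conn->sent is kept purely as the fairness key
 * for hci_low_sent(); the ~0 test resets it before it can wrap.
 */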
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
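/* Illustrative note: hdev->le_pkts is zero when the controller
 * advertises no dedicated LE buffer pool, in which case LE traffic
 * borrows ACL credits; that is why the leftover count is written
 * back to acl_cnt rather than le_cnt in that configuration.
 */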
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
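/* Worked example (illustrative): the 16-bit handle field packs a
 * 12-bit connection handle with 4 bits of packet boundary/broadcast
 * flags. Assuming hci_handle() masks with 0x0fff and hci_flags()
 * shifts right by 12, as defined in <net/bluetooth/hci.h>, a raw
 * value of 0x2001 yields handle 0x001 and flags 0x2.
 */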
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
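/* Illustrative note: requests are framed in the command queue by
 * bt_cb(skb)->req.start, which is set only on a request's first
 * command. That is why hci_req_is_complete() just peeks at the queue
 * head, and why the drain loop above stops as soon as it meets the
 * start marker of the next request.
 */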
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
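/* Illustrative note: hdev->cmd_cnt mirrors the controller's command
 * credit (normally a single outstanding command), replenished by
 * Command Complete / Command Status events. cmd_timer catches a
 * controller that never answers; note it is deliberately left
 * unarmed while HCI_RESET is in flight.
 */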
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
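/* Illustrative note: lap[] is the General Inquiry Access Code
 * 0x9E8B33 in little-endian byte order, hence { 0x33, 0x8b, 0x9e }.
 * The length parameter is expressed in units of 1.28 seconds, so for
 * example length == 8 asks for an inquiry of roughly 10.24 seconds.
 */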
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}