/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;
	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;
	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
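
/* Illustrative usage sketch (not part of the original file): a caller
 * supplies a request-builder callback and lets hci_req_sync() block until
 * the controller answers or the timeout expires. The builder and helper
 * names below are hypothetical; the opcode and SCAN_PAGE value are real.
 */
static void example_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int __maybe_unused example_enable_page_scan(struct hci_dev *hdev)
{
	return hci_req_sync(hdev, example_scan_req, SCAN_PAGE,
			    HCI_INIT_TIMEOUT);
}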
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}

	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;
	case HCI_AMP:
		amp_init(req);
		break;
	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
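
/* Illustrative sketch (not part of the original file): before calling
 * hci_dev_open(), a transport driver may queue vendor-specific setup
 * commands on hdev->driver_init; hci_init1_req() above drains that queue
 * and sends them ahead of the standard init commands. The helper name,
 * vendor OCF and payload here are all hypothetical.
 */
static int __maybe_unused example_queue_vendor_cmd(struct hci_dev *hdev)
{
	const u8 payload[] = { 0x01 };	/* hypothetical vendor parameter */
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + sizeof(payload),
			   GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(hci_opcode_pack(0x3f, 0x0001));
	hdr->plen = sizeof(payload);
	memcpy(skb_put(skb, sizeof(payload)), payload, sizeof(payload));

	skb_queue_tail(&hdev->driver_init, skb);
	return 0;
}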
static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
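
/* Illustrative helper (not part of the original file): HCI event N
 * occupies bit (N - 1) of the little-endian 8-byte event mask, i.e. byte
 * (N - 1) / 8, bit (N - 1) % 8. For example, Inquiry Result with RSSI is
 * event 0x22, which lands in events[4] as 0x02, matching the code above.
 * A generic setter would be:
 */
static void __maybe_unused example_set_event_bit(u8 events[8], u8 event)
{
	events[(event - 1) / 8] |= 1 << ((event - 1) % 8);
}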
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 1;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);
}
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
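
/* Illustrative sketch (not part of the original file): every successful
 * hci_dev_get() must be balanced with hci_dev_put() once the caller is
 * done with the reference. The helper name is hypothetical.
 */
static int __maybe_unused example_dev_is_up(int index)
{
	struct hci_dev *hdev;
	int up;

	hdev = hci_dev_get(index);
	if (!hdev)
		return -ENODEV;

	up = test_bit(HCI_UP, &hdev->flags);
	hci_dev_put(hdev);

	return up;
}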
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
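
/* Illustrative sketch (not part of the original file): advertising data is
 * a sequence of [length, type, payload] fields, where length counts the
 * type byte plus the payload, as create_ad() does above for the flags and
 * TX power fields. A generic field writer (hypothetical helper) might be:
 */
static u8 __maybe_unused example_ad_put_field(u8 *ptr, u8 type,
					      const u8 *data, u8 data_len)
{
	ptr[0] = data_len + 1;	/* type byte + payload */
	ptr[1] = type;
	memcpy(ptr + 2, data, data_len);

	return data_len + 2;	/* total bytes consumed in the buffer */
}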
int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err < 0)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	if (timeout)
		queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
				   msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
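
/* Illustrative sketch (not part of the original file): the minimal driver
 * side of hci_register_dev(), loosely modeled on virtual transports such
 * as hci_vhci. The example_* callbacks are hypothetical stubs; in this
 * kernel version send() receives the skb with hdev in skb->dev.
 */
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }
static int example_send(struct sk_buff *skb) { kfree_skb(skb); return 0; }

static struct hci_dev * __maybe_unused example_register(void)
{
	struct hci_dev *hdev = hci_alloc_dev();

	if (!hdev)
		return NULL;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return NULL;
	}

	return hdev;
}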
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
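
/* Illustrative sketch (not part of the original file): a transport driver
 * hands a received packet to the core by tagging its type, pointing
 * skb->dev at the hdev (as this kernel version expects) and calling
 * hci_recv_frame(). The helper name and buffer source are hypothetical.
 */
static int __maybe_unused example_deliver_event(struct hci_dev *hdev,
						const void *buf, int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;

	return hci_recv_frame(skb);
}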
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			skb = NULL;
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
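
/* Illustrative sketch (not part of the original file): the asynchronous
 * counterpart to hci_req_sync(). Commands are batched with hci_req_add()
 * and submitted with hci_req_run(); the completion callback (hypothetical
 * here) fires after the last command in the batch completes.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_async_name_read(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}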
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
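
/* Illustrative sketch (not part of the original file): hci_send_cmd() is
 * the fire-and-forget path for a single command outside any request batch;
 * the matching Command Complete event is handled by the event code, not by
 * the caller. The helper name is hypothetical.
 */
static int __maybe_unused example_write_scan(struct hci_dev *hdev, u8 scan)
{
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan),
			    &scan);
}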
/* Queue a command to an asynchronous HCI request */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
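
/* Worked example (not part of the original file): hci_handle_pack() folds
 * the packet boundary and broadcast flags into the upper four bits of the
 * 12-bit connection handle, so handle 0x0001 with ACL_START (0x02) becomes
 * 0x2001 on the wire. The helper name is hypothetical.
 */
static __u16 __maybe_unused example_pack_handle(void)
{
	return hci_handle_pack(0x0001, ACL_START);	/* == 0x2001 */
}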
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
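
/* Worked example (not part of the original file): the quota computed above
 * is a fair share of the free controller buffers. With acl_cnt == 8 free
 * ACL buffers and num == 3 ready connections, each pick may send
 * 8 / 3 == 2 packets; a zero share is rounded up to 1 so a connection is
 * never starved. The helper name is hypothetical.
 */
static int __maybe_unused example_quota(int cnt, unsigned int num)
{
	int q = cnt / num;

	return q ? q : 1;
}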
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
2917 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2919 unsigned int cnt = hdev->acl_cnt;
2920 struct hci_chan *chan;
2921 struct sk_buff *skb;
2924 __check_timeout(hdev, cnt);
2926 while (hdev->acl_cnt &&
2927 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
2928 u32 priority = (skb_peek(&chan->data_q))->priority;
2929 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2930 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2931 skb->len, skb->priority);
2933 /* Stop if priority has changed */
2934 if (skb->priority < priority)
2937 skb = skb_dequeue(&chan->data_q);
2939 hci_conn_enter_active_mode(chan->conn,
2940 bt_cb(skb)->force_active);
2942 hci_send_frame(skb);
2943 hdev->acl_last_tx = jiffies;
2951 if (cnt != hdev->acl_cnt)
2952 hci_prio_recalculate(hdev, ACL_LINK);
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
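
/* Note that when the controller advertises no dedicated LE buffers
 * (hdev->le_pkts == 0), hci_sched_le() above draws on the shared ACL
 * buffer pool, which is why the leftover count is written back to
 * acl_cnt rather than le_cnt in that case.
 */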
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
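
/* A quick note on the handle decoding above: the 16 bit field in the
 * ACL header packs a 12 bit connection handle plus 2 packet boundary
 * and 2 broadcast flag bits, which hci_handle() and hci_flags() mask
 * apart. As an illustrative example, a raw value of 0x2042 yields
 * handle 0x0042 with flags 0x2 (ACL_START).
 */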
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
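
/* Illustrative cmd_q layout (a sketch, not code): with requests A and B
 * queued as
 *
 *   head -> [A2 start=0] [A3 start=0] [B1 start=1] ...
 *
 * the check above reports A as incomplete while A2 and A3 are still
 * queued; once they have been sent, the skb left at the head is B1
 * with req.start set, and request A is considered complete.
 */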
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);
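
		/* Hypothetical example of the workaround above: during
		 * init the last command actually sent might be, say,
		 * Read Local Features, yet the controller delivers a
		 * command complete for HCI_OP_RESET that was never
		 * issued at this point. Without resending the last
		 * command, the init request would stall until the
		 * command timer fires.
		 */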

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	if (status) {
		hci_req_cmd_complete(hdev, opcode, status);
		return;
	}

	/* No need to handle success status if there are more commands */
	if (!hci_req_is_complete(hdev))
		return;

	if (hdev->sent_cmd)
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

	/* If the request doesn't have a complete callback or there
	 * are other commands/requests in the hdev queue we consider
	 * this request as completed.
	 */
	if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
		hci_req_cmd_complete(hdev, opcode, status);
}
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
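
/* The LAP above is the General Inquiry Access Code 0x9e8b33 in the
 * little-endian byte order the Inquiry command expects; the Limited
 * Inquiry Access Code 0x9e8b00 would correspondingly be sent as
 * { 0x00, 0x8b, 0x9e }.
 */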
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}