/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
36 #define MGMT_VERSION 1
37 #define MGMT_REVISION 5
39 static const u16 mgmt_commands[] = {
40 MGMT_OP_READ_INDEX_LIST,
43 MGMT_OP_SET_DISCOVERABLE,
44 MGMT_OP_SET_CONNECTABLE,
45 MGMT_OP_SET_FAST_CONNECTABLE,
47 MGMT_OP_SET_LINK_SECURITY,
51 MGMT_OP_SET_DEV_CLASS,
52 MGMT_OP_SET_LOCAL_NAME,
55 MGMT_OP_LOAD_LINK_KEYS,
56 MGMT_OP_LOAD_LONG_TERM_KEYS,
58 MGMT_OP_GET_CONNECTIONS,
59 MGMT_OP_PIN_CODE_REPLY,
60 MGMT_OP_PIN_CODE_NEG_REPLY,
61 MGMT_OP_SET_IO_CAPABILITY,
63 MGMT_OP_CANCEL_PAIR_DEVICE,
64 MGMT_OP_UNPAIR_DEVICE,
65 MGMT_OP_USER_CONFIRM_REPLY,
66 MGMT_OP_USER_CONFIRM_NEG_REPLY,
67 MGMT_OP_USER_PASSKEY_REPLY,
68 MGMT_OP_USER_PASSKEY_NEG_REPLY,
69 MGMT_OP_READ_LOCAL_OOB_DATA,
70 MGMT_OP_ADD_REMOTE_OOB_DATA,
71 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
72 MGMT_OP_START_DISCOVERY,
73 MGMT_OP_STOP_DISCOVERY,
76 MGMT_OP_UNBLOCK_DEVICE,
77 MGMT_OP_SET_DEVICE_ID,
78 MGMT_OP_SET_ADVERTISING,
80 MGMT_OP_SET_STATIC_ADDRESS,
81 MGMT_OP_SET_SCAN_PARAMS,
82 MGMT_OP_SET_SECURE_CONN,
83 MGMT_OP_SET_DEBUG_KEYS,
88 static const u16 mgmt_events[] = {
89 MGMT_EV_CONTROLLER_ERROR,
91 MGMT_EV_INDEX_REMOVED,
93 MGMT_EV_CLASS_OF_DEV_CHANGED,
94 MGMT_EV_LOCAL_NAME_CHANGED,
96 MGMT_EV_NEW_LONG_TERM_KEY,
97 MGMT_EV_DEVICE_CONNECTED,
98 MGMT_EV_DEVICE_DISCONNECTED,
99 MGMT_EV_CONNECT_FAILED,
100 MGMT_EV_PIN_CODE_REQUEST,
101 MGMT_EV_USER_CONFIRM_REQUEST,
102 MGMT_EV_USER_PASSKEY_REQUEST,
104 MGMT_EV_DEVICE_FOUND,
106 MGMT_EV_DEVICE_BLOCKED,
107 MGMT_EV_DEVICE_UNBLOCKED,
108 MGMT_EV_DEVICE_UNPAIRED,
109 MGMT_EV_PASSKEY_NOTIFY,
/* How long cached EIR/class data stays valid before being refreshed */
#define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)

/* A controller counts as powered only when it is up and not merely in
 * the transient auto-power-on state used during setup.
 */
#define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
				!test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
119 struct list_head list;
127 /* HCI to MGMT error code conversion table */
128 static u8 mgmt_status_table[] = {
130 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
131 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
132 MGMT_STATUS_FAILED, /* Hardware Failure */
133 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
134 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
135 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
136 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
137 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
138 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
139 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
140 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
141 MGMT_STATUS_BUSY, /* Command Disallowed */
142 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
143 MGMT_STATUS_REJECTED, /* Rejected Security */
144 MGMT_STATUS_REJECTED, /* Rejected Personal */
145 MGMT_STATUS_TIMEOUT, /* Host Timeout */
146 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
147 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
148 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
149 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
150 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
151 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
152 MGMT_STATUS_BUSY, /* Repeated Attempts */
153 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
154 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
155 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
156 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
157 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
158 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
159 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
160 MGMT_STATUS_FAILED, /* Unspecified Error */
161 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
162 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
163 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
164 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
165 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
166 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
167 MGMT_STATUS_FAILED, /* Unit Link Key Used */
168 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
169 MGMT_STATUS_TIMEOUT, /* Instant Passed */
170 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
171 MGMT_STATUS_FAILED, /* Transaction Collision */
172 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
173 MGMT_STATUS_REJECTED, /* QoS Rejected */
174 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
175 MGMT_STATUS_REJECTED, /* Insufficient Security */
176 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
177 MGMT_STATUS_BUSY, /* Role Switch Pending */
178 MGMT_STATUS_FAILED, /* Slot Violation */
179 MGMT_STATUS_FAILED, /* Role Switch Failed */
180 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
181 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
182 MGMT_STATUS_BUSY, /* Host Busy Pairing */
183 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
184 MGMT_STATUS_BUSY, /* Controller Busy */
185 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
186 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
187 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
188 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
189 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
192 static u8 mgmt_status(u8 hci_status)
194 if (hci_status < ARRAY_SIZE(mgmt_status_table))
195 return mgmt_status_table[hci_status];
197 return MGMT_STATUS_FAILED;
200 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
203 struct mgmt_hdr *hdr;
204 struct mgmt_ev_cmd_status *ev;
207 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
209 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
213 hdr = (void *) skb_put(skb, sizeof(*hdr));
215 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
216 hdr->index = cpu_to_le16(index);
217 hdr->len = cpu_to_le16(sizeof(*ev));
219 ev = (void *) skb_put(skb, sizeof(*ev));
221 ev->opcode = cpu_to_le16(cmd);
223 err = sock_queue_rcv_skb(sk, skb);
230 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
231 void *rp, size_t rp_len)
234 struct mgmt_hdr *hdr;
235 struct mgmt_ev_cmd_complete *ev;
238 BT_DBG("sock %p", sk);
240 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
244 hdr = (void *) skb_put(skb, sizeof(*hdr));
246 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
247 hdr->index = cpu_to_le16(index);
248 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
250 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
251 ev->opcode = cpu_to_le16(cmd);
255 memcpy(ev->data, rp, rp_len);
257 err = sock_queue_rcv_skb(sk, skb);
264 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
267 struct mgmt_rp_read_version rp;
269 BT_DBG("sock %p", sk);
271 rp.version = MGMT_VERSION;
272 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
274 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
278 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
281 struct mgmt_rp_read_commands *rp;
282 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
283 const u16 num_events = ARRAY_SIZE(mgmt_events);
288 BT_DBG("sock %p", sk);
290 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
292 rp = kmalloc(rp_size, GFP_KERNEL);
296 rp->num_commands = __constant_cpu_to_le16(num_commands);
297 rp->num_events = __constant_cpu_to_le16(num_events);
299 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
300 put_unaligned_le16(mgmt_commands[i], opcode);
302 for (i = 0; i < num_events; i++, opcode++)
303 put_unaligned_le16(mgmt_events[i], opcode);
305 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
312 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
315 struct mgmt_rp_read_index_list *rp;
321 BT_DBG("sock %p", sk);
323 read_lock(&hci_dev_list_lock);
326 list_for_each_entry(d, &hci_dev_list, list) {
327 if (d->dev_type == HCI_BREDR)
331 rp_len = sizeof(*rp) + (2 * count);
332 rp = kmalloc(rp_len, GFP_ATOMIC);
334 read_unlock(&hci_dev_list_lock);
339 list_for_each_entry(d, &hci_dev_list, list) {
340 if (test_bit(HCI_SETUP, &d->dev_flags))
343 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
346 if (d->dev_type == HCI_BREDR) {
347 rp->index[count++] = cpu_to_le16(d->id);
348 BT_DBG("Added hci%u", d->id);
352 rp->num_controllers = cpu_to_le16(count);
353 rp_len = sizeof(*rp) + (2 * count);
355 read_unlock(&hci_dev_list_lock);
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
365 static u32 get_supported_settings(struct hci_dev *hdev)
369 settings |= MGMT_SETTING_POWERED;
370 settings |= MGMT_SETTING_PAIRABLE;
371 settings |= MGMT_SETTING_DEBUG_KEYS;
373 if (lmp_bredr_capable(hdev)) {
374 settings |= MGMT_SETTING_CONNECTABLE;
375 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
376 settings |= MGMT_SETTING_FAST_CONNECTABLE;
377 settings |= MGMT_SETTING_DISCOVERABLE;
378 settings |= MGMT_SETTING_BREDR;
379 settings |= MGMT_SETTING_LINK_SECURITY;
381 if (lmp_ssp_capable(hdev)) {
382 settings |= MGMT_SETTING_SSP;
383 settings |= MGMT_SETTING_HS;
386 if (lmp_sc_capable(hdev) ||
387 test_bit(HCI_FORCE_SC, &hdev->dev_flags))
388 settings |= MGMT_SETTING_SECURE_CONN;
391 if (lmp_le_capable(hdev)) {
392 settings |= MGMT_SETTING_LE;
393 settings |= MGMT_SETTING_ADVERTISING;
394 settings |= MGMT_SETTING_PRIVACY;
400 static u32 get_current_settings(struct hci_dev *hdev)
404 if (hdev_is_powered(hdev))
405 settings |= MGMT_SETTING_POWERED;
407 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
408 settings |= MGMT_SETTING_CONNECTABLE;
410 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
411 settings |= MGMT_SETTING_FAST_CONNECTABLE;
413 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
414 settings |= MGMT_SETTING_DISCOVERABLE;
416 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
417 settings |= MGMT_SETTING_PAIRABLE;
419 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
420 settings |= MGMT_SETTING_BREDR;
422 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
423 settings |= MGMT_SETTING_LE;
425 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
426 settings |= MGMT_SETTING_LINK_SECURITY;
428 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
429 settings |= MGMT_SETTING_SSP;
431 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
432 settings |= MGMT_SETTING_HS;
434 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
435 settings |= MGMT_SETTING_ADVERTISING;
437 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
438 settings |= MGMT_SETTING_SECURE_CONN;
440 if (test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags))
441 settings |= MGMT_SETTING_DEBUG_KEYS;
443 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
444 settings |= MGMT_SETTING_PRIVACY;
/* SDP service class of the PnP Information record (excluded from EIR) */
#define PNP_INFO_SVCLASS_ID		0x1200
451 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
453 u8 *ptr = data, *uuids_start = NULL;
454 struct bt_uuid *uuid;
459 list_for_each_entry(uuid, &hdev->uuids, list) {
462 if (uuid->size != 16)
465 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
469 if (uuid16 == PNP_INFO_SVCLASS_ID)
475 uuids_start[1] = EIR_UUID16_ALL;
479 /* Stop if not enough space to put next UUID */
480 if ((ptr - data) + sizeof(u16) > len) {
481 uuids_start[1] = EIR_UUID16_SOME;
485 *ptr++ = (uuid16 & 0x00ff);
486 *ptr++ = (uuid16 & 0xff00) >> 8;
487 uuids_start[0] += sizeof(uuid16);
493 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
495 u8 *ptr = data, *uuids_start = NULL;
496 struct bt_uuid *uuid;
501 list_for_each_entry(uuid, &hdev->uuids, list) {
502 if (uuid->size != 32)
508 uuids_start[1] = EIR_UUID32_ALL;
512 /* Stop if not enough space to put next UUID */
513 if ((ptr - data) + sizeof(u32) > len) {
514 uuids_start[1] = EIR_UUID32_SOME;
518 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
520 uuids_start[0] += sizeof(u32);
526 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
528 u8 *ptr = data, *uuids_start = NULL;
529 struct bt_uuid *uuid;
534 list_for_each_entry(uuid, &hdev->uuids, list) {
535 if (uuid->size != 128)
541 uuids_start[1] = EIR_UUID128_ALL;
545 /* Stop if not enough space to put next UUID */
546 if ((ptr - data) + 16 > len) {
547 uuids_start[1] = EIR_UUID128_SOME;
551 memcpy(ptr, uuid->uuid, 16);
553 uuids_start[0] += 16;
559 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
561 struct pending_cmd *cmd;
563 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
564 if (cmd->opcode == opcode)
571 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
576 name_len = strlen(hdev->dev_name);
578 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
580 if (name_len > max_len) {
582 ptr[1] = EIR_NAME_SHORT;
584 ptr[1] = EIR_NAME_COMPLETE;
586 ptr[0] = name_len + 1;
588 memcpy(ptr + 2, hdev->dev_name, name_len);
590 ad_len += (name_len + 2);
591 ptr += (name_len + 2);
597 static void update_scan_rsp_data(struct hci_request *req)
599 struct hci_dev *hdev = req->hdev;
600 struct hci_cp_le_set_scan_rsp_data cp;
603 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
606 memset(&cp, 0, sizeof(cp));
608 len = create_scan_rsp_data(hdev, cp.data);
610 if (hdev->scan_rsp_data_len == len &&
611 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
614 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
615 hdev->scan_rsp_data_len = len;
619 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
622 static u8 get_adv_discov_flags(struct hci_dev *hdev)
624 struct pending_cmd *cmd;
626 /* If there's a pending mgmt command the flags will not yet have
627 * their final values, so check for this first.
629 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
631 struct mgmt_mode *cp = cmd->param;
633 return LE_AD_GENERAL;
634 else if (cp->val == 0x02)
635 return LE_AD_LIMITED;
637 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
638 return LE_AD_LIMITED;
639 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
640 return LE_AD_GENERAL;
646 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
648 u8 ad_len = 0, flags = 0;
650 flags |= get_adv_discov_flags(hdev);
652 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
653 flags |= LE_AD_NO_BREDR;
656 BT_DBG("adv flags 0x%02x", flags);
666 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
668 ptr[1] = EIR_TX_POWER;
669 ptr[2] = (u8) hdev->adv_tx_power;
678 static void update_adv_data(struct hci_request *req)
680 struct hci_dev *hdev = req->hdev;
681 struct hci_cp_le_set_adv_data cp;
684 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
687 memset(&cp, 0, sizeof(cp));
689 len = create_adv_data(hdev, cp.data);
691 if (hdev->adv_data_len == len &&
692 memcmp(cp.data, hdev->adv_data, len) == 0)
695 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
696 hdev->adv_data_len = len;
700 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
703 static void create_eir(struct hci_dev *hdev, u8 *data)
708 name_len = strlen(hdev->dev_name);
714 ptr[1] = EIR_NAME_SHORT;
716 ptr[1] = EIR_NAME_COMPLETE;
718 /* EIR Data length */
719 ptr[0] = name_len + 1;
721 memcpy(ptr + 2, hdev->dev_name, name_len);
723 ptr += (name_len + 2);
726 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
728 ptr[1] = EIR_TX_POWER;
729 ptr[2] = (u8) hdev->inq_tx_power;
734 if (hdev->devid_source > 0) {
736 ptr[1] = EIR_DEVICE_ID;
738 put_unaligned_le16(hdev->devid_source, ptr + 2);
739 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
740 put_unaligned_le16(hdev->devid_product, ptr + 6);
741 put_unaligned_le16(hdev->devid_version, ptr + 8);
746 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
747 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
748 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
751 static void update_eir(struct hci_request *req)
753 struct hci_dev *hdev = req->hdev;
754 struct hci_cp_write_eir cp;
756 if (!hdev_is_powered(hdev))
759 if (!lmp_ext_inq_capable(hdev))
762 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
765 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
768 memset(&cp, 0, sizeof(cp));
770 create_eir(hdev, cp.data);
772 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
775 memcpy(hdev->eir, cp.data, sizeof(cp.data));
777 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
780 static u8 get_service_classes(struct hci_dev *hdev)
782 struct bt_uuid *uuid;
785 list_for_each_entry(uuid, &hdev->uuids, list)
786 val |= uuid->svc_hint;
791 static void update_class(struct hci_request *req)
793 struct hci_dev *hdev = req->hdev;
796 BT_DBG("%s", hdev->name);
798 if (!hdev_is_powered(hdev))
801 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
804 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
807 cod[0] = hdev->minor_class;
808 cod[1] = hdev->major_class;
809 cod[2] = get_service_classes(hdev);
811 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
814 if (memcmp(cod, hdev->dev_class, 3) == 0)
817 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
820 static bool get_connectable(struct hci_dev *hdev)
822 struct pending_cmd *cmd;
824 /* If there's a pending mgmt command the flag will not yet have
825 * it's final value, so check for this first.
827 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
829 struct mgmt_mode *cp = cmd->param;
833 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
836 static void enable_advertising(struct hci_request *req)
838 struct hci_dev *hdev = req->hdev;
839 struct hci_cp_le_set_adv_param cp;
840 u8 own_addr_type, enable = 0x01;
843 connectable = get_connectable(hdev);
845 /* Set require_privacy to true only when non-connectable
846 * advertising is used. In that case it is fine to use a
847 * non-resolvable private address.
849 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
852 memset(&cp, 0, sizeof(cp));
853 cp.min_interval = __constant_cpu_to_le16(0x0800);
854 cp.max_interval = __constant_cpu_to_le16(0x0800);
855 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
856 cp.own_address_type = own_addr_type;
857 cp.channel_map = hdev->le_adv_channel_map;
859 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
861 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
864 static void disable_advertising(struct hci_request *req)
868 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
871 static void service_cache_off(struct work_struct *work)
873 struct hci_dev *hdev = container_of(work, struct hci_dev,
875 struct hci_request req;
877 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
880 hci_req_init(&req, hdev);
887 hci_dev_unlock(hdev);
889 hci_req_run(&req, NULL);
892 static void rpa_expired(struct work_struct *work)
894 struct hci_dev *hdev = container_of(work, struct hci_dev,
896 struct hci_request req;
900 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
902 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
903 hci_conn_num(hdev, LE_LINK) > 0)
906 /* The generation of a new RPA and programming it into the
907 * controller happens in the enable_advertising() function.
910 hci_req_init(&req, hdev);
912 disable_advertising(&req);
913 enable_advertising(&req);
915 hci_req_run(&req, NULL);
918 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
920 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
923 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
924 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
926 /* Non-mgmt controlled devices get this bit set
927 * implicitly so that pairing works for them, however
928 * for mgmt we require user-space to explicitly enable
931 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
934 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
935 void *data, u16 data_len)
937 struct mgmt_rp_read_info rp;
939 BT_DBG("sock %p %s", sk, hdev->name);
943 memset(&rp, 0, sizeof(rp));
945 bacpy(&rp.bdaddr, &hdev->bdaddr);
947 rp.version = hdev->hci_ver;
948 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
950 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
951 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
953 memcpy(rp.dev_class, hdev->dev_class, 3);
955 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
956 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
958 hci_dev_unlock(hdev);
960 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
964 static void mgmt_pending_free(struct pending_cmd *cmd)
971 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
972 struct hci_dev *hdev, void *data,
975 struct pending_cmd *cmd;
977 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
981 cmd->opcode = opcode;
982 cmd->index = hdev->id;
984 cmd->param = kmalloc(len, GFP_KERNEL);
991 memcpy(cmd->param, data, len);
996 list_add(&cmd->list, &hdev->mgmt_pending);
1001 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1002 void (*cb)(struct pending_cmd *cmd,
1006 struct pending_cmd *cmd, *tmp;
1008 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1009 if (opcode > 0 && cmd->opcode != opcode)
1016 static void mgmt_pending_remove(struct pending_cmd *cmd)
1018 list_del(&cmd->list);
1019 mgmt_pending_free(cmd);
1022 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1024 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1026 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1030 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1032 BT_DBG("%s status 0x%02x", hdev->name, status);
1034 if (hci_conn_count(hdev) == 0) {
1035 cancel_delayed_work(&hdev->power_off);
1036 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1040 static int clean_up_hci_state(struct hci_dev *hdev)
1042 struct hci_request req;
1043 struct hci_conn *conn;
1045 hci_req_init(&req, hdev);
1047 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1048 test_bit(HCI_PSCAN, &hdev->flags)) {
1050 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1053 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1054 disable_advertising(&req);
1056 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1057 hci_req_add_le_scan_disable(&req);
1060 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1061 struct hci_cp_disconnect dc;
1062 struct hci_cp_reject_conn_req rej;
1064 switch (conn->state) {
1067 dc.handle = cpu_to_le16(conn->handle);
1068 dc.reason = 0x15; /* Terminated due to Power Off */
1069 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1072 if (conn->type == LE_LINK)
1073 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1075 else if (conn->type == ACL_LINK)
1076 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1080 bacpy(&rej.bdaddr, &conn->dst);
1081 rej.reason = 0x15; /* Terminated due to Power Off */
1082 if (conn->type == ACL_LINK)
1083 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1085 else if (conn->type == SCO_LINK)
1086 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1092 return hci_req_run(&req, clean_up_hci_complete);
1095 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1098 struct mgmt_mode *cp = data;
1099 struct pending_cmd *cmd;
1102 BT_DBG("request for %s", hdev->name);
1104 if (cp->val != 0x00 && cp->val != 0x01)
1105 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1106 MGMT_STATUS_INVALID_PARAMS);
1110 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1111 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1116 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1117 cancel_delayed_work(&hdev->power_off);
1120 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1122 err = mgmt_powered(hdev, 1);
1127 if (!!cp->val == hdev_is_powered(hdev)) {
1128 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1132 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1139 queue_work(hdev->req_workqueue, &hdev->power_on);
1142 /* Disconnect connections, stop scans, etc */
1143 err = clean_up_hci_state(hdev);
1145 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1146 HCI_POWER_OFF_TIMEOUT);
1148 /* ENODATA means there were no HCI commands queued */
1149 if (err == -ENODATA) {
1150 cancel_delayed_work(&hdev->power_off);
1151 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1157 hci_dev_unlock(hdev);
1161 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
1162 struct sock *skip_sk)
1164 struct sk_buff *skb;
1165 struct mgmt_hdr *hdr;
1167 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
1171 hdr = (void *) skb_put(skb, sizeof(*hdr));
1172 hdr->opcode = cpu_to_le16(event);
1174 hdr->index = cpu_to_le16(hdev->id);
1176 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
1177 hdr->len = cpu_to_le16(data_len);
1180 memcpy(skb_put(skb, data_len), data, data_len);
1183 __net_timestamp(skb);
1185 hci_send_to_control(skb, skip_sk);
1191 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1195 ev = cpu_to_le32(get_current_settings(hdev));
1197 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1202 struct hci_dev *hdev;
1206 static void settings_rsp(struct pending_cmd *cmd, void *data)
1208 struct cmd_lookup *match = data;
1210 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1212 list_del(&cmd->list);
1214 if (match->sk == NULL) {
1215 match->sk = cmd->sk;
1216 sock_hold(match->sk);
1219 mgmt_pending_free(cmd);
1222 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1226 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1227 mgmt_pending_remove(cmd);
1230 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1232 if (!lmp_bredr_capable(hdev))
1233 return MGMT_STATUS_NOT_SUPPORTED;
1234 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1235 return MGMT_STATUS_REJECTED;
1237 return MGMT_STATUS_SUCCESS;
1240 static u8 mgmt_le_support(struct hci_dev *hdev)
1242 if (!lmp_le_capable(hdev))
1243 return MGMT_STATUS_NOT_SUPPORTED;
1244 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1245 return MGMT_STATUS_REJECTED;
1247 return MGMT_STATUS_SUCCESS;
1250 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1252 struct pending_cmd *cmd;
1253 struct mgmt_mode *cp;
1254 struct hci_request req;
1257 BT_DBG("status 0x%02x", status);
1261 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1266 u8 mgmt_err = mgmt_status(status);
1267 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1268 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1274 changed = !test_and_set_bit(HCI_DISCOVERABLE,
1277 if (hdev->discov_timeout > 0) {
1278 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1279 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1283 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1287 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1290 new_settings(hdev, cmd->sk);
1292 /* When the discoverable mode gets changed, make sure
1293 * that class of device has the limited discoverable
1294 * bit correctly set.
1296 hci_req_init(&req, hdev);
1298 hci_req_run(&req, NULL);
1301 mgmt_pending_remove(cmd);
1304 hci_dev_unlock(hdev);
1307 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1310 struct mgmt_cp_set_discoverable *cp = data;
1311 struct pending_cmd *cmd;
1312 struct hci_request req;
1317 BT_DBG("request for %s", hdev->name);
1319 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1320 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1321 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1322 MGMT_STATUS_REJECTED);
1324 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1325 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1326 MGMT_STATUS_INVALID_PARAMS);
1328 timeout = __le16_to_cpu(cp->timeout);
1330 /* Disabling discoverable requires that no timeout is set,
1331 * and enabling limited discoverable requires a timeout.
1333 if ((cp->val == 0x00 && timeout > 0) ||
1334 (cp->val == 0x02 && timeout == 0))
1335 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1336 MGMT_STATUS_INVALID_PARAMS);
1340 if (!hdev_is_powered(hdev) && timeout > 0) {
1341 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1342 MGMT_STATUS_NOT_POWERED);
1346 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1347 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1348 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1353 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1354 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1355 MGMT_STATUS_REJECTED);
1359 if (!hdev_is_powered(hdev)) {
1360 bool changed = false;
1362 /* Setting limited discoverable when powered off is
1363 * not a valid operation since it requires a timeout
1364 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1366 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1367 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1371 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1376 err = new_settings(hdev, sk);
1381 /* If the current mode is the same, then just update the timeout
1382 * value with the new value. And if only the timeout gets updated,
1383 * then no need for any HCI transactions.
1385 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1386 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1387 &hdev->dev_flags)) {
1388 cancel_delayed_work(&hdev->discov_off);
1389 hdev->discov_timeout = timeout;
1391 if (cp->val && hdev->discov_timeout > 0) {
1392 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1393 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1397 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1401 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1407 /* Cancel any potential discoverable timeout that might be
1408 * still active and store new timeout value. The arming of
1409 * the timeout happens in the complete handler.
1411 cancel_delayed_work(&hdev->discov_off);
1412 hdev->discov_timeout = timeout;
1414 /* Limited discoverable mode */
1415 if (cp->val == 0x02)
1416 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1418 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1420 hci_req_init(&req, hdev);
1422 /* The procedure for LE-only controllers is much simpler - just
1423 * update the advertising data.
1425 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1431 struct hci_cp_write_current_iac_lap hci_cp;
1433 if (cp->val == 0x02) {
1434 /* Limited discoverable mode */
1435 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
1436 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1437 hci_cp.iac_lap[1] = 0x8b;
1438 hci_cp.iac_lap[2] = 0x9e;
1439 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1440 hci_cp.iac_lap[4] = 0x8b;
1441 hci_cp.iac_lap[5] = 0x9e;
1443 /* General discoverable mode */
1445 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1446 hci_cp.iac_lap[1] = 0x8b;
1447 hci_cp.iac_lap[2] = 0x9e;
1450 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1451 (hci_cp.num_iac * 3) + 1, &hci_cp);
1453 scan |= SCAN_INQUIRY;
1455 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1458 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1461 update_adv_data(&req);
1463 err = hci_req_run(&req, set_discoverable_complete);
1465 mgmt_pending_remove(cmd);
1468 hci_dev_unlock(hdev);
/* Queue page-scan parameter changes on @req to toggle "fast
 * connectable" mode: interlaced page scan with a 160 msec interval
 * when @enable, standard scan with the default 1.28 sec interval
 * otherwise (window 11.25 msec in both cases). Commands are only
 * queued when the requested values differ from the controller's
 * current page scan settings; requires BR/EDR enabled and a
 * controller of at least Bluetooth 1.2.
 * NOTE(review): this extracted chunk has interior lines elided by
 * the tooling; code in this file is preserved byte-identical rather
 * than reconstructed.
 */
1472 static void write_fast_connectable(struct hci_request *req, bool enable)
1474 struct hci_dev *hdev = req->hdev;
1475 struct hci_cp_write_page_scan_activity acp;
1478 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1481 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1485 type = PAGE_SCAN_TYPE_INTERLACED;
1487 /* 160 msec page scan interval */
1488 acp.interval = __constant_cpu_to_le16(0x0100);
1490 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1492 /* default 1.28 sec page scan */
1493 acp.interval = __constant_cpu_to_le16(0x0800);
1496 acp.window = __constant_cpu_to_le16(0x0012);
/* Avoid redundant HCI traffic: only send when parameters change. */
1498 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1499 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1500 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1503 if (hdev->page_scan_type != type)
1504 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for Set Connectable. On controller
 * error, reports the mapped mgmt status to the pending command's
 * socket; on success, updates the HCI_CONNECTABLE flag according to
 * the requested mode, sends the settings response and — when the flag
 * actually changed — a New Settings event. The pending command is
 * removed in all cases.
 */
1507 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1509 struct pending_cmd *cmd;
1510 struct mgmt_mode *cp;
1513 BT_DBG("status 0x%02x", status);
1517 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1522 u8 mgmt_err = mgmt_status(status);
1523 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1529 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1531 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1533 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1536 new_settings(hdev, cmd->sk);
1539 mgmt_pending_remove(cmd);
1542 hci_dev_unlock(hdev);
/* Update the HCI_CONNECTABLE setting without issuing HCI commands
 * (used when no controller interaction is needed, e.g. powered off).
 * Disabling connectable also clears HCI_DISCOVERABLE, since a device
 * cannot be discoverable while not connectable. Sends the settings
 * response and, if the flag changed, a New Settings event.
 */
1545 static int set_connectable_update_settings(struct hci_dev *hdev,
1546 struct sock *sk, u8 val)
1548 bool changed = false;
1551 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1555 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1557 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1558 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1561 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1566 return new_settings(hdev, sk);
/* Set Connectable mgmt command handler. Rejects the request if
 * neither BR/EDR nor LE is enabled or if val is not 0x00/0x01.
 * When powered off, only the stored settings are updated. Otherwise
 * a pending command is registered and an HCI request built: for
 * LE-only configurations the advertising data is refreshed (clearing
 * discoverable flags on disable), for BR/EDR the scan enable is
 * written, the discoverable timeout cancelled when leaving inquiry
 * scan, fast connectable disabled across the transition, and
 * advertising re-toggled if active with no LE links.
 */
1571 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1574 struct mgmt_mode *cp = data;
1575 struct pending_cmd *cmd;
1576 struct hci_request req;
1580 BT_DBG("request for %s", hdev->name);
1582 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1583 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1584 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1585 MGMT_STATUS_REJECTED);
1587 if (cp->val != 0x00 && cp->val != 0x01)
1588 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1589 MGMT_STATUS_INVALID_PARAMS);
1593 if (!hdev_is_powered(hdev)) {
1594 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Only one connectable/discoverable change may be in flight. */
1598 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1599 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1600 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1605 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1611 hci_req_init(&req, hdev);
1613 /* If BR/EDR is not enabled and we disable advertising as a
1614 * by-product of disabling connectable, we need to update the
1615 * advertising flags.
1617 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1619 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1620 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1622 update_adv_data(&req);
1623 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
1629 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1630 hdev->discov_timeout > 0)
1631 cancel_delayed_work(&hdev->discov_off);
1634 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1637 /* If we're going from non-connectable to connectable or
1638 * vice-versa when fast connectable is enabled ensure that fast
1639 * connectable gets disabled. write_fast_connectable won't do
1640 * anything if the page scan parameters are already what they
1643 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1644 write_fast_connectable(&req, false);
/* Restart advertising so the new connectable mode takes effect,
 * but only if no LE connection would be disturbed.
 */
1646 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1647 hci_conn_num(hdev, LE_LINK) == 0) {
1648 disable_advertising(&req);
1649 enable_advertising(&req);
1652 err = hci_req_run(&req, set_connectable_complete);
1654 mgmt_pending_remove(cmd);
/* -ENODATA means the request contained no commands; fall back to
 * a pure settings update.
 */
1655 if (err == -ENODATA)
1656 err = set_connectable_update_settings(hdev, sk,
1662 hci_dev_unlock(hdev);
/* Set Pairable mgmt command handler. Purely a host-side flag: toggles
 * HCI_PAIRABLE (validating val is 0x00/0x01), sends the settings
 * response and emits New Settings when the flag actually changed.
 * No HCI commands are required.
 */
1666 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1669 struct mgmt_mode *cp = data;
1673 BT_DBG("request for %s", hdev->name);
1675 if (cp->val != 0x00 && cp->val != 0x01)
1676 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1677 MGMT_STATUS_INVALID_PARAMS);
1682 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1684 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1686 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1691 err = new_settings(hdev, sk);
1694 hci_dev_unlock(hdev);
/* Set Link Security mgmt command handler. Requires BR/EDR support and
 * val of 0x00/0x01. Powered off: just flips HCI_LINK_SECURITY and
 * reports. Powered on: rejects if a Set Link Security is already
 * pending, short-circuits if the controller's HCI_AUTH state already
 * matches, otherwise registers a pending command and sends
 * HCI Write Authentication Enable.
 */
1698 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1701 struct mgmt_mode *cp = data;
1702 struct pending_cmd *cmd;
1706 BT_DBG("request for %s", hdev->name);
1708 status = mgmt_bredr_support(hdev);
1710 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1713 if (cp->val != 0x00 && cp->val != 0x01)
1714 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1715 MGMT_STATUS_INVALID_PARAMS);
1719 if (!hdev_is_powered(hdev)) {
1720 bool changed = false;
1722 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1723 &hdev->dev_flags)) {
1724 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1728 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1733 err = new_settings(hdev, sk);
1738 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1739 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state: no HCI needed. */
1746 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1747 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1751 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1757 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1759 mgmt_pending_remove(cmd);
1764 hci_dev_unlock(hdev);
/* Set Secure Simple Pairing mgmt command handler. Requires BR/EDR and
 * LMP SSP support; val must be 0x00/0x01. Powered off: updates
 * HCI_SSP_ENABLED (disabling SSP also clears HCI_HS_ENABLED, since
 * High Speed depends on SSP) and reports. Powered on: rejects if a
 * Set SSP or Set HS command is pending, short-circuits if the flag
 * already matches, otherwise sends HCI Write Simple Pairing Mode with
 * a pending command registered.
 */
1768 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1770 struct mgmt_mode *cp = data;
1771 struct pending_cmd *cmd;
1775 BT_DBG("request for %s", hdev->name);
1777 status = mgmt_bredr_support(hdev);
1779 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1781 if (!lmp_ssp_capable(hdev))
1782 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1783 MGMT_STATUS_NOT_SUPPORTED);
1785 if (cp->val != 0x00 && cp->val != 0x01)
1786 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1787 MGMT_STATUS_INVALID_PARAMS);
1791 if (!hdev_is_powered(hdev)) {
1795 changed = !test_and_set_bit(HCI_SSP_ENABLED,
1798 changed = test_and_clear_bit(HCI_SSP_ENABLED,
1801 changed = test_and_clear_bit(HCI_HS_ENABLED,
1804 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1807 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1812 err = new_settings(hdev, sk);
1817 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
1818 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
1819 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1824 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1825 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1829 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1835 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1837 mgmt_pending_remove(cmd);
1842 hci_dev_unlock(hdev);
/* Set High Speed mgmt command handler. Requires BR/EDR, LMP SSP
 * capability and SSP currently enabled; val must be 0x00/0x01.
 * High Speed is a host-side flag only — HCI_HS_ENABLED is toggled and
 * the result reported, with New Settings emitted on change. Enabling
 * while powered on is rejected (see the powered check below).
 */
1846 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1848 struct mgmt_mode *cp = data;
1853 BT_DBG("request for %s", hdev->name);
1855 status = mgmt_bredr_support(hdev);
1857 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1859 if (!lmp_ssp_capable(hdev))
1860 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1861 MGMT_STATUS_NOT_SUPPORTED);
1863 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
1864 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1865 MGMT_STATUS_REJECTED);
1867 if (cp->val != 0x00 && cp->val != 0x01)
1868 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1869 MGMT_STATUS_INVALID_PARAMS);
1874 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1876 if (hdev_is_powered(hdev)) {
1877 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1878 MGMT_STATUS_REJECTED);
1882 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1885 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1890 err = new_settings(hdev, sk);
1893 hci_dev_unlock(hdev);
/* HCI request completion handler for Set LE. On error, fails all
 * pending Set LE commands with the mapped status; on success, answers
 * them with the new settings and broadcasts New Settings. When LE has
 * just been enabled, also refreshes advertising and scan response
 * data so the controller carries sane defaults.
 */
1897 static void le_enable_complete(struct hci_dev *hdev, u8 status)
1899 struct cmd_lookup match = { NULL, hdev };
1902 u8 mgmt_err = mgmt_status(status);
1904 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1909 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
1911 new_settings(hdev, match.sk);
1916 /* Make sure the controller has a good default for
1917 * advertising data. Restrict the update to when LE
1918 * has actually been enabled. During power on, the
1919 * update in powered_update_hci will take care of it.
1921 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1922 struct hci_request req;
1926 hci_req_init(&req, hdev);
1927 update_adv_data(&req);
1928 update_scan_rsp_data(&req);
1929 hci_req_run(&req, NULL);
1931 hci_dev_unlock(hdev);
/* Set Low Energy mgmt command handler. Requires LMP LE capability,
 * val of 0x00/0x01, and rejects toggling on LE-only controllers
 * (BR/EDR disabled). When powered off or when the host-LE state
 * already matches, only the HCI_LE_ENABLED flag (and HCI_ADVERTISING
 * on disable) is updated. Otherwise, with no conflicting Set LE /
 * Set Advertising pending, an HCI request is built that disables
 * advertising if needed and writes LE Host Supported, completing via
 * le_enable_complete.
 */
1935 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1937 struct mgmt_mode *cp = data;
1938 struct hci_cp_write_le_host_supported hci_cp;
1939 struct pending_cmd *cmd;
1940 struct hci_request req;
1944 BT_DBG("request for %s", hdev->name);
1946 if (!lmp_le_capable(hdev))
1947 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1948 MGMT_STATUS_NOT_SUPPORTED);
1950 if (cp->val != 0x00 && cp->val != 0x01)
1951 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1952 MGMT_STATUS_INVALID_PARAMS);
1954 /* LE-only devices do not allow toggling LE on/off */
1955 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1956 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1957 MGMT_STATUS_REJECTED);
1962 enabled = lmp_host_le_capable(hdev);
1964 if (!hdev_is_powered(hdev) || val == enabled) {
1965 bool changed = false;
1967 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1968 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1972 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
1973 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1977 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1982 err = new_settings(hdev, sk);
1987 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
1988 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1989 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1994 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2000 hci_req_init(&req, hdev);
2002 memset(&hci_cp, 0, sizeof(hci_cp));
2006 hci_cp.simul = lmp_le_br_capable(hdev);
2008 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
2009 disable_advertising(&req);
2012 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2015 err = hci_req_run(&req, le_enable_complete);
2017 mgmt_pending_remove(cmd);
2020 hci_dev_unlock(hdev);
2024 /* This is a helper function to test for pending mgmt commands that can
2025 * cause CoD or EIR HCI commands. We can only allow one such pending
2026 * mgmt command at a time since otherwise we cannot easily track what
2027 * the current values are, will be, and based on that calculate if a new
2028 * HCI command needs to be sent and if yes with what value.
2030 static bool pending_eir_or_class(struct hci_dev *hdev)
2032 struct pending_cmd *cmd;
/* Walk all pending mgmt commands looking for one of the four
 * opcodes that may touch Class of Device or EIR data.
 */
2034 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2035 switch (cmd->opcode) {
2036 case MGMT_OP_ADD_UUID:
2037 case MGMT_OP_REMOVE_UUID:
2038 case MGMT_OP_SET_DEV_CLASS:
2039 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; used to detect UUIDs that can be
 * shortened to 16- or 32-bit form.
 */
2047 static const u8 bluetooth_base_uuid[] = {
2048 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2049 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Return the smallest representation size of a 128-bit UUID: compares
 * the low 12 bytes against the Bluetooth Base UUID and, on a match,
 * inspects the 32-bit value at offset 12 to decide between the short
 * forms. (Return values elided in this extract — presumably
 * 128/32/16 bits; confirm against upstream.)
 */
2052 static u8 get_uuid_size(const u8 *uuid)
2056 if (memcmp(uuid, bluetooth_base_uuid, 12))
2059 val = get_unaligned_le32(&uuid[12]);
/* Common completion for class-of-device related mgmt operations
 * (Add/Remove UUID, Set Device Class): finds the pending command for
 * @mgmt_op, replies with the mapped status and the current 3-byte
 * device class, and removes the pending entry.
 */
2066 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2068 struct pending_cmd *cmd;
2072 cmd = mgmt_pending_find(mgmt_op, hdev);
2076 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2077 hdev->dev_class, 3);
2079 mgmt_pending_remove(cmd);
2082 hci_dev_unlock(hdev);
/* HCI request completion for Add UUID; delegates to the common
 * class-complete helper.
 */
2085 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2087 BT_DBG("status 0x%02x", status);
2089 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* Add UUID mgmt command handler. Rejects if another EIR/CoD-affecting
 * command is pending. Allocates a bt_uuid entry (16-byte UUID, service
 * hint, computed short-form size), appends it to hdev->uuids, and runs
 * an HCI request to refresh class/EIR. If the request is empty
 * (-ENODATA) the current device class is returned immediately;
 * otherwise a pending command tracks completion.
 */
2092 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2094 struct mgmt_cp_add_uuid *cp = data;
2095 struct pending_cmd *cmd;
2096 struct hci_request req;
2097 struct bt_uuid *uuid;
2100 BT_DBG("request for %s", hdev->name);
2104 if (pending_eir_or_class(hdev)) {
2105 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2110 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2116 memcpy(uuid->uuid, cp->uuid, 16);
2117 uuid->svc_hint = cp->svc_hint;
2118 uuid->size = get_uuid_size(cp->uuid);
2120 list_add_tail(&uuid->list, &hdev->uuids);
2122 hci_req_init(&req, hdev);
2127 err = hci_req_run(&req, add_uuid_complete);
2129 if (err != -ENODATA)
2132 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2133 hdev->dev_class, 3);
2137 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2146 hci_dev_unlock(hdev);
/* Arm the service cache: when the adapter is powered and the
 * HCI_SERVICE_CACHE flag was not already set, schedules the delayed
 * service_cache work. Return value indicates whether caching applies
 * (exact returns elided in this extract).
 */
2150 static bool enable_service_cache(struct hci_dev *hdev)
2152 if (!hdev_is_powered(hdev))
2155 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2156 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for Remove UUID; delegates to the common
 * class-complete helper.
 */
2164 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2166 BT_DBG("status 0x%02x", status);
2168 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* Remove UUID mgmt command handler. Rejects if another EIR/CoD
 * command is pending. The all-zero UUID clears the whole list and may
 * arm the service cache instead of touching the controller. Otherwise
 * every matching entry is unlinked; no match yields INVALID_PARAMS.
 * Finally an HCI request refreshes class/EIR, mirroring add_uuid's
 * -ENODATA shortcut and pending-command handling.
 */
2171 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2174 struct mgmt_cp_remove_uuid *cp = data;
2175 struct pending_cmd *cmd;
2176 struct bt_uuid *match, *tmp;
2177 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2178 struct hci_request req;
2181 BT_DBG("request for %s", hdev->name);
2185 if (pending_eir_or_class(hdev)) {
2186 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2191 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2192 hci_uuids_clear(hdev);
2194 if (enable_service_cache(hdev)) {
2195 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2196 0, hdev->dev_class, 3);
2205 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2206 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2209 list_del(&match->list);
2215 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2216 MGMT_STATUS_INVALID_PARAMS);
2221 hci_req_init(&req, hdev);
2226 err = hci_req_run(&req, remove_uuid_complete);
2228 if (err != -ENODATA)
2231 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2232 hdev->dev_class, 3);
2236 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2245 hci_dev_unlock(hdev);
/* HCI request completion for Set Device Class; delegates to the
 * common class-complete helper.
 */
2249 static void set_class_complete(struct hci_dev *hdev, u8 status)
2251 BT_DBG("status 0x%02x", status);
2253 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* Set Device Class mgmt command handler. Requires BR/EDR capability
 * and no pending EIR/CoD command. Validates the reserved bits (low 2
 * of minor, high 3 of major must be zero), stores major/minor, and —
 * when powered — flushes any armed service cache before issuing the
 * HCI request. Same -ENODATA shortcut and pending-command pattern as
 * the UUID handlers.
 */
2256 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2259 struct mgmt_cp_set_dev_class *cp = data;
2260 struct pending_cmd *cmd;
2261 struct hci_request req;
2264 BT_DBG("request for %s", hdev->name);
2266 if (!lmp_bredr_capable(hdev))
2267 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2268 MGMT_STATUS_NOT_SUPPORTED);
2272 if (pending_eir_or_class(hdev)) {
2273 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2278 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2279 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2280 MGMT_STATUS_INVALID_PARAMS);
2284 hdev->major_class = cp->major;
2285 hdev->minor_class = cp->minor;
2287 if (!hdev_is_powered(hdev)) {
2288 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2289 hdev->dev_class, 3);
2293 hci_req_init(&req, hdev);
/* Drop the lock around cancel_delayed_work_sync to avoid
 * deadlocking with the service_cache work itself.
 */
2295 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2296 hci_dev_unlock(hdev);
2297 cancel_delayed_work_sync(&hdev->service_cache);
2304 err = hci_req_run(&req, set_class_complete);
2306 if (err != -ENODATA)
2309 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2310 hdev->dev_class, 3);
2314 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2323 hci_dev_unlock(hdev);
/* Load Link Keys mgmt command handler. Requires BR/EDR. Validates
 * that the payload length matches key_count, that debug_keys is a
 * boolean, and that every key is BR/EDR-typed with type <= 0x08,
 * before clearing the existing store, updating HCI_DEBUG_KEYS
 * (broadcasting New Settings on change) and adding each key.
 * Always replies with an empty command-complete.
 */
2327 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2330 struct mgmt_cp_load_link_keys *cp = data;
2331 u16 key_count, expected_len;
2335 BT_DBG("request for %s", hdev->name);
2337 if (!lmp_bredr_capable(hdev))
2338 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2339 MGMT_STATUS_NOT_SUPPORTED);
2341 key_count = __le16_to_cpu(cp->key_count);
2343 expected_len = sizeof(*cp) + key_count *
2344 sizeof(struct mgmt_link_key_info);
2345 if (expected_len != len) {
2346 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2348 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2349 MGMT_STATUS_INVALID_PARAMS);
2352 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2353 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2354 MGMT_STATUS_INVALID_PARAMS);
2356 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate all keys before touching the key store. */
2359 for (i = 0; i < key_count; i++) {
2360 struct mgmt_link_key_info *key = &cp->keys[i];
2362 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2363 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2364 MGMT_STATUS_INVALID_PARAMS);
2369 hci_link_keys_clear(hdev);
2372 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2374 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
2377 new_settings(hdev, NULL);
2379 for (i = 0; i < key_count; i++) {
2380 struct mgmt_link_key_info *key = &cp->keys[i];
2382 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
2383 key->type, key->pin_len);
2386 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2388 hci_dev_unlock(hdev);
/* Emit a Device Unpaired mgmt event for @bdaddr/@addr_type to all
 * mgmt sockets except @skip_sk (typically the requester, who gets a
 * command response instead).
 */
2393 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2394 u8 addr_type, struct sock *skip_sk)
2396 struct mgmt_ev_device_unpaired ev;
2398 bacpy(&ev.addr.bdaddr, bdaddr);
2399 ev.addr.type = addr_type;
2401 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* Unpair Device mgmt command handler. Validates address type and the
 * disconnect flag, requires the adapter powered. Removes the stored
 * credentials: link key for BR/EDR, or IRK/connection-params/LTK for
 * LE (mapping BDADDR_LE_* to ADDR_LE_DEV_*). NOT_PAIRED if nothing
 * was stored. When disconnect is requested and a matching connection
 * exists, queues HCI Disconnect (0x13, remote user terminated) under
 * a pending command; otherwise completes immediately and emits
 * Device Unpaired.
 */
2405 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2408 struct mgmt_cp_unpair_device *cp = data;
2409 struct mgmt_rp_unpair_device rp;
2410 struct hci_cp_disconnect dc;
2411 struct pending_cmd *cmd;
2412 struct hci_conn *conn;
2415 memset(&rp, 0, sizeof(rp));
2416 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2417 rp.addr.type = cp->addr.type;
2419 if (!bdaddr_type_is_valid(cp->addr.type))
2420 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2421 MGMT_STATUS_INVALID_PARAMS,
2424 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2425 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2426 MGMT_STATUS_INVALID_PARAMS,
2431 if (!hdev_is_powered(hdev)) {
2432 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2433 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2437 if (cp->addr.type == BDADDR_BREDR) {
2438 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2442 if (cp->addr.type == BDADDR_LE_PUBLIC)
2443 addr_type = ADDR_LE_DEV_PUBLIC;
2445 addr_type = ADDR_LE_DEV_RANDOM;
2447 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2449 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2451 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
2455 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2456 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2460 if (cp->disconnect) {
2461 if (cp->addr.type == BDADDR_BREDR)
2462 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2465 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
2472 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2474 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2478 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2485 dc.handle = cpu_to_le16(conn->handle);
2486 dc.reason = 0x13; /* Remote User Terminated Connection */
2487 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2489 mgmt_pending_remove(cmd);
2492 hci_dev_unlock(hdev);
/* Disconnect mgmt command handler. Validates the address type,
 * requires HCI_UP, rejects if a Disconnect is already pending, and
 * looks up the ACL (BR/EDR) or LE connection — NOT_CONNECTED if none
 * or if it is still in BT_OPEN/BT_CLOSED. Otherwise registers a
 * pending command and sends HCI Disconnect with the remote-user-
 * terminated reason.
 */
2496 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2499 struct mgmt_cp_disconnect *cp = data;
2500 struct mgmt_rp_disconnect rp;
2501 struct hci_cp_disconnect dc;
2502 struct pending_cmd *cmd;
2503 struct hci_conn *conn;
2508 memset(&rp, 0, sizeof(rp));
2509 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2510 rp.addr.type = cp->addr.type;
2512 if (!bdaddr_type_is_valid(cp->addr.type))
2513 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2514 MGMT_STATUS_INVALID_PARAMS,
2519 if (!test_bit(HCI_UP, &hdev->flags)) {
2520 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2521 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2525 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2526 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2527 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2531 if (cp->addr.type == BDADDR_BREDR)
2532 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2535 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2537 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2538 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2539 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2543 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2549 dc.handle = cpu_to_le16(conn->handle);
2550 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2552 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2554 mgmt_pending_remove(cmd);
2557 hci_dev_unlock(hdev);
/* Map an HCI link type + LE address type pair onto the mgmt BDADDR_*
 * address-type values: LE public -> BDADDR_LE_PUBLIC, other LE
 * addresses -> BDADDR_LE_RANDOM, everything else -> BDADDR_BREDR.
 */
2561 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2563 switch (link_type) {
2565 switch (addr_type) {
2566 case ADDR_LE_DEV_PUBLIC:
2567 return BDADDR_LE_PUBLIC;
2570 /* Fallback to LE Random address type */
2571 return BDADDR_LE_RANDOM;
2575 /* Fallback to BR/EDR type */
2576 return BDADDR_BREDR;
/* Get Connections mgmt command handler. Requires the adapter powered.
 * Counts mgmt-visible connections to size the response, then fills in
 * address/type for each, skipping SCO/eSCO links; the response length
 * is recomputed afterwards since the filtered count may be smaller
 * than the allocation.
 */
2580 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2583 struct mgmt_rp_get_connections *rp;
2593 if (!hdev_is_powered(hdev)) {
2594 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2595 MGMT_STATUS_NOT_POWERED);
2600 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2601 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2605 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2606 rp = kmalloc(rp_len, GFP_KERNEL);
2613 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2614 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2616 bacpy(&rp->addr[i].bdaddr, &c->dst);
2617 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2618 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2623 rp->conn_count = cpu_to_le16(i);
2625 /* Recalculate length in case of filtered SCO connections, etc */
2626 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2628 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2634 hci_dev_unlock(hdev);
/* Register a pending PIN Code Negative Reply command and send the
 * corresponding HCI command (bdaddr only); the pending entry is
 * dropped if the HCI send fails.
 */
2638 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2639 struct mgmt_cp_pin_code_neg_reply *cp)
2641 struct pending_cmd *cmd;
2644 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2649 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2650 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2652 mgmt_pending_remove(cmd);
/* PIN Code Reply mgmt command handler. Requires the adapter powered
 * and an existing ACL connection to the peer. If the connection needs
 * high security but the supplied PIN is not 16 bytes, a negative
 * reply is sent instead and the command fails with INVALID_PARAMS.
 * Otherwise the PIN is forwarded via HCI PIN Code Reply under a
 * pending command.
 */
2657 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2660 struct hci_conn *conn;
2661 struct mgmt_cp_pin_code_reply *cp = data;
2662 struct hci_cp_pin_code_reply reply;
2663 struct pending_cmd *cmd;
2670 if (!hdev_is_powered(hdev)) {
2671 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2672 MGMT_STATUS_NOT_POWERED);
2676 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2678 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2679 MGMT_STATUS_NOT_CONNECTED);
2683 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2684 struct mgmt_cp_pin_code_neg_reply ncp;
2686 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2688 BT_ERR("PIN code is not 16 bytes long");
2690 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2692 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2693 MGMT_STATUS_INVALID_PARAMS);
2698 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2704 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2705 reply.pin_len = cp->pin_len;
2706 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2708 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2710 mgmt_pending_remove(cmd);
2713 hci_dev_unlock(hdev);
/* Set IO Capability mgmt command handler. Stores the requested IO
 * capability on the device (host-side only, no HCI traffic) and
 * replies with an empty command-complete.
 */
2717 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2720 struct mgmt_cp_set_io_capability *cp = data;
2726 hdev->io_capability = cp->io_capability;
2728 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2729 hdev->io_capability);
2731 hci_dev_unlock(hdev);
2733 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending Pair Device command whose user_data is @conn,
 * scanning the device's mgmt_pending list.
 */
2737 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2739 struct hci_dev *hdev = conn->hdev;
2740 struct pending_cmd *cmd;
2742 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2743 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2746 if (cmd->user_data != conn)
/* Finish a Pair Device operation: reply to the requester with the
 * peer address and @status, detach all pairing callbacks from the
 * connection so no further events arrive, drop the connection
 * reference held for pairing and remove the pending command.
 */
2755 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2757 struct mgmt_rp_pair_device rp;
2758 struct hci_conn *conn = cmd->user_data;
2760 bacpy(&rp.addr.bdaddr, &conn->dst);
2761 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2763 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2766 /* So we don't get further callbacks for this connection */
2767 conn->connect_cfm_cb = NULL;
2768 conn->security_cfm_cb = NULL;
2769 conn->disconn_cfm_cb = NULL;
2771 hci_conn_drop(conn);
2773 mgmt_pending_remove(cmd);
/* Entry point called by the SMP layer when pairing finishes: maps the
 * boolean outcome to a mgmt status and completes the associated
 * Pair Device command, if one is pending for this connection.
 */
2776 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2778 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2779 struct pending_cmd *cmd;
2781 cmd = find_pairing(conn);
2783 pairing_complete(cmd, status);
/* hci_conn callback for BR/EDR pairing events: resolves the pending
 * Pair Device command and completes it with the mapped HCI status.
 */
2786 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2788 struct pending_cmd *cmd;
2790 BT_DBG("status %u", status);
2792 cmd = find_pairing(conn);
2794 BT_DBG("Unable to find a pending command");
2796 pairing_complete(cmd, mgmt_status(status));
/* hci_conn callback for LE pairing events; mirrors
 * pairing_complete_cb (the LE-specific early-out between the BT_DBG
 * and find_pairing is elided in this extract).
 */
2799 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2801 struct pending_cmd *cmd;
2803 BT_DBG("status %u", status);
2808 cmd = find_pairing(conn);
2810 BT_DBG("Unable to find a pending command");
2812 pairing_complete(cmd, mgmt_status(status));
/* Pair Device mgmt command handler. Validates the address type and
 * that the adapter is powered. Selects medium security and dedicated
 * bonding (MITM unless io_cap is 0x03 NoInputNoOutput), then initiates
 * an ACL or LE connection as appropriate. Connection errors map to
 * BUSY/CONNECT_FAILED; an existing connect_cfm_cb means pairing is
 * already in progress (BUSY). Otherwise a pending command is created,
 * the per-transport pairing callbacks installed, the peer io_cap
 * recorded, and — if the link is already connected and secure —
 * pairing is completed immediately.
 */
2815 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2818 struct mgmt_cp_pair_device *cp = data;
2819 struct mgmt_rp_pair_device rp;
2820 struct pending_cmd *cmd;
2821 u8 sec_level, auth_type;
2822 struct hci_conn *conn;
2827 memset(&rp, 0, sizeof(rp));
2828 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2829 rp.addr.type = cp->addr.type;
2831 if (!bdaddr_type_is_valid(cp->addr.type))
2832 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2833 MGMT_STATUS_INVALID_PARAMS,
2838 if (!hdev_is_powered(hdev)) {
2839 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2840 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2844 sec_level = BT_SECURITY_MEDIUM;
2845 if (cp->io_cap == 0x03)
2846 auth_type = HCI_AT_DEDICATED_BONDING;
2848 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2850 if (cp->addr.type == BDADDR_BREDR) {
2851 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2856 /* Convert from L2CAP channel address type to HCI address type
2858 if (cp->addr.type == BDADDR_LE_PUBLIC)
2859 addr_type = ADDR_LE_DEV_PUBLIC;
2861 addr_type = ADDR_LE_DEV_RANDOM;
2863 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
2864 sec_level, auth_type);
2870 if (PTR_ERR(conn) == -EBUSY)
2871 status = MGMT_STATUS_BUSY;
2873 status = MGMT_STATUS_CONNECT_FAILED;
2875 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2881 if (conn->connect_cfm_cb) {
2882 hci_conn_drop(conn);
2883 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2884 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2888 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2891 hci_conn_drop(conn);
2895 /* For LE, just connecting isn't a proof that the pairing finished */
2896 if (cp->addr.type == BDADDR_BREDR) {
2897 conn->connect_cfm_cb = pairing_complete_cb;
2898 conn->security_cfm_cb = pairing_complete_cb;
2899 conn->disconn_cfm_cb = pairing_complete_cb;
2901 conn->connect_cfm_cb = le_pairing_complete_cb;
2902 conn->security_cfm_cb = le_pairing_complete_cb;
2903 conn->disconn_cfm_cb = le_pairing_complete_cb;
2906 conn->io_capability = cp->io_cap;
2907 cmd->user_data = conn;
2909 if (conn->state == BT_CONNECTED &&
2910 hci_conn_security(conn, sec_level, auth_type))
2911 pairing_complete(cmd, 0);
2916 hci_dev_unlock(hdev);
/* Cancel Pair Device mgmt command handler. Requires the adapter
 * powered and a pending Pair Device command whose target matches the
 * supplied address; completes that pairing with CANCELLED and then
 * answers the cancel request with the address echoed back.
 */
2920 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2923 struct mgmt_addr_info *addr = data;
2924 struct pending_cmd *cmd;
2925 struct hci_conn *conn;
2932 if (!hdev_is_powered(hdev)) {
2933 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2934 MGMT_STATUS_NOT_POWERED);
2938 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2940 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2941 MGMT_STATUS_INVALID_PARAMS);
2945 conn = cmd->user_data;
2947 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2948 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2949 MGMT_STATUS_INVALID_PARAMS);
2953 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2955 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2956 addr, sizeof(*addr));
2958 hci_dev_unlock(hdev);
/* Common backend for the user confirmation / passkey (negative) reply
 * commands. Requires the adapter powered and a connection to the peer
 * (ACL for BR/EDR, LE link otherwise). LE addresses are answered via
 * SMP directly; BR/EDR registers a pending command and forwards the
 * reply over HCI — including the passkey for User Passkey Reply,
 * bdaddr only for the other ops.
 */
2962 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2963 struct mgmt_addr_info *addr, u16 mgmt_op,
2964 u16 hci_op, __le32 passkey)
2966 struct pending_cmd *cmd;
2967 struct hci_conn *conn;
2972 if (!hdev_is_powered(hdev)) {
2973 err = cmd_complete(sk, hdev->id, mgmt_op,
2974 MGMT_STATUS_NOT_POWERED, addr,
2979 if (addr->type == BDADDR_BREDR)
2980 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2982 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2985 err = cmd_complete(sk, hdev->id, mgmt_op,
2986 MGMT_STATUS_NOT_CONNECTED, addr,
2991 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2992 /* Continue with pairing via SMP */
2993 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
2996 err = cmd_complete(sk, hdev->id, mgmt_op,
2997 MGMT_STATUS_SUCCESS, addr,
3000 err = cmd_complete(sk, hdev->id, mgmt_op,
3001 MGMT_STATUS_FAILED, addr,
3007 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3013 /* Continue with pairing via HCI */
3014 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3015 struct hci_cp_user_passkey_reply cp;
3017 bacpy(&cp.bdaddr, &addr->bdaddr);
3018 cp.passkey = passkey;
3019 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3021 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3025 mgmt_pending_remove(cmd);
3028 hci_dev_unlock(hdev);
/* PIN Code Negative Reply mgmt handler: thin wrapper around
 * user_pairing_resp with the matching mgmt/HCI opcodes and no
 * passkey.
 */
3032 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3033 void *data, u16 len)
3035 struct mgmt_cp_pin_code_neg_reply *cp = data;
3039 return user_pairing_resp(sk, hdev, &cp->addr,
3040 MGMT_OP_PIN_CODE_NEG_REPLY,
3041 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* User Confirmation Reply mgmt handler: validates the exact payload
 * size, then delegates to user_pairing_resp with no passkey.
 */
3044 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3047 struct mgmt_cp_user_confirm_reply *cp = data;
3051 if (len != sizeof(*cp))
3052 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3053 MGMT_STATUS_INVALID_PARAMS);
3055 return user_pairing_resp(sk, hdev, &cp->addr,
3056 MGMT_OP_USER_CONFIRM_REPLY,
3057 HCI_OP_USER_CONFIRM_REPLY, 0);
/* User Confirmation Negative Reply mgmt handler: delegates to
 * user_pairing_resp with no passkey.
 */
3060 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3061 void *data, u16 len)
3063 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3067 return user_pairing_resp(sk, hdev, &cp->addr,
3068 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3069 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* User Passkey Reply mgmt handler: delegates to user_pairing_resp,
 * forwarding the supplied passkey.
 */
3072 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3075 struct mgmt_cp_user_passkey_reply *cp = data;
3079 return user_pairing_resp(sk, hdev, &cp->addr,
3080 MGMT_OP_USER_PASSKEY_REPLY,
3081 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* Handle MGMT_OP_USER_PASSKEY_NEG_REPLY: reject a passkey-entry pairing
 * request. Thin wrapper around user_pairing_resp(); passkey is unused (0).
 */
3084 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3085 void *data, u16 len)
3087 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3091 return user_pairing_resp(sk, hdev, &cp->addr,
3092 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3093 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command on the request, copying the
 * current hdev->dev_name into the command parameter. Caller is expected
 * to run the request afterwards.
 */
3096 static void update_name(struct hci_request *req)
3098 struct hci_dev *hdev = req->hdev;
3099 struct hci_cp_write_local_name cp;
3101 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3103 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request completion callback for Set Local Name. Looks up the
 * pending MGMT_OP_SET_LOCAL_NAME command and answers it: a command
 * status carrying the translated HCI error on failure, or a command
 * complete on success. The pending entry is removed either way.
 */
3106 static void set_name_complete(struct hci_dev *hdev, u8 status)
3108 struct mgmt_cp_set_local_name *cp;
3109 struct pending_cmd *cmd;
3111 BT_DBG("status 0x%02x", status);
3115 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3122 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3123 mgmt_status(status));
3125 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3128 mgmt_pending_remove(cmd);
3131 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_LOCAL_NAME: update the controller's friendly name
 * and short name.
 *
 * Fast paths: if the new names equal the stored ones, reply immediately
 * with command complete; if the adapter is powered off, just store the
 * names, reply, and emit MGMT_EV_LOCAL_NAME_CHANGED. Otherwise a pending
 * command is queued and an HCI request is built (name write for BR/EDR,
 * scan response update for LE) completing in set_name_complete().
 */
3134 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3137 struct mgmt_cp_set_local_name *cp = data;
3138 struct pending_cmd *cmd;
3139 struct hci_request req;
3146 /* If the old values are the same as the new ones just return a
3147 * direct command complete event.
3149 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3150 !memcmp(hdev->short_name, cp->short_name,
3151 sizeof(hdev->short_name))) {
3152 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3157 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3159 if (!hdev_is_powered(hdev)) {
3160 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3162 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3167 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3173 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3179 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3181 hci_req_init(&req, hdev);
3183 if (lmp_bredr_capable(hdev)) {
3188 /* The name is stored in the scan response data and so
3189 * no need to update the advertising data here.
3191 if (lmp_le_capable(hdev))
3192 update_scan_rsp_data(&req);
3194 err = hci_req_run(&req, set_name_complete);
3196 mgmt_pending_remove(cmd);
3199 hci_dev_unlock(hdev);
/* Handle MGMT_OP_READ_LOCAL_OOB_DATA: ask the controller for its local
 * out-of-band pairing data. Rejects the request when the adapter is not
 * powered, lacks SSP support, or a read is already pending. When Secure
 * Connections is enabled the extended variant of the HCI command is used
 * (returning both P-192 and P-256 values), otherwise the legacy one.
 */
3203 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3204 void *data, u16 data_len)
3206 struct pending_cmd *cmd;
3209 BT_DBG("%s", hdev->name);
3213 if (!hdev_is_powered(hdev)) {
3214 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3215 MGMT_STATUS_NOT_POWERED);
3219 if (!lmp_ssp_capable(hdev)) {
3220 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3221 MGMT_STATUS_NOT_SUPPORTED);
3225 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3226 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3231 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3237 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3238 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3241 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3244 mgmt_pending_remove(cmd);
3247 hci_dev_unlock(hdev);
/* Handle MGMT_OP_ADD_REMOTE_OOB_DATA: store out-of-band pairing data
 * received from a remote device. The command comes in two sizes — the
 * legacy form (hash + randomizer) and the extended Secure Connections
 * form — and is dispatched on the exact parameter length; any other
 * length is rejected with MGMT_STATUS_INVALID_PARAMS.
 */
3251 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3252 void *data, u16 len)
3256 BT_DBG("%s ", hdev->name);
3260 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3261 struct mgmt_cp_add_remote_oob_data *cp = data;
3264 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3265 cp->hash, cp->randomizer);
3267 status = MGMT_STATUS_FAILED;
3269 status = MGMT_STATUS_SUCCESS;
3271 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3272 status, &cp->addr, sizeof(cp->addr));
3273 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3274 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3277 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3283 status = MGMT_STATUS_FAILED;
3285 status = MGMT_STATUS_SUCCESS;
3287 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3288 status, &cp->addr, sizeof(cp->addr));
3290 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3291 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3292 MGMT_STATUS_INVALID_PARAMS);
3295 hci_dev_unlock(hdev);
/* Handle MGMT_OP_REMOVE_REMOTE_OOB_DATA: delete previously stored OOB
 * data for the given address. Note the failure status here is
 * INVALID_PARAMS (no such entry), unlike add_remote_oob_data() which
 * reports FAILED on error.
 */
3299 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3300 void *data, u16 len)
3302 struct mgmt_cp_remove_remote_oob_data *cp = data;
3306 BT_DBG("%s", hdev->name);
3310 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3312 status = MGMT_STATUS_INVALID_PARAMS;
3314 status = MGMT_STATUS_SUCCESS;
3316 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3317 status, &cp->addr, sizeof(cp->addr));
3319 hci_dev_unlock(hdev);
/* Abort a failed discovery start: reset the discovery state machine to
 * DISCOVERY_STOPPED, then answer the pending MGMT_OP_START_DISCOVERY
 * command (if any) with the translated HCI status and the discovery
 * type that was requested.
 */
3323 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3325 struct pending_cmd *cmd;
3329 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3331 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3335 type = hdev->discovery.type;
3337 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3338 &type, sizeof(type));
3339 mgmt_pending_remove(cmd);
/* HCI request completion callback for start_discovery(). On failure the
 * pending command is failed via mgmt_start_discovery_failed(); on
 * success the state moves to DISCOVERY_FINDING and, for LE or
 * interleaved discovery, a delayed work is scheduled to disable the LE
 * scan after the discovery timeout.
 * NOTE(review): timeout argument for the pure-LE case is on an elided
 * line — presumably DISCOV_LE_TIMEOUT; confirm against full source.
 */
3344 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3346 BT_DBG("status %d", status);
3350 mgmt_start_discovery_failed(hdev, status);
3351 hci_dev_unlock(hdev);
3356 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3357 hci_dev_unlock(hdev);
3359 switch (hdev->discovery.type) {
3360 case DISCOV_TYPE_LE:
3361 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3365 case DISCOV_TYPE_INTERLEAVED:
3366 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
3367 DISCOV_INTERLEAVED_TIMEOUT);
3370 case DISCOV_TYPE_BREDR:
3374 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* Handle MGMT_OP_START_DISCOVERY: begin device discovery of the
 * requested type (BR/EDR inquiry, LE scan, or interleaved).
 *
 * Preconditions checked up front: adapter powered, no periodic inquiry
 * running, and discovery currently stopped. A pending command is added,
 * then an HCI request is built per discovery type:
 *  - BR/EDR: flush the inquiry cache and issue HCI Inquiry with the
 *    General Inquiry Access Code.
 *  - LE / interleaved: verify LE support (and BR/EDR for interleaved),
 *    refuse while advertising, temporarily stop any background scan,
 *    pick an own-address type (RPA or unresolvable private address),
 *    then set active scan parameters and enable scanning with duplicate
 *    filtering.
 * The request completes in start_discovery_complete(); on successful
 * submission the state moves to DISCOVERY_STARTING.
 */
3378 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3379 void *data, u16 len)
3381 struct mgmt_cp_start_discovery *cp = data;
3382 struct pending_cmd *cmd;
3383 struct hci_cp_le_set_scan_param param_cp;
3384 struct hci_cp_le_set_scan_enable enable_cp;
3385 struct hci_cp_inquiry inq_cp;
3386 struct hci_request req;
3387 /* General inquiry access code (GIAC) */
3388 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3389 u8 status, own_addr_type;
3392 BT_DBG("%s", hdev->name);
3396 if (!hdev_is_powered(hdev)) {
3397 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3398 MGMT_STATUS_NOT_POWERED);
3402 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3403 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3408 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3409 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3414 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3420 hdev->discovery.type = cp->type;
3422 hci_req_init(&req, hdev);
3424 switch (hdev->discovery.type) {
3425 case DISCOV_TYPE_BREDR:
3426 status = mgmt_bredr_support(hdev);
3428 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3430 mgmt_pending_remove(cmd);
3434 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3435 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3437 mgmt_pending_remove(cmd);
3441 hci_inquiry_cache_flush(hdev);
3443 memset(&inq_cp, 0, sizeof(inq_cp));
3444 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3445 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3446 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3449 case DISCOV_TYPE_LE:
3450 case DISCOV_TYPE_INTERLEAVED:
3451 status = mgmt_le_support(hdev);
3453 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3455 mgmt_pending_remove(cmd);
3459 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3460 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3461 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3462 MGMT_STATUS_NOT_SUPPORTED);
3463 mgmt_pending_remove(cmd);
3467 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3468 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3469 MGMT_STATUS_REJECTED);
3470 mgmt_pending_remove(cmd);
3474 /* If controller is scanning, it means the background scanning
3475 * is running. Thus, we should temporarily stop it in order to
3476 * set the discovery scanning parameters.
3478 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3479 hci_req_add_le_scan_disable(&req);
3481 memset(&param_cp, 0, sizeof(param_cp));
3483 /* All active scans will be done with either a resolvable
3484 * private address (when privacy feature has been enabled)
3485 * or unresolvable private address.
3487 err = hci_update_random_address(&req, true, &own_addr_type);
3489 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3490 MGMT_STATUS_FAILED);
3491 mgmt_pending_remove(cmd);
3495 param_cp.type = LE_SCAN_ACTIVE;
3496 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3497 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3498 param_cp.own_address_type = own_addr_type;
3499 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3502 memset(&enable_cp, 0, sizeof(enable_cp));
3503 enable_cp.enable = LE_SCAN_ENABLE;
3504 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3505 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
3510 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3511 MGMT_STATUS_INVALID_PARAMS);
3512 mgmt_pending_remove(cmd);
3516 err = hci_req_run(&req, start_discovery_complete);
3518 mgmt_pending_remove(cmd);
3520 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3523 hci_dev_unlock(hdev);
/* Answer a pending MGMT_OP_STOP_DISCOVERY command after the HCI request
 * to stop discovery failed, reporting the translated HCI status together
 * with the active discovery type, then drop the pending entry.
 */
3527 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3529 struct pending_cmd *cmd;
3532 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3536 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3537 &hdev->discovery.type, sizeof(hdev->discovery.type));
3538 mgmt_pending_remove(cmd);
/* HCI request completion callback for stop_discovery(). On failure the
 * pending command is failed via mgmt_stop_discovery_failed(); on success
 * the discovery state machine is reset to DISCOVERY_STOPPED.
 */
3543 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3545 BT_DBG("status %d", status);
3550 mgmt_stop_discovery_failed(hdev, status);
3554 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3557 hci_dev_unlock(hdev);
/* Handle MGMT_OP_STOP_DISCOVERY: stop an ongoing device discovery.
 *
 * Rejects the request when no discovery is active or when the supplied
 * type does not match the running one. Otherwise a pending command is
 * queued and an HCI request is built according to the current state:
 *  - DISCOVERY_FINDING: cancel the BR/EDR inquiry and/or disable the
 *    LE scan (also cancelling the scheduled le_scan_disable work).
 *  - DISCOVERY_RESOLVING: cancel the outstanding remote-name request if
 *    one is pending; if none, reply success immediately and reset state.
 *  - anything else: reply MGMT_STATUS_FAILED.
 * The request completes in stop_discovery_complete(); on successful
 * submission the state moves to DISCOVERY_STOPPING.
 */
3560 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3563 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3564 struct pending_cmd *cmd;
3565 struct hci_cp_remote_name_req_cancel cp;
3566 struct inquiry_entry *e;
3567 struct hci_request req;
3570 BT_DBG("%s", hdev->name);
3574 if (!hci_discovery_active(hdev)) {
3575 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3576 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3577 sizeof(mgmt_cp->type));
3581 if (hdev->discovery.type != mgmt_cp->type) {
3582 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3583 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3584 sizeof(mgmt_cp->type));
3588 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3594 hci_req_init(&req, hdev);
3596 switch (hdev->discovery.state) {
3597 case DISCOVERY_FINDING:
3598 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3599 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3601 cancel_delayed_work(&hdev->le_scan_disable);
3603 hci_req_add_le_scan_disable(&req);
3608 case DISCOVERY_RESOLVING:
3609 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
3612 mgmt_pending_remove(cmd);
3613 err = cmd_complete(sk, hdev->id,
3614 MGMT_OP_STOP_DISCOVERY, 0,
3616 sizeof(mgmt_cp->type));
3617 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3621 bacpy(&cp.bdaddr, &e->data.bdaddr);
3622 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3628 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3630 mgmt_pending_remove(cmd);
3631 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3632 MGMT_STATUS_FAILED, &mgmt_cp->type,
3633 sizeof(mgmt_cp->type));
3637 err = hci_req_run(&req, stop_discovery_complete);
3639 mgmt_pending_remove(cmd);
3641 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3644 hci_dev_unlock(hdev);
/* Handle MGMT_OP_CONFIRM_NAME: user space tells us whether it already
 * knows the name of a discovered device. Requires active discovery and
 * an inquiry-cache entry with unknown name state; marks the entry
 * NAME_KNOWN or NAME_NEEDED (queueing it for name resolution in the
 * latter case) and replies with command complete.
 */
3648 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3651 struct mgmt_cp_confirm_name *cp = data;
3652 struct inquiry_entry *e;
3655 BT_DBG("%s", hdev->name);
3659 if (!hci_discovery_active(hdev)) {
3660 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3661 MGMT_STATUS_FAILED, &cp->addr,
3666 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3668 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3669 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3674 if (cp->name_known) {
3675 e->name_state = NAME_KNOWN;
3678 e->name_state = NAME_NEEDED;
3679 hci_inquiry_cache_update_resolve(hdev, e);
3682 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3686 hci_dev_unlock(hdev);
/* Handle MGMT_OP_BLOCK_DEVICE: add the given address to the adapter's
 * blacklist. The address type is validated before taking the device
 * lock; a failing hci_blacklist_add() (e.g. already blocked) maps to
 * MGMT_STATUS_FAILED.
 */
3690 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3693 struct mgmt_cp_block_device *cp = data;
3697 BT_DBG("%s", hdev->name);
3699 if (!bdaddr_type_is_valid(cp->addr.type))
3700 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3701 MGMT_STATUS_INVALID_PARAMS,
3702 &cp->addr, sizeof(cp->addr));
3706 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3708 status = MGMT_STATUS_FAILED;
3710 status = MGMT_STATUS_SUCCESS;
3712 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3713 &cp->addr, sizeof(cp->addr));
3715 hci_dev_unlock(hdev);
/* Handle MGMT_OP_UNBLOCK_DEVICE: remove the given address from the
 * adapter's blacklist. Mirrors block_device(), but a failing
 * hci_blacklist_del() (entry not found) maps to
 * MGMT_STATUS_INVALID_PARAMS.
 */
3720 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3723 struct mgmt_cp_unblock_device *cp = data;
3727 BT_DBG("%s", hdev->name);
3729 if (!bdaddr_type_is_valid(cp->addr.type))
3730 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3731 MGMT_STATUS_INVALID_PARAMS,
3732 &cp->addr, sizeof(cp->addr));
3736 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3738 status = MGMT_STATUS_INVALID_PARAMS;
3740 status = MGMT_STATUS_SUCCESS;
3742 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3743 &cp->addr, sizeof(cp->addr));
3745 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEVICE_ID: store the Device ID record (source,
 * vendor, product, version) on the adapter. Source values above 0x0002
 * are rejected (0x0000 = disabled, 0x0001 = Bluetooth SIG,
 * 0x0002 = USB IF). An HCI request is then run so the record is
 * reflected in the controller (presumably the EIR data is rebuilt on an
 * elided line — confirm against full source).
 */
3750 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3753 struct mgmt_cp_set_device_id *cp = data;
3754 struct hci_request req;
3758 BT_DBG("%s", hdev->name);
3760 source = __le16_to_cpu(cp->source);
3762 if (source > 0x0002)
3763 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3764 MGMT_STATUS_INVALID_PARAMS);
3768 hdev->devid_source = source;
3769 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3770 hdev->devid_product = __le16_to_cpu(cp->product);
3771 hdev->devid_version = __le16_to_cpu(cp->version);
3773 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3775 hci_req_init(&req, hdev);
3777 hci_req_run(&req, NULL);
3779 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Advertising. On failure every
 * pending MGMT_OP_SET_ADVERTISING command is answered with the
 * translated error status; on success they receive the new settings and
 * a New Settings event is broadcast (skipping match.sk).
 */
3784 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
3786 struct cmd_lookup match = { NULL, hdev };
3789 u8 mgmt_err = mgmt_status(status);
3791 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
3792 cmd_status_rsp, &mgmt_err);
3796 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
3799 new_settings(hdev, match.sk);
/* Handle MGMT_OP_SET_ADVERTISING: enable or disable LE advertising.
 *
 * Requires LE support and a boolean value (0x00/0x01). When the adapter
 * is powered off, the value is unchanged, or an LE connection exists,
 * only the HCI_ADVERTISING flag is toggled and a direct response (plus
 * New Settings event on change) is sent — no HCI traffic. A conflicting
 * pending Set Advertising or Set LE command is rejected as busy.
 * Otherwise an HCI request enabling/disabling advertising is run,
 * completing in set_advertising_complete().
 */
3805 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
3808 struct mgmt_mode *cp = data;
3809 struct pending_cmd *cmd;
3810 struct hci_request req;
3811 u8 val, enabled, status;
3814 BT_DBG("request for %s", hdev->name);
3816 status = mgmt_le_support(hdev);
3818 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3821 if (cp->val != 0x00 && cp->val != 0x01)
3822 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3823 MGMT_STATUS_INVALID_PARAMS);
3828 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
3830 /* The following conditions are ones which mean that we should
3831 * not do any HCI communication but directly send a mgmt
3832 * response to user space (after toggling the flag if
3835 if (!hdev_is_powered(hdev) || val == enabled ||
3836 hci_conn_num(hdev, LE_LINK) > 0) {
3837 bool changed = false;
3839 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
3840 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
3844 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
3849 err = new_settings(hdev, sk);
3854 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
3855 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
3856 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
3861 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
3867 hci_req_init(&req, hdev);
3870 enable_advertising(&req);
3872 disable_advertising(&req);
3874 err = hci_req_run(&req, set_advertising_complete);
3876 mgmt_pending_remove(cmd);
3879 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_STATIC_ADDRESS: configure the LE static random
 * address. Only allowed while the adapter is powered off and LE capable.
 * A non-zero address must not be BDADDR_NONE (all 0xff) and must have
 * its two most significant bits set, as the Bluetooth Core Spec requires
 * for static random addresses; BDADDR_ANY (all zeroes) clears it.
 */
3883 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
3884 void *data, u16 len)
3886 struct mgmt_cp_set_static_address *cp = data;
3889 BT_DBG("%s", hdev->name);
3891 if (!lmp_le_capable(hdev))
3892 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3893 MGMT_STATUS_NOT_SUPPORTED);
3895 if (hdev_is_powered(hdev))
3896 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
3897 MGMT_STATUS_REJECTED);
3899 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
3900 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
3901 return cmd_status(sk, hdev->id,
3902 MGMT_OP_SET_STATIC_ADDRESS,
3903 MGMT_STATUS_INVALID_PARAMS);
3905 /* Two most significant bits shall be set */
3906 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
3907 return cmd_status(sk, hdev->id,
3908 MGMT_OP_SET_STATIC_ADDRESS,
3909 MGMT_STATUS_INVALID_PARAMS);
3914 bacpy(&hdev->static_addr, &cp->bdaddr);
3916 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
3918 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SCAN_PARAMS: set the LE scan interval and window.
 * Both values must be within the HCI-defined range 0x0004-0x4000 and
 * the window must not exceed the interval. If a background (passive)
 * scan is currently running and no discovery is active, the scan is
 * restarted so the new parameters take effect immediately.
 */
3923 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
3924 void *data, u16 len)
3926 struct mgmt_cp_set_scan_params *cp = data;
3927 __u16 interval, window;
3930 BT_DBG("%s", hdev->name);
3932 if (!lmp_le_capable(hdev))
3933 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3934 MGMT_STATUS_NOT_SUPPORTED);
3936 interval = __le16_to_cpu(cp->interval);
3938 if (interval < 0x0004 || interval > 0x4000)
3939 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3940 MGMT_STATUS_INVALID_PARAMS);
3942 window = __le16_to_cpu(cp->window);
3944 if (window < 0x0004 || window > 0x4000)
3945 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3946 MGMT_STATUS_INVALID_PARAMS);
3948 if (window > interval)
3949 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
3950 MGMT_STATUS_INVALID_PARAMS);
3954 hdev->le_scan_interval = interval;
3955 hdev->le_scan_window = window;
3957 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
3959 /* If background scan is running, restart it so new parameters are
3962 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
3963 hdev->discovery.state == DISCOVERY_STOPPED) {
3964 struct hci_request req;
3966 hci_req_init(&req, hdev);
3968 hci_req_add_le_scan_disable(&req);
3969 hci_req_add_le_passive_scan(&req);
3971 hci_req_run(&req, NULL);
3974 hci_dev_unlock(hdev);
/* HCI request completion callback for Set Fast Connectable. Looks up
 * the pending command; on failure answers with the translated error
 * status, on success updates the HCI_FAST_CONNECTABLE flag from the
 * stored request value, replies with the new settings and broadcasts a
 * New Settings event. The pending entry is removed either way.
 */
3979 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3981 struct pending_cmd *cmd;
3983 BT_DBG("status 0x%02x", status);
3987 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3992 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3993 mgmt_status(status));
3995 struct mgmt_mode *cp = cmd->param;
3998 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4000 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4002 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4003 new_settings(hdev, cmd->sk);
4006 mgmt_pending_remove(cmd);
4009 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_FAST_CONNECTABLE: toggle fast-connectable page
 * scan parameters. Requires BR/EDR enabled on a controller of at least
 * Bluetooth 1.2, a boolean value, a powered adapter, and the
 * connectable setting to be on. A no-op request (flag already in the
 * requested state) is answered directly; otherwise an HCI request
 * writing the page scan parameters is run, completing in
 * fast_connectable_complete().
 */
4012 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4013 void *data, u16 len)
4015 struct mgmt_mode *cp = data;
4016 struct pending_cmd *cmd;
4017 struct hci_request req;
4020 BT_DBG("%s", hdev->name);
4022 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4023 hdev->hci_ver < BLUETOOTH_VER_1_2)
4024 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4025 MGMT_STATUS_NOT_SUPPORTED);
4027 if (cp->val != 0x00 && cp->val != 0x01)
4028 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4029 MGMT_STATUS_INVALID_PARAMS);
4031 if (!hdev_is_powered(hdev))
4032 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4033 MGMT_STATUS_NOT_POWERED);
4035 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4036 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4037 MGMT_STATUS_REJECTED);
4041 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4042 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4047 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4048 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4053 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4060 hci_req_init(&req, hdev);
4062 write_fast_connectable(&req, cp->val);
4064 err = hci_req_run(&req, fast_connectable_complete);
4066 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4067 MGMT_STATUS_FAILED);
4068 mgmt_pending_remove(cmd);
4072 hci_dev_unlock(hdev);
/* Queue the HCI commands that (re-)enable BR/EDR scanning: first make
 * sure fast connectable is off, then write a scan-enable value built
 * from the connectable/discoverable flags (page scan and/or inquiry
 * scan).
 */
4077 static void set_bredr_scan(struct hci_request *req)
4079 struct hci_dev *hdev = req->hdev;
4082 /* Ensure that fast connectable is disabled. This function will
4083 * not do anything if the page scan parameters are already what
4086 write_fast_connectable(req, false);
4088 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4090 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4091 scan |= SCAN_INQUIRY;
4094 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* HCI request completion callback for Set BR/EDR. On failure the
 * HCI_BREDR_ENABLED flag — which set_bredr() flipped optimistically
 * before running the request — is restored and the pending command gets
 * a status response; on success the new settings are sent and a New
 * Settings event broadcast.
 */
4097 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4099 struct pending_cmd *cmd;
4101 BT_DBG("status 0x%02x", status);
4105 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4110 u8 mgmt_err = mgmt_status(status);
4112 /* We need to restore the flag if related HCI commands
4115 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4117 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4119 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4120 new_settings(hdev, cmd->sk);
4123 mgmt_pending_remove(cmd);
4126 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_BREDR: enable or disable BR/EDR on a dual-mode
 * (BR/EDR + LE) controller. LE must already be enabled and the value
 * must be boolean.
 *
 * No-op requests are answered directly. When powered off, disabling
 * BR/EDR also clears the BR/EDR-dependent flags (discoverable, SSP,
 * link security, fast connectable, HS); the flag is toggled and a
 * direct response plus New Settings event sent. When powered on,
 * disabling is rejected; enabling flips HCI_BREDR_ENABLED up front (so
 * update_adv_data() builds the right flags), optionally re-enables
 * scanning, and runs an HCI request completing in set_bredr_complete().
 */
4129 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4131 struct mgmt_mode *cp = data;
4132 struct pending_cmd *cmd;
4133 struct hci_request req;
4136 BT_DBG("request for %s", hdev->name);
4138 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4139 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4140 MGMT_STATUS_NOT_SUPPORTED);
4142 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4143 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4144 MGMT_STATUS_REJECTED);
4146 if (cp->val != 0x00 && cp->val != 0x01)
4147 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4148 MGMT_STATUS_INVALID_PARAMS);
4152 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4153 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4157 if (!hdev_is_powered(hdev)) {
4159 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4160 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4161 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4162 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4163 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4166 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4168 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4172 err = new_settings(hdev, sk);
4176 /* Reject disabling when powered on */
4178 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4179 MGMT_STATUS_REJECTED);
4183 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4184 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4189 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4195 /* We need to flip the bit already here so that update_adv_data
4196 * generates the correct flags.
4198 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4200 hci_req_init(&req, hdev);
4202 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4203 set_bredr_scan(&req);
4205 /* Since only the advertising data flags will change, there
4206 * is no need to update the scan response data.
4208 update_adv_data(&req);
4210 err = hci_req_run(&req, set_bredr_complete);
4212 mgmt_pending_remove(cmd);
4215 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_SECURE_CONN: configure Secure Connections support.
 * Accepted values: 0x00 off, 0x01 on, 0x02 SC-only mode. Requires
 * BR/EDR support and either controller SC capability or the
 * HCI_FORCE_SC debug override.
 *
 * When powered off only the HCI_SC_ENABLED / HCI_SC_ONLY flags are
 * updated and a direct response (plus New Settings on change) is sent.
 * When powered, a busy or no-op request is answered directly; otherwise
 * HCI Write Secure Connections Support is sent and the SC_ONLY flag is
 * updated on success of the submission.
 */
4219 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4220 void *data, u16 len)
4222 struct mgmt_mode *cp = data;
4223 struct pending_cmd *cmd;
4227 BT_DBG("request for %s", hdev->name);
4229 status = mgmt_bredr_support(hdev);
4231 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4234 if (!lmp_sc_capable(hdev) &&
4235 !test_bit(HCI_FORCE_SC, &hdev->dev_flags))
4236 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4237 MGMT_STATUS_NOT_SUPPORTED);
4239 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4240 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4241 MGMT_STATUS_INVALID_PARAMS);
4245 if (!hdev_is_powered(hdev)) {
4249 changed = !test_and_set_bit(HCI_SC_ENABLED,
4251 if (cp->val == 0x02)
4252 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4254 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4256 changed = test_and_clear_bit(HCI_SC_ENABLED,
4258 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4261 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4266 err = new_settings(hdev, sk);
4271 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4272 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4279 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4280 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4281 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4285 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4291 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4293 mgmt_pending_remove(cmd);
4297 if (cp->val == 0x02)
4298 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4300 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4303 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_DEBUG_KEYS: toggle acceptance of SSP debug link
 * keys. Purely a flag change — no HCI traffic; a New Settings event is
 * broadcast when the flag actually changed.
 */
4307 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4308 void *data, u16 len)
4310 struct mgmt_mode *cp = data;
4314 BT_DBG("request for %s", hdev->name);
4316 if (cp->val != 0x00 && cp->val != 0x01)
4317 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4318 MGMT_STATUS_INVALID_PARAMS);
4323 changed = !test_and_set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4325 changed = test_and_clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
4327 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4332 err = new_settings(hdev, sk);
4335 hci_dev_unlock(hdev);
/* Handle MGMT_OP_SET_PRIVACY: enable or disable LE privacy (use of
 * resolvable private addresses). Only allowed while powered off and LE
 * capable. Enabling stores the supplied IRK and marks the current RPA
 * expired so a fresh one is generated; disabling zeroes the IRK. The
 * HCI_RPA_RESOLVING flag is set unconditionally because a user space
 * that knows this command is expected to handle IRKs.
 */
4339 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4342 struct mgmt_cp_set_privacy *cp = cp_data;
4346 BT_DBG("request for %s", hdev->name);
4348 if (!lmp_le_capable(hdev))
4349 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4350 MGMT_STATUS_NOT_SUPPORTED);
4352 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4353 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4354 MGMT_STATUS_INVALID_PARAMS);
4356 if (hdev_is_powered(hdev))
4357 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4358 MGMT_STATUS_REJECTED);
4362 /* If user space supports this command it is also expected to
4363 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4365 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4368 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4369 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4370 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4372 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4373 memset(hdev->irk, 0, sizeof(hdev->irk));
4374 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4377 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4382 err = new_settings(hdev, sk);
4385 hci_dev_unlock(hdev);
/* Validate a single IRK entry from MGMT_OP_LOAD_IRKS: the address must
 * be an LE address, and a random address must be a static one (two most
 * significant bits set, per the Core Spec).
 */
4389 static bool irk_is_valid(struct mgmt_irk_info *irk)
4391 switch (irk->addr.type) {
4392 case BDADDR_LE_PUBLIC:
4395 case BDADDR_LE_RANDOM:
4396 /* Two most significant bits shall be set */
4397 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_IRKS: replace the adapter's list of Identity
 * Resolving Keys with the supplied set. The total parameter length must
 * match the declared irk_count, and every entry is validated with
 * irk_is_valid() before the existing list is cleared. Loading IRKs
 * implies user space handles resolving, so HCI_RPA_RESOLVING is set.
 */
4405 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4408 struct mgmt_cp_load_irks *cp = cp_data;
4409 u16 irk_count, expected_len;
4412 BT_DBG("request for %s", hdev->name);
4414 if (!lmp_le_capable(hdev))
4415 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4416 MGMT_STATUS_NOT_SUPPORTED);
4418 irk_count = __le16_to_cpu(cp->irk_count);
4420 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4421 if (expected_len != len) {
4422 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4424 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4425 MGMT_STATUS_INVALID_PARAMS);
4428 BT_DBG("%s irk_count %u", hdev->name, irk_count);
4430 for (i = 0; i < irk_count; i++) {
4431 struct mgmt_irk_info *key = &cp->irks[i];
4433 if (!irk_is_valid(key))
4434 return cmd_status(sk, hdev->id,
4436 MGMT_STATUS_INVALID_PARAMS);
4441 hci_smp_irks_clear(hdev);
4443 for (i = 0; i < irk_count; i++) {
4444 struct mgmt_irk_info *irk = &cp->irks[i];
4447 if (irk->addr.type == BDADDR_LE_PUBLIC)
4448 addr_type = ADDR_LE_DEV_PUBLIC;
4450 addr_type = ADDR_LE_DEV_RANDOM;
4452 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
4456 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4458 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4460 hci_dev_unlock(hdev);
/* Validate a single LTK entry from MGMT_OP_LOAD_LONG_TERM_KEYS: master
 * must be a boolean, the address must be an LE address, and a random
 * address must be a static one (two most significant bits set, per the
 * Core Spec).
 */
4465 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4467 if (key->master != 0x00 && key->master != 0x01)
4470 switch (key->addr.type) {
4471 case BDADDR_LE_PUBLIC:
4474 case BDADDR_LE_RANDOM:
4475 /* Two most significant bits shall be set */
4476 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handle MGMT_OP_LOAD_LONG_TERM_KEYS: replace the adapter's SMP
 * long-term key list with the supplied set. The total parameter length
 * must match the declared key_count and every entry must pass
 * ltk_is_valid() before the existing list is cleared. Each key's
 * address type and master/slave role are translated to the internal
 * representation before being added via hci_add_ltk().
 */
4484 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4485 void *cp_data, u16 len)
4487 struct mgmt_cp_load_long_term_keys *cp = cp_data;
4488 u16 key_count, expected_len;
4491 BT_DBG("request for %s", hdev->name);
4493 if (!lmp_le_capable(hdev))
4494 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4495 MGMT_STATUS_NOT_SUPPORTED);
4497 key_count = __le16_to_cpu(cp->key_count);
4499 expected_len = sizeof(*cp) + key_count *
4500 sizeof(struct mgmt_ltk_info);
4501 if (expected_len != len) {
4502 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4504 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4505 MGMT_STATUS_INVALID_PARAMS);
4508 BT_DBG("%s key_count %u", hdev->name, key_count);
4510 for (i = 0; i < key_count; i++) {
4511 struct mgmt_ltk_info *key = &cp->keys[i];
4513 if (!ltk_is_valid(key))
4514 return cmd_status(sk, hdev->id,
4515 MGMT_OP_LOAD_LONG_TERM_KEYS,
4516 MGMT_STATUS_INVALID_PARAMS);
4521 hci_smp_ltks_clear(hdev);
4523 for (i = 0; i < key_count; i++) {
4524 struct mgmt_ltk_info *key = &cp->keys[i];
4527 if (key->addr.type == BDADDR_LE_PUBLIC)
4528 addr_type = ADDR_LE_DEV_PUBLIC;
4530 addr_type = ADDR_LE_DEV_RANDOM;
4535 type = HCI_SMP_LTK_SLAVE;
4537 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4538 key->type, key->val, key->enc_size, key->ediv,
4542 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4545 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode (entry 0x0000 is
 * unused). Each entry names the handler, whether the command has a
 * variable-length payload (true = var_len; the declared size is then a
 * minimum), and the expected parameter size. mgmt_control() uses this
 * table to validate and route incoming commands.
 */
4550 static const struct mgmt_handler {
4551 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
4555 } mgmt_handlers[] = {
4556 { NULL }, /* 0x0000 (no command) */
4557 { read_version, false, MGMT_READ_VERSION_SIZE },
4558 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
4559 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
4560 { read_controller_info, false, MGMT_READ_INFO_SIZE },
4561 { set_powered, false, MGMT_SETTING_SIZE },
4562 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
4563 { set_connectable, false, MGMT_SETTING_SIZE },
4564 { set_fast_connectable, false, MGMT_SETTING_SIZE },
4565 { set_pairable, false, MGMT_SETTING_SIZE },
4566 { set_link_security, false, MGMT_SETTING_SIZE },
4567 { set_ssp, false, MGMT_SETTING_SIZE },
4568 { set_hs, false, MGMT_SETTING_SIZE },
4569 { set_le, false, MGMT_SETTING_SIZE },
4570 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
4571 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
4572 { add_uuid, false, MGMT_ADD_UUID_SIZE },
4573 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
4574 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
4575 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
4576 { disconnect, false, MGMT_DISCONNECT_SIZE },
4577 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
4578 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
4579 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
4580 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
4581 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
4582 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
4583 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
4584 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
4585 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
4586 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
4587 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
4588 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
4589 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
4590 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
4591 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
4592 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
4593 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
4594 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
4595 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
4596 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
4597 { set_advertising, false, MGMT_SETTING_SIZE },
4598 { set_bredr, false, MGMT_SETTING_SIZE },
4599 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
4600 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
4601 { set_secure_conn, false, MGMT_SETTING_SIZE },
4602 { set_debug_keys, false, MGMT_SETTING_SIZE },
4603 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
4604 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
/* Entry point for MGMT commands arriving on an HCI control socket.
 * Copies the message from userspace, validates the header, resolves
 * the target controller (if any) and dispatches to mgmt_handlers[].
 * Returns a negative errno or the number of bytes consumed.
 * NOTE(review): several lines (error-exit labels, kfree of buf,
 * final return) are missing from this excerpt -- verify cleanup
 * paths against the full source.
 */
4608 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
4612 struct mgmt_hdr *hdr;
4613 u16 opcode, index, len;
4614 struct hci_dev *hdev = NULL;
4615 const struct mgmt_handler *handler;
4618 BT_DBG("got %zu bytes", msglen);
/* Message must at least contain the fixed MGMT header */
4620 if (msglen < sizeof(*hdr))
4623 buf = kmalloc(msglen, GFP_KERNEL);
4627 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
4633 opcode = __le16_to_cpu(hdr->opcode);
4634 index = __le16_to_cpu(hdr->index);
4635 len = __le16_to_cpu(hdr->len);
/* Header length field must match the actual payload size */
4637 if (len != msglen - sizeof(*hdr)) {
4642 if (index != MGMT_INDEX_NONE) {
4643 hdev = hci_dev_get(index);
4645 err = cmd_status(sk, index, opcode,
4646 MGMT_STATUS_INVALID_INDEX);
/* Controllers still in setup, or claimed exclusively by a user
 * channel, are not visible to the management interface.
 */
4650 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
4651 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4652 err = cmd_status(sk, index, opcode,
4653 MGMT_STATUS_INVALID_INDEX);
4658 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
4659 mgmt_handlers[opcode].func == NULL) {
4660 BT_DBG("Unknown op %u", opcode);
4661 err = cmd_status(sk, index, opcode,
4662 MGMT_STATUS_UNKNOWN_COMMAND);
/* Opcodes below MGMT_OP_READ_INFO are global (no index); all
 * others require a valid controller index.
 */
4666 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
4667 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
4668 err = cmd_status(sk, index, opcode,
4669 MGMT_STATUS_INVALID_INDEX);
4673 handler = &mgmt_handlers[opcode];
/* Fixed-size commands must match exactly; variable-length ones
 * must be at least the declared minimum size.
 */
4675 if ((handler->var_len && len < handler->data_len) ||
4676 (!handler->var_len && len != handler->data_len)) {
4677 err = cmd_status(sk, index, opcode,
4678 MGMT_STATUS_INVALID_PARAMS);
4683 mgmt_init_hdev(sk, hdev);
/* Command parameters start right after the header */
4685 cp = buf + sizeof(*hdr);
4687 err = handler->func(sk, hdev, cp, len);
/* Emit MGMT_EV_INDEX_ADDED when a controller is registered; AMP
 * (non-BR/EDR) controllers are not announced over mgmt.
 */
4701 void mgmt_index_added(struct hci_dev *hdev)
4703 if (hdev->dev_type != HCI_BREDR)
4706 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Called when a controller is unregistered: fail every pending mgmt
 * command with INVALID_INDEX (opcode 0 matches all) and emit
 * MGMT_EV_INDEX_REMOVED.  Non-BR/EDR controllers are skipped.
 */
4709 void mgmt_index_removed(struct hci_dev *hdev)
4711 u8 status = MGMT_STATUS_INVALID_INDEX;
4713 if (hdev->dev_type != HCI_BREDR)
4716 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
4718 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
4721 /* This function requires the caller holds hdev->lock */
/* Re-add every stored LE connection parameter entry marked
 * HCI_AUTO_CONN_ALWAYS to the pending-LE-connection list, so that
 * auto-connections resume (used after power-on).
 */
4722 static void restart_le_auto_conns(struct hci_dev *hdev)
4724 struct hci_conn_params *p;
4726 list_for_each_entry(p, &hdev->le_conn_params, list) {
4727 if (p->auto_connect == HCI_AUTO_CONN_ALWAYS)
4728 hci_pend_le_conn_add(hdev, &p->addr, p->addr_type);
/* HCI request completion callback for the power-on sequence built by
 * powered_update_hci(): restart LE auto-connections, answer all
 * pending SET_POWERED commands and broadcast the new settings.
 */
4732 static void powered_complete(struct hci_dev *hdev, u8 status)
4734 struct cmd_lookup match = { NULL, hdev };
4736 BT_DBG("status 0x%02x", status);
4740 restart_le_auto_conns(hdev);
4742 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4744 new_settings(hdev, match.sk);
4746 hci_dev_unlock(hdev);
/* Build and run the HCI request that synchronizes controller state
 * with the current mgmt settings after powering on: SSP mode, LE
 * host support, advertising data, authentication and scan mode.
 * Returns the result of hci_req_run() (0 on success) with
 * powered_complete() as the completion callback.
 */
4752 static int powered_update_hci(struct hci_dev *hdev)
4754 struct hci_request req;
4757 hci_req_init(&req, hdev);
/* Enable SSP on the controller if mgmt has it enabled but the
 * controller's host feature bit does not reflect that yet.
 */
4759 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
4760 !lmp_host_ssp_capable(hdev)) {
4763 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
4766 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
4767 lmp_bredr_capable(hdev)) {
4768 struct hci_cp_write_le_host_supported cp;
4771 cp.simul = lmp_le_br_capable(hdev);
4773 /* Check first if we already have the right
4774 * host state (host features set)
4776 if (cp.le != lmp_host_le_capable(hdev) ||
4777 cp.simul != lmp_host_le_br_capable(hdev))
4778 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4782 if (lmp_le_capable(hdev)) {
4783 /* Make sure the controller has a good default for
4784 * advertising data. This also applies to the case
4785 * where BR/EDR was toggled during the AUTO_OFF phase.
4787 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
4788 update_adv_data(&req);
4789 update_scan_rsp_data(&req);
4792 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
4793 enable_advertising(&req);
/* Sync HCI authentication enable with the mgmt link-security
 * setting if they disagree.
 */
4796 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4797 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
4798 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
4799 sizeof(link_sec), &link_sec);
4801 if (lmp_bredr_capable(hdev)) {
4802 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
4803 set_bredr_scan(&req);
4809 return hci_req_run(&req, powered_complete);
/* Notify the management interface of a controller power change.
 * On power-on, kick off powered_update_hci() (responses are then
 * deferred to its completion callback).  On power-off, answer all
 * pending commands, emit a class-of-device change to zero if needed,
 * and broadcast new settings.
 * NOTE(review): early-return lines between the power-on path and the
 * power-off handling are missing from this excerpt.
 */
4812 int mgmt_powered(struct hci_dev *hdev, u8 powered)
4814 struct cmd_lookup match = { NULL, hdev };
4815 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
4816 u8 zero_cod[] = { 0, 0, 0 };
/* Nothing to do while the kernel-internal mgmt interface is unused */
4819 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4823 if (powered_update_hci(hdev) == 0)
4826 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off path: complete SET_POWERED and fail everything else */
4831 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
4832 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
4834 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
4835 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
4836 zero_cod, sizeof(zero_cod), NULL);
4839 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command when powering on did not work.
 * -ERFKILL maps to MGMT_STATUS_RFKILLED, anything else to FAILED.
 */
4847 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
4849 struct pending_cmd *cmd;
4852 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
4856 if (err == -ERFKILL)
4857 status = MGMT_STATUS_RFKILLED;
4859 status = MGMT_STATUS_FAILED;
4861 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
4863 mgmt_pending_remove(cmd);
/* Timer handler for the discoverable timeout: clear both
 * discoverable flags, restore page-scan-only mode on BR/EDR,
 * refresh LE advertising data and announce the new settings.
 */
4866 void mgmt_discoverable_timeout(struct hci_dev *hdev)
4868 struct hci_request req;
4872 /* When discoverable timeout triggers, then just make sure
4873 * the limited discoverable flag is cleared. Even in the case
4874 * of a timeout triggered from general discoverable, it is
4875 * safe to unconditionally clear the flag.
4877 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4878 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4880 hci_req_init(&req, hdev);
4881 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Drop inquiry scan, keep page scan so we stay connectable */
4882 u8 scan = SCAN_PAGE;
4883 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
4884 sizeof(scan), &scan);
/* Advertising flags may carry discoverable bits; refresh them */
4887 update_adv_data(&req);
4888 hci_req_run(&req, NULL);
4890 hdev->discov_timeout = 0;
4892 new_settings(hdev, NULL);
4894 hci_dev_unlock(hdev);
/* Track a discoverable (inquiry scan) state change coming from HCI
 * and, when it actually changed, refresh advertising data and
 * broadcast new settings.
 * NOTE(review): the if/else structure around the set/clear branch is
 * partially missing from this excerpt.
 */
4897 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
4901 /* Nothing needed here if there's a pending command since that
4902 * commands request completion callback takes care of everything
4905 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
4908 /* Powering off may clear the scan mode - don't let that interfere */
4909 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4913 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4915 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
4916 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4920 struct hci_request req;
4922 /* In case this change in discoverable was triggered by
4923 * a disabling of connectable there could be a need to
4924 * update the advertising flags.
4926 hci_req_init(&req, hdev);
4927 update_adv_data(&req);
4928 hci_req_run(&req, NULL);
4930 new_settings(hdev, NULL);
/* Track a connectable (page scan) state change coming from HCI and
 * broadcast new settings if the HCI_CONNECTABLE flag actually
 * flipped.
 */
4934 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
4938 /* Nothing needed here if there's a pending command since that
4939 * commands request completion callback takes care of everything
4942 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
4945 /* Powering off may clear the scan mode - don't let that interfere */
4946 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4950 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4952 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
4955 new_settings(hdev, NULL);
/* Mirror the controller's advertising state into the
 * HCI_ADVERTISING mgmt flag (unless a power-off is in progress).
 */
4958 void mgmt_advertising(struct hci_dev *hdev, u8 advertising)
4960 /* Powering off may stop advertising - don't let that interfere */
4961 if (!advertising && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4965 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4967 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
/* A Write Scan Enable command failed: fail the pending connectable
 * and/or discoverable mgmt commands that requested the scan bits.
 */
4970 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
4972 u8 mgmt_err = mgmt_status(status);
4974 if (scan & SCAN_PAGE)
4975 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
4976 cmd_status_rsp, &mgmt_err);
4978 if (scan & SCAN_INQUIRY)
4979 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
4980 cmd_status_rsp, &mgmt_err);
/* Emit MGMT_EV_NEW_LINK_KEY for a freshly created BR/EDR link key,
 * letting userspace decide whether to persist it (store_hint).
 */
4983 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
4986 struct mgmt_ev_new_link_key ev;
4988 memset(&ev, 0, sizeof(ev));
4990 ev.store_hint = persistent;
4991 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
4992 ev.key.addr.type = BDADDR_BREDR;
4993 ev.key.type = key->type;
4994 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
4995 ev.key.pin_len = key->pin_len;
4997 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_LONG_TERM_KEY for a new SMP LTK.  store_hint is
 * only set for identity addresses (public or static random); keys
 * for resolvable/non-resolvable random addresses are not worth
 * persisting since the address will change.
 */
5000 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key)
5002 struct mgmt_ev_new_long_term_key ev;
5004 memset(&ev, 0, sizeof(ev));
5006 /* Devices using resolvable or non-resolvable random addresses
5007 * without providing an indentity resolving key don't require
5008 * to store long term keys. Their addresses will change the
5011 * Only when a remote device provides an identity address
5012 * make sure the long term key is stored. If the remote
5013 * identity is known, the long term keys are internally
5014 * mapped to the identity address. So allow static random
5015 * and public addresses here.
/* Top two bits 11 mark a static random address; anything else
 * random is resolvable/non-resolvable and not stored.
 */
5017 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
5018 (key->bdaddr.b[5] & 0xc0) != 0xc0)
5019 ev.store_hint = 0x00;
5021 ev.store_hint = 0x01;
5023 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
5024 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
5025 ev.key.type = key->authenticated;
5026 ev.key.enc_size = key->enc_size;
5027 ev.key.ediv = key->ediv;
5028 ev.key.rand = key->rand;
/* NOTE(review): lines between the HCI_SMP_LTK check and the value
 * copy (presumably setting a master flag) are missing here.
 */
5030 if (key->type == HCI_SMP_LTK)
5033 memcpy(ev.key.val, key->val, sizeof(key->val));
5035 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_NEW_IRK for a new identity resolving key.  Storage is
 * only hinted when the device actually uses a resolvable private
 * address (irk->rpa set); identity-addressed devices need no IRK.
 */
5038 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
5040 struct mgmt_ev_new_irk ev;
5042 memset(&ev, 0, sizeof(ev));
5044 /* For identity resolving keys from devices that are already
5045 * using a public address or static random address, do not
5046 * ask for storing this key. The identity resolving key really
5047 * is only mandatory for devices using resovlable random
5050 * Storing all identity resolving keys has the downside that
5051 * they will be also loaded on next boot of they system. More
5052 * identity resolving keys, means more time during scanning is
5053 * needed to actually resolve these addresses.
5055 if (bacmp(&irk->rpa, BDADDR_ANY))
5056 ev.store_hint = 0x01;
5058 ev.store_hint = 0x00;
5060 bacpy(&ev.rpa, &irk->rpa);
5061 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
5062 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
5063 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
5065 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR (Extended Inquiry Response) field --
 * length byte, type byte, then data -- at offset eir_len in eir.
 * Returns the new total length.  Caller must ensure the buffer is
 * large enough.
 */
5068 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
5071 eir[eir_len++] = sizeof(type) + data_len;
5072 eir[eir_len++] = type;
5073 memcpy(&eir[eir_len], data, data_len);
5074 eir_len += data_len;
/* Emit MGMT_EV_DEVICE_CONNECTED with the remote name and class of
 * device packed as EIR fields after the fixed event structure.
 * NOTE(review): the declaration of buf and the name_len guard around
 * the name append are missing from this excerpt.
 */
5079 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5080 u8 addr_type, u32 flags, u8 *name, u8 name_len,
5084 struct mgmt_ev_device_connected *ev = (void *) buf;
5087 bacpy(&ev->addr.bdaddr, bdaddr);
5088 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5090 ev->flags = __cpu_to_le32(flags);
5093 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only append class of device when it is non-zero */
5096 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
5097 eir_len = eir_append_data(ev->eir, eir_len,
5098 EIR_CLASS_OF_DEV, dev_class, 3);
5100 ev->eir_len = cpu_to_le16(eir_len);
5102 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
5103 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach callback: complete a pending DISCONNECT
 * command with success and remember the originating socket in *data
 * so the DEVICE_DISCONNECTED event can skip it.
 */
5106 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
5108 struct mgmt_cp_disconnect *cp = cmd->param;
5109 struct sock **sk = data;
5110 struct mgmt_rp_disconnect rp;
5112 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5113 rp.addr.type = cp->addr.type;
5115 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
5121 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach callback: signal the unpaired device to
 * userspace and complete the pending UNPAIR_DEVICE command.
 */
5124 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
5126 struct hci_dev *hdev = data;
5127 struct mgmt_cp_unpair_device *cp = cmd->param;
5128 struct mgmt_rp_unpair_device rp;
5130 memset(&rp, 0, sizeof(rp));
5131 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5132 rp.addr.type = cp->addr.type;
5134 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
5136 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
5138 mgmt_pending_remove(cmd);
/* Report a device disconnection over mgmt: complete pending
 * DISCONNECT commands, emit MGMT_EV_DEVICE_DISCONNECTED (skipping
 * the socket that issued the disconnect), and flush pending
 * UNPAIR_DEVICE commands.  Also expedites a queued power-off when
 * this was the last connection.
 */
5141 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
5142 u8 link_type, u8 addr_type, u8 reason,
5143 bool mgmt_connected)
5145 struct mgmt_ev_device_disconnected ev;
5146 struct pending_cmd *power_off;
5147 struct sock *sk = NULL;
5149 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5151 struct mgmt_mode *cp = power_off->param;
5153 /* The connection is still in hci_conn_hash so test for 1
5154 * instead of 0 to know if this is the last one.
/* Run the power-off work immediately instead of waiting */
5156 if (!cp->val && hci_conn_count(hdev) == 1) {
5157 cancel_delayed_work(&hdev->power_off);
5158 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5162 if (!mgmt_connected)
5165 if (link_type != ACL_LINK && link_type != LE_LINK)
5168 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
5170 bacpy(&ev.addr.bdaddr, bdaddr);
5171 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5174 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
5179 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A requested disconnect failed: flush pending UNPAIR_DEVICE
 * commands and complete the matching pending DISCONNECT command
 * with the mapped HCI status.
 */
5183 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
5184 u8 link_type, u8 addr_type, u8 status)
5186 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
5187 struct mgmt_cp_disconnect *cp;
5188 struct mgmt_rp_disconnect rp;
5189 struct pending_cmd *cmd;
5191 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
5194 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
/* Only respond if the pending command targets this exact address */
5200 if (bacmp(bdaddr, &cp->addr.bdaddr))
5203 if (cp->addr.type != bdaddr_type)
5206 bacpy(&rp.addr.bdaddr, bdaddr);
5207 rp.addr.type = bdaddr_type;
5209 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
5210 mgmt_status(status), &rp, sizeof(rp));
5212 mgmt_pending_remove(cmd);
/* Emit MGMT_EV_CONNECT_FAILED for an outgoing connection attempt
 * that failed; expedites a queued power-off when this was the last
 * connection being torn down.
 */
5215 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5216 u8 addr_type, u8 status)
5218 struct mgmt_ev_connect_failed ev;
5219 struct pending_cmd *power_off;
5221 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5223 struct mgmt_mode *cp = power_off->param;
5225 /* The connection is still in hci_conn_hash so test for 1
5226 * instead of 0 to know if this is the last one.
5228 if (!cp->val && hci_conn_count(hdev) == 1) {
5229 cancel_delayed_work(&hdev->power_off);
5230 queue_work(hdev->req_workqueue, &hdev->power_off.work);
5234 bacpy(&ev.addr.bdaddr, bdaddr);
5235 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5236 ev.status = mgmt_status(status);
5238 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Forward a PIN code request from the controller to userspace as
 * MGMT_EV_PIN_CODE_REQUEST (BR/EDR only).
 */
5241 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
5243 struct mgmt_ev_pin_code_request ev;
5245 bacpy(&ev.addr.bdaddr, bdaddr);
5246 ev.addr.type = BDADDR_BREDR;
5249 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete the pending PIN_CODE_REPLY command once the controller
 * has acknowledged the reply, mapping the HCI status to mgmt.
 */
5252 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5255 struct pending_cmd *cmd;
5256 struct mgmt_rp_pin_code_reply rp;
5258 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
5262 bacpy(&rp.addr.bdaddr, bdaddr);
5263 rp.addr.type = BDADDR_BREDR;
5265 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
5266 mgmt_status(status), &rp, sizeof(rp));
5268 mgmt_pending_remove(cmd);
/* Complete the pending PIN_CODE_NEG_REPLY command once the
 * controller has acknowledged the negative reply.
 */
5271 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5274 struct pending_cmd *cmd;
5275 struct mgmt_rp_pin_code_reply rp;
5277 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
5281 bacpy(&rp.addr.bdaddr, bdaddr);
5282 rp.addr.type = BDADDR_BREDR;
5284 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
5285 mgmt_status(status), &rp, sizeof(rp));
5287 mgmt_pending_remove(cmd);
/* Ask userspace to confirm a pairing numeric value by emitting
 * MGMT_EV_USER_CONFIRM_REQUEST.
 */
5290 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5291 u8 link_type, u8 addr_type, __le32 value,
5294 struct mgmt_ev_user_confirm_request ev;
5296 BT_DBG("%s", hdev->name);
5298 bacpy(&ev.addr.bdaddr, bdaddr);
5299 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5300 ev.confirm_hint = confirm_hint;
5303 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Ask userspace to enter a passkey by emitting
 * MGMT_EV_USER_PASSKEY_REQUEST.
 */
5307 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
5308 u8 link_type, u8 addr_type)
5310 struct mgmt_ev_user_passkey_request ev;
5312 BT_DBG("%s", hdev->name);
5314 bacpy(&ev.addr.bdaddr, bdaddr);
5315 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5317 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Shared completion helper for the four user confirm/passkey
 * reply commands: find the pending command for the given opcode
 * and complete it with the mapped HCI status.
 */
5321 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5322 u8 link_type, u8 addr_type, u8 status,
5325 struct pending_cmd *cmd;
5326 struct mgmt_rp_user_confirm_reply rp;
5329 cmd = mgmt_pending_find(opcode, hdev);
5333 bacpy(&rp.addr.bdaddr, bdaddr);
5334 rp.addr.type = link_to_bdaddr(link_type, addr_type);
5335 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
5338 mgmt_pending_remove(cmd);
/* Completion hook for USER_CONFIRM_REPLY. */
5343 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5344 u8 link_type, u8 addr_type, u8 status)
5346 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5347 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Completion hook for USER_CONFIRM_NEG_REPLY. */
5350 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5351 u8 link_type, u8 addr_type, u8 status)
5353 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5355 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Completion hook for USER_PASSKEY_REPLY. */
5358 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5359 u8 link_type, u8 addr_type, u8 status)
5361 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5362 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Completion hook for USER_PASSKEY_NEG_REPLY. */
5365 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
5366 u8 link_type, u8 addr_type, u8 status)
5368 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
5370 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit MGMT_EV_PASSKEY_NOTIFY so userspace can display the passkey
 * (and how many digits were already entered) during pairing.
 */
5373 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
5374 u8 link_type, u8 addr_type, u32 passkey,
5377 struct mgmt_ev_passkey_notify ev;
5379 BT_DBG("%s", hdev->name);
5381 bacpy(&ev.addr.bdaddr, bdaddr);
5382 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5383 ev.passkey = __cpu_to_le32(passkey);
5384 ev.entered = entered;
5386 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_AUTH_FAILED with the mapped HCI status when
 * authentication with a remote device failed.
 */
5389 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5390 u8 addr_type, u8 status)
5392 struct mgmt_ev_auth_failed ev;
5394 bacpy(&ev.addr.bdaddr, bdaddr);
5395 ev.addr.type = link_to_bdaddr(link_type, addr_type);
5396 ev.status = mgmt_status(status);
5398 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Write Auth Enable completed: on failure, fail pending
 * SET_LINK_SECURITY commands; on success, sync HCI_LINK_SECURITY
 * with the controller's HCI_AUTH flag, answer pending commands and
 * broadcast new settings if the flag changed.
 */
5401 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
5403 struct cmd_lookup match = { NULL, hdev };
5407 u8 mgmt_err = mgmt_status(status);
5408 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
5409 cmd_status_rsp, &mgmt_err);
5413 if (test_bit(HCI_AUTH, &hdev->flags))
5414 changed = !test_and_set_bit(HCI_LINK_SECURITY,
5417 changed = test_and_clear_bit(HCI_LINK_SECURITY,
5420 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
5424 new_settings(hdev, match.sk);
/* Queue a Write EIR command with an all-zero payload to wipe the
 * controller's Extended Inquiry Response data (and the cached copy
 * in hdev->eir).  No-op on controllers without EIR support.
 */
5430 static void clear_eir(struct hci_request *req)
5432 struct hci_dev *hdev = req->hdev;
5433 struct hci_cp_write_eir cp;
5435 if (!lmp_ext_inq_capable(hdev))
5438 memset(hdev->eir, 0, sizeof(hdev->eir));
5440 memset(&cp, 0, sizeof(cp));
5442 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Write SSP Mode completed: on failure, roll back the SSP (and
 * dependent HS) flags and fail pending SET_SSP commands; on success,
 * sync the flags, answer pending commands, broadcast settings and
 * update (or clear) the EIR data accordingly.
 * NOTE(review): the else-branches and the EIR-update call in the
 * final request are partially missing from this excerpt.
 */
5445 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5447 struct cmd_lookup match = { NULL, hdev };
5448 struct hci_request req;
5449 bool changed = false;
5452 u8 mgmt_err = mgmt_status(status);
/* Failed enable: undo the optimistic flag set and tell userspace */
5454 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
5455 &hdev->dev_flags)) {
5456 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5457 new_settings(hdev, NULL);
5460 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
5466 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
5468 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
/* High Speed requires SSP, so it is cleared together with it */
5470 changed = test_and_clear_bit(HCI_HS_ENABLED,
5473 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
5476 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
5479 new_settings(hdev, match.sk);
5484 hci_req_init(&req, hdev);
5486 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
5491 hci_req_run(&req, NULL);
/* Write Secure Connections support completed: on failure, roll back
 * the SC flags and fail pending SET_SECURE_CONN commands; on
 * success, sync HCI_SC_ENABLED/HCI_SC_ONLY, answer pending commands
 * and broadcast new settings if anything changed.
 */
5494 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
5496 struct cmd_lookup match = { NULL, hdev };
5497 bool changed = false;
5500 u8 mgmt_err = mgmt_status(status);
/* Failed enable: undo optimistic flag set and notify userspace */
5503 if (test_and_clear_bit(HCI_SC_ENABLED,
5505 new_settings(hdev, NULL);
5506 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5509 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5510 cmd_status_rsp, &mgmt_err);
5515 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
5517 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
/* SC-only mode cannot survive SC being disabled */
5518 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
5521 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
5522 settings_rsp, &match);
5525 new_settings(hdev, match.sk);
/* mgmt_pending_foreach callback: capture the first pending command's
 * socket (with a reference) into the cmd_lookup match structure.
 */
5531 static void sk_lookup(struct pending_cmd *cmd, void *data)
5533 struct cmd_lookup *match = data;
5535 if (match->sk == NULL) {
5536 match->sk = cmd->sk;
5537 sock_hold(match->sk);
/* Class of Device update completed: find the socket that triggered
 * it (dev-class, add-uuid or remove-uuid command) and broadcast
 * MGMT_EV_CLASS_OF_DEV_CHANGED to everyone else.
 * NOTE(review): the status check guarding the event emission and the
 * sock_put for the looked-up socket are missing from this excerpt.
 */
5541 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
5544 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
5546 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
5547 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
5548 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
5551 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Local name change completed: update the cached name and emit
 * MGMT_EV_LOCAL_NAME_CHANGED, unless the change was an internal
 * side effect of powering on (pending SET_POWERED).
 */
5558 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
5560 struct mgmt_cp_set_local_name ev;
5561 struct pending_cmd *cmd;
5566 memset(&ev, 0, sizeof(ev));
5567 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
5568 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
5570 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
5572 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
5574 /* If this is a HCI command related to powering on the
5575 * HCI dev don't send any mgmt signals.
5577 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
/* Skip the originating socket when there was an explicit command */
5581 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
5582 cmd ? cmd->sk : NULL);
/* Read Local OOB Data completed: respond to the pending command
 * with either the extended (192+256-bit, when Secure Connections is
 * enabled and 256-bit data is available) or the legacy 192-bit
 * hash/randomizer payload.  On failure, respond with the mapped
 * HCI status instead.
 */
5585 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
5586 u8 *randomizer192, u8 *hash256,
5587 u8 *randomizer256, u8 status)
5589 struct pending_cmd *cmd;
5591 BT_DBG("%s status %u", hdev->name, status);
5593 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
5598 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
5599 mgmt_status(status));
5601 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
5602 hash256 && randomizer256) {
5603 struct mgmt_rp_read_local_oob_ext_data rp;
5605 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
5606 memcpy(rp.randomizer192, randomizer192,
5607 sizeof(rp.randomizer192));
5609 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
5610 memcpy(rp.randomizer256, randomizer256,
5611 sizeof(rp.randomizer256));
5613 cmd_complete(cmd->sk, hdev->id,
5614 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5617 struct mgmt_rp_read_local_oob_data rp;
5619 memcpy(rp.hash, hash192, sizeof(rp.hash));
5620 memcpy(rp.randomizer, randomizer192,
5621 sizeof(rp.randomizer));
5623 cmd_complete(cmd->sk, hdev->id,
5624 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
5629 mgmt_pending_remove(cmd);
/* Report a discovered device to userspace as MGMT_EV_DEVICE_FOUND.
 * Resolves the reported address through the IRK store so userspace
 * sees the identity address, and appends a class-of-device EIR
 * field when the EIR data lacks one.  Events are only emitted while
 * discovery is active, and oversized EIR payloads are dropped.
 * NOTE(review): the declarations of buf/ev_size and the rssi/eir
 * assignments are partially missing from this excerpt.
 */
5632 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5633 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
5634 ssp, u8 *eir, u16 eir_len)
5637 struct mgmt_ev_device_found *ev = (void *) buf;
5638 struct smp_irk *irk;
5641 if (!hci_discovery_active(hdev))
5644 /* Leave 5 bytes for a potential CoD field */
5645 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
5648 memset(buf, 0, sizeof(buf));
/* Prefer the identity address when the RPA resolves via an IRK */
5650 irk = hci_get_irk(hdev, bdaddr, addr_type);
5652 bacpy(&ev->addr.bdaddr, &irk->bdaddr);
5653 ev->addr.type = link_to_bdaddr(link_type, irk->addr_type);
5655 bacpy(&ev->addr.bdaddr, bdaddr);
5656 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5661 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
5663 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
5666 memcpy(ev->eir, eir, eir_len);
5668 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
5669 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
5672 ev->eir_len = cpu_to_le16(eir_len);
5673 ev_size = sizeof(*ev) + eir_len;
5675 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Report a resolved remote name as MGMT_EV_DEVICE_FOUND with the
 * name packed as an EIR_NAME_COMPLETE field.
 */
5678 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
5679 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
5681 struct mgmt_ev_device_found *ev;
5682 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
5685 ev = (struct mgmt_ev_device_found *) buf;
5687 memset(buf, 0, sizeof(buf));
5689 bacpy(&ev->addr.bdaddr, bdaddr);
5690 ev->addr.type = link_to_bdaddr(link_type, addr_type);
5693 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
5696 ev->eir_len = cpu_to_le16(eir_len);
5698 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Discovery state changed: complete whichever of START_DISCOVERY or
 * STOP_DISCOVERY is pending with the discovery type, then emit
 * MGMT_EV_DISCOVERING with the new state.
 */
5701 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
5703 struct mgmt_ev_discovering ev;
5704 struct pending_cmd *cmd;
5706 BT_DBG("%s discovering %u", hdev->name, discovering);
5709 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
5711 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5714 u8 type = hdev->discovery.type;
5716 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
5718 mgmt_pending_remove(cmd);
5721 memset(&ev, 0, sizeof(ev));
5722 ev.type = hdev->discovery.type;
5723 ev.discovering = discovering;
5725 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Emit MGMT_EV_DEVICE_BLOCKED, skipping the socket of the pending
 * BLOCK_DEVICE command (it gets a command reply instead).
 */
5728 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5730 struct pending_cmd *cmd;
5731 struct mgmt_ev_device_blocked ev;
5733 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
5735 bacpy(&ev.addr.bdaddr, bdaddr);
5736 ev.addr.type = type;
5738 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
5739 cmd ? cmd->sk : NULL);
/* Emit MGMT_EV_DEVICE_UNBLOCKED, skipping the socket of the pending
 * UNBLOCK_DEVICE command.
 */
5742 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
5744 struct pending_cmd *cmd;
5745 struct mgmt_ev_device_unblocked ev;
5747 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
5749 bacpy(&ev.addr.bdaddr, bdaddr);
5750 ev.addr.type = type;
5752 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
5753 cmd ? cmd->sk : NULL);
/* Completion callback for re-enabling advertising: on failure, drop
 * the HCI_ADVERTISING flag and tell userspace via new settings.
 */
5756 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
5758 BT_DBG("%s status %u", hdev->name, status);
5760 /* Clear the advertising mgmt setting if we failed to re-enable it */
5762 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5763 new_settings(hdev, NULL);
5767 void mgmt_reenable_advertising(struct hci_dev *hdev)
5769 struct hci_request req;
5771 if (hci_conn_num(hdev, LE_LINK) > 0)
5774 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5777 hci_req_init(&req, hdev);
5778 enable_advertising(&req);
5780 /* If this fails we have no option but to let user space know
5781 * that we've disabled advertising.
5783 if (hci_req_run(&req, adv_enable_complete) < 0) {
5784 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
5785 new_settings(hdev, NULL);