/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2010  Nokia Corporation
   Copyright (C) 2011-2012 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33 #include <net/bluetooth/mgmt.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 7
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
79 MGMT_OP_SET_ADVERTISING,
81 MGMT_OP_SET_STATIC_ADDRESS,
82 MGMT_OP_SET_SCAN_PARAMS,
83 MGMT_OP_SET_SECURE_CONN,
84 MGMT_OP_SET_DEBUG_KEYS,
87 MGMT_OP_GET_CONN_INFO,
88 MGMT_OP_GET_CLOCK_INFO,
90 MGMT_OP_REMOVE_DEVICE,
91 MGMT_OP_LOAD_CONN_PARAM,
92 MGMT_OP_READ_UNCONF_INDEX_LIST,
93 MGMT_OP_READ_CONFIG_INFO,
94 MGMT_OP_SET_EXTERNAL_CONFIG,
95 MGMT_OP_SET_PUBLIC_ADDRESS,
98 static const u16 mgmt_events[] = {
99 MGMT_EV_CONTROLLER_ERROR,
101 MGMT_EV_INDEX_REMOVED,
102 MGMT_EV_NEW_SETTINGS,
103 MGMT_EV_CLASS_OF_DEV_CHANGED,
104 MGMT_EV_LOCAL_NAME_CHANGED,
105 MGMT_EV_NEW_LINK_KEY,
106 MGMT_EV_NEW_LONG_TERM_KEY,
107 MGMT_EV_DEVICE_CONNECTED,
108 MGMT_EV_DEVICE_DISCONNECTED,
109 MGMT_EV_CONNECT_FAILED,
110 MGMT_EV_PIN_CODE_REQUEST,
111 MGMT_EV_USER_CONFIRM_REQUEST,
112 MGMT_EV_USER_PASSKEY_REQUEST,
114 MGMT_EV_DEVICE_FOUND,
116 MGMT_EV_DEVICE_BLOCKED,
117 MGMT_EV_DEVICE_UNBLOCKED,
118 MGMT_EV_DEVICE_UNPAIRED,
119 MGMT_EV_PASSKEY_NOTIFY,
122 MGMT_EV_DEVICE_ADDED,
123 MGMT_EV_DEVICE_REMOVED,
124 MGMT_EV_NEW_CONN_PARAM,
125 MGMT_EV_UNCONF_INDEX_ADDED,
126 MGMT_EV_UNCONF_INDEX_REMOVED,
127 MGMT_EV_NEW_CONFIG_OPTIONS,
130 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
132 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
133 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
136 struct list_head list;
144 /* HCI to MGMT error code conversion table */
145 static u8 mgmt_status_table[] = {
147 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
148 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
149 MGMT_STATUS_FAILED, /* Hardware Failure */
150 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
151 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
152 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
153 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
154 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
155 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
156 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
157 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
158 MGMT_STATUS_BUSY, /* Command Disallowed */
159 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
160 MGMT_STATUS_REJECTED, /* Rejected Security */
161 MGMT_STATUS_REJECTED, /* Rejected Personal */
162 MGMT_STATUS_TIMEOUT, /* Host Timeout */
163 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
164 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
165 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
166 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
167 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
168 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
169 MGMT_STATUS_BUSY, /* Repeated Attempts */
170 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
171 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
172 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
173 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
174 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
175 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
176 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
177 MGMT_STATUS_FAILED, /* Unspecified Error */
178 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
179 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
180 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
181 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
182 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
183 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
184 MGMT_STATUS_FAILED, /* Unit Link Key Used */
185 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
186 MGMT_STATUS_TIMEOUT, /* Instant Passed */
187 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
188 MGMT_STATUS_FAILED, /* Transaction Collision */
189 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
190 MGMT_STATUS_REJECTED, /* QoS Rejected */
191 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
192 MGMT_STATUS_REJECTED, /* Insufficient Security */
193 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
194 MGMT_STATUS_BUSY, /* Role Switch Pending */
195 MGMT_STATUS_FAILED, /* Slot Violation */
196 MGMT_STATUS_FAILED, /* Role Switch Failed */
197 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
198 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
199 MGMT_STATUS_BUSY, /* Host Busy Pairing */
200 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
201 MGMT_STATUS_BUSY, /* Controller Busy */
202 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
203 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
204 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
205 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
206 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
209 static u8 mgmt_status(u8 hci_status)
211 if (hci_status < ARRAY_SIZE(mgmt_status_table))
212 return mgmt_status_table[hci_status];
214 return MGMT_STATUS_FAILED;
217 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
218 struct sock *skip_sk)
221 struct mgmt_hdr *hdr;
223 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
227 hdr = (void *) skb_put(skb, sizeof(*hdr));
228 hdr->opcode = cpu_to_le16(event);
230 hdr->index = cpu_to_le16(hdev->id);
232 hdr->index = cpu_to_le16(MGMT_INDEX_NONE);
233 hdr->len = cpu_to_le16(data_len);
236 memcpy(skb_put(skb, data_len), data, data_len);
239 __net_timestamp(skb);
241 hci_send_to_control(skb, skip_sk);
247 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
250 struct mgmt_hdr *hdr;
251 struct mgmt_ev_cmd_status *ev;
254 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
256 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
260 hdr = (void *) skb_put(skb, sizeof(*hdr));
262 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS);
263 hdr->index = cpu_to_le16(index);
264 hdr->len = cpu_to_le16(sizeof(*ev));
266 ev = (void *) skb_put(skb, sizeof(*ev));
268 ev->opcode = cpu_to_le16(cmd);
270 err = sock_queue_rcv_skb(sk, skb);
277 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
278 void *rp, size_t rp_len)
281 struct mgmt_hdr *hdr;
282 struct mgmt_ev_cmd_complete *ev;
285 BT_DBG("sock %p", sk);
287 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
291 hdr = (void *) skb_put(skb, sizeof(*hdr));
293 hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE);
294 hdr->index = cpu_to_le16(index);
295 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
297 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
298 ev->opcode = cpu_to_le16(cmd);
302 memcpy(ev->data, rp, rp_len);
304 err = sock_queue_rcv_skb(sk, skb);
311 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
314 struct mgmt_rp_read_version rp;
316 BT_DBG("sock %p", sk);
318 rp.version = MGMT_VERSION;
319 rp.revision = cpu_to_le16(MGMT_REVISION);
321 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
325 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
328 struct mgmt_rp_read_commands *rp;
329 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
330 const u16 num_events = ARRAY_SIZE(mgmt_events);
335 BT_DBG("sock %p", sk);
337 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
339 rp = kmalloc(rp_size, GFP_KERNEL);
343 rp->num_commands = cpu_to_le16(num_commands);
344 rp->num_events = cpu_to_le16(num_events);
346 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
347 put_unaligned_le16(mgmt_commands[i], opcode);
349 for (i = 0; i < num_events; i++, opcode++)
350 put_unaligned_le16(mgmt_events[i], opcode);
352 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
359 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
362 struct mgmt_rp_read_index_list *rp;
368 BT_DBG("sock %p", sk);
370 read_lock(&hci_dev_list_lock);
373 list_for_each_entry(d, &hci_dev_list, list) {
374 if (d->dev_type == HCI_BREDR &&
375 !test_bit(HCI_UNCONFIGURED, &d->dev_flags))
379 rp_len = sizeof(*rp) + (2 * count);
380 rp = kmalloc(rp_len, GFP_ATOMIC);
382 read_unlock(&hci_dev_list_lock);
387 list_for_each_entry(d, &hci_dev_list, list) {
388 if (test_bit(HCI_SETUP, &d->dev_flags) ||
389 test_bit(HCI_CONFIG, &d->dev_flags) ||
390 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
393 /* Devices marked as raw-only are neither configured
394 * nor unconfigured controllers.
396 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
399 if (d->dev_type == HCI_BREDR &&
400 !test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
401 rp->index[count++] = cpu_to_le16(d->id);
402 BT_DBG("Added hci%u", d->id);
406 rp->num_controllers = cpu_to_le16(count);
407 rp_len = sizeof(*rp) + (2 * count);
409 read_unlock(&hci_dev_list_lock);
411 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
419 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
420 void *data, u16 data_len)
422 struct mgmt_rp_read_unconf_index_list *rp;
428 BT_DBG("sock %p", sk);
430 read_lock(&hci_dev_list_lock);
433 list_for_each_entry(d, &hci_dev_list, list) {
434 if (d->dev_type == HCI_BREDR &&
435 test_bit(HCI_UNCONFIGURED, &d->dev_flags))
439 rp_len = sizeof(*rp) + (2 * count);
440 rp = kmalloc(rp_len, GFP_ATOMIC);
442 read_unlock(&hci_dev_list_lock);
447 list_for_each_entry(d, &hci_dev_list, list) {
448 if (test_bit(HCI_SETUP, &d->dev_flags) ||
449 test_bit(HCI_CONFIG, &d->dev_flags) ||
450 test_bit(HCI_USER_CHANNEL, &d->dev_flags))
453 /* Devices marked as raw-only are neither configured
454 * nor unconfigured controllers.
456 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
459 if (d->dev_type == HCI_BREDR &&
460 test_bit(HCI_UNCONFIGURED, &d->dev_flags)) {
461 rp->index[count++] = cpu_to_le16(d->id);
462 BT_DBG("Added hci%u", d->id);
466 rp->num_controllers = cpu_to_le16(count);
467 rp_len = sizeof(*rp) + (2 * count);
469 read_unlock(&hci_dev_list_lock);
471 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_UNCONF_INDEX_LIST,
479 static bool is_configured(struct hci_dev *hdev)
481 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
482 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
485 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
486 !bacmp(&hdev->public_addr, BDADDR_ANY))
492 static __le32 get_missing_options(struct hci_dev *hdev)
496 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
497 !test_bit(HCI_EXT_CONFIGURED, &hdev->dev_flags))
498 options |= MGMT_OPTION_EXTERNAL_CONFIG;
500 if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) &&
501 !bacmp(&hdev->public_addr, BDADDR_ANY))
502 options |= MGMT_OPTION_PUBLIC_ADDRESS;
504 return cpu_to_le32(options);
507 static int new_options(struct hci_dev *hdev, struct sock *skip)
509 __le32 options = get_missing_options(hdev);
511 return mgmt_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
512 sizeof(options), skip);
515 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
517 __le32 options = get_missing_options(hdev);
519 return cmd_complete(sk, hdev->id, opcode, 0, &options,
523 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
524 void *data, u16 data_len)
526 struct mgmt_rp_read_config_info rp;
529 BT_DBG("sock %p %s", sk, hdev->name);
533 memset(&rp, 0, sizeof(rp));
534 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
536 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
537 options |= MGMT_OPTION_EXTERNAL_CONFIG;
539 if (hdev->set_bdaddr)
540 options |= MGMT_OPTION_PUBLIC_ADDRESS;
542 rp.supported_options = cpu_to_le32(options);
543 rp.missing_options = get_missing_options(hdev);
545 hci_dev_unlock(hdev);
547 return cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, &rp,
551 static u32 get_supported_settings(struct hci_dev *hdev)
555 settings |= MGMT_SETTING_POWERED;
556 settings |= MGMT_SETTING_PAIRABLE;
557 settings |= MGMT_SETTING_DEBUG_KEYS;
558 settings |= MGMT_SETTING_CONNECTABLE;
559 settings |= MGMT_SETTING_DISCOVERABLE;
561 if (lmp_bredr_capable(hdev)) {
562 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
563 settings |= MGMT_SETTING_FAST_CONNECTABLE;
564 settings |= MGMT_SETTING_BREDR;
565 settings |= MGMT_SETTING_LINK_SECURITY;
567 if (lmp_ssp_capable(hdev)) {
568 settings |= MGMT_SETTING_SSP;
569 settings |= MGMT_SETTING_HS;
572 if (lmp_sc_capable(hdev) ||
573 test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
574 settings |= MGMT_SETTING_SECURE_CONN;
577 if (lmp_le_capable(hdev)) {
578 settings |= MGMT_SETTING_LE;
579 settings |= MGMT_SETTING_ADVERTISING;
580 settings |= MGMT_SETTING_PRIVACY;
583 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
585 settings |= MGMT_SETTING_CONFIGURATION;
590 static u32 get_current_settings(struct hci_dev *hdev)
594 if (hdev_is_powered(hdev))
595 settings |= MGMT_SETTING_POWERED;
597 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
598 settings |= MGMT_SETTING_CONNECTABLE;
600 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
601 settings |= MGMT_SETTING_FAST_CONNECTABLE;
603 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
604 settings |= MGMT_SETTING_DISCOVERABLE;
606 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
607 settings |= MGMT_SETTING_PAIRABLE;
609 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
610 settings |= MGMT_SETTING_BREDR;
612 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
613 settings |= MGMT_SETTING_LE;
615 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
616 settings |= MGMT_SETTING_LINK_SECURITY;
618 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
619 settings |= MGMT_SETTING_SSP;
621 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
622 settings |= MGMT_SETTING_HS;
624 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
625 settings |= MGMT_SETTING_ADVERTISING;
627 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
628 settings |= MGMT_SETTING_SECURE_CONN;
630 if (test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
631 settings |= MGMT_SETTING_DEBUG_KEYS;
633 if (test_bit(HCI_PRIVACY, &hdev->dev_flags))
634 settings |= MGMT_SETTING_PRIVACY;
639 #define PNP_INFO_SVCLASS_ID 0x1200
641 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
643 u8 *ptr = data, *uuids_start = NULL;
644 struct bt_uuid *uuid;
649 list_for_each_entry(uuid, &hdev->uuids, list) {
652 if (uuid->size != 16)
655 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
659 if (uuid16 == PNP_INFO_SVCLASS_ID)
665 uuids_start[1] = EIR_UUID16_ALL;
669 /* Stop if not enough space to put next UUID */
670 if ((ptr - data) + sizeof(u16) > len) {
671 uuids_start[1] = EIR_UUID16_SOME;
675 *ptr++ = (uuid16 & 0x00ff);
676 *ptr++ = (uuid16 & 0xff00) >> 8;
677 uuids_start[0] += sizeof(uuid16);
683 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
685 u8 *ptr = data, *uuids_start = NULL;
686 struct bt_uuid *uuid;
691 list_for_each_entry(uuid, &hdev->uuids, list) {
692 if (uuid->size != 32)
698 uuids_start[1] = EIR_UUID32_ALL;
702 /* Stop if not enough space to put next UUID */
703 if ((ptr - data) + sizeof(u32) > len) {
704 uuids_start[1] = EIR_UUID32_SOME;
708 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
710 uuids_start[0] += sizeof(u32);
716 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
718 u8 *ptr = data, *uuids_start = NULL;
719 struct bt_uuid *uuid;
724 list_for_each_entry(uuid, &hdev->uuids, list) {
725 if (uuid->size != 128)
731 uuids_start[1] = EIR_UUID128_ALL;
735 /* Stop if not enough space to put next UUID */
736 if ((ptr - data) + 16 > len) {
737 uuids_start[1] = EIR_UUID128_SOME;
741 memcpy(ptr, uuid->uuid, 16);
743 uuids_start[0] += 16;
749 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
751 struct pending_cmd *cmd;
753 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
754 if (cmd->opcode == opcode)
761 static struct pending_cmd *mgmt_pending_find_data(u16 opcode,
762 struct hci_dev *hdev,
765 struct pending_cmd *cmd;
767 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
768 if (cmd->user_data != data)
770 if (cmd->opcode == opcode)
777 static u8 create_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
782 name_len = strlen(hdev->dev_name);
784 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
786 if (name_len > max_len) {
788 ptr[1] = EIR_NAME_SHORT;
790 ptr[1] = EIR_NAME_COMPLETE;
792 ptr[0] = name_len + 1;
794 memcpy(ptr + 2, hdev->dev_name, name_len);
796 ad_len += (name_len + 2);
797 ptr += (name_len + 2);
803 static void update_scan_rsp_data(struct hci_request *req)
805 struct hci_dev *hdev = req->hdev;
806 struct hci_cp_le_set_scan_rsp_data cp;
809 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
812 memset(&cp, 0, sizeof(cp));
814 len = create_scan_rsp_data(hdev, cp.data);
816 if (hdev->scan_rsp_data_len == len &&
817 memcmp(cp.data, hdev->scan_rsp_data, len) == 0)
820 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
821 hdev->scan_rsp_data_len = len;
825 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
828 static u8 get_adv_discov_flags(struct hci_dev *hdev)
830 struct pending_cmd *cmd;
832 /* If there's a pending mgmt command the flags will not yet have
833 * their final values, so check for this first.
835 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
837 struct mgmt_mode *cp = cmd->param;
839 return LE_AD_GENERAL;
840 else if (cp->val == 0x02)
841 return LE_AD_LIMITED;
843 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
844 return LE_AD_LIMITED;
845 else if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
846 return LE_AD_GENERAL;
852 static u8 create_adv_data(struct hci_dev *hdev, u8 *ptr)
854 u8 ad_len = 0, flags = 0;
856 flags |= get_adv_discov_flags(hdev);
858 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
859 flags |= LE_AD_NO_BREDR;
862 BT_DBG("adv flags 0x%02x", flags);
872 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
874 ptr[1] = EIR_TX_POWER;
875 ptr[2] = (u8) hdev->adv_tx_power;
884 static void update_adv_data(struct hci_request *req)
886 struct hci_dev *hdev = req->hdev;
887 struct hci_cp_le_set_adv_data cp;
890 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
893 memset(&cp, 0, sizeof(cp));
895 len = create_adv_data(hdev, cp.data);
897 if (hdev->adv_data_len == len &&
898 memcmp(cp.data, hdev->adv_data, len) == 0)
901 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
902 hdev->adv_data_len = len;
906 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
909 static void create_eir(struct hci_dev *hdev, u8 *data)
914 name_len = strlen(hdev->dev_name);
920 ptr[1] = EIR_NAME_SHORT;
922 ptr[1] = EIR_NAME_COMPLETE;
924 /* EIR Data length */
925 ptr[0] = name_len + 1;
927 memcpy(ptr + 2, hdev->dev_name, name_len);
929 ptr += (name_len + 2);
932 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
934 ptr[1] = EIR_TX_POWER;
935 ptr[2] = (u8) hdev->inq_tx_power;
940 if (hdev->devid_source > 0) {
942 ptr[1] = EIR_DEVICE_ID;
944 put_unaligned_le16(hdev->devid_source, ptr + 2);
945 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
946 put_unaligned_le16(hdev->devid_product, ptr + 6);
947 put_unaligned_le16(hdev->devid_version, ptr + 8);
952 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
953 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
954 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
957 static void update_eir(struct hci_request *req)
959 struct hci_dev *hdev = req->hdev;
960 struct hci_cp_write_eir cp;
962 if (!hdev_is_powered(hdev))
965 if (!lmp_ext_inq_capable(hdev))
968 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
971 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
974 memset(&cp, 0, sizeof(cp));
976 create_eir(hdev, cp.data);
978 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
981 memcpy(hdev->eir, cp.data, sizeof(cp.data));
983 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
986 static u8 get_service_classes(struct hci_dev *hdev)
988 struct bt_uuid *uuid;
991 list_for_each_entry(uuid, &hdev->uuids, list)
992 val |= uuid->svc_hint;
997 static void update_class(struct hci_request *req)
999 struct hci_dev *hdev = req->hdev;
1002 BT_DBG("%s", hdev->name);
1004 if (!hdev_is_powered(hdev))
1007 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1010 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1013 cod[0] = hdev->minor_class;
1014 cod[1] = hdev->major_class;
1015 cod[2] = get_service_classes(hdev);
1017 if (test_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags))
1020 if (memcmp(cod, hdev->dev_class, 3) == 0)
1023 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1026 static bool get_connectable(struct hci_dev *hdev)
1028 struct pending_cmd *cmd;
1030 /* If there's a pending mgmt command the flag will not yet have
1031 * it's final value, so check for this first.
1033 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1035 struct mgmt_mode *cp = cmd->param;
1039 return test_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1042 static void disable_advertising(struct hci_request *req)
1046 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1049 static void enable_advertising(struct hci_request *req)
1051 struct hci_dev *hdev = req->hdev;
1052 struct hci_cp_le_set_adv_param cp;
1053 u8 own_addr_type, enable = 0x01;
1056 if (hci_conn_num(hdev, LE_LINK) > 0)
1059 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1060 disable_advertising(req);
1062 /* Clear the HCI_LE_ADV bit temporarily so that the
1063 * hci_update_random_address knows that it's safe to go ahead
1064 * and write a new random address. The flag will be set back on
1065 * as soon as the SET_ADV_ENABLE HCI command completes.
1067 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1069 connectable = get_connectable(hdev);
1071 /* Set require_privacy to true only when non-connectable
1072 * advertising is used. In that case it is fine to use a
1073 * non-resolvable private address.
1075 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
1078 memset(&cp, 0, sizeof(cp));
1079 cp.min_interval = cpu_to_le16(0x0800);
1080 cp.max_interval = cpu_to_le16(0x0800);
1081 cp.type = connectable ? LE_ADV_IND : LE_ADV_NONCONN_IND;
1082 cp.own_address_type = own_addr_type;
1083 cp.channel_map = hdev->le_adv_channel_map;
1085 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
1087 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1090 static void service_cache_off(struct work_struct *work)
1092 struct hci_dev *hdev = container_of(work, struct hci_dev,
1093 service_cache.work);
1094 struct hci_request req;
1096 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1099 hci_req_init(&req, hdev);
1106 hci_dev_unlock(hdev);
1108 hci_req_run(&req, NULL);
1111 static void rpa_expired(struct work_struct *work)
1113 struct hci_dev *hdev = container_of(work, struct hci_dev,
1115 struct hci_request req;
1119 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
1121 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1124 /* The generation of a new RPA and programming it into the
1125 * controller happens in the enable_advertising() function.
1127 hci_req_init(&req, hdev);
1128 enable_advertising(&req);
1129 hci_req_run(&req, NULL);
1132 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
1134 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
1137 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
1138 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
1140 /* Non-mgmt controlled devices get this bit set
1141 * implicitly so that pairing works for them, however
1142 * for mgmt we require user-space to explicitly enable
1145 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1148 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
1149 void *data, u16 data_len)
1151 struct mgmt_rp_read_info rp;
1153 BT_DBG("sock %p %s", sk, hdev->name);
1157 memset(&rp, 0, sizeof(rp));
1159 bacpy(&rp.bdaddr, &hdev->bdaddr);
1161 rp.version = hdev->hci_ver;
1162 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
1164 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
1165 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
1167 memcpy(rp.dev_class, hdev->dev_class, 3);
1169 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
1170 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
1172 hci_dev_unlock(hdev);
1174 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
1178 static void mgmt_pending_free(struct pending_cmd *cmd)
1185 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
1186 struct hci_dev *hdev, void *data,
1189 struct pending_cmd *cmd;
1191 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1195 cmd->opcode = opcode;
1196 cmd->index = hdev->id;
1198 cmd->param = kmalloc(len, GFP_KERNEL);
1205 memcpy(cmd->param, data, len);
1210 list_add(&cmd->list, &hdev->mgmt_pending);
1215 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
1216 void (*cb)(struct pending_cmd *cmd,
1220 struct pending_cmd *cmd, *tmp;
1222 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
1223 if (opcode > 0 && cmd->opcode != opcode)
1230 static void mgmt_pending_remove(struct pending_cmd *cmd)
1232 list_del(&cmd->list);
1233 mgmt_pending_free(cmd);
1236 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1238 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1240 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
1244 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1246 BT_DBG("%s status 0x%02x", hdev->name, status);
1248 if (hci_conn_count(hdev) == 0) {
1249 cancel_delayed_work(&hdev->power_off);
1250 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1254 static bool hci_stop_discovery(struct hci_request *req)
1256 struct hci_dev *hdev = req->hdev;
1257 struct hci_cp_remote_name_req_cancel cp;
1258 struct inquiry_entry *e;
1260 switch (hdev->discovery.state) {
1261 case DISCOVERY_FINDING:
1262 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1263 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1265 cancel_delayed_work(&hdev->le_scan_disable);
1266 hci_req_add_le_scan_disable(req);
1271 case DISCOVERY_RESOLVING:
1272 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1277 bacpy(&cp.bdaddr, &e->data.bdaddr);
1278 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1284 /* Passive scanning */
1285 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
1286 hci_req_add_le_scan_disable(req);
1296 static int clean_up_hci_state(struct hci_dev *hdev)
1298 struct hci_request req;
1299 struct hci_conn *conn;
1300 bool discov_stopped;
1303 hci_req_init(&req, hdev);
1305 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1306 test_bit(HCI_PSCAN, &hdev->flags)) {
1308 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1311 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
1312 disable_advertising(&req);
1314 discov_stopped = hci_stop_discovery(&req);
1316 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1317 struct hci_cp_disconnect dc;
1318 struct hci_cp_reject_conn_req rej;
1320 switch (conn->state) {
1323 dc.handle = cpu_to_le16(conn->handle);
1324 dc.reason = 0x15; /* Terminated due to Power Off */
1325 hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1328 if (conn->type == LE_LINK)
1329 hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
1331 else if (conn->type == ACL_LINK)
1332 hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
1336 bacpy(&rej.bdaddr, &conn->dst);
1337 rej.reason = 0x15; /* Terminated due to Power Off */
1338 if (conn->type == ACL_LINK)
1339 hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
1341 else if (conn->type == SCO_LINK)
1342 hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
1348 err = hci_req_run(&req, clean_up_hci_complete);
1349 if (!err && discov_stopped)
1350 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1355 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1358 struct mgmt_mode *cp = data;
1359 struct pending_cmd *cmd;
1362 BT_DBG("request for %s", hdev->name);
1364 if (cp->val != 0x00 && cp->val != 0x01)
1365 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1366 MGMT_STATUS_INVALID_PARAMS);
1370 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
1371 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1376 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
1377 cancel_delayed_work(&hdev->power_off);
1380 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
1382 err = mgmt_powered(hdev, 1);
1387 if (!!cp->val == hdev_is_powered(hdev)) {
1388 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1392 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1399 queue_work(hdev->req_workqueue, &hdev->power_on);
1402 /* Disconnect connections, stop scans, etc */
1403 err = clean_up_hci_state(hdev);
1405 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1406 HCI_POWER_OFF_TIMEOUT);
1408 /* ENODATA means there were no HCI commands queued */
1409 if (err == -ENODATA) {
1410 cancel_delayed_work(&hdev->power_off);
1411 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1417 hci_dev_unlock(hdev);
1421 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1425 ev = cpu_to_le32(get_current_settings(hdev));
1427 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
1432 struct hci_dev *hdev;
1436 static void settings_rsp(struct pending_cmd *cmd, void *data)
1438 struct cmd_lookup *match = data;
1440 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1442 list_del(&cmd->list);
1444 if (match->sk == NULL) {
1445 match->sk = cmd->sk;
1446 sock_hold(match->sk);
1449 mgmt_pending_free(cmd);
1452 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
1456 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1457 mgmt_pending_remove(cmd);
1460 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1462 if (!lmp_bredr_capable(hdev))
1463 return MGMT_STATUS_NOT_SUPPORTED;
1464 else if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1465 return MGMT_STATUS_REJECTED;
1467 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an mgmt status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is administratively disabled,
 * SUCCESS otherwise.
 */
1470 static u8 mgmt_le_support(struct hci_dev *hdev)
1472 if (!lmp_le_capable(hdev))
1473 return MGMT_STATUS_NOT_SUPPORTED;
1474 else if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
1475 return MGMT_STATUS_REJECTED;
1477 return MGMT_STATUS_SUCCESS;
/* HCI request completion handler for Set Discoverable: on error, fail
 * the pending mgmt command and drop the limited-discoverable flag; on
 * success, update HCI_DISCOVERABLE, (re)arm the discoverable timeout,
 * answer the command and broadcast New Settings.
 * NOTE(review): several lines are elided from this excerpt (locks,
 * braces, the success branch condition) — confirm against full file.
 */
1480 static void set_discoverable_complete(struct hci_dev *hdev, u8 status)
1482 struct pending_cmd *cmd;
1483 struct mgmt_mode *cp;
1484 struct hci_request req;
1487 BT_DBG("status 0x%02x", status);
1491 cmd = mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
/* Error path: report the failure and clear the limited flag that was
 * speculatively set when the request was issued.
 */
1496 u8 mgmt_err = mgmt_status(status);
1497 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1498 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1504 changed = !test_and_set_bit(HCI_DISCOVERABLE,
/* Arm the delayed discoverable-off work using the stored timeout
 * (seconds converted to jiffies).
 */
1507 if (hdev->discov_timeout > 0) {
1508 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1509 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1513 changed = test_and_clear_bit(HCI_DISCOVERABLE,
1517 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1520 new_settings(hdev, cmd->sk);
1522 /* When the discoverable mode gets changed, make sure
1523 * that class of device has the limited discoverable
1524 * bit correctly set.
1526 hci_req_init(&req, hdev);
1528 hci_req_run(&req, NULL);
1531 mgmt_pending_remove(cmd);
1534 hci_dev_unlock(hdev);
/* MGMT Set Discoverable command handler.
 *
 * Validates the mode (0x00 off, 0x01 general, 0x02 limited) and timeout
 * combination, handles the powered-off case by just flipping the flag,
 * short-circuits when only the timeout changes, and otherwise builds an
 * HCI request (IAC LAP + scan enable for BR/EDR, advertising data for
 * LE-only) completed by set_discoverable_complete().
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
1537 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1540 struct mgmt_cp_set_discoverable *cp = data;
1541 struct pending_cmd *cmd;
1542 struct hci_request req;
1547 BT_DBG("request for %s", hdev->name);
/* Reject if neither transport is enabled. */
1549 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1550 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1551 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1552 MGMT_STATUS_REJECTED);
1554 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1555 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1556 MGMT_STATUS_INVALID_PARAMS);
1558 timeout = __le16_to_cpu(cp->timeout);
1560 /* Disabling discoverable requires that no timeout is set,
1561 * and enabling limited discoverable requires a timeout.
1563 if ((cp->val == 0x00 && timeout > 0) ||
1564 (cp->val == 0x02 && timeout == 0))
1565 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1566 MGMT_STATUS_INVALID_PARAMS);
/* A timeout while powered off cannot be armed. */
1570 if (!hdev_is_powered(hdev) && timeout > 0) {
1571 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1572 MGMT_STATUS_NOT_POWERED);
/* Only one discoverable/connectable operation at a time. */
1576 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1577 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1578 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable requires connectable to be enabled. */
1583 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
1584 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1585 MGMT_STATUS_REJECTED);
/* Powered off: just toggle the setting flag, no HCI traffic. */
1589 if (!hdev_is_powered(hdev)) {
1590 bool changed = false;
1592 /* Setting limited discoverable when powered off is
1593 * not a valid operation since it requires a timeout
1594 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1596 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
1597 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1601 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1606 err = new_settings(hdev, sk);
1611 /* If the current mode is the same, then just update the timeout
1612 * value with the new value. And if only the timeout gets updated,
1613 * then no need for any HCI transactions.
1615 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags) &&
1616 (cp->val == 0x02) == test_bit(HCI_LIMITED_DISCOVERABLE,
1617 &hdev->dev_flags)) {
1618 cancel_delayed_work(&hdev->discov_off);
1619 hdev->discov_timeout = timeout;
1621 if (cp->val && hdev->discov_timeout > 0) {
1622 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1623 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
1627 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1631 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1637 /* Cancel any potential discoverable timeout that might be
1638 * still active and store new timeout value. The arming of
1639 * the timeout happens in the complete handler.
1641 cancel_delayed_work(&hdev->discov_off);
1642 hdev->discov_timeout = timeout;
1644 /* Limited discoverable mode */
1645 if (cp->val == 0x02)
1646 set_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1648 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1650 hci_req_init(&req, hdev);
1652 /* The procedure for LE-only controllers is much simpler - just
1653 * update the advertising data.
1655 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1661 struct hci_cp_write_current_iac_lap hci_cp;
1663 if (cp->val == 0x02) {
1664 /* Limited discoverable mode */
1665 hci_cp.num_iac = min_t(u8, hdev->num_iac, 2);
/* LIAC (0x9e8b00) followed by GIAC (0x9e8b33), little-endian LAPs. */
1666 hci_cp.iac_lap[0] = 0x00; /* LIAC */
1667 hci_cp.iac_lap[1] = 0x8b;
1668 hci_cp.iac_lap[2] = 0x9e;
1669 hci_cp.iac_lap[3] = 0x33; /* GIAC */
1670 hci_cp.iac_lap[4] = 0x8b;
1671 hci_cp.iac_lap[5] = 0x9e;
1673 /* General discoverable mode */
1675 hci_cp.iac_lap[0] = 0x33; /* GIAC */
1676 hci_cp.iac_lap[1] = 0x8b;
1677 hci_cp.iac_lap[2] = 0x9e;
/* Payload length is 1 (num_iac) plus 3 bytes per LAP. */
1680 hci_req_add(&req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1681 (hci_cp.num_iac * 3) + 1, &hci_cp);
1683 scan |= SCAN_INQUIRY;
1685 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1688 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1691 update_adv_data(&req);
1693 err = hci_req_run(&req, set_discoverable_complete);
1695 mgmt_pending_remove(cmd);
1698 hci_dev_unlock(hdev);
/* Queue page-scan parameter HCI commands implementing fast connectable:
 * interlaced scan with a 160 ms interval when enabled, standard scan
 * with the default 1.28 s interval otherwise. Commands are only added
 * when the requested values differ from the controller's current ones.
 * No-op for non-BR/EDR or pre-1.2 controllers.
 */
1702 static void write_fast_connectable(struct hci_request *req, bool enable)
1704 struct hci_dev *hdev = req->hdev;
1705 struct hci_cp_write_page_scan_activity acp;
1708 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
/* Page scan type/activity commands require Bluetooth 1.2 or later. */
1711 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1715 type = PAGE_SCAN_TYPE_INTERLACED;
1717 /* 160 msec page scan interval */
1718 acp.interval = cpu_to_le16(0x0100);
1720 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1722 /* default 1.28 sec page scan */
1723 acp.interval = cpu_to_le16(0x0800);
1726 acp.window = cpu_to_le16(0x0012);
/* Only send commands for values that actually change. */
1728 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1729 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1730 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1733 if (hdev->page_scan_type != type)
1734 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion handler for Set Connectable: fail the pending
 * command on error, otherwise update HCI_CONNECTABLE, answer the
 * command, broadcast New Settings when the flag changed and refresh the
 * LE background scan.
 * NOTE(review): lock/brace lines and the branch condition between the
 * set/clear paths are elided from this excerpt.
 */
1737 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1739 struct pending_cmd *cmd;
1740 struct mgmt_mode *cp;
1743 BT_DBG("status 0x%02x", status);
1747 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1752 u8 mgmt_err = mgmt_status(status);
1753 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1759 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1761 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1763 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1766 new_settings(hdev, cmd->sk);
/* Connectable state influences whether background scanning runs. */
1767 hci_update_background_scan(hdev);
1771 mgmt_pending_remove(cmd);
1774 hci_dev_unlock(hdev);
/* Flag-only Set Connectable path (no HCI traffic): toggle
 * HCI_CONNECTABLE, clear HCI_DISCOVERABLE when disabling, reply with
 * the settings and broadcast New Settings if anything changed.
 */
1777 static int set_connectable_update_settings(struct hci_dev *hdev,
1778 struct sock *sk, u8 val)
1780 bool changed = false;
1783 if (!!val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1787 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1789 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
/* Non-connectable implies non-discoverable. */
1790 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1793 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1798 hci_update_background_scan(hdev);
1799 return new_settings(hdev, sk);
/* MGMT Set Connectable command handler.
 *
 * After parameter validation, powered-off devices take the flag-only
 * path (set_connectable_update_settings); otherwise an HCI request is
 * built: scan-enable changes for BR/EDR, advertising-data updates for
 * LE-only, plus fast-connectable and advertising adjustments. Completed
 * by set_connectable_complete().
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
1805 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1808 struct mgmt_mode *cp = data;
1809 struct pending_cmd *cmd;
1810 struct hci_request req;
1814 BT_DBG("request for %s", hdev->name);
1816 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
1817 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
1818 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1819 MGMT_STATUS_REJECTED);
1821 if (cp->val != 0x00 && cp->val != 0x01)
1822 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1823 MGMT_STATUS_INVALID_PARAMS);
1827 if (!hdev_is_powered(hdev)) {
1828 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Only one discoverable/connectable operation at a time. */
1832 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1833 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1834 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1839 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1845 hci_req_init(&req, hdev);
1847 /* If BR/EDR is not enabled and we disable advertising as a
1848 * by-product of disabling connectable, we need to update the
1849 * advertising flags.
1851 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1853 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
1854 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1856 update_adv_data(&req);
1857 } else if (cp->val != test_bit(HCI_PSCAN, &hdev->flags)) {
/* Disabling connectable also cancels a running discoverable timer. */
1863 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1864 hdev->discov_timeout > 0)
1865 cancel_delayed_work(&hdev->discov_off);
1868 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1871 /* If we're going from non-connectable to connectable or
1872 * vice-versa when fast connectable is enabled ensure that fast
1873 * connectable gets disabled. write_fast_connectable won't do
1874 * anything if the page scan parameters are already what they
1877 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1878 write_fast_connectable(&req, false);
1880 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) &&
1881 !test_bit(HCI_LE_ADV, &hdev->dev_flags))
1882 enable_advertising(&req);
1884 err = hci_req_run(&req, set_connectable_complete);
1886 mgmt_pending_remove(cmd);
/* ENODATA: no HCI commands were queued, fall back to flag-only path. */
1887 if (err == -ENODATA)
1888 err = set_connectable_update_settings(hdev, sk,
1894 hci_dev_unlock(hdev);
/* MGMT Set Pairable command handler: pure flag toggle of HCI_PAIRABLE
 * (no HCI traffic), replying with the settings and broadcasting New
 * Settings when the flag changed.
 */
1898 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1901 struct mgmt_mode *cp = data;
1905 BT_DBG("request for %s", hdev->name);
1907 if (cp->val != 0x00 && cp->val != 0x01)
1908 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1909 MGMT_STATUS_INVALID_PARAMS);
1914 changed = !test_and_set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1916 changed = test_and_clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1918 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1923 err = new_settings(hdev, sk);
1926 hci_dev_unlock(hdev);
/* MGMT Set Link Security command handler.
 *
 * BR/EDR-only feature. When powered off, only the HCI_LINK_SECURITY
 * flag is toggled; when powered, HCI Write Auth Enable is sent and the
 * reply is deferred via a pending command. Short-circuits when the
 * controller's auth state already matches.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
1930 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1933 struct mgmt_mode *cp = data;
1934 struct pending_cmd *cmd;
1938 BT_DBG("request for %s", hdev->name);
1940 status = mgmt_bredr_support(hdev);
1942 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1945 if (cp->val != 0x00 && cp->val != 0x01)
1946 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1947 MGMT_STATUS_INVALID_PARAMS);
/* Powered off: flag-only toggle, no HCI traffic. */
1951 if (!hdev_is_powered(hdev)) {
1952 bool changed = false;
1954 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1955 &hdev->dev_flags)) {
1956 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1960 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1965 err = new_settings(hdev, sk);
1970 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1971 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Controller already in the requested auth state — nothing to send. */
1978 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1979 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1983 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1989 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1991 mgmt_pending_remove(cmd);
1996 hci_dev_unlock(hdev);
/* MGMT Set SSP (Secure Simple Pairing) command handler.
 *
 * Requires BR/EDR and SSP-capable hardware. Powered-off devices get a
 * flag-only toggle (disabling SSP also clears HS); powered devices send
 * HCI Write SSP Mode (plus disabling SSP debug mode when active) and
 * defer the reply through a pending command.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2000 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2002 struct mgmt_mode *cp = data;
2003 struct pending_cmd *cmd;
2007 BT_DBG("request for %s", hdev->name);
2009 status = mgmt_bredr_support(hdev);
2011 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
2013 if (!lmp_ssp_capable(hdev))
2014 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2015 MGMT_STATUS_NOT_SUPPORTED);
2017 if (cp->val != 0x00 && cp->val != 0x01)
2018 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2019 MGMT_STATUS_INVALID_PARAMS);
2023 if (!hdev_is_powered(hdev)) {
2027 changed = !test_and_set_bit(HCI_SSP_ENABLED,
2030 changed = test_and_clear_bit(HCI_SSP_ENABLED,
/* High Speed depends on SSP: disabling SSP disables HS too. */
2033 changed = test_and_clear_bit(HCI_HS_ENABLED,
2036 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2039 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2044 err = new_settings(hdev, sk);
2049 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev) ||
2050 mgmt_pending_find(MGMT_OP_SET_HS, hdev)) {
2051 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
2056 if (!!cp->val == test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
2057 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
2061 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* Turning SSP off while debug keys are in use also turns off SSP
 * debug mode (cp->val is 0x00 here).
 */
2067 if (!cp->val && test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
2068 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
2069 sizeof(cp->val), &cp->val);
2071 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
2073 mgmt_pending_remove(cmd);
2078 hci_dev_unlock(hdev);
/* MGMT Set High Speed command handler: flag-only toggle of
 * HCI_HS_ENABLED. Requires BR/EDR, SSP-capable hardware and SSP
 * enabled; disabling HS while powered is rejected in the branch shown.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2082 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2084 struct mgmt_mode *cp = data;
2089 BT_DBG("request for %s", hdev->name);
2091 status = mgmt_bredr_support(hdev);
2093 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
2095 if (!lmp_ssp_capable(hdev))
2096 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2097 MGMT_STATUS_NOT_SUPPORTED);
/* HS is only meaningful on top of SSP. */
2099 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
2100 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2101 MGMT_STATUS_REJECTED);
2103 if (cp->val != 0x00 && cp->val != 0x01)
2104 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2105 MGMT_STATUS_INVALID_PARAMS);
2110 changed = !test_and_set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2112 if (hdev_is_powered(hdev)) {
2113 err = cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
2114 MGMT_STATUS_REJECTED);
2118 changed = test_and_clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
2121 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
2126 err = new_settings(hdev, sk);
2129 hci_dev_unlock(hdev);
/* HCI request completion handler for Set LE: on error, fail all pending
 * SET_LE commands; on success, answer them, broadcast New Settings and
 * — if LE ended up enabled — refresh advertising/scan-response data and
 * the background scan.
 * NOTE(review): lock/brace lines are elided from this excerpt.
 */
2133 static void le_enable_complete(struct hci_dev *hdev, u8 status)
2135 struct cmd_lookup match = { NULL, hdev };
2138 u8 mgmt_err = mgmt_status(status);
2140 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
2145 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
2147 new_settings(hdev, match.sk);
2152 /* Make sure the controller has a good default for
2153 * advertising data. Restrict the update to when LE
2154 * has actually been enabled. During power on, the
2155 * update in powered_update_hci will take care of it.
2157 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2158 struct hci_request req;
2162 hci_req_init(&req, hdev);
2163 update_adv_data(&req);
2164 update_scan_rsp_data(&req);
2165 hci_req_run(&req, NULL);
2167 hci_update_background_scan(hdev);
2169 hci_dev_unlock(hdev);
/* MGMT Set Low Energy command handler.
 *
 * Rejected on non-LE hardware and on LE-only controllers (where LE
 * cannot be toggled). When powered off or when the host-LE state
 * already matches, only HCI_LE_ENABLED is toggled (clearing
 * HCI_ADVERTISING on disable); otherwise HCI Write LE Host Supported is
 * sent, preceded by disabling advertising if active, completed by
 * le_enable_complete().
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2173 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2175 struct mgmt_mode *cp = data;
2176 struct hci_cp_write_le_host_supported hci_cp;
2177 struct pending_cmd *cmd;
2178 struct hci_request req;
2182 BT_DBG("request for %s", hdev->name);
2184 if (!lmp_le_capable(hdev))
2185 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2186 MGMT_STATUS_NOT_SUPPORTED);
2188 if (cp->val != 0x00 && cp->val != 0x01)
2189 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2190 MGMT_STATUS_INVALID_PARAMS);
2192 /* LE-only devices do not allow toggling LE on/off */
2193 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
2194 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2195 MGMT_STATUS_REJECTED);
2200 enabled = lmp_host_le_capable(hdev);
/* Flag-only path: powered off, or host LE state already matches. */
2202 if (!hdev_is_powered(hdev) || val == enabled) {
2203 bool changed = false;
2205 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2206 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2210 if (!val && test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
2211 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
2215 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
2220 err = new_settings(hdev, sk);
2225 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev) ||
2226 mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
2227 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
2232 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
2238 hci_req_init(&req, hdev);
2240 memset(&hci_cp, 0, sizeof(hci_cp));
2244 hci_cp.simul = lmp_le_br_capable(hdev);
/* Stop active advertising before turning LE off. */
2246 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
2247 disable_advertising(&req);
2250 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
2253 err = hci_req_run(&req, le_enable_complete);
2255 mgmt_pending_remove(cmd);
2258 hci_dev_unlock(hdev);
2262 /* This is a helper function to test for pending mgmt commands that can
2263 * cause CoD or EIR HCI commands. We can only allow one such pending
2264 * mgmt command at a time since otherwise we cannot easily track what
2265 * the current values are, will be, and based on that calculate if a new
2266 * HCI command needs to be sent and if yes with what value.
2268 static bool pending_eir_or_class(struct hci_dev *hdev)
2270 struct pending_cmd *cmd;
/* Scan the pending-command list for any opcode that may touch the
 * class of device or EIR data.
 * NOTE(review): the function tail (return statements) is elided from
 * this excerpt.
 */
2272 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2273 switch (cmd->opcode) {
2274 case MGMT_OP_ADD_UUID:
2275 case MGMT_OP_REMOVE_UUID:
2276 case MGMT_OP_SET_DEV_CLASS:
2277 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) in
 * little-endian byte order; 16-bit/32-bit UUIDs share this suffix.
 */
2285 static const u8 bluetooth_base_uuid[] = {
2286 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2287 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Determine the shortest representation of a 128-bit UUID: if the first
 * 12 bytes don't match the Bluetooth Base UUID it is a full 128-bit
 * UUID; otherwise the top 32 bits decide between 16- and 32-bit forms.
 * NOTE(review): the return statements are elided from this excerpt.
 */
2290 static u8 get_uuid_size(const u8 *uuid)
2294 if (memcmp(uuid, bluetooth_base_uuid, 12))
2297 val = get_unaligned_le32(&uuid[12]);
/* Shared completion helper for class-of-device related mgmt commands:
 * answer the pending command identified by @mgmt_op with the current
 * device class and remove it.
 */
2304 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2306 struct pending_cmd *cmd;
2310 cmd = mgmt_pending_find(mgmt_op, hdev);
2314 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
2315 hdev->dev_class, 3);
2317 mgmt_pending_remove(cmd);
2320 hci_dev_unlock(hdev);
/* HCI request completion hook for Add UUID. */
2323 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
2325 BT_DBG("status 0x%02x", status);
2327 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT Add UUID command handler: record the UUID in hdev->uuids and run
 * an HCI request updating class/EIR. ENODATA (no commands queued) is
 * answered immediately with the current device class; otherwise the
 * reply is deferred via a pending command.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2330 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2332 struct mgmt_cp_add_uuid *cp = data;
2333 struct pending_cmd *cmd;
2334 struct hci_request req;
2335 struct bt_uuid *uuid;
2338 BT_DBG("request for %s", hdev->name);
/* Only one class/EIR affecting command may be pending at a time. */
2342 if (pending_eir_or_class(hdev)) {
2343 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2348 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2354 memcpy(uuid->uuid, cp->uuid, 16);
2355 uuid->svc_hint = cp->svc_hint;
2356 uuid->size = get_uuid_size(cp->uuid);
2358 list_add_tail(&uuid->list, &hdev->uuids);
2360 hci_req_init(&req, hdev);
2365 err = hci_req_run(&req, add_uuid_complete);
2367 if (err != -ENODATA)
/* Nothing to send — reply right away with the current class. */
2370 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2371 hdev->dev_class, 3);
2375 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2384 hci_dev_unlock(hdev);
/* Arm the service-cache: when the device is powered and the cache was
 * not already active, schedule the delayed service_cache work.
 * NOTE(review): the return statements are elided from this excerpt.
 */
2388 static bool enable_service_cache(struct hci_dev *hdev)
2390 if (!hdev_is_powered(hdev))
2393 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2394 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion hook for Remove UUID. */
2402 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
2404 BT_DBG("status 0x%02x", status);
2406 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT Remove UUID command handler: an all-zero UUID clears the whole
 * list (possibly via the service cache); otherwise all matching entries
 * are deleted (INVALID_PARAMS when none matched). An HCI request then
 * refreshes class/EIR, with the same ENODATA shortcut as add_uuid().
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2409 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2412 struct mgmt_cp_remove_uuid *cp = data;
2413 struct pending_cmd *cmd;
2414 struct bt_uuid *match, *tmp;
2415 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2416 struct hci_request req;
2419 BT_DBG("request for %s", hdev->name);
2423 if (pending_eir_or_class(hdev)) {
2424 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* The all-zero wildcard UUID removes every stored UUID. */
2429 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2430 hci_uuids_clear(hdev);
2432 if (enable_service_cache(hdev)) {
2433 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2434 0, hdev->dev_class, 3);
/* Remove every entry matching the requested UUID. */
2443 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2444 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2447 list_del(&match->list);
2453 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2454 MGMT_STATUS_INVALID_PARAMS);
2459 hci_req_init(&req, hdev);
2464 err = hci_req_run(&req, remove_uuid_complete);
2466 if (err != -ENODATA)
2469 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2470 hdev->dev_class, 3);
2474 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2483 hci_dev_unlock(hdev);
/* HCI request completion hook for Set Device Class. */
2487 static void set_class_complete(struct hci_dev *hdev, u8 status)
2489 BT_DBG("status 0x%02x", status);
2491 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT Set Device Class command handler.
 *
 * BR/EDR only. Validates that the reserved bits of minor (low 2) and
 * major (high 3) class are zero, stores the values, and — when powered
 * — flushes any armed service cache and runs an HCI request to write
 * the class, with the usual ENODATA immediate-reply shortcut.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2494 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2497 struct mgmt_cp_set_dev_class *cp = data;
2498 struct pending_cmd *cmd;
2499 struct hci_request req;
2502 BT_DBG("request for %s", hdev->name);
2504 if (!lmp_bredr_capable(hdev))
2505 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2506 MGMT_STATUS_NOT_SUPPORTED);
2510 if (pending_eir_or_class(hdev)) {
2511 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved CoD bits must be zero. */
2516 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2517 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2518 MGMT_STATUS_INVALID_PARAMS);
2522 hdev->major_class = cp->major;
2523 hdev->minor_class = cp->minor;
2525 if (!hdev_is_powered(hdev)) {
2526 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2527 hdev->dev_class, 3);
2531 hci_req_init(&req, hdev);
/* Unlock around cancel_delayed_work_sync() to avoid deadlocking with
 * the service_cache work, which takes hci_dev_lock itself.
 */
2533 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
2534 hci_dev_unlock(hdev);
2535 cancel_delayed_work_sync(&hdev->service_cache);
2542 err = hci_req_run(&req, set_class_complete);
2544 if (err != -ENODATA)
2547 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2548 hdev->dev_class, 3);
2552 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2561 hci_dev_unlock(hdev);
/* MGMT Load Link Keys command handler.
 *
 * Validates the key count against the command length (max_key_count
 * bounds the count so the expected-length computation cannot overflow
 * u16), validates each key's address type and key type, replaces the
 * stored key list, updates the keep-debug-keys policy, and replies.
 * Debug combination keys are never stored.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2565 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2568 struct mgmt_cp_load_link_keys *cp = data;
2569 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2570 sizeof(struct mgmt_link_key_info));
2571 u16 key_count, expected_len;
2575 BT_DBG("request for %s", hdev->name);
2577 if (!lmp_bredr_capable(hdev))
2578 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2579 MGMT_STATUS_NOT_SUPPORTED);
2581 key_count = __le16_to_cpu(cp->key_count);
2582 if (key_count > max_key_count) {
2583 BT_ERR("load_link_keys: too big key_count value %u",
2585 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2586 MGMT_STATUS_INVALID_PARAMS);
/* The payload must contain exactly key_count key entries. */
2589 expected_len = sizeof(*cp) + key_count *
2590 sizeof(struct mgmt_link_key_info);
2591 if (expected_len != len) {
2592 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
2594 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2595 MGMT_STATUS_INVALID_PARAMS);
2598 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2599 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2600 MGMT_STATUS_INVALID_PARAMS);
2602 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate every entry before mutating any state. */
2605 for (i = 0; i < key_count; i++) {
2606 struct mgmt_link_key_info *key = &cp->keys[i];
2608 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2609 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2610 MGMT_STATUS_INVALID_PARAMS);
2615 hci_link_keys_clear(hdev);
2618 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
2621 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
2625 new_settings(hdev, NULL);
2627 for (i = 0; i < key_count; i++) {
2628 struct mgmt_link_key_info *key = &cp->keys[i];
2630 /* Always ignore debug keys and require a new pairing if
2631 * the user wants to use them.
2633 if (key->type == HCI_LK_DEBUG_COMBINATION)
2636 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2637 key->type, key->pin_len, NULL);
2640 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2642 hci_dev_unlock(hdev);
/* Broadcast a Device Unpaired event for @bdaddr/@addr_type to all mgmt
 * sockets except @skip_sk.
 */
2647 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2648 u8 addr_type, struct sock *skip_sk)
2650 struct mgmt_ev_device_unpaired ev;
2652 bacpy(&ev.addr.bdaddr, bdaddr);
2653 ev.addr.type = addr_type;
2655 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT Unpair Device command handler.
 *
 * Removes the stored keys for the address (link key for BR/EDR; IRK,
 * connection parameters and LTK for LE). When requested and a live
 * connection exists, an HCI Disconnect is issued and the mgmt reply is
 * deferred until the disconnection completes; otherwise the reply and a
 * Device Unpaired event are sent immediately.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2659 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2662 struct mgmt_cp_unpair_device *cp = data;
2663 struct mgmt_rp_unpair_device rp;
2664 struct hci_cp_disconnect dc;
2665 struct pending_cmd *cmd;
2666 struct hci_conn *conn;
2669 memset(&rp, 0, sizeof(rp));
2670 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2671 rp.addr.type = cp->addr.type;
2673 if (!bdaddr_type_is_valid(cp->addr.type))
2674 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2675 MGMT_STATUS_INVALID_PARAMS,
2678 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2679 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2680 MGMT_STATUS_INVALID_PARAMS,
2685 if (!hdev_is_powered(hdev)) {
2686 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2687 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
2691 if (cp->addr.type == BDADDR_BREDR) {
2692 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
/* LE: translate mgmt address type to HCI address type. */
2696 if (cp->addr.type == BDADDR_LE_PUBLIC)
2697 addr_type = ADDR_LE_DEV_PUBLIC;
2699 addr_type = ADDR_LE_DEV_RANDOM;
2701 hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);
2703 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2705 err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
/* Key removal failed: the device was not paired in the first place. */
2709 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2710 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
2714 if (cp->disconnect) {
2715 if (cp->addr.type == BDADDR_BREDR)
2716 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2719 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No connection to tear down: answer now and announce the unpair. */
2726 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2728 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2732 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2739 dc.handle = cpu_to_le16(conn->handle);
2740 dc.reason = 0x13; /* Remote User Terminated Connection */
2741 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2743 mgmt_pending_remove(cmd);
2746 hci_dev_unlock(hdev);
/* MGMT Disconnect command handler: validate the address, look up the
 * ACL or LE connection and send HCI Disconnect with reason "remote user
 * terminated"; the mgmt reply is deferred via a pending command until
 * the disconnection completes.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2750 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2753 struct mgmt_cp_disconnect *cp = data;
2754 struct mgmt_rp_disconnect rp;
2755 struct hci_cp_disconnect dc;
2756 struct pending_cmd *cmd;
2757 struct hci_conn *conn;
2762 memset(&rp, 0, sizeof(rp));
2763 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2764 rp.addr.type = cp->addr.type;
2766 if (!bdaddr_type_is_valid(cp->addr.type))
2767 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2768 MGMT_STATUS_INVALID_PARAMS,
2773 if (!test_bit(HCI_UP, &hdev->flags)) {
2774 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2775 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Only one disconnect operation at a time. */
2779 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
2780 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2781 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2785 if (cp->addr.type == BDADDR_BREDR)
2786 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2789 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
2791 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2792 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2793 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
2797 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2803 dc.handle = cpu_to_le16(conn->handle);
2804 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
2806 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2808 mgmt_pending_remove(cmd);
2811 hci_dev_unlock(hdev);
/* Translate an HCI (link type, address type) pair into an mgmt bdaddr
 * type: LE public/random for LE links, BR/EDR for everything else.
 * NOTE(review): the case labels for LE_LINK and the default link type
 * are elided from this excerpt.
 */
2815 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2817 switch (link_type) {
2819 switch (addr_type) {
2820 case ADDR_LE_DEV_PUBLIC:
2821 return BDADDR_LE_PUBLIC;
2824 /* Fallback to LE Random address type */
2825 return BDADDR_LE_RANDOM;
2829 /* Fallback to BR/EDR type */
2830 return BDADDR_BREDR;
/* MGMT Get Connections command handler: count mgmt-visible connections,
 * allocate a response sized for that count, fill in the addresses of
 * ACL/LE connections (SCO/eSCO are skipped), then reply. The length is
 * recomputed after filling since SCO links may have been filtered out.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2834 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2837 struct mgmt_rp_get_connections *rp;
2847 if (!hdev_is_powered(hdev)) {
2848 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2849 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections marked as mgmt-connected. */
2854 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2855 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2859 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2860 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: record each connection's address and type. */
2867 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2868 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2870 bacpy(&rp->addr[i].bdaddr, &c->dst);
2871 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2872 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2877 rp->conn_count = cpu_to_le16(i);
2879 /* Recalculate length in case of filtered SCO connections, etc */
2880 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
2882 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2888 hci_dev_unlock(hdev);
/* Queue a pending PIN Code Neg Reply mgmt command and send the
 * corresponding HCI PIN Code Negative Reply for the address.
 */
2892 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2893 struct mgmt_cp_pin_code_neg_reply *cp)
2895 struct pending_cmd *cmd;
2898 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2903 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2904 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2906 mgmt_pending_remove(cmd);
/* MGMT PIN Code Reply command handler.
 *
 * Requires a powered device and an existing ACL connection. When the
 * pending security level demands a 16-byte PIN but a shorter one was
 * supplied, a negative reply is sent instead; otherwise HCI PIN Code
 * Reply is issued and the mgmt reply deferred via a pending command.
 * NOTE(review): braces and some lines are elided from this excerpt.
 */
2911 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2914 struct hci_conn *conn;
2915 struct mgmt_cp_pin_code_reply *cp = data;
2916 struct hci_cp_pin_code_reply reply;
2917 struct pending_cmd *cmd;
2924 if (!hdev_is_powered(hdev)) {
2925 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2926 MGMT_STATUS_NOT_POWERED);
2930 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2932 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2933 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN. */
2937 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2938 struct mgmt_cp_pin_code_neg_reply ncp;
2940 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2942 BT_ERR("PIN code is not 16 bytes long");
2944 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2946 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2947 MGMT_STATUS_INVALID_PARAMS);
2952 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2958 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2959 reply.pin_len = cp->pin_len;
2960 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2962 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2964 mgmt_pending_remove(cmd);
2967 hci_dev_unlock(hdev);
/* MGMT Set IO Capability command handler: validate the value against
 * the SMP maximum (KeyboardDisplay) and store it under hci_dev_lock.
 */
2971 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2974 struct mgmt_cp_set_io_capability *cp = data;
2978 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2979 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2980 MGMT_STATUS_INVALID_PARAMS, NULL, 0);
2984 hdev->io_capability = cp->io_capability;
2986 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2987 hdev->io_capability);
2989 hci_dev_unlock(hdev);
2991 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending PAIR_DEVICE command whose user_data is @conn.
 * NOTE(review): the return statements are elided from this excerpt.
 */
2995 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2997 struct hci_dev *hdev = conn->hdev;
2998 struct pending_cmd *cmd;
3000 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
3001 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
3004 if (cmd->user_data != conn)
/* Finish a Pair Device operation: answer the pending command with the
 * peer address and @status, detach all connection callbacks so no
 * further events fire for this connection, drop the connection
 * reference taken for pairing, and remove the pending command.
 */
3013 static void pairing_complete(struct pending_cmd *cmd, u8 status)
3015 struct mgmt_rp_pair_device rp;
3016 struct hci_conn *conn = cmd->user_data;
3018 bacpy(&rp.addr.bdaddr, &conn->dst);
3019 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
3021 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
3024 /* So we don't get further callbacks for this connection */
3025 conn->connect_cfm_cb = NULL;
3026 conn->security_cfm_cb = NULL;
3027 conn->disconn_cfm_cb = NULL;
3029 hci_conn_drop(conn);
3031 mgmt_pending_remove(cmd);
/* SMP completion entry point: resolve the pending pairing command for
 * @conn (if any) with SUCCESS or FAILED depending on @complete.
 */
3034 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
3036 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
3037 struct pending_cmd *cmd;
3039 cmd = find_pairing(conn);
3041 pairing_complete(cmd, status);
/* Connection callback (BR/EDR path): map the HCI status to an mgmt
 * status and finish the pending pairing command, if one exists.
 */
3044 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
3046 struct pending_cmd *cmd;
3048 BT_DBG("status %u", status);
3050 cmd = find_pairing(conn);
3052 BT_DBG("Unable to find a pending command");
3054 pairing_complete(cmd, mgmt_status(status));
/* LE variant of pairing_complete_cb. (Excerpt: lines between the
 * BT_DBG and find_pairing() are elided — presumably an early return
 * for the non-failure case, since for LE a successful connection alone
 * does not mean pairing is done.)
 */
3057 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
3059 struct pending_cmd *cmd;
3061 BT_DBG("status %u", status);
3066 cmd = find_pairing(conn);
3068 BT_DBG("Unable to find a pending command");
3070 pairing_complete(cmd, mgmt_status(status));
/* Handler for MGMT_OP_PAIR_DEVICE.
 * Validates address type and IO capability, requires a powered
 * controller, then initiates an ACL (BR/EDR) or LE connection with
 * BT_SECURITY_MEDIUM / dedicated bonding. A pending command tracks the
 * connection via the connect/security/disconn callbacks; if the link is
 * already up and secure the command completes immediately.
 * (Excerpt: interior lines such as error-path gotos and IS_ERR(conn)
 * checks are elided.)
 */
3073 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3076 struct mgmt_cp_pair_device *cp = data;
3077 struct mgmt_rp_pair_device rp;
3078 struct pending_cmd *cmd;
3079 u8 sec_level, auth_type;
3080 struct hci_conn *conn;
3085 memset(&rp, 0, sizeof(rp));
3086 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3087 rp.addr.type = cp->addr.type;
/* Parameter validation happens before taking any action. */
3089 if (!bdaddr_type_is_valid(cp->addr.type))
3090 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3091 MGMT_STATUS_INVALID_PARAMS,
3094 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3095 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3096 MGMT_STATUS_INVALID_PARAMS,
3101 if (!hdev_is_powered(hdev)) {
3102 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3103 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
3107 sec_level = BT_SECURITY_MEDIUM;
3108 auth_type = HCI_AT_DEDICATED_BONDING;
3110 if (cp->addr.type == BDADDR_BREDR) {
3111 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3116 /* Convert from L2CAP channel address type to HCI address type
3118 if (cp->addr.type == BDADDR_LE_PUBLIC)
3119 addr_type = ADDR_LE_DEV_PUBLIC;
3121 addr_type = ADDR_LE_DEV_RANDOM;
3123 /* When pairing a new device, it is expected to remember
3124 * this device for future connections. Adding the connection
3125 * parameter information ahead of time allows tracking
3126 * of the slave preferred values and will speed up any
3127 * further connection establishment.
3129 * If connection parameters already exist, then they
3130 * will be kept and this function does nothing.
3132 hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3134 /* Request a connection with master = true role */
3135 conn = hci_connect_le(hdev, &cp->addr.bdaddr, addr_type,
3136 sec_level, HCI_LE_CONN_TIMEOUT, true);
/* Connection attempt failed: map -EBUSY to "busy", anything else
 * to "connect failed". */
3142 if (PTR_ERR(conn) == -EBUSY)
3143 status = MGMT_STATUS_BUSY;
3145 status = MGMT_STATUS_CONNECT_FAILED;
3147 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* A connection with callbacks installed already has a pairing in
 * progress — refuse with "busy". */
3153 if (conn->connect_cfm_cb) {
3154 hci_conn_drop(conn);
3155 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3156 MGMT_STATUS_BUSY, &rp, sizeof(rp));
3160 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3163 hci_conn_drop(conn);
3167 /* For LE, just connecting isn't a proof that the pairing finished */
3168 if (cp->addr.type == BDADDR_BREDR) {
3169 conn->connect_cfm_cb = pairing_complete_cb;
3170 conn->security_cfm_cb = pairing_complete_cb;
3171 conn->disconn_cfm_cb = pairing_complete_cb;
3173 conn->connect_cfm_cb = le_pairing_complete_cb;
3174 conn->security_cfm_cb = le_pairing_complete_cb;
3175 conn->disconn_cfm_cb = le_pairing_complete_cb;
3178 conn->io_capability = cp->io_cap;
3179 cmd->user_data = conn;
/* Already connected and secure: nothing more to wait for. */
3181 if (conn->state == BT_CONNECTED &&
3182 hci_conn_security(conn, sec_level, auth_type))
3183 pairing_complete(cmd, 0);
3188 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CANCEL_PAIR_DEVICE.
 * Requires a powered controller and a pending Pair Device command whose
 * target address matches the supplied one; on match the pairing is
 * completed with MGMT_STATUS_CANCELLED and a success response is sent.
 */
3192 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
3195 struct mgmt_addr_info *addr = data;
3196 struct pending_cmd *cmd;
3197 struct hci_conn *conn;
3204 if (!hdev_is_powered(hdev)) {
3205 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3206 MGMT_STATUS_NOT_POWERED);
3210 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3212 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3213 MGMT_STATUS_INVALID_PARAMS);
3217 conn = cmd->user_data;
/* The address must match the connection being paired. */
3219 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3220 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3221 MGMT_STATUS_INVALID_PARAMS);
3225 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
3227 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3228 addr, sizeof(*addr));
3230 hci_dev_unlock(hdev);
/* Common backend for the user confirm/passkey/PIN (neg-)reply handlers.
 * Looks up the ACL or LE connection for the address; LE replies are
 * routed to SMP via smp_user_confirm_reply(), BR/EDR replies are sent
 * as the given HCI command (with the passkey payload for
 * HCI_OP_USER_PASSKEY_REPLY, otherwise just the bdaddr). A pending
 * command is registered so the eventual HCI event can complete it.
 */
3234 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3235 struct mgmt_addr_info *addr, u16 mgmt_op,
3236 u16 hci_op, __le32 passkey)
3238 struct pending_cmd *cmd;
3239 struct hci_conn *conn;
3244 if (!hdev_is_powered(hdev)) {
3245 err = cmd_complete(sk, hdev->id, mgmt_op,
3246 MGMT_STATUS_NOT_POWERED, addr,
3251 if (addr->type == BDADDR_BREDR)
3252 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3254 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
3257 err = cmd_complete(sk, hdev->id, mgmt_op,
3258 MGMT_STATUS_NOT_CONNECTED, addr,
/* LE pairing responses go through SMP rather than HCI. */
3263 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3264 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3266 err = cmd_complete(sk, hdev->id, mgmt_op,
3267 MGMT_STATUS_SUCCESS, addr,
3270 err = cmd_complete(sk, hdev->id, mgmt_op,
3271 MGMT_STATUS_FAILED, addr,
3277 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3283 /* Continue with pairing via HCI */
3284 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3285 struct hci_cp_user_passkey_reply cp;
3287 bacpy(&cp.bdaddr, &addr->bdaddr);
3288 cp.passkey = passkey;
3289 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3291 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
/* On send failure the pending command must not linger. */
3295 mgmt_pending_remove(cmd);
3298 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the HCI PIN code negative reply opcode.
 */
3302 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3303 void *data, u16 len)
3305 struct mgmt_cp_pin_code_neg_reply *cp = data;
3309 return user_pairing_resp(sk, hdev, &cp->addr,
3310 MGMT_OP_PIN_CODE_NEG_REPLY,
3311 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY: validates the payload length (the only
 * wrapper here that does so explicitly) and delegates to
 * user_pairing_resp().
 */
3314 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3317 struct mgmt_cp_user_confirm_reply *cp = data;
3321 if (len != sizeof(*cp))
3322 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3323 MGMT_STATUS_INVALID_PARAMS);
3325 return user_pairing_resp(sk, hdev, &cp->addr,
3326 MGMT_OP_USER_CONFIRM_REPLY,
3327 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the HCI user confirm negative reply opcode.
 */
3330 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3331 void *data, u16 len)
3333 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3337 return user_pairing_resp(sk, hdev, &cp->addr,
3338 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3339 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY: wrapper passing the user-supplied
 * passkey through to user_pairing_resp().
 */
3342 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3345 struct mgmt_cp_user_passkey_reply *cp = data;
3349 return user_pairing_resp(sk, hdev, &cp->addr,
3350 MGMT_OP_USER_PASSKEY_REPLY,
3351 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY: thin wrapper delegating to
 * user_pairing_resp() with the HCI user passkey negative reply opcode.
 */
3354 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3355 void *data, u16 len)
3357 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3361 return user_pairing_resp(sk, hdev, &cp->addr,
3362 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3363 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying hdev->dev_name into
 * the given request.
 */
3366 static void update_name(struct hci_request *req)
3368 struct hci_dev *hdev = req->hdev;
3369 struct hci_cp_write_local_name cp;
3371 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3373 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* Request-completion callback for Set Local Name: answers the pending
 * mgmt command with cmd_status on HCI failure or cmd_complete on
 * success, then removes it. (Excerpt: the NULL check on cmd and the
 * status branch lines are elided.)
 */
3376 static void set_name_complete(struct hci_dev *hdev, u8 status)
3378 struct mgmt_cp_set_local_name *cp;
3379 struct pending_cmd *cmd;
3381 BT_DBG("status 0x%02x", status);
3385 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3392 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3393 mgmt_status(status));
3395 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3398 mgmt_pending_remove(cmd);
3401 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_LOCAL_NAME.
 * Short-circuits when neither name changed; when the controller is off
 * it just stores the names and emits MGMT_EV_LOCAL_NAME_CHANGED; when
 * powered it builds an HCI request (local name for BR/EDR, scan
 * response data for LE) completed by set_name_complete().
 */
3404 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3407 struct mgmt_cp_set_local_name *cp = data;
3408 struct pending_cmd *cmd;
3409 struct hci_request req;
3416 /* If the old values are the same as the new ones just return a
3417 * direct command complete event.
3419 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3420 !memcmp(hdev->short_name, cp->short_name,
3421 sizeof(hdev->short_name))) {
3422 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* The short name is stored unconditionally; the full name only once
 * we know no HCI traffic is required (unpowered) or after the request
 * is set up below. */
3427 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3429 if (!hdev_is_powered(hdev)) {
3430 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3432 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3437 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
3443 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3449 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3451 hci_req_init(&req, hdev);
3453 if (lmp_bredr_capable(hdev)) {
3458 /* The name is stored in the scan response data and so
3459 * no need to update the advertising data here.
3461 if (lmp_le_capable(hdev))
3462 update_scan_rsp_data(&req);
3464 err = hci_req_run(&req, set_name_complete);
3466 mgmt_pending_remove(cmd);
3469 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_READ_LOCAL_OOB_DATA.
 * Requires a powered, SSP-capable controller and no identical command
 * already pending; then issues either the extended OOB read (when
 * Secure Connections is enabled) or the classic one.
 */
3473 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3474 void *data, u16 data_len)
3476 struct pending_cmd *cmd;
3479 BT_DBG("%s", hdev->name);
3483 if (!hdev_is_powered(hdev)) {
3484 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3485 MGMT_STATUS_NOT_POWERED);
3489 if (!lmp_ssp_capable(hdev)) {
3490 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3491 MGMT_STATUS_NOT_SUPPORTED);
/* Only one outstanding OOB read at a time. */
3495 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3496 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3501 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3507 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags))
3508 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_EXT_DATA,
3511 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3514 mgmt_pending_remove(cmd);
3517 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_ADD_REMOTE_OOB_DATA.
 * Dispatches on payload length: the plain size stores hash/randomizer,
 * the extended size stores the Secure Connections variant; any other
 * length is rejected as invalid parameters.
 */
3521 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3522 void *data, u16 len)
3526 BT_DBG("%s ", hdev->name);
3530 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3531 struct mgmt_cp_add_remote_oob_data *cp = data;
3534 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3535 cp->hash, cp->randomizer);
3537 status = MGMT_STATUS_FAILED;
3539 status = MGMT_STATUS_SUCCESS;
3541 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3542 status, &cp->addr, sizeof(cp->addr));
3543 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3544 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3547 err = hci_add_remote_oob_ext_data(hdev, &cp->addr.bdaddr,
3553 status = MGMT_STATUS_FAILED;
3555 status = MGMT_STATUS_SUCCESS;
3557 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3558 status, &cp->addr, sizeof(cp->addr));
3560 BT_ERR("add_remote_oob_data: invalid length of %u bytes", len);
3561 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3562 MGMT_STATUS_INVALID_PARAMS);
3565 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_REMOVE_REMOTE_OOB_DATA: drop any stored OOB data
 * for the address and report success, or invalid-params when nothing
 * was stored (hci_remove_remote_oob_data() returned an error).
 */
3569 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3570 void *data, u16 len)
3572 struct mgmt_cp_remove_remote_oob_data *cp = data;
3576 BT_DBG("%s", hdev->name);
3580 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
3582 status = MGMT_STATUS_INVALID_PARAMS;
3584 status = MGMT_STATUS_SUCCESS;
3586 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3587 status, &cp->addr, sizeof(cp->addr));
3589 hci_dev_unlock(hdev);
/* Abort a failed Start Discovery: reset discovery state to STOPPED and
 * complete the pending command (responding with the discovery type)
 * using the translated HCI status.
 */
3593 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
3595 struct pending_cmd *cmd;
3599 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3601 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
3605 type = hdev->discovery.type;
3607 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3608 &type, sizeof(type));
3609 mgmt_pending_remove(cmd);
/* Request-completion callback for Start Discovery. On HCI failure it
 * delegates to mgmt_start_discovery_failed(); otherwise it moves the
 * state to FINDING and, for LE/interleaved discovery, schedules the
 * le_scan_disable work after the appropriate timeout. BR/EDR-only
 * discovery needs no timer (inquiry ends by itself).
 */
3614 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
3616 unsigned long timeout = 0;
3618 BT_DBG("status %d", status);
3622 mgmt_start_discovery_failed(hdev, status);
3623 hci_dev_unlock(hdev);
3628 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
3629 hci_dev_unlock(hdev);
3631 switch (hdev->discovery.type) {
3632 case DISCOV_TYPE_LE:
3633 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
3636 case DISCOV_TYPE_INTERLEAVED:
3637 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
3640 case DISCOV_TYPE_BREDR:
3644 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
3650 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable, timeout);
/* Handler for MGMT_OP_START_DISCOVERY.
 * Validates controller state (powered, no periodic inquiry, discovery
 * stopped), registers a pending command, then builds an HCI request
 * per discovery type: a GIAC inquiry for BR/EDR, or LE scan parameter
 * + enable commands (after stopping advertising/background scanning if
 * needed) for LE and interleaved discovery. The request completes via
 * start_discovery_complete().
 *
 * Fix: line 3766 contained a mis-encoded "memset(¶m_cp, ...)" — the
 * HTML-entity garble of "&param_cp"; restored to memset(&param_cp, ...).
 */
3653 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3654 void *data, u16 len)
3656 struct mgmt_cp_start_discovery *cp = data;
3657 struct pending_cmd *cmd;
3658 struct hci_cp_le_set_scan_param param_cp;
3659 struct hci_cp_le_set_scan_enable enable_cp;
3660 struct hci_cp_inquiry inq_cp;
3661 struct hci_request req;
3662 /* General inquiry access code (GIAC) */
3663 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3664 u8 status, own_addr_type;
3667 BT_DBG("%s", hdev->name);
3671 if (!hdev_is_powered(hdev)) {
3672 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3673 MGMT_STATUS_NOT_POWERED);
3677 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
3678 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3683 if (hdev->discovery.state != DISCOVERY_STOPPED) {
3684 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3689 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
3695 hdev->discovery.type = cp->type;
3697 hci_req_init(&req, hdev);
3699 switch (hdev->discovery.type) {
3700 case DISCOV_TYPE_BREDR:
3701 status = mgmt_bredr_support(hdev);
3703 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3705 mgmt_pending_remove(cmd);
/* An inquiry already in progress means discovery is busy. */
3709 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3710 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3712 mgmt_pending_remove(cmd);
3716 hci_inquiry_cache_flush(hdev);
3718 memset(&inq_cp, 0, sizeof(inq_cp));
3719 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
3720 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
3721 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
3724 case DISCOV_TYPE_LE:
3725 case DISCOV_TYPE_INTERLEAVED:
3726 status = mgmt_le_support(hdev);
3728 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3730 mgmt_pending_remove(cmd);
/* Interleaved discovery needs BR/EDR to be enabled too. */
3734 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
3735 !test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
3736 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3737 MGMT_STATUS_NOT_SUPPORTED);
3738 mgmt_pending_remove(cmd);
3742 if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
3743 /* Don't let discovery abort an outgoing
3744 * connection attempt that's using directed
3747 if (hci_conn_hash_lookup_state(hdev, LE_LINK,
3749 err = cmd_status(sk, hdev->id,
3750 MGMT_OP_START_DISCOVERY,
3751 MGMT_STATUS_REJECTED);
3752 mgmt_pending_remove(cmd);
3756 disable_advertising(&req);
3759 /* If controller is scanning, it means the background scanning
3760 * is running. Thus, we should temporarily stop it in order to
3761 * set the discovery scanning parameters.
3763 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
3764 hci_req_add_le_scan_disable(&req);
3766 memset(&param_cp, 0, sizeof(param_cp));
3768 /* All active scans will be done with either a resolvable
3769 * private address (when privacy feature has been enabled)
3770 * or unresolvable private address.
3772 err = hci_update_random_address(&req, true, &own_addr_type);
3774 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3775 MGMT_STATUS_FAILED);
3776 mgmt_pending_remove(cmd);
3780 param_cp.type = LE_SCAN_ACTIVE;
3781 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
3782 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
3783 param_cp.own_address_type = own_addr_type;
3784 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
3787 memset(&enable_cp, 0, sizeof(enable_cp));
3788 enable_cp.enable = LE_SCAN_ENABLE;
3789 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
3790 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type: reject. */
3795 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
3796 MGMT_STATUS_INVALID_PARAMS);
3797 mgmt_pending_remove(cmd);
3801 err = hci_req_run(&req, start_discovery_complete);
3803 mgmt_pending_remove(cmd);
3805 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3808 hci_dev_unlock(hdev);
/* Complete a pending Stop Discovery command with the translated HCI
 * failure status, echoing the current discovery type back, and remove
 * the command.
 */
3812 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
3814 struct pending_cmd *cmd;
3817 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
3821 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
3822 &hdev->discovery.type, sizeof(hdev->discovery.type));
3823 mgmt_pending_remove(cmd);
/* Request-completion callback for Stop Discovery: report failure via
 * mgmt_stop_discovery_failed() or move discovery state to STOPPED.
 * (Excerpt: the status branch lines are elided.)
 */
3828 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
3830 BT_DBG("status %d", status);
3835 mgmt_stop_discovery_failed(hdev, status);
3839 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3842 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_STOP_DISCOVERY.
 * Rejects when no discovery is active or the requested type does not
 * match the running one; otherwise registers a pending command, builds
 * a request via hci_stop_discovery() and runs it. -ENODATA from
 * hci_req_run() means nothing needed sending, so respond immediately.
 */
3845 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3848 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3849 struct pending_cmd *cmd;
3850 struct hci_request req;
3853 BT_DBG("%s", hdev->name);
3857 if (!hci_discovery_active(hdev)) {
3858 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3859 MGMT_STATUS_REJECTED, &mgmt_cp->type,
3860 sizeof(mgmt_cp->type));
3864 if (hdev->discovery.type != mgmt_cp->type) {
3865 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3866 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
3867 sizeof(mgmt_cp->type));
3871 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
3877 hci_req_init(&req, hdev);
3879 hci_stop_discovery(&req);
3881 err = hci_req_run(&req, stop_discovery_complete);
3883 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3887 mgmt_pending_remove(cmd);
3889 /* If no HCI commands were sent we're done */
3890 if (err == -ENODATA) {
3891 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3892 &mgmt_cp->type, sizeof(mgmt_cp->type));
3893 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3897 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_CONFIRM_NAME.
 * Only valid while discovery is active; looks the address up among
 * inquiry entries with unknown names and either marks the name known
 * or flags it as needing resolution (re-sorting the resolve list).
 */
3901 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
3904 struct mgmt_cp_confirm_name *cp = data;
3905 struct inquiry_entry *e;
3908 BT_DBG("%s", hdev->name);
3912 if (!hci_discovery_active(hdev)) {
3913 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3914 MGMT_STATUS_FAILED, &cp->addr,
3919 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
3921 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
3922 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
3927 if (cp->name_known) {
3928 e->name_state = NAME_KNOWN;
3931 e->name_state = NAME_NEEDED;
3932 hci_inquiry_cache_update_resolve(hdev, e);
3935 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
3939 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_BLOCK_DEVICE: add the address to the blacklist
 * and, on success, emit MGMT_EV_DEVICE_BLOCKED to other mgmt sockets.
 */
3943 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
3946 struct mgmt_cp_block_device *cp = data;
3950 BT_DBG("%s", hdev->name);
3952 if (!bdaddr_type_is_valid(cp->addr.type))
3953 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
3954 MGMT_STATUS_INVALID_PARAMS,
3955 &cp->addr, sizeof(cp->addr));
3959 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
3961 status = MGMT_STATUS_FAILED;
3965 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
3967 status = MGMT_STATUS_SUCCESS;
3970 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3971 &cp->addr, sizeof(cp->addr));
3973 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_UNBLOCK_DEVICE: mirror of block_device(), but a
 * delete failure maps to INVALID_PARAMS (address was not blocked) and
 * success emits MGMT_EV_DEVICE_UNBLOCKED.
 */
3978 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3981 struct mgmt_cp_unblock_device *cp = data;
3985 BT_DBG("%s", hdev->name);
3987 if (!bdaddr_type_is_valid(cp->addr.type))
3988 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3989 MGMT_STATUS_INVALID_PARAMS,
3990 &cp->addr, sizeof(cp->addr));
3994 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3996 status = MGMT_STATUS_INVALID_PARAMS;
4000 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4002 status = MGMT_STATUS_SUCCESS;
4005 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4006 &cp->addr, sizeof(cp->addr));
4008 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID.
 * Stores the Device ID record (source must be 0x0000-0x0002) and runs
 * an HCI request — built from the elided lines, presumably an EIR/SDP
 * update — with no completion callback.
 */
4013 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4016 struct mgmt_cp_set_device_id *cp = data;
4017 struct hci_request req;
4021 BT_DBG("%s", hdev->name);
4023 source = __le16_to_cpu(cp->source);
4025 if (source > 0x0002)
4026 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4027 MGMT_STATUS_INVALID_PARAMS);
4031 hdev->devid_source = source;
4032 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4033 hdev->devid_product = __le16_to_cpu(cp->product);
4034 hdev->devid_version = __le16_to_cpu(cp->version);
4036 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
4038 hci_req_init(&req, hdev);
4040 hci_req_run(&req, NULL);
4042 hci_dev_unlock(hdev);
/* Request-completion callback for Set Advertising: on HCI failure send
 * cmd_status to all pending SET_ADVERTISING commands; on success sync
 * the HCI_ADVERTISING flag with HCI_LE_ADV, answer the pending
 * commands with the new settings and broadcast new_settings.
 */
4047 static void set_advertising_complete(struct hci_dev *hdev, u8 status)
4049 struct cmd_lookup match = { NULL, hdev };
4052 u8 mgmt_err = mgmt_status(status);
4054 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4055 cmd_status_rsp, &mgmt_err);
4059 if (test_bit(HCI_LE_ADV, &hdev->dev_flags))
4060 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
4062 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
4064 mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4067 new_settings(hdev, match.sk);
/* Handler for MGMT_OP_SET_ADVERTISING.
 * Requires LE support and val in {0,1}. When no HCI traffic is needed
 * (powered off, value unchanged, LE links up, or an active LE scan is
 * running) the flag is toggled directly and settings are sent back;
 * otherwise an enable/disable advertising request is queued and
 * completed by set_advertising_complete().
 */
4073 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4076 struct mgmt_mode *cp = data;
4077 struct pending_cmd *cmd;
4078 struct hci_request req;
4079 u8 val, enabled, status;
4082 BT_DBG("request for %s", hdev->name);
4084 status = mgmt_le_support(hdev);
4086 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4089 if (cp->val != 0x00 && cp->val != 0x01)
4090 return cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4091 MGMT_STATUS_INVALID_PARAMS);
4096 enabled = test_bit(HCI_ADVERTISING, &hdev->dev_flags);
4098 /* The following conditions are ones which mean that we should
4099 * not do any HCI communication but directly send a mgmt
4100 * response to user space (after toggling the flag if
4103 if (!hdev_is_powered(hdev) || val == enabled ||
4104 hci_conn_num(hdev, LE_LINK) > 0 ||
4105 (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4106 hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4107 bool changed = false;
4109 if (val != test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
4110 change_bit(HCI_ADVERTISING, &hdev->dev_flags);
4114 err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4119 err = new_settings(hdev, sk);
/* Refuse if another advertising or LE toggle is in flight. */
4124 if (mgmt_pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4125 mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
4126 err = cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4131 cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4137 hci_req_init(&req, hdev);
4140 enable_advertising(&req);
4142 disable_advertising(&req);
4144 err = hci_req_run(&req, set_advertising_complete);
4146 mgmt_pending_remove(cmd);
4149 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS.
 * Only allowed on LE-capable, unpowered controllers. A non-ANY address
 * must not be BDADDR_NONE and must have its two most significant bits
 * set (the static random address format); BDADDR_ANY clears it.
 */
4153 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4154 void *data, u16 len)
4156 struct mgmt_cp_set_static_address *cp = data;
4159 BT_DBG("%s", hdev->name);
4161 if (!lmp_le_capable(hdev))
4162 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4163 MGMT_STATUS_NOT_SUPPORTED);
4165 if (hdev_is_powered(hdev))
4166 return cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4167 MGMT_STATUS_REJECTED);
4169 if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4170 if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4171 return cmd_status(sk, hdev->id,
4172 MGMT_OP_SET_STATIC_ADDRESS,
4173 MGMT_STATUS_INVALID_PARAMS);
4175 /* Two most significant bits shall be set */
4176 if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4177 return cmd_status(sk, hdev->id,
4178 MGMT_OP_SET_STATIC_ADDRESS,
4179 MGMT_STATUS_INVALID_PARAMS);
4184 bacpy(&hdev->static_addr, &cp->bdaddr);
4186 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, 0, NULL, 0);
4188 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS.
 * Validates interval and window (each 0x0004-0x4000, window <=
 * interval, per the LE Set Scan Parameters value range), stores them,
 * and — when background scanning is running outside of discovery —
 * restarts passive scanning so the new parameters take effect.
 */
4193 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4194 void *data, u16 len)
4196 struct mgmt_cp_set_scan_params *cp = data;
4197 __u16 interval, window;
4200 BT_DBG("%s", hdev->name);
4202 if (!lmp_le_capable(hdev))
4203 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4204 MGMT_STATUS_NOT_SUPPORTED);
4206 interval = __le16_to_cpu(cp->interval);
4208 if (interval < 0x0004 || interval > 0x4000)
4209 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4210 MGMT_STATUS_INVALID_PARAMS);
4212 window = __le16_to_cpu(cp->window);
4214 if (window < 0x0004 || window > 0x4000)
4215 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4216 MGMT_STATUS_INVALID_PARAMS);
4218 if (window > interval)
4219 return cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4220 MGMT_STATUS_INVALID_PARAMS);
4224 hdev->le_scan_interval = interval;
4225 hdev->le_scan_window = window;
4227 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, NULL, 0);
4229 /* If background scan is running, restart it so new parameters are
4232 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
4233 hdev->discovery.state == DISCOVERY_STOPPED) {
4234 struct hci_request req;
4236 hci_req_init(&req, hdev);
4238 hci_req_add_le_scan_disable(&req);
4239 hci_req_add_le_passive_scan(&req);
4241 hci_req_run(&req, NULL);
4244 hci_dev_unlock(hdev);
/* Request-completion callback for Set Fast Connectable: report the HCI
 * failure, or update HCI_FAST_CONNECTABLE from the stored command
 * parameter, respond with the new settings and broadcast them.
 */
4249 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
4251 struct pending_cmd *cmd;
4253 BT_DBG("status 0x%02x", status);
4257 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4262 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4263 mgmt_status(status));
4265 struct mgmt_mode *cp = cmd->param;
4268 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4270 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4272 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4273 new_settings(hdev, cmd->sk);
4276 mgmt_pending_remove(cmd);
4279 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE.
 * Requires BR/EDR enabled and HCI >= 1.2, a powered and connectable
 * controller, and val in {0,1}. A no-op change is answered directly;
 * otherwise the page scan parameters are rewritten via an HCI request
 * completed by fast_connectable_complete().
 */
4282 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4283 void *data, u16 len)
4285 struct mgmt_mode *cp = data;
4286 struct pending_cmd *cmd;
4287 struct hci_request req;
4290 BT_DBG("%s", hdev->name);
4292 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags) ||
4293 hdev->hci_ver < BLUETOOTH_VER_1_2)
4294 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4295 MGMT_STATUS_NOT_SUPPORTED);
4297 if (cp->val != 0x00 && cp->val != 0x01)
4298 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4299 MGMT_STATUS_INVALID_PARAMS);
4301 if (!hdev_is_powered(hdev))
4302 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4303 MGMT_STATUS_NOT_POWERED);
4305 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4306 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4307 MGMT_STATUS_REJECTED);
4311 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4312 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested state: answer without HCI traffic. */
4317 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
4318 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4323 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4330 hci_req_init(&req, hdev);
4332 write_fast_connectable(&req, cp->val);
4334 err = hci_req_run(&req, fast_connectable_complete);
4336 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4337 MGMT_STATUS_FAILED);
4338 mgmt_pending_remove(cmd);
4342 hci_dev_unlock(hdev);
/* Queue a Write Scan Enable command reflecting the current
 * connectable/discoverable flags, after disabling fast connectable
 * page scan parameters. (Excerpt: the "scan |= SCAN_PAGE" line for the
 * connectable case is elided.)
 */
4347 static void set_bredr_scan(struct hci_request *req)
4349 struct hci_dev *hdev = req->hdev;
4352 /* Ensure that fast connectable is disabled. This function will
4353 * not do anything if the page scan parameters are already what
4356 write_fast_connectable(req, false);
4358 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4360 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
4361 scan |= SCAN_INQUIRY;
4364 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request-completion callback for Set BR/EDR: on HCI failure restore
 * the HCI_BREDR_ENABLED flag (it was flipped optimistically before the
 * request ran) and report the error; on success respond with and
 * broadcast the new settings.
 */
4367 static void set_bredr_complete(struct hci_dev *hdev, u8 status)
4369 struct pending_cmd *cmd;
4371 BT_DBG("status 0x%02x", status);
4375 cmd = mgmt_pending_find(MGMT_OP_SET_BREDR, hdev);
4380 u8 mgmt_err = mgmt_status(status);
4382 /* We need to restore the flag if related HCI commands
4385 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4387 cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4389 send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4390 new_settings(hdev, cmd->sk);
4393 mgmt_pending_remove(cmd);
4396 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR.
 * Requires a dual-mode controller with LE enabled; val in {0,1}.
 * Unpowered: toggle the flag (disabling clears dependent flags) and
 * respond directly. Powered: disabling is rejected; enabling flips the
 * flag up front, queues scan-enable and advertising-data updates, and
 * is finalized by set_bredr_complete().
 */
4399 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4401 struct mgmt_mode *cp = data;
4402 struct pending_cmd *cmd;
4403 struct hci_request req;
4406 BT_DBG("request for %s", hdev->name);
4408 if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4409 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4410 MGMT_STATUS_NOT_SUPPORTED);
4412 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4413 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4414 MGMT_STATUS_REJECTED);
4416 if (cp->val != 0x00 && cp->val != 0x01)
4417 return cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4418 MGMT_STATUS_INVALID_PARAMS);
4422 if (cp->val == test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
4423 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4427 if (!hdev_is_powered(hdev)) {
/* Disabling BR/EDR clears all the flags that depend on it. */
4429 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
4430 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
4431 clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
4432 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
4433 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
4436 change_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4438 err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4442 err = new_settings(hdev, sk);
4446 /* Reject disabling when powered on */
4448 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4449 MGMT_STATUS_REJECTED);
4453 if (mgmt_pending_find(MGMT_OP_SET_BREDR, hdev)) {
4454 err = cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4459 cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4465 /* We need to flip the bit already here so that update_adv_data
4466 * generates the correct flags.
4468 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4470 hci_req_init(&req, hdev);
4472 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
4473 set_bredr_scan(&req);
4475 /* Since only the advertising data flags will change, there
4476 * is no need to update the scan response data.
4478 update_adv_data(&req);
4480 err = hci_req_run(&req, set_bredr_complete);
4482 mgmt_pending_remove(cmd);
4485 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN.
 * val: 0x00 off, 0x01 SC enabled, 0x02 SC-only mode. Requires BR/EDR
 * support and controller SC capability (or the force-SC debug flag).
 * Unpowered controllers get the flags toggled directly; powered ones
 * get an HCI Write SC Support command, with HCI_SC_ONLY updated on
 * successful submission.
 */
4489 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4490 void *data, u16 len)
4492 struct mgmt_mode *cp = data;
4493 struct pending_cmd *cmd;
4497 BT_DBG("request for %s", hdev->name);
4499 status = mgmt_bredr_support(hdev);
4501 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4504 if (!lmp_sc_capable(hdev) &&
4505 !test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
4506 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4507 MGMT_STATUS_NOT_SUPPORTED);
4509 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4510 return cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4511 MGMT_STATUS_INVALID_PARAMS);
4515 if (!hdev_is_powered(hdev)) {
4519 changed = !test_and_set_bit(HCI_SC_ENABLED,
4521 if (cp->val == 0x02)
4522 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4524 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4526 changed = test_and_clear_bit(HCI_SC_ENABLED,
4528 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4531 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4536 err = new_settings(hdev, sk);
4541 if (mgmt_pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4542 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state: answer without HCI traffic. */
4549 if (val == test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
4550 (cp->val == 0x02) == test_bit(HCI_SC_ONLY, &hdev->dev_flags)) {
4551 err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4555 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4561 err = hci_send_cmd(hdev, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4563 mgmt_pending_remove(cmd);
4567 if (cp->val == 0x02)
4568 set_bit(HCI_SC_ONLY, &hdev->dev_flags);
4570 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
4573 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS.
 * val: 0x00 off, 0x01 keep debug keys, 0x02 also actively use SSP
 * debug mode. Toggles HCI_KEEP_DEBUG_KEYS / HCI_USE_DEBUG_KEYS and,
 * when the use-flag changed on a powered SSP controller, pushes the
 * new SSP debug mode to the controller. Responds with settings and
 * broadcasts them if anything changed.
 */
4577 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4578 void *data, u16 len)
4580 struct mgmt_mode *cp = data;
4581 bool changed, use_changed;
4584 BT_DBG("request for %s", hdev->name);
4586 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4587 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4588 MGMT_STATUS_INVALID_PARAMS);
4593 changed = !test_and_set_bit(HCI_KEEP_DEBUG_KEYS,
4596 changed = test_and_clear_bit(HCI_KEEP_DEBUG_KEYS,
4599 if (cp->val == 0x02)
4600 use_changed = !test_and_set_bit(HCI_USE_DEBUG_KEYS,
4603 use_changed = test_and_clear_bit(HCI_USE_DEBUG_KEYS,
4606 if (hdev_is_powered(hdev) && use_changed &&
4607 test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
4608 u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4609 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4610 sizeof(mode), &mode);
4613 err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4618 err = new_settings(hdev, sk);
4621 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY: enable/disable LE privacy (RPA usage)
 * and install the local IRK. Only allowed while the controller is off.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
4625 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4628 struct mgmt_cp_set_privacy *cp = cp_data;
4632 BT_DBG("request for %s", hdev->name);
4634 if (!lmp_le_capable(hdev))
4635 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4636 MGMT_STATUS_NOT_SUPPORTED);
4638 if (cp->privacy != 0x00 && cp->privacy != 0x01)
4639 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4640 MGMT_STATUS_INVALID_PARAMS);
/* Changing the IRK while powered is rejected outright. */
4642 if (hdev_is_powered(hdev))
4643 return cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
4644 MGMT_STATUS_REJECTED);
4648 /* If user space supports this command it is also expected to
4649 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
4651 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
/* Enable: store the IRK and force an RPA regeneration on next use. */
4654 changed = !test_and_set_bit(HCI_PRIVACY, &hdev->dev_flags);
4655 memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
4656 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
/* Disable: clear privacy flag and wipe the stored IRK. */
4658 changed = test_and_clear_bit(HCI_PRIVACY, &hdev->dev_flags);
4659 memset(hdev->irk, 0, sizeof(hdev->irk));
4660 clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
4663 err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
4668 err = new_settings(hdev, sk);
4671 hci_dev_unlock(hdev);
/* Validate the address type of an IRK entry supplied by user space.
 * LE random addresses used with an IRK must be static (top two bits set).
 */
4675 static bool irk_is_valid(struct mgmt_irk_info *irk)
4677 switch (irk->addr.type) {
4678 case BDADDR_LE_PUBLIC:
4681 case BDADDR_LE_RANDOM:
4682 /* Two most significant bits shall be set */
4683 if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS: replace the device's stored Identity
 * Resolving Keys with the list provided by user space. Validates the
 * count against the maximum that fits in a u16 length, checks the
 * declared length matches, and rejects any invalid entry before
 * clearing and repopulating the IRK store.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
4691 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4694 struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound keeps expected_len below U16_MAX (no overflow). */
4695 const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
4696 sizeof(struct mgmt_irk_info));
4697 u16 irk_count, expected_len;
4700 BT_DBG("request for %s", hdev->name);
4702 if (!lmp_le_capable(hdev))
4703 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4704 MGMT_STATUS_NOT_SUPPORTED);
4706 irk_count = __le16_to_cpu(cp->irk_count);
4707 if (irk_count > max_irk_count) {
4708 BT_ERR("load_irks: too big irk_count value %u", irk_count);
4709 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4710 MGMT_STATUS_INVALID_PARAMS);
/* Declared payload length must match the entry count exactly. */
4713 expected_len = sizeof(*cp) + irk_count * sizeof(struct mgmt_irk_info);
4714 if (expected_len != len) {
4715 BT_ERR("load_irks: expected %u bytes, got %u bytes",
4717 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
4718 MGMT_STATUS_INVALID_PARAMS);
4721 BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* Validate everything first so the store is never partially replaced. */
4723 for (i = 0; i < irk_count; i++) {
4724 struct mgmt_irk_info *key = &cp->irks[i];
4726 if (!irk_is_valid(key))
4727 return cmd_status(sk, hdev->id,
4729 MGMT_STATUS_INVALID_PARAMS);
4734 hci_smp_irks_clear(hdev);
4736 for (i = 0; i < irk_count; i++) {
4737 struct mgmt_irk_info *irk = &cp->irks[i];
4740 if (irk->addr.type == BDADDR_LE_PUBLIC)
4741 addr_type = ADDR_LE_DEV_PUBLIC;
4743 addr_type = ADDR_LE_DEV_RANDOM;
4745 hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
/* User space handles IRKs, so RPA resolving can be enabled. */
4749 set_bit(HCI_RPA_RESOLVING, &hdev->dev_flags);
4751 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
4753 hci_dev_unlock(hdev);
/* Validate an LTK entry from user space: master flag must be 0/1 and a
 * random address must be static (two most significant bits set).
 */
4758 static bool ltk_is_valid(struct mgmt_ltk_info *key)
4760 if (key->master != 0x00 && key->master != 0x01)
4763 switch (key->addr.type) {
4764 case BDADDR_LE_PUBLIC:
4767 case BDADDR_LE_RANDOM:
4768 /* Two most significant bits shall be set */
4769 if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: replace all stored SMP LTKs
 * with the list supplied by user space. Mirrors load_irks(): bound the
 * count, verify the declared length, validate all entries, then clear
 * and repopulate the key store.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
4777 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
4778 void *cp_data, u16 len)
4780 struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound keeps expected_len below U16_MAX (no overflow). */
4781 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
4782 sizeof(struct mgmt_ltk_info));
4783 u16 key_count, expected_len;
4786 BT_DBG("request for %s", hdev->name);
4788 if (!lmp_le_capable(hdev))
4789 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4790 MGMT_STATUS_NOT_SUPPORTED);
4792 key_count = __le16_to_cpu(cp->key_count);
4793 if (key_count > max_key_count) {
4794 BT_ERR("load_ltks: too big key_count value %u", key_count);
4795 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4796 MGMT_STATUS_INVALID_PARAMS);
4799 expected_len = sizeof(*cp) + key_count *
4800 sizeof(struct mgmt_ltk_info);
4801 if (expected_len != len) {
4802 BT_ERR("load_keys: expected %u bytes, got %u bytes",
4804 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
4805 MGMT_STATUS_INVALID_PARAMS);
4808 BT_DBG("%s key_count %u", hdev->name, key_count);
/* Validate everything first so the store is never partially replaced. */
4810 for (i = 0; i < key_count; i++) {
4811 struct mgmt_ltk_info *key = &cp->keys[i];
4813 if (!ltk_is_valid(key))
4814 return cmd_status(sk, hdev->id,
4815 MGMT_OP_LOAD_LONG_TERM_KEYS,
4816 MGMT_STATUS_INVALID_PARAMS);
4821 hci_smp_ltks_clear(hdev);
4823 for (i = 0; i < key_count; i++) {
4824 struct mgmt_ltk_info *key = &cp->keys[i];
4825 u8 type, addr_type, authenticated;
4827 if (key->addr.type == BDADDR_LE_PUBLIC)
4828 addr_type = ADDR_LE_DEV_PUBLIC;
4830 addr_type = ADDR_LE_DEV_RANDOM;
4835 type = SMP_LTK_SLAVE;
/* Map the mgmt key-type encoding onto the SMP authenticated flag. */
4837 switch (key->type) {
4838 case MGMT_LTK_UNAUTHENTICATED:
4839 authenticated = 0x00;
4841 case MGMT_LTK_AUTHENTICATED:
4842 authenticated = 0x01;
4848 hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
4849 authenticated, key->val, key->enc_size, key->ediv,
4853 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
4856 hci_dev_unlock(hdev);
/* Match context passed to get_conn_info_complete() via
 * mgmt_pending_foreach(): identifies which connection's pending
 * GET_CONN_INFO commands to answer, and whether the TX power values
 * read from the controller are valid.
 * NOTE(review): a mgmt_status member is referenced by users of this
 * struct but is not visible in this elided extract.
 */
4861 struct cmd_conn_lookup {
4862 struct hci_conn *conn;
4863 bool valid_tx_power;
/* Per-pending-command callback: reply to a MGMT_OP_GET_CONN_INFO request
 * once the RSSI/TX-power refresh finished. Skips commands that belong to
 * a different connection than the one in the lookup context.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
4867 static void get_conn_info_complete(struct pending_cmd *cmd, void *data)
4869 struct cmd_conn_lookup *match = data;
4870 struct mgmt_cp_get_conn_info *cp;
4871 struct mgmt_rp_get_conn_info rp;
4872 struct hci_conn *conn = cmd->user_data;
/* Only answer commands pending on the matched connection. */
4874 if (conn != match->conn)
4877 cp = (struct mgmt_cp_get_conn_info *) cmd->param;
4879 memset(&rp, 0, sizeof(rp));
4880 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4881 rp.addr.type = cp->addr.type;
4883 if (!match->mgmt_status) {
4884 rp.rssi = conn->rssi;
/* TX power is reported only when the refresh read it successfully. */
4886 if (match->valid_tx_power) {
4887 rp.tx_power = conn->tx_power;
4888 rp.max_tx_power = conn->max_tx_power;
4890 rp.tx_power = HCI_TX_POWER_INVALID;
4891 rp.max_tx_power = HCI_TX_POWER_INVALID;
4895 cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
4896 match->mgmt_status, &rp, sizeof(rp));
/* Drop the reference taken when the command was queued. */
4898 hci_conn_drop(conn);
4900 mgmt_pending_remove(cmd);
/* HCI request completion handler for the RSSI/TX-power refresh started by
 * get_conn_info(). Recovers the connection handle from the last sent
 * command and answers all pending GET_CONN_INFO commands for it.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
4903 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 status)
4905 struct hci_cp_read_rssi *cp;
4906 struct hci_conn *conn;
4907 struct cmd_conn_lookup match;
4910 BT_DBG("status 0x%02x", status);
4914 /* TX power data is valid in case request completed successfully,
4915 * otherwise we assume it's not valid. At the moment we assume that
4916 * either both or none of current and max values are valid to keep code
4919 match.valid_tx_power = !status;
4921 /* Commands sent in request are either Read RSSI or Read Transmit Power
4922 * Level so we check which one was last sent to retrieve connection
4923 * handle. Both commands have handle as first parameter so it's safe to
4924 * cast data on the same command struct.
4926 * First command sent is always Read RSSI and we fail only if it fails.
4927 * In other case we simply override error to indicate success as we
4928 * already remembered if TX power value is actually valid.
4930 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
4932 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
4937 BT_ERR("invalid sent_cmd in response");
4941 handle = __le16_to_cpu(cp->handle);
4942 conn = hci_conn_hash_lookup_handle(hdev, handle);
4944 BT_ERR("unknown handle (%d) in response", handle);
4949 match.mgmt_status = mgmt_status(status);
4951 /* Cache refresh is complete, now reply for mgmt request for given
4954 mgmt_pending_foreach(MGMT_OP_GET_CONN_INFO, hdev,
4955 get_conn_info_complete, &match);
4958 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO: return RSSI and TX power for an
 * existing connection. Uses a randomized cache age so user space cannot
 * predict refresh timing; stale caches trigger an HCI read request whose
 * completion answers the command asynchronously.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
4961 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
4964 struct mgmt_cp_get_conn_info *cp = data;
4965 struct mgmt_rp_get_conn_info rp;
4966 struct hci_conn *conn;
4967 unsigned long conn_info_age;
4970 BT_DBG("%s", hdev->name);
4972 memset(&rp, 0, sizeof(rp));
4973 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4974 rp.addr.type = cp->addr.type;
4976 if (!bdaddr_type_is_valid(cp->addr.type))
4977 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4978 MGMT_STATUS_INVALID_PARAMS,
4983 if (!hdev_is_powered(hdev)) {
4984 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4985 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Look the connection up on the transport matching the address type. */
4989 if (cp->addr.type == BDADDR_BREDR)
4990 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
4993 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
4995 if (!conn || conn->state != BT_CONNECTED) {
4996 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
4997 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
5001 /* To avoid client trying to guess when to poll again for information we
5002 * calculate conn info age as random value between min/max set in hdev.
5004 conn_info_age = hdev->conn_info_min_age +
5005 prandom_u32_max(hdev->conn_info_max_age -
5006 hdev->conn_info_min_age);
5008 /* Query controller to refresh cached values if they are too old or were
5011 if (time_after(jiffies, conn->conn_info_timestamp +
5012 msecs_to_jiffies(conn_info_age)) ||
5013 !conn->conn_info_timestamp) {
5014 struct hci_request req;
5015 struct hci_cp_read_tx_power req_txp_cp;
5016 struct hci_cp_read_rssi req_rssi_cp;
5017 struct pending_cmd *cmd;
5019 hci_req_init(&req, hdev);
5020 req_rssi_cp.handle = cpu_to_le16(conn->handle);
5021 hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5024 /* For LE links TX power does not change thus we don't need to
5025 * query for it once value is known.
5027 if (!bdaddr_type_is_le(cp->addr.type) ||
5028 conn->tx_power == HCI_TX_POWER_INVALID) {
5029 req_txp_cp.handle = cpu_to_le16(conn->handle);
5030 req_txp_cp.type = 0x00;
5031 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5032 sizeof(req_txp_cp), &req_txp_cp);
5035 /* Max TX power needs to be read only once per connection */
5036 if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5037 req_txp_cp.handle = cpu_to_le16(conn->handle);
5038 req_txp_cp.type = 0x01;
5039 hci_req_add(&req, HCI_OP_READ_TX_POWER,
5040 sizeof(req_txp_cp), &req_txp_cp);
5043 err = hci_req_run(&req, conn_info_refresh_complete);
/* Reply is deferred to conn_info_refresh_complete(); hold the conn
 * so it stays valid until the completion runs.
 */
5047 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
5054 hci_conn_hold(conn);
5055 cmd->user_data = conn;
5057 conn->conn_info_timestamp = jiffies;
5059 /* Cache is valid, just reply with values cached in hci_conn */
5060 rp.rssi = conn->rssi;
5061 rp.tx_power = conn->tx_power;
5062 rp.max_tx_power = conn->max_tx_power;
5064 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5065 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5069 hci_dev_unlock(hdev);
/* HCI request completion for get_clock_info(): builds the reply from the
 * local (and optionally piconet) clock values and completes the pending
 * MGMT_OP_GET_CLOCK_INFO command.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5073 static void get_clock_info_complete(struct hci_dev *hdev, u8 status)
5075 struct mgmt_cp_get_clock_info *cp;
5076 struct mgmt_rp_get_clock_info rp;
5077 struct hci_cp_read_clock *hci_cp;
5078 struct pending_cmd *cmd;
5079 struct hci_conn *conn;
5081 BT_DBG("%s status %u", hdev->name, status);
5085 hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
/* which != 0 means a piconet clock read tied to a connection handle. */
5089 if (hci_cp->which) {
5090 u16 handle = __le16_to_cpu(hci_cp->handle);
5091 conn = hci_conn_hash_lookup_handle(hdev, handle);
/* The pending command was registered with conn as its user data. */
5096 cmd = mgmt_pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5102 memset(&rp, 0, sizeof(rp));
5103 memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
5108 rp.local_clock = cpu_to_le32(hdev->clock);
5111 rp.piconet_clock = cpu_to_le32(conn->clock);
5112 rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5116 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
5118 mgmt_pending_remove(cmd);
/* Release the reference taken in get_clock_info(). */
5120 hci_conn_drop(conn);
5123 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local BR/EDR clock and,
 * when a peer address is given and connected, that connection's piconet
 * clock. The reply is sent from get_clock_info_complete().
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5126 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5129 struct mgmt_cp_get_clock_info *cp = data;
5130 struct mgmt_rp_get_clock_info rp;
5131 struct hci_cp_read_clock hci_cp;
5132 struct pending_cmd *cmd;
5133 struct hci_request req;
5134 struct hci_conn *conn;
5137 BT_DBG("%s", hdev->name);
5139 memset(&rp, 0, sizeof(rp));
5140 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5141 rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5143 if (cp->addr.type != BDADDR_BREDR)
5144 return cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5145 MGMT_STATUS_INVALID_PARAMS,
5150 if (!hdev_is_powered(hdev)) {
5151 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5152 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Non-ANY address: the peer must have an active ACL connection. */
5156 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5157 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5159 if (!conn || conn->state != BT_CONNECTED) {
5160 err = cmd_complete(sk, hdev->id,
5161 MGMT_OP_GET_CLOCK_INFO,
5162 MGMT_STATUS_NOT_CONNECTED,
5170 cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5176 hci_req_init(&req, hdev);
/* First read: local clock (which = 0 from the memset). */
5178 memset(&hci_cp, 0, sizeof(hci_cp));
5179 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
/* Hold the connection so the completion handler can use it safely. */
5182 hci_conn_hold(conn);
5183 cmd->user_data = conn;
5185 hci_cp.handle = cpu_to_le16(conn->handle);
5186 hci_cp.which = 0x01; /* Piconet clock */
5187 hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5190 err = hci_req_run(&req, get_clock_info_complete);
5192 mgmt_pending_remove(cmd);
5195 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_ADDED to all mgmt sockets except the originator sk. */
5199 static void device_added(struct sock *sk, struct hci_dev *hdev,
5200 bdaddr_t *bdaddr, u8 type, u8 action)
5202 struct mgmt_ev_device_added ev;
5204 bacpy(&ev.addr.bdaddr, bdaddr);
5205 ev.addr.type = type;
5208 mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_ADD_DEVICE: register an LE device for background
 * connection handling. action 0x00 -> report-on-found, 0x01 -> always
 * auto-connect (exact mapping partly hidden by the elided lines).
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5211 static int add_device(struct sock *sk, struct hci_dev *hdev,
5212 void *data, u16 len)
5214 struct mgmt_cp_add_device *cp = data;
5215 u8 auto_conn, addr_type;
5218 BT_DBG("%s", hdev->name);
/* Only concrete LE addresses are accepted here. */
5220 if (!bdaddr_type_is_le(cp->addr.type) ||
5221 !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5222 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5223 MGMT_STATUS_INVALID_PARAMS,
5224 &cp->addr, sizeof(cp->addr));
5226 if (cp->action != 0x00 && cp->action != 0x01)
5227 return cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5228 MGMT_STATUS_INVALID_PARAMS,
5229 &cp->addr, sizeof(cp->addr));
5233 if (cp->addr.type == BDADDR_LE_PUBLIC)
5234 addr_type = ADDR_LE_DEV_PUBLIC;
5236 addr_type = ADDR_LE_DEV_RANDOM;
5239 auto_conn = HCI_AUTO_CONN_ALWAYS;
5241 auto_conn = HCI_AUTO_CONN_REPORT;
5243 /* If the connection parameters don't exist for this device,
5244 * they will be created and configured with defaults.
5246 if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5248 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5250 &cp->addr, sizeof(cp->addr));
/* Notify other mgmt sockets about the new device entry. */
5254 device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5256 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5257 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5260 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_REMOVED to all mgmt sockets except the originator. */
5264 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5265 bdaddr_t *bdaddr, u8 type)
5267 struct mgmt_ev_device_removed ev;
5269 bacpy(&ev.addr.bdaddr, bdaddr);
5270 ev.addr.type = type;
5272 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_REMOVE_DEVICE: drop one LE device's connection
 * parameters, or — when the address is BDADDR_ANY — all user-added
 * entries (auto_connect DISABLED entries are preserved/skipped).
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5275 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5276 void *data, u16 len)
5278 struct mgmt_cp_remove_device *cp = data;
5281 BT_DBG("%s", hdev->name);
/* Single-device removal path. */
5285 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5286 struct hci_conn_params *params;
5289 if (!bdaddr_type_is_le(cp->addr.type)) {
5290 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5291 MGMT_STATUS_INVALID_PARAMS,
5292 &cp->addr, sizeof(cp->addr));
5296 if (cp->addr.type == BDADDR_LE_PUBLIC)
5297 addr_type = ADDR_LE_DEV_PUBLIC;
5299 addr_type = ADDR_LE_DEV_RANDOM;
5301 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5304 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5305 MGMT_STATUS_INVALID_PARAMS,
5306 &cp->addr, sizeof(cp->addr));
/* DISABLED entries were not added via ADD_DEVICE; reject removal. */
5310 if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
5311 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5312 MGMT_STATUS_INVALID_PARAMS,
5313 &cp->addr, sizeof(cp->addr));
5317 list_del(&params->action);
5318 list_del(&params->list);
/* Removal may change what passive scanning has to look for. */
5320 hci_update_background_scan(hdev);
5322 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* BDADDR_ANY: remove-all path (addr.type must be 0). */
5324 struct hci_conn_params *p, *tmp;
5326 if (cp->addr.type) {
5327 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5328 MGMT_STATUS_INVALID_PARAMS,
5329 &cp->addr, sizeof(cp->addr));
5333 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5334 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5336 device_removed(sk, hdev, &p->addr, p->addr_type);
5337 list_del(&p->action);
5342 BT_DBG("All LE connection parameters were removed");
5344 hci_update_background_scan(hdev);
5347 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5348 MGMT_STATUS_SUCCESS, &cp->addr, sizeof(cp->addr));
5351 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_LOAD_CONN_PARAM: bulk-load LE connection parameters
 * (interval min/max, latency, supervision timeout) for known devices.
 * Invalid individual entries are logged and skipped rather than failing
 * the whole command.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5355 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5358 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound keeps expected_len below U16_MAX (no overflow). */
5359 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5360 sizeof(struct mgmt_conn_param));
5361 u16 param_count, expected_len;
5364 if (!lmp_le_capable(hdev))
5365 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5366 MGMT_STATUS_NOT_SUPPORTED);
5368 param_count = __le16_to_cpu(cp->param_count);
5369 if (param_count > max_param_count) {
5370 BT_ERR("load_conn_param: too big param_count value %u",
5372 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5373 MGMT_STATUS_INVALID_PARAMS);
5376 expected_len = sizeof(*cp) + param_count *
5377 sizeof(struct mgmt_conn_param);
5378 if (expected_len != len) {
5379 BT_ERR("load_conn_param: expected %u bytes, got %u bytes",
5381 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5382 MGMT_STATUS_INVALID_PARAMS);
5385 BT_DBG("%s param_count %u", hdev->name, param_count);
/* Drop stale DISABLED entries before repopulating. */
5389 hci_conn_params_clear_disabled(hdev);
5391 for (i = 0; i < param_count; i++) {
5392 struct mgmt_conn_param *param = &cp->params[i];
5393 struct hci_conn_params *hci_param;
5394 u16 min, max, latency, timeout;
5397 BT_DBG("Adding %pMR (type %u)", &param->addr.bdaddr,
5400 if (param->addr.type == BDADDR_LE_PUBLIC) {
5401 addr_type = ADDR_LE_DEV_PUBLIC;
5402 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5403 addr_type = ADDR_LE_DEV_RANDOM;
5405 BT_ERR("Ignoring invalid connection parameters");
5409 min = le16_to_cpu(param->min_interval);
5410 max = le16_to_cpu(param->max_interval);
5411 latency = le16_to_cpu(param->latency);
5412 timeout = le16_to_cpu(param->timeout);
5414 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5415 min, max, latency, timeout);
/* Range-check per the LE connection parameter spec; skip bad entries. */
5417 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5418 BT_ERR("Ignoring invalid connection parameters");
5422 hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
5425 BT_ERR("Failed to add connection parameters");
5429 hci_param->conn_min_interval = min;
5430 hci_param->conn_max_interval = max;
5431 hci_param->conn_latency = latency;
5432 hci_param->supervision_timeout = timeout;
5435 hci_dev_unlock(hdev);
5437 return cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0, NULL, 0);
/* Handler for MGMT_OP_SET_EXTERNAL_CONFIG: mark a controller (which has
 * the EXTERNAL_CONFIG quirk) as configured/unconfigured by user space.
 * A resulting configured-state transition re-registers the mgmt index.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5440 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5441 void *data, u16 len)
5443 struct mgmt_cp_set_external_config *cp = data;
5447 BT_DBG("%s", hdev->name);
5449 if (hdev_is_powered(hdev))
5450 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5451 MGMT_STATUS_REJECTED);
5453 if (cp->config != 0x00 && cp->config != 0x01)
5454 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5455 MGMT_STATUS_INVALID_PARAMS);
/* Only meaningful for controllers that declare external configuration. */
5457 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
5458 return cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5459 MGMT_STATUS_NOT_SUPPORTED);
5464 changed = !test_and_set_bit(HCI_EXT_CONFIGURED,
5467 changed = test_and_clear_bit(HCI_EXT_CONFIGURED,
5470 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
5477 err = new_options(hdev, sk);
/* If the configured state flipped, move the index between the
 * configured and unconfigured lists (remove, then re-add below).
 */
5479 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) == is_configured(hdev)) {
5480 mgmt_index_removed(hdev);
5482 if (test_and_change_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5483 set_bit(HCI_CONFIG, &hdev->dev_flags);
5484 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5486 queue_work(hdev->req_workqueue, &hdev->power_on);
5488 set_bit(HCI_RAW, &hdev->flags);
5489 mgmt_index_added(hdev);
5494 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PUBLIC_ADDRESS: stage a public BD_ADDR for a
 * controller whose driver provides a set_bdaddr hook. Becoming fully
 * configured triggers the index transition and power-on sequence.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5498 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
5499 void *data, u16 len)
5501 struct mgmt_cp_set_public_address *cp = data;
5505 BT_DBG("%s", hdev->name);
5507 if (hdev_is_powered(hdev))
5508 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5509 MGMT_STATUS_REJECTED);
5511 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
5512 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5513 MGMT_STATUS_INVALID_PARAMS);
/* Requires driver support for programming the address. */
5515 if (!hdev->set_bdaddr)
5516 return cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
5517 MGMT_STATUS_NOT_SUPPORTED);
5521 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
5522 bacpy(&hdev->public_addr, &cp->bdaddr);
5524 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
5531 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5532 err = new_options(hdev, sk);
/* Setting the address may complete configuration: re-register the
 * index as configured and kick off power-on.
 */
5534 if (is_configured(hdev)) {
5535 mgmt_index_removed(hdev);
5537 clear_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
5539 set_bit(HCI_CONFIG, &hdev->dev_flags);
5540 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
5542 queue_work(hdev->req_workqueue, &hdev->power_on);
5546 hci_dev_unlock(hdev);
/* Dispatch table for mgmt commands, indexed by opcode. Each entry gives
 * the handler, whether the payload is variable-length (true = data_len is
 * a minimum), and the expected/minimum payload size. Entry order must
 * match the MGMT_OP_* opcode numbering exactly.
 */
5550 static const struct mgmt_handler {
5551 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
5555 } mgmt_handlers[] = {
5556 { NULL }, /* 0x0000 (no command) */
5557 { read_version, false, MGMT_READ_VERSION_SIZE },
5558 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
5559 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
5560 { read_controller_info, false, MGMT_READ_INFO_SIZE },
5561 { set_powered, false, MGMT_SETTING_SIZE },
5562 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
5563 { set_connectable, false, MGMT_SETTING_SIZE },
5564 { set_fast_connectable, false, MGMT_SETTING_SIZE },
5565 { set_pairable, false, MGMT_SETTING_SIZE },
5566 { set_link_security, false, MGMT_SETTING_SIZE },
5567 { set_ssp, false, MGMT_SETTING_SIZE },
5568 { set_hs, false, MGMT_SETTING_SIZE },
5569 { set_le, false, MGMT_SETTING_SIZE },
5570 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
5571 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
5572 { add_uuid, false, MGMT_ADD_UUID_SIZE },
5573 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
5574 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
5575 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
5576 { disconnect, false, MGMT_DISCONNECT_SIZE },
5577 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
5578 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
5579 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
5580 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
5581 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
5582 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
5583 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
5584 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
5585 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
5586 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
5587 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
5588 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
5589 { add_remote_oob_data, true, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
5590 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
5591 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
5592 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
5593 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
5594 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
5595 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
5596 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
5597 { set_advertising, false, MGMT_SETTING_SIZE },
5598 { set_bredr, false, MGMT_SETTING_SIZE },
5599 { set_static_address, false, MGMT_SET_STATIC_ADDRESS_SIZE },
5600 { set_scan_params, false, MGMT_SET_SCAN_PARAMS_SIZE },
5601 { set_secure_conn, false, MGMT_SETTING_SIZE },
5602 { set_debug_keys, false, MGMT_SETTING_SIZE },
5603 { set_privacy, false, MGMT_SET_PRIVACY_SIZE },
5604 { load_irks, true, MGMT_LOAD_IRKS_SIZE },
5605 { get_conn_info, false, MGMT_GET_CONN_INFO_SIZE },
5606 { get_clock_info, false, MGMT_GET_CLOCK_INFO_SIZE },
5607 { add_device, false, MGMT_ADD_DEVICE_SIZE },
5608 { remove_device, false, MGMT_REMOVE_DEVICE_SIZE },
5609 { load_conn_param, true, MGMT_LOAD_CONN_PARAM_SIZE },
5610 { read_unconf_index_list, false, MGMT_READ_UNCONF_INDEX_LIST_SIZE },
5611 { read_config_info, false, MGMT_READ_CONFIG_INFO_SIZE },
5612 { set_external_config, false, MGMT_SET_EXTERNAL_CONFIG_SIZE },
5613 { set_public_address, false, MGMT_SET_PUBLIC_ADDRESS_SIZE },
/* Entry point for mgmt control messages from user space: copies the
 * message, validates the header (opcode/index/length), resolves the
 * controller index, enforces per-state opcode restrictions, then
 * dispatches through mgmt_handlers[].
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5616 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
5620 struct mgmt_hdr *hdr;
5621 u16 opcode, index, len;
5622 struct hci_dev *hdev = NULL;
5623 const struct mgmt_handler *handler;
5626 BT_DBG("got %zu bytes", msglen);
5628 if (msglen < sizeof(*hdr))
5631 buf = kmalloc(msglen, GFP_KERNEL);
5635 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
5641 opcode = __le16_to_cpu(hdr->opcode);
5642 index = __le16_to_cpu(hdr->index);
5643 len = __le16_to_cpu(hdr->len);
/* Header length must account for the entire remaining payload. */
5645 if (len != msglen - sizeof(*hdr)) {
5650 if (index != MGMT_INDEX_NONE) {
5651 hdev = hci_dev_get(index);
5653 err = cmd_status(sk, index, opcode,
5654 MGMT_STATUS_INVALID_INDEX);
/* Controllers in setup/config or user-channel mode are not addressable. */
5658 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
5659 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5660 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5661 err = cmd_status(sk, index, opcode,
5662 MGMT_STATUS_INVALID_INDEX);
/* Unconfigured controllers accept only the configuration opcodes. */
5666 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
5667 opcode != MGMT_OP_READ_CONFIG_INFO &&
5668 opcode != MGMT_OP_SET_EXTERNAL_CONFIG &&
5669 opcode != MGMT_OP_SET_PUBLIC_ADDRESS) {
5670 err = cmd_status(sk, index, opcode,
5671 MGMT_STATUS_INVALID_INDEX);
5676 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
5677 mgmt_handlers[opcode].func == NULL) {
5678 BT_DBG("Unknown op %u", opcode);
5679 err = cmd_status(sk, index, opcode,
5680 MGMT_STATUS_UNKNOWN_COMMAND);
/* Global commands must not carry an index; per-device ones must. */
5684 if (hdev && (opcode <= MGMT_OP_READ_INDEX_LIST ||
5685 opcode == MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5686 err = cmd_status(sk, index, opcode,
5687 MGMT_STATUS_INVALID_INDEX);
5691 if (!hdev && (opcode > MGMT_OP_READ_INDEX_LIST &&
5692 opcode != MGMT_OP_READ_UNCONF_INDEX_LIST)) {
5693 err = cmd_status(sk, index, opcode,
5694 MGMT_STATUS_INVALID_INDEX);
5698 handler = &mgmt_handlers[opcode];
/* var_len entries treat data_len as a minimum, others as exact. */
5700 if ((handler->var_len && len < handler->data_len) ||
5701 (!handler->var_len && len != handler->data_len)) {
5702 err = cmd_status(sk, index, opcode,
5703 MGMT_STATUS_INVALID_PARAMS);
5708 mgmt_init_hdev(sk, hdev);
5710 cp = buf + sizeof(*hdr);
5712 err = handler->func(sk, hdev, cp, len);
/* Announce a new BR/EDR controller index over mgmt; raw-only devices are
 * skipped, and unconfigured ones use the UNCONF variant of the event.
 */
5726 void mgmt_index_added(struct hci_dev *hdev)
5728 if (hdev->dev_type != HCI_BREDR)
5731 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
5734 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5735 mgmt_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, NULL, 0, NULL);
5737 mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
/* Announce removal of a controller index. All pending mgmt commands for
 * the device are failed with INVALID_INDEX before the event is sent.
 */
5740 void mgmt_index_removed(struct hci_dev *hdev)
5742 u8 status = MGMT_STATUS_INVALID_INDEX;
5744 if (hdev->dev_type != HCI_BREDR)
5747 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
/* opcode 0 = match every pending command. */
5750 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
5752 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
5753 mgmt_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, NULL, 0, NULL);
5755 mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
5758 /* This function requires the caller holds hdev->lock */
/* Re-queue every LE conn param entry onto the pending-connect or
 * pending-report list according to its auto_connect policy, then
 * restart background scanning to act on the rebuilt lists.
 */
5759 static void restart_le_actions(struct hci_dev *hdev)
5761 struct hci_conn_params *p;
5763 list_for_each_entry(p, &hdev->le_conn_params, list) {
5764 /* Needed for AUTO_OFF case where might not "really"
5765 * have been powered off.
5767 list_del_init(&p->action);
5769 switch (p->auto_connect) {
5770 case HCI_AUTO_CONN_ALWAYS:
5771 list_add(&p->action, &hdev->pend_le_conns);
5773 case HCI_AUTO_CONN_REPORT:
5774 list_add(&p->action, &hdev->pend_le_reports);
5781 hci_update_background_scan(hdev);
/* Completion for the power-on HCI request: restart LE auto-connect
 * actions, answer pending SET_POWERED commands and broadcast the new
 * settings.
 */
5784 static void powered_complete(struct hci_dev *hdev, u8 status)
5786 struct cmd_lookup match = { NULL, hdev };
5788 BT_DBG("status 0x%02x", status);
5792 restart_le_actions(hdev);
5794 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5796 new_settings(hdev, match.sk);
5798 hci_dev_unlock(hdev);
/* Build and run the HCI request that brings the controller's host
 * configuration (SSP, LE host support, advertising data, auth enable,
 * scan mode) in sync with the mgmt flags after power-on. Returns the
 * hci_req_run() result; 0 means powered_complete() will be called.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5804 static int powered_update_hci(struct hci_dev *hdev)
5806 struct hci_request req;
5809 hci_req_init(&req, hdev);
/* Sync SSP mode if the flag and the controller host state disagree. */
5811 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
5812 !lmp_host_ssp_capable(hdev)) {
5815 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
5818 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
5819 lmp_bredr_capable(hdev)) {
5820 struct hci_cp_write_le_host_supported cp;
5823 cp.simul = lmp_le_br_capable(hdev);
5825 /* Check first if we already have the right
5826 * host state (host features set)
5828 if (cp.le != lmp_host_le_capable(hdev) ||
5829 cp.simul != lmp_host_le_br_capable(hdev))
5830 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
5834 if (lmp_le_capable(hdev)) {
5835 /* Make sure the controller has a good default for
5836 * advertising data. This also applies to the case
5837 * where BR/EDR was toggled during the AUTO_OFF phase.
5839 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
5840 update_adv_data(&req);
5841 update_scan_rsp_data(&req);
5844 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
5845 enable_advertising(&req);
/* Sync authentication-enable with the link security flag. */
5848 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
5849 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
5850 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
5851 sizeof(link_sec), &link_sec);
5853 if (lmp_bredr_capable(hdev)) {
5854 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5855 set_bredr_scan(&req);
5861 return hci_req_run(&req, powered_complete);
/* Notify mgmt about a power state change. On power-on the heavy lifting
 * is delegated to powered_update_hci(); on power-off all pending
 * commands are failed with NOT_POWERED and a zero class-of-device event
 * is emitted if the stored class was non-zero.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5864 int mgmt_powered(struct hci_dev *hdev, u8 powered)
5866 struct cmd_lookup match = { NULL, hdev };
5867 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
5868 u8 zero_cod[] = { 0, 0, 0 };
5871 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
/* Power-on: the HCI request's completion sends the responses. */
5875 if (powered_update_hci(hdev) == 0)
5878 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
/* Power-off: answer SET_POWERED, fail everything else pending. */
5883 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
5884 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
5886 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
5887 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
5888 zero_cod, sizeof(zero_cod), NULL);
5891 err = new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command: RFKILLED when blocked by rfkill,
 * generic FAILED otherwise.
 */
5899 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
5901 struct pending_cmd *cmd;
5904 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
5908 if (err == -ERFKILL)
5909 status = MGMT_STATUS_RFKILLED;
5911 status = MGMT_STATUS_FAILED;
5913 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
5915 mgmt_pending_remove(cmd);
/* Called when the discoverable timeout expires: clear both discoverable
 * flags, restore page-scan-only on BR/EDR, refresh advertising data and
 * broadcast the new settings.
 * NOTE(review): elided extract; comments only, code byte-identical.
 */
5918 void mgmt_discoverable_timeout(struct hci_dev *hdev)
5920 struct hci_request req;
5924 /* When discoverable timeout triggers, then just make sure
5925 * the limited discoverable flag is cleared. Even in the case
5926 * of a timeout triggered from general discoverable, it is
5927 * safe to unconditionally clear the flag.
5929 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5930 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5932 hci_req_init(&req, hdev);
5933 if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
/* Keep page scan on, but drop inquiry scan (end of discoverability). */
5934 u8 scan = SCAN_PAGE;
5935 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE,
5936 sizeof(scan), &scan);
5939 update_adv_data(&req);
5940 hci_req_run(&req, NULL);
5942 hdev->discov_timeout = 0;
5944 new_settings(hdev, NULL);
5946 hci_dev_unlock(hdev);
/* Sync the HCI_DISCOVERABLE flag with a scan-mode change reported by
 * the controller and emit New Settings when the flag actually changed.
 * NOTE(review): the "changed" local declaration and some branch braces
 * are elided from this excerpt.
 */
5949 void mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
5953 /* Nothing needed here if there's a pending command since that
5954 * commands request completion callback takes care of everything
5957 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
5960 /* Powering off may clear the scan mode - don't let that interfere */
5961 if (!discoverable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
5965 changed = !test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
/* Limited discoverable never survives losing discoverability */
5967 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
5968 changed = test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
5972 struct hci_request req;
5974 /* In case this change in discoverable was triggered by
5975 * a disabling of connectable there could be a need to
5976 * update the advertising flags.
5978 hci_req_init(&req, hdev);
5979 update_adv_data(&req);
5980 hci_req_run(&req, NULL);
5982 new_settings(hdev, NULL);
/* Sync the HCI_CONNECTABLE flag with a scan-mode change reported by the
 * controller and emit New Settings when the flag actually changed.
 * Mirrors mgmt_discoverable() for the connectable setting.
 */
5986 void mgmt_connectable(struct hci_dev *hdev, u8 connectable)
5990 /* Nothing needed here if there's a pending command since that
5991 * commands request completion callback takes care of everything
5994 if (mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
5997 /* Powering off may clear the scan mode - don't let that interfere */
5998 if (!connectable && mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6002 changed = !test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
6004 changed = test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
6007 new_settings(hdev, NULL);
/* Fail the pending mgmt commands that correspond to a failed
 * HCI Write Scan Enable: SCAN_PAGE maps to Set Connectable and
 * SCAN_INQUIRY maps to Set Discoverable.
 */
6010 void mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
6012 u8 mgmt_err = mgmt_status(status);
6014 if (scan & SCAN_PAGE)
6015 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
6016 cmd_status_rsp, &mgmt_err);
6018 if (scan & SCAN_INQUIRY)
6019 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
6020 cmd_status_rsp, &mgmt_err);
/* Emit a New Link Key event for a freshly created BR/EDR link key.
 * store_hint tells userspace whether to persist the key.
 */
6023 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
6026 struct mgmt_ev_new_link_key ev;
6028 memset(&ev, 0, sizeof(ev));
6030 ev.store_hint = persistent;
6031 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6032 ev.key.addr.type = BDADDR_BREDR;
6033 ev.key.type = key->type;
6034 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
6035 ev.key.pin_len = key->pin_len;
6037 mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP LTK's authenticated flag to the mgmt LTK type constant. */
6040 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
6042 if (ltk->authenticated)
6043 return MGMT_LTK_AUTHENTICATED;
6045 return MGMT_LTK_UNAUTHENTICATED;
/* Emit a New Long Term Key event. The store hint is forced to 0 for
 * devices using non-identity (resolvable/non-resolvable) random
 * addresses, since keys tied to such addresses are useless after the
 * address changes.
 */
6048 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
6050 struct mgmt_ev_new_long_term_key ev;
6052 memset(&ev, 0, sizeof(ev));
6054 /* Devices using resolvable or non-resolvable random addresses
6055 * without providing an indentity resolving key don't require
6056 * to store long term keys. Their addresses will change the
6059 * Only when a remote device provides an identity address
6060 * make sure the long term key is stored. If the remote
6061 * identity is known, the long term keys are internally
6062 * mapped to the identity address. So allow static random
6063 * and public addresses here.
/* Top two address bits != 11 means not a static random address */
6065 if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6066 (key->bdaddr.b[5] & 0xc0) != 0xc0)
6067 ev.store_hint = 0x00;
6069 ev.store_hint = persistent;
6071 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
6072 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
6073 ev.key.type = mgmt_ltk_type(key);
6074 ev.key.enc_size = key->enc_size;
6075 ev.key.ediv = key->ediv;
6076 ev.key.rand = key->rand;
/* NOTE(review): the body of this SMP_LTK branch is elided here —
 * confirm what it sets (presumably ev.key.master) in the full source */
6078 if (key->type == SMP_LTK)
6081 memcpy(ev.key.val, key->val, sizeof(key->val));
6083 mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Emit a New IRK event. Only hint at storing the key when the device
 * actually uses a resolvable private address (non-zero rpa).
 */
6086 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
6088 struct mgmt_ev_new_irk ev;
6090 memset(&ev, 0, sizeof(ev));
6092 /* For identity resolving keys from devices that are already
6093 * using a public address or static random address, do not
6094 * ask for storing this key. The identity resolving key really
6095 * is only mandatory for devices using resovlable random
6098 * Storing all identity resolving keys has the downside that
6099 * they will be also loaded on next boot of they system. More
6100 * identity resolving keys, means more time during scanning is
6101 * needed to actually resolve these addresses.
6103 if (bacmp(&irk->rpa, BDADDR_ANY))
6104 ev.store_hint = 0x01;
6106 ev.store_hint = 0x00;
6108 bacpy(&ev.rpa, &irk->rpa);
6109 bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
6110 ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
6111 memcpy(ev.irk.val, irk->val, sizeof(irk->val));
6113 mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New CSRK (signature resolving key) event. Like mgmt_new_ltk(),
 * the store hint is forced to 0 for non-identity random addresses.
 */
6116 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
6119 struct mgmt_ev_new_csrk ev;
6121 memset(&ev, 0, sizeof(ev));
6123 /* Devices using resolvable or non-resolvable random addresses
6124 * without providing an indentity resolving key don't require
6125 * to store signature resolving keys. Their addresses will change
6126 * the next time around.
6128 * Only when a remote device provides an identity address
6129 * make sure the signature resolving key is stored. So allow
6130 * static random and public addresses here.
/* Top two address bits != 11 means not a static random address */
6132 if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
6133 (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
6134 ev.store_hint = 0x00;
6136 ev.store_hint = persistent;
6138 bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
6139 ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
6140 ev.key.master = csrk->master;
6141 memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
6143 mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Emit a New Connection Parameter event for an LE device, converting
 * all 16-bit fields to little endian. Non-identity addresses are
 * skipped since the parameters could not be re-applied later.
 */
6146 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
6147 u8 bdaddr_type, u8 store_hint, u16 min_interval,
6148 u16 max_interval, u16 latency, u16 timeout)
6150 struct mgmt_ev_new_conn_param ev;
6152 if (!hci_is_identity_address(bdaddr, bdaddr_type))
6155 memset(&ev, 0, sizeof(ev));
6156 bacpy(&ev.addr.bdaddr, bdaddr);
6157 ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
6158 ev.store_hint = store_hint;
6159 ev.min_interval = cpu_to_le16(min_interval);
6160 ev.max_interval = cpu_to_le16(max_interval);
6161 ev.latency = cpu_to_le16(latency);
6162 ev.timeout = cpu_to_le16(timeout);
6164 mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Append one EIR field (length byte, type byte, payload) at offset
 * eir_len and return the new length. Caller must ensure the buffer is
 * large enough. NOTE(review): the trailing "return eir_len;" is elided
 * from this excerpt.
 */
6167 static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, u8 *data,
6170 eir[eir_len++] = sizeof(type) + data_len;
6171 eir[eir_len++] = type;
6172 memcpy(&eir[eir_len], data, data_len);
6173 eir_len += data_len;
/* Emit a Device Connected event carrying the remote name and class of
 * device as EIR fields appended after the fixed-size event header.
 * NOTE(review): the buf declaration and the name_len guard around the
 * first eir_append_data() call are elided from this excerpt.
 */
6178 void mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6179 u8 addr_type, u32 flags, u8 *name, u8 name_len,
6183 struct mgmt_ev_device_connected *ev = (void *) buf;
6186 bacpy(&ev->addr.bdaddr, bdaddr);
6187 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6189 ev->flags = __cpu_to_le32(flags);
6192 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
/* Only include class of device when it is non-zero */
6195 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
6196 eir_len = eir_append_data(ev->eir, eir_len,
6197 EIR_CLASS_OF_DEV, dev_class, 3);
6199 ev->eir_len = cpu_to_le16(eir_len);
6201 mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
6202 sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending Disconnect
 * command with success and hand the command's socket back to the
 * caller through *sk so a later event can be sent to skip it.
 */
6205 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
6207 struct mgmt_cp_disconnect *cp = cmd->param;
6208 struct sock **sk = data;
6209 struct mgmt_rp_disconnect rp;
6211 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6212 rp.addr.type = cp->addr.type;
6214 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
/* NOTE(review): the *sk assignment and sock_hold() between these lines
 * are elided from this excerpt */
6220 mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: emit Device Unpaired for the address
 * in the pending Unpair Device command, then complete and remove it.
 */
6223 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
6225 struct hci_dev *hdev = data;
6226 struct mgmt_cp_unpair_device *cp = cmd->param;
6227 struct mgmt_rp_unpair_device rp;
6229 memset(&rp, 0, sizeof(rp));
6230 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
6231 rp.addr.type = cp->addr.type;
6233 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
6235 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
6237 mgmt_pending_remove(cmd);
/* Emit a Device Disconnected event. If a power-off is pending and this
 * was the last connection, expedite the queued power_off work. Pending
 * Disconnect commands are completed first (their socket is skipped when
 * sending the event); pending Unpair Device commands are flushed after.
 * NOTE(review): braces, the ev.reason assignment and sock_put() appear
 * elided from this excerpt.
 */
6240 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
6241 u8 link_type, u8 addr_type, u8 reason,
6242 bool mgmt_connected)
6244 struct mgmt_ev_device_disconnected ev;
6245 struct pending_cmd *power_off;
6246 struct sock *sk = NULL;
6248 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6250 struct mgmt_mode *cp = power_off->param;
6252 /* The connection is still in hci_conn_hash so test for 1
6253 * instead of 0 to know if this is the last one.
6255 if (!cp->val && hci_conn_count(hdev) == 1) {
6256 cancel_delayed_work(&hdev->power_off);
6257 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6261 if (!mgmt_connected)
6264 if (link_type != ACL_LINK && link_type != LE_LINK)
6267 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
6269 bacpy(&ev.addr.bdaddr, bdaddr);
6270 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6273 mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
6278 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* Complete a pending Disconnect command with the HCI failure status,
 * provided the pending command matches the address and address type of
 * the failed disconnect. Pending Unpair Device commands are flushed
 * regardless.
 * NOTE(review): the cp = cmd->param assignment and early-return guards
 * are elided from this excerpt.
 */
6282 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
6283 u8 link_type, u8 addr_type, u8 status)
6285 u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
6286 struct mgmt_cp_disconnect *cp;
6287 struct mgmt_rp_disconnect rp;
6288 struct pending_cmd *cmd;
6290 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
6293 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
6299 if (bacmp(bdaddr, &cp->addr.bdaddr))
6302 if (cp->addr.type != bdaddr_type)
6305 bacpy(&rp.addr.bdaddr, bdaddr);
6306 rp.addr.type = bdaddr_type;
6308 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
6309 mgmt_status(status), &rp, sizeof(rp));
6311 mgmt_pending_remove(cmd);
/* Emit a Connect Failed event. As in mgmt_device_disconnected(), a
 * pending power-off is expedited when this was the last connection
 * still counted in hci_conn_hash.
 */
6314 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6315 u8 addr_type, u8 status)
6317 struct mgmt_ev_connect_failed ev;
6318 struct pending_cmd *power_off;
6320 power_off = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
6322 struct mgmt_mode *cp = power_off->param;
6324 /* The connection is still in hci_conn_hash so test for 1
6325 * instead of 0 to know if this is the last one.
6327 if (!cp->val && hci_conn_count(hdev) == 1) {
6328 cancel_delayed_work(&hdev->power_off);
6329 queue_work(hdev->req_workqueue, &hdev->power_off.work);
6333 bacpy(&ev.addr.bdaddr, bdaddr);
6334 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6335 ev.status = mgmt_status(status);
6337 mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Emit a PIN Code Request event for a BR/EDR device.
 * NOTE(review): the ev.secure assignment is elided from this excerpt.
 */
6340 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
6342 struct mgmt_ev_pin_code_request ev;
6344 bacpy(&ev.addr.bdaddr, bdaddr);
6345 ev.addr.type = BDADDR_BREDR;
6348 mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN Code Reply command with the translated HCI
 * status once the controller has processed the reply.
 */
6351 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6354 struct pending_cmd *cmd;
6355 struct mgmt_rp_pin_code_reply rp;
6357 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
6361 bacpy(&rp.addr.bdaddr, bdaddr);
6362 rp.addr.type = BDADDR_BREDR;
6364 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
6365 mgmt_status(status), &rp, sizeof(rp));
6367 mgmt_pending_remove(cmd);
/* Complete a pending PIN Code Negative Reply command with the
 * translated HCI status. Mirrors mgmt_pin_code_reply_complete().
 */
6370 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6373 struct pending_cmd *cmd;
6374 struct mgmt_rp_pin_code_reply rp;
6376 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
6380 bacpy(&rp.addr.bdaddr, bdaddr);
6381 rp.addr.type = BDADDR_BREDR;
6383 cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
6384 mgmt_status(status), &rp, sizeof(rp));
6386 mgmt_pending_remove(cmd);
/* Emit a User Confirm Request event carrying the numeric comparison
 * value (little endian) and a hint on whether confirmation is needed.
 * Returns the mgmt_event() result.
 */
6389 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6390 u8 link_type, u8 addr_type, u32 value,
6393 struct mgmt_ev_user_confirm_request ev;
6395 BT_DBG("%s", hdev->name);
6397 bacpy(&ev.addr.bdaddr, bdaddr);
6398 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6399 ev.confirm_hint = confirm_hint;
6400 ev.value = cpu_to_le32(value);
6402 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a User Passkey Request event asking userspace to supply a
 * passkey for the given remote address. Returns the mgmt_event() result.
 */
6406 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
6407 u8 link_type, u8 addr_type)
6409 struct mgmt_ev_user_passkey_request ev;
6411 BT_DBG("%s", hdev->name);
6413 bacpy(&ev.addr.bdaddr, bdaddr);
6414 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6416 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common helper to complete a pending user pairing response command
 * (confirm/passkey, positive/negative — selected by opcode) with the
 * translated HCI status. Returns the cmd_complete() result.
 */
6420 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6421 u8 link_type, u8 addr_type, u8 status,
6424 struct pending_cmd *cmd;
6425 struct mgmt_rp_user_confirm_reply rp;
6428 cmd = mgmt_pending_find(opcode, hdev);
6432 bacpy(&rp.addr.bdaddr, bdaddr);
6433 rp.addr.type = link_to_bdaddr(link_type, addr_type);
6434 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
6437 mgmt_pending_remove(cmd);
/* Complete a pending User Confirm Reply via the common helper. */
6442 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6443 u8 link_type, u8 addr_type, u8 status)
6445 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6446 status, MGMT_OP_USER_CONFIRM_REPLY);
/* Complete a pending User Confirm Negative Reply via the common helper. */
6449 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6450 u8 link_type, u8 addr_type, u8 status)
6452 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6454 MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Complete a pending User Passkey Reply via the common helper. */
6457 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6458 u8 link_type, u8 addr_type, u8 status)
6460 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6461 status, MGMT_OP_USER_PASSKEY_REPLY);
/* Complete a pending User Passkey Negative Reply via the common helper. */
6464 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
6465 u8 link_type, u8 addr_type, u8 status)
6467 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
6469 MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a Passkey Notify event showing the passkey (little endian) the
 * user must enter on the remote device, plus how many digits were
 * already entered. Returns the mgmt_event() result.
 */
6472 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
6473 u8 link_type, u8 addr_type, u32 passkey,
6476 struct mgmt_ev_passkey_notify ev;
6478 BT_DBG("%s", hdev->name);
6480 bacpy(&ev.addr.bdaddr, bdaddr);
6481 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6482 ev.passkey = __cpu_to_le32(passkey);
6483 ev.entered = entered;
6485 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Emit an Authentication Failed event with the translated HCI status. */
6488 void mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6489 u8 addr_type, u8 status)
6491 struct mgmt_ev_auth_failed ev;
6493 bacpy(&ev.addr.bdaddr, bdaddr);
6494 ev.addr.type = link_to_bdaddr(link_type, addr_type);
6495 ev.status = mgmt_status(status);
6497 mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Handle completion of HCI Write Auth Enable: on failure, fail all
 * pending Set Link Security commands; on success, sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state, respond
 * to pending commands and emit New Settings if the flag changed.
 * NOTE(review): the status branch braces and the "changed" guard around
 * new_settings() are elided from this excerpt.
 */
6500 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
6502 struct cmd_lookup match = { NULL, hdev };
6506 u8 mgmt_err = mgmt_status(status);
6507 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
6508 cmd_status_rsp, &mgmt_err);
6512 if (test_bit(HCI_AUTH, &hdev->flags))
6513 changed = !test_and_set_bit(HCI_LINK_SECURITY,
6516 changed = test_and_clear_bit(HCI_LINK_SECURITY,
6519 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
6523 new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command that zeroes the extended inquiry
 * response, and clear the cached copy in hdev->eir. No-op when the
 * controller lacks extended inquiry support.
 */
6529 static void clear_eir(struct hci_request *req)
6531 struct hci_dev *hdev = req->hdev;
6532 struct hci_cp_write_eir cp;
6534 if (!lmp_ext_inq_capable(hdev))
6537 memset(hdev->eir, 0, sizeof(hdev->eir));
6539 memset(&cp, 0, sizeof(cp));
6541 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Handle completion of enabling/disabling Simple Secure Pairing:
 * on failure, roll back the HCI_SSP_ENABLED flag (HS depends on SSP so
 * HCI_HS_ENABLED is cleared too) and fail pending Set SSP commands; on
 * success, sync the flags, respond to pending commands, emit New
 * Settings if changed, and queue follow-up HCI work (SSP debug mode
 * when enabled, otherwise presumably clear_eir — the else branch is
 * elided from this excerpt).
 */
6544 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6546 struct cmd_lookup match = { NULL, hdev };
6547 struct hci_request req;
6548 bool changed = false;
6551 u8 mgmt_err = mgmt_status(status);
6553 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
6554 &hdev->dev_flags)) {
6555 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6556 new_settings(hdev, NULL);
6559 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
6565 changed = !test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6567 changed = test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
6569 changed = test_and_clear_bit(HCI_HS_ENABLED,
6572 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
6575 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
6578 new_settings(hdev, match.sk);
6583 hci_req_init(&req, hdev);
6585 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
6586 if (test_bit(HCI_USE_DEBUG_KEYS, &hdev->dev_flags))
6587 hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
6588 sizeof(enable), &enable);
6594 hci_req_run(&req, NULL);
/* Handle completion of enabling/disabling Secure Connections: on
 * failure, roll back HCI_SC_ENABLED/HCI_SC_ONLY and fail pending Set
 * Secure Connections commands; on success, sync the flags (disabling SC
 * also clears SC-only mode), respond to pending commands and emit New
 * Settings if the flags changed.
 * NOTE(review): branch braces and the "changed" guard are elided from
 * this excerpt.
 */
6597 void mgmt_sc_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
6599 struct cmd_lookup match = { NULL, hdev };
6600 bool changed = false;
6603 u8 mgmt_err = mgmt_status(status);
6606 if (test_and_clear_bit(HCI_SC_ENABLED,
6608 new_settings(hdev, NULL);
6609 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6612 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6613 cmd_status_rsp, &mgmt_err);
6618 changed = !test_and_set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6620 changed = test_and_clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
6621 clear_bit(HCI_SC_ONLY, &hdev->dev_flags);
6624 mgmt_pending_foreach(MGMT_OP_SET_SECURE_CONN, hdev,
6625 settings_rsp, &match);
6628 new_settings(hdev, match.sk);
/* mgmt_pending_foreach() callback: capture the first pending command's
 * socket into the cmd_lookup, taking a reference with sock_hold().
 */
6634 static void sk_lookup(struct pending_cmd *cmd, void *data)
6636 struct cmd_lookup *match = data;
6638 if (match->sk == NULL) {
6639 match->sk = cmd->sk;
6640 sock_hold(match->sk);
/* Handle completion of a class-of-device update: find the socket that
 * triggered it (Set Dev Class / Add UUID / Remove UUID) so it can be
 * skipped, then broadcast the Class Of Device Changed event.
 * NOTE(review): the status guard before mgmt_event() and the trailing
 * sock_put() are elided from this excerpt.
 */
6644 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
6647 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
6649 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
6650 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
6651 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
6654 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, 3,
/* Handle completion of a local name update: store the new name in
 * hdev->dev_name and broadcast Local Name Changed (skipping the
 * originating socket if a Set Local Name command is pending). Nothing
 * is emitted while a power-on sequence is in progress.
 * NOTE(review): the status guard and branch braces are elided from this
 * excerpt.
 */
6661 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
6663 struct mgmt_cp_set_local_name ev;
6664 struct pending_cmd *cmd;
6669 memset(&ev, 0, sizeof(ev));
6670 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
6671 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
6673 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
6675 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
6677 /* If this is a HCI command related to powering on the
6678 * HCI dev don't send any mgmt signals.
6680 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
6684 mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
6685 cmd ? cmd->sk : NULL);
/* Handle completion of Read Local OOB Data: on failure, return a
 * command status; on success, reply with the extended (192+256 bit)
 * response when Secure Connections is enabled and 256-bit data is
 * available, otherwise with the legacy 192-bit response.
 */
6688 void mgmt_read_local_oob_data_complete(struct hci_dev *hdev, u8 *hash192,
6689 u8 *randomizer192, u8 *hash256,
6690 u8 *randomizer256, u8 status)
6692 struct pending_cmd *cmd;
6694 BT_DBG("%s status %u", hdev->name, status);
6696 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
6701 cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
6702 mgmt_status(status));
6704 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags) &&
6705 hash256 && randomizer256) {
6706 struct mgmt_rp_read_local_oob_ext_data rp;
6708 memcpy(rp.hash192, hash192, sizeof(rp.hash192));
6709 memcpy(rp.randomizer192, randomizer192,
6710 sizeof(rp.randomizer192));
6712 memcpy(rp.hash256, hash256, sizeof(rp.hash256));
6713 memcpy(rp.randomizer256, randomizer256,
6714 sizeof(rp.randomizer256));
6716 cmd_complete(cmd->sk, hdev->id,
6717 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6720 struct mgmt_rp_read_local_oob_data rp;
6722 memcpy(rp.hash, hash192, sizeof(rp.hash));
6723 memcpy(rp.randomizer, randomizer192,
6724 sizeof(rp.randomizer));
6726 cmd_complete(cmd->sk, hdev->id,
6727 MGMT_OP_READ_LOCAL_OOB_DATA, 0,
6732 mgmt_pending_remove(cmd);
/* Emit a Device Found event for an inquiry/scan result. EIR data,
 * an optional class-of-device field (appended only when the EIR lacks
 * one) and any scan response data are packed after the fixed header.
 * Events are suppressed when no kernel-initiated discovery is active,
 * except for LE passive scanning with pending reports.
 * NOTE(review): the buf declaration, ev->rssi assignment and eir_len
 * guard around the memcpy are elided from this excerpt.
 */
6735 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6736 u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
6737 u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
6740 struct mgmt_ev_device_found *ev = (void *) buf;
6743 /* Don't send events for a non-kernel initiated discovery. With
6744 * LE one exception is if we have pend_le_reports > 0 in which
6745 * case we're doing passive scanning and want these events.
6747 if (!hci_discovery_active(hdev)) {
6748 if (link_type == ACL_LINK)
6750 if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
6754 /* Make sure that the buffer is big enough. The 5 extra bytes
6755 * are for the potential CoD field.
6757 if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
6760 memset(buf, 0, sizeof(buf));
6762 bacpy(&ev->addr.bdaddr, bdaddr);
6763 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6765 ev->flags = cpu_to_le32(flags);
6768 memcpy(ev->eir, eir, eir_len);
6770 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
6771 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
6774 if (scan_rsp_len > 0)
6775 memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
6777 ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
6778 ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
6780 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Emit a Device Found event carrying only the remote name (as an
 * EIR_NAME_COMPLETE field), used when a name lookup completes.
 * NOTE(review): the ev->rssi assignment is elided from this excerpt.
 */
6783 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
6784 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
6786 struct mgmt_ev_device_found *ev;
/* +2 covers the EIR field's length and type bytes */
6787 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
6790 ev = (struct mgmt_ev_device_found *) buf;
6792 memset(buf, 0, sizeof(buf));
6794 bacpy(&ev->addr.bdaddr, bdaddr);
6795 ev->addr.type = link_to_bdaddr(link_type, addr_type);
6798 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
6801 ev->eir_len = cpu_to_le16(eir_len);
6803 mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Emit a Discovering event when the discovery state changes, and
 * complete any pending Start/Stop Discovery command with the discovery
 * type as its response payload.
 */
6806 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
6808 struct mgmt_ev_discovering ev;
6809 struct pending_cmd *cmd;
6811 BT_DBG("%s discovering %u", hdev->name, discovering);
/* discovering == true matches Start Discovery, otherwise Stop */
6814 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
6816 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
6819 u8 type = hdev->discovery.type;
6821 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
6823 mgmt_pending_remove(cmd);
6826 memset(&ev, 0, sizeof(ev));
6827 ev.type = hdev->discovery.type;
6828 ev.discovering = discovering;
6830 mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* HCI request completion callback for re-enabling advertising: just
 * log the status; no further action is taken.
 */
6833 static void adv_enable_complete(struct hci_dev *hdev, u8 status)
6835 BT_DBG("%s status %u", hdev->name, status);
6838 void mgmt_reenable_advertising(struct hci_dev *hdev)
6840 struct hci_request req;
6842 if (!test_bit(HCI_ADVERTISING, &hdev->dev_flags))
6845 hci_req_init(&req, hdev);
6846 enable_advertising(&req);
6847 hci_req_run(&req, adv_enable_complete);