2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/smp.h>
37 #define MGMT_VERSION 1
38 #define MGMT_REVISION 3
/* Opcodes of the mgmt commands this kernel supports; the list is reported
 * verbatim to user space by read_commands() below.
 * NOTE(review): gaps in the embedded numbering (42-43, 47, 49-51, ...) show
 * entries were dropped by the extraction, and the closing "};" (orig. 79) is
 * missing — restore from the upstream file before compiling.
 */
40 static const u16 mgmt_commands[] = {
41 MGMT_OP_READ_INDEX_LIST,
44 MGMT_OP_SET_DISCOVERABLE,
45 MGMT_OP_SET_CONNECTABLE,
46 MGMT_OP_SET_FAST_CONNECTABLE,
48 MGMT_OP_SET_LINK_SECURITY,
52 MGMT_OP_SET_DEV_CLASS,
53 MGMT_OP_SET_LOCAL_NAME,
56 MGMT_OP_LOAD_LINK_KEYS,
57 MGMT_OP_LOAD_LONG_TERM_KEYS,
59 MGMT_OP_GET_CONNECTIONS,
60 MGMT_OP_PIN_CODE_REPLY,
61 MGMT_OP_PIN_CODE_NEG_REPLY,
62 MGMT_OP_SET_IO_CAPABILITY,
64 MGMT_OP_CANCEL_PAIR_DEVICE,
65 MGMT_OP_UNPAIR_DEVICE,
66 MGMT_OP_USER_CONFIRM_REPLY,
67 MGMT_OP_USER_CONFIRM_NEG_REPLY,
68 MGMT_OP_USER_PASSKEY_REPLY,
69 MGMT_OP_USER_PASSKEY_NEG_REPLY,
70 MGMT_OP_READ_LOCAL_OOB_DATA,
71 MGMT_OP_ADD_REMOTE_OOB_DATA,
72 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
73 MGMT_OP_START_DISCOVERY,
74 MGMT_OP_STOP_DISCOVERY,
77 MGMT_OP_UNBLOCK_DEVICE,
78 MGMT_OP_SET_DEVICE_ID,
/* Opcodes of the mgmt events this kernel can emit; reported to user space
 * by read_commands() together with mgmt_commands above.
 * NOTE(review): numbering gaps (83, 85, 88, 96-98) indicate entries lost in
 * extraction, and the closing "};" is missing — recover from upstream.
 */
81 static const u16 mgmt_events[] = {
82 MGMT_EV_CONTROLLER_ERROR,
84 MGMT_EV_INDEX_REMOVED,
86 MGMT_EV_CLASS_OF_DEV_CHANGED,
87 MGMT_EV_LOCAL_NAME_CHANGED,
89 MGMT_EV_NEW_LONG_TERM_KEY,
90 MGMT_EV_DEVICE_CONNECTED,
91 MGMT_EV_DEVICE_DISCONNECTED,
92 MGMT_EV_CONNECT_FAILED,
93 MGMT_EV_PIN_CODE_REQUEST,
94 MGMT_EV_USER_CONFIRM_REQUEST,
95 MGMT_EV_USER_PASSKEY_REQUEST,
99 MGMT_EV_DEVICE_BLOCKED,
100 MGMT_EV_DEVICE_UNBLOCKED,
101 MGMT_EV_DEVICE_UNPAIRED,
102 MGMT_EV_PASSKEY_NOTIFY,
105 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
107 #define hdev_is_powered(hdev) (test_bit(HCI_UP, &hdev->flags) && \
108 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
111 struct list_head list;
119 /* HCI to MGMT error code conversion table */
/* Indexed directly by the raw HCI status byte (see mgmt_status() below),
 * so entry order must exactly match the HCI error code numbering.
 * NOTE(review): the first entry (orig. line 121, HCI status 0x00 ->
 * MGMT_STATUS_SUCCESS) was dropped by the extraction; without it every
 * lookup is off by one — restore it before use.
 */
120 static u8 mgmt_status_table[] = {
122 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
123 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
124 MGMT_STATUS_FAILED, /* Hardware Failure */
125 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
126 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
127 MGMT_STATUS_NOT_PAIRED, /* PIN or Key Missing */
128 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
129 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
130 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
131 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
132 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
133 MGMT_STATUS_BUSY, /* Command Disallowed */
134 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
135 MGMT_STATUS_REJECTED, /* Rejected Security */
136 MGMT_STATUS_REJECTED, /* Rejected Personal */
137 MGMT_STATUS_TIMEOUT, /* Host Timeout */
138 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
139 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
140 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
141 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
142 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
143 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
144 MGMT_STATUS_BUSY, /* Repeated Attempts */
145 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
146 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
147 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
148 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
149 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
150 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
151 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
152 MGMT_STATUS_FAILED, /* Unspecified Error */
153 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
154 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
155 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
156 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
157 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
158 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
159 MGMT_STATUS_FAILED, /* Unit Link Key Used */
160 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
161 MGMT_STATUS_TIMEOUT, /* Instant Passed */
162 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
163 MGMT_STATUS_FAILED, /* Transaction Collision */
164 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
165 MGMT_STATUS_REJECTED, /* QoS Rejected */
166 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
167 MGMT_STATUS_REJECTED, /* Insufficient Security */
168 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
169 MGMT_STATUS_BUSY, /* Role Switch Pending */
170 MGMT_STATUS_FAILED, /* Slot Violation */
171 MGMT_STATUS_FAILED, /* Role Switch Failed */
172 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
173 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
174 MGMT_STATUS_BUSY, /* Host Busy Pairing */
175 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
176 MGMT_STATUS_BUSY, /* Controller Busy */
177 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
178 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
179 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
180 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
181 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
/* mgmt only drives BR/EDR controllers; other dev_types are filtered out
 * (see read_index_list()).
 * NOTE(review): function braces (orig. 185/187) lost in extraction.
 */
184 bool mgmt_valid_hdev(struct hci_dev *hdev)
186 return hdev->dev_type == HCI_BREDR;
/* Translate a raw HCI status byte into its MGMT status via the conversion
 * table; out-of-range values collapse to MGMT_STATUS_FAILED.
 */
189 static u8 mgmt_status(u8 hci_status)
191 if (hci_status < ARRAY_SIZE(mgmt_status_table))
192 return mgmt_status_table[hci_status];
194 return MGMT_STATUS_FAILED;
/* Queue a MGMT_EV_CMD_STATUS event on the requesting socket, reporting
 * @status for command @cmd on controller @index. Returns the result of
 * sock_queue_rcv_skb() (presumably 0 on success / -errno — confirm).
 * NOTE(review): the skb/err declarations, the alloc_skb() NULL check, the
 * ev->status assignment and the error-path kfree_skb() (numbering gaps
 * 198-199, 202-203, 205, 207-209, ...) were lost in extraction.
 */
197 static int cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status)
200 struct mgmt_hdr *hdr;
201 struct mgmt_ev_cmd_status *ev;
204 BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status);
206 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL);
210 hdr = (void *) skb_put(skb, sizeof(*hdr));
212 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_STATUS);
213 hdr->index = cpu_to_le16(index);
214 hdr->len = cpu_to_le16(sizeof(*ev));
216 ev = (void *) skb_put(skb, sizeof(*ev));
218 ev->opcode = cpu_to_le16(cmd);
220 err = sock_queue_rcv_skb(sk, skb);
/* Queue a MGMT_EV_CMD_COMPLETE event on @sk for command @cmd, with the
 * variable-length response payload @rp (@rp_len bytes) appended after the
 * fixed event header.
 * NOTE(review): skb/err declarations, alloc failure check, the ev->status
 * assignment and rp NULL-guard around the memcpy were lost in extraction
 * (numbering gaps 229-230, 233-234, 238-240, 249-251, ...).
 */
227 static int cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status,
228 void *rp, size_t rp_len)
231 struct mgmt_hdr *hdr;
232 struct mgmt_ev_cmd_complete *ev;
235 BT_DBG("sock %p", sk);
237 skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL);
241 hdr = (void *) skb_put(skb, sizeof(*hdr));
243 hdr->opcode = __constant_cpu_to_le16(MGMT_EV_CMD_COMPLETE);
244 hdr->index = cpu_to_le16(index);
245 hdr->len = cpu_to_le16(sizeof(*ev) + rp_len);
247 ev = (void *) skb_put(skb, sizeof(*ev) + rp_len);
248 ev->opcode = cpu_to_le16(cmd);
252 memcpy(ev->data, rp, rp_len);
254 err = sock_queue_rcv_skb(sk, skb);
/* MGMT_OP_READ_VERSION handler: reply with the compile-time interface
 * version/revision. hdev/data are unused — this is an index-less command
 * (MGMT_INDEX_NONE).
 */
261 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
264 struct mgmt_rp_read_version rp;
266 BT_DBG("sock %p", sk);
268 rp.version = MGMT_VERSION;
269 rp.revision = __constant_cpu_to_le16(MGMT_REVISION);
271 return cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, &rp,
/* MGMT_OP_READ_COMMANDS handler: build a response listing every supported
 * command opcode followed by every supported event opcode, as little-endian
 * u16s (put_unaligned_le16 — rp->opcodes need not be aligned).
 * NOTE(review): declarations of rp_size/i/opcode/err, the kmalloc NULL
 * check and the trailing kfree(rp) were lost in extraction.
 */
275 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
278 struct mgmt_rp_read_commands *rp;
279 const u16 num_commands = ARRAY_SIZE(mgmt_commands);
280 const u16 num_events = ARRAY_SIZE(mgmt_events);
285 BT_DBG("sock %p", sk);
287 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
289 rp = kmalloc(rp_size, GFP_KERNEL);
293 rp->num_commands = __constant_cpu_to_le16(num_commands);
294 rp->num_events = __constant_cpu_to_le16(num_events);
296 for (i = 0, opcode = rp->opcodes; i < num_commands; i++, opcode++)
297 put_unaligned_le16(mgmt_commands[i], opcode);
299 for (i = 0; i < num_events; i++, opcode++)
300 put_unaligned_le16(mgmt_events[i], opcode);
302 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, rp,
/* MGMT_OP_READ_INDEX_LIST handler: two passes over hci_dev_list under the
 * read lock — first count eligible controllers to size the reply, then fill
 * in their ids, skipping devices still in HCI_SETUP, devices claimed by a
 * user channel, and non-BR/EDR devices (mgmt_valid_hdev). GFP_ATOMIC is
 * required because the allocation happens with the read lock held.
 * NOTE(review): several lines lost in extraction (counting-pass body,
 * kmalloc NULL check, loop continues, final kfree) — see numbering gaps.
 */
309 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
312 struct mgmt_rp_read_index_list *rp;
318 BT_DBG("sock %p", sk);
320 read_lock(&hci_dev_list_lock);
323 list_for_each_entry(d, &hci_dev_list, list) {
324 if (!mgmt_valid_hdev(d))
330 rp_len = sizeof(*rp) + (2 * count);
331 rp = kmalloc(rp_len, GFP_ATOMIC);
333 read_unlock(&hci_dev_list_lock);
338 list_for_each_entry(d, &hci_dev_list, list) {
339 if (test_bit(HCI_SETUP, &d->dev_flags))
342 if (test_bit(HCI_USER_CHANNEL, &d->dev_flags))
345 if (!mgmt_valid_hdev(d))
348 rp->index[count++] = cpu_to_le16(d->id);
349 BT_DBG("Added hci%u", d->id);
352 rp->num_controllers = cpu_to_le16(count);
353 rp_len = sizeof(*rp) + (2 * count);
355 read_unlock(&hci_dev_list_lock);
357 err = cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, 0, rp,
/* Compute the MGMT_SETTING_* bitmask a controller could ever support,
 * based on its LMP feature bits: POWERED/PAIRABLE always, SSP if
 * SSP-capable, the BR/EDR cluster if BR/EDR-capable (fast connectable only
 * from HCI 1.2 up), and LE if LE-capable.
 * NOTE(review): the settings declaration, an apparent lmp_ssp guard around
 * MGMT_SETTING_HS and the final return were lost in extraction.
 */
365 static u32 get_supported_settings(struct hci_dev *hdev)
369 settings |= MGMT_SETTING_POWERED;
370 settings |= MGMT_SETTING_PAIRABLE;
372 if (lmp_ssp_capable(hdev))
373 settings |= MGMT_SETTING_SSP;
375 if (lmp_bredr_capable(hdev)) {
376 settings |= MGMT_SETTING_CONNECTABLE;
377 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
378 settings |= MGMT_SETTING_FAST_CONNECTABLE;
379 settings |= MGMT_SETTING_DISCOVERABLE;
380 settings |= MGMT_SETTING_BREDR;
381 settings |= MGMT_SETTING_LINK_SECURITY;
385 settings |= MGMT_SETTING_HS;
387 if (lmp_le_capable(hdev))
388 settings |= MGMT_SETTING_LE;
/* Compute the MGMT_SETTING_* bitmask currently in effect, derived from
 * hdev->dev_flags (and hdev->flags via hdev_is_powered). This is the value
 * reported in READ_INFO replies and NEW_SETTINGS events.
 * NOTE(review): the settings declaration and final return were lost in
 * extraction (gaps at orig. 394-396, 426-428).
 */
393 static u32 get_current_settings(struct hci_dev *hdev)
397 if (hdev_is_powered(hdev))
398 settings |= MGMT_SETTING_POWERED;
400 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
401 settings |= MGMT_SETTING_CONNECTABLE;
403 if (test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
404 settings |= MGMT_SETTING_FAST_CONNECTABLE;
406 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
407 settings |= MGMT_SETTING_DISCOVERABLE;
409 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags))
410 settings |= MGMT_SETTING_PAIRABLE;
412 if (lmp_bredr_capable(hdev))
413 settings |= MGMT_SETTING_BREDR;
415 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
416 settings |= MGMT_SETTING_LE;
418 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
419 settings |= MGMT_SETTING_LINK_SECURITY;
421 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
422 settings |= MGMT_SETTING_SSP;
424 if (test_bit(HCI_HS_ENABLED, &hdev->dev_flags))
425 settings |= MGMT_SETTING_HS;
430 #define PNP_INFO_SVCLASS_ID 0x1200
/* Append an EIR field listing all 16-bit service UUIDs to @data (capacity
 * @len). Writes the values little-endian by hand and grows the length byte
 * (uuids_start[0]) per UUID; downgrades the field tag from UUID16_ALL to
 * UUID16_SOME when space runs out. PnP-Info is skipped — per the upstream
 * comment it is presumably exposed through Device ID EIR instead (confirm).
 * Returns the advanced write pointer.
 * NOTE(review): the uuid16 declaration, the field-header emission writing
 * uuids_start/tag, and the loop break/return were lost in extraction.
 */
432 static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
434 u8 *ptr = data, *uuids_start = NULL;
435 struct bt_uuid *uuid;
440 list_for_each_entry(uuid, &hdev->uuids, list) {
443 if (uuid->size != 16)
446 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
450 if (uuid16 == PNP_INFO_SVCLASS_ID)
456 uuids_start[1] = EIR_UUID16_ALL;
460 /* Stop if not enough space to put next UUID */
461 if ((ptr - data) + sizeof(u16) > len) {
462 uuids_start[1] = EIR_UUID16_SOME;
466 *ptr++ = (uuid16 & 0x00ff);
467 *ptr++ = (uuid16 & 0xff00) >> 8;
468 uuids_start[0] += sizeof(uuid16);
/* Append an EIR field listing all 32-bit service UUIDs to @data (capacity
 * @len); same shape as create_uuid16_list but copies the 4 value bytes from
 * offset 12 of the stored 128-bit form. Downgrades UUID32_ALL to
 * UUID32_SOME on overflow. Returns the advanced write pointer.
 * NOTE(review): field-header emission and loop exits lost in extraction.
 */
474 static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
476 u8 *ptr = data, *uuids_start = NULL;
477 struct bt_uuid *uuid;
482 list_for_each_entry(uuid, &hdev->uuids, list) {
483 if (uuid->size != 32)
489 uuids_start[1] = EIR_UUID32_ALL;
493 /* Stop if not enough space to put next UUID */
494 if ((ptr - data) + sizeof(u32) > len) {
495 uuids_start[1] = EIR_UUID32_SOME;
499 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
501 uuids_start[0] += sizeof(u32);
/* Append an EIR field listing all 128-bit service UUIDs to @data (capacity
 * @len); copies the full 16-byte UUID per entry and downgrades UUID128_ALL
 * to UUID128_SOME on overflow. Returns the advanced write pointer.
 * NOTE(review): field-header emission and loop exits lost in extraction.
 */
507 static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
509 u8 *ptr = data, *uuids_start = NULL;
510 struct bt_uuid *uuid;
515 list_for_each_entry(uuid, &hdev->uuids, list) {
516 if (uuid->size != 128)
522 uuids_start[1] = EIR_UUID128_ALL;
526 /* Stop if not enough space to put next UUID */
527 if ((ptr - data) + 16 > len) {
528 uuids_start[1] = EIR_UUID128_SOME;
532 memcpy(ptr, uuid->uuid, 16);
534 uuids_start[0] += 16;
/* Build the controller's Extended Inquiry Response payload into @data:
 * local name (COMPLETE, or SHORT when truncated), inquiry TX power if
 * valid, Device ID if a devid_source is set, then the 16/32/128-bit UUID
 * lists sized against the remaining HCI_MAX_EIR_LENGTH budget.
 * NOTE(review): ptr/name_len declarations, the name-truncation branch, the
 * EIR length bytes for TX-power/Device-ID fields and the ptr advances were
 * lost in extraction (see numbering gaps) — recover from upstream.
 */
540 static void create_eir(struct hci_dev *hdev, u8 *data)
545 name_len = strlen(hdev->dev_name);
551 ptr[1] = EIR_NAME_SHORT;
553 ptr[1] = EIR_NAME_COMPLETE;
555 /* EIR Data length */
556 ptr[0] = name_len + 1;
558 memcpy(ptr + 2, hdev->dev_name, name_len);
560 ptr += (name_len + 2);
563 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
565 ptr[1] = EIR_TX_POWER;
566 ptr[2] = (u8) hdev->inq_tx_power;
571 if (hdev->devid_source > 0) {
573 ptr[1] = EIR_DEVICE_ID;
575 put_unaligned_le16(hdev->devid_source, ptr + 2);
576 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
577 put_unaligned_le16(hdev->devid_product, ptr + 6);
578 put_unaligned_le16(hdev->devid_version, ptr + 8);
583 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
584 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
585 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
/* Queue an HCI Write-EIR command on @req when the freshly built EIR differs
 * from the cached hdev->eir. Bails out (early return, bodies lost in
 * extraction) when the adapter is down, lacks extended-inquiry support,
 * has SSP disabled, or the service cache is active.
 */
588 static void update_eir(struct hci_request *req)
590 struct hci_dev *hdev = req->hdev;
591 struct hci_cp_write_eir cp;
593 if (!hdev_is_powered(hdev))
596 if (!lmp_ext_inq_capable(hdev))
599 if (!test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
602 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
605 memset(&cp, 0, sizeof(cp));
607 create_eir(hdev, cp.data);
609 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
612 memcpy(hdev->eir, cp.data, sizeof(cp.data));
614 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* OR together the service-class hint bits of every registered UUID; this
 * becomes the service byte of the Class of Device (see update_class()).
 * NOTE(review): the val declaration/initialization and final return were
 * lost in extraction.
 */
617 static u8 get_service_classes(struct hci_dev *hdev)
619 struct bt_uuid *uuid;
622 list_for_each_entry(uuid, &hdev->uuids, list)
623 val |= uuid->svc_hint;
/* Queue a Write-Class-of-Device command on @req built from minor/major
 * class plus the aggregated service classes, but only when powered, the
 * service cache is inactive, and the value actually changed versus
 * hdev->dev_class. (The cod[3] declaration and the early-return bodies
 * were lost in extraction.)
 */
628 static void update_class(struct hci_request *req)
630 struct hci_dev *hdev = req->hdev;
633 BT_DBG("%s", hdev->name);
635 if (!hdev_is_powered(hdev))
638 if (test_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
641 cod[0] = hdev->minor_class;
642 cod[1] = hdev->major_class;
643 cod[2] = get_service_classes(hdev);
645 if (memcmp(cod, hdev->dev_class, 3) == 0)
648 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
/* Delayed-work handler that expires the service cache: clears
 * HCI_SERVICE_CACHE and runs an HCI request — presumably the update_class/
 * update_eir calls at the lost orig. lines 662-665 (confirm against
 * upstream). The matching hci_dev_lock() was also lost in extraction.
 */
651 static void service_cache_off(struct work_struct *work)
653 struct hci_dev *hdev = container_of(work, struct hci_dev,
655 struct hci_request req;
657 if (!test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
660 hci_req_init(&req, hdev);
667 hci_dev_unlock(hdev);
669 hci_req_run(&req, NULL);
/* One-time per-device mgmt initialization, guarded by the HCI_MGMT flag:
 * sets up the service-cache delayed work and clears HCI_PAIRABLE so that
 * pairing must be enabled explicitly by user space.
 */
672 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
674 if (test_and_set_bit(HCI_MGMT, &hdev->dev_flags))
677 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
679 /* Non-mgmt controlled devices get this bit set
680 * implicitly so that pairing works for them, however
681 * for mgmt we require user-space to explicitly enable
684 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
/* MGMT_OP_READ_INFO handler: snapshot the controller's address, HCI
 * version/manufacturer, supported and current settings, class of device
 * and names into a reply, taken while holding hci_dev_lock (the matching
 * hci_dev_lock() call was lost in extraction — only the unlock survives).
 */
687 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
688 void *data, u16 data_len)
690 struct mgmt_rp_read_info rp;
692 BT_DBG("sock %p %s", sk, hdev->name)
696 memset(&rp, 0, sizeof(rp));
698 bacpy(&rp.bdaddr, &hdev->bdaddr);
700 rp.version = hdev->hci_ver;
701 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
703 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
704 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
706 memcpy(rp.dev_class, hdev->dev_class, 3);
708 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
709 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
711 hci_dev_unlock(hdev);
713 return cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
/* Free a pending command and its resources.
 * NOTE(review): the entire body (orig. 718-722 — presumably sock_put,
 * kfree of cmd->param and cmd) was lost in extraction; restore upstream.
 */
717 static void mgmt_pending_free(struct pending_cmd *cmd)
/* Allocate and register a pending_cmd on hdev->mgmt_pending, copying @len
 * bytes of command parameters so the reply can be produced later from the
 * request-complete callback. Returns the new entry (NULL paths and the
 * sock_hold/cmd->sk assignment were lost in extraction).
 */
724 static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode,
725 struct hci_dev *hdev, void *data,
728 struct pending_cmd *cmd;
730 cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
734 cmd->opcode = opcode;
735 cmd->index = hdev->id;
737 cmd->param = kmalloc(len, GFP_KERNEL);
744 memcpy(cmd->param, data, len);
749 list_add(&cmd->list, &hdev->mgmt_pending);
/* Invoke @cb on every pending command matching @opcode (0 matches all).
 * Uses the _safe iterator so callbacks may remove entries while walking.
 */
754 static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev,
755 void (*cb)(struct pending_cmd *cmd,
759 struct pending_cmd *cmd, *tmp;
761 list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) {
762 if (opcode > 0 && cmd->opcode != opcode)
/* Find the first pending command with the given opcode, or NULL (the
 * "return cmd" / "return NULL" lines were lost in extraction).
 */
769 static struct pending_cmd *mgmt_pending_find(u16 opcode, struct hci_dev *hdev)
771 struct pending_cmd *cmd;
773 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
774 if (cmd->opcode == opcode)
/* Unlink a pending command from its device list and free it. */
781 static void mgmt_pending_remove(struct pending_cmd *cmd)
783 list_del(&cmd->list);
784 mgmt_pending_free(cmd);
/* Reply to a settings-changing command with the current settings bitmask
 * as a little-endian u32 payload.
 */
787 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
789 __le32 settings = cpu_to_le32(get_current_settings(hdev));
791 return cmd_complete(sk, hdev->id, opcode, 0, &settings,
/* MGMT_OP_SET_POWERED handler. Validates val is 0/1, then: if the device
 * was in auto-off, cancels that and records the pending command before
 * calling mgmt_powered(); if already in the requested state, just echoes
 * current settings; rejects with busy if a SET_POWERED is already pending;
 * otherwise registers the pending command and queues power_on/power_off
 * work. (hci_dev_lock(), several branch bodies and goto labels were lost
 * in extraction — see numbering gaps.)
 */
795 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
798 struct mgmt_mode *cp = data;
799 struct pending_cmd *cmd;
802 BT_DBG("request for %s", hdev->name);
804 if (cp->val != 0x00 && cp->val != 0x01)
805 return cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
806 MGMT_STATUS_INVALID_PARAMS);
810 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
811 cancel_delayed_work(&hdev->power_off);
814 mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev,
816 err = mgmt_powered(hdev, 1);
821 if (!!cp->val == hdev_is_powered(hdev)) {
822 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
826 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev)) {
827 err = cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
832 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
839 queue_work(hdev->req_workqueue, &hdev->power_on);
841 queue_work(hdev->req_workqueue, &hdev->power_off.work);
846 hci_dev_unlock(hdev);
/* Broadcast a mgmt event to all control sockets except @skip_sk: build an
 * skb with the mgmt header (index = hdev->id, or MGMT_INDEX_NONE when hdev
 * is NULL — the if/else around lines 862-865 was lost in extraction),
 * append @data, timestamp it and hand it to hci_send_to_control().
 */
850 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 data_len,
851 struct sock *skip_sk)
854 struct mgmt_hdr *hdr;
856 skb = alloc_skb(sizeof(*hdr) + data_len, GFP_KERNEL);
860 hdr = (void *) skb_put(skb, sizeof(*hdr));
861 hdr->opcode = cpu_to_le16(event);
863 hdr->index = cpu_to_le16(hdev->id);
865 hdr->index = __constant_cpu_to_le16(MGMT_INDEX_NONE);
866 hdr->len = cpu_to_le16(data_len);
869 memcpy(skb_put(skb, data_len), data, data_len);
872 __net_timestamp(skb);
874 hci_send_to_control(skb, skip_sk);
/* Emit MGMT_EV_NEW_SETTINGS with the current settings bitmask to every
 * control socket except @skip (typically the originator of the change).
 */
880 static int new_settings(struct hci_dev *hdev, struct sock *skip)
884 ev = cpu_to_le32(get_current_settings(hdev));
886 return mgmt_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, sizeof(ev), skip);
/* MGMT_OP_SET_DISCOVERABLE handler. Validation: BR/EDR only, val 0/1, and
 * a timeout is only legal when enabling. Powered-off devices just toggle
 * the HCI_DISCOVERABLE flag; a pending SET_DISCOVERABLE/SET_CONNECTABLE
 * is rejected busy; enabling requires HCI_CONNECTABLE. When only the
 * timeout changes, (re)arm the discov_off delayed work. Otherwise issue
 * HCI Write-Scan-Enable with a pending command registered.
 * NOTE(review): hci_dev_lock(), scan-bitmask setup, goto labels and
 * several branch bodies were lost in extraction — recover from upstream.
 */
889 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
892 struct mgmt_cp_set_discoverable *cp = data;
893 struct pending_cmd *cmd;
898 BT_DBG("request for %s", hdev->name);
900 if (!lmp_bredr_capable(hdev))
901 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
902 MGMT_STATUS_NOT_SUPPORTED);
904 if (cp->val != 0x00 && cp->val != 0x01)
905 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
906 MGMT_STATUS_INVALID_PARAMS);
908 timeout = __le16_to_cpu(cp->timeout);
909 if (!cp->val && timeout > 0)
910 return cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
911 MGMT_STATUS_INVALID_PARAMS);
915 if (!hdev_is_powered(hdev) && timeout > 0) {
916 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
917 MGMT_STATUS_NOT_POWERED);
921 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
922 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
923 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
928 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
929 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
930 MGMT_STATUS_REJECTED);
934 if (!hdev_is_powered(hdev)) {
935 bool changed = false;
937 if (!!cp->val != test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
938 change_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
942 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
947 err = new_settings(hdev, sk);
952 if (!!cp->val == test_bit(HCI_DISCOVERABLE, &hdev->dev_flags)) {
953 if (hdev->discov_timeout > 0) {
954 cancel_delayed_work(&hdev->discov_off);
955 hdev->discov_timeout = 0;
958 if (cp->val && timeout > 0) {
959 hdev->discov_timeout = timeout;
960 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
961 msecs_to_jiffies(hdev->discov_timeout * 1000));
964 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
968 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
977 scan |= SCAN_INQUIRY;
979 cancel_delayed_work(&hdev->discov_off);
981 err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
983 mgmt_pending_remove(cmd);
986 hdev->discov_timeout = timeout;
989 hci_dev_unlock(hdev);
/* Queue page-scan parameter updates on @req for fast-connectable mode:
 * interlaced scan with a 160 ms interval when enabling, standard scan with
 * the 1.28 s default otherwise (window fixed at 0x0012). Commands are only
 * added when the cached page_scan_* values actually differ. Pre-1.2
 * controllers are skipped entirely (the early-return body at orig. 1000
 * and the if/else framing were lost in extraction).
 */
993 static void write_fast_connectable(struct hci_request *req, bool enable)
995 struct hci_dev *hdev = req->hdev;
996 struct hci_cp_write_page_scan_activity acp;
999 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1003 type = PAGE_SCAN_TYPE_INTERLACED;
1005 /* 160 msec page scan interval */
1006 acp.interval = __constant_cpu_to_le16(0x0100);
1008 type = PAGE_SCAN_TYPE_STANDARD; /* default */
1010 /* default 1.28 sec page scan */
1011 acp.interval = __constant_cpu_to_le16(0x0800);
1014 acp.window = __constant_cpu_to_le16(0x0012);
1016 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
1017 __cpu_to_le16(hdev->page_scan_window) != acp.window)
1018 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
1021 if (hdev->page_scan_type != type)
1022 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
/* HCI request completion callback for SET_CONNECTABLE: look up the pending
 * command under hci_dev_lock (the lock call was lost in extraction), send
 * the settings response to its socket and drop the pending entry.
 */
1025 static void set_connectable_complete(struct hci_dev *hdev, u8 status)
1027 struct pending_cmd *cmd;
1029 BT_DBG("status 0x%02x", status);
1033 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1037 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1039 mgmt_pending_remove(cmd);
1042 hci_dev_unlock(hdev);
/* MGMT_OP_SET_CONNECTABLE handler. BR/EDR only, val 0/1. Powered-off:
 * toggle HCI_CONNECTABLE (disabling also clears HCI_DISCOVERABLE) and
 * reply immediately. Busy if a SET_DISCOVERABLE/SET_CONNECTABLE is already
 * pending; no-op reply if page scan already matches. Otherwise build an
 * HCI request that rewrites scan enable (disabling also cancels a running
 * discoverable timeout) and disables fast connectable when needed, with
 * set_connectable_complete() as the finish callback.
 * NOTE(review): hci_dev_lock(), the scan-bitmask computation and several
 * branch bodies/labels were lost in extraction.
 */
1045 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1048 struct mgmt_mode *cp = data;
1049 struct pending_cmd *cmd;
1050 struct hci_request req;
1054 BT_DBG("request for %s", hdev->name);
1056 if (!lmp_bredr_capable(hdev))
1057 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1058 MGMT_STATUS_NOT_SUPPORTED);
1060 if (cp->val != 0x00 && cp->val != 0x01)
1061 return cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1062 MGMT_STATUS_INVALID_PARAMS);
1066 if (!hdev_is_powered(hdev)) {
1067 bool changed = false;
1069 if (!!cp->val != test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
1073 set_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1075 clear_bit(HCI_CONNECTABLE, &hdev->dev_flags);
1076 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1079 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1084 err = new_settings(hdev, sk);
1089 if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1090 mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1091 err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1096 if (!!cp->val == test_bit(HCI_PSCAN, &hdev->flags)) {
1097 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1101 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1112 if (test_bit(HCI_ISCAN, &hdev->flags) &&
1113 hdev->discov_timeout > 0)
1114 cancel_delayed_work(&hdev->discov_off);
1117 hci_req_init(&req, hdev);
1119 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1121 /* If we're going from non-connectable to connectable or
1122 * vice-versa when fast connectable is enabled ensure that fast
1123 * connectable gets disabled. write_fast_connectable won't do
1124 * anything if the page scan parameters are already what they
1127 if (cp->val || test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags))
1128 write_fast_connectable(&req, false);
1130 err = hci_req_run(&req, set_connectable_complete);
1132 mgmt_pending_remove(cmd);
1135 hci_dev_unlock(hdev);
/* MGMT_OP_SET_PAIRABLE handler: pure flag toggle on HCI_PAIRABLE — no HCI
 * traffic — followed by a settings response and, presumably when the flag
 * actually changed (the guard at the lost lines), a NEW_SETTINGS
 * broadcast. hci_dev_lock() was lost in extraction.
 */
1139 static int set_pairable(struct sock *sk, struct hci_dev *hdev, void *data,
1142 struct mgmt_mode *cp = data;
1145 BT_DBG("request for %s", hdev->name);
1147 if (cp->val != 0x00 && cp->val != 0x01)
1148 return cmd_status(sk, hdev->id, MGMT_OP_SET_PAIRABLE,
1149 MGMT_STATUS_INVALID_PARAMS);
1154 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1156 clear_bit(HCI_PAIRABLE, &hdev->dev_flags);
1158 err = send_settings_rsp(sk, MGMT_OP_SET_PAIRABLE, hdev);
1162 err = new_settings(hdev, sk);
1165 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler. BR/EDR only, val 0/1. Powered-off:
 * toggle the HCI_LINK_SECURITY flag and reply. Busy if already pending;
 * no-op reply if HCI_AUTH already matches. Otherwise register a pending
 * command and send HCI Write-Auth-Enable.
 * NOTE(review): hci_dev_lock(), the val derivation from cp->val, and
 * goto labels were lost in extraction.
 */
1169 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1172 struct mgmt_mode *cp = data;
1173 struct pending_cmd *cmd;
1177 BT_DBG("request for %s", hdev->name);
1179 if (!lmp_bredr_capable(hdev))
1180 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1181 MGMT_STATUS_NOT_SUPPORTED);
1183 if (cp->val != 0x00 && cp->val != 0x01)
1184 return cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1185 MGMT_STATUS_INVALID_PARAMS);
1189 if (!hdev_is_powered(hdev)) {
1190 bool changed = false;
1192 if (!!cp->val != test_bit(HCI_LINK_SECURITY,
1193 &hdev->dev_flags)) {
1194 change_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
1198 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1203 err = new_settings(hdev, sk);
1208 if (mgmt_pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1209 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1216 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1217 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1221 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1227 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1229 mgmt_pending_remove(cmd);
1234 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler: same state machine as set_link_security but for
 * Secure Simple Pairing — powered-off flag toggle, busy check, no-op check
 * against HCI_SSP_ENABLED, then HCI Write-SSP-Mode with a pending command.
 * NOTE(review): hci_dev_lock(), the val derivation and goto labels were
 * lost in extraction.
 */
1238 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1240 struct mgmt_mode *cp = data;
1241 struct pending_cmd *cmd;
1245 BT_DBG("request for %s", hdev->name);
1247 if (!lmp_ssp_capable(hdev))
1248 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1249 MGMT_STATUS_NOT_SUPPORTED);
1251 if (cp->val != 0x00 && cp->val != 0x01)
1252 return cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1253 MGMT_STATUS_INVALID_PARAMS);
1259 if (!hdev_is_powered(hdev)) {
1260 bool changed = false;
1262 if (val != test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1263 change_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
1267 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1272 err = new_settings(hdev, sk);
1277 if (mgmt_pending_find(MGMT_OP_SET_SSP, hdev)) {
1278 err = cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1283 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) == val) {
1284 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1288 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
1294 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(val), &val);
1296 mgmt_pending_remove(cmd);
1301 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler: High Speed support is a host-side flag only, so
 * after validation (the capability guard's condition at orig. 1311 was
 * lost in extraction) this just sets/clears HCI_HS_ENABLED and replies
 * with the current settings — no HCI command involved.
 */
1305 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1307 struct mgmt_mode *cp = data;
1309 BT_DBG("request for %s", hdev->name);
1312 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1313 MGMT_STATUS_NOT_SUPPORTED);
1315 if (cp->val != 0x00 && cp->val != 0x01)
1316 return cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1317 MGMT_STATUS_INVALID_PARAMS);
1320 set_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1322 clear_bit(HCI_HS_ENABLED, &hdev->dev_flags);
1324 return send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
/* MGMT_OP_SET_LE handler. Requires LE capability, val 0/1, and rejects
 * the toggle on LE-only (non-BR/EDR) controllers. When powered off or the
 * controller's host-LE state already matches, just toggle HCI_LE_ENABLED
 * and reply; busy if a SET_LE is pending; otherwise send HCI
 * Write-LE-Host-Supported (with simul = LE+BR/EDR capability) under a
 * registered pending command.
 * NOTE(review): hci_dev_lock(), val/enabled declarations, the hci_cp.le
 * assignment and goto labels were lost in extraction.
 */
1327 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1329 struct mgmt_mode *cp = data;
1330 struct hci_cp_write_le_host_supported hci_cp;
1331 struct pending_cmd *cmd;
1335 BT_DBG("request for %s", hdev->name);
1337 if (!lmp_le_capable(hdev))
1338 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1339 MGMT_STATUS_NOT_SUPPORTED);
1341 if (cp->val != 0x00 && cp->val != 0x01)
1342 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1343 MGMT_STATUS_INVALID_PARAMS);
1345 /* LE-only devices do not allow toggling LE on/off */
1346 if (!lmp_bredr_capable(hdev))
1347 return cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1348 MGMT_STATUS_REJECTED);
1353 enabled = lmp_host_le_capable(hdev);
1355 if (!hdev_is_powered(hdev) || val == enabled) {
1356 bool changed = false;
1358 if (val != test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1359 change_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1363 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1368 err = new_settings(hdev, sk);
1373 if (mgmt_pending_find(MGMT_OP_SET_LE, hdev)) {
1374 err = cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1379 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1385 memset(&hci_cp, 0, sizeof(hci_cp));
1389 hci_cp.simul = lmp_le_br_capable(hdev);
1392 err = hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1395 mgmt_pending_remove(cmd);
1398 hci_dev_unlock(hdev);
1402 /* This is a helper function to test for pending mgmt commands that can
1403 * cause CoD or EIR HCI commands. We can only allow one such pending
1404 * mgmt command at a time since otherwise we cannot easily track what
1405 * the current values are, will be, and based on that calculate if a new
1406 * HCI command needs to be sent and if yes with what value.
1408 static bool pending_eir_or_class(struct hci_dev *hdev)
1410 struct pending_cmd *cmd;
/* Scan the pending list for any opcode that may rewrite CoD/EIR.
 * NOTE(review): the "return true" inside the switch and the final
 * "return false" were lost in extraction (gaps 1418-1423).
 */
1412 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1413 switch (cmd->opcode) {
1414 case MGMT_OP_ADD_UUID:
1415 case MGMT_OP_REMOVE_UUID:
1416 case MGMT_OP_SET_DEV_CLASS:
1417 case MGMT_OP_SET_POWERED:
/* The Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored
 * little-endian; used by get_uuid_size() to classify 16/32/128-bit UUIDs.
 * NOTE(review): the closing "};" (orig. 1428) was lost in extraction.
 */
1425 static const u8 bluetooth_base_uuid[] = {
1426 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
1427 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Classify a 128-bit UUID as 16, 32 or 128 bits: anything not built on the
 * Bluetooth Base UUID is 128-bit; otherwise the value word at offset 12
 * decides (the comparison against 0xffff and the returns were lost in
 * extraction — see gaps 1435-1436, 1438-1442).
 */
1430 static u8 get_uuid_size(const u8 *uuid)
1434 if (memcmp(uuid, bluetooth_base_uuid, 12))
1437 val = get_unaligned_le32(&uuid[12]);
/* Shared completion path for the UUID/class commands: find the pending
 * @mgmt_op, reply with the (possibly updated) 3-byte device class and the
 * translated status, then drop the pending entry. The matching
 * hci_dev_lock() was lost in extraction.
 */
1444 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
1446 struct pending_cmd *cmd;
1450 cmd = mgmt_pending_find(mgmt_op, hdev);
1454 cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(status),
1455 hdev->dev_class, 3);
1457 mgmt_pending_remove(cmd);
1460 hci_dev_unlock(hdev);
/* HCI request callback for ADD_UUID — delegates to mgmt_class_complete. */
1463 static void add_uuid_complete(struct hci_dev *hdev, u8 status)
1465 BT_DBG("status 0x%02x", status);
1467 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: reject busy while another EIR/class-affecting
 * command is pending, append the new UUID (with its service hint and
 * computed size) to hdev->uuids, then run an HCI request — presumably the
 * update_class/update_eir calls at the lost orig. lines 1502-1503
 * (confirm) — completing via add_uuid_complete. -ENODATA from
 * hci_req_run means nothing needed sending, so reply immediately with the
 * device class; otherwise keep a pending command.
 * NOTE(review): hci_dev_lock(), the kmalloc NULL check and goto labels
 * were lost in extraction.
 */
1470 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1472 struct mgmt_cp_add_uuid *cp = data;
1473 struct pending_cmd *cmd;
1474 struct hci_request req;
1475 struct bt_uuid *uuid;
1478 BT_DBG("request for %s", hdev->name);
1482 if (pending_eir_or_class(hdev)) {
1483 err = cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
1488 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
1494 memcpy(uuid->uuid, cp->uuid, 16);
1495 uuid->svc_hint = cp->svc_hint;
1496 uuid->size = get_uuid_size(cp->uuid);
1498 list_add_tail(&uuid->list, &hdev->uuids);
1500 hci_req_init(&req, hdev);
1505 err = hci_req_run(&req, add_uuid_complete);
1507 if (err != -ENODATA)
1510 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
1511 hdev->dev_class, 3);
1515 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
1524 hci_dev_unlock(hdev);
/* Arm the service-cache window on a powered adapter: set HCI_SERVICE_CACHE
 * and schedule service_cache_off() — presumably after CACHE_TIMEOUT, per
 * the macro above (the queue_delayed_work delay argument and the return
 * statements were lost in extraction). Returns whether the cache was
 * newly enabled.
 */
1528 static bool enable_service_cache(struct hci_dev *hdev)
1530 if (!hdev_is_powered(hdev))
1533 if (!test_and_set_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1534 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request callback for REMOVE_UUID — delegates to mgmt_class_complete. */
1542 static void remove_uuid_complete(struct hci_dev *hdev, u8 status)
1544 BT_DBG("status 0x%02x", status);
1546 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler. Busy while another EIR/class command is
 * pending. The all-zero wildcard UUID clears the whole list and — when the
 * service cache gets (re)armed — replies immediately; otherwise remove
 * matching entries, failing with INVALID_PARAMS when nothing matched.
 * Finishes like add_uuid: run an HCI request (class/EIR refresh bodies
 * lost at orig. 1601-1602) via remove_uuid_complete, replying directly on
 * -ENODATA, else registering a pending command.
 * NOTE(review): hci_dev_lock(), the "found" counter and goto labels were
 * lost in extraction.
 */
1549 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
1552 struct mgmt_cp_remove_uuid *cp = data;
1553 struct pending_cmd *cmd;
1554 struct bt_uuid *match, *tmp;
1555 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
1556 struct hci_request req;
1559 BT_DBG("request for %s", hdev->name);
1563 if (pending_eir_or_class(hdev)) {
1564 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1569 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
1570 err = hci_uuids_clear(hdev);
1572 if (enable_service_cache(hdev)) {
1573 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1574 0, hdev->dev_class, 3);
1583 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
1584 if (memcmp(match->uuid, cp->uuid, 16) != 0)
1587 list_del(&match->list);
1593 err = cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
1594 MGMT_STATUS_INVALID_PARAMS);
1599 hci_req_init(&req, hdev);
1604 err = hci_req_run(&req, remove_uuid_complete);
1606 if (err != -ENODATA)
1609 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
1610 hdev->dev_class, 3);
1614 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
1623 hci_dev_unlock(hdev);
/* HCI request callback for Set Device Class: forward the HCI status
 * to the common class-completion handler.
 */
1627 static void set_class_complete(struct hci_dev *hdev, u8 status)
1629 BT_DBG("status 0x%02x", status);
1631 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: update the major/minor class of
 * device. BR/EDR-only; validates the reserved bits, stores the new
 * values and pushes them to a powered controller via an HCI request.
 */
1634 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
1637 struct mgmt_cp_set_dev_class *cp = data;
1638 struct pending_cmd *cmd;
1639 struct hci_request req;
1642 BT_DBG("request for %s", hdev->name);
/* Class of Device only exists for BR/EDR controllers. */
1644 if (!lmp_bredr_capable(hdev))
1645 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1646 MGMT_STATUS_NOT_SUPPORTED);
1650 if (pending_eir_or_class(hdev)) {
1651 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Reserved bits: minor low two bits and major top three bits
 * must be zero per the CoD format.
 */
1656 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
1657 err = cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
1658 MGMT_STATUS_INVALID_PARAMS);
1662 hdev->major_class = cp->major;
1663 hdev->minor_class = cp->minor;
/* Powered off: just record the values and reply. */
1665 if (!hdev_is_powered(hdev)) {
1666 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1667 hdev->dev_class, 3);
1671 hci_req_init(&req, hdev);
/* Flush the service cache synchronously; must drop the dev lock
 * around cancel_delayed_work_sync() to avoid deadlocking with
 * the work item itself.
 */
1673 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags)) {
1674 hci_dev_unlock(hdev);
1675 cancel_delayed_work_sync(&hdev->service_cache);
1682 err = hci_req_run(&req, set_class_complete);
/* -ENODATA: controller already up to date, reply immediately. */
1684 if (err != -ENODATA)
1687 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
1688 hdev->dev_class, 3);
1692 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
1701 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: replace the controller's stored
 * BR/EDR link keys with the supplied set. Validates the payload
 * length against key_count, the debug_keys flag, and every key's
 * address type before touching the key store.
 */
1705 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
1708 struct mgmt_cp_load_link_keys *cp = data;
1709 u16 key_count, expected_len;
1712 key_count = __le16_to_cpu(cp->key_count);
/* The payload must be exactly header + key_count entries. */
1714 expected_len = sizeof(*cp) + key_count *
1715 sizeof(struct mgmt_link_key_info);
1716 if (expected_len != len) {
1717 BT_ERR("load_link_keys: expected %u bytes, got %u bytes",
1719 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1720 MGMT_STATUS_INVALID_PARAMS);
/* debug_keys is a boolean; any other value is invalid. */
1723 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
1724 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1725 MGMT_STATUS_INVALID_PARAMS);
1727 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* Validate all keys before clearing, so a bad entry cannot leave
 * the store half-replaced.
 */
1730 for (i = 0; i < key_count; i++) {
1731 struct mgmt_link_key_info *key = &cp->keys[i];
/* Link keys only make sense for BR/EDR addresses. */
1733 if (key->addr.type != BDADDR_BREDR)
1734 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
1735 MGMT_STATUS_INVALID_PARAMS);
1740 hci_link_keys_clear(hdev);
1743 set_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1745 clear_bit(HCI_DEBUG_KEYS, &hdev->dev_flags);
1747 for (i = 0; i < key_count; i++) {
1748 struct mgmt_link_key_info *key = &cp->keys[i];
1750 hci_add_link_key(hdev, NULL, 0, &key->addr.bdaddr, key->val,
1751 key->type, key->pin_len);
1754 cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
1756 hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_UNPAIRED for the given remote address,
 * skipping the socket that triggered the unpair (it gets a direct
 * command reply instead).
 */
1761 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
1762 u8 addr_type, struct sock *skip_sk)
1764 struct mgmt_ev_device_unpaired ev;
1766 bacpy(&ev.addr.bdaddr, bdaddr);
1767 ev.addr.type = addr_type;
1769 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: delete the stored pairing key for a
 * remote device (link key for BR/EDR, LTK for LE) and, if requested
 * and a connection exists, disconnect it as well.
 */
1773 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
1776 struct mgmt_cp_unpair_device *cp = data;
1777 struct mgmt_rp_unpair_device rp;
1778 struct hci_cp_disconnect dc;
1779 struct pending_cmd *cmd;
1780 struct hci_conn *conn;
/* The reply always echoes the target address back. */
1783 memset(&rp, 0, sizeof(rp));
1784 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1785 rp.addr.type = cp->addr.type;
1787 if (!bdaddr_type_is_valid(cp->addr.type))
1788 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1789 MGMT_STATUS_INVALID_PARAMS,
/* disconnect is a boolean flag. */
1792 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
1793 return cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1794 MGMT_STATUS_INVALID_PARAMS,
1799 if (!hdev_is_powered(hdev)) {
1800 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1801 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* BR/EDR uses link keys, everything else (LE) uses LTKs.
 * NOTE(review): the LTK removal keys only off bdaddr, not the
 * LE address type — confirm against full source whether that is
 * intended here.
 */
1805 if (cp->addr.type == BDADDR_BREDR)
1806 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
1808 err = hci_remove_ltk(hdev, &cp->addr.bdaddr);
/* Key removal failed: there was no pairing to delete. */
1811 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
1812 MGMT_STATUS_NOT_PAIRED, &rp, sizeof(rp));
1816 if (cp->disconnect) {
1817 if (cp->addr.type == BDADDR_BREDR)
1818 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1821 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
/* No live connection (or no disconnect requested): reply now
 * and broadcast the unpaired event to other mgmt sockets.
 */
1828 err = cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
1830 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
/* Disconnect path: keep the command pending until the
 * disconnect completes.
 */
1834 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
1841 dc.handle = cpu_to_le16(conn->handle);
1842 dc.reason = 0x13; /* Remote User Terminated Connection */
1843 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1845 mgmt_pending_remove(cmd);
1848 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: tear down an existing ACL or LE
 * connection to the given address. Only one disconnect may be
 * pending at a time; completion is reported asynchronously.
 */
1852 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
1855 struct mgmt_cp_disconnect *cp = data;
1856 struct mgmt_rp_disconnect rp;
1857 struct hci_cp_disconnect dc;
1858 struct pending_cmd *cmd;
1859 struct hci_conn *conn;
/* The reply echoes the target address back. */
1864 memset(&rp, 0, sizeof(rp));
1865 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
1866 rp.addr.type = cp->addr.type;
1868 if (!bdaddr_type_is_valid(cp->addr.type))
1869 return cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1870 MGMT_STATUS_INVALID_PARAMS,
1875 if (!test_bit(HCI_UP, &hdev->flags)) {
1876 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1877 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* Serialize: one outstanding disconnect per controller. */
1881 if (mgmt_pending_find(MGMT_OP_DISCONNECT, hdev)) {
1882 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1883 MGMT_STATUS_BUSY, &rp, sizeof(rp));
1887 if (cp->addr.type == BDADDR_BREDR)
1888 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
1891 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
/* BT_OPEN/BT_CLOSED are not established states. */
1893 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
1894 err = cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
1895 MGMT_STATUS_NOT_CONNECTED, &rp, sizeof(rp));
1899 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
1905 dc.handle = cpu_to_le16(conn->handle);
1906 dc.reason = HCI_ERROR_REMOTE_USER_TERM;
1908 err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
/* Sending failed: drop the pending entry we just created. */
1910 mgmt_pending_remove(cmd);
1913 hci_dev_unlock(hdev);
/* Translate an HCI link type + LE address type pair into the mgmt
 * BDADDR_* address-type constant used on the management interface.
 */
1917 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
1919 switch (link_type) {
1921 switch (addr_type) {
1922 case ADDR_LE_DEV_PUBLIC:
1923 return BDADDR_LE_PUBLIC;
/* Fallback to LE Random address type */
1926 /* Fallback to LE Random address type */
1927 return BDADDR_LE_RANDOM;
/* Fallback to BR/EDR type */
1931 /* Fallback to BR/EDR type */
1932 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: return the list of currently
 * connected remote addresses. SCO/eSCO links are filtered out since
 * mgmt only reports ACL/LE connections.
 */
1936 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
1939 struct mgmt_rp_get_connections *rp;
1949 if (!hdev_is_powered(hdev)) {
1950 err = cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
1951 MGMT_STATUS_NOT_POWERED);
/* First pass: count mgmt-visible connections to size the reply. */
1956 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1957 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1961 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1962 rp = kmalloc(rp_len, GFP_KERNEL);
/* Second pass: fill in the address entries. */
1969 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1970 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
1972 bacpy(&rp->addr[i].bdaddr, &c->dst);
1973 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
/* Skip SCO/eSCO: not exposed over the mgmt interface. */
1974 if (c->type == SCO_LINK || c->type == ESCO_LINK)
1979 rp->conn_count = cpu_to_le16(i);
1981 /* Recalculate length in case of filtered SCO connections, etc */
1982 rp_len = sizeof(*rp) + (i * sizeof(struct mgmt_addr_info));
1984 err = cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
1990 hci_dev_unlock(hdev);
/* Queue a PIN Code Negative Reply toward the controller, tracking it
 * as a pending mgmt command so the eventual HCI event can complete
 * it. Caller must hold the dev lock.
 */
1994 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
1995 struct mgmt_cp_pin_code_neg_reply *cp)
1997 struct pending_cmd *cmd;
2000 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
/* The HCI command carries only the remote bdaddr. */
2005 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2006 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2008 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: answer a controller PIN request.
 * For high-security connections a full 16-byte PIN is mandatory;
 * anything shorter is converted into a negative reply.
 */
2013 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2016 struct hci_conn *conn;
2017 struct mgmt_cp_pin_code_reply *cp = data;
2018 struct hci_cp_pin_code_reply reply;
2019 struct pending_cmd *cmd;
2026 if (!hdev_is_powered(hdev)) {
2027 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2028 MGMT_STATUS_NOT_POWERED);
/* PIN replies only apply to an existing ACL connection. */
2032 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2034 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2035 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a 16-byte PIN; reject shorter ones by
 * sending a negative reply to the controller instead.
 */
2039 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2040 struct mgmt_cp_pin_code_neg_reply ncp;
2042 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2044 BT_ERR("PIN code is not 16 bytes long");
2046 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2048 err = cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2049 MGMT_STATUS_INVALID_PARAMS);
2054 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2060 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2061 reply.pin_len = cp->pin_len;
2062 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2064 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2066 mgmt_pending_remove(cmd);
2069 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: record the IO capability used
 * for future pairing procedures. Purely a host-side setting, so it
 * completes synchronously.
 */
2073 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2076 struct mgmt_cp_set_io_capability *cp = data;
2082 hdev->io_capability = cp->io_capability;
2084 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2085 hdev->io_capability);
2087 hci_dev_unlock(hdev);
2089 return cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, NULL,
/* Find the pending Pair Device command whose user_data points at the
 * given connection, if any.
 */
2093 static struct pending_cmd *find_pairing(struct hci_conn *conn)
2095 struct hci_dev *hdev = conn->hdev;
2096 struct pending_cmd *cmd;
2098 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2099 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2102 if (cmd->user_data != conn)
/* Finish a Pair Device command: reply with the remote address and
 * final status, detach all connection callbacks so no further events
 * reach this command, release the connection reference and drop the
 * pending entry.
 */
2111 static void pairing_complete(struct pending_cmd *cmd, u8 status)
2113 struct mgmt_rp_pair_device rp;
2114 struct hci_conn *conn = cmd->user_data;
2116 bacpy(&rp.addr.bdaddr, &conn->dst);
2117 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2119 cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, status,
2122 /* So we don't get further callbacks for this connection */
2123 conn->connect_cfm_cb = NULL;
2124 conn->security_cfm_cb = NULL;
2125 conn->disconn_cfm_cb = NULL;
/* Drop the reference taken when pairing was initiated. */
2127 hci_conn_drop(conn);
2129 mgmt_pending_remove(cmd);
/* Connection-level callback (BR/EDR connect/security/disconnect):
 * map the HCI status to a mgmt status and complete the pairing.
 */
2132 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2134 struct pending_cmd *cmd;
2136 BT_DBG("status %u", status);
2138 cmd = find_pairing(conn);
2140 BT_DBG("Unable to find a pending command");
2142 pairing_complete(cmd, mgmt_status(status));
/* LE connect-complete callback: for LE a successful connection does
 * not by itself finish pairing (SMP runs afterwards), so this path
 * only completes the command on the elided condition above.
 */
2145 static void le_connect_complete_cb(struct hci_conn *conn, u8 status)
2147 struct pending_cmd *cmd;
2149 BT_DBG("status %u", status);
2154 cmd = find_pairing(conn);
2156 BT_DBG("Unable to find a pending command");
2158 pairing_complete(cmd, mgmt_status(status));
/* MGMT_OP_PAIR_DEVICE handler: initiate pairing with a remote device.
 * Establishes an ACL or LE connection with dedicated-bonding auth,
 * hooks completion callbacks on the connection and keeps the command
 * pending until pairing_complete() fires.
 */
2161 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2164 struct mgmt_cp_pair_device *cp = data;
2165 struct mgmt_rp_pair_device rp;
2166 struct pending_cmd *cmd;
2167 u8 sec_level, auth_type;
2168 struct hci_conn *conn;
/* The reply echoes the target address back. */
2173 memset(&rp, 0, sizeof(rp));
2174 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2175 rp.addr.type = cp->addr.type;
2177 if (!bdaddr_type_is_valid(cp->addr.type))
2178 return cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2179 MGMT_STATUS_INVALID_PARAMS,
2184 if (!hdev_is_powered(hdev)) {
2185 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2186 MGMT_STATUS_NOT_POWERED, &rp, sizeof(rp));
/* io_cap 0x03 (NoInputNoOutput) cannot do MITM protection. */
2190 sec_level = BT_SECURITY_MEDIUM;
2191 if (cp->io_cap == 0x03)
2192 auth_type = HCI_AT_DEDICATED_BONDING;
2194 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
2196 if (cp->addr.type == BDADDR_BREDR)
2197 conn = hci_connect(hdev, ACL_LINK, &cp->addr.bdaddr,
2198 cp->addr.type, sec_level, auth_type);
2200 conn = hci_connect(hdev, LE_LINK, &cp->addr.bdaddr,
2201 cp->addr.type, sec_level, auth_type);
/* hci_connect() returned an error pointer. */
2206 if (PTR_ERR(conn) == -EBUSY)
2207 status = MGMT_STATUS_BUSY;
2209 status = MGMT_STATUS_CONNECT_FAILED;
2211 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
/* Existing callback means another pairing already owns the
 * connection: release the ref we just took and report busy.
 */
2217 if (conn->connect_cfm_cb) {
2218 hci_conn_drop(conn);
2219 err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2220 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2224 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2227 hci_conn_drop(conn);
2231 /* For LE, just connecting isn't a proof that the pairing finished */
2232 if (cp->addr.type == BDADDR_BREDR)
2233 conn->connect_cfm_cb = pairing_complete_cb;
2235 conn->connect_cfm_cb = le_connect_complete_cb;
2237 conn->security_cfm_cb = pairing_complete_cb;
2238 conn->disconn_cfm_cb = pairing_complete_cb;
2239 conn->io_capability = cp->io_cap;
2240 cmd->user_data = conn;
/* Connection already up and secure enough: finish immediately. */
2242 if (conn->state == BT_CONNECTED &&
2243 hci_conn_security(conn, sec_level, auth_type))
2244 pairing_complete(cmd, 0);
2249 hci_dev_unlock(hdev);
/* MGMT_OP_CANCEL_PAIR_DEVICE handler: abort an in-progress Pair
 * Device command, provided the supplied address matches the
 * connection being paired.
 */
2253 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2256 struct mgmt_addr_info *addr = data;
2257 struct pending_cmd *cmd;
2258 struct hci_conn *conn;
2265 if (!hdev_is_powered(hdev)) {
2266 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2267 MGMT_STATUS_NOT_POWERED);
/* No pending pairing at all: nothing to cancel. */
2271 cmd = mgmt_pending_find(MGMT_OP_PAIR_DEVICE, hdev);
2273 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2274 MGMT_STATUS_INVALID_PARAMS);
2278 conn = cmd->user_data;
/* The cancel must target the device actually being paired. */
2280 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
2281 err = cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2282 MGMT_STATUS_INVALID_PARAMS);
2286 pairing_complete(cmd, MGMT_STATUS_CANCELLED);
2288 err = cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
2289 addr, sizeof(*addr));
2291 hci_dev_unlock(hdev);
/* Common implementation for all user confirmation / passkey / PIN
 * (negative) replies. LE addresses are routed through SMP and
 * answered synchronously; BR/EDR replies are forwarded as the given
 * HCI command and tracked as a pending mgmt command.
 */
2295 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2296 struct mgmt_addr_info *addr, u16 mgmt_op,
2297 u16 hci_op, __le32 passkey)
2299 struct pending_cmd *cmd;
2300 struct hci_conn *conn;
2305 if (!hdev_is_powered(hdev)) {
2306 err = cmd_complete(sk, hdev->id, mgmt_op,
2307 MGMT_STATUS_NOT_POWERED, addr,
2312 if (addr->type == BDADDR_BREDR)
2313 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
2315 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
2318 err = cmd_complete(sk, hdev->id, mgmt_op,
2319 MGMT_STATUS_NOT_CONNECTED, addr,
2324 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2325 /* Continue with pairing via SMP */
2326 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
/* SMP result maps directly to success/failure reply. */
2329 err = cmd_complete(sk, hdev->id, mgmt_op,
2330 MGMT_STATUS_SUCCESS, addr,
2333 err = cmd_complete(sk, hdev->id, mgmt_op,
2334 MGMT_STATUS_FAILED, addr,
2340 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
2346 /* Continue with pairing via HCI */
/* Passkey replies carry bdaddr + passkey; all the other replies
 * carry only the bdaddr.
 */
2347 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
2348 struct hci_cp_user_passkey_reply cp;
2350 bacpy(&cp.bdaddr, &addr->bdaddr);
2351 cp.passkey = passkey;
2352 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
2354 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
2358 mgmt_pending_remove(cmd);
2361 hci_dev_unlock(hdev);
/* MGMT_OP_PIN_CODE_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the PIN-code-negative HCI opcode.
 */
2365 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2366 void *data, u16 len)
2368 struct mgmt_cp_pin_code_neg_reply *cp = data;
2372 return user_pairing_resp(sk, hdev, &cp->addr,
2373 MGMT_OP_PIN_CODE_NEG_REPLY,
2374 HCI_OP_PIN_CODE_NEG_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_REPLY handler: validates the exact payload
 * size, then delegates to user_pairing_resp().
 */
2377 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2380 struct mgmt_cp_user_confirm_reply *cp = data;
/* Fixed-size command: any other length is malformed. */
2384 if (len != sizeof(*cp))
2385 return cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
2386 MGMT_STATUS_INVALID_PARAMS);
2388 return user_pairing_resp(sk, hdev, &cp->addr,
2389 MGMT_OP_USER_CONFIRM_REPLY,
2390 HCI_OP_USER_CONFIRM_REPLY, 0);
/* MGMT_OP_USER_CONFIRM_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the negative-confirm HCI opcode.
 */
2393 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
2394 void *data, u16 len)
2396 struct mgmt_cp_user_confirm_neg_reply *cp = data;
2400 return user_pairing_resp(sk, hdev, &cp->addr,
2401 MGMT_OP_USER_CONFIRM_NEG_REPLY,
2402 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
/* MGMT_OP_USER_PASSKEY_REPLY handler: forwards the user-entered
 * passkey via user_pairing_resp().
 */
2405 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2408 struct mgmt_cp_user_passkey_reply *cp = data;
2412 return user_pairing_resp(sk, hdev, &cp->addr,
2413 MGMT_OP_USER_PASSKEY_REPLY,
2414 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
/* MGMT_OP_USER_PASSKEY_NEG_REPLY handler: thin wrapper over
 * user_pairing_resp() with the negative-passkey HCI opcode.
 */
2417 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
2418 void *data, u16 len)
2420 struct mgmt_cp_user_passkey_neg_reply *cp = data;
2424 return user_pairing_resp(sk, hdev, &cp->addr,
2425 MGMT_OP_USER_PASSKEY_NEG_REPLY,
2426 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
/* Queue an HCI Write Local Name command carrying the current
 * hdev->dev_name onto the given request.
 */
2429 static void update_name(struct hci_request *req)
2431 struct hci_dev *hdev = req->hdev;
2432 struct hci_cp_write_local_name cp;
2434 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
2436 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
/* HCI request callback for Set Local Name: reply to the pending mgmt
 * command with either an error status or the completed name data,
 * then drop the pending entry.
 */
2439 static void set_name_complete(struct hci_dev *hdev, u8 status)
2441 struct mgmt_cp_set_local_name *cp;
2442 struct pending_cmd *cmd;
2444 BT_DBG("status 0x%02x", status);
2448 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
2455 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
2456 mgmt_status(status));
2458 cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
2461 mgmt_pending_remove(cmd);
2464 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LOCAL_NAME handler: update the controller's local name
 * and short name. No-op fast path when nothing changed; when powered
 * off only the host copies are updated and a name-changed event is
 * broadcast; when powered the name is written to the controller.
 */
2467 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
2470 struct mgmt_cp_set_local_name *cp = data;
2471 struct pending_cmd *cmd;
2472 struct hci_request req;
2479 /* If the old values are the same as the new ones just return a
2480 * direct command complete event.
2482 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
2483 !memcmp(hdev->short_name, cp->short_name,
2484 sizeof(hdev->short_name))) {
2485 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Short name is host-only state; always store it. */
2490 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
2492 if (!hdev_is_powered(hdev)) {
2493 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2495 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
/* Inform other mgmt sockets of the new name. */
2500 err = mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, len,
2506 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
2512 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
2514 hci_req_init(&req, hdev);
/* BR/EDR: write the name register (and derived EIR, elided);
 * LE: refresh advertising data which may embed the name.
 */
2516 if (lmp_bredr_capable(hdev)) {
2521 if (lmp_le_capable(hdev))
2522 hci_update_ad(&req);
2524 err = hci_req_run(&req, set_name_complete);
2526 mgmt_pending_remove(cmd);
2529 hci_dev_unlock(hdev);
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: request the controller's OOB
 * hash/randomizer. Requires power and SSP support; only one read may
 * be pending at a time; the reply arrives asynchronously.
 */
2533 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
2534 void *data, u16 data_len)
2536 struct pending_cmd *cmd;
2539 BT_DBG("%s", hdev->name);
2543 if (!hdev_is_powered(hdev)) {
2544 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2545 MGMT_STATUS_NOT_POWERED);
/* OOB data is an SSP feature. */
2549 if (!lmp_ssp_capable(hdev)) {
2550 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2551 MGMT_STATUS_NOT_SUPPORTED);
2555 if (mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
2556 err = cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
2561 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
2567 err = hci_send_cmd(hdev, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
2569 mgmt_pending_remove(cmd);
2572 hci_dev_unlock(hdev);
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing
 * data (hash) received for a remote device; completes synchronously.
 */
2576 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2577 void *data, u16 len)
2579 struct mgmt_cp_add_remote_oob_data *cp = data;
2583 BT_DBG("%s ", hdev->name);
2587 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
2590 status = MGMT_STATUS_FAILED;
2592 status = MGMT_STATUS_SUCCESS;
2594 err = cmd_complete(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, status,
2595 &cp->addr, sizeof(cp->addr));
2597 hci_dev_unlock(hdev);
/* MGMT_OP_REMOVE_REMOTE_OOB_DATA handler: delete stored OOB data for
 * a remote device; a failed removal means no such entry existed.
 */
2601 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
2602 void *data, u16 len)
2604 struct mgmt_cp_remove_remote_oob_data *cp = data;
2608 BT_DBG("%s", hdev->name);
2612 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr);
2614 status = MGMT_STATUS_INVALID_PARAMS;
2616 status = MGMT_STATUS_SUCCESS;
2618 err = cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
2619 status, &cp->addr, sizeof(cp->addr));
2621 hci_dev_unlock(hdev);
/* Error path for Start Discovery: reset the discovery state machine
 * to STOPPED and complete the pending command with the mapped HCI
 * status, echoing the discovery type back.
 */
2625 static int mgmt_start_discovery_failed(struct hci_dev *hdev, u8 status)
2627 struct pending_cmd *cmd;
2631 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2633 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
2637 type = hdev->discovery.type;
2639 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2640 &type, sizeof(type));
2641 mgmt_pending_remove(cmd);
/* HCI request callback for Start Discovery: on failure unwind via
 * mgmt_start_discovery_failed(); on success mark the state FINDING
 * and, for LE-based scans, arm the le_scan_disable timeout that
 * bounds the scan duration.
 */
2646 static void start_discovery_complete(struct hci_dev *hdev, u8 status)
2648 BT_DBG("status %d", status);
2652 mgmt_start_discovery_failed(hdev, status);
2653 hci_dev_unlock(hdev);
2658 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2659 hci_dev_unlock(hdev);
2661 switch (hdev->discovery.type) {
2662 case DISCOV_TYPE_LE:
2663 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2667 case DISCOV_TYPE_INTERLEAVED:
2668 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2669 DISCOV_INTERLEAVED_TIMEOUT);
/* BR/EDR inquiry times out on its own; no timer needed. */
2672 case DISCOV_TYPE_BREDR:
2676 BT_ERR("Invalid discovery type %d", hdev->discovery.type);
/* MGMT_OP_START_DISCOVERY handler: begin device discovery of the
 * requested type. BR/EDR starts a general inquiry; LE and
 * interleaved start an active LE scan; interleaved additionally
 * requires BR/EDR support. Completion is reported via
 * start_discovery_complete().
 *
 * Fix: line 2780 contained the mis-encoded token "¶m_cp"
 * (HTML-entity corruption of "&param_cp"), which cannot compile;
 * restored to memset(&param_cp, ...).
 */
2680 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
2681 void *data, u16 len)
2683 struct mgmt_cp_start_discovery *cp = data;
2684 struct pending_cmd *cmd;
2685 struct hci_cp_le_set_scan_param param_cp;
2686 struct hci_cp_le_set_scan_enable enable_cp;
2687 struct hci_cp_inquiry inq_cp;
2688 struct hci_request req;
2689 /* General inquiry access code (GIAC) */
2690 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2693 BT_DBG("%s", hdev->name);
2697 if (!hdev_is_powered(hdev)) {
2698 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2699 MGMT_STATUS_NOT_POWERED);
/* Periodic inquiry and discovery are mutually exclusive. */
2703 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags)) {
2704 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
/* Only one discovery cycle at a time. */
2709 if (hdev->discovery.state != DISCOVERY_STOPPED) {
2710 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2715 cmd = mgmt_pending_add(sk, MGMT_OP_START_DISCOVERY, hdev, NULL, 0);
2721 hdev->discovery.type = cp->type;
2723 hci_req_init(&req, hdev);
2725 switch (hdev->discovery.type) {
2726 case DISCOV_TYPE_BREDR:
2727 if (!lmp_bredr_capable(hdev)) {
2728 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2729 MGMT_STATUS_NOT_SUPPORTED);
2730 mgmt_pending_remove(cmd);
2734 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2735 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2737 mgmt_pending_remove(cmd);
/* Start with a fresh inquiry cache for this cycle. */
2741 hci_inquiry_cache_flush(hdev);
2743 memset(&inq_cp, 0, sizeof(inq_cp));
2744 memcpy(&inq_cp.lap, lap, sizeof(inq_cp.lap));
2745 inq_cp.length = DISCOV_BREDR_INQUIRY_LEN;
2746 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(inq_cp), &inq_cp);
2749 case DISCOV_TYPE_LE:
2750 case DISCOV_TYPE_INTERLEAVED:
2751 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
2752 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2753 MGMT_STATUS_NOT_SUPPORTED);
2754 mgmt_pending_remove(cmd);
/* Interleaved needs BR/EDR for the inquiry half. */
2758 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
2759 !lmp_bredr_capable(hdev)) {
2760 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2761 MGMT_STATUS_NOT_SUPPORTED);
2762 mgmt_pending_remove(cmd);
/* Cannot scan while acting as an LE peripheral. */
2766 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags)) {
2767 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2768 MGMT_STATUS_REJECTED);
2769 mgmt_pending_remove(cmd);
2773 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
2774 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2776 mgmt_pending_remove(cmd);
/* Active scan with the standard discovery interval/window. */
2780 memset(&param_cp, 0, sizeof(param_cp));
2781 param_cp.type = LE_SCAN_ACTIVE;
2782 param_cp.interval = cpu_to_le16(DISCOV_LE_SCAN_INT);
2783 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2784 hci_req_add(&req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2787 memset(&enable_cp, 0, sizeof(enable_cp));
2788 enable_cp.enable = LE_SCAN_ENABLE;
2789 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2790 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
/* Unknown discovery type. */
2795 err = cmd_status(sk, hdev->id, MGMT_OP_START_DISCOVERY,
2796 MGMT_STATUS_INVALID_PARAMS);
2797 mgmt_pending_remove(cmd);
2801 err = hci_req_run(&req, start_discovery_complete);
2803 mgmt_pending_remove(cmd);
2805 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
2808 hci_dev_unlock(hdev);
/* Error path for Stop Discovery: complete the pending command with
 * the mapped HCI status, echoing the current discovery type.
 */
2812 static int mgmt_stop_discovery_failed(struct hci_dev *hdev, u8 status)
2814 struct pending_cmd *cmd;
2817 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
2821 err = cmd_complete(cmd->sk, hdev->id, cmd->opcode, mgmt_status(status),
2822 &hdev->discovery.type, sizeof(hdev->discovery.type));
2823 mgmt_pending_remove(cmd);
/* HCI request callback for Stop Discovery: report failure via
 * mgmt_stop_discovery_failed(), otherwise mark discovery STOPPED.
 */
2828 static void stop_discovery_complete(struct hci_dev *hdev, u8 status)
2830 BT_DBG("status %d", status);
2835 mgmt_stop_discovery_failed(hdev, status);
2839 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2842 hci_dev_unlock(hdev);
/* MGMT_OP_STOP_DISCOVERY handler: abort the current discovery cycle.
 * What gets cancelled depends on the state: an active inquiry or LE
 * scan (FINDING), or an outstanding remote-name request (RESOLVING).
 * The supplied type must match the running discovery's type.
 */
2845 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
2848 struct mgmt_cp_stop_discovery *mgmt_cp = data;
2849 struct pending_cmd *cmd;
2850 struct hci_cp_remote_name_req_cancel cp;
2851 struct inquiry_entry *e;
2852 struct hci_request req;
2853 struct hci_cp_le_set_scan_enable enable_cp;
2856 BT_DBG("%s", hdev->name);
2860 if (!hci_discovery_active(hdev)) {
2861 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2862 MGMT_STATUS_REJECTED, &mgmt_cp->type,
2863 sizeof(mgmt_cp->type));
/* The stop request must name the type that is running. */
2867 if (hdev->discovery.type != mgmt_cp->type) {
2868 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2869 MGMT_STATUS_INVALID_PARAMS, &mgmt_cp->type,
2870 sizeof(mgmt_cp->type));
2874 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, NULL, 0);
2880 hci_req_init(&req, hdev);
2882 switch (hdev->discovery.state) {
2883 case DISCOVERY_FINDING:
/* Cancel whichever scan is in progress: BR/EDR inquiry or
 * the LE scan (also disarm its disable timer).
 */
2884 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
2885 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2887 cancel_delayed_work(&hdev->le_scan_disable);
2889 memset(&enable_cp, 0, sizeof(enable_cp));
2890 enable_cp.enable = LE_SCAN_DISABLE;
2891 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE,
2892 sizeof(enable_cp), &enable_cp);
2897 case DISCOVERY_RESOLVING:
/* Find the entry whose name is being resolved; if none,
 * there is nothing to cancel and we can stop right away.
 */
2898 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2901 mgmt_pending_remove(cmd);
2902 err = cmd_complete(sk, hdev->id,
2903 MGMT_OP_STOP_DISCOVERY, 0,
2905 sizeof(mgmt_cp->type));
2906 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2910 bacpy(&cp.bdaddr, &e->data.bdaddr);
2911 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2917 BT_DBG("unknown discovery state %u", hdev->discovery.state);
2919 mgmt_pending_remove(cmd);
2920 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
2921 MGMT_STATUS_FAILED, &mgmt_cp->type,
2922 sizeof(mgmt_cp->type));
2926 err = hci_req_run(&req, stop_discovery_complete);
2928 mgmt_pending_remove(cmd);
2930 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
2933 hci_dev_unlock(hdev);
/* MGMT_OP_CONFIRM_NAME handler: userspace tells us whether it
 * already knows the name of a discovered device; if not, the entry
 * is queued for name resolution.
 */
2937 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
2940 struct mgmt_cp_confirm_name *cp = data;
2941 struct inquiry_entry *e;
2944 BT_DBG("%s", hdev->name);
/* Only meaningful while a discovery cycle is running. */
2948 if (!hci_discovery_active(hdev)) {
2949 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2950 MGMT_STATUS_FAILED);
2954 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
2956 err = cmd_status(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
2957 MGMT_STATUS_INVALID_PARAMS);
2961 if (cp->name_known) {
2962 e->name_state = NAME_KNOWN;
/* Name unknown: mark it NEEDED so resolution is scheduled. */
2965 e->name_state = NAME_NEEDED;
2966 hci_inquiry_cache_update_resolve(hdev, e);
2969 err = cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, &cp->addr,
2973 hci_dev_unlock(hdev);
/* MGMT_OP_BLOCK_DEVICE handler: add an address to the controller's
 * blacklist; a failed add is reported as FAILED (e.g. already
 * present).
 */
2977 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
2980 struct mgmt_cp_block_device *cp = data;
2984 BT_DBG("%s", hdev->name);
2986 if (!bdaddr_type_is_valid(cp->addr.type))
2987 return cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
2988 MGMT_STATUS_INVALID_PARAMS,
2989 &cp->addr, sizeof(cp->addr));
2993 err = hci_blacklist_add(hdev, &cp->addr.bdaddr, cp->addr.type);
2995 status = MGMT_STATUS_FAILED;
2997 status = MGMT_STATUS_SUCCESS;
2999 err = cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
3000 &cp->addr, sizeof(cp->addr));
3002 hci_dev_unlock(hdev);
/* MGMT_OP_UNBLOCK_DEVICE handler: remove an address from the
 * blacklist; a failed delete means the address was not blocked and
 * is reported as INVALID_PARAMS.
 */
3007 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
3010 struct mgmt_cp_unblock_device *cp = data;
3014 BT_DBG("%s", hdev->name);
3016 if (!bdaddr_type_is_valid(cp->addr.type))
3017 return cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
3018 MGMT_STATUS_INVALID_PARAMS,
3019 &cp->addr, sizeof(cp->addr));
3023 err = hci_blacklist_del(hdev, &cp->addr.bdaddr, cp->addr.type);
3025 status = MGMT_STATUS_INVALID_PARAMS;
3027 status = MGMT_STATUS_SUCCESS;
3029 err = cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
3030 &cp->addr, sizeof(cp->addr));
3032 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DEVICE_ID handler: store the Device ID record (source,
 * vendor, product, version). Replies immediately, then runs an HCI
 * request (elided here) to propagate the derived data.
 */
3037 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
3040 struct mgmt_cp_set_device_id *cp = data;
3041 struct hci_request req;
3045 BT_DBG("%s", hdev->name);
3047 source = __le16_to_cpu(cp->source);
/* Valid sources: 0x0000 (disabled), 0x0001 (SIG), 0x0002 (USB). */
3049 if (source > 0x0002)
3050 return cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
3051 MGMT_STATUS_INVALID_PARAMS);
3055 hdev->devid_source = source;
3056 hdev->devid_vendor = __le16_to_cpu(cp->vendor);
3057 hdev->devid_product = __le16_to_cpu(cp->product);
3058 hdev->devid_version = __le16_to_cpu(cp->version);
3060 err = cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, NULL, 0);
3062 hci_req_init(&req, hdev);
/* Fire-and-forget: no completion callback needed. */
3064 hci_req_run(&req, NULL);
3066 hci_dev_unlock(hdev);
/* HCI request callback for Set Fast Connectable: on success update
 * the HCI_FAST_CONNECTABLE flag from the requested mode, reply with
 * the new settings and broadcast them; on failure reply with the
 * mapped error status.
 */
3071 static void fast_connectable_complete(struct hci_dev *hdev, u8 status)
3073 struct pending_cmd *cmd;
3075 BT_DBG("status 0x%02x", status);
3079 cmd = mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3084 cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3085 mgmt_status(status));
/* Success: mirror the requested value into the dev flag. */
3087 struct mgmt_mode *cp = cmd->param;
3090 set_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3092 clear_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags);
3094 send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
3095 new_settings(hdev, cmd->sk);
3098 mgmt_pending_remove(cmd);
3101 hci_dev_unlock(hdev);
/* MGMT_OP_SET_FAST_CONNECTABLE handler: toggle fast-connectable page
 * scan parameters. Requires BR/EDR, controller version >= 1.2,
 * power, and the connectable setting to be enabled.
 */
3104 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
3105 void *data, u16 len)
3107 struct mgmt_mode *cp = data;
3108 struct pending_cmd *cmd;
3109 struct hci_request req;
3112 BT_DBG("%s", hdev->name);
/* Interlaced page scan needs BR/EDR and at least BT 1.2. */
3114 if (!lmp_bredr_capable(hdev) || hdev->hci_ver < BLUETOOTH_VER_1_2)
3115 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3116 MGMT_STATUS_NOT_SUPPORTED);
/* val is a boolean. */
3118 if (cp->val != 0x00 && cp->val != 0x01)
3119 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3120 MGMT_STATUS_INVALID_PARAMS);
3122 if (!hdev_is_powered(hdev))
3123 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3124 MGMT_STATUS_NOT_POWERED);
3126 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3127 return cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3128 MGMT_STATUS_REJECTED);
3132 if (mgmt_pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
3133 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* Already in the requested state: reply without touching HCI. */
3138 if (!!cp->val == test_bit(HCI_FAST_CONNECTABLE, &hdev->dev_flags)) {
3139 err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
3144 cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
3151 hci_req_init(&req, hdev);
3153 write_fast_connectable(&req, cp->val);
3155 err = hci_req_run(&req, fast_connectable_complete);
3157 err = cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
3158 MGMT_STATUS_FAILED);
3159 mgmt_pending_remove(cmd);
3163 hci_dev_unlock(hdev);
3168 static bool ltk_is_valid(struct mgmt_ltk_info *key)
3170 if (key->authenticated != 0x00 && key->authenticated != 0x01)
3172 if (key->master != 0x00 && key->master != 0x01)
3174 if (!bdaddr_type_is_le(key->addr.type))
3179 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
3180 void *cp_data, u16 len)
3182 struct mgmt_cp_load_long_term_keys *cp = cp_data;
3183 u16 key_count, expected_len;
3186 key_count = __le16_to_cpu(cp->key_count);
3188 expected_len = sizeof(*cp) + key_count *
3189 sizeof(struct mgmt_ltk_info);
3190 if (expected_len != len) {
3191 BT_ERR("load_keys: expected %u bytes, got %u bytes",
3193 return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
3194 MGMT_STATUS_INVALID_PARAMS);
3197 BT_DBG("%s key_count %u", hdev->name, key_count);
3199 for (i = 0; i < key_count; i++) {
3200 struct mgmt_ltk_info *key = &cp->keys[i];
3202 if (!ltk_is_valid(key))
3203 return cmd_status(sk, hdev->id,
3204 MGMT_OP_LOAD_LONG_TERM_KEYS,
3205 MGMT_STATUS_INVALID_PARAMS);
3210 hci_smp_ltks_clear(hdev);
3212 for (i = 0; i < key_count; i++) {
3213 struct mgmt_ltk_info *key = &cp->keys[i];
3219 type = HCI_SMP_LTK_SLAVE;
3221 hci_add_ltk(hdev, &key->addr.bdaddr,
3222 bdaddr_to_le(key->addr.type),
3223 type, 0, key->authenticated, key->val,
3224 key->enc_size, key->ediv, key->rand);
3227 err = cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
3230 hci_dev_unlock(hdev);
3235 static const struct mgmt_handler {
3236 int (*func) (struct sock *sk, struct hci_dev *hdev, void *data,
3240 } mgmt_handlers[] = {
3241 { NULL }, /* 0x0000 (no command) */
3242 { read_version, false, MGMT_READ_VERSION_SIZE },
3243 { read_commands, false, MGMT_READ_COMMANDS_SIZE },
3244 { read_index_list, false, MGMT_READ_INDEX_LIST_SIZE },
3245 { read_controller_info, false, MGMT_READ_INFO_SIZE },
3246 { set_powered, false, MGMT_SETTING_SIZE },
3247 { set_discoverable, false, MGMT_SET_DISCOVERABLE_SIZE },
3248 { set_connectable, false, MGMT_SETTING_SIZE },
3249 { set_fast_connectable, false, MGMT_SETTING_SIZE },
3250 { set_pairable, false, MGMT_SETTING_SIZE },
3251 { set_link_security, false, MGMT_SETTING_SIZE },
3252 { set_ssp, false, MGMT_SETTING_SIZE },
3253 { set_hs, false, MGMT_SETTING_SIZE },
3254 { set_le, false, MGMT_SETTING_SIZE },
3255 { set_dev_class, false, MGMT_SET_DEV_CLASS_SIZE },
3256 { set_local_name, false, MGMT_SET_LOCAL_NAME_SIZE },
3257 { add_uuid, false, MGMT_ADD_UUID_SIZE },
3258 { remove_uuid, false, MGMT_REMOVE_UUID_SIZE },
3259 { load_link_keys, true, MGMT_LOAD_LINK_KEYS_SIZE },
3260 { load_long_term_keys, true, MGMT_LOAD_LONG_TERM_KEYS_SIZE },
3261 { disconnect, false, MGMT_DISCONNECT_SIZE },
3262 { get_connections, false, MGMT_GET_CONNECTIONS_SIZE },
3263 { pin_code_reply, false, MGMT_PIN_CODE_REPLY_SIZE },
3264 { pin_code_neg_reply, false, MGMT_PIN_CODE_NEG_REPLY_SIZE },
3265 { set_io_capability, false, MGMT_SET_IO_CAPABILITY_SIZE },
3266 { pair_device, false, MGMT_PAIR_DEVICE_SIZE },
3267 { cancel_pair_device, false, MGMT_CANCEL_PAIR_DEVICE_SIZE },
3268 { unpair_device, false, MGMT_UNPAIR_DEVICE_SIZE },
3269 { user_confirm_reply, false, MGMT_USER_CONFIRM_REPLY_SIZE },
3270 { user_confirm_neg_reply, false, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
3271 { user_passkey_reply, false, MGMT_USER_PASSKEY_REPLY_SIZE },
3272 { user_passkey_neg_reply, false, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
3273 { read_local_oob_data, false, MGMT_READ_LOCAL_OOB_DATA_SIZE },
3274 { add_remote_oob_data, false, MGMT_ADD_REMOTE_OOB_DATA_SIZE },
3275 { remove_remote_oob_data, false, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
3276 { start_discovery, false, MGMT_START_DISCOVERY_SIZE },
3277 { stop_discovery, false, MGMT_STOP_DISCOVERY_SIZE },
3278 { confirm_name, false, MGMT_CONFIRM_NAME_SIZE },
3279 { block_device, false, MGMT_BLOCK_DEVICE_SIZE },
3280 { unblock_device, false, MGMT_UNBLOCK_DEVICE_SIZE },
3281 { set_device_id, false, MGMT_SET_DEVICE_ID_SIZE },
3285 int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
3289 struct mgmt_hdr *hdr;
3290 u16 opcode, index, len;
3291 struct hci_dev *hdev = NULL;
3292 const struct mgmt_handler *handler;
3295 BT_DBG("got %zu bytes", msglen);
3297 if (msglen < sizeof(*hdr))
3300 buf = kmalloc(msglen, GFP_KERNEL);
3304 if (memcpy_fromiovec(buf, msg->msg_iov, msglen)) {
3310 opcode = __le16_to_cpu(hdr->opcode);
3311 index = __le16_to_cpu(hdr->index);
3312 len = __le16_to_cpu(hdr->len);
3314 if (len != msglen - sizeof(*hdr)) {
3319 if (index != MGMT_INDEX_NONE) {
3320 hdev = hci_dev_get(index);
3322 err = cmd_status(sk, index, opcode,
3323 MGMT_STATUS_INVALID_INDEX);
3327 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3328 err = cmd_status(sk, index, opcode,
3329 MGMT_STATUS_INVALID_INDEX);
3334 if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
3335 mgmt_handlers[opcode].func == NULL) {
3336 BT_DBG("Unknown op %u", opcode);
3337 err = cmd_status(sk, index, opcode,
3338 MGMT_STATUS_UNKNOWN_COMMAND);
3342 if ((hdev && opcode < MGMT_OP_READ_INFO) ||
3343 (!hdev && opcode >= MGMT_OP_READ_INFO)) {
3344 err = cmd_status(sk, index, opcode,
3345 MGMT_STATUS_INVALID_INDEX);
3349 handler = &mgmt_handlers[opcode];
3351 if ((handler->var_len && len < handler->data_len) ||
3352 (!handler->var_len && len != handler->data_len)) {
3353 err = cmd_status(sk, index, opcode,
3354 MGMT_STATUS_INVALID_PARAMS);
3359 mgmt_init_hdev(sk, hdev);
3361 cp = buf + sizeof(*hdr);
3363 err = handler->func(sk, hdev, cp, len);
3377 static void cmd_status_rsp(struct pending_cmd *cmd, void *data)
3381 cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
3382 mgmt_pending_remove(cmd);
3385 int mgmt_index_added(struct hci_dev *hdev)
3387 if (!mgmt_valid_hdev(hdev))
3390 return mgmt_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, NULL);
3393 int mgmt_index_removed(struct hci_dev *hdev)
3395 u8 status = MGMT_STATUS_INVALID_INDEX;
3397 if (!mgmt_valid_hdev(hdev))
3400 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status);
3402 return mgmt_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, NULL);
3407 struct hci_dev *hdev;
3411 static void settings_rsp(struct pending_cmd *cmd, void *data)
3413 struct cmd_lookup *match = data;
3415 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
3417 list_del(&cmd->list);
3419 if (match->sk == NULL) {
3420 match->sk = cmd->sk;
3421 sock_hold(match->sk);
3424 mgmt_pending_free(cmd);
3427 static void set_bredr_scan(struct hci_request *req)
3429 struct hci_dev *hdev = req->hdev;
3432 /* Ensure that fast connectable is disabled. This function will
3433 * not do anything if the page scan parameters are already what
3436 write_fast_connectable(req, false);
3438 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3440 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3441 scan |= SCAN_INQUIRY;
3444 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
3447 static void powered_complete(struct hci_dev *hdev, u8 status)
3449 struct cmd_lookup match = { NULL, hdev };
3451 BT_DBG("status 0x%02x", status);
3455 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3457 new_settings(hdev, match.sk);
3459 hci_dev_unlock(hdev);
3465 static int powered_update_hci(struct hci_dev *hdev)
3467 struct hci_request req;
3470 hci_req_init(&req, hdev);
3472 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
3473 !lmp_host_ssp_capable(hdev)) {
3476 hci_req_add(&req, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
3479 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags) &&
3480 lmp_bredr_capable(hdev)) {
3481 struct hci_cp_write_le_host_supported cp;
3484 cp.simul = lmp_le_br_capable(hdev);
3486 /* Check first if we already have the right
3487 * host state (host features set)
3489 if (cp.le != lmp_host_le_capable(hdev) ||
3490 cp.simul != lmp_host_le_br_capable(hdev))
3491 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3495 link_sec = test_bit(HCI_LINK_SECURITY, &hdev->dev_flags);
3496 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
3497 hci_req_add(&req, HCI_OP_WRITE_AUTH_ENABLE,
3498 sizeof(link_sec), &link_sec);
3500 if (lmp_bredr_capable(hdev)) {
3501 set_bredr_scan(&req);
3507 return hci_req_run(&req, powered_complete);
3510 int mgmt_powered(struct hci_dev *hdev, u8 powered)
3512 struct cmd_lookup match = { NULL, hdev };
3513 u8 status_not_powered = MGMT_STATUS_NOT_POWERED;
3514 u8 zero_cod[] = { 0, 0, 0 };
3517 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3521 if (powered_update_hci(hdev) == 0)
3524 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp,
3529 mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
3530 mgmt_pending_foreach(0, hdev, cmd_status_rsp, &status_not_powered);
3532 if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0)
3533 mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
3534 zero_cod, sizeof(zero_cod), NULL);
3537 err = new_settings(hdev, match.sk);
3545 int mgmt_set_powered_failed(struct hci_dev *hdev, int err)
3547 struct pending_cmd *cmd;
3550 cmd = mgmt_pending_find(MGMT_OP_SET_POWERED, hdev);
3554 if (err == -ERFKILL)
3555 status = MGMT_STATUS_RFKILLED;
3557 status = MGMT_STATUS_FAILED;
3559 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
3561 mgmt_pending_remove(cmd);
3566 int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable)
3568 struct cmd_lookup match = { NULL, hdev };
3569 bool changed = false;
3573 if (!test_and_set_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3576 if (test_and_clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
3580 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev, settings_rsp,
3584 err = new_settings(hdev, match.sk);
3592 int mgmt_connectable(struct hci_dev *hdev, u8 connectable)
3594 struct pending_cmd *cmd;
3595 bool changed = false;
3599 if (!test_and_set_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3602 if (test_and_clear_bit(HCI_CONNECTABLE, &hdev->dev_flags))
3606 cmd = mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
3609 err = new_settings(hdev, cmd ? cmd->sk : NULL);
3614 int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status)
3616 u8 mgmt_err = mgmt_status(status);
3618 if (scan & SCAN_PAGE)
3619 mgmt_pending_foreach(MGMT_OP_SET_CONNECTABLE, hdev,
3620 cmd_status_rsp, &mgmt_err);
3622 if (scan & SCAN_INQUIRY)
3623 mgmt_pending_foreach(MGMT_OP_SET_DISCOVERABLE, hdev,
3624 cmd_status_rsp, &mgmt_err);
3629 int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
3632 struct mgmt_ev_new_link_key ev;
3634 memset(&ev, 0, sizeof(ev));
3636 ev.store_hint = persistent;
3637 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3638 ev.key.addr.type = BDADDR_BREDR;
3639 ev.key.type = key->type;
3640 memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
3641 ev.key.pin_len = key->pin_len;
3643 return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
3646 int mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, u8 persistent)
3648 struct mgmt_ev_new_long_term_key ev;
3650 memset(&ev, 0, sizeof(ev));
3652 ev.store_hint = persistent;
3653 bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
3654 ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
3655 ev.key.authenticated = key->authenticated;
3656 ev.key.enc_size = key->enc_size;
3657 ev.key.ediv = key->ediv;
3659 if (key->type == HCI_SMP_LTK)
3662 memcpy(ev.key.rand, key->rand, sizeof(key->rand));
3663 memcpy(ev.key.val, key->val, sizeof(key->val));
3665 return mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev),
3669 int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3670 u8 addr_type, u32 flags, u8 *name, u8 name_len,
3674 struct mgmt_ev_device_connected *ev = (void *) buf;
3677 bacpy(&ev->addr.bdaddr, bdaddr);
3678 ev->addr.type = link_to_bdaddr(link_type, addr_type);
3680 ev->flags = __cpu_to_le32(flags);
3683 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
3686 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
3687 eir_len = eir_append_data(ev->eir, eir_len,
3688 EIR_CLASS_OF_DEV, dev_class, 3);
3690 ev->eir_len = cpu_to_le16(eir_len);
3692 return mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
3693 sizeof(*ev) + eir_len, NULL);
3696 static void disconnect_rsp(struct pending_cmd *cmd, void *data)
3698 struct mgmt_cp_disconnect *cp = cmd->param;
3699 struct sock **sk = data;
3700 struct mgmt_rp_disconnect rp;
3702 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3703 rp.addr.type = cp->addr.type;
3705 cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT, 0, &rp,
3711 mgmt_pending_remove(cmd);
3714 static void unpair_device_rsp(struct pending_cmd *cmd, void *data)
3716 struct hci_dev *hdev = data;
3717 struct mgmt_cp_unpair_device *cp = cmd->param;
3718 struct mgmt_rp_unpair_device rp;
3720 memset(&rp, 0, sizeof(rp));
3721 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3722 rp.addr.type = cp->addr.type;
3724 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
3726 cmd_complete(cmd->sk, cmd->index, cmd->opcode, 0, &rp, sizeof(rp));
3728 mgmt_pending_remove(cmd);
3731 int mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
3732 u8 link_type, u8 addr_type, u8 reason)
3734 struct mgmt_ev_device_disconnected ev;
3735 struct sock *sk = NULL;
3738 mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
3740 bacpy(&ev.addr.bdaddr, bdaddr);
3741 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3744 err = mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev),
3750 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3756 int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
3757 u8 link_type, u8 addr_type, u8 status)
3759 struct mgmt_rp_disconnect rp;
3760 struct pending_cmd *cmd;
3763 mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
3766 cmd = mgmt_pending_find(MGMT_OP_DISCONNECT, hdev);
3770 bacpy(&rp.addr.bdaddr, bdaddr);
3771 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3773 err = cmd_complete(cmd->sk, cmd->index, MGMT_OP_DISCONNECT,
3774 mgmt_status(status), &rp, sizeof(rp));
3776 mgmt_pending_remove(cmd);
3781 int mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3782 u8 addr_type, u8 status)
3784 struct mgmt_ev_connect_failed ev;
3786 bacpy(&ev.addr.bdaddr, bdaddr);
3787 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3788 ev.status = mgmt_status(status);
3790 return mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
3793 int mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
3795 struct mgmt_ev_pin_code_request ev;
3797 bacpy(&ev.addr.bdaddr, bdaddr);
3798 ev.addr.type = BDADDR_BREDR;
3801 return mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev),
3805 int mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3808 struct pending_cmd *cmd;
3809 struct mgmt_rp_pin_code_reply rp;
3812 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
3816 bacpy(&rp.addr.bdaddr, bdaddr);
3817 rp.addr.type = BDADDR_BREDR;
3819 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
3820 mgmt_status(status), &rp, sizeof(rp));
3822 mgmt_pending_remove(cmd);
3827 int mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3830 struct pending_cmd *cmd;
3831 struct mgmt_rp_pin_code_reply rp;
3834 cmd = mgmt_pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
3838 bacpy(&rp.addr.bdaddr, bdaddr);
3839 rp.addr.type = BDADDR_BREDR;
3841 err = cmd_complete(cmd->sk, hdev->id, MGMT_OP_PIN_CODE_NEG_REPLY,
3842 mgmt_status(status), &rp, sizeof(rp));
3844 mgmt_pending_remove(cmd);
3849 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3850 u8 link_type, u8 addr_type, __le32 value,
3853 struct mgmt_ev_user_confirm_request ev;
3855 BT_DBG("%s", hdev->name);
3857 bacpy(&ev.addr.bdaddr, bdaddr);
3858 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3859 ev.confirm_hint = confirm_hint;
3862 return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
3866 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
3867 u8 link_type, u8 addr_type)
3869 struct mgmt_ev_user_passkey_request ev;
3871 BT_DBG("%s", hdev->name);
3873 bacpy(&ev.addr.bdaddr, bdaddr);
3874 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3876 return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
3880 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3881 u8 link_type, u8 addr_type, u8 status,
3884 struct pending_cmd *cmd;
3885 struct mgmt_rp_user_confirm_reply rp;
3888 cmd = mgmt_pending_find(opcode, hdev);
3892 bacpy(&rp.addr.bdaddr, bdaddr);
3893 rp.addr.type = link_to_bdaddr(link_type, addr_type);
3894 err = cmd_complete(cmd->sk, hdev->id, opcode, mgmt_status(status),
3897 mgmt_pending_remove(cmd);
3902 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3903 u8 link_type, u8 addr_type, u8 status)
3905 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3906 status, MGMT_OP_USER_CONFIRM_REPLY);
3909 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3910 u8 link_type, u8 addr_type, u8 status)
3912 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3914 MGMT_OP_USER_CONFIRM_NEG_REPLY);
3917 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3918 u8 link_type, u8 addr_type, u8 status)
3920 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3921 status, MGMT_OP_USER_PASSKEY_REPLY);
3924 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
3925 u8 link_type, u8 addr_type, u8 status)
3927 return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
3929 MGMT_OP_USER_PASSKEY_NEG_REPLY);
3932 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
3933 u8 link_type, u8 addr_type, u32 passkey,
3936 struct mgmt_ev_passkey_notify ev;
3938 BT_DBG("%s", hdev->name);
3940 bacpy(&ev.addr.bdaddr, bdaddr);
3941 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3942 ev.passkey = __cpu_to_le32(passkey);
3943 ev.entered = entered;
3945 return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
3948 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
3949 u8 addr_type, u8 status)
3951 struct mgmt_ev_auth_failed ev;
3953 bacpy(&ev.addr.bdaddr, bdaddr);
3954 ev.addr.type = link_to_bdaddr(link_type, addr_type);
3955 ev.status = mgmt_status(status);
3957 return mgmt_event(MGMT_EV_AUTH_FAILED, hdev, &ev, sizeof(ev), NULL);
3960 int mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
3962 struct cmd_lookup match = { NULL, hdev };
3963 bool changed = false;
3967 u8 mgmt_err = mgmt_status(status);
3968 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
3969 cmd_status_rsp, &mgmt_err);
3973 if (test_bit(HCI_AUTH, &hdev->flags)) {
3974 if (!test_and_set_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3977 if (test_and_clear_bit(HCI_LINK_SECURITY, &hdev->dev_flags))
3981 mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
3985 err = new_settings(hdev, match.sk);
3993 static void clear_eir(struct hci_request *req)
3995 struct hci_dev *hdev = req->hdev;
3996 struct hci_cp_write_eir cp;
3998 if (!lmp_ext_inq_capable(hdev))
4001 memset(hdev->eir, 0, sizeof(hdev->eir));
4003 memset(&cp, 0, sizeof(cp));
4005 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
4008 int mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4010 struct cmd_lookup match = { NULL, hdev };
4011 struct hci_request req;
4012 bool changed = false;
4016 u8 mgmt_err = mgmt_status(status);
4018 if (enable && test_and_clear_bit(HCI_SSP_ENABLED,
4020 err = new_settings(hdev, NULL);
4022 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
4029 if (!test_and_set_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4032 if (test_and_clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4036 mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
4039 err = new_settings(hdev, match.sk);
4044 hci_req_init(&req, hdev);
4046 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags))
4051 hci_req_run(&req, NULL);
4056 static void sk_lookup(struct pending_cmd *cmd, void *data)
4058 struct cmd_lookup *match = data;
4060 if (match->sk == NULL) {
4061 match->sk = cmd->sk;
4062 sock_hold(match->sk);
4066 int mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
4069 struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
4072 mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
4073 mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
4074 mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
4077 err = mgmt_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
4086 int mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
4088 struct mgmt_cp_set_local_name ev;
4089 struct pending_cmd *cmd;
4094 memset(&ev, 0, sizeof(ev));
4095 memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
4096 memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
4098 cmd = mgmt_pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
4100 memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
4102 /* If this is a HCI command related to powering on the
4103 * HCI dev don't send any mgmt signals.
4105 if (mgmt_pending_find(MGMT_OP_SET_POWERED, hdev))
4109 return mgmt_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
4110 cmd ? cmd->sk : NULL);
4113 int mgmt_read_local_oob_data_reply_complete(struct hci_dev *hdev, u8 *hash,
4114 u8 *randomizer, u8 status)
4116 struct pending_cmd *cmd;
4119 BT_DBG("%s status %u", hdev->name, status);
4121 cmd = mgmt_pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
4126 err = cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
4127 mgmt_status(status));
4129 struct mgmt_rp_read_local_oob_data rp;
4131 memcpy(rp.hash, hash, sizeof(rp.hash));
4132 memcpy(rp.randomizer, randomizer, sizeof(rp.randomizer));
4134 err = cmd_complete(cmd->sk, hdev->id,
4135 MGMT_OP_READ_LOCAL_OOB_DATA, 0, &rp,
4139 mgmt_pending_remove(cmd);
4144 int mgmt_le_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
4146 struct cmd_lookup match = { NULL, hdev };
4147 bool changed = false;
4151 u8 mgmt_err = mgmt_status(status);
4153 if (enable && test_and_clear_bit(HCI_LE_ENABLED,
4155 err = new_settings(hdev, NULL);
4157 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
4164 if (!test_and_set_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4167 if (test_and_clear_bit(HCI_LE_ENABLED, &hdev->dev_flags))
4171 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
4174 err = new_settings(hdev, match.sk);
4182 int mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4183 u8 addr_type, u8 *dev_class, s8 rssi, u8 cfm_name, u8
4184 ssp, u8 *eir, u16 eir_len)
4187 struct mgmt_ev_device_found *ev = (void *) buf;
4190 if (!hci_discovery_active(hdev))
4193 /* Leave 5 bytes for a potential CoD field */
4194 if (sizeof(*ev) + eir_len + 5 > sizeof(buf))
4197 memset(buf, 0, sizeof(buf));
4199 bacpy(&ev->addr.bdaddr, bdaddr);
4200 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4203 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_CONFIRM_NAME);
4205 ev->flags |= __constant_cpu_to_le32(MGMT_DEV_FOUND_LEGACY_PAIRING);
4208 memcpy(ev->eir, eir, eir_len);
4210 if (dev_class && !eir_has_data_type(ev->eir, eir_len, EIR_CLASS_OF_DEV))
4211 eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
4214 ev->eir_len = cpu_to_le16(eir_len);
4215 ev_size = sizeof(*ev) + eir_len;
4217 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
4220 int mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
4221 u8 addr_type, s8 rssi, u8 *name, u8 name_len)
4223 struct mgmt_ev_device_found *ev;
4224 char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
4227 ev = (struct mgmt_ev_device_found *) buf;
4229 memset(buf, 0, sizeof(buf));
4231 bacpy(&ev->addr.bdaddr, bdaddr);
4232 ev->addr.type = link_to_bdaddr(link_type, addr_type);
4235 eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
4238 ev->eir_len = cpu_to_le16(eir_len);
4240 return mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev,
4241 sizeof(*ev) + eir_len, NULL);
4244 int mgmt_discovering(struct hci_dev *hdev, u8 discovering)
4246 struct mgmt_ev_discovering ev;
4247 struct pending_cmd *cmd;
4249 BT_DBG("%s discovering %u", hdev->name, discovering);
4252 cmd = mgmt_pending_find(MGMT_OP_START_DISCOVERY, hdev);
4254 cmd = mgmt_pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4257 u8 type = hdev->discovery.type;
4259 cmd_complete(cmd->sk, hdev->id, cmd->opcode, 0, &type,
4261 mgmt_pending_remove(cmd);
4264 memset(&ev, 0, sizeof(ev));
4265 ev.type = hdev->discovery.type;
4266 ev.discovering = discovering;
4268 return mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
4271 int mgmt_device_blocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4273 struct pending_cmd *cmd;
4274 struct mgmt_ev_device_blocked ev;
4276 cmd = mgmt_pending_find(MGMT_OP_BLOCK_DEVICE, hdev);
4278 bacpy(&ev.addr.bdaddr, bdaddr);
4279 ev.addr.type = type;
4281 return mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &ev, sizeof(ev),
4282 cmd ? cmd->sk : NULL);
4285 int mgmt_device_unblocked(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4287 struct pending_cmd *cmd;
4288 struct mgmt_ev_device_unblocked ev;
4290 cmd = mgmt_pending_find(MGMT_OP_UNBLOCK_DEVICE, hdev);
4292 bacpy(&ev.addr.bdaddr, bdaddr);
4293 ev.addr.type = type;
4295 return mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &ev, sizeof(ev),
4296 cmd ? cmd->sk : NULL);
/* Module parameter toggling High Speed (AMP) support at load time */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed support");