Bluetooth: Add new mgmt_set_advertising command
[pandora-kernel.git] net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
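/* Illustrative usage sketch (not part of this file): a controller setup
 * routine can issue a command synchronously and parse the returned
 * Command Complete parameters, e.g.:
 *
 *      struct sk_buff *skb;
 *      struct hci_rp_read_local_version *rp;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      rp = (struct hci_rp_read_local_version *) skb->data;
 *      BT_DBG("hci_ver %u", rp->hci_ver);
 *      kfree_skb(skb);
 *
 * The caller owns the returned skb and must free it with kfree_skb().
 */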

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
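/* Illustrative example (not from this file): a request callback queues
 * commands on @req with hci_req_add(); hci_req_sync() then runs the
 * request and sleeps until the final command completes:
 *
 *      static void example_scan_req(struct hci_request *req,
 *                                   unsigned long opt)
 *      {
 *              __u8 scan = opt;
 *
 *              hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *      }
 *
 *      err = hci_req_sync(hdev, example_scan_req, SCAN_PAGE,
 *                         HCI_CMD_TIMEOUT);
 *
 * hci_scan_req() further below follows exactly this pattern.
 */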

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

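/* Pick the Write Inquiry Mode parameter: 0x02 selects inquiry results
 * with extended inquiry response (EIR), 0x01 selects inquiry results
 * with RSSI, and 0x00 the standard inquiry result format. The
 * manufacturer/revision checks below appear to whitelist controllers
 * that support RSSI reporting without advertising it in their feature
 * mask.
 */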
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
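/* Note: the LE event mask 0x1f set above corresponds to the five LE
 * meta events defined at this point of the core specification:
 * LE Connection Complete, LE Advertising Report, LE Connection Update
 * Complete, LE Read Remote Used Features Complete and LE Long Term Key
 * Request.
 */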

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
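/* Illustrative userspace sketch (assumed caller, not from this file):
 * hci_inquiry() is reached through the HCIINQUIRY ioctl on a raw HCI
 * socket, with the request header followed by room for the responses:
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[8];
 *      } buf;
 *
 *      memset(&buf, 0, sizeof(buf));
 *      buf.ir.dev_id  = 0;
 *      buf.ir.flags   = IREQ_CACHE_FLUSH;
 *      buf.ir.lap[0]  = 0x33;
 *      buf.ir.lap[1]  = 0x8b;
 *      buf.ir.lap[2]  = 0x9e;
 *      buf.ir.length  = 8;
 *      buf.ir.num_rsp = 8;
 *
 *      if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *              perror("HCIINQUIRY");
 *
 * where dd is a socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI) descriptor
 * and 0x9e8b33 is the General Inquiry Access Code.
 */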

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
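/* The buffer filled above is a sequence of AD structures, each encoded
 * as one length octet (covering the type octet plus the data), a type
 * octet and the data itself. An illustrative result for a device named
 * "hci0":
 *
 *      02 01 06                Flags: LE General Discoverable Mode,
 *                              BR/EDR Not Supported
 *      02 0a 00                TX Power Level: 0 dBm
 *      05 09 68 63 69 30       Complete Local Name: "hci0"
 */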
1153
1154 void hci_update_ad(struct hci_request *req)
1155 {
1156         struct hci_dev *hdev = req->hdev;
1157         struct hci_cp_le_set_adv_data cp;
1158         u8 len;
1159
1160         if (!lmp_le_capable(hdev))
1161                 return;
1162
1163         memset(&cp, 0, sizeof(cp));
1164
1165         len = create_ad(hdev, cp.data);
1166
1167         if (hdev->adv_data_len == len &&
1168             memcmp(cp.data, hdev->adv_data, len) == 0)
1169                 return;
1170
1171         memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1172         hdev->adv_data_len = len;
1173
1174         cp.length = len;
1175
1176         hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1177 }
1178
1179 /* ---- HCI ioctl helpers ---- */
1180
1181 int hci_dev_open(__u16 dev)
1182 {
1183         struct hci_dev *hdev;
1184         int ret = 0;
1185
1186         hdev = hci_dev_get(dev);
1187         if (!hdev)
1188                 return -ENODEV;
1189
1190         BT_DBG("%s %p", hdev->name, hdev);
1191
1192         hci_req_lock(hdev);
1193
1194         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1195                 ret = -ENODEV;
1196                 goto done;
1197         }
1198
1199         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1200                 ret = -ERFKILL;
1201                 goto done;
1202         }
1203
1204         if (test_bit(HCI_UP, &hdev->flags)) {
1205                 ret = -EALREADY;
1206                 goto done;
1207         }
1208
1209         if (hdev->open(hdev)) {
1210                 ret = -EIO;
1211                 goto done;
1212         }
1213
1214         atomic_set(&hdev->cmd_cnt, 1);
1215         set_bit(HCI_INIT, &hdev->flags);
1216
1217         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1218                 ret = hdev->setup(hdev);
1219
1220         if (!ret) {
1221                 /* Treat all non BR/EDR controllers as raw devices if
1222                  * enable_hs is not set.
1223                  */
1224                 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1225                         set_bit(HCI_RAW, &hdev->flags);
1226
1227                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1228                         set_bit(HCI_RAW, &hdev->flags);
1229
1230                 if (!test_bit(HCI_RAW, &hdev->flags) &&
1231                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1232                         ret = __hci_init(hdev);
1233         }
1234
1235         clear_bit(HCI_INIT, &hdev->flags);
1236
1237         if (!ret) {
1238                 hci_dev_hold(hdev);
1239                 set_bit(HCI_UP, &hdev->flags);
1240                 hci_notify(hdev, HCI_DEV_UP);
1241                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1242                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1243                     mgmt_valid_hdev(hdev)) {
1244                         hci_dev_lock(hdev);
1245                         mgmt_powered(hdev, 1);
1246                         hci_dev_unlock(hdev);
1247                 }
1248         } else {
1249                 /* Init failed, cleanup */
1250                 flush_work(&hdev->tx_work);
1251                 flush_work(&hdev->cmd_work);
1252                 flush_work(&hdev->rx_work);
1253
1254                 skb_queue_purge(&hdev->cmd_q);
1255                 skb_queue_purge(&hdev->rx_q);
1256
1257                 if (hdev->flush)
1258                         hdev->flush(hdev);
1259
1260                 if (hdev->sent_cmd) {
1261                         kfree_skb(hdev->sent_cmd);
1262                         hdev->sent_cmd = NULL;
1263                 }
1264
1265                 hdev->close(hdev);
1266                 hdev->flags = 0;
1267         }
1268
1269 done:
1270         hci_req_unlock(hdev);
1271         hci_dev_put(hdev);
1272         return ret;
1273 }
1274
1275 static int hci_dev_do_close(struct hci_dev *hdev)
1276 {
1277         BT_DBG("%s %p", hdev->name, hdev);
1278
1279         cancel_delayed_work(&hdev->power_off);
1280
1281         hci_req_cancel(hdev, ENODEV);
1282         hci_req_lock(hdev);
1283
1284         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1285                 del_timer_sync(&hdev->cmd_timer);
1286                 hci_req_unlock(hdev);
1287                 return 0;
1288         }
1289
1290         /* Flush RX and TX works */
1291         flush_work(&hdev->tx_work);
1292         flush_work(&hdev->rx_work);
1293
1294         if (hdev->discov_timeout > 0) {
1295                 cancel_delayed_work(&hdev->discov_off);
1296                 hdev->discov_timeout = 0;
1297                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1298         }
1299
1300         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1301                 cancel_delayed_work(&hdev->service_cache);
1302
1303         cancel_delayed_work_sync(&hdev->le_scan_disable);
1304
1305         hci_dev_lock(hdev);
1306         hci_inquiry_cache_flush(hdev);
1307         hci_conn_hash_flush(hdev);
1308         hci_dev_unlock(hdev);
1309
1310         hci_notify(hdev, HCI_DEV_DOWN);
1311
1312         if (hdev->flush)
1313                 hdev->flush(hdev);
1314
1315         /* Reset device */
1316         skb_queue_purge(&hdev->cmd_q);
1317         atomic_set(&hdev->cmd_cnt, 1);
1318         if (!test_bit(HCI_RAW, &hdev->flags) &&
1319             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1320                 set_bit(HCI_INIT, &hdev->flags);
1321                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1322                 clear_bit(HCI_INIT, &hdev->flags);
1323         }
1324
1325         /* flush cmd  work */
1326         flush_work(&hdev->cmd_work);
1327
1328         /* Drop queues */
1329         skb_queue_purge(&hdev->rx_q);
1330         skb_queue_purge(&hdev->cmd_q);
1331         skb_queue_purge(&hdev->raw_q);
1332
1333         /* Drop last sent command */
1334         if (hdev->sent_cmd) {
1335                 del_timer_sync(&hdev->cmd_timer);
1336                 kfree_skb(hdev->sent_cmd);
1337                 hdev->sent_cmd = NULL;
1338         }
1339
1340         kfree_skb(hdev->recv_evt);
1341         hdev->recv_evt = NULL;
1342
1343         /* After this point our queues are empty
1344          * and no tasks are scheduled. */
1345         hdev->close(hdev);
1346
1347         /* Clear flags */
1348         hdev->flags = 0;
1349         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1350
1351         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1352             mgmt_valid_hdev(hdev)) {
1353                 hci_dev_lock(hdev);
1354                 mgmt_powered(hdev, 0);
1355                 hci_dev_unlock(hdev);
1356         }
1357
1358         /* Controller radio is available but is currently powered down */
1359         hdev->amp_status = 0;
1360
1361         memset(hdev->eir, 0, sizeof(hdev->eir));
1362         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1363
1364         hci_req_unlock(hdev);
1365
1366         hci_dev_put(hdev);
1367         return 0;
1368 }
1369
1370 int hci_dev_close(__u16 dev)
1371 {
1372         struct hci_dev *hdev;
1373         int err;
1374
1375         hdev = hci_dev_get(dev);
1376         if (!hdev)
1377                 return -ENODEV;
1378
1379         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1380                 err = -EBUSY;
1381                 goto done;
1382         }
1383
1384         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1385                 cancel_delayed_work(&hdev->power_off);
1386
1387         err = hci_dev_do_close(hdev);
1388
1389 done:
1390         hci_dev_put(hdev);
1391         return err;
1392 }
1393
1394 int hci_dev_reset(__u16 dev)
1395 {
1396         struct hci_dev *hdev;
1397         int ret = 0;
1398
1399         hdev = hci_dev_get(dev);
1400         if (!hdev)
1401                 return -ENODEV;
1402
1403         hci_req_lock(hdev);
1404
1405         if (!test_bit(HCI_UP, &hdev->flags)) {
1406                 ret = -ENETDOWN;
1407                 goto done;
1408         }
1409
1410         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1411                 ret = -EBUSY;
1412                 goto done;
1413         }
1414
1415         /* Drop queues */
1416         skb_queue_purge(&hdev->rx_q);
1417         skb_queue_purge(&hdev->cmd_q);
1418
1419         hci_dev_lock(hdev);
1420         hci_inquiry_cache_flush(hdev);
1421         hci_conn_hash_flush(hdev);
1422         hci_dev_unlock(hdev);
1423
1424         if (hdev->flush)
1425                 hdev->flush(hdev);
1426
1427         atomic_set(&hdev->cmd_cnt, 1);
1428         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1429
1430         if (!test_bit(HCI_RAW, &hdev->flags))
1431                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1432
1433 done:
1434         hci_req_unlock(hdev);
1435         hci_dev_put(hdev);
1436         return ret;
1437 }
1438
1439 int hci_dev_reset_stat(__u16 dev)
1440 {
1441         struct hci_dev *hdev;
1442         int ret = 0;
1443
1444         hdev = hci_dev_get(dev);
1445         if (!hdev)
1446                 return -ENODEV;
1447
1448         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1449                 ret = -EBUSY;
1450                 goto done;
1451         }
1452
1453         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1454
1455 done:
1456         hci_dev_put(hdev);
1457         return ret;
1458 }
1459
1460 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1461 {
1462         struct hci_dev *hdev;
1463         struct hci_dev_req dr;
1464         int err = 0;
1465
1466         if (copy_from_user(&dr, arg, sizeof(dr)))
1467                 return -EFAULT;
1468
1469         hdev = hci_dev_get(dr.dev_id);
1470         if (!hdev)
1471                 return -ENODEV;
1472
1473         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1474                 err = -EBUSY;
1475                 goto done;
1476         }
1477
1478         switch (cmd) {
1479         case HCISETAUTH:
1480                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1481                                    HCI_INIT_TIMEOUT);
1482                 break;
1483
1484         case HCISETENCRYPT:
1485                 if (!lmp_encrypt_capable(hdev)) {
1486                         err = -EOPNOTSUPP;
1487                         break;
1488                 }
1489
1490                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1491                         /* Auth must be enabled first */
1492                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1493                                            HCI_INIT_TIMEOUT);
1494                         if (err)
1495                                 break;
1496                 }
1497
1498                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1499                                    HCI_INIT_TIMEOUT);
1500                 break;
1501
1502         case HCISETSCAN:
1503                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1504                                    HCI_INIT_TIMEOUT);
1505                 break;
1506
1507         case HCISETLINKPOL:
1508                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1509                                    HCI_INIT_TIMEOUT);
1510                 break;
1511
1512         case HCISETLINKMODE:
1513                 hdev->link_mode = ((__u16) dr.dev_opt) &
1514                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1515                 break;
1516
1517         case HCISETPTYPE:
1518                 hdev->pkt_type = (__u16) dr.dev_opt;
1519                 break;
1520
1521         case HCISETACLMTU:
1522                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1523                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1524                 break;
1525
1526         case HCISETSCOMTU:
1527                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1528                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1529                 break;
1530
1531         default:
1532                 err = -EINVAL;
1533                 break;
1534         }
1535
1536 done:
1537         hci_dev_put(hdev);
1538         return err;
1539 }
1540
1541 int hci_get_dev_list(void __user *arg)
1542 {
1543         struct hci_dev *hdev;
1544         struct hci_dev_list_req *dl;
1545         struct hci_dev_req *dr;
1546         int n = 0, size, err;
1547         __u16 dev_num;
1548
1549         if (get_user(dev_num, (__u16 __user *) arg))
1550                 return -EFAULT;
1551
1552         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1553                 return -EINVAL;
1554
1555         size = sizeof(*dl) + dev_num * sizeof(*dr);
1556
1557         dl = kzalloc(size, GFP_KERNEL);
1558         if (!dl)
1559                 return -ENOMEM;
1560
1561         dr = dl->dev_req;
1562
1563         read_lock(&hci_dev_list_lock);
1564         list_for_each_entry(hdev, &hci_dev_list, list) {
1565                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1566                         cancel_delayed_work(&hdev->power_off);
1567
1568                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1569                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1570
1571                 (dr + n)->dev_id  = hdev->id;
1572                 (dr + n)->dev_opt = hdev->flags;
1573
1574                 if (++n >= dev_num)
1575                         break;
1576         }
1577         read_unlock(&hci_dev_list_lock);
1578
1579         dl->dev_num = n;
1580         size = sizeof(*dl) + n * sizeof(*dr);
1581
1582         err = copy_to_user(arg, dl, size);
1583         kfree(dl);
1584
1585         return err ? -EFAULT : 0;
1586 }
1587
1588 int hci_get_dev_info(void __user *arg)
1589 {
1590         struct hci_dev *hdev;
1591         struct hci_dev_info di;
1592         int err = 0;
1593
1594         if (copy_from_user(&di, arg, sizeof(di)))
1595                 return -EFAULT;
1596
1597         hdev = hci_dev_get(di.dev_id);
1598         if (!hdev)
1599                 return -ENODEV;
1600
1601         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1602                 cancel_delayed_work_sync(&hdev->power_off);
1603
1604         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1605                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1606
1607         strcpy(di.name, hdev->name);
1608         di.bdaddr   = hdev->bdaddr;
1609         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1610         di.flags    = hdev->flags;
1611         di.pkt_type = hdev->pkt_type;
1612         if (lmp_bredr_capable(hdev)) {
1613                 di.acl_mtu  = hdev->acl_mtu;
1614                 di.acl_pkts = hdev->acl_pkts;
1615                 di.sco_mtu  = hdev->sco_mtu;
1616                 di.sco_pkts = hdev->sco_pkts;
1617         } else {
1618                 di.acl_mtu  = hdev->le_mtu;
1619                 di.acl_pkts = hdev->le_pkts;
1620                 di.sco_mtu  = 0;
1621                 di.sco_pkts = 0;
1622         }
1623         di.link_policy = hdev->link_policy;
1624         di.link_mode   = hdev->link_mode;
1625
1626         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1627         memcpy(&di.features, &hdev->features, sizeof(di.features));
1628
1629         if (copy_to_user(arg, &di, sizeof(di)))
1630                 err = -EFAULT;
1631
1632         hci_dev_put(hdev);
1633
1634         return err;
1635 }
1636
1637 /* ---- Interface to HCI drivers ---- */
1638
1639 static int hci_rfkill_set_block(void *data, bool blocked)
1640 {
1641         struct hci_dev *hdev = data;
1642
1643         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1644
1645         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1646                 return -EBUSY;
1647
1648         if (!blocked)
1649                 return 0;
1650
1651         hci_dev_do_close(hdev);
1652
1653         return 0;
1654 }
1655
1656 static const struct rfkill_ops hci_rfkill_ops = {
1657         .set_block = hci_rfkill_set_block,
1658 };
1659
1660 static void hci_power_on(struct work_struct *work)
1661 {
1662         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1663         int err;
1664
1665         BT_DBG("%s", hdev->name);
1666
1667         err = hci_dev_open(hdev->id);
1668         if (err < 0) {
1669                 mgmt_set_powered_failed(hdev, err);
1670                 return;
1671         }
1672
1673         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1674                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1675                                    HCI_AUTO_OFF_TIMEOUT);
1676
1677         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1678                 mgmt_index_added(hdev);
1679 }
1680
1681 static void hci_power_off(struct work_struct *work)
1682 {
1683         struct hci_dev *hdev = container_of(work, struct hci_dev,
1684                                             power_off.work);
1685
1686         BT_DBG("%s", hdev->name);
1687
1688         hci_dev_do_close(hdev);
1689 }
1690
1691 static void hci_discov_off(struct work_struct *work)
1692 {
1693         struct hci_dev *hdev;
1694         u8 scan = SCAN_PAGE;
1695
1696         hdev = container_of(work, struct hci_dev, discov_off.work);
1697
1698         BT_DBG("%s", hdev->name);
1699
1700         hci_dev_lock(hdev);
1701
1702         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1703
1704         hdev->discov_timeout = 0;
1705
1706         hci_dev_unlock(hdev);
1707 }
1708
1709 int hci_uuids_clear(struct hci_dev *hdev)
1710 {
1711         struct bt_uuid *uuid, *tmp;
1712
1713         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1714                 list_del(&uuid->list);
1715                 kfree(uuid);
1716         }
1717
1718         return 0;
1719 }
1720
1721 int hci_link_keys_clear(struct hci_dev *hdev)
1722 {
1723         struct list_head *p, *n;
1724
1725         list_for_each_safe(p, n, &hdev->link_keys) {
1726                 struct link_key *key;
1727
1728                 key = list_entry(p, struct link_key, list);
1729
1730                 list_del(p);
1731                 kfree(key);
1732         }
1733
1734         return 0;
1735 }
1736
1737 int hci_smp_ltks_clear(struct hci_dev *hdev)
1738 {
1739         struct smp_ltk *k, *tmp;
1740
1741         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1742                 list_del(&k->list);
1743                 kfree(k);
1744         }
1745
1746         return 0;
1747 }
1748
1749 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1750 {
1751         struct link_key *k;
1752
1753         list_for_each_entry(k, &hdev->link_keys, list)
1754                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1755                         return k;
1756
1757         return NULL;
1758 }
1759
1760 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1761                                u8 key_type, u8 old_key_type)
1762 {
1763         /* Legacy key */
1764         if (key_type < 0x03)
1765                 return true;
1766
1767         /* Debug keys are insecure so don't store them persistently */
1768         if (key_type == HCI_LK_DEBUG_COMBINATION)
1769                 return false;
1770
1771         /* Changed combination key and there's no previous one */
1772         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1773                 return false;
1774
1775         /* Security mode 3 case */
1776         if (!conn)
1777                 return true;
1778
1779         /* Neither local nor remote side requested no-bonding */
1780         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1781                 return true;
1782
1783         /* Local side had dedicated bonding as requirement */
1784         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1785                 return true;
1786
1787         /* Remote side had dedicated bonding as requirement */
1788         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1789                 return true;
1790
1791         /* If none of the above criteria match, then don't store the key
1792          * persistently */
1793         return false;
1794 }
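
/* Worked examples of the rules above, using the link key type values
 * from the HCI specification (an editorial sketch, not exhaustive):
 * a combination key from legacy pairing (0x00) is always stored; a
 * debug combination key (0x03) is never stored; an unauthenticated
 * combination key (0x04) is stored only if both sides requested some
 * form of bonding (auth_type and remote_auth above 0x01) or either
 * side requested dedicated bonding (0x02 or 0x03).
 */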
1795
1796 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1797 {
1798         struct smp_ltk *k;
1799
1800         list_for_each_entry(k, &hdev->long_term_keys, list) {
1801                 if (k->ediv != ediv ||
1802                     memcmp(rand, k->rand, sizeof(k->rand)))
1803                         continue;
1804
1805                 return k;
1806         }
1807
1808         return NULL;
1809 }
1810
1811 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1812                                      u8 addr_type)
1813 {
1814         struct smp_ltk *k;
1815
1816         list_for_each_entry(k, &hdev->long_term_keys, list)
1817                 if (addr_type == k->bdaddr_type &&
1818                     bacmp(bdaddr, &k->bdaddr) == 0)
1819                         return k;
1820
1821         return NULL;
1822 }
1823
1824 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1825                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1826 {
1827         struct link_key *key, *old_key;
1828         u8 old_key_type;
1829         bool persistent;
1830
1831         old_key = hci_find_link_key(hdev, bdaddr);
1832         if (old_key) {
1833                 old_key_type = old_key->type;
1834                 key = old_key;
1835         } else {
1836                 old_key_type = conn ? conn->key_type : 0xff;
1837                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1838                 if (!key)
1839                         return -ENOMEM;
1840                 list_add(&key->list, &hdev->link_keys);
1841         }
1842
1843         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1844
1845         /* Some buggy controller combinations generate a changed
1846          * combination key for legacy pairing even when there's no
1847          * previous key */
1848         if (type == HCI_LK_CHANGED_COMBINATION &&
1849             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1850                 type = HCI_LK_COMBINATION;
1851                 if (conn)
1852                         conn->key_type = type;
1853         }
1854
1855         bacpy(&key->bdaddr, bdaddr);
1856         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1857         key->pin_len = pin_len;
1858
1859         if (type == HCI_LK_CHANGED_COMBINATION)
1860                 key->type = old_key_type;
1861         else
1862                 key->type = type;
1863
1864         if (!new_key)
1865                 return 0;
1866
1867         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1868
1869         mgmt_new_link_key(hdev, key, persistent);
1870
1871         if (conn)
1872                 conn->flush_key = !persistent;
1873
1874         return 0;
1875 }
1876
1877 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1878                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1879                 ediv, u8 rand[8])
1880 {
1881         struct smp_ltk *key, *old_key;
1882
1883         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1884                 return 0;
1885
1886         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1887         if (old_key)
1888                 key = old_key;
1889         else {
1890                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1891                 if (!key)
1892                         return -ENOMEM;
1893                 list_add(&key->list, &hdev->long_term_keys);
1894         }
1895
1896         bacpy(&key->bdaddr, bdaddr);
1897         key->bdaddr_type = addr_type;
1898         memcpy(key->val, tk, sizeof(key->val));
1899         key->authenticated = authenticated;
1900         key->ediv = ediv;
1901         key->enc_size = enc_size;
1902         key->type = type;
1903         memcpy(key->rand, rand, sizeof(key->rand));
1904
1905         if (!new_key)
1906                 return 0;
1907
1908         if (type & HCI_SMP_LTK)
1909                 mgmt_new_ltk(hdev, key, 1);
1910
1911         return 0;
1912 }
1913
1914 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1915 {
1916         struct link_key *key;
1917
1918         key = hci_find_link_key(hdev, bdaddr);
1919         if (!key)
1920                 return -ENOENT;
1921
1922         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1923
1924         list_del(&key->list);
1925         kfree(key);
1926
1927         return 0;
1928 }
1929
1930 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1931 {
1932         struct smp_ltk *k, *tmp;
1933
1934         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1935                 if (bacmp(bdaddr, &k->bdaddr))
1936                         continue;
1937
1938                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1939
1940                 list_del(&k->list);
1941                 kfree(k);
1942         }
1943
1944         return 0;
1945 }
1946
1947 /* HCI command timer function */
1948 static void hci_cmd_timeout(unsigned long arg)
1949 {
1950         struct hci_dev *hdev = (void *) arg;
1951
1952         if (hdev->sent_cmd) {
1953                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1954                 u16 opcode = __le16_to_cpu(sent->opcode);
1955
1956                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1957         } else {
1958                 BT_ERR("%s command tx timeout", hdev->name);
1959         }
1960
1961         atomic_set(&hdev->cmd_cnt, 1);
1962         queue_work(hdev->workqueue, &hdev->cmd_work);
1963 }
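
/* Editorial note: the timer above is (re)armed by the command transmit
 * path each time a command is handed to the driver, roughly as in the
 * sketch below (see hci_cmd_work() later in this file for the actual
 * logic):
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *
 * When it fires, the controller failed to answer the last command, so
 * the handler forces cmd_cnt back to 1 and requeues cmd_work to keep
 * the command queue from stalling behind the lost response.
 */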
1964
1965 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1966                                           bdaddr_t *bdaddr)
1967 {
1968         struct oob_data *data;
1969
1970         list_for_each_entry(data, &hdev->remote_oob_data, list)
1971                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1972                         return data;
1973
1974         return NULL;
1975 }
1976
1977 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1978 {
1979         struct oob_data *data;
1980
1981         data = hci_find_remote_oob_data(hdev, bdaddr);
1982         if (!data)
1983                 return -ENOENT;
1984
1985         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1986
1987         list_del(&data->list);
1988         kfree(data);
1989
1990         return 0;
1991 }
1992
1993 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1994 {
1995         struct oob_data *data, *n;
1996
1997         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1998                 list_del(&data->list);
1999                 kfree(data);
2000         }
2001
2002         return 0;
2003 }
2004
2005 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2006                             u8 *randomizer)
2007 {
2008         struct oob_data *data;
2009
2010         data = hci_find_remote_oob_data(hdev, bdaddr);
2011
2012         if (!data) {
2013                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2014                 if (!data)
2015                         return -ENOMEM;
2016
2017                 bacpy(&data->bdaddr, bdaddr);
2018                 list_add(&data->list, &hdev->remote_oob_data);
2019         }
2020
2021         memcpy(data->hash, hash, sizeof(data->hash));
2022         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2023
2024         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2025
2026         return 0;
2027 }
2028
2029 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
2030 {
2031         struct bdaddr_list *b;
2032
2033         list_for_each_entry(b, &hdev->blacklist, list)
2034                 if (bacmp(bdaddr, &b->bdaddr) == 0)
2035                         return b;
2036
2037         return NULL;
2038 }
2039
2040 int hci_blacklist_clear(struct hci_dev *hdev)
2041 {
2042         struct list_head *p, *n;
2043
2044         list_for_each_safe(p, n, &hdev->blacklist) {
2045                 struct bdaddr_list *b;
2046
2047                 b = list_entry(p, struct bdaddr_list, list);
2048
2049                 list_del(p);
2050                 kfree(b);
2051         }
2052
2053         return 0;
2054 }
2055
2056 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2057 {
2058         struct bdaddr_list *entry;
2059
2060         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2061                 return -EBADF;
2062
2063         if (hci_blacklist_lookup(hdev, bdaddr))
2064                 return -EEXIST;
2065
2066         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2067         if (!entry)
2068                 return -ENOMEM;
2069
2070         bacpy(&entry->bdaddr, bdaddr);
2071
2072         list_add(&entry->list, &hdev->blacklist);
2073
2074         return mgmt_device_blocked(hdev, bdaddr, type);
2075 }
2076
2077 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2078 {
2079         struct bdaddr_list *entry;
2080
2081         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2082                 return hci_blacklist_clear(hdev);
2083
2084         entry = hci_blacklist_lookup(hdev, bdaddr);
2085         if (!entry)
2086                 return -ENOENT;
2087
2088         list_del(&entry->list);
2089         kfree(entry);
2090
2091         return mgmt_device_unblocked(hdev, bdaddr, type);
2092 }
2093
2094 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2095 {
2096         if (status) {
2097                 BT_ERR("Failed to start inquiry: status %d", status);
2098
2099                 hci_dev_lock(hdev);
2100                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2101                 hci_dev_unlock(hdev);
2102                 return;
2103         }
2104 }
2105
2106 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2107 {
2108         /* General inquiry access code (GIAC) */
2109         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2110         struct hci_request req;
2111         struct hci_cp_inquiry cp;
2112         int err;
2113
2114         if (status) {
2115                 BT_ERR("Failed to disable LE scanning: status %d", status);
2116                 return;
2117         }
2118
2119         switch (hdev->discovery.type) {
2120         case DISCOV_TYPE_LE:
2121                 hci_dev_lock(hdev);
2122                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2123                 hci_dev_unlock(hdev);
2124                 break;
2125
2126         case DISCOV_TYPE_INTERLEAVED:
2127                 hci_req_init(&req, hdev);
2128
2129                 memset(&cp, 0, sizeof(cp));
2130                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2131                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2132                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2133
2134                 hci_dev_lock(hdev);
2135
2136                 hci_inquiry_cache_flush(hdev);
2137
2138                 err = hci_req_run(&req, inquiry_complete);
2139                 if (err) {
2140                         BT_ERR("Inquiry request failed: err %d", err);
2141                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2142                 }
2143
2144                 hci_dev_unlock(hdev);
2145                 break;
2146         }
2147 }
2148
2149 static void le_scan_disable_work(struct work_struct *work)
2150 {
2151         struct hci_dev *hdev = container_of(work, struct hci_dev,
2152                                             le_scan_disable.work);
2153         struct hci_cp_le_set_scan_enable cp;
2154         struct hci_request req;
2155         int err;
2156
2157         BT_DBG("%s", hdev->name);
2158
2159         hci_req_init(&req, hdev);
2160
2161         memset(&cp, 0, sizeof(cp));
2162         cp.enable = LE_SCAN_DISABLE;
2163         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2164
2165         err = hci_req_run(&req, le_scan_disable_work_complete);
2166         if (err)
2167                 BT_ERR("Disable LE scanning request failed: err %d", err);
2168 }
2169
2170 /* Alloc HCI device */
2171 struct hci_dev *hci_alloc_dev(void)
2172 {
2173         struct hci_dev *hdev;
2174
2175         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2176         if (!hdev)
2177                 return NULL;
2178
2179         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2180         hdev->esco_type = (ESCO_HV1);
2181         hdev->link_mode = (HCI_LM_ACCEPT);
2182         hdev->io_capability = 0x03; /* No Input No Output */
2183         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2184         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2185
2186         hdev->sniff_max_interval = 800;
2187         hdev->sniff_min_interval = 80;
2188
2189         mutex_init(&hdev->lock);
2190         mutex_init(&hdev->req_lock);
2191
2192         INIT_LIST_HEAD(&hdev->mgmt_pending);
2193         INIT_LIST_HEAD(&hdev->blacklist);
2194         INIT_LIST_HEAD(&hdev->uuids);
2195         INIT_LIST_HEAD(&hdev->link_keys);
2196         INIT_LIST_HEAD(&hdev->long_term_keys);
2197         INIT_LIST_HEAD(&hdev->remote_oob_data);
2198         INIT_LIST_HEAD(&hdev->conn_hash.list);
2199
2200         INIT_WORK(&hdev->rx_work, hci_rx_work);
2201         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2202         INIT_WORK(&hdev->tx_work, hci_tx_work);
2203         INIT_WORK(&hdev->power_on, hci_power_on);
2204
2205         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2206         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2207         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2208
2209         skb_queue_head_init(&hdev->rx_q);
2210         skb_queue_head_init(&hdev->cmd_q);
2211         skb_queue_head_init(&hdev->raw_q);
2212
2213         init_waitqueue_head(&hdev->req_wait_q);
2214
2215         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2216
2217         hci_init_sysfs(hdev);
2218         discovery_init(hdev);
2219
2220         return hdev;
2221 }
2222 EXPORT_SYMBOL(hci_alloc_dev);
2223
2224 /* Free HCI device */
2225 void hci_free_dev(struct hci_dev *hdev)
2226 {
2227         /* will free via device release */
2228         put_device(&hdev->dev);
2229 }
2230 EXPORT_SYMBOL(hci_free_dev);
2231
2232 /* Register HCI device */
2233 int hci_register_dev(struct hci_dev *hdev)
2234 {
2235         int id, error;
2236
2237         if (!hdev->open || !hdev->close)
2238                 return -EINVAL;
2239
2240         /* Do not allow HCI_AMP devices to register at index 0,
2241          * so the index can be used as the AMP controller ID.
2242          */
2243         switch (hdev->dev_type) {
2244         case HCI_BREDR:
2245                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2246                 break;
2247         case HCI_AMP:
2248                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2249                 break;
2250         default:
2251                 return -EINVAL;
2252         }
2253
2254         if (id < 0)
2255                 return id;
2256
2257         sprintf(hdev->name, "hci%d", id);
2258         hdev->id = id;
2259
2260         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2261
2262         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2263                                           WQ_MEM_RECLAIM, 1, hdev->name);
2264         if (!hdev->workqueue) {
2265                 error = -ENOMEM;
2266                 goto err;
2267         }
2268
2269         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2270                                               WQ_MEM_RECLAIM, 1, hdev->name);
2271         if (!hdev->req_workqueue) {
2272                 destroy_workqueue(hdev->workqueue);
2273                 error = -ENOMEM;
2274                 goto err;
2275         }
2276
2277         error = hci_add_sysfs(hdev);
2278         if (error < 0)
2279                 goto err_wqueue;
2280
2281         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2282                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2283                                     hdev);
2284         if (hdev->rfkill) {
2285                 if (rfkill_register(hdev->rfkill) < 0) {
2286                         rfkill_destroy(hdev->rfkill);
2287                         hdev->rfkill = NULL;
2288                 }
2289         }
2290
2291         set_bit(HCI_SETUP, &hdev->dev_flags);
2292
2293         if (hdev->dev_type != HCI_AMP)
2294                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2295
2296         write_lock(&hci_dev_list_lock);
2297         list_add(&hdev->list, &hci_dev_list);
2298         write_unlock(&hci_dev_list_lock);
2299
2300         hci_notify(hdev, HCI_DEV_REG);
2301         hci_dev_hold(hdev);
2302
2303         queue_work(hdev->req_workqueue, &hdev->power_on);
2304
2305         return id;
2306
2307 err_wqueue:
2308         destroy_workqueue(hdev->workqueue);
2309         destroy_workqueue(hdev->req_workqueue);
2310 err:
2311         ida_simple_remove(&hci_index_ida, hdev->id);
2312
2313         return error;
2314 }
2315 EXPORT_SYMBOL(hci_register_dev);
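
/* Editorial sketch of the driver-side contract for the API above (a
 * hypothetical transport, not a real driver): allocate an hdev, fill
 * in the mandatory open/close/send callbacks plus the bus type, then
 * register it. On failure the hdev must be freed by the caller.
 */
static int example_open(struct hci_dev *hdev)
{
	/* A real driver would power up its transport here */
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	/* ... and shut it down here */
	return 0;
}

static int example_send(struct sk_buff *skb)
{
	/* A real driver would push the skb to the hardware */
	kfree_skb(skb);
	return 0;
}

static int __maybe_unused example_register(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;		/* whichever bus the transport uses */
	hdev->dev_type = HCI_BREDR;	/* already the kzalloc default */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}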
2316
2317 /* Unregister HCI device */
2318 void hci_unregister_dev(struct hci_dev *hdev)
2319 {
2320         int i, id;
2321
2322         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2323
2324         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2325
2326         id = hdev->id;
2327
2328         write_lock(&hci_dev_list_lock);
2329         list_del(&hdev->list);
2330         write_unlock(&hci_dev_list_lock);
2331
2332         hci_dev_do_close(hdev);
2333
2334         for (i = 0; i < NUM_REASSEMBLY; i++)
2335                 kfree_skb(hdev->reassembly[i]);
2336
2337         cancel_work_sync(&hdev->power_on);
2338
2339         if (!test_bit(HCI_INIT, &hdev->flags) &&
2340             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2341                 hci_dev_lock(hdev);
2342                 mgmt_index_removed(hdev);
2343                 hci_dev_unlock(hdev);
2344         }
2345
2346         /* mgmt_index_removed should take care of emptying the
2347          * pending list */
2348         BUG_ON(!list_empty(&hdev->mgmt_pending));
2349
2350         hci_notify(hdev, HCI_DEV_UNREG);
2351
2352         if (hdev->rfkill) {
2353                 rfkill_unregister(hdev->rfkill);
2354                 rfkill_destroy(hdev->rfkill);
2355         }
2356
2357         hci_del_sysfs(hdev);
2358
2359         destroy_workqueue(hdev->workqueue);
2360         destroy_workqueue(hdev->req_workqueue);
2361
2362         hci_dev_lock(hdev);
2363         hci_blacklist_clear(hdev);
2364         hci_uuids_clear(hdev);
2365         hci_link_keys_clear(hdev);
2366         hci_smp_ltks_clear(hdev);
2367         hci_remote_oob_data_clear(hdev);
2368         hci_dev_unlock(hdev);
2369
2370         hci_dev_put(hdev);
2371
2372         ida_simple_remove(&hci_index_ida, id);
2373 }
2374 EXPORT_SYMBOL(hci_unregister_dev);
2375
2376 /* Suspend HCI device */
2377 int hci_suspend_dev(struct hci_dev *hdev)
2378 {
2379         hci_notify(hdev, HCI_DEV_SUSPEND);
2380         return 0;
2381 }
2382 EXPORT_SYMBOL(hci_suspend_dev);
2383
2384 /* Resume HCI device */
2385 int hci_resume_dev(struct hci_dev *hdev)
2386 {
2387         hci_notify(hdev, HCI_DEV_RESUME);
2388         return 0;
2389 }
2390 EXPORT_SYMBOL(hci_resume_dev);
2391
2392 /* Receive frame from HCI drivers */
2393 int hci_recv_frame(struct sk_buff *skb)
2394 {
2395         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2396         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2397                       !test_bit(HCI_INIT, &hdev->flags))) {
2398                 kfree_skb(skb);
2399                 return -ENXIO;
2400         }
2401
2402         /* Incoming skb */
2403         bt_cb(skb)->incoming = 1;
2404
2405         /* Time stamp */
2406         __net_timestamp(skb);
2407
2408         skb_queue_tail(&hdev->rx_q, skb);
2409         queue_work(hdev->workqueue, &hdev->rx_work);
2410
2411         return 0;
2412 }
2413 EXPORT_SYMBOL(hci_recv_frame);
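
/* Editorial sketch (hypothetical helper): how a transport driver hands
 * a complete packet to the core. The skb must be tagged with its
 * packet type and owning device before hci_recv_frame() is called; the
 * core then timestamps it and queues it for the RX work item.
 */
static int __maybe_unused example_deliver_event(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;

	return hci_recv_frame(skb);
}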
2414
2415 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2416                           int count, __u8 index)
2417 {
2418         int len = 0;
2419         int hlen = 0;
2420         int remain = count;
2421         struct sk_buff *skb;
2422         struct bt_skb_cb *scb;
2423
2424         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2425             index >= NUM_REASSEMBLY)
2426                 return -EILSEQ;
2427
2428         skb = hdev->reassembly[index];
2429
2430         if (!skb) {
2431                 switch (type) {
2432                 case HCI_ACLDATA_PKT:
2433                         len = HCI_MAX_FRAME_SIZE;
2434                         hlen = HCI_ACL_HDR_SIZE;
2435                         break;
2436                 case HCI_EVENT_PKT:
2437                         len = HCI_MAX_EVENT_SIZE;
2438                         hlen = HCI_EVENT_HDR_SIZE;
2439                         break;
2440                 case HCI_SCODATA_PKT:
2441                         len = HCI_MAX_SCO_SIZE;
2442                         hlen = HCI_SCO_HDR_SIZE;
2443                         break;
2444                 }
2445
2446                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2447                 if (!skb)
2448                         return -ENOMEM;
2449
2450                 scb = (void *) skb->cb;
2451                 scb->expect = hlen;
2452                 scb->pkt_type = type;
2453
2454                 skb->dev = (void *) hdev;
2455                 hdev->reassembly[index] = skb;
2456         }
2457
2458         while (count) {
2459                 scb = (void *) skb->cb;
2460                 len = min_t(uint, scb->expect, count);
2461
2462                 memcpy(skb_put(skb, len), data, len);
2463
2464                 count -= len;
2465                 data += len;
2466                 scb->expect -= len;
2467                 remain = count;
2468
2469                 switch (type) {
2470                 case HCI_EVENT_PKT:
2471                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2472                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2473                                 scb->expect = h->plen;
2474
2475                                 if (skb_tailroom(skb) < scb->expect) {
2476                                         kfree_skb(skb);
2477                                         hdev->reassembly[index] = NULL;
2478                                         return -ENOMEM;
2479                                 }
2480                         }
2481                         break;
2482
2483                 case HCI_ACLDATA_PKT:
2484                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2485                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2486                                 scb->expect = __le16_to_cpu(h->dlen);
2487
2488                                 if (skb_tailroom(skb) < scb->expect) {
2489                                         kfree_skb(skb);
2490                                         hdev->reassembly[index] = NULL;
2491                                         return -ENOMEM;
2492                                 }
2493                         }
2494                         break;
2495
2496                 case HCI_SCODATA_PKT:
2497                         if (skb->len == HCI_SCO_HDR_SIZE) {
2498                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2499                                 scb->expect = h->dlen;
2500
2501                                 if (skb_tailroom(skb) < scb->expect) {
2502                                         kfree_skb(skb);
2503                                         hdev->reassembly[index] = NULL;
2504                                         return -ENOMEM;
2505                                 }
2506                         }
2507                         break;
2508                 }
2509
2510                 if (scb->expect == 0) {
2511                         /* Complete frame */
2512
2513                         bt_cb(skb)->pkt_type = type;
2514                         hci_recv_frame(skb);
2515
2516                         hdev->reassembly[index] = NULL;
2517                         return remain;
2518                 }
2519         }
2520
2521         return remain;
2522 }
2523
2524 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2525 {
2526         int rem = 0;
2527
2528         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2529                 return -EILSEQ;
2530
2531         while (count) {
2532                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2533                 if (rem < 0)
2534                         return rem;
2535
2536                 data += (count - rem);
2537                 count = rem;
2538         }
2539
2540         return rem;
2541 }
2542 EXPORT_SYMBOL(hci_recv_fragment);
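
/* Editorial sketch (hypothetical caller): a driver that receives a
 * packet in arbitrary chunks can feed each chunk to
 * hci_recv_fragment() with the known packet type. The core carries the
 * reassembly state in hdev->reassembly[] across calls and delivers the
 * frame through hci_recv_frame() once it is complete; the return value
 * is zero on success or a negative errno.
 */
static int __maybe_unused example_rx_chunk(struct hci_dev *hdev,
					   void *buf, int len)
{
	return hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
}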
2543
2544 #define STREAM_REASSEMBLY 0
2545
2546 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2547 {
2548         int type;
2549         int rem = 0;
2550
2551         while (count) {
2552                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2553
2554                 if (!skb) {
2555                         struct { char type; } *pkt;
2556
2557                         /* Start of the frame */
2558                         pkt = data;
2559                         type = pkt->type;
2560
2561                         data++;
2562                         count--;
2563                 } else
2564                         type = bt_cb(skb)->pkt_type;
2565
2566                 rem = hci_reassembly(hdev, type, data, count,
2567                                      STREAM_REASSEMBLY);
2568                 if (rem < 0)
2569                         return rem;
2570
2571                 data += (count - rem);
2572                 count = rem;
2573         }
2574
2575         return rem;
2576 }
2577 EXPORT_SYMBOL(hci_recv_stream_fragment);
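
/* Editorial note: the stream variant above implements H4-style framing
 * as used by UART transports, where every frame starts with a single
 * packet type byte. The type byte is consumed here and the rest of the
 * frame is then reassembled exactly as in hci_recv_fragment().
 */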
2578
2579 /* ---- Interface to upper protocols ---- */
2580
2581 int hci_register_cb(struct hci_cb *cb)
2582 {
2583         BT_DBG("%p name %s", cb, cb->name);
2584
2585         write_lock(&hci_cb_list_lock);
2586         list_add(&cb->list, &hci_cb_list);
2587         write_unlock(&hci_cb_list_lock);
2588
2589         return 0;
2590 }
2591 EXPORT_SYMBOL(hci_register_cb);
2592
2593 int hci_unregister_cb(struct hci_cb *cb)
2594 {
2595         BT_DBG("%p name %s", cb, cb->name);
2596
2597         write_lock(&hci_cb_list_lock);
2598         list_del(&cb->list);
2599         write_unlock(&hci_cb_list_lock);
2600
2601         return 0;
2602 }
2603 EXPORT_SYMBOL(hci_unregister_cb);
2604
2605 static int hci_send_frame(struct sk_buff *skb)
2606 {
2607         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2608
2609         if (!hdev) {
2610                 kfree_skb(skb);
2611                 return -ENODEV;
2612         }
2613
2614         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2615
2616         /* Time stamp */
2617         __net_timestamp(skb);
2618
2619         /* Send copy to monitor */
2620         hci_send_to_monitor(hdev, skb);
2621
2622         if (atomic_read(&hdev->promisc)) {
2623                 /* Send copy to the sockets */
2624                 hci_send_to_sock(hdev, skb);
2625         }
2626
2627         /* Get rid of skb owner, prior to sending to the driver. */
2628         skb_orphan(skb);
2629
2630         return hdev->send(skb);
2631 }
2632
2633 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2634 {
2635         skb_queue_head_init(&req->cmd_q);
2636         req->hdev = hdev;
2637         req->err = 0;
2638 }
2639
2640 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2641 {
2642         struct hci_dev *hdev = req->hdev;
2643         struct sk_buff *skb;
2644         unsigned long flags;
2645
2646         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2647
2648         /* If an error occurred during request building, remove all HCI
2649          * commands queued on the HCI request queue.
2650          */
2651         if (req->err) {
2652                 skb_queue_purge(&req->cmd_q);
2653                 return req->err;
2654         }
2655
2656         /* Do not allow empty requests */
2657         if (skb_queue_empty(&req->cmd_q))
2658                 return -ENODATA;
2659
2660         skb = skb_peek_tail(&req->cmd_q);
2661         bt_cb(skb)->req.complete = complete;
2662
2663         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2664         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2665         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2666
2667         queue_work(hdev->workqueue, &hdev->cmd_work);
2668
2669         return 0;
2670 }
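
/* Editorial sketch of the request API above (hypothetical function
 * names, modelled on le_scan_disable_work() earlier in this file):
 * queue one or more commands with hci_req_add() and submit the batch
 * with hci_req_run(). The completion callback runs once, after the
 * last command in the request completes.
 */
static void example_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_ERR("Write scan enable failed: status 0x%2.2x", status);
}

static int __maybe_unused example_enable_page_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	return hci_req_run(&req, example_scan_complete);
}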
2671
2672 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2673                                        u32 plen, const void *param)
2674 {
2675         int len = HCI_COMMAND_HDR_SIZE + plen;
2676         struct hci_command_hdr *hdr;
2677         struct sk_buff *skb;
2678
2679         skb = bt_skb_alloc(len, GFP_ATOMIC);
2680         if (!skb)
2681                 return NULL;
2682
2683         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2684         hdr->opcode = cpu_to_le16(opcode);
2685         hdr->plen   = plen;
2686
2687         if (plen)
2688                 memcpy(skb_put(skb, plen), param, plen);
2689
2690         BT_DBG("skb len %d", skb->len);
2691
2692         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2693         skb->dev = (void *) hdev;
2694
2695         return skb;
2696 }
2697
2698 /* Send HCI command */
2699 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2700                  const void *param)
2701 {
2702         struct sk_buff *skb;
2703
2704         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2705
2706         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2707         if (!skb) {
2708                 BT_ERR("%s no memory for command", hdev->name);
2709                 return -ENOMEM;
2710         }
2711
2712         /* Stand-alone HCI commands must be flagged as
2713          * single-command requests.
2714          */
2715         bt_cb(skb)->req.start = true;
2716
2717         skb_queue_tail(&hdev->cmd_q, skb);
2718         queue_work(hdev->workqueue, &hdev->cmd_work);
2719
2720         return 0;
2721 }
2722
2723 /* Queue a command to an asynchronous HCI request */
2724 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2725                     const void *param, u8 event)
2726 {
2727         struct hci_dev *hdev = req->hdev;
2728         struct sk_buff *skb;
2729
2730         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2731
2732         /* If an error occurred during request building, there is no point in
2733          * queueing the HCI command. We can simply return.
2734          */
2735         if (req->err)
2736                 return;
2737
2738         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2739         if (!skb) {
2740                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2741                        hdev->name, opcode);
2742                 req->err = -ENOMEM;
2743                 return;
2744         }
2745
2746         if (skb_queue_empty(&req->cmd_q))
2747                 bt_cb(skb)->req.start = true;
2748
2749         bt_cb(skb)->req.event = event;
2750
2751         skb_queue_tail(&req->cmd_q, skb);
2752 }
2753
2754 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2755                  const void *param)
2756 {
2757         hci_req_add_ev(req, opcode, plen, param, 0);
2758 }
2759
2760 /* Get data from the previously sent command */
2761 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2762 {
2763         struct hci_command_hdr *hdr;
2764
2765         if (!hdev->sent_cmd)
2766                 return NULL;
2767
2768         hdr = (void *) hdev->sent_cmd->data;
2769
2770         if (hdr->opcode != cpu_to_le16(opcode))
2771                 return NULL;
2772
2773         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2774
2775         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2776 }
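
/* Editorial sketch (hypothetical handler): a command complete handler
 * can recover the parameters of the command it is completing through
 * hci_sent_cmd_data(). A NULL return means the reply does not match
 * the last command that was sent.
 */
static void __maybe_unused example_scan_enable_cc(struct hci_dev *hdev)
{
	u8 *scan = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!scan)
		return;

	BT_DBG("%s requested scan mode 0x%2.2x", hdev->name, *scan);
}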
2777
2778 /* Send ACL data */
2779 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2780 {
2781         struct hci_acl_hdr *hdr;
2782         int len = skb->len;
2783
2784         skb_push(skb, HCI_ACL_HDR_SIZE);
2785         skb_reset_transport_header(skb);
2786         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2787         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2788         hdr->dlen   = cpu_to_le16(len);
2789 }
2790
2791 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2792                           struct sk_buff *skb, __u16 flags)
2793 {
2794         struct hci_conn *conn = chan->conn;
2795         struct hci_dev *hdev = conn->hdev;
2796         struct sk_buff *list;
2797
2798         skb->len = skb_headlen(skb);
2799         skb->data_len = 0;
2800
2801         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2802
2803         switch (hdev->dev_type) {
2804         case HCI_BREDR:
2805                 hci_add_acl_hdr(skb, conn->handle, flags);
2806                 break;
2807         case HCI_AMP:
2808                 hci_add_acl_hdr(skb, chan->handle, flags);
2809                 break;
2810         default:
2811                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2812                 return;
2813         }
2814
2815         list = skb_shinfo(skb)->frag_list;
2816         if (!list) {
2817                 /* Non-fragmented */
2818                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2819
2820                 skb_queue_tail(queue, skb);
2821         } else {
2822                 /* Fragmented */
2823                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2824
2825                 skb_shinfo(skb)->frag_list = NULL;
2826
2827                 /* Queue all fragments atomically */
2828                 spin_lock(&queue->lock);
2829
2830                 __skb_queue_tail(queue, skb);
2831
2832                 flags &= ~ACL_START;
2833                 flags |= ACL_CONT;
2834                 do {
2835                         skb = list; list = list->next;
2836
2837                         skb->dev = (void *) hdev;
2838                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2839                         hci_add_acl_hdr(skb, conn->handle, flags);
2840
2841                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2842
2843                         __skb_queue_tail(queue, skb);
2844                 } while (list);
2845
2846                 spin_unlock(&queue->lock);
2847         }
2848 }
2849
2850 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2851 {
2852         struct hci_dev *hdev = chan->conn->hdev;
2853
2854         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2855
2856         skb->dev = (void *) hdev;
2857
2858         hci_queue_acl(chan, &chan->data_q, skb, flags);
2859
2860         queue_work(hdev->workqueue, &hdev->tx_work);
2861 }
2862
2863 /* Send SCO data */
2864 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2865 {
2866         struct hci_dev *hdev = conn->hdev;
2867         struct hci_sco_hdr hdr;
2868
2869         BT_DBG("%s len %d", hdev->name, skb->len);
2870
2871         hdr.handle = cpu_to_le16(conn->handle);
2872         hdr.dlen   = skb->len;
2873
2874         skb_push(skb, HCI_SCO_HDR_SIZE);
2875         skb_reset_transport_header(skb);
2876         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2877
2878         skb->dev = (void *) hdev;
2879         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2880
2881         skb_queue_tail(&conn->data_q, skb);
2882         queue_work(hdev->workqueue, &hdev->tx_work);
2883 }
2884
2885 /* ---- HCI TX task (outgoing data) ---- */
2886
2887 /* HCI Connection scheduler */
2888 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2889                                      int *quote)
2890 {
2891         struct hci_conn_hash *h = &hdev->conn_hash;
2892         struct hci_conn *conn = NULL, *c;
2893         unsigned int num = 0, min = ~0;
2894
2895         /* We don't have to lock device here. Connections are always
2896          * added and removed with TX task disabled. */
2897
2898         rcu_read_lock();
2899
2900         list_for_each_entry_rcu(c, &h->list, list) {
2901                 if (c->type != type || skb_queue_empty(&c->data_q))
2902                         continue;
2903
2904                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2905                         continue;
2906
2907                 num++;
2908
2909                 if (c->sent < min) {
2910                         min  = c->sent;
2911                         conn = c;
2912                 }
2913
2914                 if (hci_conn_num(hdev, type) == num)
2915                         break;
2916         }
2917
2918         rcu_read_unlock();
2919
2920         if (conn) {
2921                 int cnt, q;
2922
2923                 switch (conn->type) {
2924                 case ACL_LINK:
2925                         cnt = hdev->acl_cnt;
2926                         break;
2927                 case SCO_LINK:
2928                 case ESCO_LINK:
2929                         cnt = hdev->sco_cnt;
2930                         break;
2931                 case LE_LINK:
2932                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2933                         break;
2934                 default:
2935                         cnt = 0;
2936                         BT_ERR("Unknown link type");
2937                 }
2938
2939                 q = cnt / num;
2940                 *quote = q ? q : 1;
2941         } else
2942                 *quote = 0;
2943
2944         BT_DBG("conn %p quote %d", conn, *quote);
2945         return conn;
2946 }
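
/* Worked example of the fair-share quota above (hypothetical numbers):
 * with hdev->sco_cnt == 8 free buffers and num == 3 SCO connections
 * that have queued data, the least-served connection is picked and
 * given a quota of 8 / 3 = 2 packets for this scheduling pass; a zero
 * quotient is rounded up to 1 so the scheduler always makes progress.
 */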
2947
2948 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2949 {
2950         struct hci_conn_hash *h = &hdev->conn_hash;
2951         struct hci_conn *c;
2952
2953         BT_ERR("%s link tx timeout", hdev->name);
2954
2955         rcu_read_lock();
2956
2957         /* Kill stalled connections */
2958         list_for_each_entry_rcu(c, &h->list, list) {
2959                 if (c->type == type && c->sent) {
2960                         BT_ERR("%s killing stalled connection %pMR",
2961                                hdev->name, &c->dst);
2962                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2963                 }
2964         }
2965
2966         rcu_read_unlock();
2967 }
2968
2969 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2970                                       int *quote)
2971 {
2972         struct hci_conn_hash *h = &hdev->conn_hash;
2973         struct hci_chan *chan = NULL;
2974         unsigned int num = 0, min = ~0, cur_prio = 0;
2975         struct hci_conn *conn;
2976         int cnt, q, conn_num = 0;
2977
2978         BT_DBG("%s", hdev->name);
2979
2980         rcu_read_lock();
2981
2982         list_for_each_entry_rcu(conn, &h->list, list) {
2983                 struct hci_chan *tmp;
2984
2985                 if (conn->type != type)
2986                         continue;
2987
2988                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2989                         continue;
2990
2991                 conn_num++;
2992
2993                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2994                         struct sk_buff *skb;
2995
2996                         if (skb_queue_empty(&tmp->data_q))
2997                                 continue;
2998
2999                         skb = skb_peek(&tmp->data_q);
3000                         if (skb->priority < cur_prio)
3001                                 continue;
3002
3003                         if (skb->priority > cur_prio) {
3004                                 num = 0;
3005                                 min = ~0;
3006                                 cur_prio = skb->priority;
3007                         }
3008
3009                         num++;
3010
3011                         if (conn->sent < min) {
3012                                 min  = conn->sent;
3013                                 chan = tmp;
3014                         }
3015                 }
3016
3017                 if (hci_conn_num(hdev, type) == conn_num)
3018                         break;
3019         }
3020
3021         rcu_read_unlock();
3022
3023         if (!chan)
3024                 return NULL;
3025
3026         switch (chan->conn->type) {
3027         case ACL_LINK:
3028                 cnt = hdev->acl_cnt;
3029                 break;
3030         case AMP_LINK:
3031                 cnt = hdev->block_cnt;
3032                 break;
3033         case SCO_LINK:
3034         case ESCO_LINK:
3035                 cnt = hdev->sco_cnt;
3036                 break;
3037         case LE_LINK:
3038                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3039                 break;
3040         default:
3041                 cnt = 0;
3042                 BT_ERR("Unknown link type");
3043         }
3044
3045         q = cnt / num;
3046         *quote = q ? q : 1;
3047         BT_DBG("chan %p quote %d", chan, *quote);
3048         return chan;
3049 }
3050
3051 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3052 {
3053         struct hci_conn_hash *h = &hdev->conn_hash;
3054         struct hci_conn *conn;
3055         int num = 0;
3056
3057         BT_DBG("%s", hdev->name);
3058
3059         rcu_read_lock();
3060
3061         list_for_each_entry_rcu(conn, &h->list, list) {
3062                 struct hci_chan *chan;
3063
3064                 if (conn->type != type)
3065                         continue;
3066
3067                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3068                         continue;
3069
3070                 num++;
3071
3072                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3073                         struct sk_buff *skb;
3074
3075                         if (chan->sent) {
3076                                 chan->sent = 0;
3077                                 continue;
3078                         }
3079
3080                         if (skb_queue_empty(&chan->data_q))
3081                                 continue;
3082
3083                         skb = skb_peek(&chan->data_q);
3084                         if (skb->priority >= HCI_PRIO_MAX - 1)
3085                                 continue;
3086
3087                         skb->priority = HCI_PRIO_MAX - 1;
3088
3089                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3090                                skb->priority);
3091                 }
3092
3093                 if (hci_conn_num(hdev, type) == num)
3094                         break;
3095         }
3096
3097         rcu_read_unlock();
3098
3099 }
3100
3101 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3102 {
3103         /* Calculate count of blocks used by this packet */
3104         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3105 }
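
/* Worked example for the block math above (hypothetical sizes): with
 * hdev->block_len == 256 and an skb carrying the 4 byte ACL header
 * plus 600 bytes of payload, DIV_ROUND_UP(600, 256) = 3 data blocks
 * are charged against hdev->block_cnt.
 */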
3106
3107 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3108 {
3109         if (!test_bit(HCI_RAW, &hdev->flags)) {
3110                 /* ACL tx timeout must be longer than maximum
3111                  * link supervision timeout (40.9 seconds) */
3112                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3113                                        HCI_ACL_TX_TIMEOUT))
3114                         hci_link_tx_to(hdev, ACL_LINK);
3115         }
3116 }
3117
3118 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3119 {
3120         unsigned int cnt = hdev->acl_cnt;
3121         struct hci_chan *chan;
3122         struct sk_buff *skb;
3123         int quote;
3124
3125         __check_timeout(hdev, cnt);
3126
3127         while (hdev->acl_cnt &&
3128                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3129                 u32 priority = (skb_peek(&chan->data_q))->priority;
3130                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3131                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3132                                skb->len, skb->priority);
3133
3134                         /* Stop if priority has changed */
3135                         if (skb->priority < priority)
3136                                 break;
3137
3138                         skb = skb_dequeue(&chan->data_q);
3139
3140                         hci_conn_enter_active_mode(chan->conn,
3141                                                    bt_cb(skb)->force_active);
3142
3143                         hci_send_frame(skb);
3144                         hdev->acl_last_tx = jiffies;
3145
3146                         hdev->acl_cnt--;
3147                         chan->sent++;
3148                         chan->conn->sent++;
3149                 }
3150         }
3151
3152         if (cnt != hdev->acl_cnt)
3153                 hci_prio_recalculate(hdev, ACL_LINK);
3154 }
3155
3156 static void hci_sched_acl_blk(struct hci_dev *hdev)
3157 {
3158         unsigned int cnt = hdev->block_cnt;
3159         struct hci_chan *chan;
3160         struct sk_buff *skb;
3161         int quote;
3162         u8 type;
3163
3164         __check_timeout(hdev, cnt);
3165
3166         BT_DBG("%s", hdev->name);
3167
3168         if (hdev->dev_type == HCI_AMP)
3169                 type = AMP_LINK;
3170         else
3171                 type = ACL_LINK;
3172
3173         while (hdev->block_cnt > 0 &&
3174                (chan = hci_chan_sent(hdev, type, &quote))) {
3175                 u32 priority = (skb_peek(&chan->data_q))->priority;
3176                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3177                         int blocks;
3178
3179                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3180                                skb->len, skb->priority);
3181
3182                         /* Stop if priority has changed */
3183                         if (skb->priority < priority)
3184                                 break;
3185
3186                         skb = skb_dequeue(&chan->data_q);
3187
3188                         blocks = __get_blocks(hdev, skb);
3189                         if (blocks > hdev->block_cnt)
3190                                 return;
3191
3192                         hci_conn_enter_active_mode(chan->conn,
3193                                                    bt_cb(skb)->force_active);
3194
3195                         hci_send_frame(skb);
3196                         hdev->acl_last_tx = jiffies;
3197
3198                         hdev->block_cnt -= blocks;
3199                         quote -= blocks;
3200
3201                         chan->sent += blocks;
3202                         chan->conn->sent += blocks;
3203                 }
3204         }
3205
3206         if (cnt != hdev->block_cnt)
3207                 hci_prio_recalculate(hdev, type);
3208 }
3209
3210 static void hci_sched_acl(struct hci_dev *hdev)
3211 {
3212         BT_DBG("%s", hdev->name);
3213
3214         /* No ACL link over BR/EDR controller */
3215         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3216                 return;
3217
3218         /* No AMP link over AMP controller */
3219         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3220                 return;
3221
3222         switch (hdev->flow_ctl_mode) {
3223         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3224                 hci_sched_acl_pkt(hdev);
3225                 break;
3226
3227         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3228                 hci_sched_acl_blk(hdev);
3229                 break;
3230         }
3231 }
3232
3233 /* Schedule SCO */
3234 static void hci_sched_sco(struct hci_dev *hdev)
3235 {
3236         struct hci_conn *conn;
3237         struct sk_buff *skb;
3238         int quote;
3239
3240         BT_DBG("%s", hdev->name);
3241
3242         if (!hci_conn_num(hdev, SCO_LINK))
3243                 return;
3244
3245         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3246                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3247                         BT_DBG("skb %p len %d", skb, skb->len);
3248                         hci_send_frame(skb);
3249
3250                         conn->sent++;
3251                         if (conn->sent == ~0)
3252                                 conn->sent = 0;
3253                 }
3254         }
3255 }
3256
3257 static void hci_sched_esco(struct hci_dev *hdev)
3258 {
3259         struct hci_conn *conn;
3260         struct sk_buff *skb;
3261         int quote;
3262
3263         BT_DBG("%s", hdev->name);
3264
3265         if (!hci_conn_num(hdev, ESCO_LINK))
3266                 return;
3267
3268         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3269                                                      &quote))) {
3270                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3271                         BT_DBG("skb %p len %d", skb, skb->len);
3272                         hci_send_frame(skb);
3273
3274                         conn->sent++;
3275                         if (conn->sent == ~0)
3276                                 conn->sent = 0;
3277                 }
3278         }
3279 }
3280
3281 static void hci_sched_le(struct hci_dev *hdev)
3282 {
3283         struct hci_chan *chan;
3284         struct sk_buff *skb;
3285         int quote, cnt, tmp;
3286
3287         BT_DBG("%s", hdev->name);
3288
3289         if (!hci_conn_num(hdev, LE_LINK))
3290                 return;
3291
3292         if (!test_bit(HCI_RAW, &hdev->flags)) {
3293                 /* LE tx timeout must be longer than maximum
3294                  * link supervision timeout (40.9 seconds) */
3295                 if (!hdev->le_cnt && hdev->le_pkts &&
3296                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3297                         hci_link_tx_to(hdev, LE_LINK);
3298         }
3299
3300         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3301         tmp = cnt;
3302         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3303                 u32 priority = (skb_peek(&chan->data_q))->priority;
3304                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3305                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3306                                skb->len, skb->priority);
3307
3308                         /* Stop if priority has changed */
3309                         if (skb->priority < priority)
3310                                 break;
3311
3312                         skb = skb_dequeue(&chan->data_q);
3313
3314                         hci_send_frame(skb);
3315                         hdev->le_last_tx = jiffies;
3316
3317                         cnt--;
3318                         chan->sent++;
3319                         chan->conn->sent++;
3320                 }
3321         }
3322
3323         if (hdev->le_pkts)
3324                 hdev->le_cnt = cnt;
3325         else
3326                 hdev->acl_cnt = cnt;
3327
3328         if (cnt != tmp)
3329                 hci_prio_recalculate(hdev, LE_LINK);
3330 }
3331
3332 static void hci_tx_work(struct work_struct *work)
3333 {
3334         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3335         struct sk_buff *skb;
3336
3337         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3338                hdev->sco_cnt, hdev->le_cnt);
3339
3340         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3341                 /* Schedule queues and send stuff to HCI driver */
3342                 hci_sched_acl(hdev);
3343                 hci_sched_sco(hdev);
3344                 hci_sched_esco(hdev);
3345                 hci_sched_le(hdev);
3346         }
3347
3348         /* Send next queued raw (unknown type) packet */
3349         while ((skb = skb_dequeue(&hdev->raw_q)))
3350                 hci_send_frame(skb);
3351 }
3352
3353 /* ----- HCI RX task (incoming data processing) ----- */
3354
3355 /* ACL data packet */
3356 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3357 {
3358         struct hci_acl_hdr *hdr = (void *) skb->data;
3359         struct hci_conn *conn;
3360         __u16 handle, flags;
3361
3362         skb_pull(skb, HCI_ACL_HDR_SIZE);
3363
        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        }

        BT_ERR("%s ACL packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        }

        BT_ERR("%s SCO packet for unknown connection handle %d",
               hdev->name, handle);

        kfree_skb(skb);
}

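/* A request is complete when the head of the command queue starts a
 * new request (or the queue is empty).
 */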
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

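/* Re-queue a clone of the last sent command, except for HCI_Reset.
 * Used to recover when a controller resets spontaneously during init.
 */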
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

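/* Called from the event processing path for every Command Complete or
 * Command Status event, to drive request completion callbacks.
 */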
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the complete
         * callback is found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
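                /* The start of the next request marks the boundary;
                 * put it back and stop dequeuing.
                 */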
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

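                /* In raw or user channel mode the core does not process
                 * packets itself; they are handled in userspace.
                 */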
                if (test_bit(HCI_RAW, &hdev->flags) ||
                    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

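                /* Keep a clone of the command so its parameters can be
                 * matched against the resulting Command Complete event.
                 */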
                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
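                        /* The command timer is not re-armed for
                         * HCI_Reset, which may take longer than the
                         * normal command timeout on some controllers.
                         */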
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

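/* Map an mgmt address type (BDADDR_LE_*) to the HCI LE address type
 * (ADDR_LE_DEV_*) used on the wire.
 */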
u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fall back to the LE random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}