Bluetooth: Remove driver init queue from core
[pandora-kernel.git] net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50
51 /* ---- HCI notifications ---- */
52
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55         hci_sock_dev_event(hdev, event);
56 }
57
58 /* ---- HCI requests ---- */
59
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
61 {
62         BT_DBG("%s result 0x%2.2x", hdev->name, result);
63
64         if (hdev->req_status == HCI_REQ_PEND) {
65                 hdev->req_result = result;
66                 hdev->req_status = HCI_REQ_DONE;
67                 wake_up_interruptible(&hdev->req_wait_q);
68         }
69 }
70
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
72 {
73         BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75         if (hdev->req_status == HCI_REQ_PEND) {
76                 hdev->req_result = err;
77                 hdev->req_status = HCI_REQ_CANCELED;
78                 wake_up_interruptible(&hdev->req_wait_q);
79         }
80 }
81
82 struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
83 {
84         struct hci_ev_cmd_complete *ev;
85         struct hci_event_hdr *hdr;
86         struct sk_buff *skb;
87
88         hci_dev_lock(hdev);
89
90         skb = hdev->recv_evt;
91         hdev->recv_evt = NULL;
92
93         hci_dev_unlock(hdev);
94
95         if (!skb)
96                 return ERR_PTR(-ENODATA);
97
98         if (skb->len < sizeof(*hdr)) {
99                 BT_ERR("Too short HCI event");
100                 goto failed;
101         }
102
103         hdr = (void *) skb->data;
104         skb_pull(skb, HCI_EVENT_HDR_SIZE);
105
106         if (event) {
107                 if (hdr->evt != event)
108                         goto failed;
109                 return skb;
110         }
111
112         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
113                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
114                 goto failed;
115         }
116
117         if (skb->len < sizeof(*ev)) {
118                 BT_ERR("Too short cmd_complete event");
119                 goto failed;
120         }
121
122         ev = (void *) skb->data;
123         skb_pull(skb, sizeof(*ev));
124
125         if (opcode == __le16_to_cpu(ev->opcode))
126                 return skb;
127
128         BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
129                __le16_to_cpu(ev->opcode));
130
131 failed:
132         kfree_skb(skb);
133         return ERR_PTR(-ENODATA);
134 }
135
136 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
137                                   void *param, u8 event, u32 timeout)
138 {
139         DECLARE_WAITQUEUE(wait, current);
140         struct hci_request req;
141         int err = 0;
142
143         BT_DBG("%s", hdev->name);
144
145         hci_req_init(&req, hdev);
146
147         hci_req_add_ev(&req, opcode, plen, param, event);
148
149         hdev->req_status = HCI_REQ_PEND;
150
151         err = hci_req_run(&req, hci_req_sync_complete);
152         if (err < 0)
153                 return ERR_PTR(err);
154
155         add_wait_queue(&hdev->req_wait_q, &wait);
156         set_current_state(TASK_INTERRUPTIBLE);
157
158         schedule_timeout(timeout);
159
160         remove_wait_queue(&hdev->req_wait_q, &wait);
161
162         if (signal_pending(current))
163                 return ERR_PTR(-EINTR);
164
165         switch (hdev->req_status) {
166         case HCI_REQ_DONE:
167                 err = -bt_to_errno(hdev->req_result);
168                 break;
169
170         case HCI_REQ_CANCELED:
171                 err = -hdev->req_result;
172                 break;
173
174         default:
175                 err = -ETIMEDOUT;
176                 break;
177         }
178
179         hdev->req_status = hdev->req_result = 0;
180
181         BT_DBG("%s end: err %d", hdev->name, err);
182
183         if (err < 0)
184                 return ERR_PTR(err);
185
186         return hci_get_cmd_complete(hdev, opcode, event);
187 }
188 EXPORT_SYMBOL(__hci_cmd_sync_ev);
189
190 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
191                                void *param, u32 timeout)
192 {
193         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
194 }
195 EXPORT_SYMBOL(__hci_cmd_sync);
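
/* Illustrative sketch (not part of this file): with the driver init
 * queue removed from the core, a driver's setup() callback can issue
 * its vendor-specific commands synchronously via __hci_cmd_sync(). The
 * opcode 0xfc01 and the single parameter byte below are hypothetical
 * placeholders.
 */
static int example_driver_setup(struct hci_dev *hdev)
{
        u8 param = 0x01;
        struct sk_buff *skb;

        skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
                             HCI_INIT_TIMEOUT);
        if (IS_ERR(skb))
                return PTR_ERR(skb);

        /* Command Complete return parameters, if any, are in skb->data */
        kfree_skb(skb);
        return 0;
}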
196
197 /* Execute request and wait for completion. */
198 static int __hci_req_sync(struct hci_dev *hdev,
199                           void (*func)(struct hci_request *req,
200                                       unsigned long opt),
201                           unsigned long opt, __u32 timeout)
202 {
203         struct hci_request req;
204         DECLARE_WAITQUEUE(wait, current);
205         int err = 0;
206
207         BT_DBG("%s start", hdev->name);
208
209         hci_req_init(&req, hdev);
210
211         hdev->req_status = HCI_REQ_PEND;
212
213         func(&req, opt);
214
215         err = hci_req_run(&req, hci_req_sync_complete);
216         if (err < 0) {
217                 hdev->req_status = 0;
218
219                 /* ENODATA means the HCI request command queue is empty.
220                  * This can happen when a request with conditionals doesn't
221                  * trigger any commands to be sent. This is normal behavior
222                  * and should not trigger an error return.
223                  */
224                 if (err == -ENODATA)
225                         return 0;
226
227                 return err;
228         }
229
230         add_wait_queue(&hdev->req_wait_q, &wait);
231         set_current_state(TASK_INTERRUPTIBLE);
232
233         schedule_timeout(timeout);
234
235         remove_wait_queue(&hdev->req_wait_q, &wait);
236
237         if (signal_pending(current))
238                 return -EINTR;
239
240         switch (hdev->req_status) {
241         case HCI_REQ_DONE:
242                 err = -bt_to_errno(hdev->req_result);
243                 break;
244
245         case HCI_REQ_CANCELED:
246                 err = -hdev->req_result;
247                 break;
248
249         default:
250                 err = -ETIMEDOUT;
251                 break;
252         }
253
254         hdev->req_status = hdev->req_result = 0;
255
256         BT_DBG("%s end: err %d", hdev->name, err);
257
258         return err;
259 }
260
261 static int hci_req_sync(struct hci_dev *hdev,
262                         void (*req)(struct hci_request *req,
263                                     unsigned long opt),
264                         unsigned long opt, __u32 timeout)
265 {
266         int ret;
267
268         if (!test_bit(HCI_UP, &hdev->flags))
269                 return -ENETDOWN;
270
271         /* Serialize all requests */
272         hci_req_lock(hdev);
273         ret = __hci_req_sync(hdev, req, opt, timeout);
274         hci_req_unlock(hdev);
275
276         return ret;
277 }
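
/* Usage note: the request callbacks further down (hci_scan_req,
 * hci_auth_req, etc.) only queue commands via hci_req_add();
 * hci_req_sync() then runs the whole batch and sleeps in
 * __hci_req_sync() until hci_req_sync_complete() wakes req_wait_q.
 * For example, from the HCISETSCAN ioctl handler:
 *
 *      err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
 *                         HCI_INIT_TIMEOUT);
 */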
278
279 static void hci_reset_req(struct hci_request *req, unsigned long opt)
280 {
281         BT_DBG("%s %ld", req->hdev->name, opt);
282
283         /* Reset device */
284         set_bit(HCI_RESET, &req->hdev->flags);
285         hci_req_add(req, HCI_OP_RESET, 0, NULL);
286 }
287
288 static void bredr_init(struct hci_request *req)
289 {
290         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
291
292         /* Read Local Supported Features */
293         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
294
295         /* Read Local Version */
296         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
297
298         /* Read BD Address */
299         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
300 }
301
302 static void amp_init(struct hci_request *req)
303 {
304         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
305
306         /* Read Local Version */
307         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
308
309         /* Read Local AMP Info */
310         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
311
312         /* Read Data Blk size */
313         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
314 }
315
316 static void hci_init1_req(struct hci_request *req, unsigned long opt)
317 {
318         struct hci_dev *hdev = req->hdev;
319
320         BT_DBG("%s %ld", hdev->name, opt);
321
322         /* Reset */
323         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
324                 hci_reset_req(req, 0);
325
326         switch (hdev->dev_type) {
327         case HCI_BREDR:
328                 bredr_init(req);
329                 break;
330
331         case HCI_AMP:
332                 amp_init(req);
333                 break;
334
335         default:
336                 BT_ERR("Unknown device type %d", hdev->dev_type);
337                 break;
338         }
339 }
340
341 static void bredr_setup(struct hci_request *req)
342 {
343         struct hci_cp_delete_stored_link_key cp;
344         __le16 param;
345         __u8 flt_type;
346
347         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
349
350         /* Read Class of Device */
351         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
352
353         /* Read Local Name */
354         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
355
356         /* Read Voice Setting */
357         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
358
359         /* Clear Event Filters */
360         flt_type = HCI_FLT_CLEAR_ALL;
361         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
362
363         /* Connection accept timeout ~20 secs (0x7d00 slots * 0.625 ms) */
364         param = __constant_cpu_to_le16(0x7d00);
365         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
366
367         bacpy(&cp.bdaddr, BDADDR_ANY);
368         cp.delete_all = 0x01;
369         hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
370
371         /* Read page scan parameters */
372         if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
373                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
374                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
375         }
376 }
377
378 static void le_setup(struct hci_request *req)
379 {
380         /* Read LE Buffer Size */
381         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
382
383         /* Read LE Local Supported Features */
384         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
385
386         /* Read LE Advertising Channel TX Power */
387         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
388
389         /* Read LE White List Size */
390         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
391
392         /* Read LE Supported States */
393         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
394 }
395
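/* The values returned below are the inquiry modes defined for the
 * Write Inquiry Mode command: 0x00 = standard inquiry results,
 * 0x01 = inquiry results with RSSI, 0x02 = inquiry results with RSSI
 * or extended inquiry results. The manufacturer/revision checks enable
 * RSSI mode on controllers that support it without advertising the
 * corresponding LMP feature.
 */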
396 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
397 {
398         if (lmp_ext_inq_capable(hdev))
399                 return 0x02;
400
401         if (lmp_inq_rssi_capable(hdev))
402                 return 0x01;
403
404         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
405             hdev->lmp_subver == 0x0757)
406                 return 0x01;
407
408         if (hdev->manufacturer == 15) {
409                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
410                         return 0x01;
411                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
412                         return 0x01;
413                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
414                         return 0x01;
415         }
416
417         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
418             hdev->lmp_subver == 0x1805)
419                 return 0x01;
420
421         return 0x00;
422 }
423
424 static void hci_setup_inquiry_mode(struct hci_request *req)
425 {
426         u8 mode;
427
428         mode = hci_get_inquiry_mode(req->hdev);
429
430         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
431 }
432
433 static void hci_setup_event_mask(struct hci_request *req)
434 {
435         struct hci_dev *hdev = req->hdev;
436
437         /* The second byte is 0xff instead of 0x9f (two reserved bits
438          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
439          * command otherwise.
440          */
441         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
442
443         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
444          * any event mask for pre 1.2 devices.
445          */
446         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
447                 return;
448
449         if (lmp_bredr_capable(hdev)) {
450                 events[4] |= 0x01; /* Flow Specification Complete */
451                 events[4] |= 0x02; /* Inquiry Result with RSSI */
452                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
453                 events[5] |= 0x08; /* Synchronous Connection Complete */
454                 events[5] |= 0x10; /* Synchronous Connection Changed */
455         }
456
457         if (lmp_inq_rssi_capable(hdev))
458                 events[4] |= 0x02; /* Inquiry Result with RSSI */
459
460         if (lmp_sniffsubr_capable(hdev))
461                 events[5] |= 0x20; /* Sniff Subrating */
462
463         if (lmp_pause_enc_capable(hdev))
464                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
465
466         if (lmp_ext_inq_capable(hdev))
467                 events[5] |= 0x40; /* Extended Inquiry Result */
468
469         if (lmp_no_flush_capable(hdev))
470                 events[7] |= 0x01; /* Enhanced Flush Complete */
471
472         if (lmp_lsto_capable(hdev))
473                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
474
475         if (lmp_ssp_capable(hdev)) {
476                 events[6] |= 0x01;      /* IO Capability Request */
477                 events[6] |= 0x02;      /* IO Capability Response */
478                 events[6] |= 0x04;      /* User Confirmation Request */
479                 events[6] |= 0x08;      /* User Passkey Request */
480                 events[6] |= 0x10;      /* Remote OOB Data Request */
481                 events[6] |= 0x20;      /* Simple Pairing Complete */
482                 events[7] |= 0x04;      /* User Passkey Notification */
483                 events[7] |= 0x08;      /* Keypress Notification */
484                 events[7] |= 0x10;      /* Remote Host Supported
485                                          * Features Notification
486                                          */
487         }
488
489         if (lmp_le_capable(hdev))
490                 events[7] |= 0x20;      /* LE Meta-Event */
491
492         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
493
494         if (lmp_le_capable(hdev)) {
495                 memset(events, 0, sizeof(events));
496                 events[0] = 0x1f;
497                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
498                             sizeof(events), events);
499         }
500 }
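
/* Illustrative sketch (not part of this file): each HCI event code N
 * maps to bit (N - 1) of the 64-bit event mask, so e.g. Extended
 * Inquiry Result (event code 0x2f) lands in events[5] as 0x40,
 * matching the assignments above.
 */
static inline void example_set_event_bit(u8 events[8], u8 event_code)
{
        events[(event_code - 1) / 8] |= 1 << ((event_code - 1) % 8);
}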
501
502 static void hci_init2_req(struct hci_request *req, unsigned long opt)
503 {
504         struct hci_dev *hdev = req->hdev;
505
506         if (lmp_bredr_capable(hdev))
507                 bredr_setup(req);
508
509         if (lmp_le_capable(hdev))
510                 le_setup(req);
511
512         hci_setup_event_mask(req);
513
514         if (hdev->hci_ver > BLUETOOTH_VER_1_1)
515                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
516
517         if (lmp_ssp_capable(hdev)) {
518                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
519                         u8 mode = 0x01;
520                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
521                                     sizeof(mode), &mode);
522                 } else {
523                         struct hci_cp_write_eir cp;
524
525                         memset(hdev->eir, 0, sizeof(hdev->eir));
526                         memset(&cp, 0, sizeof(cp));
527
528                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
529                 }
530         }
531
532         if (lmp_inq_rssi_capable(hdev))
533                 hci_setup_inquiry_mode(req);
534
535         if (lmp_inq_tx_pwr_capable(hdev))
536                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
537
538         if (lmp_ext_feat_capable(hdev)) {
539                 struct hci_cp_read_local_ext_features cp;
540
541                 cp.page = 0x01;
542                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
543                             sizeof(cp), &cp);
544         }
545
546         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
547                 u8 enable = 1;
548                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
549                             &enable);
550         }
551 }
552
553 static void hci_setup_link_policy(struct hci_request *req)
554 {
555         struct hci_dev *hdev = req->hdev;
556         struct hci_cp_write_def_link_policy cp;
557         u16 link_policy = 0;
558
559         if (lmp_rswitch_capable(hdev))
560                 link_policy |= HCI_LP_RSWITCH;
561         if (lmp_hold_capable(hdev))
562                 link_policy |= HCI_LP_HOLD;
563         if (lmp_sniff_capable(hdev))
564                 link_policy |= HCI_LP_SNIFF;
565         if (lmp_park_capable(hdev))
566                 link_policy |= HCI_LP_PARK;
567
568         cp.policy = cpu_to_le16(link_policy);
569         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
570 }
571
572 static void hci_set_le_support(struct hci_request *req)
573 {
574         struct hci_dev *hdev = req->hdev;
575         struct hci_cp_write_le_host_supported cp;
576
577         memset(&cp, 0, sizeof(cp));
578
579         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
580                 cp.le = 0x01;
581                 cp.simul = lmp_le_br_capable(hdev);
582         }
583
584         if (cp.le != lmp_host_le_capable(hdev))
585                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
586                             &cp);
587 }
588
589 static void hci_init3_req(struct hci_request *req, unsigned long opt)
590 {
591         struct hci_dev *hdev = req->hdev;
592
593         if (hdev->commands[5] & 0x10)
594                 hci_setup_link_policy(req);
595
596         if (lmp_le_capable(hdev)) {
597                 hci_set_le_support(req);
598                 hci_update_ad(req);
599         }
600 }
601
602 static int __hci_init(struct hci_dev *hdev)
603 {
604         int err;
605
606         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
607         if (err < 0)
608                 return err;
609
610         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
611          * dual-mode BR/EDR/LE controllers. AMP controllers only need
612          * the first stage init.
613          */
614         if (hdev->dev_type != HCI_BREDR)
615                 return 0;
616
617         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
618         if (err < 0)
619                 return err;
620
621         return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
622 }
623
624 static void hci_scan_req(struct hci_request *req, unsigned long opt)
625 {
626         __u8 scan = opt;
627
628         BT_DBG("%s %x", req->hdev->name, scan);
629
630         /* Inquiry and Page scans */
631         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
632 }
633
634 static void hci_auth_req(struct hci_request *req, unsigned long opt)
635 {
636         __u8 auth = opt;
637
638         BT_DBG("%s %x", req->hdev->name, auth);
639
640         /* Authentication */
641         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
642 }
643
644 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
645 {
646         __u8 encrypt = opt;
647
648         BT_DBG("%s %x", req->hdev->name, encrypt);
649
650         /* Encryption */
651         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
652 }
653
654 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
655 {
656         __le16 policy = cpu_to_le16(opt);
657
658         BT_DBG("%s %x", req->hdev->name, policy);
659
660         /* Default link policy */
661         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
662 }
663
664 /* Get HCI device by index.
665  * Device is held on return. */
666 struct hci_dev *hci_dev_get(int index)
667 {
668         struct hci_dev *hdev = NULL, *d;
669
670         BT_DBG("%d", index);
671
672         if (index < 0)
673                 return NULL;
674
675         read_lock(&hci_dev_list_lock);
676         list_for_each_entry(d, &hci_dev_list, list) {
677                 if (d->id == index) {
678                         hdev = hci_dev_hold(d);
679                         break;
680                 }
681         }
682         read_unlock(&hci_dev_list_lock);
683         return hdev;
684 }
685
686 /* ---- Inquiry support ---- */
687
688 bool hci_discovery_active(struct hci_dev *hdev)
689 {
690         struct discovery_state *discov = &hdev->discovery;
691
692         switch (discov->state) {
693         case DISCOVERY_FINDING:
694         case DISCOVERY_RESOLVING:
695                 return true;
696
697         default:
698                 return false;
699         }
700 }
701
702 void hci_discovery_set_state(struct hci_dev *hdev, int state)
703 {
704         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
705
706         if (hdev->discovery.state == state)
707                 return;
708
709         switch (state) {
710         case DISCOVERY_STOPPED:
711                 if (hdev->discovery.state != DISCOVERY_STARTING)
712                         mgmt_discovering(hdev, 0);
713                 break;
714         case DISCOVERY_STARTING:
715                 break;
716         case DISCOVERY_FINDING:
717                 mgmt_discovering(hdev, 1);
718                 break;
719         case DISCOVERY_RESOLVING:
720                 break;
721         case DISCOVERY_STOPPING:
722                 break;
723         }
724
725         hdev->discovery.state = state;
726 }
727
728 static void inquiry_cache_flush(struct hci_dev *hdev)
729 {
730         struct discovery_state *cache = &hdev->discovery;
731         struct inquiry_entry *p, *n;
732
733         list_for_each_entry_safe(p, n, &cache->all, all) {
734                 list_del(&p->all);
735                 kfree(p);
736         }
737
738         INIT_LIST_HEAD(&cache->unknown);
739         INIT_LIST_HEAD(&cache->resolve);
740 }
741
742 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
743                                                bdaddr_t *bdaddr)
744 {
745         struct discovery_state *cache = &hdev->discovery;
746         struct inquiry_entry *e;
747
748         BT_DBG("cache %p, %pMR", cache, bdaddr);
749
750         list_for_each_entry(e, &cache->all, all) {
751                 if (!bacmp(&e->data.bdaddr, bdaddr))
752                         return e;
753         }
754
755         return NULL;
756 }
757
758 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
759                                                        bdaddr_t *bdaddr)
760 {
761         struct discovery_state *cache = &hdev->discovery;
762         struct inquiry_entry *e;
763
764         BT_DBG("cache %p, %pMR", cache, bdaddr);
765
766         list_for_each_entry(e, &cache->unknown, list) {
767                 if (!bacmp(&e->data.bdaddr, bdaddr))
768                         return e;
769         }
770
771         return NULL;
772 }
773
774 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
775                                                        bdaddr_t *bdaddr,
776                                                        int state)
777 {
778         struct discovery_state *cache = &hdev->discovery;
779         struct inquiry_entry *e;
780
781         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
782
783         list_for_each_entry(e, &cache->resolve, list) {
784                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
785                         return e;
786                 if (!bacmp(&e->data.bdaddr, bdaddr))
787                         return e;
788         }
789
790         return NULL;
791 }
792
793 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
794                                       struct inquiry_entry *ie)
795 {
796         struct discovery_state *cache = &hdev->discovery;
797         struct list_head *pos = &cache->resolve;
798         struct inquiry_entry *p;
799
800         list_del(&ie->list);
801
802         list_for_each_entry(p, &cache->resolve, list) {
803                 if (p->name_state != NAME_PENDING &&
804                     abs(p->data.rssi) >= abs(ie->data.rssi))
805                         break;
806                 pos = &p->list;
807         }
808
809         list_add(&ie->list, pos);
810 }
811
812 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
813                               bool name_known, bool *ssp)
814 {
815         struct discovery_state *cache = &hdev->discovery;
816         struct inquiry_entry *ie;
817
818         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
819
820         hci_remove_remote_oob_data(hdev, &data->bdaddr);
821
822         if (ssp)
823                 *ssp = data->ssp_mode;
824
825         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
826         if (ie) {
827                 if (ie->data.ssp_mode && ssp)
828                         *ssp = true;
829
830                 if (ie->name_state == NAME_NEEDED &&
831                     data->rssi != ie->data.rssi) {
832                         ie->data.rssi = data->rssi;
833                         hci_inquiry_cache_update_resolve(hdev, ie);
834                 }
835
836                 goto update;
837         }
838
839         /* Entry not in the cache. Add new one. */
840         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
841         if (!ie)
842                 return false;
843
844         list_add(&ie->all, &cache->all);
845
846         if (name_known) {
847                 ie->name_state = NAME_KNOWN;
848         } else {
849                 ie->name_state = NAME_NOT_KNOWN;
850                 list_add(&ie->list, &cache->unknown);
851         }
852
853 update:
854         if (name_known && ie->name_state != NAME_KNOWN &&
855             ie->name_state != NAME_PENDING) {
856                 ie->name_state = NAME_KNOWN;
857                 list_del(&ie->list);
858         }
859
860         memcpy(&ie->data, data, sizeof(*data));
861         ie->timestamp = jiffies;
862         cache->timestamp = jiffies;
863
864         if (ie->name_state == NAME_NOT_KNOWN)
865                 return false;
866
867         return true;
868 }
869
870 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
871 {
872         struct discovery_state *cache = &hdev->discovery;
873         struct inquiry_info *info = (struct inquiry_info *) buf;
874         struct inquiry_entry *e;
875         int copied = 0;
876
877         list_for_each_entry(e, &cache->all, all) {
878                 struct inquiry_data *data = &e->data;
879
880                 if (copied >= num)
881                         break;
882
883                 bacpy(&info->bdaddr, &data->bdaddr);
884                 info->pscan_rep_mode    = data->pscan_rep_mode;
885                 info->pscan_period_mode = data->pscan_period_mode;
886                 info->pscan_mode        = data->pscan_mode;
887                 memcpy(info->dev_class, data->dev_class, 3);
888                 info->clock_offset      = data->clock_offset;
889
890                 info++;
891                 copied++;
892         }
893
894         BT_DBG("cache %p, copied %d", cache, copied);
895         return copied;
896 }
897
898 static void hci_inq_req(struct hci_request *req, unsigned long opt)
899 {
900         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
901         struct hci_dev *hdev = req->hdev;
902         struct hci_cp_inquiry cp;
903
904         BT_DBG("%s", hdev->name);
905
906         if (test_bit(HCI_INQUIRY, &hdev->flags))
907                 return;
908
909         /* Start Inquiry */
910         memcpy(&cp.lap, &ir->lap, 3);
911         cp.length  = ir->length;
912         cp.num_rsp = ir->num_rsp;
913         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
914 }
915
916 static int wait_inquiry(void *word)
917 {
918         schedule();
919         return signal_pending(current);
920 }
921
922 int hci_inquiry(void __user *arg)
923 {
924         __u8 __user *ptr = arg;
925         struct hci_inquiry_req ir;
926         struct hci_dev *hdev;
927         int err = 0, do_inquiry = 0, max_rsp;
928         long timeo;
929         __u8 *buf;
930
931         if (copy_from_user(&ir, ptr, sizeof(ir)))
932                 return -EFAULT;
933
934         hdev = hci_dev_get(ir.dev_id);
935         if (!hdev)
936                 return -ENODEV;
937
938         hci_dev_lock(hdev);
939         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
940             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
941                 inquiry_cache_flush(hdev);
942                 do_inquiry = 1;
943         }
944         hci_dev_unlock(hdev);
945
946         timeo = ir.length * msecs_to_jiffies(2000);
947
948         if (do_inquiry) {
949                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
950                                    timeo);
951                 if (err < 0)
952                         goto done;
953
954                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
955                  * cleared). If it is interrupted by a signal, return -EINTR.
956                  */
957                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
958                                 TASK_INTERRUPTIBLE))
959                         return -EINTR;
960         }
961
962         /* For an unlimited number of responses (num_rsp == 0), use a
963          * buffer with 255 entries.
964          */
965         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
966
967         /* inquiry_cache_dump() can't sleep, so allocate a temporary
968          * buffer here and copy it to user space afterwards.
969          */
970         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
971         if (!buf) {
972                 err = -ENOMEM;
973                 goto done;
974         }
975
976         hci_dev_lock(hdev);
977         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
978         hci_dev_unlock(hdev);
979
980         BT_DBG("num_rsp %d", ir.num_rsp);
981
982         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
983                 ptr += sizeof(ir);
984                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
985                                  ir.num_rsp))
986                         err = -EFAULT;
987         } else
988                 err = -EFAULT;
989
990         kfree(buf);
991
992 done:
993         hci_dev_put(hdev);
994         return err;
995 }
996
997 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
998 {
999         u8 ad_len = 0, flags = 0;
1000         size_t name_len;
1001
1002         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1003                 flags |= LE_AD_GENERAL;
1004
1005         if (!lmp_bredr_capable(hdev))
1006                 flags |= LE_AD_NO_BREDR;
1007
1008         if (lmp_le_br_capable(hdev))
1009                 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1010
1011         if (lmp_host_le_br_capable(hdev))
1012                 flags |= LE_AD_SIM_LE_BREDR_HOST;
1013
1014         if (flags) {
1015                 BT_DBG("adv flags 0x%02x", flags);
1016
1017                 ptr[0] = 2;
1018                 ptr[1] = EIR_FLAGS;
1019                 ptr[2] = flags;
1020
1021                 ad_len += 3;
1022                 ptr += 3;
1023         }
1024
1025         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1026                 ptr[0] = 2;
1027                 ptr[1] = EIR_TX_POWER;
1028                 ptr[2] = (u8) hdev->adv_tx_power;
1029
1030                 ad_len += 3;
1031                 ptr += 3;
1032         }
1033
1034         name_len = strlen(hdev->dev_name);
1035         if (name_len > 0) {
1036                 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1037
1038                 if (name_len > max_len) {
1039                         name_len = max_len;
1040                         ptr[1] = EIR_NAME_SHORT;
1041                 } else
1042                         ptr[1] = EIR_NAME_COMPLETE;
1043
1044                 ptr[0] = name_len + 1;
1045
1046                 memcpy(ptr + 2, hdev->dev_name, name_len);
1047
1048                 ad_len += (name_len + 2);
1049                 ptr += (name_len + 2);
1050         }
1051
1052         return ad_len;
1053 }
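
/* Illustrative sketch (not part of this file): advertising data is a
 * sequence of length-prefixed structures, where each length byte
 * counts the type byte plus the value bytes. A minimal walk over a
 * buffer built by create_ad() could look like this:
 */
static void example_parse_ad(const u8 *data, u8 len)
{
        while (len >= 2) {
                u8 field_len = data[0];         /* counts type + value */

                if (field_len == 0 || field_len + 1 > len)
                        break;

                /* data[1] is the AD type (e.g. EIR_FLAGS), bytes
                 * data[2] .. data[field_len] hold the value.
                 */

                data += field_len + 1;
                len -= field_len + 1;
        }
}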
1054
1055 void hci_update_ad(struct hci_request *req)
1056 {
1057         struct hci_dev *hdev = req->hdev;
1058         struct hci_cp_le_set_adv_data cp;
1059         u8 len;
1060
1061         if (!lmp_le_capable(hdev))
1062                 return;
1063
1064         memset(&cp, 0, sizeof(cp));
1065
1066         len = create_ad(hdev, cp.data);
1067
1068         if (hdev->adv_data_len == len &&
1069             memcmp(cp.data, hdev->adv_data, len) == 0)
1070                 return;
1071
1072         memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1073         hdev->adv_data_len = len;
1074
1075         cp.length = len;
1076
1077         hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1078 }
1079
1080 /* ---- HCI ioctl helpers ---- */
1081
1082 int hci_dev_open(__u16 dev)
1083 {
1084         struct hci_dev *hdev;
1085         int ret = 0;
1086
1087         hdev = hci_dev_get(dev);
1088         if (!hdev)
1089                 return -ENODEV;
1090
1091         BT_DBG("%s %p", hdev->name, hdev);
1092
1093         hci_req_lock(hdev);
1094
1095         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1096                 ret = -ENODEV;
1097                 goto done;
1098         }
1099
1100         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1101                 ret = -ERFKILL;
1102                 goto done;
1103         }
1104
1105         if (test_bit(HCI_UP, &hdev->flags)) {
1106                 ret = -EALREADY;
1107                 goto done;
1108         }
1109
1110         if (hdev->open(hdev)) {
1111                 ret = -EIO;
1112                 goto done;
1113         }
1114
1115         atomic_set(&hdev->cmd_cnt, 1);
1116         set_bit(HCI_INIT, &hdev->flags);
1117
1118         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1119                 ret = hdev->setup(hdev);
1120
1121         if (!ret) {
1122                 /* Treat all non-BR/EDR controllers as raw devices if
1123                  * enable_hs is not set.
1124                  */
1125                 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1126                         set_bit(HCI_RAW, &hdev->flags);
1127
1128                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1129                         set_bit(HCI_RAW, &hdev->flags);
1130
1131                 if (!test_bit(HCI_RAW, &hdev->flags))
1132                         ret = __hci_init(hdev);
1133         }
1134
1135         clear_bit(HCI_INIT, &hdev->flags);
1136
1137         if (!ret) {
1138                 hci_dev_hold(hdev);
1139                 set_bit(HCI_UP, &hdev->flags);
1140                 hci_notify(hdev, HCI_DEV_UP);
1141                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1142                     mgmt_valid_hdev(hdev)) {
1143                         hci_dev_lock(hdev);
1144                         mgmt_powered(hdev, 1);
1145                         hci_dev_unlock(hdev);
1146                 }
1147         } else {
1148                 /* Init failed, cleanup */
1149                 flush_work(&hdev->tx_work);
1150                 flush_work(&hdev->cmd_work);
1151                 flush_work(&hdev->rx_work);
1152
1153                 skb_queue_purge(&hdev->cmd_q);
1154                 skb_queue_purge(&hdev->rx_q);
1155
1156                 if (hdev->flush)
1157                         hdev->flush(hdev);
1158
1159                 if (hdev->sent_cmd) {
1160                         kfree_skb(hdev->sent_cmd);
1161                         hdev->sent_cmd = NULL;
1162                 }
1163
1164                 hdev->close(hdev);
1165                 hdev->flags = 0;
1166         }
1167
1168 done:
1169         hci_req_unlock(hdev);
1170         hci_dev_put(hdev);
1171         return ret;
1172 }
1173
1174 static int hci_dev_do_close(struct hci_dev *hdev)
1175 {
1176         BT_DBG("%s %p", hdev->name, hdev);
1177
1178         cancel_work_sync(&hdev->le_scan);
1179
1180         cancel_delayed_work(&hdev->power_off);
1181
1182         hci_req_cancel(hdev, ENODEV);
1183         hci_req_lock(hdev);
1184
1185         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1186                 del_timer_sync(&hdev->cmd_timer);
1187                 hci_req_unlock(hdev);
1188                 return 0;
1189         }
1190
1191         /* Flush RX and TX works */
1192         flush_work(&hdev->tx_work);
1193         flush_work(&hdev->rx_work);
1194
1195         if (hdev->discov_timeout > 0) {
1196                 cancel_delayed_work(&hdev->discov_off);
1197                 hdev->discov_timeout = 0;
1198                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1199         }
1200
1201         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1202                 cancel_delayed_work(&hdev->service_cache);
1203
1204         cancel_delayed_work_sync(&hdev->le_scan_disable);
1205
1206         hci_dev_lock(hdev);
1207         inquiry_cache_flush(hdev);
1208         hci_conn_hash_flush(hdev);
1209         hci_dev_unlock(hdev);
1210
1211         hci_notify(hdev, HCI_DEV_DOWN);
1212
1213         if (hdev->flush)
1214                 hdev->flush(hdev);
1215
1216         /* Reset device */
1217         skb_queue_purge(&hdev->cmd_q);
1218         atomic_set(&hdev->cmd_cnt, 1);
1219         if (!test_bit(HCI_RAW, &hdev->flags) &&
1220             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1221                 set_bit(HCI_INIT, &hdev->flags);
1222                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1223                 clear_bit(HCI_INIT, &hdev->flags);
1224         }
1225
1226         /* Flush cmd work */
1227         flush_work(&hdev->cmd_work);
1228
1229         /* Drop queues */
1230         skb_queue_purge(&hdev->rx_q);
1231         skb_queue_purge(&hdev->cmd_q);
1232         skb_queue_purge(&hdev->raw_q);
1233
1234         /* Drop last sent command */
1235         if (hdev->sent_cmd) {
1236                 del_timer_sync(&hdev->cmd_timer);
1237                 kfree_skb(hdev->sent_cmd);
1238                 hdev->sent_cmd = NULL;
1239         }
1240
1241         kfree_skb(hdev->recv_evt);
1242         hdev->recv_evt = NULL;
1243
1244         /* After this point our queues are empty
1245          * and no tasks are scheduled. */
1246         hdev->close(hdev);
1247
1248         /* Clear flags */
1249         hdev->flags = 0;
1250         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1251
1252         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1253             mgmt_valid_hdev(hdev)) {
1254                 hci_dev_lock(hdev);
1255                 mgmt_powered(hdev, 0);
1256                 hci_dev_unlock(hdev);
1257         }
1258
1259         /* Controller radio is available but is currently powered down */
1260         hdev->amp_status = 0;
1261
1262         memset(hdev->eir, 0, sizeof(hdev->eir));
1263         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1264
1265         hci_req_unlock(hdev);
1266
1267         hci_dev_put(hdev);
1268         return 0;
1269 }
1270
1271 int hci_dev_close(__u16 dev)
1272 {
1273         struct hci_dev *hdev;
1274         int err;
1275
1276         hdev = hci_dev_get(dev);
1277         if (!hdev)
1278                 return -ENODEV;
1279
1280         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1281                 cancel_delayed_work(&hdev->power_off);
1282
1283         err = hci_dev_do_close(hdev);
1284
1285         hci_dev_put(hdev);
1286         return err;
1287 }
1288
1289 int hci_dev_reset(__u16 dev)
1290 {
1291         struct hci_dev *hdev;
1292         int ret = 0;
1293
1294         hdev = hci_dev_get(dev);
1295         if (!hdev)
1296                 return -ENODEV;
1297
1298         hci_req_lock(hdev);
1299
1300         if (!test_bit(HCI_UP, &hdev->flags))
1301                 goto done;
1302
1303         /* Drop queues */
1304         skb_queue_purge(&hdev->rx_q);
1305         skb_queue_purge(&hdev->cmd_q);
1306
1307         hci_dev_lock(hdev);
1308         inquiry_cache_flush(hdev);
1309         hci_conn_hash_flush(hdev);
1310         hci_dev_unlock(hdev);
1311
1312         if (hdev->flush)
1313                 hdev->flush(hdev);
1314
1315         atomic_set(&hdev->cmd_cnt, 1);
1316         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1317
1318         if (!test_bit(HCI_RAW, &hdev->flags))
1319                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1320
1321 done:
1322         hci_req_unlock(hdev);
1323         hci_dev_put(hdev);
1324         return ret;
1325 }
1326
1327 int hci_dev_reset_stat(__u16 dev)
1328 {
1329         struct hci_dev *hdev;
1330         int ret = 0;
1331
1332         hdev = hci_dev_get(dev);
1333         if (!hdev)
1334                 return -ENODEV;
1335
1336         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1337
1338         hci_dev_put(hdev);
1339
1340         return ret;
1341 }
1342
1343 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1344 {
1345         struct hci_dev *hdev;
1346         struct hci_dev_req dr;
1347         int err = 0;
1348
1349         if (copy_from_user(&dr, arg, sizeof(dr)))
1350                 return -EFAULT;
1351
1352         hdev = hci_dev_get(dr.dev_id);
1353         if (!hdev)
1354                 return -ENODEV;
1355
1356         switch (cmd) {
1357         case HCISETAUTH:
1358                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1359                                    HCI_INIT_TIMEOUT);
1360                 break;
1361
1362         case HCISETENCRYPT:
1363                 if (!lmp_encrypt_capable(hdev)) {
1364                         err = -EOPNOTSUPP;
1365                         break;
1366                 }
1367
1368                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1369                         /* Auth must be enabled first */
1370                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1371                                            HCI_INIT_TIMEOUT);
1372                         if (err)
1373                                 break;
1374                 }
1375
1376                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1377                                    HCI_INIT_TIMEOUT);
1378                 break;
1379
1380         case HCISETSCAN:
1381                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1382                                    HCI_INIT_TIMEOUT);
1383                 break;
1384
1385         case HCISETLINKPOL:
1386                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1387                                    HCI_INIT_TIMEOUT);
1388                 break;
1389
1390         case HCISETLINKMODE:
1391                 hdev->link_mode = ((__u16) dr.dev_opt) &
1392                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1393                 break;
1394
1395         case HCISETPTYPE:
1396                 hdev->pkt_type = (__u16) dr.dev_opt;
1397                 break;
1398
1399         case HCISETACLMTU:
1400                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1401                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1402                 break;
1403
1404         case HCISETSCOMTU:
1405                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1406                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1407                 break;
1408
1409         default:
1410                 err = -EINVAL;
1411                 break;
1412         }
1413
1414         hci_dev_put(hdev);
1415         return err;
1416 }
1417
1418 int hci_get_dev_list(void __user *arg)
1419 {
1420         struct hci_dev *hdev;
1421         struct hci_dev_list_req *dl;
1422         struct hci_dev_req *dr;
1423         int n = 0, size, err;
1424         __u16 dev_num;
1425
1426         if (get_user(dev_num, (__u16 __user *) arg))
1427                 return -EFAULT;
1428
1429         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1430                 return -EINVAL;
1431
1432         size = sizeof(*dl) + dev_num * sizeof(*dr);
1433
1434         dl = kzalloc(size, GFP_KERNEL);
1435         if (!dl)
1436                 return -ENOMEM;
1437
1438         dr = dl->dev_req;
1439
1440         read_lock(&hci_dev_list_lock);
1441         list_for_each_entry(hdev, &hci_dev_list, list) {
1442                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1443                         cancel_delayed_work(&hdev->power_off);
1444
1445                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1446                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1447
1448                 (dr + n)->dev_id  = hdev->id;
1449                 (dr + n)->dev_opt = hdev->flags;
1450
1451                 if (++n >= dev_num)
1452                         break;
1453         }
1454         read_unlock(&hci_dev_list_lock);
1455
1456         dl->dev_num = n;
1457         size = sizeof(*dl) + n * sizeof(*dr);
1458
1459         err = copy_to_user(arg, dl, size);
1460         kfree(dl);
1461
1462         return err ? -EFAULT : 0;
1463 }
1464
1465 int hci_get_dev_info(void __user *arg)
1466 {
1467         struct hci_dev *hdev;
1468         struct hci_dev_info di;
1469         int err = 0;
1470
1471         if (copy_from_user(&di, arg, sizeof(di)))
1472                 return -EFAULT;
1473
1474         hdev = hci_dev_get(di.dev_id);
1475         if (!hdev)
1476                 return -ENODEV;
1477
1478         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1479                 cancel_delayed_work_sync(&hdev->power_off);
1480
1481         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1482                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1483
1484         strcpy(di.name, hdev->name);
1485         di.bdaddr   = hdev->bdaddr;
1486         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1487         di.flags    = hdev->flags;
1488         di.pkt_type = hdev->pkt_type;
1489         if (lmp_bredr_capable(hdev)) {
1490                 di.acl_mtu  = hdev->acl_mtu;
1491                 di.acl_pkts = hdev->acl_pkts;
1492                 di.sco_mtu  = hdev->sco_mtu;
1493                 di.sco_pkts = hdev->sco_pkts;
1494         } else {
1495                 di.acl_mtu  = hdev->le_mtu;
1496                 di.acl_pkts = hdev->le_pkts;
1497                 di.sco_mtu  = 0;
1498                 di.sco_pkts = 0;
1499         }
1500         di.link_policy = hdev->link_policy;
1501         di.link_mode   = hdev->link_mode;
1502
1503         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1504         memcpy(&di.features, &hdev->features, sizeof(di.features));
1505
1506         if (copy_to_user(arg, &di, sizeof(di)))
1507                 err = -EFAULT;
1508
1509         hci_dev_put(hdev);
1510
1511         return err;
1512 }
1513
1514 /* ---- Interface to HCI drivers ---- */
1515
1516 static int hci_rfkill_set_block(void *data, bool blocked)
1517 {
1518         struct hci_dev *hdev = data;
1519
1520         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1521
1522         if (!blocked)
1523                 return 0;
1524
1525         hci_dev_do_close(hdev);
1526
1527         return 0;
1528 }
1529
1530 static const struct rfkill_ops hci_rfkill_ops = {
1531         .set_block = hci_rfkill_set_block,
1532 };
1533
1534 static void hci_power_on(struct work_struct *work)
1535 {
1536         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1537
1538         BT_DBG("%s", hdev->name);
1539
1540         if (hci_dev_open(hdev->id) < 0)
1541                 return;
1542
1543         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1544                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1545                                    HCI_AUTO_OFF_TIMEOUT);
1546
1547         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1548                 mgmt_index_added(hdev);
1549 }
1550
1551 static void hci_power_off(struct work_struct *work)
1552 {
1553         struct hci_dev *hdev = container_of(work, struct hci_dev,
1554                                             power_off.work);
1555
1556         BT_DBG("%s", hdev->name);
1557
1558         hci_dev_do_close(hdev);
1559 }
1560
1561 static void hci_discov_off(struct work_struct *work)
1562 {
1563         struct hci_dev *hdev;
1564         u8 scan = SCAN_PAGE;
1565
1566         hdev = container_of(work, struct hci_dev, discov_off.work);
1567
1568         BT_DBG("%s", hdev->name);
1569
1570         hci_dev_lock(hdev);
1571
1572         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1573
1574         hdev->discov_timeout = 0;
1575
1576         hci_dev_unlock(hdev);
1577 }
1578
1579 int hci_uuids_clear(struct hci_dev *hdev)
1580 {
1581         struct bt_uuid *uuid, *tmp;
1582
1583         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1584                 list_del(&uuid->list);
1585                 kfree(uuid);
1586         }
1587
1588         return 0;
1589 }
1590
1591 int hci_link_keys_clear(struct hci_dev *hdev)
1592 {
1593         struct list_head *p, *n;
1594
1595         list_for_each_safe(p, n, &hdev->link_keys) {
1596                 struct link_key *key;
1597
1598                 key = list_entry(p, struct link_key, list);
1599
1600                 list_del(p);
1601                 kfree(key);
1602         }
1603
1604         return 0;
1605 }
1606
1607 int hci_smp_ltks_clear(struct hci_dev *hdev)
1608 {
1609         struct smp_ltk *k, *tmp;
1610
1611         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1612                 list_del(&k->list);
1613                 kfree(k);
1614         }
1615
1616         return 0;
1617 }
1618
1619 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1620 {
1621         struct link_key *k;
1622
1623         list_for_each_entry(k, &hdev->link_keys, list)
1624                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1625                         return k;
1626
1627         return NULL;
1628 }
1629
1630 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1631                                u8 key_type, u8 old_key_type)
1632 {
1633         /* Legacy key */
1634         if (key_type < 0x03)
1635                 return true;
1636
1637         /* Debug keys are insecure so don't store them persistently */
1638         if (key_type == HCI_LK_DEBUG_COMBINATION)
1639                 return false;
1640
1641         /* Changed combination key and there's no previous one */
1642         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1643                 return false;
1644
1645         /* Security mode 3 case */
1646         if (!conn)
1647                 return true;
1648
1649         /* Both the local and remote side requested some form of bonding */
1650         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1651                 return true;
1652
1653         /* Local side had dedicated bonding as requirement */
1654         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1655                 return true;
1656
1657         /* Remote side had dedicated bonding as requirement */
1658         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1659                 return true;
1660
1661         /* If none of the above criteria match, then don't store the key
1662          * persistently */
1663         return false;
1664 }
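
/* For reference, the auth_type/remote_auth values checked above follow
 * the HCI authentication requirements encoding: 0x00/0x01 no bonding
 * (without/with MITM protection), 0x02/0x03 dedicated bonding,
 * 0x04/0x05 general bonding.
 */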
1665
1666 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1667 {
1668         struct smp_ltk *k;
1669
1670         list_for_each_entry(k, &hdev->long_term_keys, list) {
1671                 if (k->ediv != ediv ||
1672                     memcmp(rand, k->rand, sizeof(k->rand)))
1673                         continue;
1674
1675                 return k;
1676         }
1677
1678         return NULL;
1679 }
1680
1681 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1682                                      u8 addr_type)
1683 {
1684         struct smp_ltk *k;
1685
1686         list_for_each_entry(k, &hdev->long_term_keys, list)
1687                 if (addr_type == k->bdaddr_type &&
1688                     bacmp(bdaddr, &k->bdaddr) == 0)
1689                         return k;
1690
1691         return NULL;
1692 }
1693
1694 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1695                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1696 {
1697         struct link_key *key, *old_key;
1698         u8 old_key_type;
1699         bool persistent;
1700
1701         old_key = hci_find_link_key(hdev, bdaddr);
1702         if (old_key) {
1703                 old_key_type = old_key->type;
1704                 key = old_key;
1705         } else {
1706                 old_key_type = conn ? conn->key_type : 0xff;
1707                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1708                 if (!key)
1709                         return -ENOMEM;
1710                 list_add(&key->list, &hdev->link_keys);
1711         }
1712
1713         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1714
1715         /* Some buggy controller combinations generate a changed
1716          * combination key for legacy pairing even when there's no
1717          * previous key */
1718         if (type == HCI_LK_CHANGED_COMBINATION &&
1719             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1720                 type = HCI_LK_COMBINATION;
1721                 if (conn)
1722                         conn->key_type = type;
1723         }
1724
1725         bacpy(&key->bdaddr, bdaddr);
1726         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1727         key->pin_len = pin_len;
1728
1729         if (type == HCI_LK_CHANGED_COMBINATION)
1730                 key->type = old_key_type;
1731         else
1732                 key->type = type;
1733
1734         if (!new_key)
1735                 return 0;
1736
1737         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1738
1739         mgmt_new_link_key(hdev, key, persistent);
1740
1741         if (conn)
1742                 conn->flush_key = !persistent;
1743
1744         return 0;
1745 }
1746
1747 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1748                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
1749                 __le16 ediv, u8 rand[8])
1750 {
1751         struct smp_ltk *key, *old_key;
1752
1753         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1754                 return 0;
1755
1756         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1757         if (old_key)
1758                 key = old_key;
1759         else {
1760                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1761                 if (!key)
1762                         return -ENOMEM;
1763                 list_add(&key->list, &hdev->long_term_keys);
1764         }
1765
1766         bacpy(&key->bdaddr, bdaddr);
1767         key->bdaddr_type = addr_type;
1768         memcpy(key->val, tk, sizeof(key->val));
1769         key->authenticated = authenticated;
1770         key->ediv = ediv;
1771         key->enc_size = enc_size;
1772         key->type = type;
1773         memcpy(key->rand, rand, sizeof(key->rand));
1774
1775         if (!new_key)
1776                 return 0;
1777
1778         if (type & HCI_SMP_LTK)
1779                 mgmt_new_ltk(hdev, key, 1);
1780
1781         return 0;
1782 }
1783
1784 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1785 {
1786         struct link_key *key;
1787
1788         key = hci_find_link_key(hdev, bdaddr);
1789         if (!key)
1790                 return -ENOENT;
1791
1792         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1793
1794         list_del(&key->list);
1795         kfree(key);
1796
1797         return 0;
1798 }
1799
1800 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1801 {
1802         struct smp_ltk *k, *tmp;
1803
1804         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1805                 if (bacmp(bdaddr, &k->bdaddr))
1806                         continue;
1807
1808                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1809
1810                 list_del(&k->list);
1811                 kfree(k);
1812         }
1813
1814         return 0;
1815 }
1816
1817 /* HCI command timer function */
1818 static void hci_cmd_timeout(unsigned long arg)
1819 {
1820         struct hci_dev *hdev = (void *) arg;
1821
1822         if (hdev->sent_cmd) {
1823                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1824                 u16 opcode = __le16_to_cpu(sent->opcode);
1825
1826                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1827         } else {
1828                 BT_ERR("%s command tx timeout", hdev->name);
1829         }
1830
1831         atomic_set(&hdev->cmd_cnt, 1);
1832         queue_work(hdev->workqueue, &hdev->cmd_work);
1833 }
1834
1835 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1836                                           bdaddr_t *bdaddr)
1837 {
1838         struct oob_data *data;
1839
1840         list_for_each_entry(data, &hdev->remote_oob_data, list)
1841                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1842                         return data;
1843
1844         return NULL;
1845 }
1846
1847 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1848 {
1849         struct oob_data *data;
1850
1851         data = hci_find_remote_oob_data(hdev, bdaddr);
1852         if (!data)
1853                 return -ENOENT;
1854
1855         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1856
1857         list_del(&data->list);
1858         kfree(data);
1859
1860         return 0;
1861 }
1862
1863 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1864 {
1865         struct oob_data *data, *n;
1866
1867         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1868                 list_del(&data->list);
1869                 kfree(data);
1870         }
1871
1872         return 0;
1873 }
1874
1875 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1876                             u8 *randomizer)
1877 {
1878         struct oob_data *data;
1879
1880         data = hci_find_remote_oob_data(hdev, bdaddr);
1881
1882         if (!data) {
1883                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1884                 if (!data)
1885                         return -ENOMEM;
1886
1887                 bacpy(&data->bdaddr, bdaddr);
1888                 list_add(&data->list, &hdev->remote_oob_data);
1889         }
1890
1891         memcpy(data->hash, hash, sizeof(data->hash));
1892         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1893
1894         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1895
1896         return 0;
1897 }
1898
1899 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1900 {
1901         struct bdaddr_list *b;
1902
1903         list_for_each_entry(b, &hdev->blacklist, list)
1904                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1905                         return b;
1906
1907         return NULL;
1908 }
1909
1910 int hci_blacklist_clear(struct hci_dev *hdev)
1911 {
1912         struct bdaddr_list *b, *n;
1913
1914         list_for_each_entry_safe(b, n, &hdev->blacklist, list) {
1915                 list_del(&b->list);
1916                 kfree(b);
1917         }
1922
1923         return 0;
1924 }
1925
1926 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1927 {
1928         struct bdaddr_list *entry;
1929
1930         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1931                 return -EBADF;
1932
1933         if (hci_blacklist_lookup(hdev, bdaddr))
1934                 return -EEXIST;
1935
1936         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1937         if (!entry)
1938                 return -ENOMEM;
1939
1940         bacpy(&entry->bdaddr, bdaddr);
1941
1942         list_add(&entry->list, &hdev->blacklist);
1943
1944         return mgmt_device_blocked(hdev, bdaddr, type);
1945 }
1946
1947 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1948 {
1949         struct bdaddr_list *entry;
1950
1951         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1952                 return hci_blacklist_clear(hdev);
1953
1954         entry = hci_blacklist_lookup(hdev, bdaddr);
1955         if (!entry)
1956                 return -ENOENT;
1957
1958         list_del(&entry->list);
1959         kfree(entry);
1960
1961         return mgmt_device_unblocked(hdev, bdaddr, type);
1962 }
1963
1964 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1965 {
1966         struct le_scan_params *param = (struct le_scan_params *) opt;
1967         struct hci_cp_le_set_scan_param cp;
1968
1969         memset(&cp, 0, sizeof(cp));
1970         cp.type = param->type;
1971         cp.interval = cpu_to_le16(param->interval);
1972         cp.window = cpu_to_le16(param->window);
1973
1974         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1975 }
1976
1977 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1978 {
1979         struct hci_cp_le_set_scan_enable cp;
1980
1981         memset(&cp, 0, sizeof(cp));
1982         cp.enable = 1;
1983         cp.filter_dup = 1;
1984
1985         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1986 }
1987
1988 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1989                           u16 window, int timeout)
1990 {
1991         long timeo = msecs_to_jiffies(3000);
1992         struct le_scan_params param;
1993         int err;
1994
1995         BT_DBG("%s", hdev->name);
1996
1997         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1998                 return -EINPROGRESS;
1999
2000         param.type = type;
2001         param.interval = interval;
2002         param.window = window;
2003
2004         hci_req_lock(hdev);
2005
2006         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2007                              timeo);
2008         if (!err)
2009                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2010
2011         hci_req_unlock(hdev);
2012
2013         if (err < 0)
2014                 return err;
2015
2016         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2017                            msecs_to_jiffies(timeout));
2018
2019         return 0;
2020 }
2021
2022 int hci_cancel_le_scan(struct hci_dev *hdev)
2023 {
2024         BT_DBG("%s", hdev->name);
2025
2026         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2027                 return -EALREADY;
2028
2029         if (cancel_delayed_work(&hdev->le_scan_disable)) {
2030                 struct hci_cp_le_set_scan_enable cp;
2031
2032                 /* Send HCI command to disable LE Scan */
2033                 memset(&cp, 0, sizeof(cp));
2034                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2035         }
2036
2037         return 0;
2038 }
2039
2040 static void le_scan_disable_work(struct work_struct *work)
2041 {
2042         struct hci_dev *hdev = container_of(work, struct hci_dev,
2043                                             le_scan_disable.work);
2044         struct hci_cp_le_set_scan_enable cp;
2045
2046         BT_DBG("%s", hdev->name);
2047
2048         memset(&cp, 0, sizeof(cp));
2049
2050         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2051 }
2052
2053 static void le_scan_work(struct work_struct *work)
2054 {
2055         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2056         struct le_scan_params *param = &hdev->le_scan_params;
2057
2058         BT_DBG("%s", hdev->name);
2059
2060         hci_do_le_scan(hdev, param->type, param->interval, param->window,
2061                        param->timeout);
2062 }
2063
2064 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2065                 int timeout)
2066 {
2067         struct le_scan_params *param = &hdev->le_scan_params;
2068
2069         BT_DBG("%s", hdev->name);
2070
2071         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2072                 return -ENOTSUPP;
2073
2074         if (work_busy(&hdev->le_scan))
2075                 return -EINPROGRESS;
2076
2077         param->type = type;
2078         param->interval = interval;
2079         param->window = window;
2080         param->timeout = timeout;
2081
2082         queue_work(system_long_wq, &hdev->le_scan);
2083
2084         return 0;
2085 }
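
/* Illustrative sketch (not part of the original file): starting a
 * time-limited active LE scan via the helper above. Interval and window
 * are in 0.625 ms units and the timeout is in milliseconds (it is passed
 * to msecs_to_jiffies() in hci_do_le_scan()); the concrete values here
 * are assumptions chosen only for illustration.
 */
static int __maybe_unused example_start_le_scan(struct hci_dev *hdev)
{
        /* Active scan (0x01), 10 ms interval and window, stop after 10 s */
        return hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10000);
}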
2086
2087 /* Alloc HCI device */
2088 struct hci_dev *hci_alloc_dev(void)
2089 {
2090         struct hci_dev *hdev;
2091
2092         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2093         if (!hdev)
2094                 return NULL;
2095
2096         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2097         hdev->esco_type = (ESCO_HV1);
2098         hdev->link_mode = (HCI_LM_ACCEPT);
2099         hdev->io_capability = 0x03; /* No Input No Output */
2100         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2101         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2102
2103         hdev->sniff_max_interval = 800;
2104         hdev->sniff_min_interval = 80;
2105
2106         mutex_init(&hdev->lock);
2107         mutex_init(&hdev->req_lock);
2108
2109         INIT_LIST_HEAD(&hdev->mgmt_pending);
2110         INIT_LIST_HEAD(&hdev->blacklist);
2111         INIT_LIST_HEAD(&hdev->uuids);
2112         INIT_LIST_HEAD(&hdev->link_keys);
2113         INIT_LIST_HEAD(&hdev->long_term_keys);
2114         INIT_LIST_HEAD(&hdev->remote_oob_data);
2115         INIT_LIST_HEAD(&hdev->conn_hash.list);
2116
2117         INIT_WORK(&hdev->rx_work, hci_rx_work);
2118         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2119         INIT_WORK(&hdev->tx_work, hci_tx_work);
2120         INIT_WORK(&hdev->power_on, hci_power_on);
2121         INIT_WORK(&hdev->le_scan, le_scan_work);
2122
2123         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2124         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2125         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2126
2127         skb_queue_head_init(&hdev->rx_q);
2128         skb_queue_head_init(&hdev->cmd_q);
2129         skb_queue_head_init(&hdev->raw_q);
2130
2131         init_waitqueue_head(&hdev->req_wait_q);
2132
2133         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2134
2135         hci_init_sysfs(hdev);
2136         discovery_init(hdev);
2137
2138         return hdev;
2139 }
2140 EXPORT_SYMBOL(hci_alloc_dev);
2141
2142 /* Free HCI device */
2143 void hci_free_dev(struct hci_dev *hdev)
2144 {
2145         /* Will be freed via the device release callback */
2146         put_device(&hdev->dev);
2147 }
2148 EXPORT_SYMBOL(hci_free_dev);
2149
2150 /* Register HCI device */
2151 int hci_register_dev(struct hci_dev *hdev)
2152 {
2153         int id, error;
2154
2155         if (!hdev->open || !hdev->close)
2156                 return -EINVAL;
2157
2158         /* Do not allow HCI_AMP devices to register at index 0,
2159          * so the index can be used as the AMP controller ID.
2160          */
2161         switch (hdev->dev_type) {
2162         case HCI_BREDR:
2163                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2164                 break;
2165         case HCI_AMP:
2166                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2167                 break;
2168         default:
2169                 return -EINVAL;
2170         }
2171
2172         if (id < 0)
2173                 return id;
2174
2175         sprintf(hdev->name, "hci%d", id);
2176         hdev->id = id;
2177
2178         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2179
2180         write_lock(&hci_dev_list_lock);
2181         list_add(&hdev->list, &hci_dev_list);
2182         write_unlock(&hci_dev_list_lock);
2183
2184         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2185                                           WQ_MEM_RECLAIM, 1);
2186         if (!hdev->workqueue) {
2187                 error = -ENOMEM;
2188                 goto err;
2189         }
2190
2191         hdev->req_workqueue = alloc_workqueue(hdev->name,
2192                                               WQ_HIGHPRI | WQ_UNBOUND |
2193                                               WQ_MEM_RECLAIM, 1);
2194         if (!hdev->req_workqueue) {
2195                 destroy_workqueue(hdev->workqueue);
2196                 error = -ENOMEM;
2197                 goto err;
2198         }
2199
2200         error = hci_add_sysfs(hdev);
2201         if (error < 0)
2202                 goto err_wqueue;
2203
2204         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2205                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2206                                     hdev);
2207         if (hdev->rfkill) {
2208                 if (rfkill_register(hdev->rfkill) < 0) {
2209                         rfkill_destroy(hdev->rfkill);
2210                         hdev->rfkill = NULL;
2211                 }
2212         }
2213
2214         set_bit(HCI_SETUP, &hdev->dev_flags);
2215
2216         if (hdev->dev_type != HCI_AMP)
2217                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2218
2219         hci_notify(hdev, HCI_DEV_REG);
2220         hci_dev_hold(hdev);
2221
2222         queue_work(hdev->req_workqueue, &hdev->power_on);
2223
2224         return id;
2225
2226 err_wqueue:
2227         destroy_workqueue(hdev->workqueue);
2228         destroy_workqueue(hdev->req_workqueue);
2229 err:
2230         ida_simple_remove(&hci_index_ida, hdev->id);
2231         write_lock(&hci_dev_list_lock);
2232         list_del(&hdev->list);
2233         write_unlock(&hci_dev_list_lock);
2234
2235         return error;
2236 }
2237 EXPORT_SYMBOL(hci_register_dev);
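
/* Illustrative sketch (not part of the original file): the minimal
 * alloc/register sequence a transport driver would follow. The example_*
 * callbacks are hypothetical stubs; hci_register_dev() above rejects a
 * device without open and close, and hci_send_frame() later invokes send.
 */
static int example_open(struct hci_dev *hdev)
{
        return 0;
}

static int example_close(struct hci_dev *hdev)
{
        return 0;
}

static int example_send(struct sk_buff *skb)
{
        /* A real driver would push the frame to its transport here */
        kfree_skb(skb);
        return 0;
}

static int __maybe_unused example_driver_probe(void)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus   = HCI_VIRTUAL;
        hdev->open  = example_open;
        hdev->close = example_close;
        hdev->send  = example_send;

        err = hci_register_dev(hdev);
        if (err < 0) {
                hci_free_dev(hdev);
                return err;
        }

        return 0;
}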
2238
2239 /* Unregister HCI device */
2240 void hci_unregister_dev(struct hci_dev *hdev)
2241 {
2242         int i, id;
2243
2244         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2245
2246         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2247
2248         id = hdev->id;
2249
2250         write_lock(&hci_dev_list_lock);
2251         list_del(&hdev->list);
2252         write_unlock(&hci_dev_list_lock);
2253
2254         hci_dev_do_close(hdev);
2255
2256         for (i = 0; i < NUM_REASSEMBLY; i++)
2257                 kfree_skb(hdev->reassembly[i]);
2258
2259         cancel_work_sync(&hdev->power_on);
2260
2261         if (!test_bit(HCI_INIT, &hdev->flags) &&
2262             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2263                 hci_dev_lock(hdev);
2264                 mgmt_index_removed(hdev);
2265                 hci_dev_unlock(hdev);
2266         }
2267
2268         /* mgmt_index_removed should take care of emptying the
2269          * pending list */
2270         BUG_ON(!list_empty(&hdev->mgmt_pending));
2271
2272         hci_notify(hdev, HCI_DEV_UNREG);
2273
2274         if (hdev->rfkill) {
2275                 rfkill_unregister(hdev->rfkill);
2276                 rfkill_destroy(hdev->rfkill);
2277         }
2278
2279         hci_del_sysfs(hdev);
2280
2281         destroy_workqueue(hdev->workqueue);
2282         destroy_workqueue(hdev->req_workqueue);
2283
2284         hci_dev_lock(hdev);
2285         hci_blacklist_clear(hdev);
2286         hci_uuids_clear(hdev);
2287         hci_link_keys_clear(hdev);
2288         hci_smp_ltks_clear(hdev);
2289         hci_remote_oob_data_clear(hdev);
2290         hci_dev_unlock(hdev);
2291
2292         hci_dev_put(hdev);
2293
2294         ida_simple_remove(&hci_index_ida, id);
2295 }
2296 EXPORT_SYMBOL(hci_unregister_dev);
2297
2298 /* Suspend HCI device */
2299 int hci_suspend_dev(struct hci_dev *hdev)
2300 {
2301         hci_notify(hdev, HCI_DEV_SUSPEND);
2302         return 0;
2303 }
2304 EXPORT_SYMBOL(hci_suspend_dev);
2305
2306 /* Resume HCI device */
2307 int hci_resume_dev(struct hci_dev *hdev)
2308 {
2309         hci_notify(hdev, HCI_DEV_RESUME);
2310         return 0;
2311 }
2312 EXPORT_SYMBOL(hci_resume_dev);
2313
2314 /* Receive frame from HCI drivers */
2315 int hci_recv_frame(struct sk_buff *skb)
2316 {
2317         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2318         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2319                       !test_bit(HCI_INIT, &hdev->flags))) {
2320                 kfree_skb(skb);
2321                 return -ENXIO;
2322         }
2323
2324         /* Incoming skb */
2325         bt_cb(skb)->incoming = 1;
2326
2327         /* Time stamp */
2328         __net_timestamp(skb);
2329
2330         skb_queue_tail(&hdev->rx_q, skb);
2331         queue_work(hdev->workqueue, &hdev->rx_work);
2332
2333         return 0;
2334 }
2335 EXPORT_SYMBOL(hci_recv_frame);
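
/* Illustrative sketch (not part of the original file): how a driver's
 * receive path would hand a complete HCI event frame to the core. The
 * buffer and length parameters are hypothetical.
 */
static int __maybe_unused example_driver_receive(struct hci_dev *hdev,
                                                 const void *buf, int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        memcpy(skb_put(skb, len), buf, len);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

        return hci_recv_frame(skb);
}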
2336
2337 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2338                           int count, __u8 index)
2339 {
2340         int len = 0;
2341         int hlen = 0;
2342         int remain = count;
2343         struct sk_buff *skb;
2344         struct bt_skb_cb *scb;
2345
2346         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2347             index >= NUM_REASSEMBLY)
2348                 return -EILSEQ;
2349
2350         skb = hdev->reassembly[index];
2351
2352         if (!skb) {
2353                 switch (type) {
2354                 case HCI_ACLDATA_PKT:
2355                         len = HCI_MAX_FRAME_SIZE;
2356                         hlen = HCI_ACL_HDR_SIZE;
2357                         break;
2358                 case HCI_EVENT_PKT:
2359                         len = HCI_MAX_EVENT_SIZE;
2360                         hlen = HCI_EVENT_HDR_SIZE;
2361                         break;
2362                 case HCI_SCODATA_PKT:
2363                         len = HCI_MAX_SCO_SIZE;
2364                         hlen = HCI_SCO_HDR_SIZE;
2365                         break;
2366                 }
2367
2368                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2369                 if (!skb)
2370                         return -ENOMEM;
2371
2372                 scb = (void *) skb->cb;
2373                 scb->expect = hlen;
2374                 scb->pkt_type = type;
2375
2376                 skb->dev = (void *) hdev;
2377                 hdev->reassembly[index] = skb;
2378         }
2379
2380         while (count) {
2381                 scb = (void *) skb->cb;
2382                 len = min_t(uint, scb->expect, count);
2383
2384                 memcpy(skb_put(skb, len), data, len);
2385
2386                 count -= len;
2387                 data += len;
2388                 scb->expect -= len;
2389                 remain = count;
2390
2391                 switch (type) {
2392                 case HCI_EVENT_PKT:
2393                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2394                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2395                                 scb->expect = h->plen;
2396
2397                                 if (skb_tailroom(skb) < scb->expect) {
2398                                         kfree_skb(skb);
2399                                         hdev->reassembly[index] = NULL;
2400                                         return -ENOMEM;
2401                                 }
2402                         }
2403                         break;
2404
2405                 case HCI_ACLDATA_PKT:
2406                         if (skb->len == HCI_ACL_HDR_SIZE) {
2407                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2408                                 scb->expect = __le16_to_cpu(h->dlen);
2409
2410                                 if (skb_tailroom(skb) < scb->expect) {
2411                                         kfree_skb(skb);
2412                                         hdev->reassembly[index] = NULL;
2413                                         return -ENOMEM;
2414                                 }
2415                         }
2416                         break;
2417
2418                 case HCI_SCODATA_PKT:
2419                         if (skb->len == HCI_SCO_HDR_SIZE) {
2420                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2421                                 scb->expect = h->dlen;
2422
2423                                 if (skb_tailroom(skb) < scb->expect) {
2424                                         kfree_skb(skb);
2425                                         hdev->reassembly[index] = NULL;
2426                                         return -ENOMEM;
2427                                 }
2428                         }
2429                         break;
2430                 }
2431
2432                 if (scb->expect == 0) {
2433                         /* Complete frame */
2434
2435                         bt_cb(skb)->pkt_type = type;
2436                         hci_recv_frame(skb);
2437
2438                         hdev->reassembly[index] = NULL;
2439                         return remain;
2440                 }
2441         }
2442
2443         return remain;
2444 }
2445
2446 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2447 {
2448         int rem = 0;
2449
2450         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2451                 return -EILSEQ;
2452
2453         while (count) {
2454                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2455                 if (rem < 0)
2456                         return rem;
2457
2458                 data += (count - rem);
2459                 count = rem;
2460         }
2461
2462         return rem;
2463 }
2464 EXPORT_SYMBOL(hci_recv_fragment);
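
/* Illustrative sketch (not part of the original file): a driver that
 * receives a known packet type in arbitrarily sized chunks can feed them
 * here and let hci_reassembly() rebuild complete frames. The chunk
 * source is hypothetical.
 */
static void __maybe_unused example_feed_event_chunk(struct hci_dev *hdev,
                                                    void *chunk, int len)
{
        int err;

        err = hci_recv_fragment(hdev, HCI_EVENT_PKT, chunk, len);
        if (err < 0)
                BT_ERR("%s reassembly failed (%d)", hdev->name, err);
}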
2465
2466 #define STREAM_REASSEMBLY 0
2467
2468 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2469 {
2470         int type;
2471         int rem = 0;
2472
2473         while (count) {
2474                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2475
2476                 if (!skb) {
2477                         struct { char type; } *pkt;
2478
2479                         /* Start of the frame */
2480                         pkt = data;
2481                         type = pkt->type;
2482
2483                         data++;
2484                         count--;
2485                 } else
2486                         type = bt_cb(skb)->pkt_type;
2487
2488                 rem = hci_reassembly(hdev, type, data, count,
2489                                      STREAM_REASSEMBLY);
2490                 if (rem < 0)
2491                         return rem;
2492
2493                 data += (count - rem);
2494                 count = rem;
2495         }
2496
2497         return rem;
2498 }
2499 EXPORT_SYMBOL(hci_recv_stream_fragment);
2500
2501 /* ---- Interface to upper protocols ---- */
2502
2503 int hci_register_cb(struct hci_cb *cb)
2504 {
2505         BT_DBG("%p name %s", cb, cb->name);
2506
2507         write_lock(&hci_cb_list_lock);
2508         list_add(&cb->list, &hci_cb_list);
2509         write_unlock(&hci_cb_list_lock);
2510
2511         return 0;
2512 }
2513 EXPORT_SYMBOL(hci_register_cb);
2514
2515 int hci_unregister_cb(struct hci_cb *cb)
2516 {
2517         BT_DBG("%p name %s", cb, cb->name);
2518
2519         write_lock(&hci_cb_list_lock);
2520         list_del(&cb->list);
2521         write_unlock(&hci_cb_list_lock);
2522
2523         return 0;
2524 }
2525 EXPORT_SYMBOL(hci_unregister_cb);
2526
2527 static int hci_send_frame(struct sk_buff *skb)
2528 {
2529         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2530
2531         if (!hdev) {
2532                 kfree_skb(skb);
2533                 return -ENODEV;
2534         }
2535
2536         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2537
2538         /* Time stamp */
2539         __net_timestamp(skb);
2540
2541         /* Send copy to monitor */
2542         hci_send_to_monitor(hdev, skb);
2543
2544         if (atomic_read(&hdev->promisc)) {
2545                 /* Send copy to the sockets */
2546                 hci_send_to_sock(hdev, skb);
2547         }
2548
2549         /* Get rid of skb owner, prior to sending to the driver. */
2550         skb_orphan(skb);
2551
2552         return hdev->send(skb);
2553 }
2554
2555 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2556 {
2557         skb_queue_head_init(&req->cmd_q);
2558         req->hdev = hdev;
2559         req->err = 0;
2560 }
2561
2562 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2563 {
2564         struct hci_dev *hdev = req->hdev;
2565         struct sk_buff *skb;
2566         unsigned long flags;
2567
2568         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2569
2570         /* If an error occurred during request building, remove all HCI
2571          * commands queued on the HCI request queue.
2572          */
2573         if (req->err) {
2574                 skb_queue_purge(&req->cmd_q);
2575                 return req->err;
2576         }
2577
2578         /* Do not allow empty requests */
2579         if (skb_queue_empty(&req->cmd_q))
2580                 return -ENODATA;
2581
2582         skb = skb_peek_tail(&req->cmd_q);
2583         bt_cb(skb)->req.complete = complete;
2584
2585         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2586         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2587         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2588
2589         queue_work(hdev->workqueue, &hdev->cmd_work);
2590
2591         return 0;
2592 }
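
/* Illustrative sketch (not part of the original file): building and
 * running a one-command asynchronous request with the helpers above and
 * below. The scan value 0x03 (inquiry + page scan) is an assumption
 * chosen for illustration.
 */
static int __maybe_unused example_req_scan_enable(struct hci_dev *hdev,
                                                  hci_req_complete_t complete)
{
        struct hci_request req;
        u8 scan = 0x03;

        hci_req_init(&req, hdev);
        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        return hci_req_run(&req, complete);
}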
2593
2594 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2595                                        u32 plen, void *param)
2596 {
2597         int len = HCI_COMMAND_HDR_SIZE + plen;
2598         struct hci_command_hdr *hdr;
2599         struct sk_buff *skb;
2600
2601         skb = bt_skb_alloc(len, GFP_ATOMIC);
2602         if (!skb)
2603                 return NULL;
2604
2605         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2606         hdr->opcode = cpu_to_le16(opcode);
2607         hdr->plen   = plen;
2608
2609         if (plen)
2610                 memcpy(skb_put(skb, plen), param, plen);
2611
2612         BT_DBG("skb len %d", skb->len);
2613
2614         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2615         skb->dev = (void *) hdev;
2616
2617         return skb;
2618 }
2619
2620 /* Send HCI command */
2621 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2622 {
2623         struct sk_buff *skb;
2624
2625         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2626
2627         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2628         if (!skb) {
2629                 BT_ERR("%s no memory for command", hdev->name);
2630                 return -ENOMEM;
2631         }
2632
2633         /* Stand-alone HCI commands must be flagged as
2634          * single-command requests.
2635          */
2636         bt_cb(skb)->req.start = true;
2637
2638         skb_queue_tail(&hdev->cmd_q, skb);
2639         queue_work(hdev->workqueue, &hdev->cmd_work);
2640
2641         return 0;
2642 }
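
/* Illustrative one-liner (sketch, not part of the original file):
 * queueing a parameterless HCI_Reset through the helper above.
 */
static int __maybe_unused example_send_reset(struct hci_dev *hdev)
{
        return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}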
2643
2644 /* Queue a command to an asynchronous HCI request */
2645 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
2646                     u8 event)
2647 {
2648         struct hci_dev *hdev = req->hdev;
2649         struct sk_buff *skb;
2650
2651         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2652
2653         /* If an error occurred during request building, there is no point in
2654          * queueing the HCI command. We can simply return.
2655          */
2656         if (req->err)
2657                 return;
2658
2659         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2660         if (!skb) {
2661                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2662                        hdev->name, opcode);
2663                 req->err = -ENOMEM;
2664                 return;
2665         }
2666
2667         if (skb_queue_empty(&req->cmd_q))
2668                 bt_cb(skb)->req.start = true;
2669
2670         bt_cb(skb)->req.event = event;
2671
2672         skb_queue_tail(&req->cmd_q, skb);
2673 }
2674
2675 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2676 {
2677         hci_req_add_ev(req, opcode, plen, param, 0);
2678 }
2679
2680 /* Get data from the previously sent command */
2681 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2682 {
2683         struct hci_command_hdr *hdr;
2684
2685         if (!hdev->sent_cmd)
2686                 return NULL;
2687
2688         hdr = (void *) hdev->sent_cmd->data;
2689
2690         if (hdr->opcode != cpu_to_le16(opcode))
2691                 return NULL;
2692
2693         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2694
2695         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2696 }
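
/* Illustrative sketch (not part of the original file): an event handler
 * recovering the parameters of the command it is completing. Pairing the
 * helper with HCI_OP_WRITE_SCAN_ENABLE is an assumption for illustration.
 */
static void __maybe_unused example_cc_scan_enable(struct hci_dev *hdev)
{
        u8 *sent;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
        if (!sent)
                return;

        BT_DBG("%s sent scan enable 0x%2.2x", hdev->name, *sent);
}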
2697
2698 /* Send ACL data */
2699 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2700 {
2701         struct hci_acl_hdr *hdr;
2702         int len = skb->len;
2703
2704         skb_push(skb, HCI_ACL_HDR_SIZE);
2705         skb_reset_transport_header(skb);
2706         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2707         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2708         hdr->dlen   = cpu_to_le16(len);
2709 }
2710
2711 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2712                           struct sk_buff *skb, __u16 flags)
2713 {
2714         struct hci_conn *conn = chan->conn;
2715         struct hci_dev *hdev = conn->hdev;
2716         struct sk_buff *list;
2717
2718         skb->len = skb_headlen(skb);
2719         skb->data_len = 0;
2720
2721         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2722
2723         switch (hdev->dev_type) {
2724         case HCI_BREDR:
2725                 hci_add_acl_hdr(skb, conn->handle, flags);
2726                 break;
2727         case HCI_AMP:
2728                 hci_add_acl_hdr(skb, chan->handle, flags);
2729                 break;
2730         default:
2731                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2732                 return;
2733         }
2734
2735         list = skb_shinfo(skb)->frag_list;
2736         if (!list) {
2737                 /* Non-fragmented */
2738                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2739
2740                 skb_queue_tail(queue, skb);
2741         } else {
2742                 /* Fragmented */
2743                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2744
2745                 skb_shinfo(skb)->frag_list = NULL;
2746
2747                 /* Queue all fragments atomically */
2748                 spin_lock(&queue->lock);
2749
2750                 __skb_queue_tail(queue, skb);
2751
2752                 flags &= ~ACL_START;
2753                 flags |= ACL_CONT;
2754                 do {
2755                         skb = list; list = list->next;
2756
2757                         skb->dev = (void *) hdev;
2758                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2759                         hci_add_acl_hdr(skb, conn->handle, flags);
2760
2761                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2762
2763                         __skb_queue_tail(queue, skb);
2764                 } while (list);
2765
2766                 spin_unlock(&queue->lock);
2767         }
2768 }
2769
2770 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2771 {
2772         struct hci_dev *hdev = chan->conn->hdev;
2773
2774         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2775
2776         skb->dev = (void *) hdev;
2777
2778         hci_queue_acl(chan, &chan->data_q, skb, flags);
2779
2780         queue_work(hdev->workqueue, &hdev->tx_work);
2781 }
2782
2783 /* Send SCO data */
2784 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2785 {
2786         struct hci_dev *hdev = conn->hdev;
2787         struct hci_sco_hdr hdr;
2788
2789         BT_DBG("%s len %d", hdev->name, skb->len);
2790
2791         hdr.handle = cpu_to_le16(conn->handle);
2792         hdr.dlen   = skb->len;
2793
2794         skb_push(skb, HCI_SCO_HDR_SIZE);
2795         skb_reset_transport_header(skb);
2796         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2797
2798         skb->dev = (void *) hdev;
2799         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2800
2801         skb_queue_tail(&conn->data_q, skb);
2802         queue_work(hdev->workqueue, &hdev->tx_work);
2803 }
2804
2805 /* ---- HCI TX task (outgoing data) ---- */
2806
2807 /* HCI Connection scheduler */
2808 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2809                                      int *quote)
2810 {
2811         struct hci_conn_hash *h = &hdev->conn_hash;
2812         struct hci_conn *conn = NULL, *c;
2813         unsigned int num = 0, min = ~0;
2814
2815         /* We don't have to lock the device here. Connections are always
2816          * added and removed with the TX task disabled. */
2817
2818         rcu_read_lock();
2819
2820         list_for_each_entry_rcu(c, &h->list, list) {
2821                 if (c->type != type || skb_queue_empty(&c->data_q))
2822                         continue;
2823
2824                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2825                         continue;
2826
2827                 num++;
2828
2829                 if (c->sent < min) {
2830                         min  = c->sent;
2831                         conn = c;
2832                 }
2833
2834                 if (hci_conn_num(hdev, type) == num)
2835                         break;
2836         }
2837
2838         rcu_read_unlock();
2839
2840         if (conn) {
2841                 int cnt, q;
2842
2843                 switch (conn->type) {
2844                 case ACL_LINK:
2845                         cnt = hdev->acl_cnt;
2846                         break;
2847                 case SCO_LINK:
2848                 case ESCO_LINK:
2849                         cnt = hdev->sco_cnt;
2850                         break;
2851                 case LE_LINK:
2852                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2853                         break;
2854                 default:
2855                         cnt = 0;
2856                         BT_ERR("Unknown link type");
2857                 }
2858
2859                 q = cnt / num;
2860                 *quote = q ? q : 1;
2861         } else
2862                 *quote = 0;
2863
2864         BT_DBG("conn %p quote %d", conn, *quote);
2865         return conn;
2866 }
2867
2868 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2869 {
2870         struct hci_conn_hash *h = &hdev->conn_hash;
2871         struct hci_conn *c;
2872
2873         BT_ERR("%s link tx timeout", hdev->name);
2874
2875         rcu_read_lock();
2876
2877         /* Kill stalled connections */
2878         list_for_each_entry_rcu(c, &h->list, list) {
2879                 if (c->type == type && c->sent) {
2880                         BT_ERR("%s killing stalled connection %pMR",
2881                                hdev->name, &c->dst);
2882                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2883                 }
2884         }
2885
2886         rcu_read_unlock();
2887 }
2888
2889 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2890                                       int *quote)
2891 {
2892         struct hci_conn_hash *h = &hdev->conn_hash;
2893         struct hci_chan *chan = NULL;
2894         unsigned int num = 0, min = ~0, cur_prio = 0;
2895         struct hci_conn *conn;
2896         int cnt, q, conn_num = 0;
2897
2898         BT_DBG("%s", hdev->name);
2899
2900         rcu_read_lock();
2901
2902         list_for_each_entry_rcu(conn, &h->list, list) {
2903                 struct hci_chan *tmp;
2904
2905                 if (conn->type != type)
2906                         continue;
2907
2908                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2909                         continue;
2910
2911                 conn_num++;
2912
2913                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2914                         struct sk_buff *skb;
2915
2916                         if (skb_queue_empty(&tmp->data_q))
2917                                 continue;
2918
2919                         skb = skb_peek(&tmp->data_q);
2920                         if (skb->priority < cur_prio)
2921                                 continue;
2922
2923                         if (skb->priority > cur_prio) {
2924                                 num = 0;
2925                                 min = ~0;
2926                                 cur_prio = skb->priority;
2927                         }
2928
2929                         num++;
2930
2931                         if (conn->sent < min) {
2932                                 min  = conn->sent;
2933                                 chan = tmp;
2934                         }
2935                 }
2936
2937                 if (hci_conn_num(hdev, type) == conn_num)
2938                         break;
2939         }
2940
2941         rcu_read_unlock();
2942
2943         if (!chan)
2944                 return NULL;
2945
2946         switch (chan->conn->type) {
2947         case ACL_LINK:
2948                 cnt = hdev->acl_cnt;
2949                 break;
2950         case AMP_LINK:
2951                 cnt = hdev->block_cnt;
2952                 break;
2953         case SCO_LINK:
2954         case ESCO_LINK:
2955                 cnt = hdev->sco_cnt;
2956                 break;
2957         case LE_LINK:
2958                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2959                 break;
2960         default:
2961                 cnt = 0;
2962                 BT_ERR("Unknown link type");
2963         }
2964
2965         q = cnt / num;
2966         *quote = q ? q : 1;
2967         BT_DBG("chan %p quote %d", chan, *quote);
2968         return chan;
2969 }
2970
2971 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2972 {
2973         struct hci_conn_hash *h = &hdev->conn_hash;
2974         struct hci_conn *conn;
2975         int num = 0;
2976
2977         BT_DBG("%s", hdev->name);
2978
2979         rcu_read_lock();
2980
2981         list_for_each_entry_rcu(conn, &h->list, list) {
2982                 struct hci_chan *chan;
2983
2984                 if (conn->type != type)
2985                         continue;
2986
2987                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2988                         continue;
2989
2990                 num++;
2991
2992                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2993                         struct sk_buff *skb;
2994
2995                         if (chan->sent) {
2996                                 chan->sent = 0;
2997                                 continue;
2998                         }
2999
3000                         if (skb_queue_empty(&chan->data_q))
3001                                 continue;
3002
3003                         skb = skb_peek(&chan->data_q);
3004                         if (skb->priority >= HCI_PRIO_MAX - 1)
3005                                 continue;
3006
3007                         skb->priority = HCI_PRIO_MAX - 1;
3008
3009                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3010                                skb->priority);
3011                 }
3012
3013                 if (hci_conn_num(hdev, type) == num)
3014                         break;
3015         }
3016
3017         rcu_read_unlock();
3018
3019 }
3020
3021 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3022 {
3023         /* Calculate count of blocks used by this packet */
3024         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3025 }
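
/* Worked example (block_len value hypothetical): with block_len = 16, a
 * 259-byte ACL packet (4-byte ACL header + 255 bytes of payload) uses
 * DIV_ROUND_UP(255, 16) = 16 controller buffer blocks.
 */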
3026
3027 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3028 {
3029         if (!test_bit(HCI_RAW, &hdev->flags)) {
3030                 /* ACL tx timeout must be longer than maximum
3031                  * link supervision timeout (40.9 seconds) */
3032                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3033                                        HCI_ACL_TX_TIMEOUT))
3034                         hci_link_tx_to(hdev, ACL_LINK);
3035         }
3036 }
3037
3038 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3039 {
3040         unsigned int cnt = hdev->acl_cnt;
3041         struct hci_chan *chan;
3042         struct sk_buff *skb;
3043         int quote;
3044
3045         __check_timeout(hdev, cnt);
3046
3047         while (hdev->acl_cnt &&
3048                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3049                 u32 priority = (skb_peek(&chan->data_q))->priority;
3050                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3051                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3052                                skb->len, skb->priority);
3053
3054                         /* Stop if priority has changed */
3055                         if (skb->priority < priority)
3056                                 break;
3057
3058                         skb = skb_dequeue(&chan->data_q);
3059
3060                         hci_conn_enter_active_mode(chan->conn,
3061                                                    bt_cb(skb)->force_active);
3062
3063                         hci_send_frame(skb);
3064                         hdev->acl_last_tx = jiffies;
3065
3066                         hdev->acl_cnt--;
3067                         chan->sent++;
3068                         chan->conn->sent++;
3069                 }
3070         }
3071
3072         if (cnt != hdev->acl_cnt)
3073                 hci_prio_recalculate(hdev, ACL_LINK);
3074 }
3075
3076 static void hci_sched_acl_blk(struct hci_dev *hdev)
3077 {
3078         unsigned int cnt = hdev->block_cnt;
3079         struct hci_chan *chan;
3080         struct sk_buff *skb;
3081         int quote;
3082         u8 type;
3083
3084         __check_timeout(hdev, cnt);
3085
3086         BT_DBG("%s", hdev->name);
3087
3088         if (hdev->dev_type == HCI_AMP)
3089                 type = AMP_LINK;
3090         else
3091                 type = ACL_LINK;
3092
3093         while (hdev->block_cnt > 0 &&
3094                (chan = hci_chan_sent(hdev, type, &quote))) {
3095                 u32 priority = (skb_peek(&chan->data_q))->priority;
3096                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3097                         int blocks;
3098
3099                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3100                                skb->len, skb->priority);
3101
3102                         /* Stop if priority has changed */
3103                         if (skb->priority < priority)
3104                                 break;
3105
3106                         skb = skb_dequeue(&chan->data_q);
3107
3108                         blocks = __get_blocks(hdev, skb);
3109                         if (blocks > hdev->block_cnt)
3110                                 return;
3111
3112                         hci_conn_enter_active_mode(chan->conn,
3113                                                    bt_cb(skb)->force_active);
3114
3115                         hci_send_frame(skb);
3116                         hdev->acl_last_tx = jiffies;
3117
3118                         hdev->block_cnt -= blocks;
3119                         quote -= blocks;
3120
3121                         chan->sent += blocks;
3122                         chan->conn->sent += blocks;
3123                 }
3124         }
3125
3126         if (cnt != hdev->block_cnt)
3127                 hci_prio_recalculate(hdev, type);
3128 }
3129
3130 static void hci_sched_acl(struct hci_dev *hdev)
3131 {
3132         BT_DBG("%s", hdev->name);
3133
3134         /* No ACL link over BR/EDR controller */
3135         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3136                 return;
3137
3138         /* No AMP link over AMP controller */
3139         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3140                 return;
3141
3142         switch (hdev->flow_ctl_mode) {
3143         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3144                 hci_sched_acl_pkt(hdev);
3145                 break;
3146
3147         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3148                 hci_sched_acl_blk(hdev);
3149                 break;
3150         }
3151 }
3152
3153 /* Schedule SCO */
3154 static void hci_sched_sco(struct hci_dev *hdev)
3155 {
3156         struct hci_conn *conn;
3157         struct sk_buff *skb;
3158         int quote;
3159
3160         BT_DBG("%s", hdev->name);
3161
3162         if (!hci_conn_num(hdev, SCO_LINK))
3163                 return;
3164
3165         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3166                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3167                         BT_DBG("skb %p len %d", skb, skb->len);
3168                         hci_send_frame(skb);
3169
3170                         conn->sent++;
3171                         if (conn->sent == ~0)
3172                                 conn->sent = 0;
3173                 }
3174         }
3175 }
3176
3177 static void hci_sched_esco(struct hci_dev *hdev)
3178 {
3179         struct hci_conn *conn;
3180         struct sk_buff *skb;
3181         int quote;
3182
3183         BT_DBG("%s", hdev->name);
3184
3185         if (!hci_conn_num(hdev, ESCO_LINK))
3186                 return;
3187
3188         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3189                                                      &quote))) {
3190                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3191                         BT_DBG("skb %p len %d", skb, skb->len);
3192                         hci_send_frame(skb);
3193
3194                         conn->sent++;
3195                         if (conn->sent == ~0)
3196                                 conn->sent = 0;
3197                 }
3198         }
3199 }
3200
3201 static void hci_sched_le(struct hci_dev *hdev)
3202 {
3203         struct hci_chan *chan;
3204         struct sk_buff *skb;
3205         int quote, cnt, tmp;
3206
3207         BT_DBG("%s", hdev->name);
3208
3209         if (!hci_conn_num(hdev, LE_LINK))
3210                 return;
3211
3212         if (!test_bit(HCI_RAW, &hdev->flags)) {
3213                 /* LE tx timeout must be longer than maximum
3214                  * link supervision timeout (40.9 seconds) */
3215                 if (!hdev->le_cnt && hdev->le_pkts &&
3216                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3217                         hci_link_tx_to(hdev, LE_LINK);
3218         }
3219
3220         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3221         tmp = cnt;
3222         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3223                 u32 priority = (skb_peek(&chan->data_q))->priority;
3224                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3225                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3226                                skb->len, skb->priority);
3227
3228                         /* Stop if priority has changed */
3229                         if (skb->priority < priority)
3230                                 break;
3231
3232                         skb = skb_dequeue(&chan->data_q);
3233
3234                         hci_send_frame(skb);
3235                         hdev->le_last_tx = jiffies;
3236
3237                         cnt--;
3238                         chan->sent++;
3239                         chan->conn->sent++;
3240                 }
3241         }
3242
3243         if (hdev->le_pkts)
3244                 hdev->le_cnt = cnt;
3245         else
3246                 hdev->acl_cnt = cnt;
3247
3248         if (cnt != tmp)
3249                 hci_prio_recalculate(hdev, LE_LINK);
3250 }
3251
3252 static void hci_tx_work(struct work_struct *work)
3253 {
3254         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3255         struct sk_buff *skb;
3256
3257         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3258                hdev->sco_cnt, hdev->le_cnt);
3259
3260         /* Schedule queues and send pending frames to the HCI driver */
3261
3262         hci_sched_acl(hdev);
3263
3264         hci_sched_sco(hdev);
3265
3266         hci_sched_esco(hdev);
3267
3268         hci_sched_le(hdev);
3269
3270         /* Send next queued raw (unknown type) packet */
3271         while ((skb = skb_dequeue(&hdev->raw_q)))
3272                 hci_send_frame(skb);
3273 }
3274
3275 /* ----- HCI RX task (incoming data processing) ----- */
3276
3277 /* ACL data packet */
3278 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3279 {
3280         struct hci_acl_hdr *hdr = (void *) skb->data;
3281         struct hci_conn *conn;
3282         __u16 handle, flags;
3283
3284         skb_pull(skb, HCI_ACL_HDR_SIZE);
3285
3286         handle = __le16_to_cpu(hdr->handle);
3287         flags  = hci_flags(handle);
3288         handle = hci_handle(handle);
3289
3290         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3291                handle, flags);
3292
3293         hdev->stat.acl_rx++;
3294
3295         hci_dev_lock(hdev);
3296         conn = hci_conn_hash_lookup_handle(hdev, handle);
3297         hci_dev_unlock(hdev);
3298
3299         if (conn) {
3300                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3301
3302                 /* Send to upper protocol */
3303                 l2cap_recv_acldata(conn, skb, flags);
3304                 return;
3305         } else {
3306                 BT_ERR("%s ACL packet for unknown connection handle %d",
3307                        hdev->name, handle);
3308         }
3309
3310         kfree_skb(skb);
3311 }
3312
3313 /* SCO data packet */
3314 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3315 {
3316         struct hci_sco_hdr *hdr = (void *) skb->data;
3317         struct hci_conn *conn;
3318         __u16 handle;
3319
3320         skb_pull(skb, HCI_SCO_HDR_SIZE);
3321
3322         handle = __le16_to_cpu(hdr->handle);
3323
3324         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3325
3326         hdev->stat.sco_rx++;
3327
3328         hci_dev_lock(hdev);
3329         conn = hci_conn_hash_lookup_handle(hdev, handle);
3330         hci_dev_unlock(hdev);
3331
3332         if (conn) {
3333                 /* Send to upper protocol */
3334                 sco_recv_scodata(conn, skb);
3335                 return;
3336         } else {
3337                 BT_ERR("%s SCO packet for unknown connection handle %d",
3338                        hdev->name, handle);
3339         }
3340
3341         kfree_skb(skb);
3342 }
3343
3344 static bool hci_req_is_complete(struct hci_dev *hdev)
3345 {
3346         struct sk_buff *skb;
3347
3348         skb = skb_peek(&hdev->cmd_q);
3349         if (!skb)
3350                 return true;
3351
3352         return bt_cb(skb)->req.start;
3353 }
3354
3355 static void hci_resend_last(struct hci_dev *hdev)
3356 {
3357         struct hci_command_hdr *sent;
3358         struct sk_buff *skb;
3359         u16 opcode;
3360
3361         if (!hdev->sent_cmd)
3362                 return;
3363
3364         sent = (void *) hdev->sent_cmd->data;
3365         opcode = __le16_to_cpu(sent->opcode);
3366         if (opcode == HCI_OP_RESET)
3367                 return;
3368
3369         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3370         if (!skb)
3371                 return;
3372
3373         skb_queue_head(&hdev->cmd_q, skb);
3374         queue_work(hdev->workqueue, &hdev->cmd_work);
3375 }
3376
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent, we need to handle it specially.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the complete
         * callback is found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;
                if (req_complete)
                        goto call_complete;
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

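/* RX work: drain hdev->rx_q, mirroring every frame to the monitor (and,
 * in promiscuous mode, to raw sockets) before dispatching it to the
 * event, ACL or SCO handler according to its packet type.
 */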
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

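/* TX work for commands: hdev->cmd_cnt tracks the command credits granted
 * by the controller. A clone of each transmitted command is kept in
 * hdev->sent_cmd so that completion events can be matched against it and
 * so it can be resent if needed, while cmd_timer serves as a watchdog
 * against controllers that stop responding.
 */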
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

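/* Start an inquiry (BR/EDR discovery) using the General Inquiry Access
 * Code; per the HCI specification, @length is expressed in units of
 * 1.28 seconds.
 */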
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return -EINPROGRESS;

        inquiry_cache_flush(hdev);

        memset(&cp, 0, sizeof(cp));
        memcpy(&cp.lap, lap, sizeof(cp.lap));
        cp.length = length;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

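/* Abort a running inquiry; -EALREADY indicates none is in progress. */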
int hci_cancel_inquiry(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_INQUIRY, &hdev->flags))
                return -EALREADY;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

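/* Map an exported bdaddr type (BDADDR_LE_*) to the corresponding HCI LE
 * address type, falling back to the random type for anything that is
 * not public.
 */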
u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fall back to the LE random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}