Bluetooth: Add support for custom event terminated commands
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}
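
/* Note: on success, ownership of the skb stored in hdev->recv_evt is
 * transferred to the caller, with the event and cmd_complete headers
 * already pulled off, so the caller must eventually kfree_skb() it.
 * All failure paths return ERR_PTR(-ENODATA) rather than NULL, so
 * callers must test the result with IS_ERR().
 */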

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               void *param, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add(&req, opcode, plen, param);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode);
}
EXPORT_SYMBOL(__hci_cmd_sync);
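
/* Illustrative, hypothetical use of __hci_cmd_sync() from driver code:
 * send one vendor-specific command (the opcode and parameters below are
 * made up for the example) and block until its Command Complete event
 * arrives, the timeout expires, or a signal interrupts the wait.
 *
 *	u8 param[2] = { 0x01, 0x42 };
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x0001),
 *			     sizeof(param), param, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 *
 * On success the returned skb has already been advanced past the event
 * headers, so skb->data points at the command's return parameters.
 */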

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
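
/* Note: __hci_req_sync() is meant to be called with the request lock
 * already held (e.g. during controller init, before HCI_UP is set),
 * while hci_req_sync() is the checked, self-locking variant used at
 * runtime: it rejects requests on a device that is not up and takes
 * hci_req_lock() around the actual work.
 */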

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Block Size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_request init_req;
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        hci_req_init(&init_req, hdev);

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                if (skb_queue_empty(&init_req.cmd_q))
                        bt_cb(skb)->req.start = true;

                skb_queue_tail(&init_req.cmd_q, skb);
        }
        skb_queue_purge(&hdev->driver_init);

        hci_req_run(&init_req, NULL);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
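
/* Note: the return values above map directly onto the Write Inquiry
 * Mode command parameter defined by the Core specification: 0x00 =
 * standard inquiry results, 0x01 = inquiry results with RSSI, 0x02 =
 * inquiry results with RSSI or extended inquiry results. The
 * manufacturer/revision checks cover specific controllers that handle
 * RSSI results without advertising the corresponding LMP feature bit.
 */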

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
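
/* Note: for the LE event mask, events[0] = 0x1f sets bits 0-4, which
 * enable the five LE meta events defined by the Core specification at
 * this point: LE Connection Complete, LE Advertising Report, LE
 * Connection Update Complete, LE Read Remote Used Features Complete
 * and LE Long Term Key Request.
 */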

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode BR/EDR,
         * single-mode LE and dual-mode BR/EDR/LE controllers. AMP
         * controllers only need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
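
/* Note: init is staged so later stages can depend on what earlier ones
 * learned. Stage 1 queues any driver-specific commands and reads basic
 * controller information (features, version, address); stage 2 sets up
 * BR/EDR and LE basics plus the event mask; stage 3 applies settings
 * such as link policy, LE host support and advertising data, which
 * need the feature and supported-commands bitmaps read earlier.
 */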

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return.
 */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * copy it to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
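
/* Note: ir.length is expressed in inquiry length units of 1.28 s as
 * defined by the Core specification, so the 2000 ms per unit used for
 * the request timeout above leaves some slack for the controller to
 * deliver the final results before the HCI_INQUIRY flag is cleared.
 */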

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
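
/* Note: advertising data is built as a sequence of length/type/value
 * structures, where each length byte counts the type byte plus the
 * value bytes. As an illustrative example (not tied to any particular
 * controller), an LE-only device named "dev" could end up with:
 *
 *	02 01 06	Flags: LE General Discoverable, BR/EDR not supported
 *	02 0a 00	TX Power Level: 0 dBm
 *	04 09 64 65 76	Complete Local Name: "dev"
 */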

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
         * enable_hs is not set.
         */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                ret = __hci_init(hdev);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled.
         */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as a requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as a requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as a requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently.
         */
        return false;
}
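
/* Note: the auth_type/remote_auth values compared above are the
 * authentication requirements exchanged during the IO capability
 * phase of Secure Simple Pairing: 0x00/0x01 = No Bonding (without/with
 * MITM protection), 0x02/0x03 = Dedicated Bonding, 0x04/0x05 =
 * General Bonding.
 */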
1665
1666 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1667 {
1668         struct smp_ltk *k;
1669
1670         list_for_each_entry(k, &hdev->long_term_keys, list) {
1671                 if (k->ediv != ediv ||
1672                     memcmp(rand, k->rand, sizeof(k->rand)))
1673                         continue;
1674
1675                 return k;
1676         }
1677
1678         return NULL;
1679 }
1680
1681 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1682                                      u8 addr_type)
1683 {
1684         struct smp_ltk *k;
1685
1686         list_for_each_entry(k, &hdev->long_term_keys, list)
1687                 if (addr_type == k->bdaddr_type &&
1688                     bacmp(bdaddr, &k->bdaddr) == 0)
1689                         return k;
1690
1691         return NULL;
1692 }
1693
1694 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1695                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1696 {
1697         struct link_key *key, *old_key;
1698         u8 old_key_type;
1699         bool persistent;
1700
1701         old_key = hci_find_link_key(hdev, bdaddr);
1702         if (old_key) {
1703                 old_key_type = old_key->type;
1704                 key = old_key;
1705         } else {
1706                 old_key_type = conn ? conn->key_type : 0xff;
1707                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1708                 if (!key)
1709                         return -ENOMEM;
1710                 list_add(&key->list, &hdev->link_keys);
1711         }
1712
1713         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1714
1715         /* Some buggy controller combinations generate a changed
1716          * combination key for legacy pairing even when there's no
1717          * previous key */
1718         if (type == HCI_LK_CHANGED_COMBINATION &&
1719             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1720                 type = HCI_LK_COMBINATION;
1721                 if (conn)
1722                         conn->key_type = type;
1723         }
1724
1725         bacpy(&key->bdaddr, bdaddr);
1726         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1727         key->pin_len = pin_len;
1728
1729         if (type == HCI_LK_CHANGED_COMBINATION)
1730                 key->type = old_key_type;
1731         else
1732                 key->type = type;
1733
1734         if (!new_key)
1735                 return 0;
1736
1737         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1738
1739         mgmt_new_link_key(hdev, key, persistent);
1740
1741         if (conn)
1742                 conn->flush_key = !persistent;
1743
1744         return 0;
1745 }
1746
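     /* Store (or update) an SMP Short Term or Long Term Key for an LE
      * address. Key types other than STK and LTK are ignored, and only
      * new LTKs (not STKs) are reported to userspace via mgmt_new_ltk(). */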
1747 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1748                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1749                 ediv, u8 rand[8])
1750 {
1751         struct smp_ltk *key, *old_key;
1752
1753         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1754                 return 0;
1755
1756         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1757         if (old_key) {
1758                 key = old_key;
1759         } else {
1760                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1761                 if (!key)
1762                         return -ENOMEM;
1763                 list_add(&key->list, &hdev->long_term_keys);
1764         }
1765
1766         bacpy(&key->bdaddr, bdaddr);
1767         key->bdaddr_type = addr_type;
1768         memcpy(key->val, tk, sizeof(key->val));
1769         key->authenticated = authenticated;
1770         key->ediv = ediv;
1771         key->enc_size = enc_size;
1772         key->type = type;
1773         memcpy(key->rand, rand, sizeof(key->rand));
1774
1775         if (!new_key)
1776                 return 0;
1777
1778         if (type & HCI_SMP_LTK)
1779                 mgmt_new_ltk(hdev, key, 1);
1780
1781         return 0;
1782 }
1783
1784 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1785 {
1786         struct link_key *key;
1787
1788         key = hci_find_link_key(hdev, bdaddr);
1789         if (!key)
1790                 return -ENOENT;
1791
1792         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1793
1794         list_del(&key->list);
1795         kfree(key);
1796
1797         return 0;
1798 }
1799
1800 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1801 {
1802         struct smp_ltk *k, *tmp;
1803
1804         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1805                 if (bacmp(bdaddr, &k->bdaddr))
1806                         continue;
1807
1808                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1809
1810                 list_del(&k->list);
1811                 kfree(k);
1812         }
1813
1814         return 0;
1815 }
1816
1817 /* HCI command timer function */
1818 static void hci_cmd_timeout(unsigned long arg)
1819 {
1820         struct hci_dev *hdev = (void *) arg;
1821
1822         if (hdev->sent_cmd) {
1823                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1824                 u16 opcode = __le16_to_cpu(sent->opcode);
1825
1826                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1827         } else {
1828                 BT_ERR("%s command tx timeout", hdev->name);
1829         }
1830
1831         atomic_set(&hdev->cmd_cnt, 1);
1832         queue_work(hdev->workqueue, &hdev->cmd_work);
1833 }
1834
1835 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1836                                           bdaddr_t *bdaddr)
1837 {
1838         struct oob_data *data;
1839
1840         list_for_each_entry(data, &hdev->remote_oob_data, list)
1841                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1842                         return data;
1843
1844         return NULL;
1845 }
1846
1847 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1848 {
1849         struct oob_data *data;
1850
1851         data = hci_find_remote_oob_data(hdev, bdaddr);
1852         if (!data)
1853                 return -ENOENT;
1854
1855         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1856
1857         list_del(&data->list);
1858         kfree(data);
1859
1860         return 0;
1861 }
1862
1863 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1864 {
1865         struct oob_data *data, *n;
1866
1867         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1868                 list_del(&data->list);
1869                 kfree(data);
1870         }
1871
1872         return 0;
1873 }
1874
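     /* Store the Secure Simple Pairing hash and randomizer received
      * out-of-band for @bdaddr, replacing any previously stored values. */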
1875 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1876                             u8 *randomizer)
1877 {
1878         struct oob_data *data;
1879
1880         data = hci_find_remote_oob_data(hdev, bdaddr);
1881
1882         if (!data) {
1883                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1884                 if (!data)
1885                         return -ENOMEM;
1886
1887                 bacpy(&data->bdaddr, bdaddr);
1888                 list_add(&data->list, &hdev->remote_oob_data);
1889         }
1890
1891         memcpy(data->hash, hash, sizeof(data->hash));
1892         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1893
1894         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1895
1896         return 0;
1897 }
1898
1899 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1900 {
1901         struct bdaddr_list *b;
1902
1903         list_for_each_entry(b, &hdev->blacklist, list)
1904                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1905                         return b;
1906
1907         return NULL;
1908 }
1909
1910 int hci_blacklist_clear(struct hci_dev *hdev)
1911 {
1912         struct list_head *p, *n;
1913
1914         list_for_each_safe(p, n, &hdev->blacklist) {
1915                 struct bdaddr_list *b;
1916
1917                 b = list_entry(p, struct bdaddr_list, list);
1918
1919                 list_del(p);
1920                 kfree(b);
1921         }
1922
1923         return 0;
1924 }
1925
1926 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1927 {
1928         struct bdaddr_list *entry;
1929
1930         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1931                 return -EBADF;
1932
1933         if (hci_blacklist_lookup(hdev, bdaddr))
1934                 return -EEXIST;
1935
1936         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1937         if (!entry)
1938                 return -ENOMEM;
1939
1940         bacpy(&entry->bdaddr, bdaddr);
1941
1942         list_add(&entry->list, &hdev->blacklist);
1943
1944         return mgmt_device_blocked(hdev, bdaddr, type);
1945 }
1946
1947 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1948 {
1949         struct bdaddr_list *entry;
1950
1951         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1952                 return hci_blacklist_clear(hdev);
1953
1954         entry = hci_blacklist_lookup(hdev, bdaddr);
1955         if (!entry)
1956                 return -ENOENT;
1957
1958         list_del(&entry->list);
1959         kfree(entry);
1960
1961         return mgmt_device_unblocked(hdev, bdaddr, type);
1962 }
1963
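     /* LE scanning is started as a two-step request: first the scan
      * parameters are set, then scanning is enabled. hci_do_le_scan()
      * runs both steps synchronously and schedules le_scan_disable_work()
      * to stop the scan again once the requested timeout has expired. */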
1964 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1965 {
1966         struct le_scan_params *param = (struct le_scan_params *) opt;
1967         struct hci_cp_le_set_scan_param cp;
1968
1969         memset(&cp, 0, sizeof(cp));
1970         cp.type = param->type;
1971         cp.interval = cpu_to_le16(param->interval);
1972         cp.window = cpu_to_le16(param->window);
1973
1974         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1975 }
1976
1977 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
1978 {
1979         struct hci_cp_le_set_scan_enable cp;
1980
1981         memset(&cp, 0, sizeof(cp));
1982         cp.enable = 1;
1983         cp.filter_dup = 1;
1984
1985         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1986 }
1987
1988 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1989                           u16 window, int timeout)
1990 {
1991         long timeo = msecs_to_jiffies(3000);
1992         struct le_scan_params param;
1993         int err;
1994
1995         BT_DBG("%s", hdev->name);
1996
1997         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1998                 return -EINPROGRESS;
1999
2000         param.type = type;
2001         param.interval = interval;
2002         param.window = window;
2003
2004         hci_req_lock(hdev);
2005
2006         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2007                              timeo);
2008         if (!err)
2009                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2010
2011         hci_req_unlock(hdev);
2012
2013         if (err < 0)
2014                 return err;
2015
2016         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2017                            msecs_to_jiffies(timeout));
2018
2019         return 0;
2020 }
2021
2022 int hci_cancel_le_scan(struct hci_dev *hdev)
2023 {
2024         BT_DBG("%s", hdev->name);
2025
2026         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2027                 return -EALREADY;
2028
2029         if (cancel_delayed_work(&hdev->le_scan_disable)) {
2030                 struct hci_cp_le_set_scan_enable cp;
2031
2032                 /* Send HCI command to disable LE Scan */
2033                 memset(&cp, 0, sizeof(cp));
2034                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2035         }
2036
2037         return 0;
2038 }
2039
2040 static void le_scan_disable_work(struct work_struct *work)
2041 {
2042         struct hci_dev *hdev = container_of(work, struct hci_dev,
2043                                             le_scan_disable.work);
2044         struct hci_cp_le_set_scan_enable cp;
2045
2046         BT_DBG("%s", hdev->name);
2047
2048         memset(&cp, 0, sizeof(cp));
2049
2050         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2051 }
2052
2053 static void le_scan_work(struct work_struct *work)
2054 {
2055         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2056         struct le_scan_params *param = &hdev->le_scan_params;
2057
2058         BT_DBG("%s", hdev->name);
2059
2060         hci_do_le_scan(hdev, param->type, param->interval, param->window,
2061                        param->timeout);
2062 }
2063
2064 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2065                 int timeout)
2066 {
2067         struct le_scan_params *param = &hdev->le_scan_params;
2068
2069         BT_DBG("%s", hdev->name);
2070
2071         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2072                 return -ENOTSUPP;
2073
2074         if (work_busy(&hdev->le_scan))
2075                 return -EINPROGRESS;
2076
2077         param->type = type;
2078         param->interval = interval;
2079         param->window = window;
2080         param->timeout = timeout;
2081
2082         queue_work(system_long_wq, &hdev->le_scan);
2083
2084         return 0;
2085 }
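
     /* A minimal usage sketch (illustrative values, not taken from this
      * file): start an active scan (type 0x01) for 10 seconds, with equal
      * interval and window (units of 0.625 ms) so the controller scans
      * continuously:
      *
      *        err = hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10000);
      *
      * -EINPROGRESS means a scan is already queued or running; -ENOTSUPP
      * means the controller is operating as an LE peripheral. */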
2086
2087 /* Alloc HCI device */
2088 struct hci_dev *hci_alloc_dev(void)
2089 {
2090         struct hci_dev *hdev;
2091
2092         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2093         if (!hdev)
2094                 return NULL;
2095
2096         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2097         hdev->esco_type = (ESCO_HV1);
2098         hdev->link_mode = (HCI_LM_ACCEPT);
2099         hdev->io_capability = 0x03; /* No Input No Output */
2100         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2101         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2102
2103         hdev->sniff_max_interval = 800;
2104         hdev->sniff_min_interval = 80;
2105
2106         mutex_init(&hdev->lock);
2107         mutex_init(&hdev->req_lock);
2108
2109         INIT_LIST_HEAD(&hdev->mgmt_pending);
2110         INIT_LIST_HEAD(&hdev->blacklist);
2111         INIT_LIST_HEAD(&hdev->uuids);
2112         INIT_LIST_HEAD(&hdev->link_keys);
2113         INIT_LIST_HEAD(&hdev->long_term_keys);
2114         INIT_LIST_HEAD(&hdev->remote_oob_data);
2115         INIT_LIST_HEAD(&hdev->conn_hash.list);
2116
2117         INIT_WORK(&hdev->rx_work, hci_rx_work);
2118         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2119         INIT_WORK(&hdev->tx_work, hci_tx_work);
2120         INIT_WORK(&hdev->power_on, hci_power_on);
2121         INIT_WORK(&hdev->le_scan, le_scan_work);
2122
2123         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2124         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2125         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2126
2127         skb_queue_head_init(&hdev->driver_init);
2128         skb_queue_head_init(&hdev->rx_q);
2129         skb_queue_head_init(&hdev->cmd_q);
2130         skb_queue_head_init(&hdev->raw_q);
2131
2132         init_waitqueue_head(&hdev->req_wait_q);
2133
2134         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2135
2136         hci_init_sysfs(hdev);
2137         discovery_init(hdev);
2138
2139         return hdev;
2140 }
2141 EXPORT_SYMBOL(hci_alloc_dev);
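
     /* Typical driver usage (a sketch; foo_open, foo_close and foo_send
      * are hypothetical transport callbacks):
      *
      *        hdev = hci_alloc_dev();
      *        if (!hdev)
      *                return -ENOMEM;
      *
      *        hdev->bus = HCI_USB;
      *        hdev->open = foo_open;
      *        hdev->close = foo_close;
      *        hdev->send = foo_send;
      *
      *        err = hci_register_dev(hdev);
      *        if (err < 0)
      *                hci_free_dev(hdev);
      */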
2142
2143 /* Free HCI device */
2144 void hci_free_dev(struct hci_dev *hdev)
2145 {
2146         skb_queue_purge(&hdev->driver_init);
2147
2148         /* will be freed via the device release function */
2149         put_device(&hdev->dev);
2150 }
2151 EXPORT_SYMBOL(hci_free_dev);
2152
2153 /* Register HCI device */
2154 int hci_register_dev(struct hci_dev *hdev)
2155 {
2156         int id, error;
2157
2158         if (!hdev->open || !hdev->close)
2159                 return -EINVAL;
2160
2161         /* Do not allow HCI_AMP devices to register at index 0,
2162          * so the index can be used as the AMP controller ID.
2163          */
2164         switch (hdev->dev_type) {
2165         case HCI_BREDR:
2166                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2167                 break;
2168         case HCI_AMP:
2169                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2170                 break;
2171         default:
2172                 return -EINVAL;
2173         }
2174
2175         if (id < 0)
2176                 return id;
2177
2178         sprintf(hdev->name, "hci%d", id);
2179         hdev->id = id;
2180
2181         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2182
2183         write_lock(&hci_dev_list_lock);
2184         list_add(&hdev->list, &hci_dev_list);
2185         write_unlock(&hci_dev_list_lock);
2186
2187         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2188                                           WQ_MEM_RECLAIM, 1);
2189         if (!hdev->workqueue) {
2190                 error = -ENOMEM;
2191                 goto err;
2192         }
2193
2194         hdev->req_workqueue = alloc_workqueue(hdev->name,
2195                                               WQ_HIGHPRI | WQ_UNBOUND |
2196                                               WQ_MEM_RECLAIM, 1);
2197         if (!hdev->req_workqueue) {
2198                 destroy_workqueue(hdev->workqueue);
2199                 error = -ENOMEM;
2200                 goto err;
2201         }
2202
2203         error = hci_add_sysfs(hdev);
2204         if (error < 0)
2205                 goto err_wqueue;
2206
2207         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2208                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2209                                     hdev);
2210         if (hdev->rfkill) {
2211                 if (rfkill_register(hdev->rfkill) < 0) {
2212                         rfkill_destroy(hdev->rfkill);
2213                         hdev->rfkill = NULL;
2214                 }
2215         }
2216
2217         set_bit(HCI_SETUP, &hdev->dev_flags);
2218
2219         if (hdev->dev_type != HCI_AMP)
2220                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2221
2222         hci_notify(hdev, HCI_DEV_REG);
2223         hci_dev_hold(hdev);
2224
2225         queue_work(hdev->req_workqueue, &hdev->power_on);
2226
2227         return id;
2228
2229 err_wqueue:
2230         destroy_workqueue(hdev->workqueue);
2231         destroy_workqueue(hdev->req_workqueue);
2232 err:
2233         ida_simple_remove(&hci_index_ida, hdev->id);
2234         write_lock(&hci_dev_list_lock);
2235         list_del(&hdev->list);
2236         write_unlock(&hci_dev_list_lock);
2237
2238         return error;
2239 }
2240 EXPORT_SYMBOL(hci_register_dev);
2241
2242 /* Unregister HCI device */
2243 void hci_unregister_dev(struct hci_dev *hdev)
2244 {
2245         int i, id;
2246
2247         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2248
2249         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2250
2251         id = hdev->id;
2252
2253         write_lock(&hci_dev_list_lock);
2254         list_del(&hdev->list);
2255         write_unlock(&hci_dev_list_lock);
2256
2257         hci_dev_do_close(hdev);
2258
2259         for (i = 0; i < NUM_REASSEMBLY; i++)
2260                 kfree_skb(hdev->reassembly[i]);
2261
2262         cancel_work_sync(&hdev->power_on);
2263
2264         if (!test_bit(HCI_INIT, &hdev->flags) &&
2265             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2266                 hci_dev_lock(hdev);
2267                 mgmt_index_removed(hdev);
2268                 hci_dev_unlock(hdev);
2269         }
2270
2271         /* mgmt_index_removed should take care of emptying the
2272          * pending list */
2273         BUG_ON(!list_empty(&hdev->mgmt_pending));
2274
2275         hci_notify(hdev, HCI_DEV_UNREG);
2276
2277         if (hdev->rfkill) {
2278                 rfkill_unregister(hdev->rfkill);
2279                 rfkill_destroy(hdev->rfkill);
2280         }
2281
2282         hci_del_sysfs(hdev);
2283
2284         destroy_workqueue(hdev->workqueue);
2285         destroy_workqueue(hdev->req_workqueue);
2286
2287         hci_dev_lock(hdev);
2288         hci_blacklist_clear(hdev);
2289         hci_uuids_clear(hdev);
2290         hci_link_keys_clear(hdev);
2291         hci_smp_ltks_clear(hdev);
2292         hci_remote_oob_data_clear(hdev);
2293         hci_dev_unlock(hdev);
2294
2295         hci_dev_put(hdev);
2296
2297         ida_simple_remove(&hci_index_ida, id);
2298 }
2299 EXPORT_SYMBOL(hci_unregister_dev);
2300
2301 /* Suspend HCI device */
2302 int hci_suspend_dev(struct hci_dev *hdev)
2303 {
2304         hci_notify(hdev, HCI_DEV_SUSPEND);
2305         return 0;
2306 }
2307 EXPORT_SYMBOL(hci_suspend_dev);
2308
2309 /* Resume HCI device */
2310 int hci_resume_dev(struct hci_dev *hdev)
2311 {
2312         hci_notify(hdev, HCI_DEV_RESUME);
2313         return 0;
2314 }
2315 EXPORT_SYMBOL(hci_resume_dev);
2316
2317 /* Receive frame from HCI drivers */
2318 int hci_recv_frame(struct sk_buff *skb)
2319 {
2320         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2321         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2322                       !test_bit(HCI_INIT, &hdev->flags))) {
2323                 kfree_skb(skb);
2324                 return -ENXIO;
2325         }
2326
2327         /* Incoming skb */
2328         bt_cb(skb)->incoming = 1;
2329
2330         /* Time stamp */
2331         __net_timestamp(skb);
2332
2333         skb_queue_tail(&hdev->rx_q, skb);
2334         queue_work(hdev->workqueue, &hdev->rx_work);
2335
2336         return 0;
2337 }
2338 EXPORT_SYMBOL(hci_recv_frame);
2339
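     /* Reassemble a (possibly fragmented) HCI frame of the given packet
      * type. Bytes are accumulated in hdev->reassembly[index] until
      * header and payload are complete; finished frames are handed off
      * to hci_recv_frame(). Returns how many bytes of @data were left
      * unconsumed, or a negative error. */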
2340 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2341                           int count, __u8 index)
2342 {
2343         int len = 0;
2344         int hlen = 0;
2345         int remain = count;
2346         struct sk_buff *skb;
2347         struct bt_skb_cb *scb;
2348
2349         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2350             index >= NUM_REASSEMBLY)
2351                 return -EILSEQ;
2352
2353         skb = hdev->reassembly[index];
2354
2355         if (!skb) {
2356                 switch (type) {
2357                 case HCI_ACLDATA_PKT:
2358                         len = HCI_MAX_FRAME_SIZE;
2359                         hlen = HCI_ACL_HDR_SIZE;
2360                         break;
2361                 case HCI_EVENT_PKT:
2362                         len = HCI_MAX_EVENT_SIZE;
2363                         hlen = HCI_EVENT_HDR_SIZE;
2364                         break;
2365                 case HCI_SCODATA_PKT:
2366                         len = HCI_MAX_SCO_SIZE;
2367                         hlen = HCI_SCO_HDR_SIZE;
2368                         break;
2369                 }
2370
2371                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2372                 if (!skb)
2373                         return -ENOMEM;
2374
2375                 scb = (void *) skb->cb;
2376                 scb->expect = hlen;
2377                 scb->pkt_type = type;
2378
2379                 skb->dev = (void *) hdev;
2380                 hdev->reassembly[index] = skb;
2381         }
2382
2383         while (count) {
2384                 scb = (void *) skb->cb;
2385                 len = min_t(uint, scb->expect, count);
2386
2387                 memcpy(skb_put(skb, len), data, len);
2388
2389                 count -= len;
2390                 data += len;
2391                 scb->expect -= len;
2392                 remain = count;
2393
2394                 switch (type) {
2395                 case HCI_EVENT_PKT:
2396                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2397                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2398                                 scb->expect = h->plen;
2399
2400                                 if (skb_tailroom(skb) < scb->expect) {
2401                                         kfree_skb(skb);
2402                                         hdev->reassembly[index] = NULL;
2403                                         return -ENOMEM;
2404                                 }
2405                         }
2406                         break;
2407
2408                 case HCI_ACLDATA_PKT:
2409                         if (skb->len == HCI_ACL_HDR_SIZE) {
2410                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2411                                 scb->expect = __le16_to_cpu(h->dlen);
2412
2413                                 if (skb_tailroom(skb) < scb->expect) {
2414                                         kfree_skb(skb);
2415                                         hdev->reassembly[index] = NULL;
2416                                         return -ENOMEM;
2417                                 }
2418                         }
2419                         break;
2420
2421                 case HCI_SCODATA_PKT:
2422                         if (skb->len == HCI_SCO_HDR_SIZE) {
2423                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2424                                 scb->expect = h->dlen;
2425
2426                                 if (skb_tailroom(skb) < scb->expect) {
2427                                         kfree_skb(skb);
2428                                         hdev->reassembly[index] = NULL;
2429                                         return -ENOMEM;
2430                                 }
2431                         }
2432                         break;
2433                 }
2434
2435                 if (scb->expect == 0) {
2436                         /* Complete frame */
2437
2438                         bt_cb(skb)->pkt_type = type;
2439                         hci_recv_frame(skb);
2440
2441                         hdev->reassembly[index] = NULL;
2442                         return remain;
2443                 }
2444         }
2445
2446         return remain;
2447 }
2448
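     /* Feed driver data of a known packet type into the reassembly
      * machinery, looping until all bytes are consumed. A sketch of a
      * driver RX path (buf and len assumed to come from the transport):
      *
      *        err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
      *
      * A negative return means the packet type was invalid (-EILSEQ) or
      * an allocation failed (-ENOMEM). */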
2449 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2450 {
2451         int rem = 0;
2452
2453         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2454                 return -EILSEQ;
2455
2456         while (count) {
2457                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2458                 if (rem < 0)
2459                         return rem;
2460
2461                 data += (count - rem);
2462                 count = rem;
2463         }
2464
2465         return rem;
2466 }
2467 EXPORT_SYMBOL(hci_recv_fragment);
2468
2469 #define STREAM_REASSEMBLY 0
2470
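     /* Same as hci_recv_fragment(), but for byte-stream transports (e.g.
      * UART) where the packet type is carried in-band as the first byte
      * of each frame. A single reassembly slot is shared by all packet
      * types. */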
2471 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2472 {
2473         int type;
2474         int rem = 0;
2475
2476         while (count) {
2477                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2478
2479                 if (!skb) {
2480                         struct { char type; } *pkt;
2481
2482                         /* Start of the frame */
2483                         pkt = data;
2484                         type = pkt->type;
2485
2486                         data++;
2487                         count--;
2488                 } else {
2489                         type = bt_cb(skb)->pkt_type;
                     }
2490
2491                 rem = hci_reassembly(hdev, type, data, count,
2492                                      STREAM_REASSEMBLY);
2493                 if (rem < 0)
2494                         return rem;
2495
2496                 data += (count - rem);
2497                 count = rem;
2498         }
2499
2500         return rem;
2501 }
2502 EXPORT_SYMBOL(hci_recv_stream_fragment);
2503
2504 /* ---- Interface to upper protocols ---- */
2505
2506 int hci_register_cb(struct hci_cb *cb)
2507 {
2508         BT_DBG("%p name %s", cb, cb->name);
2509
2510         write_lock(&hci_cb_list_lock);
2511         list_add(&cb->list, &hci_cb_list);
2512         write_unlock(&hci_cb_list_lock);
2513
2514         return 0;
2515 }
2516 EXPORT_SYMBOL(hci_register_cb);
2517
2518 int hci_unregister_cb(struct hci_cb *cb)
2519 {
2520         BT_DBG("%p name %s", cb, cb->name);
2521
2522         write_lock(&hci_cb_list_lock);
2523         list_del(&cb->list);
2524         write_unlock(&hci_cb_list_lock);
2525
2526         return 0;
2527 }
2528 EXPORT_SYMBOL(hci_unregister_cb);
2529
2530 static int hci_send_frame(struct sk_buff *skb)
2531 {
2532         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2533
2534         if (!hdev) {
2535                 kfree_skb(skb);
2536                 return -ENODEV;
2537         }
2538
2539         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2540
2541         /* Time stamp */
2542         __net_timestamp(skb);
2543
2544         /* Send copy to monitor */
2545         hci_send_to_monitor(hdev, skb);
2546
2547         if (atomic_read(&hdev->promisc)) {
2548                 /* Send copy to the sockets */
2549                 hci_send_to_sock(hdev, skb);
2550         }
2551
2552         /* Get rid of the skb owner prior to sending to the driver. */
2553         skb_orphan(skb);
2554
2555         return hdev->send(skb);
2556 }
2557
2558 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2559 {
2560         skb_queue_head_init(&req->cmd_q);
2561         req->hdev = hdev;
2562         req->err = 0;
2563 }
2564
2565 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2566 {
2567         struct hci_dev *hdev = req->hdev;
2568         struct sk_buff *skb;
2569         unsigned long flags;
2570
2571         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2572
2573         /* If an error occurred during request building, remove all HCI
2574          * commands queued on the HCI request queue.
2575          */
2576         if (req->err) {
2577                 skb_queue_purge(&req->cmd_q);
2578                 return req->err;
2579         }
2580
2581         /* Do not allow empty requests */
2582         if (skb_queue_empty(&req->cmd_q))
2583                 return -ENODATA;
2584
2585         skb = skb_peek_tail(&req->cmd_q);
2586         bt_cb(skb)->req.complete = complete;
2587
2588         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2589         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2590         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2591
2592         queue_work(hdev->workqueue, &hdev->cmd_work);
2593
2594         return 0;
2595 }
2596
2597 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2598                                        u32 plen, void *param)
2599 {
2600         int len = HCI_COMMAND_HDR_SIZE + plen;
2601         struct hci_command_hdr *hdr;
2602         struct sk_buff *skb;
2603
2604         skb = bt_skb_alloc(len, GFP_ATOMIC);
2605         if (!skb)
2606                 return NULL;
2607
2608         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2609         hdr->opcode = cpu_to_le16(opcode);
2610         hdr->plen   = plen;
2611
2612         if (plen)
2613                 memcpy(skb_put(skb, plen), param, plen);
2614
2615         BT_DBG("skb len %d", skb->len);
2616
2617         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2618         skb->dev = (void *) hdev;
2619
2620         return skb;
2621 }
2622
2623 /* Send HCI command */
2624 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2625 {
2626         struct sk_buff *skb;
2627
2628         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2629
2630         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2631         if (!skb) {
2632                 BT_ERR("%s no memory for command", hdev->name);
2633                 return -ENOMEM;
2634         }
2635
2636         /* Stand-alone HCI commands must be flagged as
2637          * single-command requests.
2638          */
2639         bt_cb(skb)->req.start = true;
2640
2641         skb_queue_tail(&hdev->cmd_q, skb);
2642         queue_work(hdev->workqueue, &hdev->cmd_work);
2643
2644         return 0;
2645 }
2646
2647 /* Queue a command to an asynchronous HCI request */
2648 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
2649                     u8 event)
2650 {
2651         struct hci_dev *hdev = req->hdev;
2652         struct sk_buff *skb;
2653
2654         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2655
2656         /* If an error occurred during request building, there is no point in
2657          * queueing the HCI command. We can simply return.
2658          */
2659         if (req->err)
2660                 return;
2661
2662         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2663         if (!skb) {
2664                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2665                        hdev->name, opcode);
2666                 req->err = -ENOMEM;
2667                 return;
2668         }
2669
2670         if (skb_queue_empty(&req->cmd_q))
2671                 bt_cb(skb)->req.start = true;
2672
2673         bt_cb(skb)->req.event = event;
2674
2675         skb_queue_tail(&req->cmd_q, skb);
2676 }
2677
2678 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2679 {
2680         hci_req_add_ev(req, opcode, plen, param, 0);
2681 }
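
     /* Request API sketch (foo_complete is a hypothetical callback):
      * queue several commands and run them as a single request; the
      * callback is invoked once, after the final command completes:
      *
      *        struct hci_request req;
      *
      *        hci_req_init(&req, hdev);
      *        hci_req_add(&req, HCI_OP_RESET, 0, NULL);
      *        hci_req_add_ev(&req, opcode, plen, param, event);
      *        err = hci_req_run(&req, foo_complete);
      *
      * hci_req_add_ev() marks a command as terminated by the custom
      * @event instead of the usual Command Complete/Status. */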
2682
2683 /* Get data from the previously sent command */
2684 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2685 {
2686         struct hci_command_hdr *hdr;
2687
2688         if (!hdev->sent_cmd)
2689                 return NULL;
2690
2691         hdr = (void *) hdev->sent_cmd->data;
2692
2693         if (hdr->opcode != cpu_to_le16(opcode))
2694                 return NULL;
2695
2696         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2697
2698         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2699 }
2700
2701 /* Send ACL data */
2702 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2703 {
2704         struct hci_acl_hdr *hdr;
2705         int len = skb->len;
2706
2707         skb_push(skb, HCI_ACL_HDR_SIZE);
2708         skb_reset_transport_header(skb);
2709         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2710         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2711         hdr->dlen   = cpu_to_le16(len);
2712 }
2713
2714 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2715                           struct sk_buff *skb, __u16 flags)
2716 {
2717         struct hci_conn *conn = chan->conn;
2718         struct hci_dev *hdev = conn->hdev;
2719         struct sk_buff *list;
2720
2721         skb->len = skb_headlen(skb);
2722         skb->data_len = 0;
2723
2724         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2725
2726         switch (hdev->dev_type) {
2727         case HCI_BREDR:
2728                 hci_add_acl_hdr(skb, conn->handle, flags);
2729                 break;
2730         case HCI_AMP:
2731                 hci_add_acl_hdr(skb, chan->handle, flags);
2732                 break;
2733         default:
2734                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2735                 return;
2736         }
2737
2738         list = skb_shinfo(skb)->frag_list;
2739         if (!list) {
2740                 /* Non-fragmented */
2741                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2742
2743                 skb_queue_tail(queue, skb);
2744         } else {
2745                 /* Fragmented */
2746                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2747
2748                 skb_shinfo(skb)->frag_list = NULL;
2749
2750                 /* Queue all fragments atomically */
2751                 spin_lock(&queue->lock);
2752
2753                 __skb_queue_tail(queue, skb);
2754
2755                 flags &= ~ACL_START;
2756                 flags |= ACL_CONT;
2757                 do {
2758                         skb = list;
                             list = list->next;
2759
2760                         skb->dev = (void *) hdev;
2761                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2762                         hci_add_acl_hdr(skb, conn->handle, flags);
2763
2764                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2765
2766                         __skb_queue_tail(queue, skb);
2767                 } while (list);
2768
2769                 spin_unlock(&queue->lock);
2770         }
2771 }
2772
2773 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2774 {
2775         struct hci_dev *hdev = chan->conn->hdev;
2776
2777         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2778
2779         skb->dev = (void *) hdev;
2780
2781         hci_queue_acl(chan, &chan->data_q, skb, flags);
2782
2783         queue_work(hdev->workqueue, &hdev->tx_work);
2784 }
2785
2786 /* Send SCO data */
2787 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2788 {
2789         struct hci_dev *hdev = conn->hdev;
2790         struct hci_sco_hdr hdr;
2791
2792         BT_DBG("%s len %d", hdev->name, skb->len);
2793
2794         hdr.handle = cpu_to_le16(conn->handle);
2795         hdr.dlen   = skb->len;
2796
2797         skb_push(skb, HCI_SCO_HDR_SIZE);
2798         skb_reset_transport_header(skb);
2799         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2800
2801         skb->dev = (void *) hdev;
2802         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2803
2804         skb_queue_tail(&conn->data_q, skb);
2805         queue_work(hdev->workqueue, &hdev->tx_work);
2806 }
2807
2808 /* ---- HCI TX task (outgoing data) ---- */
2809
2810 /* HCI Connection scheduler */
2811 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2812                                      int *quote)
2813 {
2814         struct hci_conn_hash *h = &hdev->conn_hash;
2815         struct hci_conn *conn = NULL, *c;
2816         unsigned int num = 0, min = ~0;
2817
2818         /* We don't have to lock the device here. Connections are always
2819          * added and removed with the TX task disabled. */
2820
2821         rcu_read_lock();
2822
2823         list_for_each_entry_rcu(c, &h->list, list) {
2824                 if (c->type != type || skb_queue_empty(&c->data_q))
2825                         continue;
2826
2827                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2828                         continue;
2829
2830                 num++;
2831
2832                 if (c->sent < min) {
2833                         min  = c->sent;
2834                         conn = c;
2835                 }
2836
2837                 if (hci_conn_num(hdev, type) == num)
2838                         break;
2839         }
2840
2841         rcu_read_unlock();
2842
2843         if (conn) {
2844                 int cnt, q;
2845
2846                 switch (conn->type) {
2847                 case ACL_LINK:
2848                         cnt = hdev->acl_cnt;
2849                         break;
2850                 case SCO_LINK:
2851                 case ESCO_LINK:
2852                         cnt = hdev->sco_cnt;
2853                         break;
2854                 case LE_LINK:
2855                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2856                         break;
2857                 default:
2858                         cnt = 0;
2859                         BT_ERR("Unknown link type");
2860                 }
2861
2862                 q = cnt / num;
2863                 *quote = q ? q : 1;
2864         } else {
2865                 *quote = 0;
             }
2866
2867         BT_DBG("conn %p quote %d", conn, *quote);
2868         return conn;
2869 }
2870
2871 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2872 {
2873         struct hci_conn_hash *h = &hdev->conn_hash;
2874         struct hci_conn *c;
2875
2876         BT_ERR("%s link tx timeout", hdev->name);
2877
2878         rcu_read_lock();
2879
2880         /* Kill stalled connections */
2881         list_for_each_entry_rcu(c, &h->list, list) {
2882                 if (c->type == type && c->sent) {
2883                         BT_ERR("%s killing stalled connection %pMR",
2884                                hdev->name, &c->dst);
2885                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2886                 }
2887         }
2888
2889         rcu_read_unlock();
2890 }
2891
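     /* Channel scheduler: among connections of @type, pick the channel
      * whose queued skb has the highest priority, breaking ties in
      * favour of the connection that has sent the least. *quote is set
      * to that channel's fair share of the free controller buffers (at
      * least 1). */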
2892 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2893                                       int *quote)
2894 {
2895         struct hci_conn_hash *h = &hdev->conn_hash;
2896         struct hci_chan *chan = NULL;
2897         unsigned int num = 0, min = ~0, cur_prio = 0;
2898         struct hci_conn *conn;
2899         int cnt, q, conn_num = 0;
2900
2901         BT_DBG("%s", hdev->name);
2902
2903         rcu_read_lock();
2904
2905         list_for_each_entry_rcu(conn, &h->list, list) {
2906                 struct hci_chan *tmp;
2907
2908                 if (conn->type != type)
2909                         continue;
2910
2911                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2912                         continue;
2913
2914                 conn_num++;
2915
2916                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2917                         struct sk_buff *skb;
2918
2919                         if (skb_queue_empty(&tmp->data_q))
2920                                 continue;
2921
2922                         skb = skb_peek(&tmp->data_q);
2923                         if (skb->priority < cur_prio)
2924                                 continue;
2925
2926                         if (skb->priority > cur_prio) {
2927                                 num = 0;
2928                                 min = ~0;
2929                                 cur_prio = skb->priority;
2930                         }
2931
2932                         num++;
2933
2934                         if (conn->sent < min) {
2935                                 min  = conn->sent;
2936                                 chan = tmp;
2937                         }
2938                 }
2939
2940                 if (hci_conn_num(hdev, type) == conn_num)
2941                         break;
2942         }
2943
2944         rcu_read_unlock();
2945
2946         if (!chan)
2947                 return NULL;
2948
2949         switch (chan->conn->type) {
2950         case ACL_LINK:
2951                 cnt = hdev->acl_cnt;
2952                 break;
2953         case AMP_LINK:
2954                 cnt = hdev->block_cnt;
2955                 break;
2956         case SCO_LINK:
2957         case ESCO_LINK:
2958                 cnt = hdev->sco_cnt;
2959                 break;
2960         case LE_LINK:
2961                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2962                 break;
2963         default:
2964                 cnt = 0;
2965                 BT_ERR("Unknown link type");
2966         }
2967
2968         q = cnt / num;
2969         *quote = q ? q : 1;
2970         BT_DBG("chan %p quote %d", chan, *quote);
2971         return chan;
2972 }
2973
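     /* Starvation avoidance: after a scheduling round, promote the head
      * skb of every channel that received no service to priority
      * HCI_PRIO_MAX - 1, so that lower-priority traffic eventually gets
      * sent. */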
2974 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2975 {
2976         struct hci_conn_hash *h = &hdev->conn_hash;
2977         struct hci_conn *conn;
2978         int num = 0;
2979
2980         BT_DBG("%s", hdev->name);
2981
2982         rcu_read_lock();
2983
2984         list_for_each_entry_rcu(conn, &h->list, list) {
2985                 struct hci_chan *chan;
2986
2987                 if (conn->type != type)
2988                         continue;
2989
2990                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2991                         continue;
2992
2993                 num++;
2994
2995                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2996                         struct sk_buff *skb;
2997
2998                         if (chan->sent) {
2999                                 chan->sent = 0;
3000                                 continue;
3001                         }
3002
3003                         if (skb_queue_empty(&chan->data_q))
3004                                 continue;
3005
3006                         skb = skb_peek(&chan->data_q);
3007                         if (skb->priority >= HCI_PRIO_MAX - 1)
3008                                 continue;
3009
3010                         skb->priority = HCI_PRIO_MAX - 1;
3011
3012                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3013                                skb->priority);
3014                 }
3015
3016                 if (hci_conn_num(hdev, type) == num)
3017                         break;
3018         }
3019
3020         rcu_read_unlock();
3022 }
3023
3024 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3025 {
3026         /* Calculate count of blocks used by this packet */
3027         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3028 }
3029
3030 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3031 {
3032         if (!test_bit(HCI_RAW, &hdev->flags)) {
3033                 /* The ACL tx timeout must be longer than the maximum
3034                  * link supervision timeout (40.9 seconds) */
3035                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3036                                        HCI_ACL_TX_TIMEOUT))
3037                         hci_link_tx_to(hdev, ACL_LINK);
3038         }
3039 }
3040
3041 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3042 {
3043         unsigned int cnt = hdev->acl_cnt;
3044         struct hci_chan *chan;
3045         struct sk_buff *skb;
3046         int quote;
3047
3048         __check_timeout(hdev, cnt);
3049
3050         while (hdev->acl_cnt &&
3051                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3052                 u32 priority = (skb_peek(&chan->data_q))->priority;
3053                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3054                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3055                                skb->len, skb->priority);
3056
3057                         /* Stop if priority has changed */
3058                         if (skb->priority < priority)
3059                                 break;
3060
3061                         skb = skb_dequeue(&chan->data_q);
3062
3063                         hci_conn_enter_active_mode(chan->conn,
3064                                                    bt_cb(skb)->force_active);
3065
3066                         hci_send_frame(skb);
3067                         hdev->acl_last_tx = jiffies;
3068
3069                         hdev->acl_cnt--;
3070                         chan->sent++;
3071                         chan->conn->sent++;
3072                 }
3073         }
3074
3075         if (cnt != hdev->acl_cnt)
3076                 hci_prio_recalculate(hdev, ACL_LINK);
3077 }
3078
3079 static void hci_sched_acl_blk(struct hci_dev *hdev)
3080 {
3081         unsigned int cnt = hdev->block_cnt;
3082         struct hci_chan *chan;
3083         struct sk_buff *skb;
3084         int quote;
3085         u8 type;
3086
3087         __check_timeout(hdev, cnt);
3088
3089         BT_DBG("%s", hdev->name);
3090
3091         if (hdev->dev_type == HCI_AMP)
3092                 type = AMP_LINK;
3093         else
3094                 type = ACL_LINK;
3095
3096         while (hdev->block_cnt > 0 &&
3097                (chan = hci_chan_sent(hdev, type, &quote))) {
3098                 u32 priority = (skb_peek(&chan->data_q))->priority;
3099                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3100                         int blocks;
3101
3102                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3103                                skb->len, skb->priority);
3104
3105                         /* Stop if priority has changed */
3106                         if (skb->priority < priority)
3107                                 break;
3108
3109                         skb = skb_dequeue(&chan->data_q);
3110
3111                         blocks = __get_blocks(hdev, skb);
3112                         if (blocks > hdev->block_cnt)
3113                                 return;
3114
3115                         hci_conn_enter_active_mode(chan->conn,
3116                                                    bt_cb(skb)->force_active);
3117
3118                         hci_send_frame(skb);
3119                         hdev->acl_last_tx = jiffies;
3120
3121                         hdev->block_cnt -= blocks;
3122                         quote -= blocks;
3123
3124                         chan->sent += blocks;
3125                         chan->conn->sent += blocks;
3126                 }
3127         }
3128
3129         if (cnt != hdev->block_cnt)
3130                 hci_prio_recalculate(hdev, type);
3131 }
3132
3133 static void hci_sched_acl(struct hci_dev *hdev)
3134 {
3135         BT_DBG("%s", hdev->name);
3136
3137         /* No ACL link over BR/EDR controller */
3138         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3139                 return;
3140
3141         /* No AMP link over AMP controller */
3142         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3143                 return;
3144
3145         switch (hdev->flow_ctl_mode) {
3146         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3147                 hci_sched_acl_pkt(hdev);
3148                 break;
3149
3150         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3151                 hci_sched_acl_blk(hdev);
3152                 break;
3153         }
3154 }
3155
3156 /* Schedule SCO */
3157 static void hci_sched_sco(struct hci_dev *hdev)
3158 {
3159         struct hci_conn *conn;
3160         struct sk_buff *skb;
3161         int quote;
3162
3163         BT_DBG("%s", hdev->name);
3164
3165         if (!hci_conn_num(hdev, SCO_LINK))
3166                 return;
3167
3168         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3169                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3170                         BT_DBG("skb %p len %d", skb, skb->len);
3171                         hci_send_frame(skb);
3172
3173                         conn->sent++;
3174                         if (conn->sent == ~0)
3175                                 conn->sent = 0;
3176                 }
3177         }
3178 }
3179
3180 static void hci_sched_esco(struct hci_dev *hdev)
3181 {
3182         struct hci_conn *conn;
3183         struct sk_buff *skb;
3184         int quote;
3185
3186         BT_DBG("%s", hdev->name);
3187
3188         if (!hci_conn_num(hdev, ESCO_LINK))
3189                 return;
3190
3191         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3192                                                      &quote))) {
3193                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3194                         BT_DBG("skb %p len %d", skb, skb->len);
3195                         hci_send_frame(skb);
3196
3197                         conn->sent++;
3198                         if (conn->sent == ~0)
3199                                 conn->sent = 0;
3200                 }
3201         }
3202 }
3203
3204 static void hci_sched_le(struct hci_dev *hdev)
3205 {
3206         struct hci_chan *chan;
3207         struct sk_buff *skb;
3208         int quote, cnt, tmp;
3209
3210         BT_DBG("%s", hdev->name);
3211
3212         if (!hci_conn_num(hdev, LE_LINK))
3213                 return;
3214
3215         if (!test_bit(HCI_RAW, &hdev->flags)) {
3216                 /* The LE tx timeout must be longer than the maximum
3217                  * link supervision timeout (40.9 seconds) */
3218                 if (!hdev->le_cnt && hdev->le_pkts &&
3219                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3220                         hci_link_tx_to(hdev, LE_LINK);
3221         }
3222
3223         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3224         tmp = cnt;
3225         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3226                 u32 priority = (skb_peek(&chan->data_q))->priority;
3227                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3228                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3229                                skb->len, skb->priority);
3230
3231                         /* Stop if priority has changed */
3232                         if (skb->priority < priority)
3233                                 break;
3234
3235                         skb = skb_dequeue(&chan->data_q);
3236
3237                         hci_send_frame(skb);
3238                         hdev->le_last_tx = jiffies;
3239
3240                         cnt--;
3241                         chan->sent++;
3242                         chan->conn->sent++;
3243                 }
3244         }
3245
3246         if (hdev->le_pkts)
3247                 hdev->le_cnt = cnt;
3248         else
3249                 hdev->acl_cnt = cnt;
3250
3251         if (cnt != tmp)
3252                 hci_prio_recalculate(hdev, LE_LINK);
3253 }
3254
3255 static void hci_tx_work(struct work_struct *work)
3256 {
3257         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3258         struct sk_buff *skb;
3259
3260         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3261                hdev->sco_cnt, hdev->le_cnt);
3262
3263         /* Schedule queues and send pending frames to the HCI driver */
3264
3265         hci_sched_acl(hdev);
3266
3267         hci_sched_sco(hdev);
3268
3269         hci_sched_esco(hdev);
3270
3271         hci_sched_le(hdev);
3272
3273         /* Send next queued raw (unknown type) packet */
3274         while ((skb = skb_dequeue(&hdev->raw_q)))
3275                 hci_send_frame(skb);
3276 }
3277
3278 /* ----- HCI RX task (incoming data processing) ----- */
3279
3280 /* ACL data packet */
3281 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3282 {
3283         struct hci_acl_hdr *hdr = (void *) skb->data;
3284         struct hci_conn *conn;
3285         __u16 handle, flags;
3286
3287         skb_pull(skb, HCI_ACL_HDR_SIZE);
3288
3289         handle = __le16_to_cpu(hdr->handle);
3290         flags  = hci_flags(handle);
3291         handle = hci_handle(handle);
3292
3293         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3294                handle, flags);
3295
3296         hdev->stat.acl_rx++;
3297
3298         hci_dev_lock(hdev);
3299         conn = hci_conn_hash_lookup_handle(hdev, handle);
3300         hci_dev_unlock(hdev);
3301
3302         if (conn) {
3303                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3304
3305                 /* Send to upper protocol */
3306                 l2cap_recv_acldata(conn, skb, flags);
3307                 return;
3308         } else {
3309                 BT_ERR("%s ACL packet for unknown connection handle %d",
3310                        hdev->name, handle);
3311         }
3312
3313         kfree_skb(skb);
3314 }
3315
3316 /* SCO data packet */
3317 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3318 {
3319         struct hci_sco_hdr *hdr = (void *) skb->data;
3320         struct hci_conn *conn;
3321         __u16 handle;
3322
3323         skb_pull(skb, HCI_SCO_HDR_SIZE);
3324
3325         handle = __le16_to_cpu(hdr->handle);
3326
3327         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3328
3329         hdev->stat.sco_rx++;
3330
3331         hci_dev_lock(hdev);
3332         conn = hci_conn_hash_lookup_handle(hdev, handle);
3333         hci_dev_unlock(hdev);
3334
3335         if (conn) {
3336                 /* Send to upper protocol */
3337                 sco_recv_scodata(conn, skb);
3338                 return;
3339         } else {
3340                 BT_ERR("%s SCO packet for unknown connection handle %d",
3341                        hdev->name, handle);
3342         }
3343
3344         kfree_skb(skb);
3345 }
3346
3347 static bool hci_req_is_complete(struct hci_dev *hdev)
3348 {
3349         struct sk_buff *skb;
3350
3351         skb = skb_peek(&hdev->cmd_q);
3352         if (!skb)
3353                 return true;
3354
3355         return bt_cb(skb)->req.start;
3356 }
3357
3358 static void hci_resend_last(struct hci_dev *hdev)
3359 {
3360         struct hci_command_hdr *sent;
3361         struct sk_buff *skb;
3362         u16 opcode;
3363
3364         if (!hdev->sent_cmd)
3365                 return;
3366
3367         sent = (void *) hdev->sent_cmd->data;
3368         opcode = __le16_to_cpu(sent->opcode);
3369         if (opcode == HCI_OP_RESET)
3370                 return;
3371
3372         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3373         if (!skb)
3374                 return;
3375
3376         skb_queue_head(&hdev->cmd_q, skb);
3377         queue_work(hdev->workqueue, &hdev->cmd_work);
3378 }
3379
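     /* Called from event processing when a command completes. Resolves
      * which request the completed command belongs to, handles
      * spontaneous and out-of-order completions, and invokes the
      * request's complete callback once its last command has finished
      * or any command in it has failed. */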
3380 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3381 {
3382         hci_req_complete_t req_complete = NULL;
3383         struct sk_buff *skb;
3384         unsigned long flags;
3385
3386         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3387
3388         /* If the completed command doesn't match the last one that was
3389          * sent we need to do special handling of it.
3390          */
3391         if (!hci_sent_cmd_data(hdev, opcode)) {
3392                 /* Some CSR based controllers generate a spontaneous
3393                  * reset complete event during init and any pending
3394                  * command will never be completed. In such a case we
3395                  * need to resend whatever was the last sent
3396                  * command.
3397                  */
3398                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3399                         hci_resend_last(hdev);
3400
3401                 return;
3402         }
3403
3404         /* If the command succeeded and there are still more commands in
3405          * this request, the request is not yet complete.
3406          */
3407         if (!status && !hci_req_is_complete(hdev))
3408                 return;
3409
3410         /* If this was the last command in a request, the complete
3411          * callback will be found in hdev->sent_cmd instead of the
3412          * command queue (hdev->cmd_q).
3413          */
3414         if (hdev->sent_cmd) {
3415                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3416                 if (req_complete)
3417                         goto call_complete;
3418         }
3419
3420         /* Remove all pending commands belonging to this request */
3421         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3422         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3423                 if (bt_cb(skb)->req.start) {
3424                         __skb_queue_head(&hdev->cmd_q, skb);
3425                         break;
3426                 }
3427
3428                 req_complete = bt_cb(skb)->req.complete;
3429                 kfree_skb(skb);
3430         }
3431         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3432
3433 call_complete:
3434         if (req_complete)
3435                 req_complete(hdev, status);
3436 }
3437
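/* Work handler that drains the receive queue. Every frame is mirrored
 * to the monitor, and additionally to HCI sockets when the device is
 * in promiscuous mode, before being dispatched to the handler matching
 * its packet type.
 */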
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

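/* Work handler for the command queue. A command is sent only while the
 * controller still has credits (cmd_cnt); a clone of the frame is kept
 * in hdev->sent_cmd for request tracking, and the command timer is
 * re-armed unless a reset is in flight, in which case it is stopped.
 */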
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

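/* Start a general inquiry using the GIAC access code, flushing the
 * inquiry cache first. Per the HCI_Inquiry command definition, length
 * is expressed in units of 1.28 seconds, so an illustrative call such
 * as hci_do_inquiry(hdev, 0x08) would run for roughly 10.24 seconds.
 */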
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return -EINPROGRESS;

        inquiry_cache_flush(hdev);

        memset(&cp, 0, sizeof(cp));
        memcpy(&cp.lap, lap, sizeof(cp.lap));
        cp.length = length;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

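/* Abort a running inquiry; returns -EALREADY when none is active. */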
int hci_cancel_inquiry(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_INQUIRY, &hdev->flags))
                return -EALREADY;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

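/* Translate an exported bdaddr type (BDADDR_LE_*) into the internal
 * LE address type, treating any unrecognized value as a random
 * address.
 */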
u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fall back to the LE random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}