Bluetooth: Fix calling request callback more than once
[pandora-kernel.git] / net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50
51 /* ---- HCI notifications ---- */
52
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55         hci_sock_dev_event(hdev, event);
56 }
57
58 /* ---- HCI requests ---- */
59
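/* Synchronous requests: a caller builds an hci_request, runs it with
 * hci_req_sync_complete() as the completion callback and then sleeps
 * on hdev->req_wait_q. The callback (carrying the command status) and
 * hci_req_cancel() (e.g. when the device is being closed) move
 * req_status away from HCI_REQ_PEND and wake the sleeping caller.
 */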
60 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
61 {
62         BT_DBG("%s result 0x%2.2x", hdev->name, result);
63
64         if (hdev->req_status == HCI_REQ_PEND) {
65                 hdev->req_result = result;
66                 hdev->req_status = HCI_REQ_DONE;
67                 wake_up_interruptible(&hdev->req_wait_q);
68         }
69 }
70
71 static void hci_req_cancel(struct hci_dev *hdev, int err)
72 {
73         BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75         if (hdev->req_status == HCI_REQ_PEND) {
76                 hdev->req_result = err;
77                 hdev->req_status = HCI_REQ_CANCELED;
78                 wake_up_interruptible(&hdev->req_wait_q);
79         }
80 }
81
82 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83                                             u8 event)
84 {
85         struct hci_ev_cmd_complete *ev;
86         struct hci_event_hdr *hdr;
87         struct sk_buff *skb;
88
89         hci_dev_lock(hdev);
90
91         skb = hdev->recv_evt;
92         hdev->recv_evt = NULL;
93
94         hci_dev_unlock(hdev);
95
96         if (!skb)
97                 return ERR_PTR(-ENODATA);
98
99         if (skb->len < sizeof(*hdr)) {
100                 BT_ERR("Too short HCI event");
101                 goto failed;
102         }
103
104         hdr = (void *) skb->data;
105         skb_pull(skb, HCI_EVENT_HDR_SIZE);
106
107         if (event) {
108                 if (hdr->evt != event)
109                         goto failed;
110                 return skb;
111         }
112
113         if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114                 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115                 goto failed;
116         }
117
118         if (skb->len < sizeof(*ev)) {
119                 BT_ERR("Too short cmd_complete event");
120                 goto failed;
121         }
122
123         ev = (void *) skb->data;
124         skb_pull(skb, sizeof(*ev));
125
126         if (opcode == __le16_to_cpu(ev->opcode))
127                 return skb;
128
129         BT_DBG("opcode doesn't match (0x%4.4x != 0x%4.4x)", opcode,
130                __le16_to_cpu(ev->opcode));
131
132 failed:
133         kfree_skb(skb);
134         return ERR_PTR(-ENODATA);
135 }
136
137 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
138                                   const void *param, u8 event, u32 timeout)
139 {
140         DECLARE_WAITQUEUE(wait, current);
141         struct hci_request req;
142         int err = 0;
143
144         BT_DBG("%s", hdev->name);
145
146         hci_req_init(&req, hdev);
147
148         hci_req_add_ev(&req, opcode, plen, param, event);
149
150         hdev->req_status = HCI_REQ_PEND;
151
152         err = hci_req_run(&req, hci_req_sync_complete);
153         if (err < 0)
154                 return ERR_PTR(err);
155
156         add_wait_queue(&hdev->req_wait_q, &wait);
157         set_current_state(TASK_INTERRUPTIBLE);
158
159         schedule_timeout(timeout);
160
161         remove_wait_queue(&hdev->req_wait_q, &wait);
162
163         if (signal_pending(current))
164                 return ERR_PTR(-EINTR);
165
166         switch (hdev->req_status) {
167         case HCI_REQ_DONE:
168                 err = -bt_to_errno(hdev->req_result);
169                 break;
170
171         case HCI_REQ_CANCELED:
172                 err = -hdev->req_result;
173                 break;
174
175         default:
176                 err = -ETIMEDOUT;
177                 break;
178         }
179
180         hdev->req_status = hdev->req_result = 0;
181
182         BT_DBG("%s end: err %d", hdev->name, err);
183
184         if (err < 0)
185                 return ERR_PTR(err);
186
187         return hci_get_cmd_complete(hdev, opcode, event);
188 }
189 EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
191 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
192                                const void *param, u32 timeout)
193 {
194         return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
195 }
196 EXPORT_SYMBOL(__hci_cmd_sync);
197
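/* Illustrative use of __hci_cmd_sync() (a sketch, not taken from this
 * file): send Read BD Address and wait for its Command Complete, with
 * the request lock held so requests stay serialized:
 *
 *	struct sk_buff *skb;
 *	struct hci_rp_read_bd_addr *rp;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (void *) skb->data;
 *	... use rp->bdaddr ...
 *	kfree_skb(skb);
 */
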
198 /* Execute request and wait for completion. */
199 static int __hci_req_sync(struct hci_dev *hdev,
200                           void (*func)(struct hci_request *req,
201                                       unsigned long opt),
202                           unsigned long opt, __u32 timeout)
203 {
204         struct hci_request req;
205         DECLARE_WAITQUEUE(wait, current);
206         int err = 0;
207
208         BT_DBG("%s start", hdev->name);
209
210         hci_req_init(&req, hdev);
211
212         hdev->req_status = HCI_REQ_PEND;
213
214         func(&req, opt);
215
216         err = hci_req_run(&req, hci_req_sync_complete);
217         if (err < 0) {
218                 hdev->req_status = 0;
219
220                 /* ENODATA means the HCI request command queue is empty.
221                  * This can happen when a request with conditionals doesn't
222                  * trigger any commands to be sent. This is normal behavior
223                  * and should not trigger an error return.
224                  */
225                 if (err == -ENODATA)
226                         return 0;
227
228                 return err;
229         }
230
231         add_wait_queue(&hdev->req_wait_q, &wait);
232         set_current_state(TASK_INTERRUPTIBLE);
233
234         schedule_timeout(timeout);
235
236         remove_wait_queue(&hdev->req_wait_q, &wait);
237
238         if (signal_pending(current))
239                 return -EINTR;
240
241         switch (hdev->req_status) {
242         case HCI_REQ_DONE:
243                 err = -bt_to_errno(hdev->req_result);
244                 break;
245
246         case HCI_REQ_CANCELED:
247                 err = -hdev->req_result;
248                 break;
249
250         default:
251                 err = -ETIMEDOUT;
252                 break;
253         }
254
255         hdev->req_status = hdev->req_result = 0;
256
257         BT_DBG("%s end: err %d", hdev->name, err);
258
259         return err;
260 }
261
262 static int hci_req_sync(struct hci_dev *hdev,
263                         void (*req)(struct hci_request *req,
264                                     unsigned long opt),
265                         unsigned long opt, __u32 timeout)
266 {
267         int ret;
268
269         if (!test_bit(HCI_UP, &hdev->flags))
270                 return -ENETDOWN;
271
272         /* Serialize all requests */
273         hci_req_lock(hdev);
274         ret = __hci_req_sync(hdev, req, opt, timeout);
275         hci_req_unlock(hdev);
276
277         return ret;
278 }
279
280 static void hci_reset_req(struct hci_request *req, unsigned long opt)
281 {
282         BT_DBG("%s %ld", req->hdev->name, opt);
283
284         /* Reset device */
285         set_bit(HCI_RESET, &req->hdev->flags);
286         hci_req_add(req, HCI_OP_RESET, 0, NULL);
287 }
288
289 static void bredr_init(struct hci_request *req)
290 {
291         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
292
293         /* Read Local Supported Features */
294         hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
295
296         /* Read Local Version */
297         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
298
299         /* Read BD Address */
300         hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
301 }
302
303 static void amp_init(struct hci_request *req)
304 {
305         req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
306
307         /* Read Local Version */
308         hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
309
310         /* Read Local AMP Info */
311         hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
312
313         /* Read Data Block Size */
314         hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
315 }
316
317 static void hci_init1_req(struct hci_request *req, unsigned long opt)
318 {
319         struct hci_dev *hdev = req->hdev;
320
321         BT_DBG("%s %ld", hdev->name, opt);
322
323         /* Reset */
324         if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
325                 hci_reset_req(req, 0);
326
327         switch (hdev->dev_type) {
328         case HCI_BREDR:
329                 bredr_init(req);
330                 break;
331
332         case HCI_AMP:
333                 amp_init(req);
334                 break;
335
336         default:
337                 BT_ERR("Unknown device type %d", hdev->dev_type);
338                 break;
339         }
340 }
341
342 static void bredr_setup(struct hci_request *req)
343 {
344         __le16 param;
345         __u8 flt_type;
346
347         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
348         hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
349
350         /* Read Class of Device */
351         hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
352
353         /* Read Local Name */
354         hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
355
356         /* Read Voice Setting */
357         hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
358
359         /* Clear Event Filters */
360         flt_type = HCI_FLT_CLEAR_ALL;
361         hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
362
363         /* Connection accept timeout: 0x7d00 * 0.625ms = ~20 secs */
364         param = __constant_cpu_to_le16(0x7d00);
365         hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
366
367         /* Read page scan parameters */
368         if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370                 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
371         }
372 }
373
374 static void le_setup(struct hci_request *req)
375 {
376         struct hci_dev *hdev = req->hdev;
377
378         /* Read LE Buffer Size */
379         hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
380
381         /* Read LE Local Supported Features */
382         hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
383
384         /* Read LE Advertising Channel TX Power */
385         hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
386
387         /* Read LE White List Size */
388         hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
389
390         /* Read LE Supported States */
391         hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
392
393         /* LE-only controllers have LE implicitly enabled */
394         if (!lmp_bredr_capable(hdev))
395                 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
396 }
397
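/* Pick the inquiry mode for the controller: 0x02 = results with
 * extended inquiry response (EIR), 0x01 = results with RSSI and
 * 0x00 = standard results. The manufacturer/revision checks below
 * cover known controllers that deliver RSSI results without
 * advertising the corresponding feature bit.
 */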
398 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399 {
400         if (lmp_ext_inq_capable(hdev))
401                 return 0x02;
402
403         if (lmp_inq_rssi_capable(hdev))
404                 return 0x01;
405
406         if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407             hdev->lmp_subver == 0x0757)
408                 return 0x01;
409
410         if (hdev->manufacturer == 15) {
411                 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412                         return 0x01;
413                 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414                         return 0x01;
415                 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416                         return 0x01;
417         }
418
419         if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420             hdev->lmp_subver == 0x1805)
421                 return 0x01;
422
423         return 0x00;
424 }
425
426 static void hci_setup_inquiry_mode(struct hci_request *req)
427 {
428         u8 mode;
429
430         mode = hci_get_inquiry_mode(req->hdev);
431
432         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
433 }
434
435 static void hci_setup_event_mask(struct hci_request *req)
436 {
437         struct hci_dev *hdev = req->hdev;
438
439         /* The second byte is 0xff instead of 0x9f (two reserved bits
440          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
441          * command otherwise.
442          */
443         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
444
445         /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
446          * any event mask for pre-1.2 devices.
447          */
448         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
449                 return;
450
451         if (lmp_bredr_capable(hdev)) {
452                 events[4] |= 0x01; /* Flow Specification Complete */
453                 events[4] |= 0x02; /* Inquiry Result with RSSI */
454                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455                 events[5] |= 0x08; /* Synchronous Connection Complete */
456                 events[5] |= 0x10; /* Synchronous Connection Changed */
457         }
458
459         if (lmp_inq_rssi_capable(hdev))
460                 events[4] |= 0x02; /* Inquiry Result with RSSI */
461
462         if (lmp_sniffsubr_capable(hdev))
463                 events[5] |= 0x20; /* Sniff Subrating */
464
465         if (lmp_pause_enc_capable(hdev))
466                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
467
468         if (lmp_ext_inq_capable(hdev))
469                 events[5] |= 0x40; /* Extended Inquiry Result */
470
471         if (lmp_no_flush_capable(hdev))
472                 events[7] |= 0x01; /* Enhanced Flush Complete */
473
474         if (lmp_lsto_capable(hdev))
475                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
476
477         if (lmp_ssp_capable(hdev)) {
478                 events[6] |= 0x01;      /* IO Capability Request */
479                 events[6] |= 0x02;      /* IO Capability Response */
480                 events[6] |= 0x04;      /* User Confirmation Request */
481                 events[6] |= 0x08;      /* User Passkey Request */
482                 events[6] |= 0x10;      /* Remote OOB Data Request */
483                 events[6] |= 0x20;      /* Simple Pairing Complete */
484                 events[7] |= 0x04;      /* User Passkey Notification */
485                 events[7] |= 0x08;      /* Keypress Notification */
486                 events[7] |= 0x10;      /* Remote Host Supported
487                                          * Features Notification
488                                          */
489         }
490
491         if (lmp_le_capable(hdev))
492                 events[7] |= 0x20;      /* LE Meta-Event */
493
494         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
495
496         if (lmp_le_capable(hdev)) {
497                 memset(events, 0, sizeof(events));
498                 events[0] = 0x1f; /* the five LE meta events of 4.0 */
499                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
500                             sizeof(events), events);
501         }
502 }
503
504 static void hci_init2_req(struct hci_request *req, unsigned long opt)
505 {
506         struct hci_dev *hdev = req->hdev;
507
508         if (lmp_bredr_capable(hdev))
509                 bredr_setup(req);
510
511         if (lmp_le_capable(hdev))
512                 le_setup(req);
513
514         hci_setup_event_mask(req);
515
516         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
517          * local supported commands HCI command.
518          */
519         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
520                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
521
522         if (lmp_ssp_capable(hdev)) {
523                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
524                         u8 mode = 0x01;
525                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
526                                     sizeof(mode), &mode);
527                 } else {
528                         struct hci_cp_write_eir cp;
529
530                         memset(hdev->eir, 0, sizeof(hdev->eir));
531                         memset(&cp, 0, sizeof(cp));
532
533                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
534                 }
535         }
536
537         if (lmp_inq_rssi_capable(hdev))
538                 hci_setup_inquiry_mode(req);
539
540         if (lmp_inq_tx_pwr_capable(hdev))
541                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
542
543         if (lmp_ext_feat_capable(hdev)) {
544                 struct hci_cp_read_local_ext_features cp;
545
546                 cp.page = 0x01;
547                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
548                             sizeof(cp), &cp);
549         }
550
551         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
552                 u8 enable = 1;
553                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
554                             &enable);
555         }
556 }
557
558 static void hci_setup_link_policy(struct hci_request *req)
559 {
560         struct hci_dev *hdev = req->hdev;
561         struct hci_cp_write_def_link_policy cp;
562         u16 link_policy = 0;
563
564         if (lmp_rswitch_capable(hdev))
565                 link_policy |= HCI_LP_RSWITCH;
566         if (lmp_hold_capable(hdev))
567                 link_policy |= HCI_LP_HOLD;
568         if (lmp_sniff_capable(hdev))
569                 link_policy |= HCI_LP_SNIFF;
570         if (lmp_park_capable(hdev))
571                 link_policy |= HCI_LP_PARK;
572
573         cp.policy = cpu_to_le16(link_policy);
574         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
575 }
576
577 static void hci_set_le_support(struct hci_request *req)
578 {
579         struct hci_dev *hdev = req->hdev;
580         struct hci_cp_write_le_host_supported cp;
581
582         /* LE-only devices do not support explicit enablement */
583         if (!lmp_bredr_capable(hdev))
584                 return;
585
586         memset(&cp, 0, sizeof(cp));
587
588         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
589                 cp.le = 0x01;
590                 cp.simul = lmp_le_br_capable(hdev);
591         }
592
593         if (cp.le != lmp_host_le_capable(hdev))
594                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
595                             &cp);
596 }
597
598 static void hci_init3_req(struct hci_request *req, unsigned long opt)
599 {
600         struct hci_dev *hdev = req->hdev;
601         u8 p;
602
603         /* Only send HCI_Delete_Stored_Link_Key if it is supported */
604         if (hdev->commands[6] & 0x80) {
605                 struct hci_cp_delete_stored_link_key cp;
606
607                 bacpy(&cp.bdaddr, BDADDR_ANY);
608                 cp.delete_all = 0x01;
609                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
610                             sizeof(cp), &cp);
611         }
612
613         if (hdev->commands[5] & 0x10)
614                 hci_setup_link_policy(req);
615
616         if (lmp_le_capable(hdev)) {
617                 hci_set_le_support(req);
618                 hci_update_ad(req);
619         }
620
621         /* Read features beyond page 1 if available */
622         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
623                 struct hci_cp_read_local_ext_features cp;
624
625                 cp.page = p;
626                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
627                             sizeof(cp), &cp);
628         }
629 }
630
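/* Bring the controller up in stages: stage 1 resets the device and
 * reads basic controller information, stage 2 does the common BR/EDR
 * and LE setup (event mask, SSP/EIR and friends) and stage 3 issues
 * the commands that depend on what the earlier stages discovered.
 */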
631 static int __hci_init(struct hci_dev *hdev)
632 {
633         int err;
634
635         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
636         if (err < 0)
637                 return err;
638
639         /* The HCI_BREDR device type covers single-mode LE, single-mode
640          * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
641          * only need the first stage init.
642          */
643         if (hdev->dev_type != HCI_BREDR)
644                 return 0;
645
646         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
647         if (err < 0)
648                 return err;
649
650         return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
651 }
652
653 static void hci_scan_req(struct hci_request *req, unsigned long opt)
654 {
655         __u8 scan = opt;
656
657         BT_DBG("%s %x", req->hdev->name, scan);
658
659         /* Inquiry and Page scans */
660         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
661 }
662
663 static void hci_auth_req(struct hci_request *req, unsigned long opt)
664 {
665         __u8 auth = opt;
666
667         BT_DBG("%s %x", req->hdev->name, auth);
668
669         /* Authentication */
670         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
671 }
672
673 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
674 {
675         __u8 encrypt = opt;
676
677         BT_DBG("%s %x", req->hdev->name, encrypt);
678
679         /* Encryption */
680         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
681 }
682
683 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
684 {
685         __le16 policy = cpu_to_le16(opt);
686
687         BT_DBG("%s %x", req->hdev->name, policy);
688
689         /* Default link policy */
690         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
691 }
692
693 /* Get HCI device by index.
694  * Device is held on return. */
695 struct hci_dev *hci_dev_get(int index)
696 {
697         struct hci_dev *hdev = NULL, *d;
698
699         BT_DBG("%d", index);
700
701         if (index < 0)
702                 return NULL;
703
704         read_lock(&hci_dev_list_lock);
705         list_for_each_entry(d, &hci_dev_list, list) {
706                 if (d->id == index) {
707                         hdev = hci_dev_hold(d);
708                         break;
709                 }
710         }
711         read_unlock(&hci_dev_list_lock);
712         return hdev;
713 }
714
715 /* ---- Inquiry support ---- */
716
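/* The inquiry cache keeps every device seen during discovery on the
 * "all" list, and additionally tracks devices whose names still need
 * resolving on the "unknown" and "resolve" lists.
 */
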
717 bool hci_discovery_active(struct hci_dev *hdev)
718 {
719         struct discovery_state *discov = &hdev->discovery;
720
721         switch (discov->state) {
722         case DISCOVERY_FINDING:
723         case DISCOVERY_RESOLVING:
724                 return true;
725
726         default:
727                 return false;
728         }
729 }
730
731 void hci_discovery_set_state(struct hci_dev *hdev, int state)
732 {
733         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
734
735         if (hdev->discovery.state == state)
736                 return;
737
738         switch (state) {
739         case DISCOVERY_STOPPED:
740                 if (hdev->discovery.state != DISCOVERY_STARTING)
741                         mgmt_discovering(hdev, 0);
742                 break;
743         case DISCOVERY_STARTING:
744                 break;
745         case DISCOVERY_FINDING:
746                 mgmt_discovering(hdev, 1);
747                 break;
748         case DISCOVERY_RESOLVING:
749                 break;
750         case DISCOVERY_STOPPING:
751                 break;
752         }
753
754         hdev->discovery.state = state;
755 }
756
757 static void inquiry_cache_flush(struct hci_dev *hdev)
758 {
759         struct discovery_state *cache = &hdev->discovery;
760         struct inquiry_entry *p, *n;
761
762         list_for_each_entry_safe(p, n, &cache->all, all) {
763                 list_del(&p->all);
764                 kfree(p);
765         }
766
767         INIT_LIST_HEAD(&cache->unknown);
768         INIT_LIST_HEAD(&cache->resolve);
769 }
770
771 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
772                                                bdaddr_t *bdaddr)
773 {
774         struct discovery_state *cache = &hdev->discovery;
775         struct inquiry_entry *e;
776
777         BT_DBG("cache %p, %pMR", cache, bdaddr);
778
779         list_for_each_entry(e, &cache->all, all) {
780                 if (!bacmp(&e->data.bdaddr, bdaddr))
781                         return e;
782         }
783
784         return NULL;
785 }
786
787 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
788                                                        bdaddr_t *bdaddr)
789 {
790         struct discovery_state *cache = &hdev->discovery;
791         struct inquiry_entry *e;
792
793         BT_DBG("cache %p, %pMR", cache, bdaddr);
794
795         list_for_each_entry(e, &cache->unknown, list) {
796                 if (!bacmp(&e->data.bdaddr, bdaddr))
797                         return e;
798         }
799
800         return NULL;
801 }
802
803 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
804                                                        bdaddr_t *bdaddr,
805                                                        int state)
806 {
807         struct discovery_state *cache = &hdev->discovery;
808         struct inquiry_entry *e;
809
810         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
811
812         list_for_each_entry(e, &cache->resolve, list) {
813                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
814                         return e;
815                 if (!bacmp(&e->data.bdaddr, bdaddr))
816                         return e;
817         }
818
819         return NULL;
820 }
821
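/* Keep the resolve list ordered so that name resolution is attempted
 * for the strongest signal (smallest RSSI magnitude) first; entries
 * with a lookup already pending keep their place at the head.
 */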
822 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
823                                       struct inquiry_entry *ie)
824 {
825         struct discovery_state *cache = &hdev->discovery;
826         struct list_head *pos = &cache->resolve;
827         struct inquiry_entry *p;
828
829         list_del(&ie->list);
830
831         list_for_each_entry(p, &cache->resolve, list) {
832                 if (p->name_state != NAME_PENDING &&
833                     abs(p->data.rssi) >= abs(ie->data.rssi))
834                         break;
835                 pos = &p->list;
836         }
837
838         list_add(&ie->list, pos);
839 }
840
841 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
842                               bool name_known, bool *ssp)
843 {
844         struct discovery_state *cache = &hdev->discovery;
845         struct inquiry_entry *ie;
846
847         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
848
849         hci_remove_remote_oob_data(hdev, &data->bdaddr);
850
851         if (ssp)
852                 *ssp = data->ssp_mode;
853
854         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
855         if (ie) {
856                 if (ie->data.ssp_mode && ssp)
857                         *ssp = true;
858
859                 if (ie->name_state == NAME_NEEDED &&
860                     data->rssi != ie->data.rssi) {
861                         ie->data.rssi = data->rssi;
862                         hci_inquiry_cache_update_resolve(hdev, ie);
863                 }
864
865                 goto update;
866         }
867
868         /* Entry not in the cache. Add new one. */
869         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
870         if (!ie)
871                 return false;
872
873         list_add(&ie->all, &cache->all);
874
875         if (name_known) {
876                 ie->name_state = NAME_KNOWN;
877         } else {
878                 ie->name_state = NAME_NOT_KNOWN;
879                 list_add(&ie->list, &cache->unknown);
880         }
881
882 update:
883         if (name_known && ie->name_state != NAME_KNOWN &&
884             ie->name_state != NAME_PENDING) {
885                 ie->name_state = NAME_KNOWN;
886                 list_del(&ie->list);
887         }
888
889         memcpy(&ie->data, data, sizeof(*data));
890         ie->timestamp = jiffies;
891         cache->timestamp = jiffies;
892
893         if (ie->name_state == NAME_NOT_KNOWN)
894                 return false;
895
896         return true;
897 }
898
899 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
900 {
901         struct discovery_state *cache = &hdev->discovery;
902         struct inquiry_info *info = (struct inquiry_info *) buf;
903         struct inquiry_entry *e;
904         int copied = 0;
905
906         list_for_each_entry(e, &cache->all, all) {
907                 struct inquiry_data *data = &e->data;
908
909                 if (copied >= num)
910                         break;
911
912                 bacpy(&info->bdaddr, &data->bdaddr);
913                 info->pscan_rep_mode    = data->pscan_rep_mode;
914                 info->pscan_period_mode = data->pscan_period_mode;
915                 info->pscan_mode        = data->pscan_mode;
916                 memcpy(info->dev_class, data->dev_class, 3);
917                 info->clock_offset      = data->clock_offset;
918
919                 info++;
920                 copied++;
921         }
922
923         BT_DBG("cache %p, copied %d", cache, copied);
924         return copied;
925 }
926
927 static void hci_inq_req(struct hci_request *req, unsigned long opt)
928 {
929         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
930         struct hci_dev *hdev = req->hdev;
931         struct hci_cp_inquiry cp;
932
933         BT_DBG("%s", hdev->name);
934
935         if (test_bit(HCI_INQUIRY, &hdev->flags))
936                 return;
937
938         /* Start Inquiry */
939         memcpy(&cp.lap, &ir->lap, 3);
940         cp.length  = ir->length;
941         cp.num_rsp = ir->num_rsp;
942         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
943 }
944
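/* Used as the wait_on_bit() action below: schedule until woken up and
 * report whether a signal interrupted the sleep (a non-zero return
 * makes wait_on_bit() give up and return non-zero as well).
 */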
945 static int wait_inquiry(void *word)
946 {
947         schedule();
948         return signal_pending(current);
949 }
950
951 int hci_inquiry(void __user *arg)
952 {
953         __u8 __user *ptr = arg;
954         struct hci_inquiry_req ir;
955         struct hci_dev *hdev;
956         int err = 0, do_inquiry = 0, max_rsp;
957         long timeo;
958         __u8 *buf;
959
960         if (copy_from_user(&ir, ptr, sizeof(ir)))
961                 return -EFAULT;
962
963         hdev = hci_dev_get(ir.dev_id);
964         if (!hdev)
965                 return -ENODEV;
966
967         hci_dev_lock(hdev);
968         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
969             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
970                 inquiry_cache_flush(hdev);
971                 do_inquiry = 1;
972         }
973         hci_dev_unlock(hdev);
974
975         timeo = ir.length * msecs_to_jiffies(2000);
976
977         if (do_inquiry) {
978                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
979                                    timeo);
980                 if (err < 0)
981                         goto done;
982
983                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
984                  * cleared). If interrupted by a signal, bail out via done
985                  * so the hdev reference gets dropped, and return -EINTR. */
986                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
987                                 TASK_INTERRUPTIBLE))
988                         { err = -EINTR; goto done; }
989         }
990
991         /* For an unlimited number of responses (num_rsp == 0) use a
992          * buffer with 255 entries.
993          */
994         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
995
996         /* inquiry_cache_dump() can't sleep, so dump into a temporary
997          * buffer first and then copy it to user space.
998          */
999         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1000         if (!buf) {
1001                 err = -ENOMEM;
1002                 goto done;
1003         }
1004
1005         hci_dev_lock(hdev);
1006         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1007         hci_dev_unlock(hdev);
1008
1009         BT_DBG("num_rsp %d", ir.num_rsp);
1010
1011         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1012                 ptr += sizeof(ir);
1013                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1014                                  ir.num_rsp))
1015                         err = -EFAULT;
1016         } else
1017                 err = -EFAULT;
1018
1019         kfree(buf);
1020
1021 done:
1022         hci_dev_put(hdev);
1023         return err;
1024 }
1025
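/* Build LE advertising data into ptr as a sequence of structures, each
 * a length octet (covering type plus payload), a type octet and the
 * payload itself: flags, TX power and the local name, shortened if it
 * doesn't fit in HCI_MAX_AD_LENGTH. Returns the total length used.
 */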
1026 static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
1027 {
1028         u8 ad_len = 0, flags = 0;
1029         size_t name_len;
1030
1031         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1032                 flags |= LE_AD_GENERAL;
1033
1034         if (!lmp_bredr_capable(hdev))
1035                 flags |= LE_AD_NO_BREDR;
1036
1037         if (lmp_le_br_capable(hdev))
1038                 flags |= LE_AD_SIM_LE_BREDR_CTRL;
1039
1040         if (lmp_host_le_br_capable(hdev))
1041                 flags |= LE_AD_SIM_LE_BREDR_HOST;
1042
1043         if (flags) {
1044                 BT_DBG("adv flags 0x%02x", flags);
1045
1046                 ptr[0] = 2;
1047                 ptr[1] = EIR_FLAGS;
1048                 ptr[2] = flags;
1049
1050                 ad_len += 3;
1051                 ptr += 3;
1052         }
1053
1054         if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
1055                 ptr[0] = 2;
1056                 ptr[1] = EIR_TX_POWER;
1057                 ptr[2] = (u8) hdev->adv_tx_power;
1058
1059                 ad_len += 3;
1060                 ptr += 3;
1061         }
1062
1063         name_len = strlen(hdev->dev_name);
1064         if (name_len > 0) {
1065                 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
1066
1067                 if (name_len > max_len) {
1068                         name_len = max_len;
1069                         ptr[1] = EIR_NAME_SHORT;
1070                 } else
1071                         ptr[1] = EIR_NAME_COMPLETE;
1072
1073                 ptr[0] = name_len + 1;
1074
1075                 memcpy(ptr + 2, hdev->dev_name, name_len);
1076
1077                 ad_len += (name_len + 2);
1078                 ptr += (name_len + 2);
1079         }
1080
1081         return ad_len;
1082 }
1083
1084 void hci_update_ad(struct hci_request *req)
1085 {
1086         struct hci_dev *hdev = req->hdev;
1087         struct hci_cp_le_set_adv_data cp;
1088         u8 len;
1089
1090         if (!lmp_le_capable(hdev))
1091                 return;
1092
1093         memset(&cp, 0, sizeof(cp));
1094
1095         len = create_ad(hdev, cp.data);
1096
1097         if (hdev->adv_data_len == len &&
1098             memcmp(cp.data, hdev->adv_data, len) == 0)
1099                 return;
1100
1101         memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1102         hdev->adv_data_len = len;
1103
1104         cp.length = len;
1105
1106         hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1107 }
1108
1109 /* ---- HCI ioctl helpers ---- */
1110
1111 int hci_dev_open(__u16 dev)
1112 {
1113         struct hci_dev *hdev;
1114         int ret = 0;
1115
1116         hdev = hci_dev_get(dev);
1117         if (!hdev)
1118                 return -ENODEV;
1119
1120         BT_DBG("%s %p", hdev->name, hdev);
1121
1122         hci_req_lock(hdev);
1123
1124         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1125                 ret = -ENODEV;
1126                 goto done;
1127         }
1128
1129         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1130                 ret = -ERFKILL;
1131                 goto done;
1132         }
1133
1134         if (test_bit(HCI_UP, &hdev->flags)) {
1135                 ret = -EALREADY;
1136                 goto done;
1137         }
1138
1139         if (hdev->open(hdev)) {
1140                 ret = -EIO;
1141                 goto done;
1142         }
1143
1144         atomic_set(&hdev->cmd_cnt, 1);
1145         set_bit(HCI_INIT, &hdev->flags);
1146
1147         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1148                 ret = hdev->setup(hdev);
1149
1150         if (!ret) {
1151                 /* Treat all non-BR/EDR controllers as raw devices if
1152                  * enable_hs is not set.
1153                  */
1154                 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1155                         set_bit(HCI_RAW, &hdev->flags);
1156
1157                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1158                         set_bit(HCI_RAW, &hdev->flags);
1159
1160                 if (!test_bit(HCI_RAW, &hdev->flags))
1161                         ret = __hci_init(hdev);
1162         }
1163
1164         clear_bit(HCI_INIT, &hdev->flags);
1165
1166         if (!ret) {
1167                 hci_dev_hold(hdev);
1168                 set_bit(HCI_UP, &hdev->flags);
1169                 hci_notify(hdev, HCI_DEV_UP);
1170                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1171                     mgmt_valid_hdev(hdev)) {
1172                         hci_dev_lock(hdev);
1173                         mgmt_powered(hdev, 1);
1174                         hci_dev_unlock(hdev);
1175                 }
1176         } else {
1177                 /* Init failed, cleanup */
1178                 flush_work(&hdev->tx_work);
1179                 flush_work(&hdev->cmd_work);
1180                 flush_work(&hdev->rx_work);
1181
1182                 skb_queue_purge(&hdev->cmd_q);
1183                 skb_queue_purge(&hdev->rx_q);
1184
1185                 if (hdev->flush)
1186                         hdev->flush(hdev);
1187
1188                 if (hdev->sent_cmd) {
1189                         kfree_skb(hdev->sent_cmd);
1190                         hdev->sent_cmd = NULL;
1191                 }
1192
1193                 hdev->close(hdev);
1194                 hdev->flags = 0;
1195         }
1196
1197 done:
1198         hci_req_unlock(hdev);
1199         hci_dev_put(hdev);
1200         return ret;
1201 }
1202
1203 static int hci_dev_do_close(struct hci_dev *hdev)
1204 {
1205         BT_DBG("%s %p", hdev->name, hdev);
1206
1207         cancel_work_sync(&hdev->le_scan);
1208
1209         cancel_delayed_work(&hdev->power_off);
1210
1211         hci_req_cancel(hdev, ENODEV);
1212         hci_req_lock(hdev);
1213
1214         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1215                 del_timer_sync(&hdev->cmd_timer);
1216                 hci_req_unlock(hdev);
1217                 return 0;
1218         }
1219
1220         /* Flush RX and TX works */
1221         flush_work(&hdev->tx_work);
1222         flush_work(&hdev->rx_work);
1223
1224         if (hdev->discov_timeout > 0) {
1225                 cancel_delayed_work(&hdev->discov_off);
1226                 hdev->discov_timeout = 0;
1227                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
1228         }
1229
1230         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
1231                 cancel_delayed_work(&hdev->service_cache);
1232
1233         cancel_delayed_work_sync(&hdev->le_scan_disable);
1234
1235         hci_dev_lock(hdev);
1236         inquiry_cache_flush(hdev);
1237         hci_conn_hash_flush(hdev);
1238         hci_dev_unlock(hdev);
1239
1240         hci_notify(hdev, HCI_DEV_DOWN);
1241
1242         if (hdev->flush)
1243                 hdev->flush(hdev);
1244
1245         /* Reset device */
1246         skb_queue_purge(&hdev->cmd_q);
1247         atomic_set(&hdev->cmd_cnt, 1);
1248         if (!test_bit(HCI_RAW, &hdev->flags) &&
1249             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1250                 set_bit(HCI_INIT, &hdev->flags);
1251                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1252                 clear_bit(HCI_INIT, &hdev->flags);
1253         }
1254
1255         /* Flush cmd work */
1256         flush_work(&hdev->cmd_work);
1257
1258         /* Drop queues */
1259         skb_queue_purge(&hdev->rx_q);
1260         skb_queue_purge(&hdev->cmd_q);
1261         skb_queue_purge(&hdev->raw_q);
1262
1263         /* Drop last sent command */
1264         if (hdev->sent_cmd) {
1265                 del_timer_sync(&hdev->cmd_timer);
1266                 kfree_skb(hdev->sent_cmd);
1267                 hdev->sent_cmd = NULL;
1268         }
1269
1270         kfree_skb(hdev->recv_evt);
1271         hdev->recv_evt = NULL;
1272
1273         /* After this point our queues are empty
1274          * and no tasks are scheduled. */
1275         hdev->close(hdev);
1276
1277         /* Clear flags */
1278         hdev->flags = 0;
1279         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1280
1281         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1282             mgmt_valid_hdev(hdev)) {
1283                 hci_dev_lock(hdev);
1284                 mgmt_powered(hdev, 0);
1285                 hci_dev_unlock(hdev);
1286         }
1287
1288         /* Controller radio is available but is currently powered down */
1289         hdev->amp_status = 0;
1290
1291         memset(hdev->eir, 0, sizeof(hdev->eir));
1292         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
1293
1294         hci_req_unlock(hdev);
1295
1296         hci_dev_put(hdev);
1297         return 0;
1298 }
1299
1300 int hci_dev_close(__u16 dev)
1301 {
1302         struct hci_dev *hdev;
1303         int err;
1304
1305         hdev = hci_dev_get(dev);
1306         if (!hdev)
1307                 return -ENODEV;
1308
1309         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1310                 cancel_delayed_work(&hdev->power_off);
1311
1312         err = hci_dev_do_close(hdev);
1313
1314         hci_dev_put(hdev);
1315         return err;
1316 }
1317
1318 int hci_dev_reset(__u16 dev)
1319 {
1320         struct hci_dev *hdev;
1321         int ret = 0;
1322
1323         hdev = hci_dev_get(dev);
1324         if (!hdev)
1325                 return -ENODEV;
1326
1327         hci_req_lock(hdev);
1328
1329         if (!test_bit(HCI_UP, &hdev->flags))
1330                 goto done;
1331
1332         /* Drop queues */
1333         skb_queue_purge(&hdev->rx_q);
1334         skb_queue_purge(&hdev->cmd_q);
1335
1336         hci_dev_lock(hdev);
1337         inquiry_cache_flush(hdev);
1338         hci_conn_hash_flush(hdev);
1339         hci_dev_unlock(hdev);
1340
1341         if (hdev->flush)
1342                 hdev->flush(hdev);
1343
1344         atomic_set(&hdev->cmd_cnt, 1);
1345         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1346
1347         if (!test_bit(HCI_RAW, &hdev->flags))
1348                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1349
1350 done:
1351         hci_req_unlock(hdev);
1352         hci_dev_put(hdev);
1353         return ret;
1354 }
1355
1356 int hci_dev_reset_stat(__u16 dev)
1357 {
1358         struct hci_dev *hdev;
1359         int ret = 0;
1360
1361         hdev = hci_dev_get(dev);
1362         if (!hdev)
1363                 return -ENODEV;
1364
1365         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1366
1367         hci_dev_put(hdev);
1368
1369         return ret;
1370 }
1371
1372 int hci_dev_cmd(unsigned int cmd, void __user *arg)
1373 {
1374         struct hci_dev *hdev;
1375         struct hci_dev_req dr;
1376         int err = 0;
1377
1378         if (copy_from_user(&dr, arg, sizeof(dr)))
1379                 return -EFAULT;
1380
1381         hdev = hci_dev_get(dr.dev_id);
1382         if (!hdev)
1383                 return -ENODEV;
1384
1385         switch (cmd) {
1386         case HCISETAUTH:
1387                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1388                                    HCI_INIT_TIMEOUT);
1389                 break;
1390
1391         case HCISETENCRYPT:
1392                 if (!lmp_encrypt_capable(hdev)) {
1393                         err = -EOPNOTSUPP;
1394                         break;
1395                 }
1396
1397                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1398                         /* Auth must be enabled first */
1399                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1400                                            HCI_INIT_TIMEOUT);
1401                         if (err)
1402                                 break;
1403                 }
1404
1405                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1406                                    HCI_INIT_TIMEOUT);
1407                 break;
1408
1409         case HCISETSCAN:
1410                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1411                                    HCI_INIT_TIMEOUT);
1412                 break;
1413
1414         case HCISETLINKPOL:
1415                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1416                                    HCI_INIT_TIMEOUT);
1417                 break;
1418
1419         case HCISETLINKMODE:
1420                 hdev->link_mode = ((__u16) dr.dev_opt) &
1421                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
1422                 break;
1423
1424         case HCISETPTYPE:
1425                 hdev->pkt_type = (__u16) dr.dev_opt;
1426                 break;
1427
1428         case HCISETACLMTU:
1429                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
1430                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1431                 break;
1432
1433         case HCISETSCOMTU:
1434                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
1435                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1436                 break;
1437
1438         default:
1439                 err = -EINVAL;
1440                 break;
1441         }
1442
1443         hci_dev_put(hdev);
1444         return err;
1445 }
1446
1447 int hci_get_dev_list(void __user *arg)
1448 {
1449         struct hci_dev *hdev;
1450         struct hci_dev_list_req *dl;
1451         struct hci_dev_req *dr;
1452         int n = 0, size, err;
1453         __u16 dev_num;
1454
1455         if (get_user(dev_num, (__u16 __user *) arg))
1456                 return -EFAULT;
1457
1458         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1459                 return -EINVAL;
1460
1461         size = sizeof(*dl) + dev_num * sizeof(*dr);
1462
1463         dl = kzalloc(size, GFP_KERNEL);
1464         if (!dl)
1465                 return -ENOMEM;
1466
1467         dr = dl->dev_req;
1468
1469         read_lock(&hci_dev_list_lock);
1470         list_for_each_entry(hdev, &hci_dev_list, list) {
1471                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1472                         cancel_delayed_work(&hdev->power_off);
1473
1474                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1475                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1476
1477                 (dr + n)->dev_id  = hdev->id;
1478                 (dr + n)->dev_opt = hdev->flags;
1479
1480                 if (++n >= dev_num)
1481                         break;
1482         }
1483         read_unlock(&hci_dev_list_lock);
1484
1485         dl->dev_num = n;
1486         size = sizeof(*dl) + n * sizeof(*dr);
1487
1488         err = copy_to_user(arg, dl, size);
1489         kfree(dl);
1490
1491         return err ? -EFAULT : 0;
1492 }
1493
1494 int hci_get_dev_info(void __user *arg)
1495 {
1496         struct hci_dev *hdev;
1497         struct hci_dev_info di;
1498         int err = 0;
1499
1500         if (copy_from_user(&di, arg, sizeof(di)))
1501                 return -EFAULT;
1502
1503         hdev = hci_dev_get(di.dev_id);
1504         if (!hdev)
1505                 return -ENODEV;
1506
1507         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1508                 cancel_delayed_work_sync(&hdev->power_off);
1509
1510         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1511                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1512
1513         strcpy(di.name, hdev->name);
1514         di.bdaddr   = hdev->bdaddr;
1515         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1516         di.flags    = hdev->flags;
1517         di.pkt_type = hdev->pkt_type;
1518         if (lmp_bredr_capable(hdev)) {
1519                 di.acl_mtu  = hdev->acl_mtu;
1520                 di.acl_pkts = hdev->acl_pkts;
1521                 di.sco_mtu  = hdev->sco_mtu;
1522                 di.sco_pkts = hdev->sco_pkts;
1523         } else {
1524                 di.acl_mtu  = hdev->le_mtu;
1525                 di.acl_pkts = hdev->le_pkts;
1526                 di.sco_mtu  = 0;
1527                 di.sco_pkts = 0;
1528         }
1529         di.link_policy = hdev->link_policy;
1530         di.link_mode   = hdev->link_mode;
1531
1532         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1533         memcpy(&di.features, &hdev->features, sizeof(di.features));
1534
1535         if (copy_to_user(arg, &di, sizeof(di)))
1536                 err = -EFAULT;
1537
1538         hci_dev_put(hdev);
1539
1540         return err;
1541 }
1542
1543 /* ---- Interface to HCI drivers ---- */
1544
1545 static int hci_rfkill_set_block(void *data, bool blocked)
1546 {
1547         struct hci_dev *hdev = data;
1548
1549         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1550
1551         if (!blocked)
1552                 return 0;
1553
1554         hci_dev_do_close(hdev);
1555
1556         return 0;
1557 }
1558
1559 static const struct rfkill_ops hci_rfkill_ops = {
1560         .set_block = hci_rfkill_set_block,
1561 };
1562
1563 static void hci_power_on(struct work_struct *work)
1564 {
1565         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1566         int err;
1567
1568         BT_DBG("%s", hdev->name);
1569
1570         err = hci_dev_open(hdev->id);
1571         if (err < 0) {
1572                 mgmt_set_powered_failed(hdev, err);
1573                 return;
1574         }
1575
1576         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1577                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1578                                    HCI_AUTO_OFF_TIMEOUT);
1579
1580         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1581                 mgmt_index_added(hdev);
1582 }
1583
1584 static void hci_power_off(struct work_struct *work)
1585 {
1586         struct hci_dev *hdev = container_of(work, struct hci_dev,
1587                                             power_off.work);
1588
1589         BT_DBG("%s", hdev->name);
1590
1591         hci_dev_do_close(hdev);
1592 }
1593
1594 static void hci_discov_off(struct work_struct *work)
1595 {
1596         struct hci_dev *hdev;
1597         u8 scan = SCAN_PAGE;
1598
1599         hdev = container_of(work, struct hci_dev, discov_off.work);
1600
1601         BT_DBG("%s", hdev->name);
1602
1603         hci_dev_lock(hdev);
1604
1605         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1606
1607         hdev->discov_timeout = 0;
1608
1609         hci_dev_unlock(hdev);
1610 }
1611
1612 int hci_uuids_clear(struct hci_dev *hdev)
1613 {
1614         struct bt_uuid *uuid, *tmp;
1615
1616         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1617                 list_del(&uuid->list);
1618                 kfree(uuid);
1619         }
1620
1621         return 0;
1622 }
1623
1624 int hci_link_keys_clear(struct hci_dev *hdev)
1625 {
1626         struct list_head *p, *n;
1627
1628         list_for_each_safe(p, n, &hdev->link_keys) {
1629                 struct link_key *key;
1630
1631                 key = list_entry(p, struct link_key, list);
1632
1633                 list_del(p);
1634                 kfree(key);
1635         }
1636
1637         return 0;
1638 }
1639
1640 int hci_smp_ltks_clear(struct hci_dev *hdev)
1641 {
1642         struct smp_ltk *k, *tmp;
1643
1644         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1645                 list_del(&k->list);
1646                 kfree(k);
1647         }
1648
1649         return 0;
1650 }
1651
1652 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1653 {
1654         struct link_key *k;
1655
1656         list_for_each_entry(k, &hdev->link_keys, list)
1657                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1658                         return k;
1659
1660         return NULL;
1661 }
1662
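/* Decide whether a link key should be stored persistently: keys from
 * legacy (pre-2.1) pairing generally are, debug keys never are, and
 * otherwise the decision follows the bonding requirements both sides
 * declared during pairing.
 */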
1663 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1664                                u8 key_type, u8 old_key_type)
1665 {
1666         /* Legacy key */
1667         if (key_type < 0x03)
1668                 return true;
1669
1670         /* Debug keys are insecure so don't store them persistently */
1671         if (key_type == HCI_LK_DEBUG_COMBINATION)
1672                 return false;
1673
1674         /* Changed combination key and there's no previous one */
1675         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1676                 return false;
1677
1678         /* Security mode 3 case */
1679         if (!conn)
1680                 return true;
1681
1682         /* Neither local nor remote side had no-bonding as a requirement */
1683         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1684                 return true;
1685
1686         /* Local side had dedicated bonding as requirement */
1687         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1688                 return true;
1689
1690         /* Remote side had dedicated bonding as requirement */
1691         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1692                 return true;
1693
1694         /* If none of the above criteria match, then don't store the key
1695          * persistently */
1696         return false;
1697 }
1698
1699 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1700 {
1701         struct smp_ltk *k;
1702
1703         list_for_each_entry(k, &hdev->long_term_keys, list) {
1704                 if (k->ediv != ediv ||
1705                     memcmp(rand, k->rand, sizeof(k->rand)))
1706                         continue;
1707
1708                 return k;
1709         }
1710
1711         return NULL;
1712 }
1713
1714 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1715                                      u8 addr_type)
1716 {
1717         struct smp_ltk *k;
1718
1719         list_for_each_entry(k, &hdev->long_term_keys, list)
1720                 if (addr_type == k->bdaddr_type &&
1721                     bacmp(bdaddr, &k->bdaddr) == 0)
1722                         return k;
1723
1724         return NULL;
1725 }
1726
1727 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1728                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1729 {
1730         struct link_key *key, *old_key;
1731         u8 old_key_type;
1732         bool persistent;
1733
1734         old_key = hci_find_link_key(hdev, bdaddr);
1735         if (old_key) {
1736                 old_key_type = old_key->type;
1737                 key = old_key;
1738         } else {
1739                 old_key_type = conn ? conn->key_type : 0xff;
1740                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1741                 if (!key)
1742                         return -ENOMEM;
1743                 list_add(&key->list, &hdev->link_keys);
1744         }
1745
1746         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1747
1748         /* Some buggy controller combinations generate a changed
1749          * combination key for legacy pairing even when there's no
1750          * previous key */
1751         if (type == HCI_LK_CHANGED_COMBINATION &&
1752             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1753                 type = HCI_LK_COMBINATION;
1754                 if (conn)
1755                         conn->key_type = type;
1756         }
1757
1758         bacpy(&key->bdaddr, bdaddr);
1759         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1760         key->pin_len = pin_len;
1761
1762         if (type == HCI_LK_CHANGED_COMBINATION)
1763                 key->type = old_key_type;
1764         else
1765                 key->type = type;
1766
1767         if (!new_key)
1768                 return 0;
1769
1770         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1771
1772         mgmt_new_link_key(hdev, key, persistent);
1773
1774         if (conn)
1775                 conn->flush_key = !persistent;
1776
1777         return 0;
1778 }
1779
1780 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1781                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
1782                 __le16 ediv, u8 rand[8])
1783 {
1784         struct smp_ltk *key, *old_key;
1785
1786         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1787                 return 0;
1788
1789         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1790         if (old_key)
1791                 key = old_key;
1792         else {
1793                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1794                 if (!key)
1795                         return -ENOMEM;
1796                 list_add(&key->list, &hdev->long_term_keys);
1797         }
1798
1799         bacpy(&key->bdaddr, bdaddr);
1800         key->bdaddr_type = addr_type;
1801         memcpy(key->val, tk, sizeof(key->val));
1802         key->authenticated = authenticated;
1803         key->ediv = ediv;
1804         key->enc_size = enc_size;
1805         key->type = type;
1806         memcpy(key->rand, rand, sizeof(key->rand));
1807
1808         if (!new_key)
1809                 return 0;
1810
1811         if (type & HCI_SMP_LTK)
1812                 mgmt_new_ltk(hdev, key, 1);
1813
1814         return 0;
1815 }
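
/* Note that STKs and LTKs share hdev->long_term_keys; only keys whose
 * type includes HCI_SMP_LTK are announced via mgmt_new_ltk() above,
 * since an STK is only valid for the connection it was negotiated on
 * and is not worth persisting in user space.
 */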
1816
1817 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1818 {
1819         struct link_key *key;
1820
1821         key = hci_find_link_key(hdev, bdaddr);
1822         if (!key)
1823                 return -ENOENT;
1824
1825         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1826
1827         list_del(&key->list);
1828         kfree(key);
1829
1830         return 0;
1831 }
1832
1833 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1834 {
1835         struct smp_ltk *k, *tmp;
1836
1837         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1838                 if (bacmp(bdaddr, &k->bdaddr))
1839                         continue;
1840
1841                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1842
1843                 list_del(&k->list);
1844                 kfree(k);
1845         }
1846
1847         return 0;
1848 }
1849
1850 /* HCI command timer function */
1851 static void hci_cmd_timeout(unsigned long arg)
1852 {
1853         struct hci_dev *hdev = (void *) arg;
1854
1855         if (hdev->sent_cmd) {
1856                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1857                 u16 opcode = __le16_to_cpu(sent->opcode);
1858
1859                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1860         } else {
1861                 BT_ERR("%s command tx timeout", hdev->name);
1862         }
1863
1864         atomic_set(&hdev->cmd_cnt, 1);
1865         queue_work(hdev->workqueue, &hdev->cmd_work);
1866 }
1867
1868 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1869                                           bdaddr_t *bdaddr)
1870 {
1871         struct oob_data *data;
1872
1873         list_for_each_entry(data, &hdev->remote_oob_data, list)
1874                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1875                         return data;
1876
1877         return NULL;
1878 }
1879
1880 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1881 {
1882         struct oob_data *data;
1883
1884         data = hci_find_remote_oob_data(hdev, bdaddr);
1885         if (!data)
1886                 return -ENOENT;
1887
1888         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1889
1890         list_del(&data->list);
1891         kfree(data);
1892
1893         return 0;
1894 }
1895
1896 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1897 {
1898         struct oob_data *data, *n;
1899
1900         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1901                 list_del(&data->list);
1902                 kfree(data);
1903         }
1904
1905         return 0;
1906 }
1907
1908 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1909                             u8 *randomizer)
1910 {
1911         struct oob_data *data;
1912
1913         data = hci_find_remote_oob_data(hdev, bdaddr);
1914
1915         if (!data) {
1916                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1917                 if (!data)
1918                         return -ENOMEM;
1919
1920                 bacpy(&data->bdaddr, bdaddr);
1921                 list_add(&data->list, &hdev->remote_oob_data);
1922         }
1923
1924         memcpy(data->hash, hash, sizeof(data->hash));
1925         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1926
1927         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1928
1929         return 0;
1930 }
1931
1932 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1933 {
1934         struct bdaddr_list *b;
1935
1936         list_for_each_entry(b, &hdev->blacklist, list)
1937                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1938                         return b;
1939
1940         return NULL;
1941 }
1942
1943 int hci_blacklist_clear(struct hci_dev *hdev)
1944 {
1945         struct list_head *p, *n;
1946
1947         list_for_each_safe(p, n, &hdev->blacklist) {
1948                 struct bdaddr_list *b;
1949
1950                 b = list_entry(p, struct bdaddr_list, list);
1951
1952                 list_del(p);
1953                 kfree(b);
1954         }
1955
1956         return 0;
1957 }
1958
1959 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1960 {
1961         struct bdaddr_list *entry;
1962
1963         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1964                 return -EBADF;
1965
1966         if (hci_blacklist_lookup(hdev, bdaddr))
1967                 return -EEXIST;
1968
1969         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1970         if (!entry)
1971                 return -ENOMEM;
1972
1973         bacpy(&entry->bdaddr, bdaddr);
1974
1975         list_add(&entry->list, &hdev->blacklist);
1976
1977         return mgmt_device_blocked(hdev, bdaddr, type);
1978 }
1979
1980 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1981 {
1982         struct bdaddr_list *entry;
1983
1984         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1985                 return hci_blacklist_clear(hdev);
1986
1987         entry = hci_blacklist_lookup(hdev, bdaddr);
1988         if (!entry)
1989                 return -ENOENT;
1990
1991         list_del(&entry->list);
1992         kfree(entry);
1993
1994         return mgmt_device_unblocked(hdev, bdaddr, type);
1995 }
1996
1997 static void le_scan_param_req(struct hci_request *req, unsigned long opt)
1998 {
1999         struct le_scan_params *param = (struct le_scan_params *) opt;
2000         struct hci_cp_le_set_scan_param cp;
2001
2002         memset(&cp, 0, sizeof(cp));
2003         cp.type = param->type;
2004         cp.interval = cpu_to_le16(param->interval);
2005         cp.window = cpu_to_le16(param->window);
2006
2007         hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
2008 }
2009
2010 static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
2011 {
2012         struct hci_cp_le_set_scan_enable cp;
2013
2014         memset(&cp, 0, sizeof(cp));
2015         cp.enable = LE_SCAN_ENABLE;
2016         cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2017
2018         hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2019 }
2020
2021 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
2022                           u16 window, int timeout)
2023 {
2024         long timeo = msecs_to_jiffies(3000);
2025         struct le_scan_params param;
2026         int err;
2027
2028         BT_DBG("%s", hdev->name);
2029
2030         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2031                 return -EINPROGRESS;
2032
2033         param.type = type;
2034         param.interval = interval;
2035         param.window = window;
2036
2037         hci_req_lock(hdev);
2038
2039         err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2040                              timeo);
2041         if (!err)
2042                 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
2043
2044         hci_req_unlock(hdev);
2045
2046         if (err < 0)
2047                 return err;
2048
2049         queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
2050                            timeout);
2051
2052         return 0;
2053 }
2054
2055 int hci_cancel_le_scan(struct hci_dev *hdev)
2056 {
2057         BT_DBG("%s", hdev->name);
2058
2059         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2060                 return -EALREADY;
2061
2062         if (cancel_delayed_work(&hdev->le_scan_disable)) {
2063                 struct hci_cp_le_set_scan_enable cp;
2064
2065                 /* Send HCI command to disable LE Scan */
2066                 memset(&cp, 0, sizeof(cp));
2067                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2068         }
2069
2070         return 0;
2071 }
2072
2073 static void le_scan_disable_work(struct work_struct *work)
2074 {
2075         struct hci_dev *hdev = container_of(work, struct hci_dev,
2076                                             le_scan_disable.work);
2077         struct hci_cp_le_set_scan_enable cp;
2078
2079         BT_DBG("%s", hdev->name);
2080
2081         memset(&cp, 0, sizeof(cp));
2082
2083         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2084 }
2085
2086 static void le_scan_work(struct work_struct *work)
2087 {
2088         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2089         struct le_scan_params *param = &hdev->le_scan_params;
2090
2091         BT_DBG("%s", hdev->name);
2092
2093         hci_do_le_scan(hdev, param->type, param->interval, param->window,
2094                        param->timeout);
2095 }
2096
2097 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
2098                 int timeout)
2099 {
2100         struct le_scan_params *param = &hdev->le_scan_params;
2101
2102         BT_DBG("%s", hdev->name);
2103
2104         if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2105                 return -ENOTSUPP;
2106
2107         if (work_busy(&hdev->le_scan))
2108                 return -EINPROGRESS;
2109
2110         param->type = type;
2111         param->interval = interval;
2112         param->window = window;
2113         param->timeout = timeout;
2114
2115         queue_work(system_long_wq, &hdev->le_scan);
2116
2117         return 0;
2118 }
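
/* Illustrative sketch (not compiled in): starting an active scan for
 * roughly ten seconds, with interval and window in 0.625 ms units and
 * the timeout in jiffies:
 *
 *	hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x10, 0x10,
 *		    msecs_to_jiffies(10000));
 */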
2119
2120 /* Alloc HCI device */
2121 struct hci_dev *hci_alloc_dev(void)
2122 {
2123         struct hci_dev *hdev;
2124
2125         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2126         if (!hdev)
2127                 return NULL;
2128
2129         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2130         hdev->esco_type = (ESCO_HV1);
2131         hdev->link_mode = (HCI_LM_ACCEPT);
2132         hdev->io_capability = 0x03; /* No Input No Output */
2133         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2134         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2135
2136         hdev->sniff_max_interval = 800;
2137         hdev->sniff_min_interval = 80;
2138
2139         mutex_init(&hdev->lock);
2140         mutex_init(&hdev->req_lock);
2141
2142         INIT_LIST_HEAD(&hdev->mgmt_pending);
2143         INIT_LIST_HEAD(&hdev->blacklist);
2144         INIT_LIST_HEAD(&hdev->uuids);
2145         INIT_LIST_HEAD(&hdev->link_keys);
2146         INIT_LIST_HEAD(&hdev->long_term_keys);
2147         INIT_LIST_HEAD(&hdev->remote_oob_data);
2148         INIT_LIST_HEAD(&hdev->conn_hash.list);
2149
2150         INIT_WORK(&hdev->rx_work, hci_rx_work);
2151         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2152         INIT_WORK(&hdev->tx_work, hci_tx_work);
2153         INIT_WORK(&hdev->power_on, hci_power_on);
2154         INIT_WORK(&hdev->le_scan, le_scan_work);
2155
2156         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2157         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2158         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2159
2160         skb_queue_head_init(&hdev->rx_q);
2161         skb_queue_head_init(&hdev->cmd_q);
2162         skb_queue_head_init(&hdev->raw_q);
2163
2164         init_waitqueue_head(&hdev->req_wait_q);
2165
2166         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2167
2168         hci_init_sysfs(hdev);
2169         discovery_init(hdev);
2170
2171         return hdev;
2172 }
2173 EXPORT_SYMBOL(hci_alloc_dev);
2174
2175 /* Free HCI device */
2176 void hci_free_dev(struct hci_dev *hdev)
2177 {
2178         /* Will be freed via the device release callback */
2179         put_device(&hdev->dev);
2180 }
2181 EXPORT_SYMBOL(hci_free_dev);
2182
2183 /* Register HCI device */
2184 int hci_register_dev(struct hci_dev *hdev)
2185 {
2186         int id, error;
2187
2188         if (!hdev->open || !hdev->close)
2189                 return -EINVAL;
2190
2191         /* Do not allow HCI_AMP devices to register at index 0,
2192          * so the index can be used as the AMP controller ID.
2193          */
2194         switch (hdev->dev_type) {
2195         case HCI_BREDR:
2196                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2197                 break;
2198         case HCI_AMP:
2199                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2200                 break;
2201         default:
2202                 return -EINVAL;
2203         }
2204
2205         if (id < 0)
2206                 return id;
2207
2208         sprintf(hdev->name, "hci%d", id);
2209         hdev->id = id;
2210
2211         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2212
2213         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
2214                                           WQ_MEM_RECLAIM, 1);
2215         if (!hdev->workqueue) {
2216                 error = -ENOMEM;
2217                 goto err;
2218         }
2219
2220         hdev->req_workqueue = alloc_workqueue(hdev->name,
2221                                               WQ_HIGHPRI | WQ_UNBOUND |
2222                                               WQ_MEM_RECLAIM, 1);
2223         if (!hdev->req_workqueue) {
2224                 destroy_workqueue(hdev->workqueue);
2225                 error = -ENOMEM;
2226                 goto err;
2227         }
2228
2229         error = hci_add_sysfs(hdev);
2230         if (error < 0)
2231                 goto err_wqueue;
2232
2233         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2234                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2235                                     hdev);
2236         if (hdev->rfkill) {
2237                 if (rfkill_register(hdev->rfkill) < 0) {
2238                         rfkill_destroy(hdev->rfkill);
2239                         hdev->rfkill = NULL;
2240                 }
2241         }
2242
2243         set_bit(HCI_SETUP, &hdev->dev_flags);
2244
2245         if (hdev->dev_type != HCI_AMP)
2246                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2247
2248         write_lock(&hci_dev_list_lock);
2249         list_add(&hdev->list, &hci_dev_list);
2250         write_unlock(&hci_dev_list_lock);
2251
2252         hci_notify(hdev, HCI_DEV_REG);
2253         hci_dev_hold(hdev);
2254
2255         queue_work(hdev->req_workqueue, &hdev->power_on);
2256
2257         return id;
2258
2259 err_wqueue:
2260         destroy_workqueue(hdev->workqueue);
2261         destroy_workqueue(hdev->req_workqueue);
2262 err:
2263         ida_simple_remove(&hci_index_ida, hdev->id);
2264
2265         return error;
2266 }
2267 EXPORT_SYMBOL(hci_register_dev);
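
/* Illustrative sketch (not compiled in): a transport driver typically
 * allocates and registers a device as follows, where my_open, my_close
 * and my_send are hypothetical driver callbacks:
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */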
2268
2269 /* Unregister HCI device */
2270 void hci_unregister_dev(struct hci_dev *hdev)
2271 {
2272         int i, id;
2273
2274         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2275
2276         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2277
2278         id = hdev->id;
2279
2280         write_lock(&hci_dev_list_lock);
2281         list_del(&hdev->list);
2282         write_unlock(&hci_dev_list_lock);
2283
2284         hci_dev_do_close(hdev);
2285
2286         for (i = 0; i < NUM_REASSEMBLY; i++)
2287                 kfree_skb(hdev->reassembly[i]);
2288
2289         cancel_work_sync(&hdev->power_on);
2290
2291         if (!test_bit(HCI_INIT, &hdev->flags) &&
2292             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2293                 hci_dev_lock(hdev);
2294                 mgmt_index_removed(hdev);
2295                 hci_dev_unlock(hdev);
2296         }
2297
2298         /* mgmt_index_removed should take care of emptying the
2299          * pending list */
2300         BUG_ON(!list_empty(&hdev->mgmt_pending));
2301
2302         hci_notify(hdev, HCI_DEV_UNREG);
2303
2304         if (hdev->rfkill) {
2305                 rfkill_unregister(hdev->rfkill);
2306                 rfkill_destroy(hdev->rfkill);
2307         }
2308
2309         hci_del_sysfs(hdev);
2310
2311         destroy_workqueue(hdev->workqueue);
2312         destroy_workqueue(hdev->req_workqueue);
2313
2314         hci_dev_lock(hdev);
2315         hci_blacklist_clear(hdev);
2316         hci_uuids_clear(hdev);
2317         hci_link_keys_clear(hdev);
2318         hci_smp_ltks_clear(hdev);
2319         hci_remote_oob_data_clear(hdev);
2320         hci_dev_unlock(hdev);
2321
2322         hci_dev_put(hdev);
2323
2324         ida_simple_remove(&hci_index_ida, id);
2325 }
2326 EXPORT_SYMBOL(hci_unregister_dev);
2327
2328 /* Suspend HCI device */
2329 int hci_suspend_dev(struct hci_dev *hdev)
2330 {
2331         hci_notify(hdev, HCI_DEV_SUSPEND);
2332         return 0;
2333 }
2334 EXPORT_SYMBOL(hci_suspend_dev);
2335
2336 /* Resume HCI device */
2337 int hci_resume_dev(struct hci_dev *hdev)
2338 {
2339         hci_notify(hdev, HCI_DEV_RESUME);
2340         return 0;
2341 }
2342 EXPORT_SYMBOL(hci_resume_dev);
2343
2344 /* Receive frame from HCI drivers */
2345 int hci_recv_frame(struct sk_buff *skb)
2346 {
2347         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2348         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2349                       !test_bit(HCI_INIT, &hdev->flags))) {
2350                 kfree_skb(skb);
2351                 return -ENXIO;
2352         }
2353
2354         /* Incoming skb */
2355         bt_cb(skb)->incoming = 1;
2356
2357         /* Time stamp */
2358         __net_timestamp(skb);
2359
2360         skb_queue_tail(&hdev->rx_q, skb);
2361         queue_work(hdev->workqueue, &hdev->rx_work);
2362
2363         return 0;
2364 }
2365 EXPORT_SYMBOL(hci_recv_frame);
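
/* Illustrative sketch (not compiled in): a driver that has received a
 * complete HCI event from its hardware into a (hypothetical) buffer
 * buf of length len would hand it up like this:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, len), buf, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */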
2366
2367 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2368                           int count, __u8 index)
2369 {
2370         int len = 0;
2371         int hlen = 0;
2372         int remain = count;
2373         struct sk_buff *skb;
2374         struct bt_skb_cb *scb;
2375
2376         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2377             index >= NUM_REASSEMBLY)
2378                 return -EILSEQ;
2379
2380         skb = hdev->reassembly[index];
2381
2382         if (!skb) {
2383                 switch (type) {
2384                 case HCI_ACLDATA_PKT:
2385                         len = HCI_MAX_FRAME_SIZE;
2386                         hlen = HCI_ACL_HDR_SIZE;
2387                         break;
2388                 case HCI_EVENT_PKT:
2389                         len = HCI_MAX_EVENT_SIZE;
2390                         hlen = HCI_EVENT_HDR_SIZE;
2391                         break;
2392                 case HCI_SCODATA_PKT:
2393                         len = HCI_MAX_SCO_SIZE;
2394                         hlen = HCI_SCO_HDR_SIZE;
2395                         break;
2396                 }
2397
2398                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2399                 if (!skb)
2400                         return -ENOMEM;
2401
2402                 scb = (void *) skb->cb;
2403                 scb->expect = hlen;
2404                 scb->pkt_type = type;
2405
2406                 skb->dev = (void *) hdev;
2407                 hdev->reassembly[index] = skb;
2408         }
2409
2410         while (count) {
2411                 scb = (void *) skb->cb;
2412                 len = min_t(uint, scb->expect, count);
2413
2414                 memcpy(skb_put(skb, len), data, len);
2415
2416                 count -= len;
2417                 data += len;
2418                 scb->expect -= len;
2419                 remain = count;
2420
2421                 switch (type) {
2422                 case HCI_EVENT_PKT:
2423                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2424                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2425                                 scb->expect = h->plen;
2426
2427                                 if (skb_tailroom(skb) < scb->expect) {
2428                                         kfree_skb(skb);
2429                                         hdev->reassembly[index] = NULL;
2430                                         return -ENOMEM;
2431                                 }
2432                         }
2433                         break;
2434
2435                 case HCI_ACLDATA_PKT:
2436                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2437                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2438                                 scb->expect = __le16_to_cpu(h->dlen);
2439
2440                                 if (skb_tailroom(skb) < scb->expect) {
2441                                         kfree_skb(skb);
2442                                         hdev->reassembly[index] = NULL;
2443                                         return -ENOMEM;
2444                                 }
2445                         }
2446                         break;
2447
2448                 case HCI_SCODATA_PKT:
2449                         if (skb->len == HCI_SCO_HDR_SIZE) {
2450                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2451                                 scb->expect = h->dlen;
2452
2453                                 if (skb_tailroom(skb) < scb->expect) {
2454                                         kfree_skb(skb);
2455                                         hdev->reassembly[index] = NULL;
2456                                         return -ENOMEM;
2457                                 }
2458                         }
2459                         break;
2460                 }
2461
2462                 if (scb->expect == 0) {
2463                         /* Complete frame */
2464
2465                         bt_cb(skb)->pkt_type = type;
2466                         hci_recv_frame(skb);
2467
2468                         hdev->reassembly[index] = NULL;
2469                         return remain;
2470                 }
2471         }
2472
2473         return remain;
2474 }
2475
2476 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2477 {
2478         int rem = 0;
2479
2480         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2481                 return -EILSEQ;
2482
2483         while (count) {
2484                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2485                 if (rem < 0)
2486                         return rem;
2487
2488                 data += (count - rem);
2489                 count = rem;
2490         }
2491
2492         return rem;
2493 }
2494 EXPORT_SYMBOL(hci_recv_fragment);
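
/* Worked example of the return value: for an event packet with a
 * 2-byte header and plen == 4, feeding the header first and the four
 * parameter bytes second makes hci_recv_fragment() return 0 both
 * times, since all input is consumed; once the last byte arrives the
 * completed frame is passed to hci_recv_frame() by hci_reassembly().
 */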
2495
2496 #define STREAM_REASSEMBLY 0
2497
2498 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2499 {
2500         int type;
2501         int rem = 0;
2502
2503         while (count) {
2504                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2505
2506                 if (!skb) {
2507                         struct { char type; } *pkt;
2508
2509                         /* Start of the frame */
2510                         pkt = data;
2511                         type = pkt->type;
2512
2513                         data++;
2514                         count--;
2515                 } else
2516                         type = bt_cb(skb)->pkt_type;
2517
2518                 rem = hci_reassembly(hdev, type, data, count,
2519                                      STREAM_REASSEMBLY);
2520                 if (rem < 0)
2521                         return rem;
2522
2523                 data += (count - rem);
2524                 count = rem;
2525         }
2526
2527         return rem;
2528 }
2529 EXPORT_SYMBOL(hci_recv_stream_fragment);
2530
2531 /* ---- Interface to upper protocols ---- */
2532
2533 int hci_register_cb(struct hci_cb *cb)
2534 {
2535         BT_DBG("%p name %s", cb, cb->name);
2536
2537         write_lock(&hci_cb_list_lock);
2538         list_add(&cb->list, &hci_cb_list);
2539         write_unlock(&hci_cb_list_lock);
2540
2541         return 0;
2542 }
2543 EXPORT_SYMBOL(hci_register_cb);
2544
2545 int hci_unregister_cb(struct hci_cb *cb)
2546 {
2547         BT_DBG("%p name %s", cb, cb->name);
2548
2549         write_lock(&hci_cb_list_lock);
2550         list_del(&cb->list);
2551         write_unlock(&hci_cb_list_lock);
2552
2553         return 0;
2554 }
2555 EXPORT_SYMBOL(hci_unregister_cb);
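
/* Illustrative sketch (not compiled in): upper protocols hook into the
 * core by registering a struct hci_cb, e.g. with a hypothetical
 * "my_proto" (the available callback members are defined in
 * hci_core.h):
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */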
2556
2557 static int hci_send_frame(struct sk_buff *skb)
2558 {
2559         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2560
2561         if (!hdev) {
2562                 kfree_skb(skb);
2563                 return -ENODEV;
2564         }
2565
2566         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2567
2568         /* Time stamp */
2569         __net_timestamp(skb);
2570
2571         /* Send copy to monitor */
2572         hci_send_to_monitor(hdev, skb);
2573
2574         if (atomic_read(&hdev->promisc)) {
2575                 /* Send copy to the sockets */
2576                 hci_send_to_sock(hdev, skb);
2577         }
2578
2579         /* Get rid of skb owner, prior to sending to the driver. */
2580         skb_orphan(skb);
2581
2582         return hdev->send(skb);
2583 }
2584
2585 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2586 {
2587         skb_queue_head_init(&req->cmd_q);
2588         req->hdev = hdev;
2589         req->err = 0;
2590 }
2591
2592 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2593 {
2594         struct hci_dev *hdev = req->hdev;
2595         struct sk_buff *skb;
2596         unsigned long flags;
2597
2598         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2599
2600         /* If an error occurred during request building, remove all HCI
2601          * commands queued on the HCI request queue.
2602          */
2603         if (req->err) {
2604                 skb_queue_purge(&req->cmd_q);
2605                 return req->err;
2606         }
2607
2608         /* Do not allow empty requests */
2609         if (skb_queue_empty(&req->cmd_q))
2610                 return -ENODATA;
2611
2612         skb = skb_peek_tail(&req->cmd_q);
2613         bt_cb(skb)->req.complete = complete;
2614
2615         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2616         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2617         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2618
2619         queue_work(hdev->workqueue, &hdev->cmd_work);
2620
2621         return 0;
2622 }
2623
2624 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2625                                        u32 plen, const void *param)
2626 {
2627         int len = HCI_COMMAND_HDR_SIZE + plen;
2628         struct hci_command_hdr *hdr;
2629         struct sk_buff *skb;
2630
2631         skb = bt_skb_alloc(len, GFP_ATOMIC);
2632         if (!skb)
2633                 return NULL;
2634
2635         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2636         hdr->opcode = cpu_to_le16(opcode);
2637         hdr->plen   = plen;
2638
2639         if (plen)
2640                 memcpy(skb_put(skb, plen), param, plen);
2641
2642         BT_DBG("skb len %d", skb->len);
2643
2644         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2645         skb->dev = (void *) hdev;
2646
2647         return skb;
2648 }
2649
2650 /* Send HCI command */
2651 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2652                  const void *param)
2653 {
2654         struct sk_buff *skb;
2655
2656         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2657
2658         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2659         if (!skb) {
2660                 BT_ERR("%s no memory for command", hdev->name);
2661                 return -ENOMEM;
2662         }
2663
2664         /* Stand-alone HCI commands must be flagged as
2665          * single-command requests.
2666          */
2667         bt_cb(skb)->req.start = true;
2668
2669         skb_queue_tail(&hdev->cmd_q, skb);
2670         queue_work(hdev->workqueue, &hdev->cmd_work);
2671
2672         return 0;
2673 }
2674
2675 /* Queue a command to an asynchronous HCI request */
2676 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2677                     const void *param, u8 event)
2678 {
2679         struct hci_dev *hdev = req->hdev;
2680         struct sk_buff *skb;
2681
2682         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2683
2684         /* If an error occurred during request building, there is no point in
2685          * queueing the HCI command. We can simply return.
2686          */
2687         if (req->err)
2688                 return;
2689
2690         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2691         if (!skb) {
2692                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2693                        hdev->name, opcode);
2694                 req->err = -ENOMEM;
2695                 return;
2696         }
2697
2698         if (skb_queue_empty(&req->cmd_q))
2699                 bt_cb(skb)->req.start = true;
2700
2701         bt_cb(skb)->req.event = event;
2702
2703         skb_queue_tail(&req->cmd_q, skb);
2704 }
2705
2706 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2707                  const void *param)
2708 {
2709         hci_req_add_ev(req, opcode, plen, param, 0);
2710 }
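
/* Illustrative sketch (not compiled in): a typical user of the request
 * API builds and runs a request as follows, where scan_complete is a
 * hypothetical hci_req_complete_t callback:
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, scan_complete);
 *
 * hci_req_run() marks only the tail skb with the complete callback, so
 * scan_complete() is invoked exactly once when the whole request has
 * finished.
 */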
2711
2712 /* Get data from the previously sent command */
2713 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2714 {
2715         struct hci_command_hdr *hdr;
2716
2717         if (!hdev->sent_cmd)
2718                 return NULL;
2719
2720         hdr = (void *) hdev->sent_cmd->data;
2721
2722         if (hdr->opcode != cpu_to_le16(opcode))
2723                 return NULL;
2724
2725         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2726
2727         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2728 }
2729
2730 /* Send ACL data */
2731 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2732 {
2733         struct hci_acl_hdr *hdr;
2734         int len = skb->len;
2735
2736         skb_push(skb, HCI_ACL_HDR_SIZE);
2737         skb_reset_transport_header(skb);
2738         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2739         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2740         hdr->dlen   = cpu_to_le16(len);
2741 }
2742
2743 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2744                           struct sk_buff *skb, __u16 flags)
2745 {
2746         struct hci_conn *conn = chan->conn;
2747         struct hci_dev *hdev = conn->hdev;
2748         struct sk_buff *list;
2749
2750         skb->len = skb_headlen(skb);
2751         skb->data_len = 0;
2752
2753         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2754
2755         switch (hdev->dev_type) {
2756         case HCI_BREDR:
2757                 hci_add_acl_hdr(skb, conn->handle, flags);
2758                 break;
2759         case HCI_AMP:
2760                 hci_add_acl_hdr(skb, chan->handle, flags);
2761                 break;
2762         default:
2763                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2764                 return;
2765         }
2766
2767         list = skb_shinfo(skb)->frag_list;
2768         if (!list) {
2769                 /* Non-fragmented */
2770                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2771
2772                 skb_queue_tail(queue, skb);
2773         } else {
2774                 /* Fragmented */
2775                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2776
2777                 skb_shinfo(skb)->frag_list = NULL;
2778
2779                 /* Queue all fragments atomically */
2780                 spin_lock(&queue->lock);
2781
2782                 __skb_queue_tail(queue, skb);
2783
2784                 flags &= ~ACL_START;
2785                 flags |= ACL_CONT;
2786                 do {
2787                         skb = list; list = list->next;
2788
2789                         skb->dev = (void *) hdev;
2790                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2791                         hci_add_acl_hdr(skb, conn->handle, flags);
2792
2793                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2794
2795                         __skb_queue_tail(queue, skb);
2796                 } while (list);
2797
2798                 spin_unlock(&queue->lock);
2799         }
2800 }
2801
2802 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2803 {
2804         struct hci_dev *hdev = chan->conn->hdev;
2805
2806         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2807
2808         skb->dev = (void *) hdev;
2809
2810         hci_queue_acl(chan, &chan->data_q, skb, flags);
2811
2812         queue_work(hdev->workqueue, &hdev->tx_work);
2813 }
2814
2815 /* Send SCO data */
2816 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2817 {
2818         struct hci_dev *hdev = conn->hdev;
2819         struct hci_sco_hdr hdr;
2820
2821         BT_DBG("%s len %d", hdev->name, skb->len);
2822
2823         hdr.handle = cpu_to_le16(conn->handle);
2824         hdr.dlen   = skb->len;
2825
2826         skb_push(skb, HCI_SCO_HDR_SIZE);
2827         skb_reset_transport_header(skb);
2828         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2829
2830         skb->dev = (void *) hdev;
2831         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2832
2833         skb_queue_tail(&conn->data_q, skb);
2834         queue_work(hdev->workqueue, &hdev->tx_work);
2835 }
2836
2837 /* ---- HCI TX task (outgoing data) ---- */
2838
2839 /* HCI Connection scheduler */
2840 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2841                                      int *quote)
2842 {
2843         struct hci_conn_hash *h = &hdev->conn_hash;
2844         struct hci_conn *conn = NULL, *c;
2845         unsigned int num = 0, min = ~0;
2846
2847         /* We don't have to lock the device here. Connections are always
2848          * added and removed with the TX task disabled. */
2849
2850         rcu_read_lock();
2851
2852         list_for_each_entry_rcu(c, &h->list, list) {
2853                 if (c->type != type || skb_queue_empty(&c->data_q))
2854                         continue;
2855
2856                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2857                         continue;
2858
2859                 num++;
2860
2861                 if (c->sent < min) {
2862                         min  = c->sent;
2863                         conn = c;
2864                 }
2865
2866                 if (hci_conn_num(hdev, type) == num)
2867                         break;
2868         }
2869
2870         rcu_read_unlock();
2871
2872         if (conn) {
2873                 int cnt, q;
2874
2875                 switch (conn->type) {
2876                 case ACL_LINK:
2877                         cnt = hdev->acl_cnt;
2878                         break;
2879                 case SCO_LINK:
2880                 case ESCO_LINK:
2881                         cnt = hdev->sco_cnt;
2882                         break;
2883                 case LE_LINK:
2884                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2885                         break;
2886                 default:
2887                         cnt = 0;
2888                         BT_ERR("Unknown link type");
2889                 }
2890
2891                 q = cnt / num;
2892                 *quote = q ? q : 1;
2893         } else
2894                 *quote = 0;
2895
2896         BT_DBG("conn %p quote %d", conn, *quote);
2897         return conn;
2898 }
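
/* Worked example of the quota above: with two ACL connections that
 * both have queued data and hdev->acl_cnt == 5, the connection with
 * the fewest packets in flight is picked and its quote becomes
 * 5 / 2 = 2; with acl_cnt == 1 the quote is rounded up to 1 so the
 * scheduler always makes progress.
 */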
2899
2900 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2901 {
2902         struct hci_conn_hash *h = &hdev->conn_hash;
2903         struct hci_conn *c;
2904
2905         BT_ERR("%s link tx timeout", hdev->name);
2906
2907         rcu_read_lock();
2908
2909         /* Kill stalled connections */
2910         list_for_each_entry_rcu(c, &h->list, list) {
2911                 if (c->type == type && c->sent) {
2912                         BT_ERR("%s killing stalled connection %pMR",
2913                                hdev->name, &c->dst);
2914                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2915                 }
2916         }
2917
2918         rcu_read_unlock();
2919 }
2920
2921 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2922                                       int *quote)
2923 {
2924         struct hci_conn_hash *h = &hdev->conn_hash;
2925         struct hci_chan *chan = NULL;
2926         unsigned int num = 0, min = ~0, cur_prio = 0;
2927         struct hci_conn *conn;
2928         int cnt, q, conn_num = 0;
2929
2930         BT_DBG("%s", hdev->name);
2931
2932         rcu_read_lock();
2933
2934         list_for_each_entry_rcu(conn, &h->list, list) {
2935                 struct hci_chan *tmp;
2936
2937                 if (conn->type != type)
2938                         continue;
2939
2940                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2941                         continue;
2942
2943                 conn_num++;
2944
2945                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2946                         struct sk_buff *skb;
2947
2948                         if (skb_queue_empty(&tmp->data_q))
2949                                 continue;
2950
2951                         skb = skb_peek(&tmp->data_q);
2952                         if (skb->priority < cur_prio)
2953                                 continue;
2954
2955                         if (skb->priority > cur_prio) {
2956                                 num = 0;
2957                                 min = ~0;
2958                                 cur_prio = skb->priority;
2959                         }
2960
2961                         num++;
2962
2963                         if (conn->sent < min) {
2964                                 min  = conn->sent;
2965                                 chan = tmp;
2966                         }
2967                 }
2968
2969                 if (hci_conn_num(hdev, type) == conn_num)
2970                         break;
2971         }
2972
2973         rcu_read_unlock();
2974
2975         if (!chan)
2976                 return NULL;
2977
2978         switch (chan->conn->type) {
2979         case ACL_LINK:
2980                 cnt = hdev->acl_cnt;
2981                 break;
2982         case AMP_LINK:
2983                 cnt = hdev->block_cnt;
2984                 break;
2985         case SCO_LINK:
2986         case ESCO_LINK:
2987                 cnt = hdev->sco_cnt;
2988                 break;
2989         case LE_LINK:
2990                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2991                 break;
2992         default:
2993                 cnt = 0;
2994                 BT_ERR("Unknown link type");
2995         }
2996
2997         q = cnt / num;
2998         *quote = q ? q : 1;
2999         BT_DBG("chan %p quote %d", chan, *quote);
3000         return chan;
3001 }
3002
3003 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3004 {
3005         struct hci_conn_hash *h = &hdev->conn_hash;
3006         struct hci_conn *conn;
3007         int num = 0;
3008
3009         BT_DBG("%s", hdev->name);
3010
3011         rcu_read_lock();
3012
3013         list_for_each_entry_rcu(conn, &h->list, list) {
3014                 struct hci_chan *chan;
3015
3016                 if (conn->type != type)
3017                         continue;
3018
3019                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3020                         continue;
3021
3022                 num++;
3023
3024                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3025                         struct sk_buff *skb;
3026
3027                         if (chan->sent) {
3028                                 chan->sent = 0;
3029                                 continue;
3030                         }
3031
3032                         if (skb_queue_empty(&chan->data_q))
3033                                 continue;
3034
3035                         skb = skb_peek(&chan->data_q);
3036                         if (skb->priority >= HCI_PRIO_MAX - 1)
3037                                 continue;
3038
3039                         skb->priority = HCI_PRIO_MAX - 1;
3040
3041                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3042                                skb->priority);
3043                 }
3044
3045                 if (hci_conn_num(hdev, type) == num)
3046                         break;
3047         }
3048
3049         rcu_read_unlock();
3050
3051 }
3052
3053 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3054 {
3055         /* Calculate count of blocks used by this packet */
3056         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3057 }
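
/* Worked example: assuming a (hypothetical) controller block size of
 * hdev->block_len == 64, an ACL packet with a 4-byte header and 300
 * bytes of payload (skb->len == 304) occupies
 * DIV_ROUND_UP(300, 64) == 5 data blocks.
 */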
3058
3059 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3060 {
3061         if (!test_bit(HCI_RAW, &hdev->flags)) {
3062                 /* ACL tx timeout must be longer than the maximum
3063                  * link supervision timeout (40.9 seconds) */
3064                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3065                                        HCI_ACL_TX_TIMEOUT))
3066                         hci_link_tx_to(hdev, ACL_LINK);
3067         }
3068 }
3069
3070 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3071 {
3072         unsigned int cnt = hdev->acl_cnt;
3073         struct hci_chan *chan;
3074         struct sk_buff *skb;
3075         int quote;
3076
3077         __check_timeout(hdev, cnt);
3078
3079         while (hdev->acl_cnt &&
3080                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3081                 u32 priority = (skb_peek(&chan->data_q))->priority;
3082                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3083                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3084                                skb->len, skb->priority);
3085
3086                         /* Stop if priority has changed */
3087                         if (skb->priority < priority)
3088                                 break;
3089
3090                         skb = skb_dequeue(&chan->data_q);
3091
3092                         hci_conn_enter_active_mode(chan->conn,
3093                                                    bt_cb(skb)->force_active);
3094
3095                         hci_send_frame(skb);
3096                         hdev->acl_last_tx = jiffies;
3097
3098                         hdev->acl_cnt--;
3099                         chan->sent++;
3100                         chan->conn->sent++;
3101                 }
3102         }
3103
3104         if (cnt != hdev->acl_cnt)
3105                 hci_prio_recalculate(hdev, ACL_LINK);
3106 }
3107
3108 static void hci_sched_acl_blk(struct hci_dev *hdev)
3109 {
3110         unsigned int cnt = hdev->block_cnt;
3111         struct hci_chan *chan;
3112         struct sk_buff *skb;
3113         int quote;
3114         u8 type;
3115
3116         __check_timeout(hdev, cnt);
3117
3118         BT_DBG("%s", hdev->name);
3119
3120         if (hdev->dev_type == HCI_AMP)
3121                 type = AMP_LINK;
3122         else
3123                 type = ACL_LINK;
3124
3125         while (hdev->block_cnt > 0 &&
3126                (chan = hci_chan_sent(hdev, type, &quote))) {
3127                 u32 priority = (skb_peek(&chan->data_q))->priority;
3128                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3129                         int blocks;
3130
3131                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3132                                skb->len, skb->priority);
3133
3134                         /* Stop if priority has changed */
3135                         if (skb->priority < priority)
3136                                 break;
3137
3138                         skb = skb_dequeue(&chan->data_q);
3139
3140                         blocks = __get_blocks(hdev, skb);
3141                         if (blocks > hdev->block_cnt)
3142                                 return;
3143
3144                         hci_conn_enter_active_mode(chan->conn,
3145                                                    bt_cb(skb)->force_active);
3146
3147                         hci_send_frame(skb);
3148                         hdev->acl_last_tx = jiffies;
3149
3150                         hdev->block_cnt -= blocks;
3151                         quote -= blocks;
3152
3153                         chan->sent += blocks;
3154                         chan->conn->sent += blocks;
3155                 }
3156         }
3157
3158         if (cnt != hdev->block_cnt)
3159                 hci_prio_recalculate(hdev, type);
3160 }
3161
3162 static void hci_sched_acl(struct hci_dev *hdev)
3163 {
3164         BT_DBG("%s", hdev->name);
3165
3166         /* No ACL link over BR/EDR controller */
3167         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3168                 return;
3169
3170         /* No AMP link over AMP controller */
3171         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3172                 return;
3173
3174         switch (hdev->flow_ctl_mode) {
3175         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3176                 hci_sched_acl_pkt(hdev);
3177                 break;
3178
3179         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3180                 hci_sched_acl_blk(hdev);
3181                 break;
3182         }
3183 }
3184
3185 /* Schedule SCO */
3186 static void hci_sched_sco(struct hci_dev *hdev)
3187 {
3188         struct hci_conn *conn;
3189         struct sk_buff *skb;
3190         int quote;
3191
3192         BT_DBG("%s", hdev->name);
3193
3194         if (!hci_conn_num(hdev, SCO_LINK))
3195                 return;
3196
3197         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3198                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3199                         BT_DBG("skb %p len %d", skb, skb->len);
3200                         hci_send_frame(skb);
3201
3202                         conn->sent++;
3203                         if (conn->sent == ~0)
3204                                 conn->sent = 0;
3205                 }
3206         }
3207 }
3208
3209 static void hci_sched_esco(struct hci_dev *hdev)
3210 {
3211         struct hci_conn *conn;
3212         struct sk_buff *skb;
3213         int quote;
3214
3215         BT_DBG("%s", hdev->name);
3216
3217         if (!hci_conn_num(hdev, ESCO_LINK))
3218                 return;
3219
3220         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3221                                                      &quote))) {
3222                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3223                         BT_DBG("skb %p len %d", skb, skb->len);
3224                         hci_send_frame(skb);
3225
3226                         conn->sent++;
3227                         if (conn->sent == ~0)
3228                                 conn->sent = 0;
3229                 }
3230         }
3231 }
3232
3233 static void hci_sched_le(struct hci_dev *hdev)
3234 {
3235         struct hci_chan *chan;
3236         struct sk_buff *skb;
3237         int quote, cnt, tmp;
3238
3239         BT_DBG("%s", hdev->name);
3240
3241         if (!hci_conn_num(hdev, LE_LINK))
3242                 return;
3243
3244         if (!test_bit(HCI_RAW, &hdev->flags)) {
3245                 /* LE tx timeout must be longer than the maximum
3246                  * link supervision timeout (40.9 seconds) */
3247                 if (!hdev->le_cnt && hdev->le_pkts &&
3248                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3249                         hci_link_tx_to(hdev, LE_LINK);
3250         }
3251
3252         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3253         tmp = cnt;
3254         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3255                 u32 priority = (skb_peek(&chan->data_q))->priority;
3256                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3257                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3258                                skb->len, skb->priority);
3259
3260                         /* Stop if priority has changed */
3261                         if (skb->priority < priority)
3262                                 break;
3263
3264                         skb = skb_dequeue(&chan->data_q);
3265
3266                         hci_send_frame(skb);
3267                         hdev->le_last_tx = jiffies;
3268
3269                         cnt--;
3270                         chan->sent++;
3271                         chan->conn->sent++;
3272                 }
3273         }
3274
3275         if (hdev->le_pkts)
3276                 hdev->le_cnt = cnt;
3277         else
3278                 hdev->acl_cnt = cnt;
3279
3280         if (cnt != tmp)
3281                 hci_prio_recalculate(hdev, LE_LINK);
3282 }
3283
3284 static void hci_tx_work(struct work_struct *work)
3285 {
3286         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3287         struct sk_buff *skb;
3288
3289         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3290                hdev->sco_cnt, hdev->le_cnt);
3291
3292         /* Schedule queues and send stuff to HCI driver */
3293
3294         hci_sched_acl(hdev);
3295
3296         hci_sched_sco(hdev);
3297
3298         hci_sched_esco(hdev);
3299
3300         hci_sched_le(hdev);
3301
3302         /* Send next queued raw (unknown type) packet */
3303         while ((skb = skb_dequeue(&hdev->raw_q)))
3304                 hci_send_frame(skb);
3305 }
3306
3307 /* ----- HCI RX task (incoming data processing) ----- */
3308
3309 /* ACL data packet */
3310 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3311 {
3312         struct hci_acl_hdr *hdr = (void *) skb->data;
3313         struct hci_conn *conn;
3314         __u16 handle, flags;
3315
3316         skb_pull(skb, HCI_ACL_HDR_SIZE);
3317
3318         handle = __le16_to_cpu(hdr->handle);
3319         flags  = hci_flags(handle);
3320         handle = hci_handle(handle);
3321
3322         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3323                handle, flags);
3324
3325         hdev->stat.acl_rx++;
3326
3327         hci_dev_lock(hdev);
3328         conn = hci_conn_hash_lookup_handle(hdev, handle);
3329         hci_dev_unlock(hdev);
3330
3331         if (conn) {
3332                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3333
3334                 /* Send to upper protocol */
3335                 l2cap_recv_acldata(conn, skb, flags);
3336                 return;
3337         } else {
3338                 BT_ERR("%s ACL packet for unknown connection handle %d",
3339                        hdev->name, handle);
3340         }
3341
3342         kfree_skb(skb);
3343 }
3344
3345 /* SCO data packet */
3346 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3347 {
3348         struct hci_sco_hdr *hdr = (void *) skb->data;
3349         struct hci_conn *conn;
3350         __u16 handle;
3351
3352         skb_pull(skb, HCI_SCO_HDR_SIZE);
3353
3354         handle = __le16_to_cpu(hdr->handle);
3355
3356         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
3357
3358         hdev->stat.sco_rx++;
3359
3360         hci_dev_lock(hdev);
3361         conn = hci_conn_hash_lookup_handle(hdev, handle);
3362         hci_dev_unlock(hdev);
3363
3364         if (conn) {
3365                 /* Send to upper protocol */
3366                 sco_recv_scodata(conn, skb);
3367                 return;
3368         } else {
3369                 BT_ERR("%s SCO packet for unknown connection handle %d",
3370                        hdev->name, handle);
3371         }
3372
3373         kfree_skb(skb);
3374 }
3375
3376 static bool hci_req_is_complete(struct hci_dev *hdev)
3377 {
3378         struct sk_buff *skb;
3379
3380         skb = skb_peek(&hdev->cmd_q);
3381         if (!skb)
3382                 return true;
3383
3384         return bt_cb(skb)->req.start;
3385 }
3386
3387 static void hci_resend_last(struct hci_dev *hdev)
3388 {
3389         struct hci_command_hdr *sent;
3390         struct sk_buff *skb;
3391         u16 opcode;
3392
3393         if (!hdev->sent_cmd)
3394                 return;
3395
3396         sent = (void *) hdev->sent_cmd->data;
3397         opcode = __le16_to_cpu(sent->opcode);
3398         if (opcode == HCI_OP_RESET)
3399                 return;
3400
3401         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3402         if (!skb)
3403                 return;
3404
3405         skb_queue_head(&hdev->cmd_q, skb);
3406         queue_work(hdev->workqueue, &hdev->cmd_work);
3407 }
3408
3409 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3410 {
3411         hci_req_complete_t req_complete = NULL;
3412         struct sk_buff *skb;
3413         unsigned long flags;
3414
3415         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3416
3417         /* If the completed command doesn't match the last one that was
3418          * sent, we need to do special handling of it.
3419          */
3420         if (!hci_sent_cmd_data(hdev, opcode)) {
3421                 /* Some CSR based controllers generate a spontaneous
3422                  * reset complete event during init and any pending
3423                  * command will never be completed. In such a case we
3424                  * need to resend whatever was the last sent
3425                  * command.
3426                  */
3427                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3428                         hci_resend_last(hdev);
3429
3430                 return;
3431         }
3432
3433         /* If the command succeeded and there's still more commands in
3434          * this request the request is not yet complete.
3435          */
3436         if (!status && !hci_req_is_complete(hdev))
3437                 return;
3438
3439         /* If this was the last command in a request the complete
3440          * callback would be found in hdev->sent_cmd instead of the
3441          * command queue (hdev->cmd_q).
3442          */
3443         if (hdev->sent_cmd) {
3444                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3445
3446                 if (req_complete) {
3447                         /* We must set the complete callback to NULL to
3448                          * avoid calling the callback more than once if
3449                          * this function gets called again.
3450                          */
3451                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
3452
3453                         goto call_complete;
3454                 }
3455         }
3456
3457         /* Remove all pending commands belonging to this request */
3458         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3459         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3460                 if (bt_cb(skb)->req.start) {
3461                         __skb_queue_head(&hdev->cmd_q, skb);
3462                         break;
3463                 }
3464
3465                 req_complete = bt_cb(skb)->req.complete;
3466                 kfree_skb(skb);
3467         }
3468         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3469
3470 call_complete:
3471         if (req_complete)
3472                 req_complete(hdev, status);
3473 }
3474
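/* RX work handler: drains hdev->rx_q and dispatches each frame by
 * packet type. Every frame is first mirrored to the monitor socket,
 * and to raw sockets when the device is in promiscuous mode.
 */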
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

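/* TX work handler for the command queue. hdev->cmd_cnt tracks how
 * many commands the controller is currently willing to accept, so a
 * command is only sent while the count is non-zero. The sent frame is
 * kept in hdev->sent_cmd and, except while a reset is in progress,
 * the command timer is armed to catch controllers that never respond.
 */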
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

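/* Start an inquiry with the general inquiry access code. Per the HCI
 * specification, length is the Inquiry_Length parameter, i.e. the
 * maximum inquiry duration in units of 1.28 seconds.
 */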
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return -EINPROGRESS;

        inquiry_cache_flush(hdev);

        memset(&cp, 0, sizeof(cp));
        memcpy(&cp.lap, lap, sizeof(cp.lap));
        cp.length = length;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

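/* Cancel an ongoing inquiry; -EALREADY signals that no inquiry was in
 * progress on this device.
 */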
int hci_cancel_inquiry(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_INQUIRY, &hdev->flags))
                return -EALREADY;

        return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

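/* Map an exported LE address type (BDADDR_LE_*) to the corresponding
 * internal HCI address type (ADDR_LE_DEV_*).
 */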
u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fallback to LE Random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}