/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
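
/* Illustrative use (added note, not part of the original file): callers in
 * process context can issue one synchronous command and consume its
 * Command Complete parameters roughly like this, assuming hdev is a
 * powered controller:
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      ... parse skb->data, which holds the return parameters ...
 *
 *      kfree_skb(skb);
 *
 * The returned skb is owned by the caller and must be freed.
 */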

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
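
/* Illustrative sketch (added note): the usual pattern is to queue commands
 * in a small request callback and run it synchronously. A hypothetical
 * example that reads the local version information:
 *
 *      static void read_version_req(struct hci_request *req,
 *                                   unsigned long opt)
 *      {
 *              hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *      }
 *
 *      err = hci_req_sync(hdev, read_version_req, 0, HCI_INIT_TIMEOUT);
 *
 * The callback only queues commands; hci_req_sync() serializes against
 * other requests, runs the queue and sleeps in __hci_req_sync() until
 * hci_req_sync_complete() fires or the timeout expires.
 */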

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
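
/* For reference (added note, per the Bluetooth core specification): the
 * value written by Write Inquiry Mode selects the inquiry result format:
 * 0x00 = standard Inquiry Result, 0x01 = Inquiry Result with RSSI,
 * 0x02 = Inquiry Result with RSSI or Extended Inquiry Result. The vendor
 * checks above whitelist controllers known to support RSSI results
 * without advertising it in their LMP features.
 */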

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}
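
/* Added note: each entry of events[] is one octet of the little-endian
 * 64-bit HCI event mask, so spec bit N lands in events[N / 8] under the
 * mask (1 << (N % 8)). For example, the Sniff Subrating event is bit 45:
 * 45 / 8 = 5 and 1 << (45 % 8) = 0x20, which is exactly the
 * events[5] |= 0x20 line above.
 */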

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They clearly indicate its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and send the command only if it is
         * marked as supported. If it is not supported, assume that the
         * controller has no actual support for stored link keys, which
         * makes this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
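
/* Usage note (added): every successful hci_dev_get() must be balanced by
 * hci_dev_put(). The typical pattern, used by the ioctl helpers below, is:
 *
 *      hdev = hci_dev_get(index);
 *      if (!hdev)
 *              return -ENODEV;
 *
 *      ... use hdev ...
 *
 *      hci_dev_put(hdev);
 */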

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
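
/* Added note: the insertion loop above keeps the resolve list sorted by
 * increasing |rssi|, with entries whose name request is already pending
 * left at the front. Since RSSI values are negative, a smaller |rssi|
 * means a stronger signal: entries at -40, -75 and -60 dBm end up
 * ordered -40, -60, -75, so the closest devices get their names
 * resolved first.
 */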

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until the Inquiry procedure finishes (HCI_INQUIRY
                 * flag is cleared). If it is interrupted by a signal,
                 * return -EINTR via the common exit path so the reference
                 * taken by hci_dev_get() above is dropped.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
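
/* Illustrative userspace counterpart (added sketch, not from this file):
 * this is the handler behind the HCIINQUIRY ioctl used by libbluetooth's
 * hci_inquiry() helper. Raw usage could look roughly like this, with the
 * GIAC LAP 0x9e8b33 stored little-endian:
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { { 0 } };
 *
 *      int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *      buf.ir.dev_id  = 0;
 *      buf.ir.flags   = IREQ_CACHE_FLUSH;
 *      buf.ir.lap[0]  = 0x33;
 *      buf.ir.lap[1]  = 0x8b;
 *      buf.ir.lap[2]  = 0x9e;
 *      buf.ir.length  = 8;
 *      buf.ir.num_rsp = 255;
 *
 *      ioctl(dd, HCIINQUIRY, (unsigned long) &buf);
 *
 * On success, buf.ir.num_rsp holds the number of inquiry_info entries
 * written back right after the request header.
 */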

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}
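
/* Added worked example: the buffer built above is a sequence of
 * length-prefixed AD structures (length, type, data). For an LE-only
 * general-discoverable peripheral named "BT" advertising 0 dBm TX power,
 * the resulting bytes would be:
 *
 *      02 01 06        Flags (LE General Discoverable, BR/EDR not supported)
 *      02 0a 00        TX Power Level (0 dBm)
 *      03 09 42 54     Complete Local Name ("BT")
 *
 * for a returned ad_len of 10.
 */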

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        /* Check for rfkill but allow the HCI setup stage to proceed
         * (which in itself doesn't cause any RF activity).
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
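
/* Illustrative userspace counterpart (added sketch, not from this file):
 * hci_dev_open() and hci_dev_close() back the HCIDEVUP and HCIDEVDOWN
 * ioctls, e.g.:
 *
 *      int ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *      if (ioctl(ctl, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *              perror("hci0 up");
 *
 * which is essentially what "hciconfig hci0 up" does.
 */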

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags)) {
                ret = -ENETDOWN;
                goto done;
        }

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                ret = -EBUSY;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}
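
/* Illustrative userspace counterpart (added sketch, not from this file):
 * the HCISET* ioctls pass the new setting in dev_opt. Making an adapter
 * connectable and discoverable, as "hciconfig hci0 piscan" does, could
 * look like:
 *
 *      struct hci_dev_req dr;
 *
 *      dr.dev_id  = 0;
 *      dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;
 *
 *      ioctl(ctl, HCISETSCAN, (unsigned long) &dr);
 */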

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                return -EBUSY;

        if (blocked) {
                set_bit(HCI_RFKILLED, &hdev->dev_flags);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                        hci_dev_do_close(hdev);
        } else {
                clear_bit(HCI_RFKILLED, &hdev->dev_flags);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        err = hci_dev_open(hdev->id);
        if (err < 0) {
                mgmt_set_powered_failed(hdev, err);
                return;
        }

        if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
                hci_dev_do_close(hdev);
        } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

1790         /* Neither the local nor the remote side had no-bonding as a requirement */
1791         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1792                 return true;
1793
1794         /* Local side had dedicated bonding as requirement */
1795         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1796                 return true;
1797
1798         /* Remote side had dedicated bonding as requirement */
1799         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1800                 return true;
1801
1802         /* If none of the above criteria match, then don't store the key
1803          * persistently */
1804         return false;
1805 }
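
/* Worked example of the rules above (illustration only): an
 * unauthenticated combination key (type 0x04) negotiated while both
 * sides asked for general bonding (auth_type 0x04) passes the
 * "neither side had no-bonding" check and is stored persistently.
 * The same key type with conn->auth_type == 0x00 and
 * conn->remote_auth == 0x00 (both no-bonding) fails every check and
 * is dropped when the connection goes away.
 */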
1806
1807 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1808 {
1809         struct smp_ltk *k;
1810
1811         list_for_each_entry(k, &hdev->long_term_keys, list) {
1812                 if (k->ediv != ediv ||
1813                     memcmp(rand, k->rand, sizeof(k->rand)))
1814                         continue;
1815
1816                 return k;
1817         }
1818
1819         return NULL;
1820 }
1821
1822 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1823                                      u8 addr_type)
1824 {
1825         struct smp_ltk *k;
1826
1827         list_for_each_entry(k, &hdev->long_term_keys, list)
1828                 if (addr_type == k->bdaddr_type &&
1829                     bacmp(bdaddr, &k->bdaddr) == 0)
1830                         return k;
1831
1832         return NULL;
1833 }
1834
1835 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1836                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1837 {
1838         struct link_key *key, *old_key;
1839         u8 old_key_type;
1840         bool persistent;
1841
1842         old_key = hci_find_link_key(hdev, bdaddr);
1843         if (old_key) {
1844                 old_key_type = old_key->type;
1845                 key = old_key;
1846         } else {
1847                 old_key_type = conn ? conn->key_type : 0xff;
1848                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1849                 if (!key)
1850                         return -ENOMEM;
1851                 list_add(&key->list, &hdev->link_keys);
1852         }
1853
1854         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
1855
1856         /* Some buggy controller combinations generate a changed
1857          * combination key for legacy pairing even when there's no
1858          * previous key */
1859         if (type == HCI_LK_CHANGED_COMBINATION &&
1860             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1861                 type = HCI_LK_COMBINATION;
1862                 if (conn)
1863                         conn->key_type = type;
1864         }
1865
1866         bacpy(&key->bdaddr, bdaddr);
1867         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1868         key->pin_len = pin_len;
1869
1870         if (type == HCI_LK_CHANGED_COMBINATION)
1871                 key->type = old_key_type;
1872         else
1873                 key->type = type;
1874
1875         if (!new_key)
1876                 return 0;
1877
1878         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1879
1880         mgmt_new_link_key(hdev, key, persistent);
1881
1882         if (conn)
1883                 conn->flush_key = !persistent;
1884
1885         return 0;
1886 }
1887
1888 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1889                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
1890                 __le16 ediv, u8 rand[8])
1891 {
1892         struct smp_ltk *key, *old_key;
1893
1894         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1895                 return 0;
1896
1897         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1898         if (old_key)
1899                 key = old_key;
1900         else {
1901                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1902                 if (!key)
1903                         return -ENOMEM;
1904                 list_add(&key->list, &hdev->long_term_keys);
1905         }
1906
1907         bacpy(&key->bdaddr, bdaddr);
1908         key->bdaddr_type = addr_type;
1909         memcpy(key->val, tk, sizeof(key->val));
1910         key->authenticated = authenticated;
1911         key->ediv = ediv;
1912         key->enc_size = enc_size;
1913         key->type = type;
1914         memcpy(key->rand, rand, sizeof(key->rand));
1915
1916         if (!new_key)
1917                 return 0;
1918
1919         if (type & HCI_SMP_LTK)
1920                 mgmt_new_ltk(hdev, key, 1);
1921
1922         return 0;
1923 }
1924
1925 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1926 {
1927         struct link_key *key;
1928
1929         key = hci_find_link_key(hdev, bdaddr);
1930         if (!key)
1931                 return -ENOENT;
1932
1933         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1934
1935         list_del(&key->list);
1936         kfree(key);
1937
1938         return 0;
1939 }
1940
1941 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1942 {
1943         struct smp_ltk *k, *tmp;
1944
1945         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1946                 if (bacmp(bdaddr, &k->bdaddr))
1947                         continue;
1948
1949                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1950
1951                 list_del(&k->list);
1952                 kfree(k);
1953         }
1954
1955         return 0;
1956 }
1957
1958 /* HCI command timer function */
1959 static void hci_cmd_timeout(unsigned long arg)
1960 {
1961         struct hci_dev *hdev = (void *) arg;
1962
1963         if (hdev->sent_cmd) {
1964                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1965                 u16 opcode = __le16_to_cpu(sent->opcode);
1966
1967                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1968         } else {
1969                 BT_ERR("%s command tx timeout", hdev->name);
1970         }
1971
1972         atomic_set(&hdev->cmd_cnt, 1);
1973         queue_work(hdev->workqueue, &hdev->cmd_work);
1974 }
1975
1976 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1977                                           bdaddr_t *bdaddr)
1978 {
1979         struct oob_data *data;
1980
1981         list_for_each_entry(data, &hdev->remote_oob_data, list)
1982                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1983                         return data;
1984
1985         return NULL;
1986 }
1987
1988 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1989 {
1990         struct oob_data *data;
1991
1992         data = hci_find_remote_oob_data(hdev, bdaddr);
1993         if (!data)
1994                 return -ENOENT;
1995
1996         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
1997
1998         list_del(&data->list);
1999         kfree(data);
2000
2001         return 0;
2002 }
2003
2004 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2005 {
2006         struct oob_data *data, *n;
2007
2008         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2009                 list_del(&data->list);
2010                 kfree(data);
2011         }
2012
2013         return 0;
2014 }
2015
2016 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2017                             u8 *randomizer)
2018 {
2019         struct oob_data *data;
2020
2021         data = hci_find_remote_oob_data(hdev, bdaddr);
2022
2023         if (!data) {
2024                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2025                 if (!data)
2026                         return -ENOMEM;
2027
2028                 bacpy(&data->bdaddr, bdaddr);
2029                 list_add(&data->list, &hdev->remote_oob_data);
2030         }
2031
2032         memcpy(data->hash, hash, sizeof(data->hash));
2033         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2034
2035         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2036
2037         return 0;
2038 }
2039
2040 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
2041 {
2042         struct bdaddr_list *b;
2043
2044         list_for_each_entry(b, &hdev->blacklist, list)
2045                 if (bacmp(bdaddr, &b->bdaddr) == 0)
2046                         return b;
2047
2048         return NULL;
2049 }
2050
2051 int hci_blacklist_clear(struct hci_dev *hdev)
2052 {
2053         struct list_head *p, *n;
2054
2055         list_for_each_safe(p, n, &hdev->blacklist) {
2056                 struct bdaddr_list *b;
2057
2058                 b = list_entry(p, struct bdaddr_list, list);
2059
2060                 list_del(p);
2061                 kfree(b);
2062         }
2063
2064         return 0;
2065 }
2066
2067 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2068 {
2069         struct bdaddr_list *entry;
2070
2071         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2072                 return -EBADF;
2073
2074         if (hci_blacklist_lookup(hdev, bdaddr))
2075                 return -EEXIST;
2076
2077         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2078         if (!entry)
2079                 return -ENOMEM;
2080
2081         bacpy(&entry->bdaddr, bdaddr);
2082
2083         list_add(&entry->list, &hdev->blacklist);
2084
2085         return mgmt_device_blocked(hdev, bdaddr, type);
2086 }
2087
2088 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2089 {
2090         struct bdaddr_list *entry;
2091
2092         if (bacmp(bdaddr, BDADDR_ANY) == 0)
2093                 return hci_blacklist_clear(hdev);
2094
2095         entry = hci_blacklist_lookup(hdev, bdaddr);
2096         if (!entry)
2097                 return -ENOENT;
2098
2099         list_del(&entry->list);
2100         kfree(entry);
2101
2102         return mgmt_device_unblocked(hdev, bdaddr, type);
2103 }
2104
2105 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2106 {
2107         if (status) {
2108                 BT_ERR("Failed to start inquiry: status %d", status);
2109
2110                 hci_dev_lock(hdev);
2111                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2112                 hci_dev_unlock(hdev);
2113                 return;
2114         }
2115 }
2116
2117 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2118 {
2119         /* General inquiry access code (GIAC) */
2120         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2121         struct hci_request req;
2122         struct hci_cp_inquiry cp;
2123         int err;
2124
2125         if (status) {
2126                 BT_ERR("Failed to disable LE scanning: status %d", status);
2127                 return;
2128         }
2129
2130         switch (hdev->discovery.type) {
2131         case DISCOV_TYPE_LE:
2132                 hci_dev_lock(hdev);
2133                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2134                 hci_dev_unlock(hdev);
2135                 break;
2136
2137         case DISCOV_TYPE_INTERLEAVED:
2138                 hci_req_init(&req, hdev);
2139
2140                 memset(&cp, 0, sizeof(cp));
2141                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2142                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2143                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2144
2145                 hci_dev_lock(hdev);
2146
2147                 hci_inquiry_cache_flush(hdev);
2148
2149                 err = hci_req_run(&req, inquiry_complete);
2150                 if (err) {
2151                         BT_ERR("Inquiry request failed: err %d", err);
2152                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2153                 }
2154
2155                 hci_dev_unlock(hdev);
2156                 break;
2157         }
2158 }
2159
2160 static void le_scan_disable_work(struct work_struct *work)
2161 {
2162         struct hci_dev *hdev = container_of(work, struct hci_dev,
2163                                             le_scan_disable.work);
2164         struct hci_cp_le_set_scan_enable cp;
2165         struct hci_request req;
2166         int err;
2167
2168         BT_DBG("%s", hdev->name);
2169
2170         hci_req_init(&req, hdev);
2171
2172         memset(&cp, 0, sizeof(cp));
2173         cp.enable = LE_SCAN_DISABLE;
2174         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2175
2176         err = hci_req_run(&req, le_scan_disable_work_complete);
2177         if (err)
2178                 BT_ERR("Disable LE scanning request failed: err %d", err);
2179 }
2180
2181 /* Alloc HCI device */
2182 struct hci_dev *hci_alloc_dev(void)
2183 {
2184         struct hci_dev *hdev;
2185
2186         hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2187         if (!hdev)
2188                 return NULL;
2189
2190         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2191         hdev->esco_type = (ESCO_HV1);
2192         hdev->link_mode = (HCI_LM_ACCEPT);
2193         hdev->io_capability = 0x03; /* No Input No Output */
2194         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2195         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2196
2197         hdev->sniff_max_interval = 800;
2198         hdev->sniff_min_interval = 80;
2199
2200         mutex_init(&hdev->lock);
2201         mutex_init(&hdev->req_lock);
2202
2203         INIT_LIST_HEAD(&hdev->mgmt_pending);
2204         INIT_LIST_HEAD(&hdev->blacklist);
2205         INIT_LIST_HEAD(&hdev->uuids);
2206         INIT_LIST_HEAD(&hdev->link_keys);
2207         INIT_LIST_HEAD(&hdev->long_term_keys);
2208         INIT_LIST_HEAD(&hdev->remote_oob_data);
2209         INIT_LIST_HEAD(&hdev->conn_hash.list);
2210
2211         INIT_WORK(&hdev->rx_work, hci_rx_work);
2212         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2213         INIT_WORK(&hdev->tx_work, hci_tx_work);
2214         INIT_WORK(&hdev->power_on, hci_power_on);
2215
2216         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2217         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2218         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2219
2220         skb_queue_head_init(&hdev->rx_q);
2221         skb_queue_head_init(&hdev->cmd_q);
2222         skb_queue_head_init(&hdev->raw_q);
2223
2224         init_waitqueue_head(&hdev->req_wait_q);
2225
2226         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2227
2228         hci_init_sysfs(hdev);
2229         discovery_init(hdev);
2230
2231         return hdev;
2232 }
2233 EXPORT_SYMBOL(hci_alloc_dev);
2234
2235 /* Free HCI device */
2236 void hci_free_dev(struct hci_dev *hdev)
2237 {
2238         /* will free via device release */
2239         put_device(&hdev->dev);
2240 }
2241 EXPORT_SYMBOL(hci_free_dev);
2242
2243 /* Register HCI device */
2244 int hci_register_dev(struct hci_dev *hdev)
2245 {
2246         int id, error;
2247
2248         if (!hdev->open || !hdev->close)
2249                 return -EINVAL;
2250
2251         /* Do not allow HCI_AMP devices to register at index 0,
2252          * so the index can be used as the AMP controller ID.
2253          */
2254         switch (hdev->dev_type) {
2255         case HCI_BREDR:
2256                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2257                 break;
2258         case HCI_AMP:
2259                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2260                 break;
2261         default:
2262                 return -EINVAL;
2263         }
2264
2265         if (id < 0)
2266                 return id;
2267
2268         sprintf(hdev->name, "hci%d", id);
2269         hdev->id = id;
2270
2271         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2272
2273         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2274                                           WQ_MEM_RECLAIM, 1, hdev->name);
2275         if (!hdev->workqueue) {
2276                 error = -ENOMEM;
2277                 goto err;
2278         }
2279
2280         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2281                                               WQ_MEM_RECLAIM, 1, hdev->name);
2282         if (!hdev->req_workqueue) {
2283                 destroy_workqueue(hdev->workqueue);
2284                 error = -ENOMEM;
2285                 goto err;
2286         }
2287
2288         error = hci_add_sysfs(hdev);
2289         if (error < 0)
2290                 goto err_wqueue;
2291
2292         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2293                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2294                                     hdev);
2295         if (hdev->rfkill) {
2296                 if (rfkill_register(hdev->rfkill) < 0) {
2297                         rfkill_destroy(hdev->rfkill);
2298                         hdev->rfkill = NULL;
2299                 }
2300         }
2301
2302         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2303                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2304
2305         set_bit(HCI_SETUP, &hdev->dev_flags);
2306
2307         if (hdev->dev_type != HCI_AMP)
2308                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2309
2310         write_lock(&hci_dev_list_lock);
2311         list_add(&hdev->list, &hci_dev_list);
2312         write_unlock(&hci_dev_list_lock);
2313
2314         hci_notify(hdev, HCI_DEV_REG);
2315         hci_dev_hold(hdev);
2316
2317         queue_work(hdev->req_workqueue, &hdev->power_on);
2318
2319         return id;
2320
2321 err_wqueue:
2322         destroy_workqueue(hdev->workqueue);
2323         destroy_workqueue(hdev->req_workqueue);
2324 err:
2325         ida_simple_remove(&hci_index_ida, hdev->id);
2326
2327         return error;
2328 }
2329 EXPORT_SYMBOL(hci_register_dev);
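
/* Sketch of the expected driver-side usage of the registration API
 * (illustrative only; my_open/my_close/my_send are hypothetical
 * transport callbacks and error handling is trimmed):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 *
 * Registration fails for devices without ->open and ->close, and on
 * success the core schedules the initial power_on work by itself.
 */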
2330
2331 /* Unregister HCI device */
2332 void hci_unregister_dev(struct hci_dev *hdev)
2333 {
2334         int i, id;
2335
2336         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2337
2338         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2339
2340         id = hdev->id;
2341
2342         write_lock(&hci_dev_list_lock);
2343         list_del(&hdev->list);
2344         write_unlock(&hci_dev_list_lock);
2345
2346         hci_dev_do_close(hdev);
2347
2348         for (i = 0; i < NUM_REASSEMBLY; i++)
2349                 kfree_skb(hdev->reassembly[i]);
2350
2351         cancel_work_sync(&hdev->power_on);
2352
2353         if (!test_bit(HCI_INIT, &hdev->flags) &&
2354             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
2355                 hci_dev_lock(hdev);
2356                 mgmt_index_removed(hdev);
2357                 hci_dev_unlock(hdev);
2358         }
2359
2360         /* mgmt_index_removed should take care of emptying the
2361          * pending list */
2362         BUG_ON(!list_empty(&hdev->mgmt_pending));
2363
2364         hci_notify(hdev, HCI_DEV_UNREG);
2365
2366         if (hdev->rfkill) {
2367                 rfkill_unregister(hdev->rfkill);
2368                 rfkill_destroy(hdev->rfkill);
2369         }
2370
2371         hci_del_sysfs(hdev);
2372
2373         destroy_workqueue(hdev->workqueue);
2374         destroy_workqueue(hdev->req_workqueue);
2375
2376         hci_dev_lock(hdev);
2377         hci_blacklist_clear(hdev);
2378         hci_uuids_clear(hdev);
2379         hci_link_keys_clear(hdev);
2380         hci_smp_ltks_clear(hdev);
2381         hci_remote_oob_data_clear(hdev);
2382         hci_dev_unlock(hdev);
2383
2384         hci_dev_put(hdev);
2385
2386         ida_simple_remove(&hci_index_ida, id);
2387 }
2388 EXPORT_SYMBOL(hci_unregister_dev);
2389
2390 /* Suspend HCI device */
2391 int hci_suspend_dev(struct hci_dev *hdev)
2392 {
2393         hci_notify(hdev, HCI_DEV_SUSPEND);
2394         return 0;
2395 }
2396 EXPORT_SYMBOL(hci_suspend_dev);
2397
2398 /* Resume HCI device */
2399 int hci_resume_dev(struct hci_dev *hdev)
2400 {
2401         hci_notify(hdev, HCI_DEV_RESUME);
2402         return 0;
2403 }
2404 EXPORT_SYMBOL(hci_resume_dev);
2405
2406 /* Receive frame from HCI drivers */
2407 int hci_recv_frame(struct sk_buff *skb)
2408 {
2409         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2410         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
2411                       !test_bit(HCI_INIT, &hdev->flags))) {
2412                 kfree_skb(skb);
2413                 return -ENXIO;
2414         }
2415
2416         /* Incoming skb */
2417         bt_cb(skb)->incoming = 1;
2418
2419         /* Time stamp */
2420         __net_timestamp(skb);
2421
2422         skb_queue_tail(&hdev->rx_q, skb);
2423         queue_work(hdev->workqueue, &hdev->rx_work);
2424
2425         return 0;
2426 }
2427 EXPORT_SYMBOL(hci_recv_frame);
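
/* Drivers that receive complete packets in one piece (e.g. over USB
 * bulk endpoints) hand them in roughly like this (sketch; the skb is
 * assumed to already hold the raw packet without the type byte):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 *
 * Transports that deliver arbitrary chunks use hci_recv_fragment()
 * or hci_recv_stream_fragment() below instead.
 */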
2428
2429 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
2430                           int count, __u8 index)
2431 {
2432         int len = 0;
2433         int hlen = 0;
2434         int remain = count;
2435         struct sk_buff *skb;
2436         struct bt_skb_cb *scb;
2437
2438         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
2439             index >= NUM_REASSEMBLY)
2440                 return -EILSEQ;
2441
2442         skb = hdev->reassembly[index];
2443
2444         if (!skb) {
2445                 switch (type) {
2446                 case HCI_ACLDATA_PKT:
2447                         len = HCI_MAX_FRAME_SIZE;
2448                         hlen = HCI_ACL_HDR_SIZE;
2449                         break;
2450                 case HCI_EVENT_PKT:
2451                         len = HCI_MAX_EVENT_SIZE;
2452                         hlen = HCI_EVENT_HDR_SIZE;
2453                         break;
2454                 case HCI_SCODATA_PKT:
2455                         len = HCI_MAX_SCO_SIZE;
2456                         hlen = HCI_SCO_HDR_SIZE;
2457                         break;
2458                 }
2459
2460                 skb = bt_skb_alloc(len, GFP_ATOMIC);
2461                 if (!skb)
2462                         return -ENOMEM;
2463
2464                 scb = (void *) skb->cb;
2465                 scb->expect = hlen;
2466                 scb->pkt_type = type;
2467
2468                 skb->dev = (void *) hdev;
2469                 hdev->reassembly[index] = skb;
2470         }
2471
2472         while (count) {
2473                 scb = (void *) skb->cb;
2474                 len = min_t(uint, scb->expect, count);
2475
2476                 memcpy(skb_put(skb, len), data, len);
2477
2478                 count -= len;
2479                 data += len;
2480                 scb->expect -= len;
2481                 remain = count;
2482
2483                 switch (type) {
2484                 case HCI_EVENT_PKT:
2485                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2486                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2487                                 scb->expect = h->plen;
2488
2489                                 if (skb_tailroom(skb) < scb->expect) {
2490                                         kfree_skb(skb);
2491                                         hdev->reassembly[index] = NULL;
2492                                         return -ENOMEM;
2493                                 }
2494                         }
2495                         break;
2496
2497                 case HCI_ACLDATA_PKT:
2498                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2499                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2500                                 scb->expect = __le16_to_cpu(h->dlen);
2501
2502                                 if (skb_tailroom(skb) < scb->expect) {
2503                                         kfree_skb(skb);
2504                                         hdev->reassembly[index] = NULL;
2505                                         return -ENOMEM;
2506                                 }
2507                         }
2508                         break;
2509
2510                 case HCI_SCODATA_PKT:
2511                         if (skb->len == HCI_SCO_HDR_SIZE) {
2512                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2513                                 scb->expect = h->dlen;
2514
2515                                 if (skb_tailroom(skb) < scb->expect) {
2516                                         kfree_skb(skb);
2517                                         hdev->reassembly[index] = NULL;
2518                                         return -ENOMEM;
2519                                 }
2520                         }
2521                         break;
2522                 }
2523
2524                 if (scb->expect == 0) {
2525                         /* Complete frame */
2526
2527                         bt_cb(skb)->pkt_type = type;
2528                         hci_recv_frame(skb);
2529
2530                         hdev->reassembly[index] = NULL;
2531                         return remain;
2532                 }
2533         }
2534
2535         return remain;
2536 }
2537
2538 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2539 {
2540         int rem = 0;
2541
2542         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2543                 return -EILSEQ;
2544
2545         while (count) {
2546                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2547                 if (rem < 0)
2548                         return rem;
2549
2550                 data += (count - rem);
2551                 count = rem;
2552         }
2553
2554         return rem;
2555 }
2556 EXPORT_SYMBOL(hci_recv_fragment);
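
/* For example, a UART-style driver that has just read some bytes of
 * an event packet could feed them in as follows (sketch; buf and len
 * are hypothetical driver variables):
 *
 *	if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
 *		BT_ERR("%s frame reassembly failed", hdev->name);
 *
 * Completed frames are passed to hci_recv_frame() internally; on
 * success the whole chunk is consumed and 0 is returned.
 */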
2557
2558 #define STREAM_REASSEMBLY 0
2559
2560 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2561 {
2562         int type;
2563         int rem = 0;
2564
2565         while (count) {
2566                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2567
2568                 if (!skb) {
2569                         struct { char type; } *pkt;
2570
2571                         /* Start of the frame */
2572                         pkt = data;
2573                         type = pkt->type;
2574
2575                         data++;
2576                         count--;
2577                 } else
2578                         type = bt_cb(skb)->pkt_type;
2579
2580                 rem = hci_reassembly(hdev, type, data, count,
2581                                      STREAM_REASSEMBLY);
2582                 if (rem < 0)
2583                         return rem;
2584
2585                 data += (count - rem);
2586                 count = rem;
2587         }
2588
2589         return rem;
2590 }
2591 EXPORT_SYMBOL(hci_recv_stream_fragment);
2592
2593 /* ---- Interface to upper protocols ---- */
2594
2595 int hci_register_cb(struct hci_cb *cb)
2596 {
2597         BT_DBG("%p name %s", cb, cb->name);
2598
2599         write_lock(&hci_cb_list_lock);
2600         list_add(&cb->list, &hci_cb_list);
2601         write_unlock(&hci_cb_list_lock);
2602
2603         return 0;
2604 }
2605 EXPORT_SYMBOL(hci_register_cb);
2606
2607 int hci_unregister_cb(struct hci_cb *cb)
2608 {
2609         BT_DBG("%p name %s", cb, cb->name);
2610
2611         write_lock(&hci_cb_list_lock);
2612         list_del(&cb->list);
2613         write_unlock(&hci_cb_list_lock);
2614
2615         return 0;
2616 }
2617 EXPORT_SYMBOL(hci_unregister_cb);
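
/* Upper protocols hook in with a statically allocated callback block,
 * e.g. (sketch; only the fields this file touches are shown):
 *
 *	static struct hci_cb my_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 *
 * Real users such as L2CAP also fill in the confirmation callbacks
 * (->security_cfm and friends) declared in hci_core.h.
 */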
2618
2619 static int hci_send_frame(struct sk_buff *skb)
2620 {
2621         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2622
2623         if (!hdev) {
2624                 kfree_skb(skb);
2625                 return -ENODEV;
2626         }
2627
2628         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2629
2630         /* Time stamp */
2631         __net_timestamp(skb);
2632
2633         /* Send copy to monitor */
2634         hci_send_to_monitor(hdev, skb);
2635
2636         if (atomic_read(&hdev->promisc)) {
2637                 /* Send copy to the sockets */
2638                 hci_send_to_sock(hdev, skb);
2639         }
2640
2641         /* Get rid of skb owner, prior to sending to the driver. */
2642         skb_orphan(skb);
2643
2644         return hdev->send(skb);
2645 }
2646
2647 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2648 {
2649         skb_queue_head_init(&req->cmd_q);
2650         req->hdev = hdev;
2651         req->err = 0;
2652 }
2653
2654 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2655 {
2656         struct hci_dev *hdev = req->hdev;
2657         struct sk_buff *skb;
2658         unsigned long flags;
2659
2660         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2661
2662         /* If an error occurred during request building, remove all HCI
2663          * commands queued on the HCI request queue.
2664          */
2665         if (req->err) {
2666                 skb_queue_purge(&req->cmd_q);
2667                 return req->err;
2668         }
2669
2670         /* Do not allow empty requests */
2671         if (skb_queue_empty(&req->cmd_q))
2672                 return -ENODATA;
2673
2674         skb = skb_peek_tail(&req->cmd_q);
2675         bt_cb(skb)->req.complete = complete;
2676
2677         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2678         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2679         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2680
2681         queue_work(hdev->workqueue, &hdev->cmd_work);
2682
2683         return 0;
2684 }
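
/* A minimal user of the request machinery, modelled on the LE scan
 * helpers above (sketch; my_complete is a hypothetical
 * hci_req_complete_t callback):
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	if (hci_req_run(&req, my_complete) < 0)
 *		BT_ERR("%s request failed", hdev->name);
 *
 * my_complete runs once the controller has answered the last command
 * queued on the request.
 */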
2685
2686 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2687                                        u32 plen, const void *param)
2688 {
2689         int len = HCI_COMMAND_HDR_SIZE + plen;
2690         struct hci_command_hdr *hdr;
2691         struct sk_buff *skb;
2692
2693         skb = bt_skb_alloc(len, GFP_ATOMIC);
2694         if (!skb)
2695                 return NULL;
2696
2697         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2698         hdr->opcode = cpu_to_le16(opcode);
2699         hdr->plen   = plen;
2700
2701         if (plen)
2702                 memcpy(skb_put(skb, plen), param, plen);
2703
2704         BT_DBG("skb len %d", skb->len);
2705
2706         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2707         skb->dev = (void *) hdev;
2708
2709         return skb;
2710 }
2711
2712 /* Send HCI command */
2713 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2714                  const void *param)
2715 {
2716         struct sk_buff *skb;
2717
2718         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2719
2720         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2721         if (!skb) {
2722                 BT_ERR("%s no memory for command", hdev->name);
2723                 return -ENOMEM;
2724         }
2725
2726         /* Stand-alone HCI commands must be flagged as
2727          * single-command requests.
2728          */
2729         bt_cb(skb)->req.start = true;
2730
2731         skb_queue_tail(&hdev->cmd_q, skb);
2732         queue_work(hdev->workqueue, &hdev->cmd_work);
2733
2734         return 0;
2735 }
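
/* Example call, mirroring hci_discov_off() above:
 *
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *
 * Unlike hci_req_run(), this queues exactly one command and takes no
 * completion callback of its own.
 */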
2736
2737 /* Queue a command to an asynchronous HCI request */
2738 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2739                     const void *param, u8 event)
2740 {
2741         struct hci_dev *hdev = req->hdev;
2742         struct sk_buff *skb;
2743
2744         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2745
2746         /* If an error occurred during request building, there is no point in
2747          * queueing the HCI command. We can simply return.
2748          */
2749         if (req->err)
2750                 return;
2751
2752         skb = hci_prepare_cmd(hdev, opcode, plen, param);
2753         if (!skb) {
2754                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2755                        hdev->name, opcode);
2756                 req->err = -ENOMEM;
2757                 return;
2758         }
2759
2760         if (skb_queue_empty(&req->cmd_q))
2761                 bt_cb(skb)->req.start = true;
2762
2763         bt_cb(skb)->req.event = event;
2764
2765         skb_queue_tail(&req->cmd_q, skb);
2766 }
2767
2768 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2769                  const void *param)
2770 {
2771         hci_req_add_ev(req, opcode, plen, param, 0);
2772 }
2773
2774 /* Get data from the previously sent command */
2775 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2776 {
2777         struct hci_command_hdr *hdr;
2778
2779         if (!hdev->sent_cmd)
2780                 return NULL;
2781
2782         hdr = (void *) hdev->sent_cmd->data;
2783
2784         if (hdr->opcode != cpu_to_le16(opcode))
2785                 return NULL;
2786
2787         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2788
2789         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2790 }
2791
2792 /* Send ACL data */
2793 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2794 {
2795         struct hci_acl_hdr *hdr;
2796         int len = skb->len;
2797
2798         skb_push(skb, HCI_ACL_HDR_SIZE);
2799         skb_reset_transport_header(skb);
2800         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2801         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2802         hdr->dlen   = cpu_to_le16(len);
2803 }
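
/* hci_handle_pack() folds the packet boundary/broadcast flags into the
 * upper nibble of the 16-bit handle field, roughly:
 *
 *	packed = (handle & 0x0fff) | (flags << 12);
 *
 * so, assuming ACL_START is 0x02 as defined in hci.h, handle 0x002a
 * sent with ACL_START goes on the wire as 0x202a.
 */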
2804
2805 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2806                           struct sk_buff *skb, __u16 flags)
2807 {
2808         struct hci_conn *conn = chan->conn;
2809         struct hci_dev *hdev = conn->hdev;
2810         struct sk_buff *list;
2811
2812         skb->len = skb_headlen(skb);
2813         skb->data_len = 0;
2814
2815         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2816
2817         switch (hdev->dev_type) {
2818         case HCI_BREDR:
2819                 hci_add_acl_hdr(skb, conn->handle, flags);
2820                 break;
2821         case HCI_AMP:
2822                 hci_add_acl_hdr(skb, chan->handle, flags);
2823                 break;
2824         default:
2825                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2826                 return;
2827         }
2828
2829         list = skb_shinfo(skb)->frag_list;
2830         if (!list) {
2831                 /* Non fragmented */
2832                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2833
2834                 skb_queue_tail(queue, skb);
2835         } else {
2836                 /* Fragmented */
2837                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2838
2839                 skb_shinfo(skb)->frag_list = NULL;
2840
2841                 /* Queue all fragments atomically */
2842                 spin_lock(&queue->lock);
2843
2844                 __skb_queue_tail(queue, skb);
2845
2846                 flags &= ~ACL_START;
2847                 flags |= ACL_CONT;
2848                 do {
2849                         skb = list; list = list->next;
2850
2851                         skb->dev = (void *) hdev;
2852                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2853                         hci_add_acl_hdr(skb, conn->handle, flags);
2854
2855                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2856
2857                         __skb_queue_tail(queue, skb);
2858                 } while (list);
2859
2860                 spin_unlock(&queue->lock);
2861         }
2862 }
2863
2864 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2865 {
2866         struct hci_dev *hdev = chan->conn->hdev;
2867
2868         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2869
2870         skb->dev = (void *) hdev;
2871
2872         hci_queue_acl(chan, &chan->data_q, skb, flags);
2873
2874         queue_work(hdev->workqueue, &hdev->tx_work);
2875 }
2876
2877 /* Send SCO data */
2878 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2879 {
2880         struct hci_dev *hdev = conn->hdev;
2881         struct hci_sco_hdr hdr;
2882
2883         BT_DBG("%s len %d", hdev->name, skb->len);
2884
2885         hdr.handle = cpu_to_le16(conn->handle);
2886         hdr.dlen   = skb->len;
2887
2888         skb_push(skb, HCI_SCO_HDR_SIZE);
2889         skb_reset_transport_header(skb);
2890         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2891
2892         skb->dev = (void *) hdev;
2893         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2894
2895         skb_queue_tail(&conn->data_q, skb);
2896         queue_work(hdev->workqueue, &hdev->tx_work);
2897 }
2898
2899 /* ---- HCI TX task (outgoing data) ---- */
2900
2901 /* HCI Connection scheduler */
2902 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2903                                      int *quote)
2904 {
2905         struct hci_conn_hash *h = &hdev->conn_hash;
2906         struct hci_conn *conn = NULL, *c;
2907         unsigned int num = 0, min = ~0;
2908
2909         /* We don't have to lock device here. Connections are always
2910          * added and removed with TX task disabled. */
2911
2912         rcu_read_lock();
2913
2914         list_for_each_entry_rcu(c, &h->list, list) {
2915                 if (c->type != type || skb_queue_empty(&c->data_q))
2916                         continue;
2917
2918                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2919                         continue;
2920
2921                 num++;
2922
2923                 if (c->sent < min) {
2924                         min  = c->sent;
2925                         conn = c;
2926                 }
2927
2928                 if (hci_conn_num(hdev, type) == num)
2929                         break;
2930         }
2931
2932         rcu_read_unlock();
2933
2934         if (conn) {
2935                 int cnt, q;
2936
2937                 switch (conn->type) {
2938                 case ACL_LINK:
2939                         cnt = hdev->acl_cnt;
2940                         break;
2941                 case SCO_LINK:
2942                 case ESCO_LINK:
2943                         cnt = hdev->sco_cnt;
2944                         break;
2945                 case LE_LINK:
2946                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2947                         break;
2948                 default:
2949                         cnt = 0;
2950                         BT_ERR("Unknown link type");
2951                 }
2952
2953                 q = cnt / num;
2954                 *quote = q ? q : 1;
2955         } else
2956                 *quote = 0;
2957
2958         BT_DBG("conn %p quote %d", conn, *quote);
2959         return conn;
2960 }
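
/* The quota is the number of free controller buffers divided evenly
 * among the active connections of that type: with hdev->sco_cnt == 8
 * and three SCO links queuing data, the chosen connection gets a
 * quote of 8 / 3 == 2, and the division never hands out less than 1.
 */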
2961
2962 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2963 {
2964         struct hci_conn_hash *h = &hdev->conn_hash;
2965         struct hci_conn *c;
2966
2967         BT_ERR("%s link tx timeout", hdev->name);
2968
2969         rcu_read_lock();
2970
2971         /* Kill stalled connections */
2972         list_for_each_entry_rcu(c, &h->list, list) {
2973                 if (c->type == type && c->sent) {
2974                         BT_ERR("%s killing stalled connection %pMR",
2975                                hdev->name, &c->dst);
2976                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
2977                 }
2978         }
2979
2980         rcu_read_unlock();
2981 }
2982
2983 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2984                                       int *quote)
2985 {
2986         struct hci_conn_hash *h = &hdev->conn_hash;
2987         struct hci_chan *chan = NULL;
2988         unsigned int num = 0, min = ~0, cur_prio = 0;
2989         struct hci_conn *conn;
2990         int cnt, q, conn_num = 0;
2991
2992         BT_DBG("%s", hdev->name);
2993
2994         rcu_read_lock();
2995
2996         list_for_each_entry_rcu(conn, &h->list, list) {
2997                 struct hci_chan *tmp;
2998
2999                 if (conn->type != type)
3000                         continue;
3001
3002                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3003                         continue;
3004
3005                 conn_num++;
3006
3007                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3008                         struct sk_buff *skb;
3009
3010                         if (skb_queue_empty(&tmp->data_q))
3011                                 continue;
3012
3013                         skb = skb_peek(&tmp->data_q);
3014                         if (skb->priority < cur_prio)
3015                                 continue;
3016
3017                         if (skb->priority > cur_prio) {
3018                                 num = 0;
3019                                 min = ~0;
3020                                 cur_prio = skb->priority;
3021                         }
3022
3023                         num++;
3024
3025                         if (conn->sent < min) {
3026                                 min  = conn->sent;
3027                                 chan = tmp;
3028                         }
3029                 }
3030
3031                 if (hci_conn_num(hdev, type) == conn_num)
3032                         break;
3033         }
3034
3035         rcu_read_unlock();
3036
3037         if (!chan)
3038                 return NULL;
3039
3040         switch (chan->conn->type) {
3041         case ACL_LINK:
3042                 cnt = hdev->acl_cnt;
3043                 break;
3044         case AMP_LINK:
3045                 cnt = hdev->block_cnt;
3046                 break;
3047         case SCO_LINK:
3048         case ESCO_LINK:
3049                 cnt = hdev->sco_cnt;
3050                 break;
3051         case LE_LINK:
3052                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3053                 break;
3054         default:
3055                 cnt = 0;
3056                 BT_ERR("Unknown link type");
3057         }
3058
3059         q = cnt / num;
3060         *quote = q ? q : 1;
3061         BT_DBG("chan %p quote %d", chan, *quote);
3062         return chan;
3063 }
3064
3065 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3066 {
3067         struct hci_conn_hash *h = &hdev->conn_hash;
3068         struct hci_conn *conn;
3069         int num = 0;
3070
3071         BT_DBG("%s", hdev->name);
3072
3073         rcu_read_lock();
3074
3075         list_for_each_entry_rcu(conn, &h->list, list) {
3076                 struct hci_chan *chan;
3077
3078                 if (conn->type != type)
3079                         continue;
3080
3081                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3082                         continue;
3083
3084                 num++;
3085
3086                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3087                         struct sk_buff *skb;
3088
3089                         if (chan->sent) {
3090                                 chan->sent = 0;
3091                                 continue;
3092                         }
3093
3094                         if (skb_queue_empty(&chan->data_q))
3095                                 continue;
3096
3097                         skb = skb_peek(&chan->data_q);
3098                         if (skb->priority >= HCI_PRIO_MAX - 1)
3099                                 continue;
3100
3101                         skb->priority = HCI_PRIO_MAX - 1;
3102
3103                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3104                                skb->priority);
3105                 }
3106
3107                 if (hci_conn_num(hdev, type) == num)
3108                         break;
3109         }
3110
3111         rcu_read_unlock();
3113 }
3114
3115 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3116 {
3117         /* Calculate count of blocks used by this packet */
3118         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3119 }
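
/* For example, a 339-byte ACL packet (including its 4-byte header) on
 * a controller reporting block_len == 64 occupies
 * DIV_ROUND_UP(339 - 4, 64) == 6 data blocks.
 */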
3120
3121 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3122 {
3123         if (!test_bit(HCI_RAW, &hdev->flags)) {
3124                 /* ACL tx timeout must be longer than maximum
3125                  * link supervision timeout (40.9 seconds) */
3126                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3127                                        HCI_ACL_TX_TIMEOUT))
3128                         hci_link_tx_to(hdev, ACL_LINK);
3129         }
3130 }
3131
3132 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3133 {
3134         unsigned int cnt = hdev->acl_cnt;
3135         struct hci_chan *chan;
3136         struct sk_buff *skb;
3137         int quote;
3138
3139         __check_timeout(hdev, cnt);
3140
3141         while (hdev->acl_cnt &&
3142                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3143                 u32 priority = (skb_peek(&chan->data_q))->priority;
3144                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3145                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3146                                skb->len, skb->priority);
3147
3148                         /* Stop if priority has changed */
3149                         if (skb->priority < priority)
3150                                 break;
3151
3152                         skb = skb_dequeue(&chan->data_q);
3153
3154                         hci_conn_enter_active_mode(chan->conn,
3155                                                    bt_cb(skb)->force_active);
3156
3157                         hci_send_frame(skb);
3158                         hdev->acl_last_tx = jiffies;
3159
3160                         hdev->acl_cnt--;
3161                         chan->sent++;
3162                         chan->conn->sent++;
3163                 }
3164         }
3165
3166         if (cnt != hdev->acl_cnt)
3167                 hci_prio_recalculate(hdev, ACL_LINK);
3168 }
3169
3170 static void hci_sched_acl_blk(struct hci_dev *hdev)
3171 {
3172         unsigned int cnt = hdev->block_cnt;
3173         struct hci_chan *chan;
3174         struct sk_buff *skb;
3175         int quote;
3176         u8 type;
3177
3178         __check_timeout(hdev, cnt);
3179
3180         BT_DBG("%s", hdev->name);
3181
3182         if (hdev->dev_type == HCI_AMP)
3183                 type = AMP_LINK;
3184         else
3185                 type = ACL_LINK;
3186
3187         while (hdev->block_cnt > 0 &&
3188                (chan = hci_chan_sent(hdev, type, &quote))) {
3189                 u32 priority = (skb_peek(&chan->data_q))->priority;
3190                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3191                         int blocks;
3192
3193                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3194                                skb->len, skb->priority);
3195
3196                         /* Stop if priority has changed */
3197                         if (skb->priority < priority)
3198                                 break;
3199
3200                         skb = skb_dequeue(&chan->data_q);
3201
3202                         blocks = __get_blocks(hdev, skb);
3203                         if (blocks > hdev->block_cnt)
3204                                 return;
3205
3206                         hci_conn_enter_active_mode(chan->conn,
3207                                                    bt_cb(skb)->force_active);
3208
3209                         hci_send_frame(skb);
3210                         hdev->acl_last_tx = jiffies;
3211
3212                         hdev->block_cnt -= blocks;
3213                         quote -= blocks;
3214
3215                         chan->sent += blocks;
3216                         chan->conn->sent += blocks;
3217                 }
3218         }
3219
3220         if (cnt != hdev->block_cnt)
3221                 hci_prio_recalculate(hdev, type);
3222 }
3223
3224 static void hci_sched_acl(struct hci_dev *hdev)
3225 {
3226         BT_DBG("%s", hdev->name);
3227
3228         /* No ACL link over BR/EDR controller */
3229         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3230                 return;
3231
3232         /* No AMP link over AMP controller */
3233         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3234                 return;
3235
3236         switch (hdev->flow_ctl_mode) {
3237         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3238                 hci_sched_acl_pkt(hdev);
3239                 break;
3240
3241         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3242                 hci_sched_acl_blk(hdev);
3243                 break;
3244         }
3245 }
3246
3247 /* Schedule SCO */
3248 static void hci_sched_sco(struct hci_dev *hdev)
3249 {
3250         struct hci_conn *conn;
3251         struct sk_buff *skb;
3252         int quote;
3253
3254         BT_DBG("%s", hdev->name);
3255
3256         if (!hci_conn_num(hdev, SCO_LINK))
3257                 return;
3258
3259         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3260                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3261                         BT_DBG("skb %p len %d", skb, skb->len);
3262                         hci_send_frame(skb);
3263
3264                         conn->sent++;
3265                         if (conn->sent == ~0)
3266                                 conn->sent = 0;
3267                 }
3268         }
3269 }
3270
3271 static void hci_sched_esco(struct hci_dev *hdev)
3272 {
3273         struct hci_conn *conn;
3274         struct sk_buff *skb;
3275         int quote;
3276
3277         BT_DBG("%s", hdev->name);
3278
3279         if (!hci_conn_num(hdev, ESCO_LINK))
3280                 return;
3281
3282         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3283                                                      &quote))) {
3284                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3285                         BT_DBG("skb %p len %d", skb, skb->len);
3286                         hci_send_frame(skb);
3287
3288                         conn->sent++;
3289                         if (conn->sent == ~0)
3290                                 conn->sent = 0;
3291                 }
3292         }
3293 }
3294
3295 static void hci_sched_le(struct hci_dev *hdev)
3296 {
3297         struct hci_chan *chan;
3298         struct sk_buff *skb;
3299         int quote, cnt, tmp;
3300
3301         BT_DBG("%s", hdev->name);
3302
3303         if (!hci_conn_num(hdev, LE_LINK))
3304                 return;
3305
3306         if (!test_bit(HCI_RAW, &hdev->flags)) {
3307                 /* LE tx timeout must be longer than maximum
3308                  * link supervision timeout (40.9 seconds) */
3309                 if (!hdev->le_cnt && hdev->le_pkts &&
3310                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
3311                         hci_link_tx_to(hdev, LE_LINK);
3312         }
3313
3314         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3315         tmp = cnt;
3316         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3317                 u32 priority = (skb_peek(&chan->data_q))->priority;
3318                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3319                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3320                                skb->len, skb->priority);
3321
3322                         /* Stop if priority has changed */
3323                         if (skb->priority < priority)
3324                                 break;
3325
3326                         skb = skb_dequeue(&chan->data_q);
3327
3328                         hci_send_frame(skb);
3329                         hdev->le_last_tx = jiffies;
3330
3331                         cnt--;
3332                         chan->sent++;
3333                         chan->conn->sent++;
3334                 }
3335         }
3336
3337         if (hdev->le_pkts)
3338                 hdev->le_cnt = cnt;
3339         else
3340                 hdev->acl_cnt = cnt;
3341
3342         if (cnt != tmp)
3343                 hci_prio_recalculate(hdev, LE_LINK);
3344 }
3345
3346 static void hci_tx_work(struct work_struct *work)
3347 {
3348         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3349         struct sk_buff *skb;
3350
3351         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
3352                hdev->sco_cnt, hdev->le_cnt);
3353
3354         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3355                 /* Schedule queues and send stuff to HCI driver */
3356                 hci_sched_acl(hdev);
3357                 hci_sched_sco(hdev);
3358                 hci_sched_esco(hdev);
3359                 hci_sched_le(hdev);
3360         }
3361
3362         /* Send next queued raw (unknown type) packet */
3363         while ((skb = skb_dequeue(&hdev->raw_q)))
3364                 hci_send_frame(skb);
3365 }
3366
3367 /* ----- HCI RX task (incoming data processing) ----- */
3368
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (!conn) {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
                kfree_skb(skb);
                return;
        }

        hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

        /* Send to upper protocol */
        l2cap_recv_acldata(conn, skb, flags);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (!conn) {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
                kfree_skb(skb);
                return;
        }

        /* Send to upper protocol */
        sco_recv_scodata(conn, skb);
}

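/* Commands are grouped into requests: the first command queued for a
 * request has bt_cb(skb)->req.start set, so an empty command queue or a
 * start marker at the head means the previous request was fully sent.
 *
 * Illustrative sketch (not verbatim code) of building such a request
 * with the hci_request helpers; my_complete_cb is a hypothetical
 * hci_req_complete_t callback:
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *      hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *      hci_req_run(&req, my_complete_cb);
 */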
static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}

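/* Requeue a clone of the last command we sent; used to recover when a
 * controller resets spontaneously during init and drops the pending
 * command (see the CSR note in hci_req_cmd_complete() below).
 */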
static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request, the complete
         * callback is found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}

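/* RX work callback: drains hdev->rx_q, which drivers fill through
 * hci_recv_frame(). Every frame is copied to the monitor (and, in
 * promiscuous mode, to raw sockets) before being demultiplexed by
 * packet type.
 */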
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags) ||
                    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

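/* Command work callback: HCI allows only a limited number of
 * outstanding commands, tracked in hdev->cmd_cnt and replenished when
 * the controller reports command credits in its events (handled in
 * hci_event.c). A clone of the frame is kept in hdev->sent_cmd so the
 * completion event can be matched back to the command, and cmd_timer
 * acts as the watchdog for a stuck command.
 */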
static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

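/* Map an exported bdaddr type (as used by mgmt and the HCI protocol)
 * to the internal ADDR_LE_DEV_* constants; anything that is not public
 * falls back to the random address type. For example:
 *
 *      bdaddr_to_le(BDADDR_LE_PUBLIC) == ADDR_LE_DEV_PUBLIC
 *      bdaddr_to_le(BDADDR_LE_RANDOM) == ADDR_LE_DEV_RANDOM
 */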
u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fallback to LE Random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}