drivers/net/caif/caif_hsi.c
1 /*
2  * Copyright (C) ST-Ericsson AB 2010
3  * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4  * Author:  Daniel Martensson / daniel.martensson@stericsson.com
5  *          Dmitry.Tarnyagin  / dmitry.tarnyagin@stericsson.com
6  * License terms: GNU General Public License (GPL) version 2.
7  */
8
9 #include <linux/init.h>
10 #include <linux/module.h>
11 #include <linux/device.h>
12 #include <linux/platform_device.h>
13 #include <linux/netdevice.h>
14 #include <linux/string.h>
15 #include <linux/list.h>
16 #include <linux/interrupt.h>
17 #include <linux/delay.h>
18 #include <linux/sched.h>
19 #include <linux/if_arp.h>
20 #include <linux/timer.h>
21 #include <linux/rtnetlink.h>
22 #include <net/caif/caif_layer.h>
23 #include <net/caif/caif_hsi.h>
24
25 MODULE_LICENSE("GPL");
26 MODULE_AUTHOR("Daniel Martensson <daniel.martensson@stericsson.com>");
27 MODULE_DESCRIPTION("CAIF HSI driver");
28
29 /* Returns the number of padding bytes for alignment. */
30 #define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
31                                 (((pow)-((x)&((pow)-1)))))
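   /*
    * Example: PAD_POW2(5, 4) == 3 (three pad bytes take 5 up to the next
    * multiple of 4), while PAD_POW2(8, 4) == 0 (already aligned).
    */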
32
33 static int inactivity_timeout = 1000;
34 module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
35 MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
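   /*
    * A value of -1 effectively disables the timeout (it is clamped to
    * NEXT_TIMER_MAX_DELTA); otherwise cfhsi_probe() converts this parameter
    * from milliseconds to jiffies.
    */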
36
37 /*
38  * HSI padding options.
39  * Warning: must be a power of 2 (a bitwise & is used) and cannot be zero!
40  */
41 static int hsi_head_align = 4;
42 module_param(hsi_head_align, int, S_IRUGO);
43 MODULE_PARM_DESC(hsi_head_align, "HSI head alignment.");
44
45 static int hsi_tail_align = 4;
46 module_param(hsi_tail_align, int, S_IRUGO);
47 MODULE_PARM_DESC(hsi_tail_align, "HSI tail alignment.");
48
49 /*
50  * HSI link layer flowcontrol thresholds.
51  * Warning: A high threshold value might increase throughput but it will at
52  * the same time prevent channel prioritization and increase the risk of
53  * flooding the modem. The high threshold should be above the low one.
54  */
55 static int hsi_high_threshold = 100;
56 module_param(hsi_high_threshold, int, S_IRUGO);
57 MODULE_PARM_DESC(hsi_high_threshold, "HSI high threshold (FLOW OFF).");
58
59 static int hsi_low_threshold = 50;
60 module_param(hsi_low_threshold, int, S_IRUGO);
61 MODULE_PARM_DESC(hsi_low_threshold, "HSI low threshold (FLOW ON).");
62
63 #define ON 1
64 #define OFF 0
65
66 /*
67  * Threshold values for the HSI packet queue. Flowcontrol will be asserted
68  * when the number of packets exceeds HIGH_WATER_MARK. It will not be
69  * de-asserted before the number of packets drops below LOW_WATER_MARK.
70  */
71 #define LOW_WATER_MARK   hsi_low_threshold
72 #define HIGH_WATER_MARK  hsi_high_threshold
73
74 static LIST_HEAD(cfhsi_list);
75 static spinlock_t cfhsi_list_lock;
76
77 static void cfhsi_inactivity_tout(unsigned long arg)
78 {
79         struct cfhsi *cfhsi = (struct cfhsi *)arg;
80
81         dev_dbg(&cfhsi->ndev->dev, "%s.\n",
82                 __func__);
83
84         /* Schedule power down work queue. */
85         if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
86                 queue_work(cfhsi->wq, &cfhsi->wake_down_work);
87 }
88
89 static void cfhsi_abort_tx(struct cfhsi *cfhsi)
90 {
91         struct sk_buff *skb;
92
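           /*
            * Drain and drop all queued TX packets. Note that when the queue
            * is empty the loop below breaks out with cfhsi->lock still held;
            * the lock is released after the post-loop state update.
            */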
93         for (;;) {
94                 spin_lock_bh(&cfhsi->lock);
95                 skb = skb_dequeue(&cfhsi->qhead);
96                 if (!skb)
97                         break;
98
99                 cfhsi->ndev->stats.tx_errors++;
100                 cfhsi->ndev->stats.tx_dropped++;
101                 spin_unlock_bh(&cfhsi->lock);
102                 kfree_skb(skb);
103         }
104         cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
105         if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
106                 mod_timer(&cfhsi->timer,
107                         jiffies + cfhsi->inactivity_timeout);
108         spin_unlock_bh(&cfhsi->lock);
109 }
110
111 static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
112 {
113         char buffer[32]; /* Any reasonable value */
114         size_t fifo_occupancy;
115         int ret;
116
117         dev_dbg(&cfhsi->ndev->dev, "%s.\n",
118                 __func__);
119
120
121         ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
122         if (ret) {
123                 dev_warn(&cfhsi->ndev->dev,
124                         "%s: can't wake up HSI interface: %d.\n",
125                         __func__, ret);
126                 return ret;
127         }
128
129         do {
130                 ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
131                                 &fifo_occupancy);
132                 if (ret) {
133                         dev_warn(&cfhsi->ndev->dev,
134                                 "%s: can't get FIFO occupancy: %d.\n",
135                                 __func__, ret);
136                         break;
137                 } else if (!fifo_occupancy)
138                         /* No more data, exiting normally */
139                         break;
140
141                 fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
142                 set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
143                 ret = cfhsi->dev->cfhsi_rx(buffer, fifo_occupancy,
144                                 cfhsi->dev);
145                 if (ret) {
146                         clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
147                         dev_warn(&cfhsi->ndev->dev,
148                                 "%s: can't read data: %d.\n",
149                                 __func__, ret);
150                         break;
151                 }
152
153                 ret = 5 * HZ;
154                 ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
155                          !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
156
157                 if (ret < 0) {
158                         dev_warn(&cfhsi->ndev->dev,
159                                 "%s: can't wait for flush complete: %d.\n",
160                                 __func__, ret);
161                         break;
162                 } else if (!ret) {
163                         ret = -ETIMEDOUT;
164                         dev_warn(&cfhsi->ndev->dev,
165                                 "%s: timeout waiting for flush complete.\n",
166                                 __func__);
167                         break;
168                 }
169         } while (1);
170
171         cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
172
173         return ret;
174 }
175
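    /*
     * Build a TX HSI frame from queued CAIF packets. As the code below
     * implies, an HSI frame starts with a descriptor: a header byte, an
     * offset byte pointing at an optional embedded CAIF frame, an array of
     * CFHSI_MAX_PKTS payload frame lengths (cffrm_len) and an embedded-frame
     * area of CFHSI_MAX_EMB_FRM_SZ bytes; payload CAIF frames follow the
     * descriptor. Every CAIF frame is preceded by head padding whose first
     * byte holds (hpad - 1). Returns the total transfer size, or 0 if the
     * queue is empty.
     */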
176 static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
177 {
178         int nfrms = 0;
179         int pld_len = 0;
180         struct sk_buff *skb;
181         u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
182
183         skb = skb_dequeue(&cfhsi->qhead);
184         if (!skb)
185                 return 0;
186
187         /* Clear offset. */
188         desc->offset = 0;
189
190         /* Check if we can embed a CAIF frame. */
191         if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
192                 struct caif_payload_info *info;
193                 int hpad = 0;
194                 int tpad = 0;
195
196                 /* Calculate needed head alignment and tail alignment. */
197                 info = (struct caif_payload_info *)&skb->cb;
198
199                 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
200                 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
201
202                 /* Check if frame still fits with added alignment. */
203                 if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
204                         u8 *pemb = desc->emb_frm;
205                         desc->offset = CFHSI_DESC_SHORT_SZ;
206                         *pemb = (u8)(hpad - 1);
207                         pemb += hpad;
208
209                         /* Update network statistics. */
210                         cfhsi->ndev->stats.tx_packets++;
211                         cfhsi->ndev->stats.tx_bytes += skb->len;
212
213                         /* Copy in embedded CAIF frame. */
214                         skb_copy_bits(skb, 0, pemb, skb->len);
215                         consume_skb(skb);
216                         skb = NULL;
217                 }
218         }
219
220         /* Create payload CAIF frames. */
221         pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
222         while (nfrms < CFHSI_MAX_PKTS) {
223                 struct caif_payload_info *info;
224                 int hpad = 0;
225                 int tpad = 0;
226
227                 if (!skb)
228                         skb = skb_dequeue(&cfhsi->qhead);
229
230                 if (!skb)
231                         break;
232
233                 /* Calculate needed head alignment and tail alignment. */
234                 info = (struct caif_payload_info *)&skb->cb;
235
236                 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
237                 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
238
239                 /* Fill in CAIF frame length in descriptor. */
240                 desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
241
242                 /* Fill head padding information. */
243                 *pfrm = (u8)(hpad - 1);
244                 pfrm += hpad;
245
246                 /* Update network statistics. */
247                 cfhsi->ndev->stats.tx_packets++;
248                 cfhsi->ndev->stats.tx_bytes += skb->len;
249
250                 /* Copy in CAIF frame. */
251                 skb_copy_bits(skb, 0, pfrm, skb->len);
252
253                 /* Update payload length. */
254                 pld_len += desc->cffrm_len[nfrms];
255
256                 /* Update frame pointer. */
257                 pfrm += skb->len + tpad;
258                 consume_skb(skb);
259                 skb = NULL;
260
261                 /* Update number of frames. */
262                 nfrms++;
263         }
264
265         /* Unused length fields should be zero-filled (according to SPEC). */
266         while (nfrms < CFHSI_MAX_PKTS) {
267                 desc->cffrm_len[nfrms] = 0x0000;
268                 nfrms++;
269         }
270
271         /* Check if we can piggy-back another descriptor. */
272         skb = skb_peek(&cfhsi->qhead);
273         if (skb)
274                 desc->header |= CFHSI_PIGGY_DESC;
275         else
276                 desc->header &= ~CFHSI_PIGGY_DESC;
277
278         return CFHSI_DESC_SZ + pld_len;
279 }
280
281 static void cfhsi_tx_done(struct cfhsi *cfhsi)
282 {
283         struct cfhsi_desc *desc = NULL;
284         int len = 0;
285         int res;
286
287         dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
288
289         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
290                 return;
291
292         desc = (struct cfhsi_desc *)cfhsi->tx_buf;
293
294         do {
295                 /*
296                  * Send flow on if flow off has been previously signalled
297                  * and number of packets is below low water mark.
298                  */
299                 spin_lock_bh(&cfhsi->lock);
300                 if (cfhsi->flow_off_sent &&
301                                 cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
302                                 cfhsi->cfdev.flowctrl) {
303
304                         cfhsi->flow_off_sent = 0;
305                         cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
306                 }
307                 spin_unlock_bh(&cfhsi->lock);
308
309                 /* Create HSI frame. */
310                 do {
311                         len = cfhsi_tx_frm(desc, cfhsi);
312                         if (!len) {
313                                 spin_lock_bh(&cfhsi->lock);
314                                 if (unlikely(skb_peek(&cfhsi->qhead))) {
315                                         spin_unlock_bh(&cfhsi->lock);
316                                         continue;
317                                 }
318                                 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
319                                 /* Start inactivity timer. */
320                                 mod_timer(&cfhsi->timer,
321                                         jiffies + cfhsi->inactivity_timeout);
322                                 spin_unlock_bh(&cfhsi->lock);
323                                 goto done;
324                         }
325                 } while (!len);
326
327                 /* Set up new transfer. */
328                 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
329                 if (WARN_ON(res < 0)) {
330                         dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
331                                 __func__, res);
332                 }
333         } while (res < 0);
334
335 done:
336         return;
337 }
338
339 static void cfhsi_tx_done_cb(struct cfhsi_drv *drv)
340 {
341         struct cfhsi *cfhsi;
342
343         cfhsi = container_of(drv, struct cfhsi, drv);
344         dev_dbg(&cfhsi->ndev->dev, "%s.\n",
345                 __func__);
346
347         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
348                 return;
349         cfhsi_tx_done(cfhsi);
350 }
351
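    /*
     * Parse a received descriptor: deliver the embedded CAIF frame (if any)
     * to the network stack and compute the size of the transfer that follows
     * the descriptor: the sum of the announced payload frame lengths, plus
     * CFHSI_DESC_SZ if a piggy-backed descriptor is flagged. Returns that
     * size, 0 if nothing follows, -EPROTO on a malformed descriptor or
     * -ENOMEM on allocation failure.
     */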
352 static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
353 {
354         int xfer_sz = 0;
355         int nfrms = 0;
356         u16 *plen = NULL;
357         u8 *pfrm = NULL;
358
359         if ((desc->header & ~CFHSI_PIGGY_DESC) ||
360                         (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
361                 dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
362                         __func__);
363                 return -EPROTO;
364         }
365
366         /* Check for embedded CAIF frame. */
367         if (desc->offset) {
368                 struct sk_buff *skb;
369                 u8 *dst = NULL;
370                 int len = 0;
371                 pfrm = ((u8 *)desc) + desc->offset;
372
373                 /* Remove offset padding. */
374                 pfrm += *pfrm + 1;
375
376                 /* Read length of CAIF frame (little endian). */
377                 len = *pfrm;
378                 len |= ((*(pfrm+1)) << 8) & 0xFF00;
379                 len += 2;       /* Add FCS fields. */
380
381                 /* Sanity check length of CAIF frame. */
382                 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
383                         dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
384                                 __func__);
385                         return -EPROTO;
386                 }
387
388                 /* Allocate SKB (OK even in IRQ context). */
389                 skb = alloc_skb(len + 1, GFP_ATOMIC);
390                 if (!skb) {
391                         dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
392                                 __func__);
393                         return -ENOMEM;
394                 }
395                 caif_assert(skb != NULL);
396
397                 dst = skb_put(skb, len);
398                 memcpy(dst, pfrm, len);
399
400                 skb->protocol = htons(ETH_P_CAIF);
401                 skb_reset_mac_header(skb);
402                 skb->dev = cfhsi->ndev;
403
404                 /*
405                  * We are called from an arch-specific platform device.
406                  * Unfortunately we don't know what context we're
407                  * running in.
408                  */
409                 if (in_interrupt())
410                         netif_rx(skb);
411                 else
412                         netif_rx_ni(skb);
413
414                 /* Update network statistics. */
415                 cfhsi->ndev->stats.rx_packets++;
416                 cfhsi->ndev->stats.rx_bytes += len;
417         }
418
419         /* Calculate transfer length. */
420         plen = desc->cffrm_len;
421         while (nfrms < CFHSI_MAX_PKTS && *plen) {
422                 xfer_sz += *plen;
423                 plen++;
424                 nfrms++;
425         }
426
427         /* Check for piggy-backed descriptor. */
428         if (desc->header & CFHSI_PIGGY_DESC)
429                 xfer_sz += CFHSI_DESC_SZ;
430
431         if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
432                 dev_err(&cfhsi->ndev->dev,
433                                 "%s: Invalid payload len: %d, ignored.\n",
434                         __func__, xfer_sz);
435                 return -EPROTO;
436         }
437         return xfer_sz;
438 }
439
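    /*
     * Parse the payload area: walk the cffrm_len array, skip any frames
     * already delivered in a previous out-of-memory pass (rx_state.nfrms),
     * and hand each remaining CAIF frame to the network stack. Returns the
     * number of payload bytes consumed, or a negative errno.
     */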
440 static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
441 {
442         int rx_sz = 0;
443         int nfrms = 0;
444         u16 *plen = NULL;
445         u8 *pfrm = NULL;
446
447         /* Sanity check header and offset. */
448         if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
449                         (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
450                 dev_err(&cfhsi->ndev->dev, "%s: Invalid descriptor.\n",
451                         __func__);
452                 return -EPROTO;
453         }
454
455         /* Set frame pointer to start of payload. */
456         pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
457         plen = desc->cffrm_len;
458
459         /* Skip already processed frames. */
460         while (nfrms < cfhsi->rx_state.nfrms) {
461                 pfrm += *plen;
462                 rx_sz += *plen;
463                 plen++;
464                 nfrms++;
465         }
466
467         /* Parse payload. */
468         while (nfrms < CFHSI_MAX_PKTS && *plen) {
469                 struct sk_buff *skb;
470                 u8 *dst = NULL;
471                 u8 *pcffrm = NULL;
472                 int len = 0;
473
474                 /* CAIF frame starts after head padding. */
475                 pcffrm = pfrm + *pfrm + 1;
476
477                 /* Read length of CAIF frame (little endian). */
478                 len = *pcffrm;
479                 len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
480                 len += 2;       /* Add FCS fields. */
481
482                 /* Sanity check length of CAIF frames. */
483                 if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
484                         dev_err(&cfhsi->ndev->dev, "%s: Invalid length.\n",
485                                 __func__);
486                         return -EPROTO;
487                 }
488
489                 /* Allocate SKB (OK even in IRQ context). */
490                 skb = alloc_skb(len + 1, GFP_ATOMIC);
491                 if (!skb) {
492                         dev_err(&cfhsi->ndev->dev, "%s: Out of memory !\n",
493                                 __func__);
494                         cfhsi->rx_state.nfrms = nfrms;
495                         return -ENOMEM;
496                 }
497                 caif_assert(skb != NULL);
498
499                 dst = skb_put(skb, len);
500                 memcpy(dst, pcffrm, len);
501
502                 skb->protocol = htons(ETH_P_CAIF);
503                 skb_reset_mac_header(skb);
504                 skb->dev = cfhsi->ndev;
505
506                 /*
507                  * We're called from a platform device,
508                  * and don't know the context we're running in.
509                  */
510                 if (in_interrupt())
511                         netif_rx(skb);
512                 else
513                         netif_rx_ni(skb);
514
515                 /* Update network statistics. */
516                 cfhsi->ndev->stats.rx_packets++;
517                 cfhsi->ndev->stats.rx_bytes += len;
518
519                 pfrm += *plen;
520                 rx_sz += *plen;
521                 plen++;
522                 nfrms++;
523         }
524
525         return rx_sz;
526 }
527
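    /*
     * RX completion: runs the two-state RX machine. In the DESC state the
     * RX buffer holds a bare descriptor; in the PAYLOAD state it holds the
     * payload announced by the previous descriptor, optionally followed by
     * a piggy-backed descriptor for the next round.
     */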
528 static void cfhsi_rx_done(struct cfhsi *cfhsi)
529 {
530         int res;
531         int desc_pld_len = 0;
532         struct cfhsi_desc *desc = NULL;
533
534         desc = (struct cfhsi_desc *)cfhsi->rx_buf;
535
536         dev_dbg(&cfhsi->ndev->dev, "%s\n", __func__);
537
538         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
539                 return;
540
541         /* Update inactivity timer if pending. */
542         spin_lock_bh(&cfhsi->lock);
543         mod_timer_pending(&cfhsi->timer,
544                         jiffies + cfhsi->inactivity_timeout);
545         spin_unlock_bh(&cfhsi->lock);
546
547         if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
548                 desc_pld_len = cfhsi_rx_desc(desc, cfhsi);
549                 if (desc_pld_len == -ENOMEM)
550                         goto restart;
551                 if (desc_pld_len == -EPROTO)
552                         goto out_of_sync;
553         } else {
554                 int pld_len;
555
556                 if (!cfhsi->rx_state.piggy_desc) {
557                         pld_len = cfhsi_rx_pld(desc, cfhsi);
558                         if (pld_len == -ENOMEM)
559                                 goto restart;
560                         if (pld_len == -EPROTO)
561                                 goto out_of_sync;
562                         cfhsi->rx_state.pld_len = pld_len;
563                 } else {
564                         pld_len = cfhsi->rx_state.pld_len;
565                 }
566
567                 if ((pld_len > 0) && (desc->header & CFHSI_PIGGY_DESC)) {
568                         struct cfhsi_desc *piggy_desc;
569                         piggy_desc = (struct cfhsi_desc *)
570                                 (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
571                                                 pld_len);
572                         cfhsi->rx_state.piggy_desc = true;
573
574                         /* Extract piggy-backed descriptor. */
575                         desc_pld_len = cfhsi_rx_desc(piggy_desc, cfhsi);
576                         if (desc_pld_len == -ENOMEM)
577                                 goto restart;
578
579                         /*
580                          * Copy needed information from the piggy-backed
581                          * descriptor to the descriptor at the start.
582                          */
583                         memcpy((u8 *)desc, (u8 *)piggy_desc,
584                                         CFHSI_DESC_SHORT_SZ);
585
586                         if (desc_pld_len == -EPROTO)
587                                 goto out_of_sync;
588                 }
589         }
590
591         memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
592         if (desc_pld_len) {
593                 cfhsi->rx_state.state = CFHSI_RX_STATE_PAYLOAD;
594                 cfhsi->rx_ptr = cfhsi->rx_buf + CFHSI_DESC_SZ;
595                 cfhsi->rx_len = desc_pld_len;
596         } else {
597                 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
598                 cfhsi->rx_ptr = cfhsi->rx_buf;
599                 cfhsi->rx_len = CFHSI_DESC_SZ;
600         }
601
602         if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
603                 /* Set up new transfer. */
604                 dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n",
605                         __func__);
606                 res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len,
607                                 cfhsi->dev);
608                 if (WARN_ON(res < 0)) {
609                         dev_err(&cfhsi->ndev->dev, "%s: RX error %d.\n",
610                                 __func__, res);
611                         cfhsi->ndev->stats.rx_errors++;
612                         cfhsi->ndev->stats.rx_dropped++;
613                 }
614         }
615         return;
616
617 restart:
618         if (++cfhsi->rx_state.retries > CFHSI_MAX_RX_RETRIES) {
619                 dev_err(&cfhsi->ndev->dev, "%s: No memory available "
620                         "in %d iterations.\n",
621                         __func__, CFHSI_MAX_RX_RETRIES);
622                 BUG();
623         }
624         mod_timer(&cfhsi->rx_slowpath_timer, jiffies + 1);
625         return;
626
627 out_of_sync:
628         dev_err(&cfhsi->ndev->dev, "%s: Out of sync.\n", __func__);
629         print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
630                         cfhsi->rx_buf, CFHSI_DESC_SZ);
631         schedule_work(&cfhsi->out_of_sync_work);
632 }
633
634 static void cfhsi_rx_slowpath(unsigned long arg)
635 {
636         struct cfhsi *cfhsi = (struct cfhsi *)arg;
637
638         dev_dbg(&cfhsi->ndev->dev, "%s.\n",
639                 __func__);
640
641         cfhsi_rx_done(cfhsi);
642 }
643
644 static void cfhsi_rx_done_cb(struct cfhsi_drv *drv)
645 {
646         struct cfhsi *cfhsi;
647
648         cfhsi = container_of(drv, struct cfhsi, drv);
649         dev_dbg(&cfhsi->ndev->dev, "%s.\n",
650                 __func__);
651
652         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
653                 return;
654
655         if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
656                 wake_up_interruptible(&cfhsi->flush_fifo_wait);
657         else
658                 cfhsi_rx_done(cfhsi);
659 }
660
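    /*
     * Wake-up work: drives the host side of the wake handshake. Assert the
     * wake line, wait for the peer's acknowledgement (CFHSI_WAKE_UP_ACK),
     * then restart RX and, if packets are queued, TX. On timeout the FIFO
     * and the peer wake line are checked in case the acknowledge interrupt
     * was missed.
     */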
661 static void cfhsi_wake_up(struct work_struct *work)
662 {
663         struct cfhsi *cfhsi = NULL;
664         int res;
665         int len;
666         long ret;
667
668         cfhsi = container_of(work, struct cfhsi, wake_up_work);
669
670         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
671                 return;
672
673         if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
674                 /* This happens when wakeup is requested by
675                  * both ends at the same time. */
676                 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
677                 clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
678                 return;
679         }
680
681         /* Activate wake line. */
682         cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
683
684         dev_dbg(&cfhsi->ndev->dev, "%s: Start waiting.\n",
685                 __func__);
686
687         /* Wait for acknowledge. */
688         ret = CFHSI_WAKE_TOUT;
689         ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
690                                         test_and_clear_bit(CFHSI_WAKE_UP_ACK,
691                                                         &cfhsi->bits), ret);
692         if (unlikely(ret < 0)) {
693                 /* Interrupted by signal. */
694                 dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
695                         __func__, ret);
696
697                 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
698                 cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
699                 return;
700         } else if (!ret) {
701                 bool ca_wake = false;
702                 size_t fifo_occupancy = 0;
703
704                 /* Wakeup timeout */
705                 dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
706                         __func__);
707
708                 /* Check the FIFO to see if the modem has sent something. */
709                 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
710                                         &fifo_occupancy));
711
712                 dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
713                                 __func__, (unsigned) fifo_occupancy);
714
715                 /* Check if we missed the interrupt. */
716                 WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
717                                                         &ca_wake));
718
719                 if (ca_wake) {
720                         dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
721                                 __func__);
722
723                         /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
724                         clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
725
726                         /* Continue execution. */
727                         goto wake_ack;
728                 }
729
730                 clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
731                 cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
732                 return;
733         }
734 wake_ack:
735         dev_dbg(&cfhsi->ndev->dev, "%s: Woken.\n",
736                 __func__);
737
738         /* Mark the link awake and clear the pending wake-up request. */
739         set_bit(CFHSI_AWAKE, &cfhsi->bits);
740         clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
741
742         /* Resume read operation. */
743         dev_dbg(&cfhsi->ndev->dev, "%s: Start RX.\n", __func__);
744         res = cfhsi->dev->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->dev);
745
746         if (WARN_ON(res < 0))
747                 dev_err(&cfhsi->ndev->dev, "%s: RX err %d.\n", __func__, res);
748
749         /* Clear power up acknowledgement. */
750         clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
751
752         spin_lock_bh(&cfhsi->lock);
753
754         /* If the queue is empty, just restart the inactivity timer; else resume TX. */
755         if (!skb_peek(&cfhsi->qhead)) {
756                 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
757                         __func__);
758                 /* Start inactivity timer. */
759                 mod_timer(&cfhsi->timer,
760                                 jiffies + cfhsi->inactivity_timeout);
761                 spin_unlock_bh(&cfhsi->lock);
762                 return;
763         }
764
765         dev_dbg(&cfhsi->ndev->dev, "%s: Host wake.\n",
766                 __func__);
767
768         spin_unlock_bh(&cfhsi->lock);
769
770         /* Create HSI frame. */
771         len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
772
773         if (likely(len > 0)) {
774                 /* Set up new transfer. */
775                 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
776                 if (WARN_ON(res < 0)) {
777                         dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
778                                 __func__, res);
779                         cfhsi_abort_tx(cfhsi);
780                 }
781         } else {
782                 dev_err(&cfhsi->ndev->dev,
783                                 "%s: Failed to create HSI frame: %d.\n",
784                                 __func__, len);
785         }
786 }
787
788 static void cfhsi_wake_down(struct work_struct *work)
789 {
790         long ret;
791         struct cfhsi *cfhsi = NULL;
792         size_t fifo_occupancy = 0;
793         int retry = CFHSI_WAKE_TOUT;
794
795         cfhsi = container_of(work, struct cfhsi, wake_down_work);
796         dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
797
798         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
799                 return;
800
801         /* Deactivate wake line. */
802         cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
803
804         /* Wait for acknowledge. */
805         ret = CFHSI_WAKE_TOUT;
806         ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
807                                         test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
808                                                         &cfhsi->bits), ret);
809         if (ret < 0) {
810                 /* Interrupted by signal. */
811                 dev_err(&cfhsi->ndev->dev, "%s: Signalled: %ld.\n",
812                         __func__, ret);
813                 return;
814         } else if (!ret) {
815                 bool ca_wake = true;
816
817                 /* Timeout */
818                 dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__);
819
820                 /* Check if we missed the interrupt. */
821                 WARN_ON(cfhsi->dev->cfhsi_get_peer_wake(cfhsi->dev,
822                                                         &ca_wake));
823                 if (!ca_wake)
824                         dev_err(&cfhsi->ndev->dev, "%s: CA Wake missed !.\n",
825                                 __func__);
826         }
827
828         /* Check FIFO occupancy. */
829         while (retry) {
830                 WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
831                                                         &fifo_occupancy));
832
833                 if (!fifo_occupancy)
834                         break;
835
836                 set_current_state(TASK_INTERRUPTIBLE);
837                 schedule_timeout(1);
838                 retry--;
839         }
840
841         if (!retry)
842                 dev_err(&cfhsi->ndev->dev, "%s: FIFO Timeout.\n", __func__);
843
844         /* Clear AWAKE condition. */
845         clear_bit(CFHSI_AWAKE, &cfhsi->bits);
846
847         /* Cancel pending RX requests. */
848         cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
849
850 }
851
852 static void cfhsi_out_of_sync(struct work_struct *work)
853 {
854         struct cfhsi *cfhsi = NULL;
855
856         cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
857
858         rtnl_lock();
859         dev_close(cfhsi->ndev);
860         rtnl_unlock();
861 }
862
863 static void cfhsi_wake_up_cb(struct cfhsi_drv *drv)
864 {
865         struct cfhsi *cfhsi = NULL;
866
867         cfhsi = container_of(drv, struct cfhsi, drv);
868         dev_dbg(&cfhsi->ndev->dev, "%s.\n",
869                 __func__);
870
871         set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
872         wake_up_interruptible(&cfhsi->wake_up_wait);
873
874         if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
875                 return;
876
877         /* Schedule wake up work queue if the peer initiates. */
878         if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
879                 queue_work(cfhsi->wq, &cfhsi->wake_up_work);
880 }
881
882 static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
883 {
884         struct cfhsi *cfhsi = NULL;
885
886         cfhsi = container_of(drv, struct cfhsi, drv);
887         dev_dbg(&cfhsi->ndev->dev, "%s.\n",
888                 __func__);
889
890         /* Initiating low power is only permitted by the host (us). */
891         set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
892         wake_up_interruptible(&cfhsi->wake_down_wait);
893 }
894
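    /*
     * Transmit entry point: queue the packet, assert flow control if the
     * queue grows past the high water mark, and start a transfer if the TX
     * state machine is idle. If the inactivity timer was running the link
     * is already awake and the frame is sent directly; otherwise the
     * wake-up work is scheduled and transmission resumes from
     * cfhsi_wake_up().
     */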
895 static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
896 {
897         struct cfhsi *cfhsi = NULL;
898         int start_xfer = 0;
899         int timer_active;
900
901         if (!dev)
902                 return -EINVAL;
903
904         cfhsi = netdev_priv(dev);
905
906         spin_lock_bh(&cfhsi->lock);
907
908         skb_queue_tail(&cfhsi->qhead, skb);
909
910         /* Sanity check; xmit should not be called after unregister_netdev */
911         if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
912                 spin_unlock_bh(&cfhsi->lock);
913                 cfhsi_abort_tx(cfhsi);
914                 return -EINVAL;
915         }
916
917         /* Send flow off if number of packets is above high water mark. */
918         if (!cfhsi->flow_off_sent &&
919                 cfhsi->qhead.qlen > cfhsi->q_high_mark &&
920                 cfhsi->cfdev.flowctrl) {
921                 cfhsi->flow_off_sent = 1;
922                 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
923         }
924
925         if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
926                 cfhsi->tx_state = CFHSI_TX_STATE_XFER;
927                 start_xfer = 1;
928         }
929
930         if (!start_xfer) {
931                 spin_unlock_bh(&cfhsi->lock);
932                 return 0;
933         }
934
935         /* Delete inactivity timer if started. */
936         timer_active = del_timer_sync(&cfhsi->timer);
937
938         spin_unlock_bh(&cfhsi->lock);
939
940         if (timer_active) {
941                 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
942                 int len;
943                 int res;
944
945                 /* Create HSI frame. */
946                 len = cfhsi_tx_frm(desc, cfhsi);
947                 BUG_ON(!len);
948
949                 /* Set up new transfer. */
950                 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
951                 if (WARN_ON(res < 0)) {
952                         dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
953                                 __func__, res);
954                         cfhsi_abort_tx(cfhsi);
955                 }
956         } else {
957                 /* Schedule wake up work queue if we initiate. */
958                 if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
959                         queue_work(cfhsi->wq, &cfhsi->wake_up_work);
960         }
961
962         return 0;
963 }
964
965 static int cfhsi_open(struct net_device *dev)
966 {
967         netif_wake_queue(dev);
968
969         return 0;
970 }
971
972 static int cfhsi_close(struct net_device *dev)
973 {
974         netif_stop_queue(dev);
975
976         return 0;
977 }
978
979 static const struct net_device_ops cfhsi_ops = {
980         .ndo_open = cfhsi_open,
981         .ndo_stop = cfhsi_close,
982         .ndo_start_xmit = cfhsi_xmit
983 };
984
985 static void cfhsi_setup(struct net_device *dev)
986 {
987         struct cfhsi *cfhsi = netdev_priv(dev);
988         dev->features = 0;
989         dev->netdev_ops = &cfhsi_ops;
990         dev->type = ARPHRD_CAIF;
991         dev->flags = IFF_POINTOPOINT | IFF_NOARP;
992         dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
993         dev->tx_queue_len = 0;
994         dev->destructor = free_netdev;
995         skb_queue_head_init(&cfhsi->qhead);
996         cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
997         cfhsi->cfdev.use_frag = false;
998         cfhsi->cfdev.use_stx = false;
999         cfhsi->cfdev.use_fcs = false;
1000         cfhsi->ndev = dev;
1001 }
1002
1003 int cfhsi_probe(struct platform_device *pdev)
1004 {
1005         struct cfhsi *cfhsi = NULL;
1006         struct net_device *ndev;
1007         struct cfhsi_dev *dev;
1008         int res;
1009
1010         ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
1011         if (!ndev)
1012                 return -ENOMEM;
1013
1014         cfhsi = netdev_priv(ndev);
1015         cfhsi->ndev = ndev;
1016         cfhsi->pdev = pdev;
1017
1018         /* Initialize state variables. */
1019         cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
1020         cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
1021
1022         /* Set flow info */
1023         cfhsi->flow_off_sent = 0;
1024         cfhsi->q_low_mark = LOW_WATER_MARK;
1025         cfhsi->q_high_mark = HIGH_WATER_MARK;
1026
1027         /* Assign the HSI device. */
1028         dev = (struct cfhsi_dev *)pdev->dev.platform_data;
1029         cfhsi->dev = dev;
1030
1031         /* Assign the driver to this HSI device. */
1032         dev->drv = &cfhsi->drv;
1033
1034         /*
1035          * Allocate a TX buffer with the size of one HSI packet descriptor
1036          * and the necessary room for CAIF payload frames.
1037          */
1038         cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
1039         if (!cfhsi->tx_buf) {
1040                 res = -ENOMEM;
1041                 goto err_alloc_tx;
1042         }
1043
1044         /*
1045          * Allocate a RX buffer with the size of two HSI packet descriptors and
1046          * the necessary room for CAIF payload frames.
1047          */
1048         cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
1049         if (!cfhsi->rx_buf) {
1050                 res = -ENOMEM;
1051                 goto err_alloc_rx;
1052         }
1053
1054         /* Pre-calculate inactivity timeout. */
1055         if (inactivity_timeout != -1) {
1056                 cfhsi->inactivity_timeout =
1057                                 inactivity_timeout * HZ / 1000;
1058                 if (!cfhsi->inactivity_timeout)
1059                         cfhsi->inactivity_timeout = 1;
1060                 else if (cfhsi->inactivity_timeout > NEXT_TIMER_MAX_DELTA)
1061                         cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1062         } else {
1063                 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1064         }
1065
1066         /* Initialize receive variables. */
1067         cfhsi->rx_ptr = cfhsi->rx_buf;
1068         cfhsi->rx_len = CFHSI_DESC_SZ;
1069
1070         /* Initialize spin locks. */
1071         spin_lock_init(&cfhsi->lock);
1072
1073         /* Set up the driver. */
1074         cfhsi->drv.tx_done_cb = cfhsi_tx_done_cb;
1075         cfhsi->drv.rx_done_cb = cfhsi_rx_done_cb;
1076         cfhsi->drv.wake_up_cb = cfhsi_wake_up_cb;
1077         cfhsi->drv.wake_down_cb = cfhsi_wake_down_cb;
1078
1079         /* Initialize the work queues. */
1080         INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
1081         INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
1082         INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
1083
1084         /* Clear all bit fields. */
1085         clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
1086         clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
1087         clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
1088         clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1089
1090         /* Create the work queue. */
1091         cfhsi->wq = create_singlethread_workqueue(pdev->name);
1092         if (!cfhsi->wq) {
1093                 dev_err(&ndev->dev, "%s: Failed to create work queue.\n",
1094                         __func__);
1095                 res = -ENODEV;
1096                 goto err_create_wq;
1097         }
1098
1099         /* Initialize wait queues. */
1100         init_waitqueue_head(&cfhsi->wake_up_wait);
1101         init_waitqueue_head(&cfhsi->wake_down_wait);
1102         init_waitqueue_head(&cfhsi->flush_fifo_wait);
1103
1104         /* Set up the inactivity timer. */
1105         init_timer(&cfhsi->timer);
1106         cfhsi->timer.data = (unsigned long)cfhsi;
1107         cfhsi->timer.function = cfhsi_inactivity_tout;
1108         /* Set up the slowpath RX timer. */
1109         init_timer(&cfhsi->rx_slowpath_timer);
1110         cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
1111         cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
1112
1113         /* Add CAIF HSI device to list. */
1114         spin_lock(&cfhsi_list_lock);
1115         list_add_tail(&cfhsi->list, &cfhsi_list);
1116         spin_unlock(&cfhsi_list_lock);
1117
1118         /* Activate HSI interface. */
1119         res = cfhsi->dev->cfhsi_up(cfhsi->dev);
1120         if (res) {
1121                 dev_err(&cfhsi->ndev->dev,
1122                         "%s: can't activate HSI interface: %d.\n",
1123                         __func__, res);
1124                 goto err_activate;
1125         }
1126
1127         /* Flush FIFO */
1128         res = cfhsi_flush_fifo(cfhsi);
1129         if (res) {
1130                 dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n",
1131                         __func__, res);
1132                 goto err_net_reg;
1133         }
1134
1135         /* Register network device. */
1136         res = register_netdev(ndev);
1137         if (res) {
1138                 dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1139                         __func__, res);
1140                 goto err_net_reg;
1141         }
1142
1143         netif_stop_queue(ndev);
1144
1145         return res;
1146
1147  err_net_reg:
1148         cfhsi->dev->cfhsi_down(cfhsi->dev);
1149  err_activate:
             /* Undo the earlier list_add_tail(); the device never came up. */
             spin_lock(&cfhsi_list_lock);
             list_del(&cfhsi->list);
             spin_unlock(&cfhsi_list_lock);
1150         destroy_workqueue(cfhsi->wq);
1151  err_create_wq:
1152         kfree(cfhsi->rx_buf);
1153  err_alloc_rx:
1154         kfree(cfhsi->tx_buf);
1155  err_alloc_tx:
1156         free_netdev(ndev);
1157
1158         return res;
1159 }
1160
1161 static void cfhsi_shutdown(struct cfhsi *cfhsi)
1162 {
1163         u8 *tx_buf, *rx_buf;
1164
1165         /* Stop TXing */
1166         netif_tx_stop_all_queues(cfhsi->ndev);
1167
1168         /* Going to shut down the driver. */
1169         set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1170
1171         /* Flush workqueue */
1172         flush_workqueue(cfhsi->wq);
1173
1174         /* Delete timers if pending */
1175         del_timer_sync(&cfhsi->timer);
1176         del_timer_sync(&cfhsi->rx_slowpath_timer);
1177
1178         /* Cancel pending RX request (if any) */
1179         cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
1180
1181         /* Destroy workqueue */
1182         destroy_workqueue(cfhsi->wq);
1183
1184         /* Store buffers: they will be freed later. */
1185         tx_buf = cfhsi->tx_buf;
1186         rx_buf = cfhsi->rx_buf;
1187
1188         /* Flush transmit queues. */
1189         cfhsi_abort_tx(cfhsi);
1190
1191         /* Deactivate interface */
1192         cfhsi->dev->cfhsi_down(cfhsi->dev);
1193
1194         /* Finally unregister the network device. */
1195         unregister_netdev(cfhsi->ndev);
1196
1197         /* Free buffers. */
1198         kfree(tx_buf);
1199         kfree(rx_buf);
1200 }
1201
1202 int cfhsi_remove(struct platform_device *pdev)
1203 {
1204         struct list_head *list_node;
1205         struct list_head *n;
1206         struct cfhsi *cfhsi = NULL;
1207         struct cfhsi_dev *dev;
1208
1209         dev = (struct cfhsi_dev *)pdev->dev.platform_data;
1210         spin_lock(&cfhsi_list_lock);
1211         list_for_each_safe(list_node, n, &cfhsi_list) {
1212                 cfhsi = list_entry(list_node, struct cfhsi, list);
1213                 /* Find the corresponding device. */
1214                 if (cfhsi->dev == dev) {
1215                         /* Remove from list. */
1216                         list_del(list_node);
1217                         spin_unlock(&cfhsi_list_lock);
1218
1219                         /* Shutdown driver. */
1220                         cfhsi_shutdown(cfhsi);
1221
1222                         return 0;
1223                 }
1224         }
1225         spin_unlock(&cfhsi_list_lock);
1226         return -ENODEV;
1227 }
1228
1229 struct platform_driver cfhsi_plat_drv = {
1230         .probe = cfhsi_probe,
1231         .remove = cfhsi_remove,
1232         .driver = {
1233                    .name = "cfhsi",
1234                    .owner = THIS_MODULE,
1235                    },
1236 };
1237
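     /*
      * For reference, a minimal sketch of how a board file might bind to
      * this driver: register a platform device named "cfhsi" whose
      * platform_data points at a struct cfhsi_dev with the low-level
      * callbacks filled in. The callback names below are the ones this file
      * calls; the my_hsi_* helpers are assumptions, not part of this driver.
      *
      *	static struct cfhsi_dev my_hsi_dev = {
      *		.cfhsi_up		= my_hsi_up,
      *		.cfhsi_down		= my_hsi_down,
      *		.cfhsi_tx		= my_hsi_tx,
      *		.cfhsi_rx		= my_hsi_rx,
      *		.cfhsi_rx_cancel	= my_hsi_rx_cancel,
      *		.cfhsi_wake_up		= my_hsi_wake_up,
      *		.cfhsi_wake_down	= my_hsi_wake_down,
      *		.cfhsi_get_peer_wake	= my_hsi_get_peer_wake,
      *		.cfhsi_fifo_occupancy	= my_hsi_fifo_occupancy,
      *	};
      *
      *	static struct platform_device my_cfhsi_pdev = {
      *		.name			= "cfhsi",
      *		.id			= 0,
      *		.dev.platform_data	= &my_hsi_dev,
      *	};
      *
      *	platform_device_register(&my_cfhsi_pdev);
      */
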
1238 static void __exit cfhsi_exit_module(void)
1239 {
1240         struct list_head *list_node;
1241         struct list_head *n;
1242         struct cfhsi *cfhsi = NULL;
1243
1244         spin_lock(&cfhsi_list_lock);
1245         list_for_each_safe(list_node, n, &cfhsi_list) {
1246                 cfhsi = list_entry(list_node, struct cfhsi, list);
1247
1248                 /* Remove from list. */
1249                 list_del(list_node);
1250                 spin_unlock(&cfhsi_list_lock);
1251
1252                 /* Shutdown driver. */
1253                 cfhsi_shutdown(cfhsi);
1254
1255                 spin_lock(&cfhsi_list_lock);
1256         }
1257         spin_unlock(&cfhsi_list_lock);
1258
1259         /* Unregister platform driver. */
1260         platform_driver_unregister(&cfhsi_plat_drv);
1261 }
1262
1263 static int __init cfhsi_init_module(void)
1264 {
1265         int result;
1266
1267         /* Initialize spin lock. */
1268         spin_lock_init(&cfhsi_list_lock);
1269
1270         /* Register platform driver. */
1271         result = platform_driver_register(&cfhsi_plat_drv);
1272         if (result) {
1273                 printk(KERN_ERR "Could not register platform HSI driver: %d.\n",
1274                         result);
1275                 goto err_dev_register;
1276         }
1277
1278         return result;
1279
1280  err_dev_register:
1281         return result;
1282 }
1283
1284 module_init(cfhsi_init_module);
1285 module_exit(cfhsi_exit_module);