/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *           Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>

#define NR_TX_BUF               6
#define NR_RX_BUF               6
#define TX_BUF_SZ               0x2000
#define RX_BUF_SZ               0x2000

#define CAIF_NEEDED_HEADROOM    32

#define CAIF_FLOW_ON            1
#define CAIF_FLOW_OFF           0

#define LOW_WATERMARK           3
#define HIGH_WATERMARK          4

/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF    10

/*
 * Size in bytes of the descriptor area
 * (With end of descriptor signalling)
 */
#define SHM_CAIF_DESC_SIZE      ((SHM_MAX_FRMS_PER_BUF + 1) * \
                                        sizeof(struct shm_pck_desc))
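/*
 * NB: with two u32 members, sizeof(struct shm_pck_desc) is normally
 * 8 bytes, giving an 88-byte descriptor area; the extra "+ 1" entry
 * holds the terminating descriptor (frm_ofs == 0).
 */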

/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS        (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
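/*
 * NB: "x + (x % 32)" is a multiple of 32 only when x % 32 is 0 or 16;
 * for an 88-byte descriptor area the first frame starts at offset 112,
 * which is 16-byte rather than 32-byte aligned. The same arithmetic is
 * used when advancing frm_ofs in shm_tx_work_func().
 */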

/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN             1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN         4

#define CAIF_MAX_MTU            4096
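/*
 * NB: with an MTU of 4096, a maximum-sized frame always fits in one
 * 0x2000-byte buffer, even after the descriptor area, the per-frame
 * header byte and the tail padding.
 */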

#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x)        (((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)        (((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK           (0x0F << 0)
#define SHM_EMPTY_MASK          (0x0F << 4)
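/*
 * The mailbox word carries two 4-bit fields: bits 0-3 hold (index + 1)
 * of a buffer that became full, bits 4-7 hold (index + 1) of a buffer
 * that was emptied. Indices are stored off-by-one so that a zero
 * nibble means "no event", e.g. SHM_SET_FULL(2) == 0x03 and
 * SHM_SET_EMPTY(2) == 0x30.
 */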

struct shm_pck_desc {
        /*
         * Offset from start of shared memory area to start of
         * shared memory CAIF frame.
         */
        u32 frm_ofs;
        u32 frm_len;
};
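
/*
 * A frm_ofs of zero terminates the descriptor list; the Rx loop in
 * shm_rx_work_func() stops on it and shm_tx_work_func() writes it
 * after the last valid descriptor.
 */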

struct buf_list {
        unsigned char *desc_vptr;       /* Virtual address of the buffer. */
        u32 phy_addr;                   /* Physical address of the buffer. */
        u32 index;                      /* Buffer index used in mailbox messages. */
        u32 len;                        /* Total buffer length in bytes. */
        u32 frames;                     /* Number of frames currently in the buffer. */
        u32 frm_ofs;                    /* Offset at which the next frame is written. */
        struct list_head list;
};

struct shm_caif_frm {
        /* Number of bytes of padding before the CAIF frame. */
        u8 hdr_ofs;
};
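
/*
 * NB: each frame in a buffer is laid out as one header byte
 * (SHM_HDR_LEN) holding hdr_ofs, then hdr_ofs padding bytes, then the
 * CAIF frame itself, with the total length padded to a multiple of
 * SHM_FRM_PAD_LEN.
 */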

struct shmdrv_layer {
        /* caif_dev_common must always be first in the structure. */
        struct caif_dev_common cfdev;

        u32 shm_tx_addr;
        u32 shm_rx_addr;
        u32 shm_base_addr;
        u32 tx_empty_available;
        spinlock_t lock;

        struct list_head tx_empty_list;
        struct list_head tx_pend_list;
        struct list_head tx_full_list;
        struct list_head rx_empty_list;
        struct list_head rx_pend_list;
        struct list_head rx_full_list;

        struct workqueue_struct *pshm_tx_workqueue;
        struct workqueue_struct *pshm_rx_workqueue;

        struct work_struct shm_tx_work;
        struct work_struct shm_rx_work;

        struct sk_buff_head sk_qhead;
        struct shmdev_layer *pshm_dev;
};
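
/*
 * Buffer life cycle, as implemented below: Tx buffers start on
 * tx_empty_list, are filled and moved to tx_full_list, and return to
 * tx_empty_list once the modem signals them emptied via the mailbox.
 * Rx buffers move from rx_empty_list to rx_full_list when the modem
 * signals them full, are drained via rx_pend_list, and are handed back
 * as empty through a mailbox message sent by the Tx work function.
 * tx_pend_list is initialised but not used by the data path.
 */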

static int shm_netdev_open(struct net_device *shm_netdev)
{
        netif_wake_queue(shm_netdev);
        return 0;
}

static int shm_netdev_close(struct net_device *shm_netdev)
{
        netif_stop_queue(shm_netdev);
        return 0;
}

int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
        struct buf_list *pbuf;
        struct shmdrv_layer *pshm_drv;
        struct list_head *pos;
        u32 avail_emptybuff = 0;
        unsigned long flags = 0;

        pshm_drv = priv;

        /* Check for received buffers. */
        if (mbx_msg & SHM_FULL_MASK) {
                int idx;

                spin_lock_irqsave(&pshm_drv->lock, flags);

                /* Check whether we have any outstanding buffers. */
                if (list_empty(&pshm_drv->rx_empty_list)) {

                        /* Release spin lock. */
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);

                        /* We print even in IRQ context... */
                        pr_warn("No empty Rx buffers to fill: mbx_msg:%x\n",
                                        mbx_msg);

                        /* Bail out. */
                        goto err_sync;
                }

                pbuf =
                        list_entry(pshm_drv->rx_empty_list.next,
                                        struct buf_list, list);
                idx = pbuf->index;

                /* Check buffer synchronization. */
                if (idx != SHM_GET_FULL(mbx_msg)) {

                        /* We print even in IRQ context... */
                        pr_warn("phyif_shm_mbx_msg_cb: RX full out of sync: idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
                                        idx, mbx_msg, SHM_GET_FULL(mbx_msg));

                        spin_unlock_irqrestore(&pshm_drv->lock, flags);

                        /* Bail out. */
                        goto err_sync;
                }

                list_del_init(&pbuf->list);
                list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

                spin_unlock_irqrestore(&pshm_drv->lock, flags);

                /* Schedule the Rx work queue. */
                if (!work_pending(&pshm_drv->shm_rx_work))
                        queue_work(pshm_drv->pshm_rx_workqueue,
                                                &pshm_drv->shm_rx_work);
        }

        /* Check for emptied buffers. */
        if (mbx_msg & SHM_EMPTY_MASK) {
                int idx;

                spin_lock_irqsave(&pshm_drv->lock, flags);

                /* Check whether we have any outstanding buffers. */
                if (list_empty(&pshm_drv->tx_full_list)) {

                        /* We print even in IRQ context... */
                        pr_warn("No Tx buffers to empty: msg:%x\n", mbx_msg);

                        spin_unlock_irqrestore(&pshm_drv->lock, flags);

                        /* Bail out. */
                        goto err_sync;
                }

                pbuf =
                        list_entry(pshm_drv->tx_full_list.next,
                                        struct buf_list, list);
                idx = pbuf->index;

                /* Check buffer synchronization. */
                if (idx != SHM_GET_EMPTY(mbx_msg)) {

                        spin_unlock_irqrestore(&pshm_drv->lock, flags);

                        /* We print even in IRQ context... */
                        pr_warn("TX empty out of sync: idx:%d, msg:%x\n",
                                        idx, mbx_msg);

                        /* Bail out. */
                        goto err_sync;
                }
                list_del_init(&pbuf->list);

                /* Reset buffer parameters. */
                pbuf->frames = 0;
                pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

                list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

                /* Count the available buffers in the empty list. */
                list_for_each(pos, &pshm_drv->tx_empty_list)
                        avail_emptybuff++;

                /* Check whether we have to wake up the transmitter. */
                if ((avail_emptybuff > HIGH_WATERMARK) &&
                                        (!pshm_drv->tx_empty_available)) {
                        pshm_drv->tx_empty_available = 1;
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                                                CAIF_FLOW_ON);

                        /* Schedule the Tx work queue, if required. */
                        if (!work_pending(&pshm_drv->shm_tx_work))
                                queue_work(pshm_drv->pshm_tx_workqueue,
                                                        &pshm_drv->shm_tx_work);
                } else
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);
        }

        return 0;

err_sync:
        return -EIO;
}

static void shm_rx_work_func(struct work_struct *rx_work)
{
        struct shmdrv_layer *pshm_drv;
        struct buf_list *pbuf;
        unsigned long flags = 0;
        struct sk_buff *skb;
        char *p;
        int ret;

        pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

        while (1) {

                struct shm_pck_desc *pck_desc;

                spin_lock_irqsave(&pshm_drv->lock, flags);

                /* Check for received buffers. */
                if (list_empty(&pshm_drv->rx_full_list)) {
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        break;
                }

                pbuf =
                        list_entry(pshm_drv->rx_full_list.next, struct buf_list,
                                        list);
                list_del_init(&pbuf->list);
                spin_unlock_irqrestore(&pshm_drv->lock, flags);

                /* Retrieve pointer to start of the packet descriptor area. */
                pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

                /*
                 * Check whether descriptor contains a CAIF shared memory
                 * frame.
                 */
                while (pck_desc->frm_ofs) {
                        unsigned int frm_buf_ofs;
                        unsigned int frm_pck_ofs;
                        unsigned int frm_pck_len;
                        /*
                         * Check whether offset is within buffer limits
                         * (lower).
                         */
                        if (pck_desc->frm_ofs <
                                (pbuf->phy_addr - pshm_drv->shm_base_addr))
                                break;
                        /*
                         * Check whether offset is within buffer limits
                         * (higher).
                         */
                        if (pck_desc->frm_ofs >
                                ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
                                        pbuf->len))
                                break;

                        /* Calculate offset from start of buffer. */
                        frm_buf_ofs =
                                pck_desc->frm_ofs - (pbuf->phy_addr -
                                                pshm_drv->shm_base_addr);

                        /*
                         * Calculate offset and length of CAIF packet while
                         * taking care of the shared memory header.
                         */
                        frm_pck_ofs =
                                frm_buf_ofs + SHM_HDR_LEN +
                                (*(pbuf->desc_vptr + frm_buf_ofs));
                        frm_pck_len =
                                (pck_desc->frm_len - SHM_HDR_LEN -
                                (*(pbuf->desc_vptr + frm_buf_ofs)));

                        /* Check whether CAIF packet is within buffer limits. */
                        if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
                                break;

                        /* Get a suitable CAIF packet and copy in data. */
                        skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
                                                        frm_pck_len + 1);

                        if (skb == NULL) {
                                pr_info("OOM: Try next frame in descriptor\n");
                                break;
                        }

                        p = skb_put(skb, frm_pck_len);
                        memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

                        skb->protocol = htons(ETH_P_CAIF);
                        skb_reset_mac_header(skb);
                        skb->dev = pshm_drv->pshm_dev->pshm_netdev;

                        /* Push received packet up the stack. */
                        ret = netif_rx_ni(skb);

                        if (!ret) {
                                pshm_drv->pshm_dev->pshm_netdev->stats.
                                                                rx_packets++;
                                pshm_drv->pshm_dev->pshm_netdev->stats.
                                                rx_bytes += pck_desc->frm_len;
                        } else
                                ++pshm_drv->pshm_dev->pshm_netdev->stats.
                                                                rx_dropped;
                        /* Move to next packet descriptor. */
                        pck_desc++;
                }

                spin_lock_irqsave(&pshm_drv->lock, flags);
                list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
                spin_unlock_irqrestore(&pshm_drv->lock, flags);
        }

        /*
         * Schedule the Tx work queue, if required.  The Tx work function
         * also hands drained Rx buffers back to the modem, so it must run
         * after buffers have been moved to rx_pend_list.
         */
        if (!work_pending(&pshm_drv->shm_tx_work))
                queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
}

static void shm_tx_work_func(struct work_struct *tx_work)
{
        u32 mbox_msg;
        unsigned int frmlen, avail_emptybuff, append = 0;
        unsigned long flags = 0;
        struct buf_list *pbuf = NULL;
        struct shmdrv_layer *pshm_drv;
        struct shm_caif_frm *frm;
        struct sk_buff *skb;
        struct shm_pck_desc *pck_desc;
        struct list_head *pos;

        pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

        do {
                /* Initialize mailbox message. */
                mbox_msg = 0x00;
                avail_emptybuff = 0;

                spin_lock_irqsave(&pshm_drv->lock, flags);

                /* Check for pending receive buffers. */
                if (!list_empty(&pshm_drv->rx_pend_list)) {

                        pbuf = list_entry(pshm_drv->rx_pend_list.next,
                                                struct buf_list, list);

                        list_del_init(&pbuf->list);
                        list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
                        /*
                         * The index value is never changed,
                         * so read access should be safe.
                         */
                        mbox_msg |= SHM_SET_EMPTY(pbuf->index);
                }

                skb = skb_peek(&pshm_drv->sk_qhead);

                if (skb == NULL)
                        goto send_msg;
                /* Count the available buffers in the empty list. */
                list_for_each(pos, &pshm_drv->tx_empty_list)
                        avail_emptybuff++;

                if ((avail_emptybuff < LOW_WATERMARK) &&
                                        pshm_drv->tx_empty_available) {
                        /* Update blocking condition. */
                        pshm_drv->tx_empty_available = 0;
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                        CAIF_FLOW_OFF);
                        spin_lock_irqsave(&pshm_drv->lock, flags);
                }
                /*
                 * We simply return to the caller if there are no free Tx
                 * buffers. The received skbs stay on the skb list, waiting
                 * to be transmitted once Tx buffers become available.
                 */
                if (list_empty(&pshm_drv->tx_empty_list))
                        goto send_msg;

                /* Get the first free Tx buffer. */
                pbuf = list_entry(pshm_drv->tx_empty_list.next,
                                                struct buf_list, list);
                do {
                        if (append) {
                                skb = skb_peek(&pshm_drv->sk_qhead);
                                if (skb == NULL)
                                        break;
                        }

                        frm = (struct shm_caif_frm *)
                                        (pbuf->desc_vptr + pbuf->frm_ofs);

                        frm->hdr_ofs = 0;
                        frmlen = SHM_HDR_LEN + frm->hdr_ofs + skb->len;

                        /* Add tail padding if needed. */
                        if (frmlen % SHM_FRM_PAD_LEN)
                                frmlen += SHM_FRM_PAD_LEN -
                                                (frmlen % SHM_FRM_PAD_LEN);

                        /*
                         * Verify that packet, header and additional padding
                         * can fit within the buffer frame area.
                         */
                        if (frmlen >= (pbuf->len - pbuf->frm_ofs))
                                break;

                        /* Remove the buffer from the empty list on first use. */
                        if (!append) {
                                list_del_init(&pbuf->list);
                                append = 1;
                        }

                        skb = skb_dequeue(&pshm_drv->sk_qhead);
                        if (skb == NULL)
                                break;
                        /* Copy in CAIF frame. */
                        skb_copy_bits(skb, 0, pbuf->desc_vptr +
                                        pbuf->frm_ofs + SHM_HDR_LEN +
                                                frm->hdr_ofs, skb->len);

                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
                                                                        frmlen;
                        dev_kfree_skb_irq(skb);

                        /* Fill in the shared memory packet descriptor area. */
                        pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
                        /* Forward to current frame. */
                        pck_desc += pbuf->frames;
                        pck_desc->frm_ofs = (pbuf->phy_addr -
                                                pshm_drv->shm_base_addr) +
                                                                pbuf->frm_ofs;
                        pck_desc->frm_len = frmlen;
                        /* Terminate packet descriptor area. */
                        pck_desc++;
                        pck_desc->frm_ofs = 0;
                        /* Update buffer parameters. */
                        pbuf->frames++;
                        pbuf->frm_ofs += frmlen + (frmlen % 32);
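                        /*
                         * NB: this mirrors the arithmetic in
                         * SHM_CAIF_FRM_OFS; "frmlen + (frmlen % 32)" is a
                         * multiple of 32 only when frmlen % 32 is 0 or 16,
                         * so the next frame start is not guaranteed to be
                         * 32-byte aligned.
                         */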

                } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

                /* Assign buffer as full. */
                list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
                append = 0;
                mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
                spin_unlock_irqrestore(&pshm_drv->lock, flags);

                if (mbox_msg)
                        pshm_drv->pshm_dev->pshmdev_mbxsend
                                        (pshm_drv->pshm_dev->shm_id, mbox_msg);
        } while (mbox_msg);
}

static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
        struct shmdrv_layer *pshm_drv;

        pshm_drv = netdev_priv(shm_netdev);

        skb_queue_tail(&pshm_drv->sk_qhead, skb);

        /* Schedule the Tx work queue for deferred processing of skbs. */
        if (!work_pending(&pshm_drv->shm_tx_work))
                queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

        return NETDEV_TX_OK;
}

static const struct net_device_ops netdev_ops = {
        .ndo_open = shm_netdev_open,
        .ndo_stop = shm_netdev_close,
        .ndo_start_xmit = shm_netdev_tx,
};

static void shm_netdev_setup(struct net_device *pshm_netdev)
{
        struct shmdrv_layer *pshm_drv;

        pshm_netdev->netdev_ops = &netdev_ops;
        pshm_netdev->mtu = CAIF_MAX_MTU;
        pshm_netdev->type = ARPHRD_CAIF;
        pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
        pshm_netdev->tx_queue_len = 0;
        pshm_netdev->destructor = free_netdev;

        pshm_drv = netdev_priv(pshm_netdev);

        /* Initialize structures in a clean state. */
        memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

        pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}

int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
        int result, j;
        struct shmdrv_layer *pshm_drv = NULL;

        pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
                                                "cfshm%d", shm_netdev_setup);
        if (!pshm_dev->pshm_netdev)
                return -ENOMEM;

        pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
        pshm_drv->pshm_dev = pshm_dev;

        /*
         * Initialization starts with the verification of the
         * availability of the MBX driver by calling its setup function.
         * The MBX driver must be available by this time for proper
         * functioning of the SHM driver.
         */
        if ((pshm_dev->pshmdev_mbxsetup
                                (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
                pr_warn("Could not configure SHM mailbox, bailing out\n");
                free_netdev(pshm_dev->pshm_netdev);
                return -ENODEV;
        }

        skb_queue_head_init(&pshm_drv->sk_qhead);

        pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER INSTANCE AT pshm_drv = 0x%p\n",
                        pshm_drv->pshm_dev->shm_id, pshm_drv);

        if (pshm_dev->shm_total_sz <
                        (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {

                pr_warn("ERROR, amount of available physical SHM cannot accommodate the current SHM driver configuration, bailing out\n");
                free_netdev(pshm_dev->pshm_netdev);
                return -ENOMEM;
        }

        pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
        pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

        if (pshm_dev->shm_loopback)
                pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
        else
                pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
                                                (NR_TX_BUF * TX_BUF_SZ);

        spin_lock_init(&pshm_drv->lock);
        INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
        INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
        INIT_LIST_HEAD(&pshm_drv->tx_full_list);

        INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
        INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
        INIT_LIST_HEAD(&pshm_drv->rx_full_list);

        INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
        INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

        pshm_drv->pshm_tx_workqueue =
                                create_singlethread_workqueue("shm_tx_work");
        pshm_drv->pshm_rx_workqueue =
                                create_singlethread_workqueue("shm_rx_work");

        /* The workqueue allocations can fail; unwind and bail out if so. */
        if (!pshm_drv->pshm_tx_workqueue || !pshm_drv->pshm_rx_workqueue) {
                pr_warn("Could not create work queues, bailing out\n");
                if (pshm_drv->pshm_tx_workqueue)
                        destroy_workqueue(pshm_drv->pshm_tx_workqueue);
                if (pshm_drv->pshm_rx_workqueue)
                        destroy_workqueue(pshm_drv->pshm_rx_workqueue);
                free_netdev(pshm_dev->pshm_netdev);
                return -ENOMEM;
        }

        for (j = 0; j < NR_TX_BUF; j++) {
                struct buf_list *tx_buf =
                                kmalloc(sizeof(struct buf_list), GFP_KERNEL);

                if (tx_buf == NULL) {
                        pr_warn("ERROR, could not allocate memory for tx_buf, bailing out\n");
                        free_netdev(pshm_dev->pshm_netdev);
                        return -ENOMEM;
                }
                tx_buf->index = j;
                tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
                tx_buf->len = TX_BUF_SZ;
                tx_buf->frames = 0;
                tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

                if (pshm_dev->shm_loopback)
                        tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
                else
                        tx_buf->desc_vptr =
                                        ioremap(tx_buf->phy_addr, TX_BUF_SZ);
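                /*
                 * NB: ioremap() can fail and return NULL; neither this
                 * mapping nor the Rx mapping below is checked before use.
                 */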

                list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
        }

        for (j = 0; j < NR_RX_BUF; j++) {
                struct buf_list *rx_buf =
                                kmalloc(sizeof(struct buf_list), GFP_KERNEL);

                if (rx_buf == NULL) {
                        pr_warn("ERROR, could not allocate memory for rx_buf, bailing out\n");
                        free_netdev(pshm_dev->pshm_netdev);
                        return -ENOMEM;
                }
                rx_buf->index = j;
                rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
                rx_buf->len = RX_BUF_SZ;

                if (pshm_dev->shm_loopback)
                        rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
                else
                        rx_buf->desc_vptr =
                                        ioremap(rx_buf->phy_addr, RX_BUF_SZ);
                list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
        }

        pshm_drv->tx_empty_available = 1;
        result = register_netdev(pshm_dev->pshm_netdev);
        if (result)
                pr_warn("ERROR[%d], SHM could not register with the network framework, bailing out\n",
                                result);

        return result;
}

void caif_shmcore_remove(struct net_device *pshm_netdev)
{
        struct buf_list *pbuf;
        struct shmdrv_layer *pshm_drv = NULL;

        pshm_drv = netdev_priv(pshm_netdev);

        while (!(list_empty(&pshm_drv->tx_pend_list))) {
                pbuf =
                        list_entry(pshm_drv->tx_pend_list.next,
                                        struct buf_list, list);

                list_del(&pbuf->list);
                kfree(pbuf);
        }

        while (!(list_empty(&pshm_drv->tx_full_list))) {
                pbuf =
                        list_entry(pshm_drv->tx_full_list.next,
                                        struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        while (!(list_empty(&pshm_drv->tx_empty_list))) {
                pbuf =
                        list_entry(pshm_drv->tx_empty_list.next,
                                        struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        /* NB: the rx_* loops below previously iterated the tx_* lists
         * by copy-paste mistake; they must walk their own list heads. */
        while (!(list_empty(&pshm_drv->rx_full_list))) {
                pbuf =
                        list_entry(pshm_drv->rx_full_list.next,
                                struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        while (!(list_empty(&pshm_drv->rx_pend_list))) {
                pbuf =
                        list_entry(pshm_drv->rx_pend_list.next,
                                struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        while (!(list_empty(&pshm_drv->rx_empty_list))) {
                pbuf =
                        list_entry(pshm_drv->rx_empty_list.next,
                                struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        /* Destroy work queues. */
        destroy_workqueue(pshm_drv->pshm_tx_workqueue);
        destroy_workqueue(pshm_drv->pshm_rx_workqueue);

        unregister_netdev(pshm_netdev);
}