/*
 * Copyright (C) ST-Ericsson AB 2010
 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
 * Authors:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
 *           Daniel Martensson / daniel.martensson@stericsson.com
 * License terms: GNU General Public License (GPL) version 2
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/io.h>

#include <net/caif/caif_device.h>
#include <net/caif/caif_shm.h>

#define NR_TX_BUF               6
#define NR_RX_BUF               6
#define TX_BUF_SZ               0x2000
#define RX_BUF_SZ               0x2000

#define CAIF_NEEDED_HEADROOM    32

#define CAIF_FLOW_ON            1
#define CAIF_FLOW_OFF           0

#define LOW_WATERMARK           3
#define HIGH_WATERMARK          4

/* Maximum number of CAIF buffers per shared memory buffer. */
#define SHM_MAX_FRMS_PER_BUF    10

/*
 * Size in bytes of the descriptor area
 * (With end of descriptor signalling)
 */
#define SHM_CAIF_DESC_SIZE      ((SHM_MAX_FRMS_PER_BUF + 1) * \
                                        sizeof(struct shm_pck_desc))

/*
 * Offset to the first CAIF frame within a shared memory buffer.
 * Aligned on 32 bytes.
 */
#define SHM_CAIF_FRM_OFS        (SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))

/* Number of bytes for CAIF shared memory header. */
#define SHM_HDR_LEN             1

/* Number of padding bytes for the complete CAIF frame. */
#define SHM_FRM_PAD_LEN         4

#define CAIF_MAX_MTU            4096

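/*
 * Mailbox message encoding: the low nibble carries the index of a buffer
 * that has been filled, the high nibble the index of a buffer that has
 * been emptied. Indices are stored as (index + 1) so that a zero nibble
 * means "no buffer".
 */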
#define SHM_SET_FULL(x) (((x+1) & 0x0F) << 0)
#define SHM_GET_FULL(x) (((x >> 0) & 0x0F) - 1)

#define SHM_SET_EMPTY(x)        (((x+1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)        (((x >> 4) & 0x0F) - 1)

#define SHM_FULL_MASK           (0x0F << 0)
#define SHM_EMPTY_MASK          (0x0F << 4)

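/*
 * Each shared memory buffer starts with a descriptor area: an array of
 * shm_pck_desc entries, one per CAIF frame stored in the buffer, terminated
 * by an entry with frm_ofs == 0.
 */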
struct shm_pck_desc {
        /*
         * Offset from start of shared memory area to start of
         * shared memory CAIF frame.
         */
        u32 frm_ofs;
        u32 frm_len;
};

struct buf_list {
        unsigned char *desc_vptr;
        u32 phy_addr;
        u32 index;
        u32 len;
        u32 frames;
        u32 frm_ofs;
        struct list_head list;
};

struct shm_caif_frm {
        /* Number of bytes of padding before the CAIF frame. */
        u8 hdr_ofs;
};

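/*
 * Per-device driver state. Buffers cycle between the lists below:
 * TX buffers move from tx_empty_list to tx_full_list once frames have been
 * written and a FULL indication is sent, and back to tx_empty_list when the
 * peer acknowledges them as emptied. RX buffers move from rx_empty_list to
 * rx_full_list when the peer signals FULL, to rx_pend_list once their frames
 * have been delivered to the stack, and back to rx_empty_list when an EMPTY
 * indication has been sent to the peer.
 */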
struct shmdrv_layer {
        /* caif_dev_common must always be first in the structure */
        struct caif_dev_common cfdev;

        u32 shm_tx_addr;
        u32 shm_rx_addr;
        u32 shm_base_addr;
        u32 tx_empty_available;
        spinlock_t lock;

        struct list_head tx_empty_list;
        struct list_head tx_pend_list;
        struct list_head tx_full_list;
        struct list_head rx_empty_list;
        struct list_head rx_pend_list;
        struct list_head rx_full_list;

        struct workqueue_struct *pshm_tx_workqueue;
        struct workqueue_struct *pshm_rx_workqueue;

        struct work_struct shm_tx_work;
        struct work_struct shm_rx_work;

        struct sk_buff_head sk_qhead;
        struct shmdev_layer *pshm_dev;
};

static int shm_netdev_open(struct net_device *shm_netdev)
{
        netif_wake_queue(shm_netdev);
        return 0;
}

static int shm_netdev_close(struct net_device *shm_netdev)
{
        netif_stop_queue(shm_netdev);
        return 0;
}

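/*
 * Mailbox callback registered with the shared memory device. Called
 * (possibly in IRQ context) when the peer signals that it has filled an RX
 * buffer and/or emptied a TX buffer; moves the affected buffers between
 * lists and schedules the RX/TX work queues as needed.
 */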
int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
{
        struct buf_list *pbuf;
        struct shmdrv_layer *pshm_drv;
        struct list_head *pos;
        u32 avail_emptybuff = 0;
        unsigned long flags = 0;

        pshm_drv = priv;

        /* Check for received buffers. */
        if (mbx_msg & SHM_FULL_MASK) {
                int idx;

                spin_lock_irqsave(&pshm_drv->lock, flags);

                /* Check whether we have any outstanding buffers. */
                if (list_empty(&pshm_drv->rx_empty_list)) {

                        /* Release spin lock. */
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);

                        /* We print even in IRQ context... */
                        pr_warn("No empty Rx buffers to fill: "
                                        "mbx_msg:%x\n", mbx_msg);

                        /* Bail out. */
                        goto err_sync;
                }

                pbuf =
                        list_entry(pshm_drv->rx_empty_list.next,
                                        struct buf_list, list);
                idx = pbuf->index;

                /* Check buffer synchronization. */
                if (idx != SHM_GET_FULL(mbx_msg)) {

                        /* We print even in IRQ context... */
                        pr_warn(
                        "phyif_shm_mbx_msg_cb: RX full out of sync:"
                        " idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
                                idx, mbx_msg, SHM_GET_FULL(mbx_msg));

                        spin_unlock_irqrestore(&pshm_drv->lock, flags);

                        /* Bail out. */
                        goto err_sync;
                }

                list_del_init(&pbuf->list);
                list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);

                spin_unlock_irqrestore(&pshm_drv->lock, flags);

                /* Schedule RX work queue. */
                if (!work_pending(&pshm_drv->shm_rx_work))
                        queue_work(pshm_drv->pshm_rx_workqueue,
                                                &pshm_drv->shm_rx_work);
        }

        /* Check for emptied buffers. */
        if (mbx_msg & SHM_EMPTY_MASK) {
                int idx;

                spin_lock_irqsave(&pshm_drv->lock, flags);

                /* Check whether we have any outstanding buffers. */
                if (list_empty(&pshm_drv->tx_full_list)) {

                        /* We print even in IRQ context... */
                        pr_warn("No TX to empty: msg:%x\n", mbx_msg);

                        spin_unlock_irqrestore(&pshm_drv->lock, flags);

                        /* Bail out. */
                        goto err_sync;
                }

                pbuf =
                        list_entry(pshm_drv->tx_full_list.next,
                                        struct buf_list, list);
                idx = pbuf->index;

                /* Check buffer synchronization. */
                if (idx != SHM_GET_EMPTY(mbx_msg)) {

                        spin_unlock_irqrestore(&pshm_drv->lock, flags);

                        /* We print even in IRQ context... */
                        pr_warn("TX empty "
                                "out of sync:idx:%d, msg:%x\n", idx, mbx_msg);

                        /* Bail out. */
                        goto err_sync;
                }
                list_del_init(&pbuf->list);

                /* Reset buffer parameters. */
                pbuf->frames = 0;
                pbuf->frm_ofs = SHM_CAIF_FRM_OFS;

                list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);

                /* Check the available no. of buffers in the empty list */
                list_for_each(pos, &pshm_drv->tx_empty_list)
                        avail_emptybuff++;

                /* Check whether we have to wake up the transmitter. */
                if ((avail_emptybuff > HIGH_WATERMARK) &&
                                        (!pshm_drv->tx_empty_available)) {
                        pshm_drv->tx_empty_available = 1;
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                                                CAIF_FLOW_ON);

                        /* Schedule the work queue if required. */
                        if (!work_pending(&pshm_drv->shm_tx_work))
                                queue_work(pshm_drv->pshm_tx_workqueue,
                                                        &pshm_drv->shm_tx_work);
                } else
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);
        }

        return 0;

err_sync:
        return -EIO;
}

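/*
 * RX deferred work: for each buffer on rx_full_list, walk its descriptor
 * area, copy every CAIF frame into a newly allocated skb and pass it up the
 * stack, then park the buffer on rx_pend_list until it can be reported as
 * emptied to the peer.
 */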
static void shm_rx_work_func(struct work_struct *rx_work)
{
        struct shmdrv_layer *pshm_drv;
        struct buf_list *pbuf;
        unsigned long flags = 0;
        struct sk_buff *skb;
        char *p;
        int ret;

        pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);

        while (1) {

                struct shm_pck_desc *pck_desc;

                spin_lock_irqsave(&pshm_drv->lock, flags);

                /* Check for received buffers. */
                if (list_empty(&pshm_drv->rx_full_list)) {
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        break;
                }

                pbuf =
                        list_entry(pshm_drv->rx_full_list.next, struct buf_list,
                                        list);
                list_del_init(&pbuf->list);
                spin_unlock_irqrestore(&pshm_drv->lock, flags);

                /* Retrieve pointer to start of the packet descriptor area. */
                pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;

                /*
                 * Check whether descriptor contains a CAIF shared memory
                 * frame.
                 */
                while (pck_desc->frm_ofs) {
                        unsigned int frm_buf_ofs;
                        unsigned int frm_pck_ofs;
                        unsigned int frm_pck_len;
                        /*
                         * Check whether offset is within buffer limits
                         * (lower).
                         */
                        if (pck_desc->frm_ofs <
                                (pbuf->phy_addr - pshm_drv->shm_base_addr))
                                break;
                        /*
                         * Check whether offset is within buffer limits
                         * (higher).
                         */
                        if (pck_desc->frm_ofs >
                                ((pbuf->phy_addr - pshm_drv->shm_base_addr) +
                                        pbuf->len))
                                break;

                        /* Calculate offset from start of buffer. */
                        frm_buf_ofs =
                                pck_desc->frm_ofs - (pbuf->phy_addr -
                                                pshm_drv->shm_base_addr);

                        /*
                         * Calculate offset and length of CAIF packet while
                         * taking care of the shared memory header.
                         */
                        frm_pck_ofs =
                                frm_buf_ofs + SHM_HDR_LEN +
                                (*(pbuf->desc_vptr + frm_buf_ofs));
                        frm_pck_len =
                                (pck_desc->frm_len - SHM_HDR_LEN -
                                (*(pbuf->desc_vptr + frm_buf_ofs)));

                        /* Check whether CAIF packet is within buffer limits */
                        if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
                                break;

                        /* Get a suitable CAIF packet and copy in data. */
                        skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
                                                        frm_pck_len + 1);

                        if (skb == NULL) {
                                pr_info("OOM: Try next frame in descriptor\n");
                                break;
                        }

                        p = skb_put(skb, frm_pck_len);
                        memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);

                        skb->protocol = htons(ETH_P_CAIF);
                        skb_reset_mac_header(skb);
                        skb->dev = pshm_drv->pshm_dev->pshm_netdev;

                        /* Push received packet up the stack. */
                        ret = netif_rx_ni(skb);

                        if (!ret) {
                                pshm_drv->pshm_dev->pshm_netdev->stats.
                                                                rx_packets++;
                                pshm_drv->pshm_dev->pshm_netdev->stats.
                                                rx_bytes += pck_desc->frm_len;
                        } else
                                ++pshm_drv->pshm_dev->pshm_netdev->stats.
                                                                rx_dropped;
                        /* Move to next packet descriptor. */
                        pck_desc++;
                }

                spin_lock_irqsave(&pshm_drv->lock, flags);
                list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);

                spin_unlock_irqrestore(&pshm_drv->lock, flags);

        }

        /* Schedule the work queue if required. */
        if (!work_pending(&pshm_drv->shm_tx_work))
                queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

}

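/*
 * TX deferred work: move processed RX buffers from rx_pend_list back to
 * rx_empty_list (flagging them as emptied in the mailbox message), then pack
 * queued skbs from sk_qhead into free TX buffers, fill in their descriptor
 * areas and flag the buffers as full. The accumulated mailbox message is
 * then sent to the peer. Flow towards the stack is turned off when the
 * number of free TX buffers drops below the low watermark.
 */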
static void shm_tx_work_func(struct work_struct *tx_work)
{
        u32 mbox_msg;
        unsigned int frmlen, avail_emptybuff, append = 0;
        unsigned long flags = 0;
        struct buf_list *pbuf = NULL;
        struct shmdrv_layer *pshm_drv;
        struct shm_caif_frm *frm;
        struct sk_buff *skb;
        struct shm_pck_desc *pck_desc;
        struct list_head *pos;

        pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);

        do {
                /* Initialize mailbox message. */
                mbox_msg = 0x00;
                avail_emptybuff = 0;

                spin_lock_irqsave(&pshm_drv->lock, flags);

                /* Check for pending receive buffers. */
                if (!list_empty(&pshm_drv->rx_pend_list)) {

                        pbuf = list_entry(pshm_drv->rx_pend_list.next,
                                                struct buf_list, list);

                        list_del_init(&pbuf->list);
                        list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
                        /*
                         * Value index is never changed,
                         * so read access should be safe.
                         */
                        mbox_msg |= SHM_SET_EMPTY(pbuf->index);
                }

                skb = skb_peek(&pshm_drv->sk_qhead);

                if (skb == NULL)
                        goto send_msg;
                /* Check the available no. of buffers in the empty list */
                list_for_each(pos, &pshm_drv->tx_empty_list)
                        avail_emptybuff++;

                if ((avail_emptybuff < LOW_WATERMARK) &&
                                        pshm_drv->tx_empty_available) {
                        /* Update blocking condition. */
                        pshm_drv->tx_empty_available = 0;
                        spin_unlock_irqrestore(&pshm_drv->lock, flags);
                        pshm_drv->cfdev.flowctrl
                                        (pshm_drv->pshm_dev->pshm_netdev,
                                        CAIF_FLOW_OFF);
                        spin_lock_irqsave(&pshm_drv->lock, flags);
                }
                /*
                 * We simply return back to the caller if we do not have space
                 * either in Tx pending list or Tx empty list. In this case,
                 * we hold the received skb in the skb list, waiting to
                 * be transmitted once Tx buffers become available
                 */
                if (list_empty(&pshm_drv->tx_empty_list))
                        goto send_msg;

                /* Get the first free Tx buffer. */
                pbuf = list_entry(pshm_drv->tx_empty_list.next,
                                                struct buf_list, list);
                do {
                        if (append) {
                                skb = skb_peek(&pshm_drv->sk_qhead);
                                if (skb == NULL)
                                        break;
                        }

                        frm = (struct shm_caif_frm *)
                                        (pbuf->desc_vptr + pbuf->frm_ofs);

                        frm->hdr_ofs = 0;
                        frmlen = 0;
                        frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;

                        /* Add tail padding if needed. */
                        if (frmlen % SHM_FRM_PAD_LEN)
                                frmlen += SHM_FRM_PAD_LEN -
                                                (frmlen % SHM_FRM_PAD_LEN);

                        /*
                         * Verify that packet, header and additional padding
                         * can fit within the buffer frame area.
                         */
                        if (frmlen >= (pbuf->len - pbuf->frm_ofs))
                                break;

                        if (!append) {
                                list_del_init(&pbuf->list);
                                append = 1;
                        }

                        skb = skb_dequeue(&pshm_drv->sk_qhead);
                        if (skb == NULL)
                                break;
                        /* Copy in CAIF frame. */
                        skb_copy_bits(skb, 0, pbuf->desc_vptr +
                                        pbuf->frm_ofs + SHM_HDR_LEN +
                                                frm->hdr_ofs, skb->len);

                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
                        pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
                                                                        frmlen;
                        dev_kfree_skb_irq(skb);

                        /* Fill in the shared memory packet descriptor area. */
                        pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
                        /* Forward to current frame. */
                        pck_desc += pbuf->frames;
                        pck_desc->frm_ofs = (pbuf->phy_addr -
                                                pshm_drv->shm_base_addr) +
                                                                pbuf->frm_ofs;
                        pck_desc->frm_len = frmlen;
                        /* Terminate packet descriptor area. */
                        pck_desc++;
                        pck_desc->frm_ofs = 0;
                        /* Update buffer parameters. */
                        pbuf->frames++;
                        pbuf->frm_ofs += frmlen + (frmlen % 32);

                } while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);

                /* Assign buffer as full. */
                list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
                append = 0;
                mbox_msg |= SHM_SET_FULL(pbuf->index);
send_msg:
                spin_unlock_irqrestore(&pshm_drv->lock, flags);

                if (mbox_msg)
                        pshm_drv->pshm_dev->pshmdev_mbxsend
                                        (pshm_drv->pshm_dev->shm_id, mbox_msg);
        } while (mbox_msg);
}

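/*
 * Transmit entry point: queue the skb and defer the actual copy into shared
 * memory to the TX work queue.
 */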
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
        struct shmdrv_layer *pshm_drv;

        pshm_drv = netdev_priv(shm_netdev);

        skb_queue_tail(&pshm_drv->sk_qhead, skb);

        /* Schedule Tx work queue for deferred processing of skbs. */
        if (!work_pending(&pshm_drv->shm_tx_work))
                queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);

        return 0;
}

static const struct net_device_ops netdev_ops = {
        .ndo_open = shm_netdev_open,
        .ndo_stop = shm_netdev_close,
        .ndo_start_xmit = shm_netdev_tx,
};

static void shm_netdev_setup(struct net_device *pshm_netdev)
{
        struct shmdrv_layer *pshm_drv;
        pshm_netdev->netdev_ops = &netdev_ops;

        pshm_netdev->mtu = CAIF_MAX_MTU;
        pshm_netdev->type = ARPHRD_CAIF;
        pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
        pshm_netdev->tx_queue_len = 0;
        pshm_netdev->destructor = free_netdev;

        pshm_drv = netdev_priv(pshm_netdev);

        /* Initialize structures in a clean state. */
        memset(pshm_drv, 0, sizeof(struct shmdrv_layer));

        pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
}

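/*
 * Probe: allocate and set up the CAIF network device, hook up the mailbox
 * callback, carve the shared memory area into NR_TX_BUF TX and NR_RX_BUF RX
 * buffers (ioremapped unless running in loopback), and register the network
 * device.
 */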
int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
{
        int result, j;
        struct shmdrv_layer *pshm_drv = NULL;

        pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
                                                "cfshm%d", shm_netdev_setup);
        if (!pshm_dev->pshm_netdev)
                return -ENOMEM;

        pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
        pshm_drv->pshm_dev = pshm_dev;

        /*
         * Initialization starts by verifying that the mailbox (MBX) driver
         * is available, by calling its setup function. The MBX driver must
         * be available at this point for the SHM driver to work properly.
         */
        if ((pshm_dev->pshmdev_mbxsetup
                                (caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
                pr_warn("Could not configure SHM mailbox, bailing out ...\n");
                free_netdev(pshm_dev->pshm_netdev);
                return -ENODEV;
        }

        skb_queue_head_init(&pshm_drv->sk_qhead);

        pr_info("SHM device[%d] probed by driver, new SHM driver "
                        "instance at pshm_drv = 0x%p\n",
                        pshm_drv->pshm_dev->shm_id, pshm_drv);

        if (pshm_dev->shm_total_sz <
                        (NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {

                pr_warn("ERROR, available physical SHM cannot accommodate "
                                "the current SHM driver configuration, "
                                "bailing out ...\n");
                free_netdev(pshm_dev->pshm_netdev);
                return -ENOMEM;
        }

        pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
        pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;

        if (pshm_dev->shm_loopback)
                pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
        else
                pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
                                                (NR_TX_BUF * TX_BUF_SZ);

        spin_lock_init(&pshm_drv->lock);
        INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
        INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
        INIT_LIST_HEAD(&pshm_drv->tx_full_list);

        INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
        INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
        INIT_LIST_HEAD(&pshm_drv->rx_full_list);

        INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
        INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);

        pshm_drv->pshm_tx_workqueue =
                                create_singlethread_workqueue("shm_tx_work");
        pshm_drv->pshm_rx_workqueue =
                                create_singlethread_workqueue("shm_rx_work");

        for (j = 0; j < NR_TX_BUF; j++) {
                struct buf_list *tx_buf =
                                kmalloc(sizeof(struct buf_list), GFP_KERNEL);

                if (tx_buf == NULL) {
                        free_netdev(pshm_dev->pshm_netdev);
                        return -ENOMEM;
                }
                tx_buf->index = j;
                tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
                tx_buf->len = TX_BUF_SZ;
                tx_buf->frames = 0;
                tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;

                if (pshm_dev->shm_loopback)
                        tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
                else
                        /*
                         * FIXME: the result of ioremap is not a pointer - arnd
                         */
                        tx_buf->desc_vptr =
                                        ioremap(tx_buf->phy_addr, TX_BUF_SZ);

                list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
        }

        for (j = 0; j < NR_RX_BUF; j++) {
                struct buf_list *rx_buf =
                                kmalloc(sizeof(struct buf_list), GFP_KERNEL);

                if (rx_buf == NULL) {
                        free_netdev(pshm_dev->pshm_netdev);
                        return -ENOMEM;
                }
                rx_buf->index = j;
                rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
                rx_buf->len = RX_BUF_SZ;

                if (pshm_dev->shm_loopback)
                        rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
                else
                        rx_buf->desc_vptr =
                                        ioremap(rx_buf->phy_addr, RX_BUF_SZ);
                list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
        }

        pshm_drv->tx_empty_available = 1;
        result = register_netdev(pshm_dev->pshm_netdev);
        if (result)
                pr_warn("ERROR[%d], SHM could not register with network "
                        "framework, bailing out ...\n", result);

        return result;
}

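/*
 * Remove: free the bookkeeping for all TX and RX buffers, destroy the work
 * queues and unregister the network device.
 */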
void caif_shmcore_remove(struct net_device *pshm_netdev)
{
        struct buf_list *pbuf;
        struct shmdrv_layer *pshm_drv = NULL;

        pshm_drv = netdev_priv(pshm_netdev);

        while (!(list_empty(&pshm_drv->tx_pend_list))) {
                pbuf =
                        list_entry(pshm_drv->tx_pend_list.next,
                                        struct buf_list, list);

                list_del(&pbuf->list);
                kfree(pbuf);
        }

        while (!(list_empty(&pshm_drv->tx_full_list))) {
                pbuf =
                        list_entry(pshm_drv->tx_full_list.next,
                                        struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        while (!(list_empty(&pshm_drv->tx_empty_list))) {
                pbuf =
                        list_entry(pshm_drv->tx_empty_list.next,
                                        struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        while (!(list_empty(&pshm_drv->rx_full_list))) {
                pbuf =
                        list_entry(pshm_drv->rx_full_list.next,
                                struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        while (!(list_empty(&pshm_drv->rx_pend_list))) {
                pbuf =
                        list_entry(pshm_drv->rx_pend_list.next,
                                struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        while (!(list_empty(&pshm_drv->rx_empty_list))) {
                pbuf =
                        list_entry(pshm_drv->rx_empty_list.next,
                                struct buf_list, list);
                list_del(&pbuf->list);
                kfree(pbuf);
        }

        /* Destroy work queues. */
        destroy_workqueue(pshm_drv->pshm_tx_workqueue);
        destroy_workqueue(pshm_drv->pshm_rx_workqueue);

        unregister_netdev(pshm_netdev);
}