/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.04"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

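/*
 * Driver-private counters exported via ethtool. Each entry pairs a
 * display name with the byte offset of a u64 field inside struct
 * ibmveth_adapter; IBMVETH_GET_STAT() then reads the counter by adding
 * that offset to the adapter pointer. Adding a statistic only requires
 * a new adapter field and one line in this table.
 */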
struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
};

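/*
 * The receive queue is a ring of ibmveth_rx_q_entry descriptors shared
 * with the hypervisor. Ownership is tracked with a toggle bit in
 * flags_off: each time the driver wraps past the end of the ring it
 * flips adapter->rx_queue.toggle, so an entry whose toggle bit matches
 * the driver's current value is one the firmware has filled since our
 * last pass. The same flags_off word also carries a valid bit, a
 * checksum-good bit and the frame offset within the buffer.
 */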
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

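/*
 * Buffer accounting: free_map is a ring of free slot indices.
 * consumer_index is where replenish takes the next free slot from, and
 * producer_index is where a slot is returned once its buffer comes back
 * from the receive queue. A slot that is currently posted to the
 * firmware is marked IBM_VETH_INVALID_MAP in free_map.
 */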
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

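/*
 * Every posted buffer starts with an 8-byte correlator that the
 * firmware echoes back in the receive queue entry: the pool index in
 * the upper 32 bits and the buffer slot in the lower 32. For example,
 * slot 5 of pool 2 yields the correlator (2ULL << 32) | 5, which
 * ibmveth_remove_buffer_from_pool() later splits apart again.
 */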
/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
						adapter->netdev->mtu +
						IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

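/*
 * The final 8 bytes of the 4K buffer list page appear to hold a
 * firmware-maintained count of packets dropped for lack of a receive
 * buffer; replenish and close snapshot it into the rx_no_buffer
 * statistic.
 */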
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

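/*
 * A buffer coming off the receive queue can either be recycled (handed
 * straight back to the firmware, skipping the unmap/remap cost) or
 * harvested (fully unmapped and returned to its pool, used when the
 * skb itself is passed up the stack or the pool has been deactivated).
 */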
/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

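/*
 * ibmveth_cleanup() must be safe to call from any error path, so each
 * DMA handle is reset to DMA_ERROR_CODE after unmapping and checked
 * with dma_mapping_error() before use; a handle that was never mapped
 * is simply skipped.
 */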
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
			dma_unmap_single(dev,
					adapter->rx_queue.queue_dma,
					adapter->rx_queue.queue_len,
					DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					adapter->bounce_buffer_dma,
					adapter->netdev->mtu + IBMVETH_BUFF_OH,
					DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
	union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

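/*
 * Open sequence: allocate the buffer-list and filter-list pages and the
 * receive queue, map them for DMA, register the logical LAN with the
 * hypervisor, populate the active buffer pools, hook up the interrupt
 * and finally allocate a bounce buffer for small copybreak transmits.
 */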
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *)get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *)get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
						GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		netdev_err(netdev, "unable to allocate rx queue pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		netdev_err(netdev, "unable to allocate bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) +
						4096 - 8);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version,
		sizeof(info->version) - 1);
}

static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */

	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					 set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret4);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;

		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					 clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
					"offload settings. %d rc=%ld\n",
					data, ret6);

			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);

			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;

		} else
			adapter->fw_ipv6_csum_support = data;

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
				     " %d rc=%ld ret_attr=%lx\n", data, ret,
				     ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev, u32 features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int rc;

	if (rx_csum == adapter->rx_csum)
		return 0;

	rc = ibmveth_set_csum_offload(dev, rx_csum);
	if (rc && !adapter->rx_csum)
		dev->features = features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);

	return rc;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

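/*
 * The h_send_logical_lan hcall accepts exactly six buffer descriptors,
 * which is why the transmit path limits an skb to a linear header plus
 * five fragments (linearizing anything larger).
 */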
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					     descs[0].desc, descs[1].desc,
					     descs[2].desc, descs[3].desc,
					     descs[4].desc, descs[5].desc,
					     correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

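/*
 * Transmit strategy: small linear skbs (under tx_copybreak) are copied
 * into a long-lived bounce buffer that is already DMA-mapped, avoiding
 * a TCE map/unmap per packet. Larger skbs are scatter-gather mapped,
 * one descriptor for the linear header and one per page fragment. If
 * any mapping fails (common under CMO memory pressure), the skb is
 * linearized and forced through the bounce path.
 */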
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 1; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

	/* descs[0] covers the linear header and was mapped with
	 * dma_map_single(), so it must be unmapped the same way.
	 */
	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}

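/*
 * NAPI poll: drain up to the budget from the receive queue, replenish
 * the pools, then re-enable the interrupt. Because a frame can arrive
 * between the final queue check and napi_complete(), the queue is
 * checked once more afterwards and polling restarts with interrupts
 * masked again if anything slipped in.
 */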
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb); /* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

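/*
 * An MTU change is really a buffer-pool selection: the new MTU (plus
 * buffer overhead) must fit in at least one pool's buffer size. The
 * device is closed, every pool up to and including the first one large
 * enough is activated, and the device is reopened so the new pools get
 * registered with the firmware.
 */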
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
				    buff_size);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));

	return ret;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

1333 static int __devinit ibmveth_probe(struct vio_dev *dev,
1334                                    const struct vio_device_id *id)
1335 {
1336         int rc, i;
1337         struct net_device *netdev;
1338         struct ibmveth_adapter *adapter;
1339         unsigned char *mac_addr_p;
1340         unsigned int *mcastFilterSize_p;
1341
1342         dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1343                 dev->unit_address);
1344
1345         mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1346                                                         NULL);
1347         if (!mac_addr_p) {
1348                 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1349                 return -EINVAL;
1350         }
1351
1352         mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
1353                                                 VETH_MCAST_FILTER_SIZE, NULL);
1354         if (!mcastFilterSize_p) {
1355                 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1356                         "attribute\n");
1357                 return -EINVAL;
1358         }
1359
1360         netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1361
1362         if (!netdev)
1363                 return -ENOMEM;
1364
1365         adapter = netdev_priv(netdev);
1366         dev_set_drvdata(&dev->dev, netdev);
1367
1368         adapter->vdev = dev;
1369         adapter->netdev = netdev;
1370         adapter->mcastFilterSize = *mcastFilterSize_p;
1371         adapter->pool_config = 0;
1372
1373         netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1374
1375         /*
1376          * Some older boxes running PHYP non-natively have an OF that returns
1377          * a 8-byte local-mac-address field (and the first 2 bytes have to be
1378          * ignored) while newer boxes' OF return a 6-byte field. Note that
1379          * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
1380          * The RPA doc specifies that the first byte must be 10b, so we'll
1381          * just look for it to solve this 8 vs. 6 byte field issue
1382          */
	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	netdev->features |= netdev->hw_features;

	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

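	/*
	 * Expose each receive buffer pool as a sysfs directory (pool0,
	 * pool1, ...) under the vio device so the pools can be tuned at
	 * run time; see veth_pool_store() below.
	 */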
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

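/*
 * Tear down in the reverse order of probe: drop the pool kobjects,
 * unregister the interface, then free the net_device (which also
 * frees the adapter private data embedding the pools).
 */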
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

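/*
 * sysfs "show" for the per-pool attributes: report whether the pool is
 * active, how many buffers it holds, and the size of each buffer.
 */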
static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

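/*
 * sysfs "store" for the per-pool attributes.  Changing a pool on a
 * running interface requires a close/reopen cycle; pool_config is set
 * around ibmveth_close() so the close path knows this is a
 * reconfiguration rather than a real ifdown.
 */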
static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/*
			 * Make sure there is a buffer pool with buffers that
			 * can hold a packet of the size of the MTU.
			 */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}

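/*
 * Example usage from user space (the vio device path below is
 * hypothetical):
 *
 *   echo 1    > /sys/devices/vio/30000002/pool0/active
 *   echo 512  > /sys/devices/vio/30000002/pool0/num
 *   echo 9216 > /sys/devices/vio/30000002/pool0/size
 *
 * A write to a running interface triggers a close/reopen cycle so the
 * new pool geometry takes effect immediately.
 */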
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
		.name = __stringify(_name), .mode = _mode,	\
	}

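/* e.g. ATTR(active, 0644) defines veth_active_attr with .name = "active" */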
static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show	= veth_pool_show,
	.store	= veth_pool_store,
};

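/*
 * No release callback: each pool kobject is embedded in the adapter
 * private data, whose lifetime is tied to the net_device freed in
 * ibmveth_remove().
 */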
static struct kobj_type ktype_veth_pool = {
	.release	= NULL,
	.sysfs_ops	= &veth_pool_ops,
	.default_attrs	= veth_pool_attrs,
};

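/*
 * On resume, kick the interrupt handler once so any work that became
 * pending while the partition was suspended is processed without
 * waiting for the next interrupt.
 */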
static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);

	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

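/* Match firmware device tree nodes of type "network", compatible "IBM,l-lan" */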
static struct vio_device_id ibmveth_device_table[] __devinitdata = {
	{ "network", "IBM,l-lan" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.driver		= {
		.name	= ibmveth_driver_name,
		.owner	= THIS_MODULE,
		.pm	= &ibmveth_pm_ops,
	}
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);