/**************************************************************************/
/* IBM eServer i/pSeries Virtual Ethernet Device Driver */
/* Copyright (C) 2003 IBM Corp. */
/* Originally written by Dave Larson (larson1@us.ibm.com) */
/* Maintained by Santiago Leon (santil@us.ibm.com) */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation; either version 2 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program; if not, write to the Free Software */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 */
/* USA */
/* */
/* This module contains the implementation of a virtual ethernet device */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN */
/* option of the RS/6000 Platform Architecture to interface with virtual */
/* ethernet NICs that are presented to the partition by the hypervisor. */
/* */
/**************************************************************************/
/* TODO:
   - add support for sysfs
   - possibly remove procfs support
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/ip.h>
#include <net/net_namespace.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "ibmveth.h"
#define ibmveth_printk(fmt, args...) \
	printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
	if (!(expr)) { \
		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
		BUG(); \
	}
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif
static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct napi_struct *napi, int budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static struct kobj_type ktype_veth_pool;
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};
#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
};
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
}
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size / 2;
	pool->active = pool_active;
}
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
	if(!pool->free_map) {
		return -1;
	}

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if(!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL);
	if(!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->skbuff, 0, sizeof(void*) * pool->size);
	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for(i = 0; i < pool->size; ++i) {
		pool->free_map[i] = i;
	}

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;

	mb();

	for(i = 0; i < count; ++i) {
		struct sk_buff *skb;
		unsigned int free_index, index;
		u64 correlator;
		union ibmveth_buf_desc desc;
		unsigned long lpar_rc;
		dma_addr_t dma_addr;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if(!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index = (pool->consumer_index + 1) % pool->size;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
					  pool->buff_size, DMA_FROM_DEVICE);

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64*)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if(lpar_rc != H_SUCCESS) {
			pool->free_map[free_index] = index;
			pool->skbuff[index] = NULL;
			if (pool->consumer_index == 0)
				pool->consumer_index = pool->size - 1;
			else
				pool->consumer_index--;
			dma_unmap_single(&adapter->vdev->dev,
					 pool->dma_addr[index], pool->buff_size,
					 DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			adapter->replenish_add_buff_failure++;
			break;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
}
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for(i = 0; i < IbmVethNumBufferPools; i++)
		if(adapter->rx_buff_pool[i].active)
			ibmveth_replenish_buffer_pool(adapter,
						      &adapter->rx_buff_pool[i]);

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if(pool->skbuff && pool->dma_addr) {
		for(i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if(skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}
	kfree(pool->dma_addr);
	pool->dma_addr = NULL;
	kfree(pool->skbuff);
	pool->skbuff = NULL;
}
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index
		= (adapter->rx_buff_pool[pool].producer_index + 1)
		% adapter->rx_buff_pool[pool].size;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}
/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	if(!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
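/* harvest: pull the current entry off the receive queue without
 * recycling its buffer, advancing the queue index and flipping the
 * toggle state when the ring wraps */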
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
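/* unmap and free the buffer list page, filter list page and the
 * receive queue, then release any buffer pools that are still active */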
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;

	if(adapter->buffer_list_addr != NULL) {
		if(!dma_mapping_error(adapter->buffer_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if(adapter->filter_list_addr != NULL) {
		if(!dma_mapping_error(adapter->filter_list_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if(adapter->rx_queue.queue_addr != NULL) {
		if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->rx_queue.queue_dma,
					 adapter->rx_queue.queue_len,
					 DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for(i = 0; i < IbmVethNumBufferPools; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);
}
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/* After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once. */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}
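/* open: allocate and DMA-map the buffer list, filter list and receive
 * queue, register the logical LAN with the hypervisor, populate the
 * active buffer pools and request the VIO interrupt */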
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;

	ibmveth_debug_printk("open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i < IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if(!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if((dma_mapping_error(adapter->buffer_list_dma)) ||
	   (dma_mapping_error(adapter->filter_list_dma)) ||
	   (dma_mapping_error(adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     adapter->rx_queue.queue_dma,
				     mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for(i = 0; i < IbmVethNumBufferPools; i++) {
		if(!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM;
		}
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}
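/* close: quiesce NAPI and the tx queue, release the interrupt, free
 * the logical LAN (retrying while the hypervisor reports busy) and
 * tear down all DMA state */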
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	free_irq(netdev->irq, netdev);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);
	}

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}
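/* ethtool support - the virtual adapter has no PHY, so report a fixed
 * 1Gb/s full-duplex link that is always up */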
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}

static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev) {
	return 1;
}
static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if (data)
		adapter->rx_csum = 1;
	else {
		/*
		 * Since the ibmveth firmware interface does not have the concept of
		 * separate tx/rx checksum offload enable, if rx checksum is disabled
		 * we also have to disable tx checksum offload. Once we disable rx
		 * checksum offload, we are no longer allowed to send tx buffers that
		 * are not properly checksummed.
		 */
		adapter->rx_csum = 0;
		dev->features &= ~NETIF_F_IP_CSUM;
	}
}

static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if (data) {
		dev->features |= NETIF_F_IP_CSUM;
		adapter->rx_csum = 1;
	} else
		dev->features &= ~NETIF_F_IP_CSUM;
}
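/* flip the IPV4_TCP_CSUM ILLAN attribute in firmware, closing and
 * reopening the device first if it is running; on success the done()
 * callback updates the corresponding software flags */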
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
				    void (*done) (struct net_device *, u32))
{
	struct ibmveth_adapter *adapter = dev->priv;
	u64 set_attr, clr_attr, ret_attr;
	long ret;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
	else
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					 set_attr, &ret_attr);

		if (ret != H_SUCCESS) {
			rc1 = -EIO;
			ibmveth_error_printk("unable to change checksum offload settings."
					     " %d rc=%ld\n", data, ret);

			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr, clr_attr, &ret_attr);
		} else
			done(dev, data);
	} else {
		rc1 = -EIO;
		ibmveth_error_printk("unable to change checksum offload settings."
				     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}
static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;

	if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
		return 0;

	return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
}

static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int rc = 0;

	if (data && (dev->features & NETIF_F_IP_CSUM))
		return 0;
	if (!data && !(dev->features & NETIF_F_IP_CSUM))
		return 0;

	if (data && !adapter->rx_csum)
		rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
	else
		ibmveth_set_tx_csum_flags(dev, data);

	return rc;
}

static u32 ibmveth_get_rx_csum(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = dev->priv;
	return adapter->rx_csum;
}
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_stats_count(struct net_device *dev)
{
	return ARRAY_SIZE(ibmveth_stats);
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = dev->priv;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= netdev_get_link,
	.set_tx_csum		= ibmveth_set_tx_csum,
	.get_rx_csum		= ibmveth_get_rx_csum,
	.set_rx_csum		= ibmveth_set_rx_csum,
	.get_strings		= ibmveth_get_strings,
	.get_stats_count	= ibmveth_get_stats_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
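/* transmit one skb: map it for DMA, flag hardware checksumming when the
 * stack requested it, then hand the descriptor to the hypervisor,
 * retrying while it reports H_BUSY */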
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	unsigned long correlator;
	unsigned long flags;
	unsigned int retry_count;
	unsigned int tx_dropped = 0;
	unsigned int tx_bytes = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;

	desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
	desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
					     skb->len, DMA_TO_DEVICE);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
		ibmveth_error_printk("tx: failed to checksum packet\n");
		tx_dropped++;
		goto out;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;

		desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

	if (dma_mapping_error(desc.fields.address)) {
		ibmveth_error_printk("tx: unable to map xmit buffer\n");
		tx_map_failed++;
		tx_dropped++;
		goto out;
	}

	/* send the frame. Arbitrarily set retrycount to 1024 */
	correlator = 0;
	retry_count = 1024;
	do {
		lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
					     desc.desc, 0, 0, 0, 0, 0,
					     correlator, &correlator);
	} while ((lpar_rc == H_BUSY) && (retry_count--));

	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
		ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
				     (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0,
				     skb->len, desc.fields.address);
		tx_send_failed++;
		tx_dropped++;
	} else {
		tx_packets++;
		tx_bytes += skb->len;
		netdev->trans_start = jiffies;
	}

	dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
			 skb->len, DMA_TO_DEVICE);

out:	spin_lock_irqsave(&adapter->stats_lock, flags);
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;
	spin_unlock_irqrestore(&adapter->stats_lock, flags);

	dev_kfree_skb(skb);
	return 0;
}
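/* NAPI poll: harvest received frames from the rx queue up to the
 * budget, replenish the buffer pools, and re-enable the interrupt once
 * the queue is drained */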
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		struct sk_buff *skb;

		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);
			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			ibmveth_rxq_harvest_buffer(adapter);

			skb_reserve(skb, offset);
			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
			netdev->last_rx = jiffies;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		netif_rx_complete(netdev, napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    netif_rx_reschedule(netdev, napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
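/* interrupt handler: disable further interrupts and hand the real
 * work off to NAPI */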
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__netif_rx_schedule(netdev, &adapter->napi);
	}
	return IRQ_HANDLED;
}
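/* update the hypervisor multicast filter: go fully promiscuous when
 * asked (or when the list exceeds the firmware filter size), otherwise
 * reload the filter table entry by entry */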
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev->priv;
	unsigned long lpar_rc;

	if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct dev_mc_list *mclist = netdev->mc_list;
		int i;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if(lpar_rc != H_SUCCESS) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}
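/* change the MTU - only allowed if some buffer pool (activating
 * inactive ones if necessary) can hold the new frame size, which may
 * force a close/reopen cycle of a running device */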
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = dev->priv;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int reinit = 0;
	int i, rc;

	if (new_mtu < IBMVETH_MAX_MTU)
		return -EINVAL;

	for (i = 0; i < IbmVethNumBufferPools; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IbmVethNumBufferPools)
		return -EINVAL;

	/* Look for an active buffer pool that can hold the new MTU */
	for(i = 0; i < IbmVethNumBufferPools; i++) {
		if (!adapter->rx_buff_pool[i].active) {
			adapter->rx_buff_pool[i].active = 1;
			reinit = 1;
		}

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			if (reinit && netif_running(adapter->netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(adapter->netdev);
				adapter->pool_config = 0;
				dev->mtu = new_mtu;
				if ((rc = ibmveth_open(adapter->netdev)))
					return rc;
			} else
				dev->mtu = new_mtu;
			return 0;
		}
	}
	return -EINVAL;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(dev->priv);
	ibmveth_interrupt(dev->irq, dev);
}
#endif
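/* probe: read the MAC address and multicast filter size from the
 * device tree, set up the net_device and the per-pool sysfs kobjects,
 * enable checksum offload when firmware supports it, and register
 * the netdev */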
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	long ret;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	u64 set_attr, ret_attr;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev,
						VETH_MAC_ADDR, NULL);
	if(!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return 0;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if(!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if(!netdev)
		return -ENOMEM;

	adapter = netdev->priv;
	dev->dev.driver_data = netdev;

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/* Some older boxes running PHYP non-natively have an OF that
	   returns a 8-byte local-mac-address field (and the first
	   2 bytes have to be ignored) while newer boxes' OF return
	   a 6-byte field. Note that IEEE 1275 specifies that
	   local-mac-address must be a 6-byte field.
	   The RPA doc specifies that the first byte must be 10b, so
	   we'll just look for it to solve this 8 vs. 6 byte field issue */

	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->open = ibmveth_open;
	netdev->stop = ibmveth_close;
	netdev->hard_start_xmit = ibmveth_start_xmit;
	netdev->set_multicast_list = ibmveth_set_multicast_list;
	netdev->do_ioctl = ibmveth_ioctl;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	netdev->change_mtu = ibmveth_change_mtu;
	SET_NETDEV_DEV(netdev, &dev->dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
	netdev->poll_controller = ibmveth_poll_controller;
#endif
	netdev->features |= NETIF_F_LLTX;
	spin_lock_init(&adapter->stats_lock);

	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for(i = 0; i < IbmVethNumBufferPools; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		kobj->parent = &dev->dev.kobj;
		sprintf(kobj->name, "pool%d", i);
		kobj->ktype = &ktype_veth_pool;
		kobject_register(kobj);
	}

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	ibmveth_debug_printk("registering netdev...\n");

	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;

		ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);

		if (ret == H_SUCCESS) {
			adapter->rx_csum = 1;
			netdev->features |= NETIF_F_IP_CSUM;
		} else
			ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
	}

	rc = register_netdev(netdev);

	if(rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}
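/* remove: tear down the per-pool sysfs objects, unregister the netdev
 * and its procfs entry, and free the device */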
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev->dev.driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	int i;

	for(i = 0; i < IbmVethNumBufferPools; i++)
		kobject_unregister(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);
	return 0;
}
#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
	if (ibmveth_proc_dir) {
	}
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
}
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) {
		return (void *)1;
	} else {
		return NULL;
	}
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}
static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = ((char*) &adapter->netdev->dev_addr);
	char *firmware_mac = ((char*) &adapter->mac_addr);

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
		   current_mac[0], current_mac[1], current_mac[2],
		   current_mac[3], current_mac[4], current_mac[5]);
	seq_printf(seq, "Firmware MAC:    %02X:%02X:%02X:%02X:%02X:%02X\n",
		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, "  TX:  vio_map_single failures:      %ld\n", adapter->tx_map_failed);
	seq_printf(seq, "       send failures:                %ld\n", adapter->tx_send_failed);
	seq_printf(seq, "  RX:  replenish task cycles:        %ld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "       alloc_skb_failures:           %ld\n", adapter->replenish_no_mem);
	seq_printf(seq, "       add buffer failures:          %ld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "       invalid buffers:              %ld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "       no buffers:                   %ld\n", adapter->rx_no_buffer);

	return 0;
}
static struct seq_operations ibmveth_seq_ops = {
	.start = ibmveth_seq_start,
	.next  = ibmveth_seq_next,
	.stop  = ibmveth_seq_stop,
	.show  = ibmveth_seq_show,
};
static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct proc_dir_entry *proc;
	int rc;

	rc = seq_open(file, &ibmveth_seq_ops);
	if (!rc) {
		/* recover the pointer buried in proc_dir_entry data */
		seq = file->private_data;
		proc = PDE(inode);
		seq->private = proc->data;
	}
	return rc;
}
static const struct file_operations ibmveth_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ibmveth_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
		if (!entry) {
			ibmveth_error_printk("Cannot create adapter proc entry");
		} else {
			entry->data = (void *) adapter;
			entry->proc_fops = &ibmveth_proc_fops;
		}
	}
}
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		remove_proc_entry(u_addr, ibmveth_proc_dir);
	}
}
#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;
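/* sysfs interface to the buffer pools: each pool exposes active, num
 * and size attributes under its kobject */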
static ssize_t veth_pool_show(struct kobject * kobj,
			      struct attribute * attr, char * buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}
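/* writing a pool attribute may require a close/reopen cycle of a
 * running device so that the new pool geometry can be registered with
 * the hypervisor */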
static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
			       const char * buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev =
	    container_of(kobj->parent, struct device, kobj)->driver_data;
	struct ibmveth_adapter *adapter = netdev->priv;
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if(ibmveth_alloc_buffer_pool(pool)) {
					ibmveth_error_printk("unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->active = 1;
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IbmVethNumBufferPools; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IbmVethNumBufferPools) {
				ibmveth_error_printk("no active pool >= MTU\n");
				return -EPERM;
			}

			pool->active = 0;
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->size = value;
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->buff_size = value;
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
		.name = __stringify(_name), .mode = _mode, \
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);
static struct attribute * veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};
static struct sysfs_ops veth_pool_ops = {
	.show  = veth_pool_show,
	.store = veth_pool_store,
};
static struct kobj_type ktype_veth_pool = {
	.sysfs_ops     = &veth_pool_ops,
	.default_attrs = veth_pool_attrs,
};
static struct vio_device_id ibmveth_device_table[] __devinitdata= {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.driver		= {
		.name	= ibmveth_driver_name,
		.owner	= THIS_MODULE,
	}
};
static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}
static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);