drivers/staging/hv/netvsc_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv.h"
#include "hyperv_net.h"

struct net_device_context {
        /* point back to our device context */
        struct hv_device *device_ctx;
        /* pages still available for outbound packets; see watermarks below */
        unsigned long avail;
        struct work_struct work;
};

#define PACKET_PAGES_LOWATER  8
/*
 * Need this many pages to handle the worst-case fragmented packet:
 * one page per fragment, plus one for the linear skb->data area and
 * one for the RNDIS message.
 */
#define PACKET_PAGES_HIWATER  (MAX_SKB_FRAGS + 2)

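/*
 * Flow-control sketch (as implemented below): netvsc_start_xmit() charges
 * nr_frags + 2 pages per skb against ->avail and stops the queue when avail
 * falls below PACKET_PAGES_LOWATER; netvsc_xmit_completion() refunds the
 * pages and wakes the queue once avail reaches PACKET_PAGES_HIWATER.
 */
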
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
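
/*
 * Usage sketch (an assumption, not stated in this file): if this driver is
 * built as the "hv_netvsc" module, as the ethtool drvinfo string below
 * suggests, the ring size can be set at load time, e.g.
 * "modprobe hv_netvsc ring_size=256". S_IRUGO makes the parameter read-only
 * once the module is loaded.
 */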

/*
 * No-op so the netdev core doesn't return -EINVAL when modifying the
 * multicast address list in SIOCADDMULTI. hv is set up to get all multicast
 * when it calls rndis_filter_open().
 */
static void netvsc_set_multicast_list(struct net_device *net)
{
}

static int netvsc_open(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
        int ret = 0;

        if (netif_carrier_ok(net)) {
                /* Open up the device */
                ret = rndis_filter_open(device_obj);
                if (ret != 0) {
                        netdev_err(net, "unable to open device (ret %d).\n",
                                   ret);
                        return ret;
                }

                netif_start_queue(net);
        } else {
                netdev_err(net, "unable to open device...link is down.\n");
        }

        return ret;
}

static int netvsc_close(struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct hv_device *device_obj = net_device_ctx->device_ctx;
        int ret;

        netif_stop_queue(net);

        ret = rndis_filter_close(device_obj);
        if (ret != 0)
                netdev_err(net, "unable to close device (ret %d).\n", ret);

        return ret;
}

static void netvsc_xmit_completion(void *context)
{
        struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
        struct sk_buff *skb = (struct sk_buff *)
                (unsigned long)packet->completion.send.send_completion_tid;

        kfree(packet);

        if (skb) {
                struct net_device *net = skb->dev;
                struct net_device_context *net_device_ctx = netdev_priv(net);
                unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;

                dev_kfree_skb_any(skb);

                net_device_ctx->avail += num_pages;
                if (net_device_ctx->avail >= PACKET_PAGES_HIWATER)
                        netif_wake_queue(net);
        }
}

static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
        struct net_device_context *net_device_ctx = netdev_priv(net);
        struct netvsc_driver *net_drv_obj =
                drv_to_netvscdrv(net_device_ctx->device_ctx->device.driver);
        struct hv_netvsc_packet *packet;
        int ret;
        unsigned int i, num_pages;

        /* Add 1 for skb->data and one more for the RNDIS message */
        num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
        if (num_pages > net_device_ctx->avail)
                return NETDEV_TX_BUSY;
        /* Allocate a netvsc packet based on # of frags. */
        packet = kzalloc(sizeof(struct hv_netvsc_packet) +
                         (num_pages * sizeof(struct hv_page_buffer)) +
                         net_drv_obj->req_ext_size, GFP_ATOMIC);
        if (!packet) {
                /* out of memory; drop the packet */
                netdev_err(net, "unable to allocate hv_netvsc_packet\n");

                dev_kfree_skb(skb);
                net->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        packet->extension = (void *)(unsigned long)packet +
                                sizeof(struct hv_netvsc_packet) +
                                    (num_pages * sizeof(struct hv_page_buffer));

        /* Set up the RNDIS header */
        packet->page_buf_cnt = num_pages;

        /* TODO: Flush all write buffers/ memory fence ??? */
        /* wmb(); */

        /* Initialize it from the skb */
        packet->total_data_buflen = skb->len;

        /*
         * Start filling in the page buffers after the RNDIS buffer
         * (page_buf[0] is reserved for the RNDIS message).
         */
        packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
        packet->page_buf[1].offset
                = (unsigned long)skb->data & (PAGE_SIZE - 1);
        packet->page_buf[1].len = skb_headlen(skb);

        /* Additional fragments follow the skb linear data */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];

                packet->page_buf[i+2].pfn = page_to_pfn(f->page);
                packet->page_buf[i+2].offset = f->page_offset;
                packet->page_buf[i+2].len = f->size;
        }

        /* Set the completion routine */
        packet->completion.send.send_completion = netvsc_xmit_completion;
        packet->completion.send.send_completion_ctx = packet;
        packet->completion.send.send_completion_tid = (unsigned long)skb;

        ret = net_drv_obj->send(net_device_ctx->device_ctx, packet);
        if (ret == 0) {
                net->stats.tx_bytes += skb->len;
                net->stats.tx_packets++;

                net_device_ctx->avail -= num_pages;
                if (net_device_ctx->avail < PACKET_PAGES_LOWATER)
                        netif_stop_queue(net);
        } else {
                /* we are shutting down or the bus is overloaded; drop it */
                net->stats.tx_dropped++;
                netvsc_xmit_completion(packet);
        }

        return NETDEV_TX_OK;
}
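
/*
 * Note: returning NETDEV_TX_BUSY above (when fewer than num_pages pages
 * remain in ->avail) asks the core to requeue and retry the skb; the
 * LOWATER/HIWATER marks stop and wake the queue so this path should be rare.
 */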

/*
 * netvsc_linkstatus_callback - Link up/down notification
 * (status 1 means link up; anything else means link down)
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
                                       unsigned int status)
{
        struct net_device *net = dev_get_drvdata(&device_obj->device);
        struct net_device_context *ndev_ctx;

        if (!net) {
                pr_err("got link status but net device not initialized yet\n");
                return;
        }

        if (status == 1) {
                netif_carrier_on(net);
                netif_wake_queue(net);
                netif_notify_peers(net);
                ndev_ctx = netdev_priv(net);
                schedule_work(&ndev_ctx->work);
        } else {
                netif_carrier_off(net);
                netif_stop_queue(net);
        }
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
static int netvsc_recv_callback(struct hv_device *device_obj,
                                struct hv_netvsc_packet *packet)
{
        struct net_device *net = dev_get_drvdata(&device_obj->device);
        struct sk_buff *skb;
        void *data;
        int i;
        unsigned long flags;

        if (!net) {
                pr_err("got receive callback but net device not initialized yet\n");
                return 0;
        }

        /* Allocate a skb - TODO direct I/O to pages? */
        skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
        if (unlikely(!skb)) {
                ++net->stats.rx_dropped;
                return 0;
        }

        /* for kmap_atomic */
        local_irq_save(flags);

        /*
         * Copy to skb. This copy is needed here since the memory pointed to
         * by hv_netvsc_packet cannot be deallocated.
         */
        for (i = 0; i < packet->page_buf_cnt; i++) {
                data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
                                               KM_IRQ1);
                data = (void *)(unsigned long)data +
                                packet->page_buf[i].offset;

                memcpy(skb_put(skb, packet->page_buf[i].len), data,
                       packet->page_buf[i].len);

                kunmap_atomic((void *)((unsigned long)data -
                                       packet->page_buf[i].offset), KM_IRQ1);
        }

        local_irq_restore(flags);

        skb->protocol = eth_type_trans(skb, net);
        skb->ip_summed = CHECKSUM_NONE;

        net->stats.rx_packets++;
        net->stats.rx_bytes += skb->len;

        /*
         * Pass the skb back up. The network stack will deallocate the skb
         * when it is done.
         * TODO - use NAPI?
         */
        netif_rx(skb);

        return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
                               struct ethtool_drvinfo *info)
{
        strcpy(info->driver, "hv_netvsc");
        strcpy(info->version, HV_DRV_VERSION);
        strcpy(info->fw_version, "N/A");
}

static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
        .get_link       = ethtool_op_get_link,
};

static const struct net_device_ops device_ops = {
        .ndo_open =                     netvsc_open,
        .ndo_stop =                     netvsc_close,
        .ndo_start_xmit =               netvsc_start_xmit,
        .ndo_set_multicast_list =       netvsc_set_multicast_list,
        .ndo_change_mtu =               eth_change_mtu,
        .ndo_validate_addr =            eth_validate_addr,
        .ndo_set_mac_address =          eth_mac_addr,
};

/*
 * Send a GARP packet to network peers after migrations.
 * After a Quick Migration, the network is not immediately operational in the
 * current context when the RNDIS_STATUS_MEDIA_CONNECT event is received. So,
 * schedule another netif_notify_peers() from a work item; otherwise the GARP
 * packet is not sent after a quick migration and the network stays
 * disconnected.
 */
static void netvsc_send_garp(struct work_struct *w)
{
        struct net_device_context *ndev_ctx;
        struct net_device *net;

        msleep(20);
        ndev_ctx = container_of(w, struct net_device_context, work);
        net = dev_get_drvdata(&ndev_ctx->device_ctx->device);
        netif_notify_peers(net);
}

static int netvsc_probe(struct hv_device *dev)
{
        struct net_device *net = NULL;
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info device_info;
        int ret;

        net = alloc_etherdev(sizeof(struct net_device_context));
        if (!net)
                return -ENOMEM;

        /* Set initial state */
        netif_carrier_off(net);

        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = dev;
        net_device_ctx->avail = ring_size;
        dev_set_drvdata(&dev->device, net);
        INIT_WORK(&net_device_ctx->work, netvsc_send_garp);

        /* Notify the netvsc driver of the new device */
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
                free_netdev(net);
                dev_set_drvdata(&dev->device, NULL);
                return ret;
        }

        /*
         * If the carrier is still off, i.e. we did not get a link status
         * callback, update it if necessary.
         */
        /*
         * FIXME: We should use an atomic or test/set instead to avoid getting
         * out of sync with the device's link status.
         */
        if (!netif_carrier_ok(net))
                if (!device_info.link_state)
                        netif_carrier_on(net);

        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

        net->netdev_ops = &device_ops;

        /* TODO: Add GSO and Checksum offload */
        net->hw_features = NETIF_F_SG;
        net->features = NETIF_F_SG;

        SET_ETHTOOL_OPS(net, &ethtool_ops);
        SET_NETDEV_DEV(net, &dev->device);

        ret = register_netdev(net);
        if (ret != 0) {
                /* Remove the device and release the resources */
                rndis_filter_device_remove(dev);
                free_netdev(net);
        }

        return ret;
}

static int netvsc_remove(struct hv_device *dev)
{
        struct net_device *net = dev_get_drvdata(&dev->device);
        int ret;

        if (net == NULL) {
                dev_err(&dev->device, "No net device to remove\n");
                return 0;
        }

        /* Stop outbound traffic ASAP */
        netif_stop_queue(net);
        /* netif_carrier_off(net); */

        unregister_netdev(net);

        /*
         * Call into the vsc driver to let it know that the device is being
         * removed.
         */
        ret = rndis_filter_device_remove(dev);
        if (ret != 0) {
                /* TODO: */
                netdev_err(net, "unable to remove vsc device (ret %d)\n", ret);
        }

        free_netdev(net);
        return ret;
}

/* The one and only instance of this driver */
static struct netvsc_driver netvsc_drv = {
        .base.probe = netvsc_probe,
        .base.remove = netvsc_remove,
};

static void netvsc_drv_exit(void)
{
        vmbus_child_driver_unregister(&netvsc_drv.base.driver);
}

static int netvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
{
        struct netvsc_driver *net_drv_obj = &netvsc_drv;
        struct hv_driver *drv = &netvsc_drv.base;
        int ret;

        net_drv_obj->ring_buf_size = ring_size * PAGE_SIZE;
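        /*
         * With the default ring_size of 128 pages and (assuming) 4 KiB
         * pages, this requests a 512 KiB ring buffer.
         */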
        net_drv_obj->recv_cb = netvsc_recv_callback;
        net_drv_obj->link_status_change = netvsc_linkstatus_callback;

        /* Callback to client driver to complete the initialization */
        drv_init(&net_drv_obj->base);

        drv->driver.name = net_drv_obj->base.name;

        /* The driver belongs to vmbus */
        ret = vmbus_child_driver_register(&drv->driver);

        return ret;
}

static const struct dmi_system_id __initconst
hv_netvsc_dmi_table[] __maybe_unused = {
        {
                .ident = "Hyper-V",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
                        DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
                },
        },
        { },
};
MODULE_DEVICE_TABLE(dmi, hv_netvsc_dmi_table);
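
/*
 * Note (an inference, not stated in this file): MODULE_DEVICE_TABLE(dmi, ...)
 * exports aliases so userspace (e.g. udev) can autoload this module on
 * machines whose DMI data identifies a Hyper-V guest; dmi_check_system() in
 * netvsc_init() below rechecks the same table at load time.
 */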

static int __init netvsc_init(void)
{
        pr_info("initializing....\n");

        if (!dmi_check_system(hv_netvsc_dmi_table))
                return -ENODEV;

        return netvsc_drv_init(netvsc_initialize);
}

static void __exit netvsc_exit(void)
{
        netvsc_drv_exit();
}

static const struct pci_device_id __initconst
hv_netvsc_pci_table[] __maybe_unused = {
        { PCI_DEVICE(0x1414, 0x5353) }, /* VGA compatible controller */
        { 0 }
};
MODULE_DEVICE_TABLE(pci, hv_netvsc_pci_table);
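
/*
 * Note (an inference): 0x1414 is Microsoft's PCI vendor ID, and 1414:5353 is
 * the VGA-compatible controller that Hyper-V exposes to guests, so this table
 * exists only as a module-autoload hook; no pci_driver is registered against
 * it (hence __maybe_unused).
 */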

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_init);
module_exit(netvsc_exit);