Staging: hv: netvsc: Get rid of the refcnt field in struct netvsc_device
[pandora-kernel.git] / drivers / staging / hv / netvsc.c
1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Authors:
18  *   Haiyang Zhang <haiyangz@microsoft.com>
19  *   Hank Janssen  <hjanssen@microsoft.com>
20  */
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/kernel.h>
24 #include <linux/sched.h>
25 #include <linux/wait.h>
26 #include <linux/mm.h>
27 #include <linux/delay.h>
28 #include <linux/io.h>
29 #include <linux/slab.h>
30
31 #include "hyperv.h"
32 #include "hyperv_net.h"
33
34
35 static struct netvsc_device *alloc_net_device(struct hv_device *device)
36 {
37         struct netvsc_device *net_device;
38
39         net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
40         if (!net_device)
41                 return NULL;
42
43
44         net_device->destroy = false;
45         net_device->dev = device;
46         device->ext = net_device;
47
48         return net_device;
49 }
50
51 static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
52 {
53         struct netvsc_device *net_device;
54
55         net_device = device->ext;
56         if (net_device && net_device->destroy)
57                 net_device = NULL;
58
59         return net_device;
60 }
61
62 static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
63 {
64         struct netvsc_device *net_device;
65         unsigned long flags;
66
67         spin_lock_irqsave(&device->channel->inbound_lock, flags);
68         net_device = device->ext;
69
70         if (!net_device)
71                 goto get_in_err;
72
73         if (net_device->destroy &&
74                 atomic_read(&net_device->num_outstanding_sends) == 0)
75                 net_device = NULL;
76
77 get_in_err:
78         spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
79         return net_device;
80 }
81
82
83 static int netvsc_destroy_recv_buf(struct netvsc_device *net_device)
84 {
85         struct nvsp_message *revoke_packet;
86         int ret = 0;
87
88         /*
89          * If we got a section count, it means we received a
90          * SendReceiveBufferComplete msg (ie sent
91          * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
92          * to send a revoke msg here
93          */
94         if (net_device->recv_section_cnt) {
95                 /* Send the revoke receive buffer */
96                 revoke_packet = &net_device->revoke_packet;
97                 memset(revoke_packet, 0, sizeof(struct nvsp_message));
98
99                 revoke_packet->hdr.msg_type =
100                         NVSP_MSG1_TYPE_REVOKE_RECV_BUF;
101                 revoke_packet->msg.v1_msg.
102                 revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
103
104                 ret = vmbus_sendpacket(net_device->dev->channel,
105                                        revoke_packet,
106                                        sizeof(struct nvsp_message),
107                                        (unsigned long)revoke_packet,
108                                        VM_PKT_DATA_INBAND, 0);
109                 /*
110                  * If we failed here, we might as well return and
111                  * have a leak rather than continue and a bugchk
112                  */
113                 if (ret != 0) {
114                         dev_err(&net_device->dev->device, "unable to send "
115                                 "revoke receive buffer to netvsp");
116                         return ret;
117                 }
118         }
119
120         /* Teardown the gpadl on the vsp end */
121         if (net_device->recv_buf_gpadl_handle) {
122                 ret = vmbus_teardown_gpadl(net_device->dev->channel,
123                            net_device->recv_buf_gpadl_handle);
124
125                 /* If we failed here, we might as well return and have a leak
126                  * rather than continue and a bugchk
127                  */
128                 if (ret != 0) {
129                         dev_err(&net_device->dev->device,
130                                    "unable to teardown receive buffer's gpadl");
131                         return -ret;
132                 }
133                 net_device->recv_buf_gpadl_handle = 0;
134         }
135
136         if (net_device->recv_buf) {
137                 /* Free up the receive buffer */
138                 free_pages((unsigned long)net_device->recv_buf,
139                         get_order(net_device->recv_buf_size));
140                 net_device->recv_buf = NULL;
141         }
142
143         if (net_device->recv_section) {
144                 net_device->recv_section_cnt = 0;
145                 kfree(net_device->recv_section);
146                 net_device->recv_section = NULL;
147         }
148
149         return ret;
150 }
151
152 static int netvsc_init_recv_buf(struct hv_device *device)
153 {
154         int ret = 0;
155         int t;
156         struct netvsc_device *net_device;
157         struct nvsp_message *init_packet;
158
159         net_device = get_outbound_net_device(device);
160         if (!net_device) {
161                 dev_err(&device->device, "unable to get net device..."
162                            "device being destroyed?");
163                 return -ENODEV;
164         }
165
166         net_device->recv_buf =
167                 (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
168                                 get_order(net_device->recv_buf_size));
169         if (!net_device->recv_buf) {
170                 dev_err(&device->device, "unable to allocate receive "
171                         "buffer of size %d", net_device->recv_buf_size);
172                 ret = -ENOMEM;
173                 goto cleanup;
174         }
175
176         /*
177          * Establish the gpadl handle for this buffer on this
178          * channel.  Note: This call uses the vmbus connection rather
179          * than the channel to establish the gpadl handle.
180          */
181         ret = vmbus_establish_gpadl(device->channel, net_device->recv_buf,
182                                     net_device->recv_buf_size,
183                                     &net_device->recv_buf_gpadl_handle);
184         if (ret != 0) {
185                 dev_err(&device->device,
186                         "unable to establish receive buffer's gpadl");
187                 goto cleanup;
188         }
189
190
191         /* Notify the NetVsp of the gpadl handle */
192         init_packet = &net_device->channel_init_pkt;
193
194         memset(init_packet, 0, sizeof(struct nvsp_message));
195
196         init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
197         init_packet->msg.v1_msg.send_recv_buf.
198                 gpadl_handle = net_device->recv_buf_gpadl_handle;
199         init_packet->msg.v1_msg.
200                 send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
201
202         /* Send the gpadl notification request */
203         ret = vmbus_sendpacket(device->channel, init_packet,
204                                sizeof(struct nvsp_message),
205                                (unsigned long)init_packet,
206                                VM_PKT_DATA_INBAND,
207                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
208         if (ret != 0) {
209                 dev_err(&device->device,
210                         "unable to send receive buffer's gpadl to netvsp");
211                 goto cleanup;
212         }
213
214         t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
215         BUG_ON(t == 0);
216
217
218         /* Check the response */
219         if (init_packet->msg.v1_msg.
220             send_recv_buf_complete.status != NVSP_STAT_SUCCESS) {
221                 dev_err(&device->device, "Unable to complete receive buffer "
222                            "initialzation with NetVsp - status %d",
223                            init_packet->msg.v1_msg.
224                            send_recv_buf_complete.status);
225                 ret = -EINVAL;
226                 goto cleanup;
227         }
228
229         /* Parse the response */
230
231         net_device->recv_section_cnt = init_packet->msg.
232                 v1_msg.send_recv_buf_complete.num_sections;
233
234         net_device->recv_section = kmalloc(net_device->recv_section_cnt
235                 * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
236         if (net_device->recv_section == NULL) {
237                 ret = -EINVAL;
238                 goto cleanup;
239         }
240
241         memcpy(net_device->recv_section,
242                 init_packet->msg.v1_msg.
243                send_recv_buf_complete.sections,
244                 net_device->recv_section_cnt *
245                sizeof(struct nvsp_1_receive_buffer_section));
246
247         /*
248          * For 1st release, there should only be 1 section that represents the
249          * entire receive buffer
250          */
251         if (net_device->recv_section_cnt != 1 ||
252             net_device->recv_section->offset != 0) {
253                 ret = -EINVAL;
254                 goto cleanup;
255         }
256
257         goto exit;
258
259 cleanup:
260         netvsc_destroy_recv_buf(net_device);
261
262 exit:
263         return ret;
264 }
265
266
/*
 * netvsc_connect_vsp - Perform the NVSP handshake with the host-side
 * NetVSP: negotiate a protocol version, announce our NDIS version and
 * post the receive buffer.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the three exchanges below are ordered protocol steps;
 * do not reorder them.
 */
static int netvsc_connect_vsp(struct hv_device *device)
{
	int ret, t;
	struct netvsc_device *net_device;
	struct nvsp_message *init_packet;
	int ndis_version;

	net_device = get_outbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			   "device being destroyed?");
		return -ENODEV;
	}

	init_packet = &net_device->channel_init_pkt;

	/* Step 1: propose the protocol version range we support */
	memset(init_packet, 0, sizeof(struct nvsp_message));
	init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
	init_packet->msg.init_msg.init.min_protocol_ver =
		NVSP_MIN_PROTOCOL_VERSION;
	init_packet->msg.init_msg.init.max_protocol_ver =
		NVSP_MAX_PROTOCOL_VERSION;

	/* Send the init request */
	ret = vmbus_sendpacket(device->channel, init_packet,
			       sizeof(struct nvsp_message),
			       (unsigned long)init_packet,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);

	if (ret != 0)
		goto cleanup;

	/* netvsc_send_completion() copies the host's reply back into
	 * channel_init_pkt and signals channel_init_wait */
	t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);

	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}

	if (init_packet->msg.init_msg.init_complete.status !=
	    NVSP_STAT_SUCCESS) {
		ret = -EINVAL;
		goto cleanup;
	}

	/* This driver only speaks protocol version 1 */
	if (init_packet->msg.init_msg.init_complete.
	    negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
		ret = -EPROTO;
		goto cleanup;
	}
	/* Step 2: send the ndis version */
	memset(init_packet, 0, sizeof(struct nvsp_message));

	ndis_version = 0x00050000;	/* NDIS 5.0: major in high 16 bits */

	init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_major_ver =
				(ndis_version & 0xFFFF0000) >> 16;
	init_packet->msg.v1_msg.
		send_ndis_ver.ndis_minor_ver =
				ndis_version & 0xFFFF;

	/* Send the init request; no completion is requested for this one */
	ret = vmbus_sendpacket(device->channel, init_packet,
				sizeof(struct nvsp_message),
				(unsigned long)init_packet,
				VM_PKT_DATA_INBAND, 0);
	if (ret != 0)
		goto cleanup;

	/* Step 3: post the big receive buffer to NetVSP */
	ret = netvsc_init_recv_buf(device);

cleanup:
	return ret;
}
345
/*
 * netvsc_disconnect_vsp - Disconnect from the host-side NetVSP.
 * Currently the only connection state to undo is the receive buffer.
 */
static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
{
	netvsc_destroy_recv_buf(net_device);
}
350
/*
 * netvsc_device_remove - Callback when the root bus device is removed.
 * Marks the device as being destroyed, drains outstanding sends,
 * disconnects from the host, closes the channel and frees all state.
 * Returns 0.
 */
int netvsc_device_remove(struct hv_device *device)
{
	struct netvsc_device *net_device;
	struct hv_netvsc_packet *netvsc_packet, *pos;
	unsigned long flags;

	net_device = (struct netvsc_device *)device->ext;
	/* After this, get_outbound_net_device() refuses new sends */
	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	net_device->destroy = true;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* Wait for all send completions */
	/* NOTE(review): busy-waits with udelay(); this appears to run in
	 * process context, so a sleeping wait may be preferable - confirm */
	while (atomic_read(&net_device->num_outstanding_sends)) {
		dev_err(&device->device,
			"waiting for %d requests to complete...",
			atomic_read(&net_device->num_outstanding_sends));
		udelay(100);
	}

	netvsc_disconnect_vsp(net_device);

	/*
	 * Since we have already drained, we don't need to busy wait
	 * as was done in final_release_stor_device()
	 * Note that we cannot set the ext pointer to NULL until
	 * we have drained - to drain the outgoing packets, we need to
	 * allow incoming packets.
	 */

	spin_lock_irqsave(&device->channel->inbound_lock, flags);
	device->ext = NULL;
	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);

	/* At this point, no one should be accessing netDevice except in here */
	dev_notice(&device->device, "net device safe to remove");

	/* Now, we can close the channel safely */
	vmbus_close(device->channel);

	/* Release all resources */
	list_for_each_entry_safe(netvsc_packet, pos,
				 &net_device->recv_pkt_list, list_ent) {
		list_del(&netvsc_packet->list_ent);
		kfree(netvsc_packet);
	}

	kfree(net_device);
	return 0;
}
403
404 static void netvsc_send_completion(struct hv_device *device,
405                                    struct vmpacket_descriptor *packet)
406 {
407         struct netvsc_device *net_device;
408         struct nvsp_message *nvsp_packet;
409         struct hv_netvsc_packet *nvsc_packet;
410
411         net_device = get_inbound_net_device(device);
412         if (!net_device) {
413                 dev_err(&device->device, "unable to get net device..."
414                            "device being destroyed?");
415                 return;
416         }
417
418         nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
419                         (packet->offset8 << 3));
420
421         if ((nvsp_packet->hdr.msg_type == NVSP_MSG_TYPE_INIT_COMPLETE) ||
422             (nvsp_packet->hdr.msg_type ==
423              NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE) ||
424             (nvsp_packet->hdr.msg_type ==
425              NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE)) {
426                 /* Copy the response back */
427                 memcpy(&net_device->channel_init_pkt, nvsp_packet,
428                        sizeof(struct nvsp_message));
429                 complete(&net_device->channel_init_wait);
430         } else if (nvsp_packet->hdr.msg_type ==
431                    NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
432                 /* Get the send context */
433                 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
434                         packet->trans_id;
435
436                 /* Notify the layer above us */
437                 nvsc_packet->completion.send.send_completion(
438                         nvsc_packet->completion.send.send_completion_ctx);
439
440                 atomic_dec(&net_device->num_outstanding_sends);
441         } else {
442                 dev_err(&device->device, "Unknown send completion packet type- "
443                            "%d received!!", nvsp_packet->hdr.msg_type);
444         }
445
446 }
447
448 int netvsc_send(struct hv_device *device,
449                         struct hv_netvsc_packet *packet)
450 {
451         struct netvsc_device *net_device;
452         int ret = 0;
453
454         struct nvsp_message sendMessage;
455
456         net_device = get_outbound_net_device(device);
457         if (!net_device) {
458                 dev_err(&device->device, "net device (%p) shutting down..."
459                            "ignoring outbound packets", net_device);
460                 return -ENODEV;
461         }
462
463         sendMessage.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
464         if (packet->is_data_pkt) {
465                 /* 0 is RMC_DATA; */
466                 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 0;
467         } else {
468                 /* 1 is RMC_CONTROL; */
469                 sendMessage.msg.v1_msg.send_rndis_pkt.channel_type = 1;
470         }
471
472         /* Not using send buffer section */
473         sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_index =
474                 0xFFFFFFFF;
475         sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
476
477         if (packet->page_buf_cnt) {
478                 ret = vmbus_sendpacket_pagebuffer(device->channel,
479                                                   packet->page_buf,
480                                                   packet->page_buf_cnt,
481                                                   &sendMessage,
482                                                   sizeof(struct nvsp_message),
483                                                   (unsigned long)packet);
484         } else {
485                 ret = vmbus_sendpacket(device->channel, &sendMessage,
486                                 sizeof(struct nvsp_message),
487                                 (unsigned long)packet,
488                                 VM_PKT_DATA_INBAND,
489                                 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
490
491         }
492
493         if (ret != 0)
494                 dev_err(&device->device, "Unable to send packet %p ret %d",
495                            packet, ret);
496
497         atomic_inc(&net_device->num_outstanding_sends);
498         return ret;
499 }
500
501 static void netvsc_send_recv_completion(struct hv_device *device,
502                                         u64 transaction_id)
503 {
504         struct nvsp_message recvcompMessage;
505         int retries = 0;
506         int ret;
507
508         recvcompMessage.hdr.msg_type =
509                                 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
510
511         /* FIXME: Pass in the status */
512         recvcompMessage.msg.v1_msg.send_rndis_pkt_complete.status =
513                 NVSP_STAT_SUCCESS;
514
515 retry_send_cmplt:
516         /* Send the completion */
517         ret = vmbus_sendpacket(device->channel, &recvcompMessage,
518                                sizeof(struct nvsp_message), transaction_id,
519                                VM_PKT_COMP, 0);
520         if (ret == 0) {
521                 /* success */
522                 /* no-op */
523         } else if (ret == -EAGAIN) {
524                 /* no more room...wait a bit and attempt to retry 3 times */
525                 retries++;
526                 dev_err(&device->device, "unable to send receive completion pkt"
527                         " (tid %llx)...retrying %d", transaction_id, retries);
528
529                 if (retries < 4) {
530                         udelay(100);
531                         goto retry_send_cmplt;
532                 } else {
533                         dev_err(&device->device, "unable to send receive "
534                                 "completion pkt (tid %llx)...give up retrying",
535                                 transaction_id);
536                 }
537         } else {
538                 dev_err(&device->device, "unable to send receive "
539                         "completion pkt - %llx", transaction_id);
540         }
541 }
542
/*
 * Per-packet receive completion callback.  Returns the packet to the
 * free list and, when it is the last packet belonging to its xfer-page
 * parent, sends the receive completion for the whole transfer back to
 * the host (ie NetVsp).
 */
static void netvsc_receive_completion(void *context)
{
	struct hv_netvsc_packet *packet = context;
	struct hv_device *device = (struct hv_device *)packet->device;
	struct netvsc_device *net_device;
	u64 transaction_id = 0;
	bool fsend_receive_comp = false;
	unsigned long flags;

	/*
	 * Even though it seems logical to do a GetOutboundNetDevice() here to
	 * send out receive completion, we are using GetInboundNetDevice()
	 * since we may have disable outbound traffic already.
	 */
	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			   "device being destroyed?");
		return;
	}

	/* Overloading use of the lock. */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);

	/* One fewer child packet still in flight for this transfer */
	packet->xfer_page_pkt->count--;

	/*
	 * Last one in the line that represent 1 xfer page packet.
	 * Return the xfer page packet itself to the freelist
	 */
	if (packet->xfer_page_pkt->count == 0) {
		fsend_receive_comp = true;
		transaction_id = packet->completion.recv.recv_completion_tid;
		list_add_tail(&packet->xfer_page_pkt->list_ent,
			      &net_device->recv_pkt_list);

	}

	/* Put the packet back */
	list_add_tail(&packet->list_ent, &net_device->recv_pkt_list);
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/* Send a receive completion for the xfer page packet */
	if (fsend_receive_comp)
		netvsc_send_recv_completion(device, transaction_id);

}
591
/*
 * netvsc_receive - Handle an inbound xfer-page packet from the host.
 * Splits the packet into one hv_netvsc_packet per range (drawn from
 * the free list), translates each range's offset within the receive
 * buffer into physical page buffers, and hands each one to the RNDIS
 * filter.  Completions are sent back via netvsc_receive_completion().
 */
static void netvsc_receive(struct hv_device *device,
			    struct vmpacket_descriptor *packet)
{
	struct netvsc_device *net_device;
	struct vmtransfer_page_packet_header *vmxferpage_packet;
	struct nvsp_message *nvsp_packet;
	struct hv_netvsc_packet *netvsc_packet = NULL;
	unsigned long start;
	unsigned long end, end_virtual;
	/* struct netvsc_driver *netvscDriver; */
	struct xferpage_packet *xferpage_packet = NULL;
	int i, j;
	int count = 0, bytes_remain = 0;
	unsigned long flags;

	LIST_HEAD(listHead);

	net_device = get_inbound_net_device(device);
	if (!net_device) {
		dev_err(&device->device, "unable to get net device..."
			   "device being destroyed?");
		return;
	}

	/*
	 * All inbound packets other than send completion should be xfer page
	 * packet
	 */
	if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
		dev_err(&device->device, "Unknown packet type received - %d",
			   packet->type);
		return;
	}

	/* The NVSP message follows the vmbus descriptor header */
	nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
			(packet->offset8 << 3));

	/* Make sure this is a valid nvsp packet */
	if (nvsp_packet->hdr.msg_type !=
	    NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
		dev_err(&device->device, "Unknown nvsp packet type received-"
			" %d", nvsp_packet->hdr.msg_type);
		return;
	}

	vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;

	if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
		dev_err(&device->device, "Invalid xfer page set id - "
			   "expecting %x got %x", NETVSC_RECEIVE_BUFFER_ID,
			   vmxferpage_packet->xfer_pageset_id);
		return;
	}

	/*
	 * Grab free packets (range count + 1) to represent this xfer
	 * page packet. +1 to represent the xfer page packet itself.
	 * We grab it here so that we know exactly how many we can
	 * fulfil
	 */
	spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
	while (!list_empty(&net_device->recv_pkt_list)) {
		list_move_tail(net_device->recv_pkt_list.next, &listHead);
		if (++count == vmxferpage_packet->range_cnt + 1)
			break;
	}
	spin_unlock_irqrestore(&net_device->recv_pkt_list_lock, flags);

	/*
	 * We need at least 2 netvsc pkts (1 to represent the xfer
	 * page and at least 1 for the range) i.e. we can handled
	 * some of the xfer page packet ranges...
	 */
	if (count < 2) {
		dev_err(&device->device, "Got only %d netvsc pkt...needed "
			"%d pkts. Dropping this xfer page packet completely!",
			count, vmxferpage_packet->range_cnt + 1);

		/* Return it to the freelist */
		spin_lock_irqsave(&net_device->recv_pkt_list_lock, flags);
		for (i = count; i != 0; i--) {
			list_move_tail(listHead.next,
				       &net_device->recv_pkt_list);
		}
		spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
				       flags);

		/* Still ack the transfer so the host can reuse the pages */
		netvsc_send_recv_completion(device,
					    vmxferpage_packet->d.trans_id);

		return;
	}

	/* Remove the 1st packet to represent the xfer page packet itself */
	xferpage_packet = (struct xferpage_packet *)listHead.next;
	list_del(&xferpage_packet->list_ent);

	/* This is how much we can satisfy */
	xferpage_packet->count = count - 1;

	if (xferpage_packet->count != vmxferpage_packet->range_cnt) {
		dev_err(&device->device, "Needed %d netvsc pkts to satisy "
			"this xfer page...got %d",
			vmxferpage_packet->range_cnt, xferpage_packet->count);
	}

	/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
	for (i = 0; i < (count - 1); i++) {
		netvsc_packet = (struct hv_netvsc_packet *)listHead.next;
		list_del(&netvsc_packet->list_ent);

		/* Initialize the netvsc packet */
		netvsc_packet->xfer_page_pkt = xferpage_packet;
		netvsc_packet->completion.recv.recv_completion =
					netvsc_receive_completion;
		netvsc_packet->completion.recv.recv_completion_ctx =
					netvsc_packet;
		netvsc_packet->device = device;
		/* Save this so that we can send it back */
		netvsc_packet->completion.recv.recv_completion_tid =
					vmxferpage_packet->d.trans_id;

		netvsc_packet->total_data_buflen =
					vmxferpage_packet->ranges[i].byte_count;
		netvsc_packet->page_buf_cnt = 1;

		netvsc_packet->page_buf[0].len =
					vmxferpage_packet->ranges[i].byte_count;

		/* Physical address of the range's start inside recv_buf */
		start = virt_to_phys((void *)((unsigned long)net_device->
		recv_buf + vmxferpage_packet->ranges[i].byte_offset));

		netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
		end_virtual = (unsigned long)net_device->recv_buf
		    + vmxferpage_packet->ranges[i].byte_offset
		    + vmxferpage_packet->ranges[i].byte_count - 1;
		end = virt_to_phys((void *)end_virtual);

		/* Calculate the page relative offset */
		netvsc_packet->page_buf[0].offset =
			vmxferpage_packet->ranges[i].byte_offset &
			(PAGE_SIZE - 1);
		if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
			/* Handle frame across multiple pages: first page
			 * gets the bytes up to its page boundary, remaining
			 * bytes are spread over subsequent page buffers */
			netvsc_packet->page_buf[0].len =
				(netvsc_packet->page_buf[0].pfn <<
				 PAGE_SHIFT)
				+ PAGE_SIZE - start;
			bytes_remain = netvsc_packet->total_data_buflen -
					netvsc_packet->page_buf[0].len;
			for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
				netvsc_packet->page_buf[j].offset = 0;
				if (bytes_remain <= PAGE_SIZE) {
					netvsc_packet->page_buf[j].len =
						bytes_remain;
					bytes_remain = 0;
				} else {
					netvsc_packet->page_buf[j].len =
						PAGE_SIZE;
					bytes_remain -= PAGE_SIZE;
				}
				netvsc_packet->page_buf[j].pfn =
				    virt_to_phys((void *)(end_virtual -
						bytes_remain)) >> PAGE_SHIFT;
				netvsc_packet->page_buf_cnt++;
				if (bytes_remain == 0)
					break;
			}
		}

		/* Pass it to the upper layer */
		rndis_filter_receive(device, netvsc_packet);

		/* Drop our reference; sends the host ack when last */
		netvsc_receive_completion(netvsc_packet->
				completion.recv.recv_completion_ctx);
	}

}
770
771 static void netvsc_channel_cb(void *context)
772 {
773         int ret;
774         struct hv_device *device = context;
775         struct netvsc_device *net_device;
776         u32 bytes_recvd;
777         u64 request_id;
778         unsigned char *packet;
779         struct vmpacket_descriptor *desc;
780         unsigned char *buffer;
781         int bufferlen = NETVSC_PACKET_SIZE;
782
783         packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
784                          GFP_ATOMIC);
785         if (!packet)
786                 return;
787         buffer = packet;
788
789         net_device = get_inbound_net_device(device);
790         if (!net_device) {
791                 dev_err(&device->device, "net device (%p) shutting down..."
792                            "ignoring inbound packets", net_device);
793                 goto out;
794         }
795
796         do {
797                 ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
798                                            &bytes_recvd, &request_id);
799                 if (ret == 0) {
800                         if (bytes_recvd > 0) {
801                                 desc = (struct vmpacket_descriptor *)buffer;
802                                 switch (desc->type) {
803                                 case VM_PKT_COMP:
804                                         netvsc_send_completion(device, desc);
805                                         break;
806
807                                 case VM_PKT_DATA_USING_XFER_PAGES:
808                                         netvsc_receive(device, desc);
809                                         break;
810
811                                 default:
812                                         dev_err(&device->device,
813                                                    "unhandled packet type %d, "
814                                                    "tid %llx len %d\n",
815                                                    desc->type, request_id,
816                                                    bytes_recvd);
817                                         break;
818                                 }
819
820                                 /* reset */
821                                 if (bufferlen > NETVSC_PACKET_SIZE) {
822                                         kfree(buffer);
823                                         buffer = packet;
824                                         bufferlen = NETVSC_PACKET_SIZE;
825                                 }
826                         } else {
827                                 /* reset */
828                                 if (bufferlen > NETVSC_PACKET_SIZE) {
829                                         kfree(buffer);
830                                         buffer = packet;
831                                         bufferlen = NETVSC_PACKET_SIZE;
832                                 }
833
834                                 break;
835                         }
836                 } else if (ret == -ENOBUFS) {
837                         /* Handle large packet */
838                         buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
839                         if (buffer == NULL) {
840                                 /* Try again next time around */
841                                 dev_err(&device->device,
842                                            "unable to allocate buffer of size "
843                                            "(%d)!!", bytes_recvd);
844                                 break;
845                         }
846
847                         bufferlen = bytes_recvd;
848                 }
849         } while (1);
850
851 out:
852         kfree(buffer);
853         return;
854 }
855
856 /*
857  * netvsc_device_add - Callback when the device belonging to this
858  * driver is added
859  */
860 int netvsc_device_add(struct hv_device *device, void *additional_info)
861 {
862         int ret = 0;
863         int i;
864         int ring_size =
865         ((struct netvsc_device_info *)additional_info)->ring_size;
866         struct netvsc_device *net_device;
867         struct hv_netvsc_packet *packet, *pos;
868
869         net_device = alloc_net_device(device);
870         if (!net_device) {
871                 ret = -ENOMEM;
872                 goto cleanup;
873         }
874
875         /* Initialize the NetVSC channel extension */
876         net_device->recv_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
877         spin_lock_init(&net_device->recv_pkt_list_lock);
878
879         INIT_LIST_HEAD(&net_device->recv_pkt_list);
880
881         for (i = 0; i < NETVSC_RECEIVE_PACKETLIST_COUNT; i++) {
882                 packet = kzalloc(sizeof(struct hv_netvsc_packet) +
883                                  (NETVSC_RECEIVE_SG_COUNT *
884                                   sizeof(struct hv_page_buffer)), GFP_KERNEL);
885                 if (!packet)
886                         break;
887
888                 list_add_tail(&packet->list_ent,
889                               &net_device->recv_pkt_list);
890         }
891         init_completion(&net_device->channel_init_wait);
892
893         /* Open the channel */
894         ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
895                          ring_size * PAGE_SIZE, NULL, 0,
896                          netvsc_channel_cb, device);
897
898         if (ret != 0) {
899                 dev_err(&device->device, "unable to open channel: %d", ret);
900                 goto cleanup;
901         }
902
903         /* Channel is opened */
904         pr_info("hv_netvsc channel opened successfully");
905
906         /* Connect with the NetVsp */
907         ret = netvsc_connect_vsp(device);
908         if (ret != 0) {
909                 dev_err(&device->device,
910                         "unable to connect to NetVSP - %d", ret);
911                 goto close;
912         }
913
914         return ret;
915
916 close:
917         /* Now, we can close the channel safely */
918         vmbus_close(device->channel);
919
920 cleanup:
921
922         if (net_device) {
923                 list_for_each_entry_safe(packet, pos,
924                                          &net_device->recv_pkt_list,
925                                          list_ent) {
926                         list_del(&packet->list_ent);
927                         kfree(packet);
928                 }
929
930                 kfree(net_device);
931         }
932
933         return ret;
934 }