drivers/staging/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>

#include "hyperv.h"
#include "hyperv_vmbus.h"


/* #defines */

/* Amount of space available to write to */
#define BYTES_AVAIL_TO_WRITE(r, w, z) \
	(((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))

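/*
 * Worked example of the free-space math above: with z = 4096 bytes of
 * ring data, r = 100 and w = 300, the writer owns z - (w - r) =
 * 4096 - 200 = 3896 bytes; once w wraps behind r (say r = 300, w = 100)
 * only r - w = 200 bytes remain. The ring deliberately never fills the
 * last byte (see the <= check in hv_ringbuffer_write()) so that
 * read_index == write_index always means "empty".
 */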

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc;

	smp_read_barrier_depends();

	/* Capture the read/write indices before they change */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;

	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
	*read = rbi->ring_datasize - *write;
}

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data it has already examined
 * (such as a packet descriptor) before copying.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}
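
/*
 * Example of the wrap-around above: with ring_datasize = 4096,
 * read_index = 4000 and offset = 200, the result is
 * (4000 + 200) % 4096 = 104, i.e. the location wraps back past the
 * start of the data area.
 */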

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}


/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}


/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer data area
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed into a u64: the write index in
 * the upper 32 bits, with the lower 32 bits left as zero
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
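
/*
 * The value produced above is written as a u64 trailer after every
 * packet (see hv_ringbuffer_write()): the write index that was current
 * when the packet went in, shifted into the upper 32 bits.
 * hv_ringbuffer_read() copies the trailer back out but otherwise
 * ignores it.
 */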


/*
 *
 * hv_dump_ring_info()
 *
 * Dump the ring buffer info out to the console
 *
 */
void hv_dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	hv_get_ringbuffer_availbytes(ring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	DPRINT(VMBUS,
		DEBUG_RING_LVL,
		"%s <<ringinfo %p buffer %p avail write %u "
		"avail read %u read idx %u write idx %u>>",
		prefix,
		ring_info,
		ring_info->ring_buffer->buffer,
		bytes_avail_towrite,
		bytes_avail_toread,
		ring_info->ring_buffer->read_index,
		ring_info->ring_buffer->write_index);
}


/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy from the ring buffer into a destination
 * buffer. Assumes there is enough room. Handles wrap-around of the
 * source only!!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else
		memcpy(dest, ring_buffer + start_read_offset, destlen);

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
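
/*
 * Wrap-around example for the copy above: with ring_buffer_size = 4096,
 * start_read_offset = 4000 and destlen = 200, only 96 bytes remain
 * before the end of the data area, so frag_len = 96; the first memcpy
 * takes those 96 bytes and the second takes the remaining 104 from the
 * start of the ring. The returned offset is (4000 + 200) % 4096 = 104.
 */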


/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room. Handles wrap-around of the
 * destination only!!
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else
		memcpy(ring_buffer + start_write_offset, src, srclen);

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}


/*
 *
 * hv_get_ringbuffer_interrupt_mask()
 *
 * Get the interrupt mask for the specified ring buffer
 *
 */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->interrupt_mask;
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
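
/*
 * A minimal usage sketch (hypothetical caller, not taken from this
 * file): the buffer must be page-aligned and begin with a
 * struct hv_ring_buffer header, so a caller might do something like
 *
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, get_order(len));
 *	struct hv_ring_buffer_info rbi;
 *
 *	if (buf && hv_ringbuffer_init(&rbi, buf, len) == 0)
 *		... use hv_ringbuffer_write()/hv_ringbuffer_read() ...
 *
 * where len is a multiple of PAGE_SIZE large enough for the header
 * plus the data area.
 */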

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
	/* Nothing to tear down here; the caller owns the buffer memory. */
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct scatterlist *sglist, u32 sgcount)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
		totalbytes_towrite += sg->length;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we would think the ring buffer
	 * is empty since read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -1;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	for_each_sg(sglist, sg, sgcount, i)
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     sg_virt(sg),
						     sg->length);

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Make sure we flush all writes before updating the write index */
	smp_wmb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
	return 0;
}
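
/*
 * A minimal sketch of a caller building the scatterlist (hypothetical;
 * names like desc and payload are illustrative, not from this file):
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], &desc, sizeof(desc));	// packet descriptor
 *	sg_set_buf(&sg[1], payload, payload_len);	// packet body
 *	ret = hv_ringbuffer_write(&outbound_info, sg, 2);
 *
 * hv_ringbuffer_write() appends the u64 index trailer itself, so
 * callers never include it in the scatterlist.
 */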


/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		   void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -1;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}


/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		   u32 buflen, u32 offset)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -1;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	smp_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}
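
/*
 * Typical consumption pattern (a sketch with hypothetical names;
 * struct vmpacket_descriptor is the packet header type used elsewhere
 * in this driver):
 *
 *	struct vmpacket_descriptor desc;
 *
 *	// Look at the descriptor without consuming it
 *	if (hv_ringbuffer_peek(&inbound_info, &desc, sizeof(desc)) == 0) {
 *		// Then consume the packet, skipping the descriptor
 *		// bytes via the offset argument
 *		hv_ringbuffer_read(&inbound_info, buf, pktlen, sizeof(desc));
 *	}
 *
 * The offset passed to hv_ringbuffer_read() is why
 * hv_get_next_readlocation_withoffset() exists.
 */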