/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"
void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
	rbi->ring_buffer->interrupt_mask = 1;
	/* Make the mask update visible before the host's next empty-check */
	smp_mb();
}
u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
	u32 read;
	u32 write;

	rbi->ring_buffer->interrupt_mask = 0;
	smp_mb();

	/*
	 * Now check to see if the ring buffer is still empty.
	 * If it is not, we raced and we need to process new
	 * incoming messages.
	 */
	hv_get_ringbuffer_availbytes(rbi, &read, &write);

	return read;
}
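
/*
 * Illustrative sketch only, not code in this file: a consumer is
 * expected to bracket its drain loop with the two helpers above, so
 * that the re-check in hv_end_read() closes the race with a writer
 * that slipped in after the last packet was consumed.  The callback
 * and drain-helper names here are hypothetical.
 *
 *	static void example_channel_callback(struct hv_ring_buffer_info *rbi)
 *	{
 *		do {
 *			hv_begin_read(rbi);
 *			example_drain_packets(rbi);
 *		} while (hv_end_read(rbi) != 0);
 *	}
 */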
/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *	1. The host guarantees that while it is draining the
 *	   ring buffer, it will set the interrupt_mask to
 *	   indicate it does not need to be interrupted when
 *	   new data is placed.
 *
 *	2. The host guarantees that it will completely drain
 *	   the ring buffer before exiting the read loop. Further,
 *	   once the ring buffer is empty, it will clear the
 *	   interrupt_mask and re-check to see if new data has
 *	   arrived.
 */
static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
	if (rbi->ring_buffer->interrupt_mask)
		return false;

	/*
	 * This is the only case we need to signal when the
	 * ring transitions from being empty to non-empty.
	 */
	if (old_write == rbi->ring_buffer->read_index)
		return true;

	return false;
}
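
/*
 * Worked example (numbers are illustrative): if read_index is 4096 and
 * our write began at offset 4096, then old_write == read_index, the
 * ring was empty when the write started, the host may have stopped
 * draining, and it must be signaled.  If instead old_write were 8192
 * with read_index at 4096, the host still had unread data in front of
 * it, so by guarantee 2 above it will see the new data on its own and
 * no signal is needed.
 */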
/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}
/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}
/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}
/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over the packet descriptor.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}
/*
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}
/*
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}
/*
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}
/*
 * hv_get_ring_bufferindices()
 *
 * Get the read and write indices as a u64 for the specified ring buffer
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
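
/*
 * A quick worked example of the packing (value is illustrative): with
 * write_index == 0x2a0 this returns 0x000002a000000000.  Only the
 * write index is recorded; the low 32 bits, nominally the read index,
 * are left as zero by this implementation.
 */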
/*
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer into a destination
 * buffer.  Assumes there is enough room.  Handles wrap-around in the
 * src case only!!
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;
		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
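
/*
 * Worked wrap-around example (sizes are illustrative): with
 * ring_buffer_size = 4096, start_read_offset = 4000 and destlen = 200,
 * frag_len = 4096 - 4000 = 96, so 96 bytes come from the tail of the
 * ring and the remaining 104 from its start, and the returned offset
 * is (4000 + 200) % 4096 = 104.  hv_copyto_ringbuffer() below is the
 * mirror image of this for the destination side.
 */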
/*
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy data from a source buffer into the ring
 * buffer.  Assumes there is enough room.  Handles wrap-around in the
 * dest case only!!
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
/*
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					     &bytes_avail_toread,
					     &bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}
/*
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
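
/*
 * Illustrative sketch only, not code from this driver: the caller (in
 * practice the VMBus channel setup code) passes a page-aligned
 * allocation; the first page becomes the struct hv_ring_buffer control
 * page and the rest is ring data.  The allocation below is an
 * assumption for the example.
 *
 *	struct hv_ring_buffer_info rbi;
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, 2);
 *	int ret = ring ? hv_ringbuffer_init(&rbi, ring, 4 * PAGE_SIZE)
 *		       : -ENOMEM;
 */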
/*
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}
/*
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct scatterlist *sglist, u32 sgcount, bool *signal)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u32 old_write;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i)
		totalbytes_towrite += sg->length;

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/*
	 * If there is only room for the packet, assume it is full.
	 * Otherwise, the next time around, we think the ring buffer
	 * is empty since the read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	old_write = next_write_location;

	for_each_sg(sglist, sg, sgcount, i) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
							   next_write_location,
							   sg_virt(sg),
							   sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
						   next_write_location,
						   &prev_indices,
						   sizeof(u64));

	/* Issue a full memory barrier before updating the write index */
	smp_mb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}
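
/*
 * Illustrative sketch only: signal delivery is the caller's job.  The
 * VMBus channel code follows roughly this pattern (the channel pointer
 * and the vmbus_setevent() call are shown as an assumption about the
 * caller, not something this file does):
 *
 *	bool signal = false;
 *
 *	ret = hv_ringbuffer_write(&channel->outbound, sglist, sgcount,
 *				  &signal);
 *	if (ret == 0 && signal)
 *		vmbus_setevent(channel);
 */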
/*
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
		       void *Buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&Inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(Inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&Inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(Inring_info);

	next_read_location = hv_copyfrom_ringbuffer(Inring_info,
						    Buffer,
						    buflen,
						    next_read_location);

	spin_unlock_irqrestore(&Inring_info->ring_lock, flags);

	return 0;
}
/*
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		       u32 buflen, u32 offset)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen <= 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				     &bytes_avail_toread,
				     &bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    buffer,
						    buflen,
						    next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						    &prev_indices,
						    sizeof(u64),
						    next_read_location);

	/*
	 * Make sure all reads are done before we update the read index since
	 * the writer may start writing to the read area once the read index
	 * is updated.
	 */
	smp_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}
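
/*
 * Illustrative consumption pattern, modeled on the VMBus callers as an
 * assumption rather than code in this file: peek at the fixed-size
 * packet descriptor first, then read the payload, using the offset
 * argument to skip the descriptor so the read index advances past the
 * whole packet.
 *
 *	struct vmpacket_descriptor desc;
 *
 *	if (hv_ringbuffer_peek(rbi, &desc, sizeof(desc)) == 0) {
 *		u32 offset = desc.offset8 << 3;
 *		u32 payload_len = (desc.len8 << 3) - offset;
 *
 *		ret = hv_ringbuffer_read(rbi, buf, payload_len, offset);
 *	}
 */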