Drivers: hv: Get rid of hv_get_ringbuffer_interrupt_mask()
drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        smp_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        u32 read;
        u32 write;

        rbi->ring_buffer->interrupt_mask = 0;
        smp_mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        hv_get_ringbuffer_availbytes(rbi, &read, &write);

        return read;
}
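
/*
 * For illustration, a minimal sketch of how a consumer might pair
 * hv_begin_read()/hv_end_read() to avoid losing a wakeup.  The names
 * channel and drain_all_messages() are hypothetical, not part of this
 * file:
 *
 *      hv_begin_read(&channel->inbound);
 *      for (;;) {
 *              drain_all_messages(channel);
 *              if (hv_end_read(&channel->inbound) == 0)
 *                      break;                    // truly empty
 *              hv_begin_read(&channel->inbound); // raced; mask and retry
 *      }
 *
 * hv_end_read() returns the number of bytes still available to read;
 * a non-zero return means data arrived after the final drain pass, so
 * the loop must mask interrupts again and drain once more.
 */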

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 */

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
        if (rbi->ring_buffer->interrupt_mask)
                return false;

        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == rbi->ring_buffer->read_index)
                return true;

        return false;
}
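
/*
 * A worked example of the empty-to-non-empty test above: suppose
 * read_index == write_index == 100, i.e. the ring is empty.  A writer
 * captures old_write == 100, copies its packet, and advances
 * write_index.  Since old_write equals read_index, the ring just went
 * from empty to non-empty and the host must be signaled.  A second
 * writer arriving before the host drains captures old_write != 100,
 * so it does not signal again; by rule 2 above the host will still
 * see the second packet before leaving its read loop.
 */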

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data (such as a packet
 * descriptor it has already examined) before copying.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}


/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}


/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed as a u64: the write index goes
 * in the upper 32 bits; the lower 32 bits (the read index slot) are
 * left zero.
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
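
/*
 * The packed value above is appended as a trailer to every packet
 * written to the ring (see prev_indices in hv_ringbuffer_write()
 * below).  Because it is captured before the write index is updated,
 * it records the write index at which the packet started;
 * hv_ringbuffer_read() consumes (and currently discards) this
 * trailer.
 */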

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy data from the ring buffer to the destination.
 * Assume there is enough room. Handles wrap-around in src case only!!
 *
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        void *dest,
        u32 destlen,
        u32 start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;

                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else {
                memcpy(dest, ring_buffer + start_read_offset, destlen);
        }

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}
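
/*
 * A worked example of the wrap-around arithmetic above (the numbers
 * are hypothetical): with ring_buffer_size == 4096, start_read_offset
 * == 4000 and destlen == 200, the tail fragment is frag_len == 96
 * bytes (offsets 4000..4095), the remaining 104 bytes come from the
 * start of the buffer, and the returned offset is
 * (4000 + 200) % 4096 == 104.  hv_copyto_ringbuffer() below applies
 * the same arithmetic in the write direction.
 */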


/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 *
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else {
                memcpy(ring_buffer + start_write_offset, src, srclen);
        }

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
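
/*
 * For illustration: the buffer handed to hv_ringbuffer_init() is one
 * PAGE_SIZE control page (struct hv_ring_buffer, holding read_index,
 * write_index and interrupt_mask) followed by the data area, so a
 * hypothetical caller passing a buflen of 4 pages gets a
 * ring_datasize of 3 pages:
 *
 *      buffer:  [ control page ][ data page ][ data page ][ data page ]
 *      offset:  0              PAGE_SIZE                         buflen
 */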

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct scatterlist *sglist, u32 sgcount, bool *signal)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;

        struct scatterlist *sg;
        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags;

        for_each_sg(sglist, sg, sgcount, i) {
                totalbytes_towrite += sg->length;
        }

        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /*
         * If there is only room for the packet, assume it is full.
         * Otherwise, the next time around, we think the ring buffer
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for_each_sg(sglist, sg, sgcount, i) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           sg_virt(sg),
                                                           sg->length);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        smp_mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);

        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
}
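
/*
 * A minimal write-side sketch, loosely modeled on how the vmbus
 * channel code sends a packet; desc, payload, payload_len and channel
 * are hypothetical, and error handling is elided:
 *
 *      struct scatterlist sgl[2];
 *      bool signal = false;
 *      int ret;
 *
 *      sg_init_table(sgl, 2);
 *      sg_set_buf(&sgl[0], &desc, sizeof(desc));
 *      sg_set_buf(&sgl[1], payload, payload_len);
 *
 *      ret = hv_ringbuffer_write(&channel->outbound, sgl, 2, &signal);
 *      if (ret == 0 && signal)
 *              vmbus_setevent(channel);  // tell the host data arrived
 *
 * The signal flag comes from hv_need_to_signal(): the host is
 * interrupted only when the write made the ring non-empty.
 */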


/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        unsigned long flags;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Convert to byte offset */
        next_read_location = hv_get_next_read_location(inring_info);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        return 0;
}


/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
                       u32 buflen, u32 offset)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;

        if (buflen <= 0)
                return -EINVAL;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < buflen) {
                spin_unlock_irqrestore(&inring_info->ring_lock, flags);

                return -EAGAIN;
        }

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    buflen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index,
         * since the writer may start writing to the read area once the
         * read index is updated.
         */
        smp_mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        spin_unlock_irqrestore(&inring_info->ring_lock, flags);

        return 0;
}
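
/*
 * A minimal sketch of the peek-then-read consume pattern these two
 * routines support, assuming struct vmpacket_descriptor from the
 * hyperv headers; channel and pkt are hypothetical:
 *
 *      struct vmpacket_descriptor desc;
 *      u32 packetlen;
 *      int ret;
 *
 *      ret = hv_ringbuffer_peek(&channel->inbound, &desc, sizeof(desc));
 *      if (ret != 0)
 *              return ret;
 *
 *      packetlen = desc.len8 << 3;   // len8 counts 8-byte units
 *      ret = hv_ringbuffer_read(&channel->inbound, pkt, packetlen, 0);
 *
 * Peeking at the fixed-size descriptor lets the caller learn the full
 * packet length before consuming it; hv_ringbuffer_read() then also
 * steps over the trailing prev_indices u64 appended by
 * hv_ringbuffer_write(), so callers never see that trailer.
 */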