drivers/hv/ring_buffer.c
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{
        u32 read;
        u32 write;

        rbi->ring_buffer->interrupt_mask = 0;
        mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        hv_get_ringbuffer_availbytes(rbi, &read, &write);

        return read;
}
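
/*
 * A minimal sketch (not from this file) of how a consumer might use the
 * begin/end pair above to drain the ring without losing a wakeup;
 * packet_available() and process_packet() are hypothetical helpers:
 *
 *      hv_begin_read(rbi);             // mask host interrupts
 *      do {
 *              while (packet_available(rbi))
 *                      process_packet(rbi);
 *      } while (hv_end_read(rbi) != 0); // raced with a writer: drain again
 *
 * hv_end_read() returns the bytes still available to read after the
 * mask is cleared, so a non-zero return means new data slipped in
 * during the race window and another pass is needed.
 */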

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here are the details of this protocol:
 *
 *      1. The host guarantees that while it is draining the
 *         ring buffer, it will set the interrupt_mask to
 *         indicate it does not need to be interrupted when
 *         new data is placed.
 *
 *      2. The host guarantees that it will completely drain
 *         the ring buffer before exiting the read loop. Further,
 *         once the ring buffer is empty, it will clear the
 *         interrupt_mask and re-check to see if new data has
 *         arrived.
 */
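
/*
 * Illustrative walk-through (the numbers are hypothetical): suppose
 * read_index == write_index == 100, i.e. the ring is empty and the
 * host has left its read loop. A writer records old_write = 100,
 * copies a packet in, and advances write_index. Since old_write (100)
 * still equals read_index (100), the ring just went from empty to
 * non-empty, so the host must be signaled. A later write that finds
 * old_write != read_index can rely on the host still being in its
 * read loop and skip the signal.
 */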

static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
{
        mb();
        if (rbi->ring_buffer->interrupt_mask)
                return false;

        /* check interrupt_mask before read_index */
        rmb();
        /*
         * This is the only case we need to signal: when the
         * ring transitions from being empty to non-empty.
         */
        if (old_write == rbi->ring_buffer->read_index)
                return true;

        return false;
}

/*
 * To optimize flow management on the send side, the consumer of the
 * ring buffer can signal the producer when the producer is blocked for
 * lack of sufficient space in the ring buffer. This is controlled by
 * the following parameters:
 *
 *      1. pending_send_sz: the size in bytes that the producer is
 *         trying to send.
 *      2. The feature bit feat_pending_send_sz, set to indicate whether
 *         the consumer of the ring will signal when the ring state
 *         transitions from being full to a state where there is room
 *         for the producer to send the pending packet.
 */
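
/*
 * Illustrative scenario (values are hypothetical): the producer wants
 * to send 512 bytes but the ring is too full, so it sets
 * pending_send_sz = 512 and blocks. The consumer keeps reading; on one
 * read the free space grows from 400 bytes (prev_write_sz) to 600
 * bytes (cur_write_sz). Because 400 < 512 <= 600, the ring has just
 * crossed the "room for the pending packet" threshold, and the
 * consumer signals the producer exactly once; subsequent reads see
 * prev_write_sz >= 512 and stay quiet.
 */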

static bool hv_need_to_signal_on_read(u32 prev_write_sz,
                                      struct hv_ring_buffer_info *rbi)
{
        u32 cur_write_sz;
        u32 r_size;
        u32 write_loc = rbi->ring_buffer->write_index;
        u32 read_loc = rbi->ring_buffer->read_index;
        u32 pending_sz = rbi->ring_buffer->pending_send_sz;

        /* If the other end is not blocked on write, don't bother. */
        if (pending_sz == 0)
                return false;

        r_size = rbi->ring_datasize;
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;

        if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
                return true;

        return false;
}
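
/*
 * Worked example for the free-space computation above (hypothetical
 * numbers): with r_size = 4096, write_loc = 3000 and read_loc = 1000,
 * the in-use region is write_loc - read_loc = 2000 bytes, so
 * cur_write_sz = 4096 - 2000 = 2096 bytes of free space. If instead
 * the writer has wrapped (write_loc = 1000, read_loc = 3000), the free
 * region is simply the gap read_loc - write_loc = 2000 bytes.
 */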

/* Get the next write location for the specified ring buffer. */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->write_index;

        return next;
}

/* Set the next write location for the specified ring buffer. */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
                           u32 next_write_location)
{
        ring_info->ring_buffer->write_index = next_write_location;
}

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
}

/*
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over a packet header or descriptor.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;

        return next;
}

/* Set the next read location for the specified ring buffer. */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
                          u32 next_read_location)
{
        ring_info->ring_buffer->read_index = next_read_location;
}


/* Get the start of the ring buffer. */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
        return (void *)ring_info->ring_buffer->buffer;
}


/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}

/*
 * Get the read and write indices packed as a u64: the write index is
 * placed in the high 32 bits (the low 32 bits, nominally the read
 * index, are left as zero).
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
        return (u64)ring_info->ring_buffer->write_index << 32;
}
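
/*
 * For example (hypothetical value): with write_index == 0x150, the
 * helper above returns 0x0000015000000000. This u64 is appended after
 * each packet's payload in hv_ringbuffer_write() below as the
 * "previous packet start" marker; since the write index is not
 * advanced until after the trailing u64 is written, it records where
 * the packet itself began.
 */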

/*
 * Helper routine to copy from the ring buffer into a destination
 * buffer. Assumes there is enough room. Handles wrap-around in the
 * source (ring) case only!!
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        void *dest,
        u32 destlen,
        u32 start_read_offset)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

        u32 frag_len;

        /* wrap-around detected at the src */
        if (destlen > ring_buffer_size - start_read_offset) {
                frag_len = ring_buffer_size - start_read_offset;

                memcpy(dest, ring_buffer + start_read_offset, frag_len);
                memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
        } else {
                memcpy(dest, ring_buffer + start_read_offset, destlen);
        }

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;

        return start_read_offset;
}
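
/*
 * Wrap-around arithmetic, worked through with hypothetical numbers:
 * with ring_buffer_size = 4096, start_read_offset = 4000 and
 * destlen = 200, the request spills past the end of the ring
 * (200 > 4096 - 4000). The copy is split into frag_len = 96 bytes from
 * offset 4000 plus the remaining 104 bytes from offset 0, and the
 * returned offset is (4000 + 200) % 4096 = 104. hv_copyto_ringbuffer()
 * below performs the mirror-image split on the write side.
 */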


/*
 * Helper routine to copy from source to ring buffer.
 * Assume there is enough room. Handles wrap-around in dest case only!!
 */
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
        u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
        u32 frag_len;

        /* wrap-around detected! */
        if (srclen > ring_buffer_size - start_write_offset) {
                frag_len = ring_buffer_size - start_write_offset;
                memcpy(ring_buffer + start_write_offset, src, frag_len);
                memcpy(ring_buffer, src + frag_len, srclen - frag_len);
        } else {
                memcpy(ring_buffer + start_write_offset, src, srclen);
        }

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;

        return start_write_offset;
}

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;

        if (ring_info->ring_buffer) {
                hv_get_ringbuffer_availbytes(ring_info,
                                             &bytes_avail_toread,
                                             &bytes_avail_towrite);

                debug_info->bytes_avail_toread = bytes_avail_toread;
                debug_info->bytes_avail_towrite = bytes_avail_towrite;
                debug_info->current_read_index =
                        ring_info->ring_buffer->read_index;
                debug_info->current_write_index =
                        ring_info->ring_buffer->write_index;
                debug_info->current_interrupt_mask =
                        ring_info->ring_buffer->interrupt_mask;
        }
}

/* Initialize the ring buffer. */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
                       void *buffer, u32 buflen)
{
        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
                return -EINVAL;

        memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
        ring_info->ring_buffer->read_index =
                ring_info->ring_buffer->write_index = 0;

        /* Set the feature bit for enabling flow control. */
        ring_info->ring_buffer->feature_bits.value = 1;

        ring_info->ring_size = buflen;
        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

        spin_lock_init(&ring_info->ring_lock);

        return 0;
}
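
/*
 * Minimal usage sketch, assuming a caller that allocates page-aligned
 * memory for the ring (the allocation below is hypothetical, not part
 * of this file):
 *
 *      struct hv_ring_buffer_info rbi;
 *      void *pages = (void *)__get_free_pages(GFP_KERNEL, 2); // 4 pages
 *
 *      // The first page holds struct hv_ring_buffer (the control
 *      // header); the remaining pages form the ring_datasize data area.
 *      if (pages && hv_ringbuffer_init(&rbi, pages, 4 * PAGE_SIZE) == 0)
 *              ready = true; // ring usable for the write/read paths below
 */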

/* Cleanup the ring buffer. */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
                        struct kvec *kv_list, u32 kv_count, bool *signal)
{
        int i = 0;
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 totalbytes_towrite = 0;

        u32 next_write_location;
        u32 old_write;
        u64 prev_indices = 0;
        unsigned long flags;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        totalbytes_towrite += sizeof(u64);

        spin_lock_irqsave(&outring_info->ring_lock, flags);

        hv_get_ringbuffer_availbytes(outring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /*
         * If there is only exactly room for the packet, treat the ring
         * as full. If we allowed the write to fill the ring completely,
         * read_index would equal write_index afterwards, which is
         * indistinguishable from an empty ring.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

        /* Write to the ring buffer */
        next_write_location = hv_get_next_write_location(outring_info);

        old_write = next_write_location;

        for (i = 0; i < kv_count; i++) {
                next_write_location = hv_copyto_ringbuffer(outring_info,
                                                           next_write_location,
                                                           kv_list[i].iov_base,
                                                           kv_list[i].iov_len);
        }

        /* Set previous packet start */
        prev_indices = hv_get_ring_bufferindices(outring_info);

        next_write_location = hv_copyto_ringbuffer(outring_info,
                                                   next_write_location,
                                                   &prev_indices,
                                                   sizeof(u64));

        /* Issue a full memory barrier before updating the write index */
        mb();

        /* Now, update the write location */
        hv_set_next_write_location(outring_info, next_write_location);


        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        *signal = hv_need_to_signal(old_write, outring_info);
        return 0;
}
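
/*
 * Usage sketch for the writer, assuming a caller with its payload
 * already laid out in memory (hdr, body and body_len are hypothetical
 * names, not part of this file):
 *
 *      struct kvec kv[2] = {
 *              { .iov_base = &hdr, .iov_len = sizeof(hdr) },
 *              { .iov_base = body, .iov_len = body_len    },
 *      };
 *      bool signal = false;
 *
 *      if (hv_ringbuffer_write(&rbi, kv, 2, &signal) == -EAGAIN)
 *              ;       // ring full: back off and retry later
 *      else if (signal)
 *              ;       // ring went empty -> non-empty: interrupt the
 *                      // host via the channel's signaling path
 */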

int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
                       void *buffer, u32 buflen, u32 *buffer_actual_len,
                       u64 *requestid, bool *signal, bool raw)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
        u32 next_read_location = 0;
        u64 prev_indices = 0;
        unsigned long flags;
        struct vmpacket_descriptor desc;
        u32 offset;
        u32 packetlen;
        int ret = 0;

        if (buflen == 0)
                return -EINVAL;

        spin_lock_irqsave(&inring_info->ring_lock, flags);

        *buffer_actual_len = 0;
        *requestid = 0;

        hv_get_ringbuffer_availbytes(inring_info,
                                     &bytes_avail_toread,
                                     &bytes_avail_towrite);

        /* Make sure there is something to read */
        if (bytes_avail_toread < sizeof(desc)) {
                /*
                 * No error is set when there is not even a packet header
                 * to read; drivers are expected to check
                 * *buffer_actual_len instead.
                 */
                goto out_unlock;
        }

        next_read_location = hv_get_next_read_location(inring_info);
        next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
                                                    sizeof(desc),
                                                    next_read_location);

        offset = raw ? 0 : (desc.offset8 << 3);
        packetlen = (desc.len8 << 3) - offset;
        *buffer_actual_len = packetlen;
        *requestid = desc.trans_id;

        if (bytes_avail_toread < packetlen + offset) {
                ret = -EAGAIN;
                goto out_unlock;
        }

        if (packetlen > buflen) {
                ret = -ENOBUFS;
                goto out_unlock;
        }

        next_read_location =
                hv_get_next_readlocation_withoffset(inring_info, offset);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    buffer,
                                                    packetlen,
                                                    next_read_location);

        next_read_location = hv_copyfrom_ringbuffer(inring_info,
                                                    &prev_indices,
                                                    sizeof(u64),
                                                    next_read_location);

        /*
         * Make sure all reads are done before we update the read index since
         * the writer may start writing to the read area once the read index
         * is updated.
         */
        mb();

        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);

        *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);

out_unlock:
        spin_unlock_irqrestore(&inring_info->ring_lock, flags);
        return ret;
}
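
/*
 * Packet layout arithmetic used by hv_ringbuffer_read() above, worked
 * with hypothetical values: offset8 and len8 count 8-byte units, so a
 * descriptor with offset8 = 2 and len8 = 10 describes a 16-byte header
 * followed by (10 << 3) - 16 = 64 bytes of payload, with the whole
 * packet trailed by the 8-byte prev_indices marker. In raw mode
 * (raw == true) the offset is forced to 0, so the caller receives the
 * descriptor and payload together as one 80-byte blob.
 */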