Diffstat (limited to 'drivers/hv/ring_buffer.c')
-rw-r--r--  drivers/hv/ring_buffer.c | 496
1 file changed, 496 insertions(+), 0 deletions(-)
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
new file mode 100644
index 000000000000..8af25a097d75
--- /dev/null
+++ b/drivers/hv/ring_buffer.c
@@ -0,0 +1,496 @@
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"


/* #defines */


/*
 * Amount of space available to write to, given read index r, write
 * index w and data-area size z. Fully parenthesized so the macro
 * expands safely inside larger expressions.
 */
#define BYTES_AVAIL_TO_WRITE(r, w, z) \
	(((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))
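
/*
 * Worked example (illustrative, not from the original source): with a
 * data area of z = 4096 bytes, read index r = 1000 and write index
 * w = 3000, we have w >= r, so the writer may use
 * 4096 - (3000 - 1000) = 2096 bytes. Once the writer has wrapped past
 * the end (say r = 3000, w = 1000), the free span is simply
 * r - w = 2000 bytes.
 */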

/*
 *
 * hv_get_ringbuffer_availbytes()
 *
 * Get the number of bytes available to read from and to write to
 * the specified ring buffer.
 */
static inline void
hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
			     u32 *read, u32 *write)
{
	u32 read_loc, write_loc;

	smp_read_barrier_depends();

	/* Capture the read/write indices before they change */
	read_loc = rbi->ring_buffer->read_index;
	write_loc = rbi->ring_buffer->write_index;

	*write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
	*read = rbi->ring_datasize - *write;
}
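
/*
 * Note that by construction *read + *write == ring_datasize. The writer
 * (see hv_ringbuffer_write() below) never fills the ring completely, so
 * read_index == write_index always means "empty", never "full".
 */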

/*
 * hv_get_next_write_location()
 *
 * Get the next write location for the specified ring buffer
 *
 */
static inline u32
hv_get_next_write_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->write_index;

	return next;
}

/*
 * hv_set_next_write_location()
 *
 * Set the next write location for the specified ring buffer
 *
 */
static inline void
hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,
			   u32 next_write_location)
{
	ring_info->ring_buffer->write_index = next_write_location;
}

/*
 * hv_get_next_read_location()
 *
 * Get the next read location for the specified ring buffer
 */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
{
	u32 next = ring_info->ring_buffer->read_index;

	return next;
}

/*
 * hv_get_next_readlocation_withoffset()
 *
 * Get the next read location + offset for the specified ring buffer.
 * This allows the caller to skip over data that has already been
 * consumed (e.g. a packet descriptor fetched via hv_ringbuffer_peek()).
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
				    u32 offset)
{
	u32 next = ring_info->ring_buffer->read_index;

	next += offset;
	next %= ring_info->ring_datasize;

	return next;
}

/*
 *
 * hv_set_next_read_location()
 *
 * Set the next read location for the specified ring buffer
 *
 */
static inline void
hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,
			  u32 next_read_location)
{
	ring_info->ring_buffer->read_index = next_read_location;
}


/*
 *
 * hv_get_ring_buffer()
 *
 * Get the start of the ring buffer
 */
static inline void *
hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
{
	return (void *)ring_info->ring_buffer->buffer;
}


/*
 *
 * hv_get_ring_buffersize()
 *
 * Get the size of the ring buffer (data area)
 */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
{
	return ring_info->ring_datasize;
}

/*
 *
 * hv_get_ring_bufferindices()
 *
 * Get the ring buffer indices packed into a u64; only the write index
 * is filled in (upper 32 bits), the read-index half is left zero.
 *
 */
static inline u64
hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
{
	return (u64)ring_info->ring_buffer->write_index << 32;
}
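
/*
 * Example: with write_index == 0x1000 this yields 0x0000100000000000;
 * the low 32 bits (the read-index half) stay zero. The value is
 * appended as an 8-byte trailer after each packet by
 * hv_ringbuffer_write() to record the previous packet start.
 */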

/*
 *
 * hv_copyfrom_ringbuffer()
 *
 * Helper routine to copy from the ring buffer into a destination
 * buffer. Assumes there is enough data available. Handles wrap-around
 * on the source (ring) side only!
 *
 */
static u32 hv_copyfrom_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	void				*dest,
	u32				destlen,
	u32				start_read_offset)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);

	u32 frag_len;

	/* wrap-around detected at the src */
	if (destlen > ring_buffer_size - start_read_offset) {
		frag_len = ring_buffer_size - start_read_offset;

		memcpy(dest, ring_buffer + start_read_offset, frag_len);
		memcpy(dest + frag_len, ring_buffer, destlen - frag_len);
	} else {
		memcpy(dest, ring_buffer + start_read_offset, destlen);
	}

	start_read_offset += destlen;
	start_read_offset %= ring_buffer_size;

	return start_read_offset;
}
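
/*
 * Worked example (illustrative): with ring_buffer_size = 4096,
 * start_read_offset = 4000 and destlen = 200, the copy wraps:
 * frag_len = 96 bytes come from offsets 4000..4095 and the remaining
 * 104 bytes from offset 0; the returned offset is
 * (4000 + 200) % 4096 = 104.
 */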

/*
 *
 * hv_copyto_ringbuffer()
 *
 * Helper routine to copy from a source buffer into the ring buffer.
 * Assumes there is enough room. Handles wrap-around on the
 * destination (ring) side only!
 *
 */
static u32 hv_copyto_ringbuffer(
	struct hv_ring_buffer_info	*ring_info,
	u32				start_write_offset,
	void				*src,
	u32				srclen)
{
	void *ring_buffer = hv_get_ring_buffer(ring_info);
	u32 ring_buffer_size = hv_get_ring_buffersize(ring_info);
	u32 frag_len;

	/* wrap-around detected! */
	if (srclen > ring_buffer_size - start_write_offset) {
		frag_len = ring_buffer_size - start_write_offset;
		memcpy(ring_buffer + start_write_offset, src, frag_len);
		memcpy(ring_buffer, src + frag_len, srclen - frag_len);
	} else {
		memcpy(ring_buffer + start_write_offset, src, srclen);
	}

	start_write_offset += srclen;
	start_write_offset %= ring_buffer_size;

	return start_write_offset;
}
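
/*
 * Both copy helpers assume their caller has already verified, under
 * ring_lock, that enough data (or space) is available; beyond the
 * wrap-around handling they do no bounds checking of their own.
 */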

/*
 *
 * hv_ringbuffer_get_debuginfo()
 *
 * Get various debug metrics for the specified ring buffer
 *
 */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
			    struct hv_ring_buffer_debug_info *debug_info)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;

	if (ring_info->ring_buffer) {
		hv_get_ringbuffer_availbytes(ring_info,
					&bytes_avail_toread,
					&bytes_avail_towrite);

		debug_info->bytes_avail_toread = bytes_avail_toread;
		debug_info->bytes_avail_towrite = bytes_avail_towrite;
		debug_info->current_read_index =
			ring_info->ring_buffer->read_index;
		debug_info->current_write_index =
			ring_info->ring_buffer->write_index;
		debug_info->current_interrupt_mask =
			ring_info->ring_buffer->interrupt_mask;
	}
}


/*
 *
 * hv_get_ringbuffer_interrupt_mask()
 *
 * Get the interrupt mask for the specified ring buffer
 *
 */
u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
{
	return rbi->ring_buffer->interrupt_mask;
}

/*
 *
 * hv_ringbuffer_init()
 *
 * Initialize the ring buffer
 *
 */
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		   void *buffer, u32 buflen)
{
	if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
		return -EINVAL;

	memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));

	ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
	ring_info->ring_buffer->read_index =
		ring_info->ring_buffer->write_index = 0;

	ring_info->ring_size = buflen;
	ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);

	spin_lock_init(&ring_info->ring_lock);

	return 0;
}
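
/*
 * Illustrative caller (hypothetical, not part of this file): a channel
 * that has allocated a page-aligned region might set up its ring
 * roughly as:
 *
 *	void *ring = (void *)__get_free_pages(GFP_KERNEL, order);
 *	int err = hv_ringbuffer_init(&channel_rbi, ring, PAGE_SIZE << order);
 *
 * The first page holds the struct hv_ring_buffer control structure
 * (read/write indices, interrupt mask); the remaining
 * buflen - sizeof(struct hv_ring_buffer) bytes form the data area.
 */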

/*
 *
 * hv_ringbuffer_cleanup()
 *
 * Cleanup the ring buffer
 *
 */
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
}

/*
 *
 * hv_ringbuffer_write()
 *
 * Write to the ring buffer
 *
 */
int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
		    struct scatterlist *sglist, u32 sgcount)
{
	int i = 0;
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 totalbytes_towrite = 0;

	struct scatterlist *sg;
	u32 next_write_location;
	u64 prev_indices = 0;
	unsigned long flags;

	for_each_sg(sglist, sg, sgcount, i) {
		totalbytes_towrite += sg->length;
	}

	totalbytes_towrite += sizeof(u64);

	spin_lock_irqsave(&outring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(outring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/*
	 * If there is only just enough room for the packet, treat the
	 * ring as full. Otherwise, the next time around we would think
	 * the ring buffer is empty, since read index == write index.
	 */
	if (bytes_avail_towrite <= totalbytes_towrite) {
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
		return -EAGAIN;
	}

	/* Write to the ring buffer */
	next_write_location = hv_get_next_write_location(outring_info);

	for_each_sg(sglist, sg, sgcount, i) {
		next_write_location = hv_copyto_ringbuffer(outring_info,
						     next_write_location,
						     sg_virt(sg),
						     sg->length);
	}

	/* Set previous packet start */
	prev_indices = hv_get_ring_bufferindices(outring_info);

	next_write_location = hv_copyto_ringbuffer(outring_info,
					     next_write_location,
					     &prev_indices,
					     sizeof(u64));

	/* Make sure we flush all writes before updating the write_index */
	smp_wmb();

	/* Now, update the write location */
	hv_set_next_write_location(outring_info, next_write_location);

	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
	return 0;
}
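
/*
 * Illustrative (hypothetical) use of hv_ringbuffer_write(): to send a
 * descriptor followed by its payload, a caller could build a two-entry
 * scatterlist:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], &desc, sizeof(desc));
 *	sg_set_buf(&sg[1], payload, payload_len);
 *	ret = hv_ringbuffer_write(outring, sg, 2);
 *
 * (desc, payload and outring are placeholder names.) On success the
 * descriptor, the payload and the 8-byte prev_indices trailer have all
 * been copied into the ring; -EAGAIN means there was not enough room.
 */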

/*
 *
 * hv_ringbuffer_peek()
 *
 * Read without advancing the read index
 *
 */
int hv_ringbuffer_peek(struct hv_ring_buffer_info *inring_info,
		   void *buffer, u32 buflen)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	unsigned long flags;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	/* Convert to byte offset */
	next_read_location = hv_get_next_read_location(inring_info);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}

/*
 *
 * hv_ringbuffer_read()
 *
 * Read and advance the read index
 *
 */
int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
		   u32 buflen, u32 offset)
{
	u32 bytes_avail_towrite;
	u32 bytes_avail_toread;
	u32 next_read_location = 0;
	u64 prev_indices = 0;
	unsigned long flags;

	if (buflen == 0)
		return -EINVAL;

	spin_lock_irqsave(&inring_info->ring_lock, flags);

	hv_get_ringbuffer_availbytes(inring_info,
				&bytes_avail_toread,
				&bytes_avail_towrite);

	/* Make sure there is something to read */
	if (bytes_avail_toread < buflen) {
		spin_unlock_irqrestore(&inring_info->ring_lock, flags);

		return -EAGAIN;
	}

	next_read_location =
		hv_get_next_readlocation_withoffset(inring_info, offset);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						buffer,
						buflen,
						next_read_location);

	next_read_location = hv_copyfrom_ringbuffer(inring_info,
						&prev_indices,
						sizeof(u64),
						next_read_location);

	/*
	 * Make sure all reads are done before we update the read index,
	 * since the writer may start writing to the read area once the
	 * read index is updated.
	 */
	smp_mb();

	/* Update the read index */
	hv_set_next_read_location(inring_info, next_read_location);

	spin_unlock_irqrestore(&inring_info->ring_lock, flags);

	return 0;
}
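
/*
 * Typical (illustrative) consumption pattern, assuming a fixed-size
 * packet descriptor at the head of each packet (the names below are
 * hypothetical):
 *
 *	struct hv_pkt_desc desc;
 *
 *	if (hv_ringbuffer_peek(inring, &desc, sizeof(desc)))
 *		return;		// nothing (or not enough) to read yet
 *	ret = hv_ringbuffer_read(inring, buf, desc.len, sizeof(desc));
 *
 * hv_ringbuffer_read() also consumes the 8-byte prev_indices trailer
 * appended by hv_ringbuffer_write(), so the read index advances by
 * offset + buflen + sizeof(u64) in total.
 */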