| author | K. Y. Srinivasan <kys@microsoft.com> | 2012-12-01 09:46:36 -0500 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2013-01-17 13:46:39 -0500 |
| commit | 98fa8cf4bcc79cb14de8fd815bbcd00dcbd7b20e | |
| tree | 0d48abd179e49709cdac7e28eb853a653df7fbe2 /drivers/hv/ring_buffer.c | |
| parent | f878f3d59ed26f489add852ed6d5c8e5f3bbb1aa | |
Drivers: hv: Optimize the signaling on the write path
The host has already implemented the "read" side optimizations.
Leverage that to optimize "write" side signaling.
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
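
The rule this patch adds is simple: the guest only needs to interrupt the host when a write takes the ring from empty to non-empty, and only if the host has not masked interrupts, because the host promises to drain a non-empty ring completely before it stops reading. The following is a minimal user-space model of that rule; the struct and function names are illustrative stand-ins, not the kernel's types.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of the Hyper-V write-side signaling rule (illustration
 * only; field names mirror the ring buffer layout, but this is not
 * the kernel structure).
 */
struct model_ring {
	uint32_t write_index;
	uint32_t read_index;
	uint32_t interrupt_mask;
};

/*
 * Signal the host only when the write transitions the ring from empty
 * to non-empty and the host has not masked interrupts. old_write is
 * the write index observed *before* the data was copied in.
 */
static bool need_to_signal(uint32_t old_write, const struct model_ring *r)
{
	if (r->interrupt_mask)
		return false;
	return old_write == r->read_index;
}

int main(void)
{
	struct model_ring r = { .write_index = 0, .read_index = 0,
				.interrupt_mask = 0 };
	uint32_t old_write = r.write_index;	/* ring empty: write == read */

	r.write_index += 16;			/* pretend 16 bytes were written */
	printf("signal after first write:  %d\n",
	       need_to_signal(old_write, &r));	/* prints 1 */

	old_write = r.write_index;		/* host has not read yet */
	r.write_index += 16;
	printf("signal after second write: %d\n",
	       need_to_signal(old_write, &r));	/* prints 0 */
	return 0;
}
```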
Diffstat (limited to 'drivers/hv/ring_buffer.c')
| file | mode | lines |
|---|---|---|
| drivers/hv/ring_buffer.c | -rw-r--r-- | 42 |

1 file changed, 39 insertions(+), 3 deletions(-)
```diff
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 001079287709..9bc55f0dcf47 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -53,6 +53,37 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 	return read;
 }
 
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here is the details of this protocol:
+ *
+ *	1. The host guarantees that while it is draining the
+ *	   ring buffer, it will set the interrupt_mask to
+ *	   indicate it does not need to be interrupted when
+ *	   new data is placed.
+ *
+ *	2. The host guarantees that it will completely drain
+ *	   the ring buffer before exiting the read loop. Further,
+ *	   once the ring buffer is empty, it will clear the
+ *	   interrupt_mask and re-check to see if new data has
+ *	   arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+	if (rbi->ring_buffer->interrupt_mask)
+		return false;
+
+	/*
+	 * This is the only case we need to signal when the
+	 * ring transitions from being empty to non-empty.
+	 */
+	if (old_write == rbi->ring_buffer->read_index)
+		return true;
+
+	return false;
+}
+
 
 /*
  * hv_get_next_write_location()
@@ -322,7 +353,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
  *
  */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct scatterlist *sglist, u32 sgcount)
+		    struct scatterlist *sglist, u32 sgcount, bool *signal)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -331,6 +362,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 
 	struct scatterlist *sg;
 	u32 next_write_location;
+	u32 old_write;
 	u64 prev_indices = 0;
 	unsigned long flags;
 
@@ -359,6 +391,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	/* Write to the ring buffer */
 	next_write_location = hv_get_next_write_location(outring_info);
 
+	old_write = next_write_location;
+
 	for_each_sg(sglist, sg, sgcount, i)
 	{
 		next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -375,14 +409,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 					     &prev_indices,
 					     sizeof(u64));
 
-	/* Make sure we flush all writes before updating the writeIndex */
-	smp_wmb();
+	/* Issue a full memory barrier before updating the write index */
+	smp_mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
 
 
 	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+	*signal = hv_need_to_signal(old_write, outring_info);
 	return 0;
 }
 
```
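
This patch only changes the ring-buffer layer; the callers that act on the new out-parameter live in drivers/hv/channel.c and are not part of this diff. The sketch below shows how a caller might consume the flag, assuming a vmbus_setevent()-style helper that raises the interrupt to the host; the helper name and the example_send() wrapper are assumptions for illustration.

```c
#include <linux/types.h>
#include <linux/scatterlist.h>
#include "hyperv_vmbus.h"

/*
 * Hypothetical caller (illustration only, not part of this patch):
 * pass a local bool to hv_ringbuffer_write() and interrupt the host
 * only when the write transitioned the ring from empty to non-empty.
 */
static int example_send(struct vmbus_channel *channel,
			struct scatterlist *sglist, u32 sgcount)
{
	bool signal = false;
	int ret;

	ret = hv_ringbuffer_write(&channel->outbound, sglist, sgcount,
				  &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);	/* assumed host-signaling helper */

	return ret;
}
```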
