author		K. Y. Srinivasan <kys@microsoft.com>		2012-12-01 09:46:36 -0500
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2013-01-17 13:46:39 -0500
commit		98fa8cf4bcc79cb14de8fd815bbcd00dcbd7b20e (patch)
tree		0d48abd179e49709cdac7e28eb853a653df7fbe2 /drivers/hv
parent		f878f3d59ed26f489add852ed6d5c8e5f3bbb1aa (diff)
Drivers: hv: Optimize the signaling on the write path
The host has already implemented the "read" side optimizations.
Leverage that to optimize "write" side signaling.
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
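[Editor's note] In short, the write path changes from "signal whenever the host has not masked interrupts" to "signal only when a write transitions the ring from empty to non-empty". The before/after caller logic, condensed from the channel.c hunks below (a sketch, not a complete function):

/* Before: signal after every successful write unless the host
 * has masked interrupts. */
if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
	vmbus_setevent(channel);

/* After: hv_ringbuffer_write() reports through its new 'signal'
 * out-parameter whether this write took the ring from empty to
 * non-empty; only that transition needs a host wakeup. */
if (ret == 0 && signal)
	vmbus_setevent(channel);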
Diffstat (limited to 'drivers/hv')
-rw-r--r--	drivers/hv/channel.c		15
-rw-r--r--	drivers/hv/hyperv_vmbus.h	 2
-rw-r--r--	drivers/hv/ring_buffer.c	42
3 files changed, 49 insertions, 10 deletions
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 773a2f25a8f0..727c5f1d6acf 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
 	int ret;
+	bool signal = false;
 
 
 	/* Setup the descriptor */
@@ -580,9 +581,9 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
+	bool signal = false;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
@@ -641,9 +643,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
+	bool signal = false;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
 
@@ -703,9 +706,9 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 3184f6ff4e74..895c898812bd 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -555,7 +555,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
 		    struct scatterlist *sglist,
-		    u32 sgcount);
+		    u32 sgcount, bool *signal);
 
 int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
 		   u32 buflen);
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 001079287709..9bc55f0dcf47 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -53,6 +53,37 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 	return read;
 }
 
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here are the details of this protocol:
+ *
+ *	1. The host guarantees that while it is draining the
+ *	   ring buffer, it will set the interrupt_mask to
+ *	   indicate it does not need to be interrupted when
+ *	   new data is placed.
+ *
+ *	2. The host guarantees that it will completely drain
+ *	   the ring buffer before exiting the read loop. Further,
+ *	   once the ring buffer is empty, it will clear the
+ *	   interrupt_mask and re-check to see if new data has
+ *	   arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+	if (rbi->ring_buffer->interrupt_mask)
+		return false;
+
+	/*
+	 * This is the only case we need to signal: when the
+	 * ring transitions from being empty to non-empty.
+	 */
+	if (old_write == rbi->ring_buffer->read_index)
+		return true;
+
+	return false;
+}
+
 
 /*
  * hv_get_next_write_location()
@@ -322,7 +353,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
  *
  */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct scatterlist *sglist, u32 sgcount)
+		    struct scatterlist *sglist, u32 sgcount, bool *signal)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -331,6 +362,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 
 	struct scatterlist *sg;
 	u32 next_write_location;
+	u32 old_write;
 	u64 prev_indices = 0;
 	unsigned long flags;
 
@@ -359,6 +391,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	/* Write to the ring buffer */
 	next_write_location = hv_get_next_write_location(outring_info);
 
+	old_write = next_write_location;
+
 	for_each_sg(sglist, sg, sgcount, i)
 	{
 		next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -375,14 +409,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 					     &prev_indices,
 					     sizeof(u64));
 
-	/* Make sure we flush all writes before updating the writeIndex */
-	smp_wmb();
+	/* Issue a full memory barrier before updating the write index */
+	smp_mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
 
 
 	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+	*signal = hv_need_to_signal(old_write, outring_info);
 	return 0;
 }
 
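[Editor's note] To experiment with the signaling rule outside the kernel, here is a minimal self-contained user-space model. It is a sketch under stated assumptions: ring_ctrl and need_to_signal() are hypothetical stand-ins for struct hv_ring_buffer_info and hv_need_to_signal(), and C11 seq_cst atomics stand in for the smp_mb() the patch adds.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the shared ring-buffer control page;
 * the real driver works on struct hv_ring_buffer. */
struct ring_ctrl {
	_Atomic uint32_t write_index;
	_Atomic uint32_t read_index;
	_Atomic uint32_t interrupt_mask;
};

/* Same decision as hv_need_to_signal() in the patch: never signal
 * while the host has masked interrupts; otherwise signal only when
 * the write began at the read index, i.e. the ring was empty
 * before this write. */
static bool need_to_signal(uint32_t old_write, struct ring_ctrl *rc)
{
	if (atomic_load(&rc->interrupt_mask))
		return false;
	return old_write == atomic_load(&rc->read_index);
}

int main(void)
{
	struct ring_ctrl rc;
	atomic_init(&rc.write_index, 0);
	atomic_init(&rc.read_index, 0);
	atomic_init(&rc.interrupt_mask, 0);

	/* Writer: remember where the write started, copy the payload
	 * (elided here), then publish the new write index. */
	uint32_t old_write = atomic_load(&rc.write_index);
	atomic_store(&rc.write_index, old_write + 64);

	/* The seq_cst atomics above play the role of the smp_mb() the
	 * patch adds: the write_index store must be ordered before the
	 * read_index load inside need_to_signal(), which is why the
	 * store-only smp_wmb() was no longer sufficient. */
	printf("signal host: %s\n",
	       need_to_signal(old_write, &rc) ? "yes" : "no");
	return 0;
}

With an empty ring (read_index == old_write == 0) and interrupts unmasked, the model prints "signal host: yes"; advance read_index behind write_index or set interrupt_mask and it prints "no".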