author     K. Y. Srinivasan <kys@microsoft.com>            2016-01-28 01:29:45 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2016-02-08 00:34:12 -0500
commit     fe760e4d64fe5c17c39e86c410d41f6587ee88bc (patch)
tree       e25710bde0e1e4da9a127a3e6c449c08696c1940 /drivers/hv
parent     3eba9a77d5fc2cee486a16fff435686f024f61cf (diff)
Drivers: hv: vmbus: Give control over how the ring access is serialized
On the channel send side, many of the VMBUS device drivers explicitly serialize access to the outgoing ring buffer. Give more control to the VMBUS device drivers in terms of how to serialize access to the outgoing ring buffer. The default behavior will be to acquire the ring lock, preserving the current behavior.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
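Note that this patch only adds the acquire_ring_lock flag and defaults it to true in alloc_channel(); it does not add a setter API. The sketch below is therefore purely illustrative of how a driver that already serializes its own sends could opt out of the ring spinlock; example_send() and its particular vmbus_sendpacket() call are assumptions for illustration, not part of this patch.

/* Illustrative sketch only: a driver that guarantees single-threaded
 * access to this channel's outgoing ring buffer could clear
 * acquire_ring_lock, so hv_ringbuffer_write() skips the ring spinlock.
 */
static int example_send(struct vmbus_channel *channel,
			void *buf, u32 len, u64 requestid)
{
	/* Hypothetical opt-out; the caller's own locking must now
	 * serialize all sends on this channel.
	 */
	channel->acquire_ring_lock = false;

	return vmbus_sendpacket(channel, buf, len, requestid,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}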
Diffstat (limited to 'drivers/hv')
-rw-r--r--  drivers/hv/channel.c       | 15
-rw-r--r--  drivers/hv/channel_mgmt.c  |  1
-rw-r--r--  drivers/hv/hyperv_vmbus.h  |  2
-rw-r--r--  drivers/hv/ring_buffer.c   | 13
4 files changed, 21 insertions(+), 10 deletions(-)
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index fcab234796ef..56dd261f7142 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -639,6 +639,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	u64 aligned_data = 0;
 	int ret;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);


@@ -658,7 +659,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

 	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
-				  &signal);
+				  &signal, lock);

 	/*
 	 * Signalling the host is conditional on many factors:
@@ -738,6 +739,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;

 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
@@ -774,7 +776,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);

 	/*
 	 * Signalling the host is conditional on many factors:
@@ -837,6 +840,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;

 	packetlen = desc_size + bufferlen;
 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -856,7 +860,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);

 	if (ret == 0 && signal)
 		vmbus_setevent(channel);
@@ -881,6 +886,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
 	bool signal = false;
+	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);

@@ -919,7 +925,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
+				  &signal, lock);

 	if (ret == 0 && signal)
 		vmbus_setevent(channel);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index cf311be88cb4..b40f429aaa13 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -259,6 +259,7 @@ static struct vmbus_channel *alloc_channel(void)
 		return NULL;

 	channel->id = atomic_inc_return(&chan_num);
+	channel->acquire_ring_lock = true;
 	spin_lock_init(&channel->inbound_lock);
 	spin_lock_init(&channel->lock);

diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index ac7aa303c37d..b9ea7f59036b 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -529,7 +529,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
 		    struct kvec *kv_list,
-		    u32 kv_count, bool *signal);
+		    u32 kv_count, bool *signal, bool lock);

 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 1145f3b8e4e0..5613e2b5cff7 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -314,7 +314,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)

 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct kvec *kv_list, u32 kv_count, bool *signal)
+		    struct kvec *kv_list, u32 kv_count, bool *signal, bool lock)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -324,14 +324,15 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	u32 next_write_location;
 	u32 old_write;
 	u64 prev_indices = 0;
-	unsigned long flags;
+	unsigned long flags = 0;

 	for (i = 0; i < kv_count; i++)
 		totalbytes_towrite += kv_list[i].iov_len;

 	totalbytes_towrite += sizeof(u64);

-	spin_lock_irqsave(&outring_info->ring_lock, flags);
+	if (lock)
+		spin_lock_irqsave(&outring_info->ring_lock, flags);

 	hv_get_ringbuffer_availbytes(outring_info,
 				     &bytes_avail_toread,
@@ -343,7 +344,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	 * is empty since the read index == write index.
 	 */
 	if (bytes_avail_towrite <= totalbytes_towrite) {
-		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+		if (lock)
+			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 		return -EAGAIN;
 	}

@@ -374,7 +376,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	hv_set_next_write_location(outring_info, next_write_location);


-	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+	if (lock)
+		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

 	*signal = hv_need_to_signal(old_write, outring_info);
 	return 0;
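Condensed, the write path after this change follows the pattern below. This is a simplified sketch of the hv_ringbuffer_write() body shown in the diff above, with the free-space check and the copy loop elided; it is not the full function. The flags variable is now initialized to 0 because spin_lock_irqsave() may never run when lock is false, and the -EAGAIN path must likewise only unlock when the lock was actually taken.

int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
			struct kvec *kv_list, u32 kv_count,
			bool *signal, bool lock)
{
	unsigned long flags = 0;	/* only meaningful when lock is true */
	u32 old_write;

	if (lock)
		spin_lock_irqsave(&outring_info->ring_lock, flags);

	/* ... check free space (conditionally unlock and return -EAGAIN
	 * if full), remember old_write, copy kv_list into the ring and
	 * advance the write index ...
	 */

	if (lock)
		spin_unlock_irqrestore(&outring_info->ring_lock, flags);

	*signal = hv_need_to_signal(old_write, outring_info);
	return 0;
}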