author    Stephen Hemminger <stephen@networkplumber.org>  2017-02-12 01:02:22 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2017-02-14 13:20:35 -0500
commit    5529eaf6e79a61e0ca7ade257f31d2ababc7f6c9 (patch)
tree      a619c818c8ea2b3f1a4c9b39281915c34771bac2
parent    b71e328297a3a578c482fb4814e737a0ec185839 (diff)
vmbus: remove conditional locking of vmbus_write
All current usage of vmbus write uses the acquire_lock flag,
so having the lock be optional is unnecessary. This also fixes
a sparse warning, since sparse doesn't like it when a function
takes a lock conditionally.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
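The sparse warning here is a lock-context imbalance: with a runtime
lock flag, sparse cannot prove that every acquire is paired with a
release on each exit path. A minimal sketch of the pattern being
removed versus the one replacing it — demo_ring, demo_write_conditional
and demo_write are hypothetical stand-ins for illustration, not the
real struct hv_ring_buffer_info or hv_ringbuffer_write():

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-in for a ring buffer; illustration only. */
struct demo_ring {
	spinlock_t lock;
	u32 avail;		/* bytes available to write */
};

/* Before: lock/unlock depend on a runtime flag.  Sparse cannot
 * prove the acquire/release pairs balance on every exit path,
 * so it emits a context-imbalance warning.
 */
static int demo_write_conditional(struct demo_ring *r, u32 len, bool lock)
{
	unsigned long flags = 0;

	if (lock)
		spin_lock_irqsave(&r->lock, flags);
	if (r->avail <= len) {
		if (lock)
			spin_unlock_irqrestore(&r->lock, flags);
		return -EAGAIN;
	}
	r->avail -= len;	/* stands in for the actual copy */
	if (lock)
		spin_unlock_irqrestore(&r->lock, flags);
	return 0;
}

/* After: every path acquires and releases unconditionally,
 * mirroring what this patch does to the vmbus write path.
 */
static int demo_write(struct demo_ring *r, u32 len)
{
	unsigned long flags;

	spin_lock_irqsave(&r->lock, flags);
	if (r->avail <= len) {
		spin_unlock_irqrestore(&r->lock, flags);
		return -EAGAIN;
	}
	r->avail -= len;
	spin_unlock_irqrestore(&r->lock, flags);
	return 0;
}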
Diffstat:
-rw-r--r--  drivers/hv/channel.c      | 13
-rw-r--r--  drivers/hv/channel_mgmt.c |  1
-rw-r--r--  drivers/hv/hyperv_vmbus.h |  3
-rw-r--r--  drivers/hv/ring_buffer.c  | 11
-rw-r--r--  include/linux/hyperv.h    | 15
5 files changed, 9 insertions(+), 34 deletions(-)
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 18cc1c78260d..81a80c82f1bd 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -651,7 +651,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 	int num_vecs = ((bufferlen != 0) ? 3 : 1);
 
 
@@ -670,7 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, num_vecs, lock);
+	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
 }
 EXPORT_SYMBOL(vmbus_sendpacket_ctl);
 
@@ -716,12 +715,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
 
-
 	/*
 	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
 	 * largest size we support
@@ -753,7 +750,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
 
@@ -789,7 +786,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 
 	packetlen = desc_size + bufferlen;
 	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -809,7 +805,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
 
@@ -827,7 +823,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct kvec bufferlist[3];
 	u64 aligned_data = 0;
-	bool lock = channel->acquire_ring_lock;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
 
@@ -866,7 +861,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	bufferlist[2].iov_base = &aligned_data;
 	bufferlist[2].iov_len = (packetlen_aligned - packetlen);
 
-	return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+	return hv_ringbuffer_write(channel, bufferlist, 3);
 }
 EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
 
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index b2bb5aafaa2f..f33465d78a02 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -332,7 +332,6 @@ static struct vmbus_channel *alloc_channel(void)
 	if (!channel)
 		return NULL;
 
-	channel->acquire_ring_lock = true;
 	spin_lock_init(&channel->inbound_lock);
 	spin_lock_init(&channel->lock);
 
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 558a798c407c..6a9b54677218 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -283,8 +283,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-			struct kvec *kv_list,
-			u32 kv_count, bool lock);
+			struct kvec *kv_list, u32 kv_count);
 
 int hv_ringbuffer_read(struct vmbus_channel *channel,
 		       void *buffer, u32 buflen, u32 *buffer_actual_len,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 1b70e034ef92..1a1e70a45146 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -284,7 +284,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 
 /* Write to the ring buffer. */
 int hv_ringbuffer_write(struct vmbus_channel *channel,
-			struct kvec *kv_list, u32 kv_count, bool lock)
+			struct kvec *kv_list, u32 kv_count)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -304,8 +304,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 
 	totalbytes_towrite += sizeof(u64);
 
-	if (lock)
-		spin_lock_irqsave(&outring_info->ring_lock, flags);
+	spin_lock_irqsave(&outring_info->ring_lock, flags);
 
 	bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
 
@@ -315,8 +314,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	 * is empty since the read index == write index.
 	 */
 	if (bytes_avail_towrite <= totalbytes_towrite) {
-		if (lock)
-			spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 		return -EAGAIN;
 	}
 
@@ -347,8 +345,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
 	hv_set_next_write_location(outring_info, next_write_location);
 
 
-	if (lock)
-		spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
 
 	hv_signal_on_write(old_write, channel);
 
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index e5aac5c051f7..466374dbc98f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -846,16 +846,6 @@ struct vmbus_channel {
 	 */
 	struct list_head percpu_list;
 	/*
-	 * On the channel send side, many of the VMBUS
-	 * device drivers explicity serialize access to the
-	 * outgoing ring buffer. Give more control to the
-	 * VMBUS device drivers in terms how to serialize
-	 * accesss to the outgoing ring buffer.
-	 * The default behavior will be to aquire the
-	 * ring lock to preserve the current behavior.
-	 */
-	bool acquire_ring_lock;
-	/*
 	 * For performance critical channels (storage, networking
 	 * etc,), Hyper-V has a mechanism to enhance the throughput
 	 * at the expense of latency:
@@ -895,11 +885,6 @@ struct vmbus_channel {
 
 };
 
-static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
-{
-	c->acquire_ring_lock = state;
-}
-
 static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 {
 	return !!(c->offermsg.offer.chn_flags &