author     Vitaly Kuznetsov <vkuznets@redhat.com>            2016-09-02 08:58:20 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2016-09-02 11:22:51 -0400
commit     9988ce685676cebe0b14dc128f00e1ae9cd1a4fa (patch)
tree       5f7e2212bca74c839e686068b74ca3b3dec5d739
parent     98f531b10d23e3c28e8d34c0e88822a81231b3c2 (diff)
Drivers: hv: ring_buffer: wrap around mappings for ring buffers
Make it possible to always use a single memcpy() or to provide a
direct link to a packet on the ring buffer by creating virtual mapping
for two copies of the ring buffer with vmap(). Utilize currently empty
hv_ringbuffer_cleanup() to do the unmap.

While on it, replace sizeof(struct hv_ring_buffer) check in
hv_ringbuffer_init() with BUILD_BUG_ON() as it is a compile time check.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Tested-by: Dexuan Cui <decui@microsoft.com>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  drivers/hv/channel.c       29
-rw-r--r--  drivers/hv/hyperv_vmbus.h   4
-rw-r--r--  drivers/hv/ring_buffer.c   39
3 files changed, 49 insertions(+), 23 deletions(-)
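As a rough illustration of the double-mapping idea described in the commit message, the same effect can be reproduced in userspace by mapping one memfd twice, back to back: a memcpy() that starts near the end of the buffer then continues transparently at its beginning. This is a minimal Linux-only sketch with trimmed error handling, not the kernel code in the patch below, which builds the second view with vmap() instead.

/* Map the same page of backing memory twice so the ring "wraps around". */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        size_t size = 4096;                     /* one page of ring data */
        int fd = memfd_create("ring", 0);

        if (fd < 0 || ftruncate(fd, size) < 0)
                return 1;

        /* Reserve 2 * size of address space, then map the same pages twice. */
        char *base = mmap(NULL, 2 * size, PROT_NONE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
                return 1;
        mmap(base, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
        mmap(base + size, size, PROT_READ | PROT_WRITE,
             MAP_SHARED | MAP_FIXED, fd, 0);

        /* A single memcpy() starting near the end wraps into the start. */
        const char msg[] = "wraparound";
        memcpy(base + size - 4, msg, sizeof(msg));

        printf("across the seam: %s\n", base + size - 4); /* "wraparound" */
        printf("at the start:    %s\n", base);            /* "around"     */

        close(fd);
        return 0;
}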
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 15e833004fc3..16f91c8490fe 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -75,7 +75,6 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 {
         struct vmbus_channel_open_channel *open_msg;
         struct vmbus_channel_msginfo *open_info = NULL;
-        void *in, *out;
         unsigned long flags;
         int ret, err = 0;
         struct page *page;
@@ -112,23 +111,21 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
                 goto error_set_chnstate;
         }
 
-        out = page_address(page);
-        in = (void *)((unsigned long)out + send_ringbuffer_size);
-
-        newchannel->ringbuffer_pages = out;
+        newchannel->ringbuffer_pages = page_address(page);
         newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
                                            recv_ringbuffer_size) >> PAGE_SHIFT;
 
-        ret = hv_ringbuffer_init(
-                &newchannel->outbound, out, send_ringbuffer_size);
+        ret = hv_ringbuffer_init(&newchannel->outbound, page,
+                                 send_ringbuffer_size >> PAGE_SHIFT);
 
         if (ret != 0) {
                 err = ret;
                 goto error_free_pages;
         }
 
-        ret = hv_ringbuffer_init(
-                &newchannel->inbound, in, recv_ringbuffer_size);
+        ret = hv_ringbuffer_init(&newchannel->inbound,
+                                 &page[send_ringbuffer_size >> PAGE_SHIFT],
+                                 recv_ringbuffer_size >> PAGE_SHIFT);
         if (ret != 0) {
                 err = ret;
                 goto error_free_pages;
@@ -139,10 +136,10 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
         newchannel->ringbuffer_gpadlhandle = 0;
 
         ret = vmbus_establish_gpadl(newchannel,
-                                    newchannel->outbound.ring_buffer,
+                                    page_address(page),
                                     send_ringbuffer_size +
                                     recv_ringbuffer_size,
                                     &newchannel->ringbuffer_gpadlhandle);
 
         if (ret != 0) {
                 err = ret;
@@ -214,8 +211,10 @@ error_free_gpadl:
         vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
         kfree(open_info);
 error_free_pages:
-        free_pages((unsigned long)out,
-                get_order(send_ringbuffer_size + recv_ringbuffer_size));
+        hv_ringbuffer_cleanup(&newchannel->outbound);
+        hv_ringbuffer_cleanup(&newchannel->inbound);
+        __free_pages(page,
+                     get_order(send_ringbuffer_size + recv_ringbuffer_size));
 error_set_chnstate:
         newchannel->state = CHANNEL_OPEN_STATE;
         return err;
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index ddcc3485520d..a5b4442433c8 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -522,8 +522,8 @@ extern unsigned int host_info_edx;
 /* Interface */
 
 
-int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer,
-                       u32 buflen);
+int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
+                       struct page *pages, u32 pagecnt);
 
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index e3edcaee7ab3..7e21c2c82ad1 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -27,6 +27,8 @@
 #include <linux/mm.h>
 #include <linux/hyperv.h>
 #include <linux/uio.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
 
 #include "hyperv_vmbus.h"
 
@@ -243,22 +245,46 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
 
 /* Initialize the ring buffer. */
 int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
-                       void *buffer, u32 buflen)
+                       struct page *pages, u32 page_cnt)
 {
-        if (sizeof(struct hv_ring_buffer) != PAGE_SIZE)
-                return -EINVAL;
+        int i;
+        struct page **pages_wraparound;
+
+        BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
 
         memset(ring_info, 0, sizeof(struct hv_ring_buffer_info));
 
-        ring_info->ring_buffer = (struct hv_ring_buffer *)buffer;
+        /*
+         * First page holds struct hv_ring_buffer, do wraparound mapping for
+         * the rest.
+         */
+        pages_wraparound = kzalloc(sizeof(struct page *) * (page_cnt * 2 - 1),
+                                   GFP_KERNEL);
+        if (!pages_wraparound)
+                return -ENOMEM;
+
+        pages_wraparound[0] = pages;
+        for (i = 0; i < 2 * (page_cnt - 1); i++)
+                pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
+
+        ring_info->ring_buffer = (struct hv_ring_buffer *)
+                vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
+
+        kfree(pages_wraparound);
+
+
+        if (!ring_info->ring_buffer)
+                return -ENOMEM;
+
         ring_info->ring_buffer->read_index =
                 ring_info->ring_buffer->write_index = 0;
 
         /* Set the feature bit for enabling flow control. */
         ring_info->ring_buffer->feature_bits.value = 1;
 
-        ring_info->ring_size = buflen;
-        ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer);
+        ring_info->ring_size = page_cnt << PAGE_SHIFT;
+        ring_info->ring_datasize = ring_info->ring_size -
+                sizeof(struct hv_ring_buffer);
 
         spin_lock_init(&ring_info->ring_lock);
 
@@ -268,6 +294,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 /* Cleanup the ring buffer. */
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 {
+        vunmap(ring_info->ring_buffer);
 }
 
 /* Write to the ring buffer. */
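For reference, the pages_wraparound[] index arithmetic added to hv_ringbuffer_init() above can be checked with a small standalone program (plain userspace C, page_cnt chosen arbitrarily for illustration): slot 0 gets the header page, and data pages 1..page_cnt-1 are listed twice in a row, which is exactly the page order handed to vmap().

#include <stdio.h>

int main(void)
{
        unsigned int page_cnt = 4;   /* e.g. 1 header page + 3 data pages */
        unsigned int i;

        printf("slot  0 -> page 0 (header, struct hv_ring_buffer)\n");
        for (i = 0; i < 2 * (page_cnt - 1); i++)
                printf("slot %2u -> page %u (data)\n",
                       i + 1, i % (page_cnt - 1) + 1);

        /*
         * Prints data pages 1 2 3 1 2 3: the data area appears twice, so a
         * read or write crossing the end of the mapping lands back at the
         * start of the same physical pages.
         */
        return 0;
}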