author	Greg Kroah-Hartman <gregkh@suse.de>	2011-10-04 15:29:52 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2011-10-11 00:52:55 -0400
commit	46a971913611a23478283931460a95be962ce329 (patch)
tree	7452d0f07ee9f1f5270a8da6c1387f35c439843d /drivers/hv/channel.c
parent	715a4801e734ea9c8e528265ce3ff6aead85bce1 (diff)
Staging: hv: move hyperv code out of staging directory
After many years wandering the desert, it is finally time for the
Microsoft HyperV code to move out of the staging directory. Or at least
the core hyperv bus code and the utility driver; the rest still has some
review to get through from the various subsystem maintainers.

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Diffstat (limited to 'drivers/hv/channel.c')
-rw-r--r--  drivers/hv/channel.c  815
1 file changed, 815 insertions(+), 0 deletions(-)
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
new file mode 100644
index 000000000000..406537420fff
--- /dev/null
+++ b/drivers/hv/channel.c
@@ -0,0 +1,815 @@
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))

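A quick sanity check of the macro above, assuming 4 KiB pages (PAGE_SHIFT = 12); the numbers are illustrative:

/*
 * NUM_PAGES_SPANNED(0x100, 0x2000):
 *   PAGE_ALIGN(0x100 + 0x2000) >> 12  =  0x3000 >> 12  =  3
 *   0x100 >> 12                       =  0
 * => an unaligned 0x2000-byte buffer straddles 3 pages, one more than
 *    the length alone would suggest; page-aligned it spans only 2.
 */
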
/* Internal routines */
static int create_gpadl_header(
	void *kbuffer,	/* must be phys and virt contiguous */
	u32 size,	/* page-size multiple */
	struct vmbus_channel_msginfo **msginfo,
	u32 *messagecount);
static void vmbus_setevent(struct vmbus_channel *channel);

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
static void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	if (channel->offermsg.monitor_allocated) {
		/* Each u32 represents 32 channels */
		sync_set_bit(channel->offermsg.child_relid & 31,
			(unsigned long *) vmbus_connection.send_int_page +
			(channel->offermsg.child_relid >> 5));

		monitorpage = vmbus_connection.monitor_pages;
		monitorpage++; /* Get the child to parent monitor page */

		sync_set_bit(channel->monitor_bit,
			(unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);

	} else {
		vmbus_set_event(channel->offermsg.child_relid);
	}
}

/*
 * vmbus_get_debug_info - Retrieve various channel debug info
 */
void vmbus_get_debug_info(struct vmbus_channel *channel,
			  struct vmbus_channel_debug_info *debuginfo)
{
	struct hv_monitor_page *monitorpage;
	u8 monitor_group = (u8)channel->offermsg.monitorid / 32;
	u8 monitor_offset = (u8)channel->offermsg.monitorid % 32;

	debuginfo->relid = channel->offermsg.child_relid;
	debuginfo->state = channel->state;
	memcpy(&debuginfo->interfacetype,
	       &channel->offermsg.offer.if_type, sizeof(uuid_le));
	memcpy(&debuginfo->interface_instance,
	       &channel->offermsg.offer.if_instance,
	       sizeof(uuid_le));

	monitorpage = (struct hv_monitor_page *)vmbus_connection.monitor_pages;

	debuginfo->monitorid = channel->offermsg.monitorid;

	debuginfo->servermonitor_pending =
		monitorpage->trigger_group[monitor_group].pending;
	debuginfo->servermonitor_latency =
		monitorpage->latency[monitor_group][monitor_offset];
	debuginfo->servermonitor_connectionid =
		monitorpage->parameter[monitor_group]
			[monitor_offset].connectionid.u.id;

	monitorpage++;

	debuginfo->clientmonitor_pending =
		monitorpage->trigger_group[monitor_group].pending;
	debuginfo->clientmonitor_latency =
		monitorpage->latency[monitor_group][monitor_offset];
	debuginfo->clientmonitor_connectionid =
		monitorpage->parameter[monitor_group]
			[monitor_offset].connectionid.u.id;

	hv_ringbuffer_get_debuginfo(&channel->inbound, &debuginfo->inbound);
	hv_ringbuffer_get_debuginfo(&channel->outbound, &debuginfo->outbound);
}

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	void *in, *out;
	unsigned long flags;
	int ret, t, err = 0;

	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	/* Allocate the ring buffer */
	out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
		get_order(send_ringbuffer_size + recv_ringbuffer_size));

	if (!out)
		return -ENOMEM;

	in = (void *)((unsigned long)out + send_ringbuffer_size);

	newchannel->ringbuffer_pages = out;
	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
					    recv_ringbuffer_size) >> PAGE_SHIFT;

	ret = hv_ringbuffer_init(
		&newchannel->outbound, out, send_ringbuffer_size);

	if (ret != 0) {
		err = ret;
		goto errorout;
	}

	ret = hv_ringbuffer_init(
		&newchannel->inbound, in, recv_ringbuffer_size);
	if (ret != 0) {
		err = ret;
		goto errorout;
	}

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	ret = vmbus_establish_gpadl(newchannel,
				    newchannel->outbound.ring_buffer,
				    send_ringbuffer_size +
				    recv_ringbuffer_size,
				    &newchannel->ringbuffer_gpadlhandle);

	if (ret != 0) {
		err = ret;
		goto errorout;
	}

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			    sizeof(struct vmbus_channel_open_channel),
			    GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto errorout;
	}

	init_completion(&open_info->waitevent);

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
						     PAGE_SHIFT;
	open_msg->server_contextarea_gpadlhandle = 0;

	if (userdatalen > MAX_USER_DEFINED_BYTES) {
		err = -EINVAL;
		goto errorout;
	}

	if (userdatalen)
		memcpy(open_msg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel));

	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
	if (t == 0) {
		err = -ETIMEDOUT;
		goto errorout;
	}

	if (open_info->response.open_result.status)
		err = open_info->response.open_result.status;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(open_info);
	return err;

errorout:
	hv_ringbuffer_cleanup(&newchannel->outbound);
	hv_ringbuffer_cleanup(&newchannel->inbound);
	free_pages((unsigned long)out,
		get_order(send_ringbuffer_size + recv_ringbuffer_size));
	kfree(open_info);
	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);

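A minimal usage sketch (not part of this commit): a VMBus driver would typically call vmbus_open() from its probe path and vmbus_close() on remove. The ring size and all sketch_* names below are illustrative assumptions, not definitions from this file; dev->channel is the channel pointer carried by struct hv_device in linux/hyperv.h.

#include <linux/hyperv.h>

/* Illustrative ring size; real drivers pick device-specific values. */
#define SKETCH_RING_SIZE (4 * PAGE_SIZE)

/* Called by vmbus whenever the host signals this channel. */
static void sketch_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	/* drain the inbound ring here, e.g. via vmbus_recvpacket() */
}

static int sketch_probe(struct hv_device *dev)
{
	/* no user data in the open request: userdata = NULL, len = 0 */
	int ret = vmbus_open(dev->channel, SKETCH_RING_SIZE,
			     SKETCH_RING_SIZE, NULL, 0,
			     sketch_onchannelcallback, dev->channel);
	if (ret)
		return ret;
	/* ... device-specific handshake with the host goes here ... */
	return 0;
}
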
/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
			       struct vmbus_channel_msginfo **msginfo,
			       u32 *messagecount)
{
	int i;
	int pagecount;
	unsigned long long pfn;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = size >> PAGE_SHIFT;
	pfn = virt_to_phys(kbuffer) >> PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = pfn+i;
		*msginfo = msgheader;
		*messagecount = 1;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);

			if (!msgbody) {
				struct vmbus_channel_msginfo *pos = NULL;
				struct vmbus_channel_msginfo *tmp = NULL;
				/*
				 * Free up all the allocated messages.
				 */
				list_for_each_entry_safe(pos, tmp,
					&msgheader->submsglist,
					msglistentry) {

					list_del(&pos->msglistentry);
					kfree(pos);
				}

				goto nomem;
			}

			msgbody->msgsize = msgsize;
			(*messagecount)++;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit.
			 * This is governed by the guest/host protocol and
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = pfn + pfnsum + i;

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = pfn+i;

		*msginfo = msgheader;
		*messagecount = 1;
	}

	return 0;
nomem:
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc
 * @size: page-size multiple
 * @gpadl_handle: the handle of the newly established GPADL, returned
 *		  to the caller
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo;
	u32 msgcount;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;
	int t;

	next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
	atomic_inc(&vmbus_connection.next_gpadl_handle);

	ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
	if (ret)
		return ret;

	init_completion(&msginfo->waitevent);

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);

	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo));
	if (ret != 0)
		goto cleanup;

	if (msgcount > 1) {
		list_for_each(curr, &msginfo->submsglist) {

			submsginfo = (struct vmbus_channel_msginfo *)curr;
			gpadl_body =
			     (struct vmbus_channel_gpadl_body *)submsginfo->msg;

			gpadl_body->header.msgtype =
				CHANNELMSG_GPADL_BODY;
			gpadl_body->gpadl = next_gpadl_handle;

			ret = vmbus_post_msg(gpadl_body,
					     submsginfo->msgsize -
					     sizeof(*submsginfo));
			if (ret != 0)
				goto cleanup;

		}
	}
	t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
	BUG_ON(t == 0);

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(msginfo);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);

/*
 * vmbus_teardown_gpadl - Tear down the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret, t;

	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_gpadl_teardown));

	BUG_ON(ret != 0);
	t = wait_for_completion_timeout(&info->waitevent, 5*HZ);
	BUG_ON(t == 0);

	/* Received a teardown response */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

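A usage sketch for the GPADL pair (illustrative, not from this commit): vmbus_establish_gpadl() shares a physically contiguous kernel buffer with the host and returns a handle, and the teardown must happen before the memory is freed. The sketch_* helper names are hypothetical.

#include <linux/hyperv.h>
#include <linux/slab.h>

/* Share a page-multiple buffer with the host; returns 0 or -errno. */
static int sketch_share_buffer(struct vmbus_channel *channel,
			       void **buf, u32 size, u32 *gpadl)
{
	int ret;

	*buf = kzalloc(size, GFP_KERNEL);	/* phys and virt contiguous */
	if (!*buf)
		return -ENOMEM;

	ret = vmbus_establish_gpadl(channel, *buf, size, gpadl);
	if (ret) {
		kfree(*buf);
		*buf = NULL;
	}
	return ret;
}

/* The matching release: revoke the GPADL before freeing the memory. */
static void sketch_release_buffer(struct vmbus_channel *channel,
				  void *buf, u32 gpadl)
{
	vmbus_teardown_gpadl(channel, gpadl);
	kfree(buf);
}
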
/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	int ret;
	unsigned long flags;

	/* Stop callback and cancel the timer asap */
	spin_lock_irqsave(&channel->inbound_lock, flags);
	channel->onchannel_callback = NULL;
	spin_unlock_irqrestore(&channel->inbound_lock, flags);

	/* Send a closing message */

	msg = &channel->close_msg.msg;

	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));

	BUG_ON(ret != 0);
	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ringbuffer_gpadlhandle)
		vmbus_teardown_gpadl(channel,
				     channel->ringbuffer_gpadlhandle);

	/* Cleanup the ring buffers for this channel */
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);

	free_pages((unsigned long)channel->ringbuffer_pages,
		get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
}
EXPORT_SYMBOL_GPL(vmbus_close);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in @buffer
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 *	  packet etc.
 *
 * Sends data in @buffer directly to hyper-v via the vmbus.
 * This will send the data unparsed to hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
		     u32 bufferlen, u64 requestid,
		     enum vmbus_packet_type type, u32 flags)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct scatterlist bufferlist[3];
	u64 aligned_data = 0;
	int ret;

	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-bytes granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], &desc, sizeof(struct vmpacket_descriptor));
	sg_set_buf(&bufferlist[1], buffer, bufferlen);
	sg_set_buf(&bufferlist[2], &aligned_data,
		   packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);

	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket);

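A send-side sketch (illustrative): an in-band request with a completion requested, so the host's reply arrives through the channel callback. VM_PKT_DATA_INBAND and VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED are the values the descriptor comments above allude to; treat the exact names as assumptions from linux/hyperv.h.

#include <linux/hyperv.h>

/* Send a request in-band; reqid lets the completion be matched up. */
static int sketch_send_request(struct vmbus_channel *channel,
			       void *req, u32 reqlen, u64 reqid)
{
	return vmbus_sendpacket(channel, req, reqlen, reqid,
				VM_PKT_DATA_INBAND,
				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
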
/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				struct hv_page_buffer pagebuffers[],
				u32 pagecount, void *buffer, u32 bufferlen,
				u64 requestid)
{
	int ret;
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct scatterlist bufferlist[3];
	u64 aligned_data = 0;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
		   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
		   sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], &desc, descsize);
	sg_set_buf(&bufferlist[1], buffer, bufferlen);
	sg_set_buf(&bufferlist[2], &aligned_data,
		packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);

	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
				     struct hv_multipage_buffer *multi_pagebuffer,
				     void *buffer, u32 bufferlen, u64 requestid)
{
	int ret;
	struct vmbus_channel_packet_multipage_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct scatterlist bufferlist[3];
	u64 aligned_data = 0;
	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);

	if ((pfncount < 0) || (pfncount > MAX_MULTIPAGE_BUFFER_COUNT))
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_multipage_buffer is
	 * the largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
		   ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
		   sizeof(u64));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-bytes granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = 1;

	desc.range.len = multi_pagebuffer->len;
	desc.range.offset = multi_pagebuffer->offset;

	memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
	       pfncount * sizeof(u64));

	sg_init_table(bufferlist, 3);
	sg_set_buf(&bufferlist[0], &desc, descsize);
	sg_set_buf(&bufferlist[1], buffer, bufferlen);
	sg_set_buf(&bufferlist[2], &aligned_data,
		   packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);

	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the hyper-v vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from hyper-v.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u32 *buffer_actual_len, u64 *requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen;
	u32 userlen;
	int ret;

	*buffer_actual_len = 0;
	*requestid = 0;

	ret = hv_ringbuffer_peek(&channel->inbound, &desc,
				 sizeof(struct vmpacket_descriptor));
	if (ret != 0)
		return 0;

	packetlen = desc.len8 << 3;
	userlen = packetlen - (desc.offset8 << 3);

	*buffer_actual_len = userlen;

	if (userlen > bufferlen) {

		pr_err("Buffer too small - got %d needs %d\n",
		       bufferlen, userlen);
		return -ETOOSMALL;
	}

	*requestid = desc.trans_id;

	/* Copy over the packet to the user buffer */
	ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
				 (desc.offset8 << 3));

	return 0;
}
EXPORT_SYMBOL(vmbus_recvpacket);

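A receive-side sketch (illustrative): because vmbus_recvpacket() returns 0 with *buffer_actual_len left at 0 when the inbound ring is empty, a channel callback can drain the ring by looping until no data comes back. The buffer size and the sketch_* name are assumptions.

#include <linux/hyperv.h>

/* Drain the inbound ring from the channel callback. */
static void sketch_drain_channel(void *context)
{
	struct vmbus_channel *channel = context;
	u8 buf[256];	/* illustrative bounce buffer */
	u32 len;
	u64 reqid;

	while (vmbus_recvpacket(channel, buf, sizeof(buf),
				&len, &reqid) == 0 && len > 0) {
		/* dispatch on the packet contents; reqid pairs this
		   completion with the request sent earlier */
	}
}
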
/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u32 *buffer_actual_len,
			 u64 *requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen;
	u32 userlen;
	int ret;

	*buffer_actual_len = 0;
	*requestid = 0;

	ret = hv_ringbuffer_peek(&channel->inbound, &desc,
				 sizeof(struct vmpacket_descriptor));
	if (ret != 0)
		return 0;

	packetlen = desc.len8 << 3;
	userlen = packetlen - (desc.offset8 << 3);

	*buffer_actual_len = packetlen;

	if (packetlen > bufferlen) {
		pr_err("Buffer too small - needed %d bytes but "
			"got space for only %d bytes\n",
			packetlen, bufferlen);
		return -ENOBUFS;
	}

	*requestid = desc.trans_id;

	/* Copy over the entire packet to the user buffer */
	ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);

	return 0;
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);