Diffstat (limited to 'drivers/hv')
-rw-r--r--  drivers/hv/channel.c      |  33
-rw-r--r--  drivers/hv/channel_mgmt.c |  93
-rw-r--r--  drivers/hv/connection.c   | 232
-rw-r--r--  drivers/hv/hv.c           |  72
-rw-r--r--  drivers/hv/hv_balloon.c   |  63
-rw-r--r--  drivers/hv/hv_util.c      |  46
-rw-r--r--  drivers/hv/hyperv_vmbus.h |  65
-rw-r--r--  drivers/hv/ring_buffer.c  | 130
-rw-r--r--  drivers/hv/vmbus_drv.c    |  54
9 files changed, 593 insertions(+), 195 deletions(-)
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 773a2f25a8f0..0b122f8c7005 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -55,7 +55,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
 			[channel->monitor_grp].pending);
 
 	} else {
-		vmbus_set_event(channel->offermsg.child_relid);
+		vmbus_set_event(channel);
 	}
 }
 
@@ -181,7 +181,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
 	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
 						     PAGE_SHIFT;
-	open_msg->server_contextarea_gpadlhandle = 0;
+	open_msg->target_vp = newchannel->target_vp;
 
 	if (userdatalen > MAX_USER_DEFINED_BYTES) {
 		err = -EINVAL;
@@ -564,6 +564,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
 	int ret;
+	bool signal = false;
 
 
 	/* Setup the descriptor */
@@ -580,9 +581,9 @@ int vmbus_sendpacket(struct vmbus_channel *channel, const void *buffer,
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -606,6 +607,7 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
+	bool signal = false;
 
 	if (pagecount > MAX_PAGE_BUFFER_COUNT)
 		return -EINVAL;
@@ -641,9 +643,9 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -665,6 +667,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	u32 packetlen_aligned;
 	struct scatterlist bufferlist[3];
 	u64 aligned_data = 0;
+	bool signal = false;
 	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
 					 multi_pagebuffer->len);
 
@@ -703,9 +706,9 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
 	sg_set_buf(&bufferlist[2], &aligned_data,
 		   packetlen_aligned - packetlen);
 
-	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3);
+	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);
 
-	if (ret == 0 && !hv_get_ringbuffer_interrupt_mask(&channel->outbound))
+	if (ret == 0 && signal)
 		vmbus_setevent(channel);
 
 	return ret;
@@ -732,6 +735,7 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
 	u32 packetlen;
 	u32 userlen;
 	int ret;
+	bool signal = false;
 
 	*buffer_actual_len = 0;
 	*requestid = 0;
@@ -758,8 +762,10 @@ int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
 
 	/* Copy over the packet to the user buffer */
 	ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
-				 (desc.offset8 << 3));
+				 (desc.offset8 << 3), &signal);
 
+	if (signal)
+		vmbus_setevent(channel);
 
 	return 0;
 }
@@ -774,8 +780,8 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
 {
 	struct vmpacket_descriptor desc;
 	u32 packetlen;
-	u32 userlen;
 	int ret;
+	bool signal = false;
 
 	*buffer_actual_len = 0;
 	*requestid = 0;
@@ -788,7 +794,6 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
 
 
 	packetlen = desc.len8 << 3;
-	userlen = packetlen - (desc.offset8 << 3);
 
 	*buffer_actual_len = packetlen;
 
@@ -802,7 +807,11 @@ int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
 	*requestid = desc.trans_id;
 
 	/* Copy over the entire packet to the user buffer */
-	ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0);
+	ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
+				 &signal);
+
+	if (signal)
+		vmbus_setevent(channel);
 
 	return 0;
 }
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f84c5cff8d4..53a8600162a5 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -257,6 +257,70 @@ static void vmbus_process_offer(struct work_struct *work)
 	}
 }
 
+enum {
+	IDE = 0,
+	SCSI,
+	NIC,
+	MAX_PERF_CHN,
+};
+
+/*
+ * This is an array of device_ids (device types) that are performance critical.
+ * We attempt to distribute the interrupt load for these devices across
+ * all available CPUs.
+ */
+static const struct hv_vmbus_device_id hp_devs[] = {
+	/* IDE */
+	{ HV_IDE_GUID, },
+	/* Storage - SCSI */
+	{ HV_SCSI_GUID, },
+	/* Network */
+	{ HV_NIC_GUID, },
+};
+
+
+/*
+ * We use this state to statically distribute the channel interrupt load.
+ */
+static u32 next_vp;
+
+/*
+ * Starting with Win8, we can statically distribute the incoming
+ * channel interrupt load by binding a channel to a VCPU. We
+ * implement here a simple round-robin scheme for distributing
+ * the interrupt load.
+ * We will bind channels that are not performance critical to cpu 0 and
+ * performance critical channels (IDE, SCSI and Network) will be uniformly
+ * distributed across all available CPUs.
+ */
+static u32 get_vp_index(uuid_le *type_guid)
+{
+	u32 cur_cpu;
+	int i;
+	bool perf_chn = false;
+	u32 max_cpus = num_online_cpus();
+
+	for (i = IDE; i < MAX_PERF_CHN; i++) {
+		if (!memcmp(type_guid->b, hp_devs[i].guid,
+			    sizeof(uuid_le))) {
+			perf_chn = true;
+			break;
+		}
+	}
+	if ((vmbus_proto_version == VERSION_WS2008) ||
+	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
+		/*
+		 * Prior to win8, all channel interrupts are
+		 * delivered on cpu 0.
+		 * Also if the channel is not a performance critical
+		 * channel, bind it to cpu 0.
+		 */
+		return 0;
+	}
+	cur_cpu = (++next_vp % max_cpus);
+	return hv_context.vp_index[cur_cpu];
+}
+
 /*
  * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
  *
@@ -275,6 +339,35 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 		return;
 	}
 
+	/*
+	 * By default we setup state to enable batched
+	 * reading. A specific service can choose to
+	 * disable this prior to opening the channel.
+	 */
+	newchannel->batched_reading = true;
+
+	/*
+	 * Setup state for signalling the host.
+	 */
+	newchannel->sig_event = (struct hv_input_signal_event *)
+				(ALIGN((unsigned long)
+				       &newchannel->sig_buf,
+				       HV_HYPERCALL_PARAM_ALIGN));
+
+	newchannel->sig_event->connectionid.asu32 = 0;
+	newchannel->sig_event->connectionid.u.id = VMBUS_EVENT_CONNECTION_ID;
+	newchannel->sig_event->flag_number = 0;
+	newchannel->sig_event->rsvdz = 0;
+
+	if (vmbus_proto_version != VERSION_WS2008) {
+		newchannel->is_dedicated_interrupt =
+				(offer->is_dedicated_interrupt != 0);
+		newchannel->sig_event->connectionid.u.id =
+				offer->connection_id;
+	}
+
+	newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+
 	memcpy(&newchannel->offermsg, offer,
 	       sizeof(struct vmbus_channel_offer_channel));
 	newchannel->monitor_grp = (u8)offer->monitorid / 32;
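The effect of get_vp_index() is easiest to see in isolation: pre-Win8 hosts and non-performance-critical channels always land on CPU 0, while IDE, SCSI and NIC channels rotate round-robin over the online CPUs. A minimal userspace sketch of that policy (the four-entry vp_index table and all names here are illustrative, not kernel API):

#include <stdio.h>
#include <stdbool.h>

#define NUM_CPUS 4

/* Pretend map from Linux cpu id to Hyper-V VP id (values made up). */
static unsigned int vp_index[NUM_CPUS] = { 0, 1, 2, 3 };
static unsigned int next_vp;

static unsigned int pick_vp(bool perf_critical, bool host_is_win8)
{
	/* Pre-Win8 hosts and non-critical channels stay on cpu 0. */
	if (!host_is_win8 || !perf_critical)
		return 0;
	/* Round robin over the online CPUs for perf-critical channels. */
	return vp_index[++next_vp % NUM_CPUS];
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("perf channel %d -> vp %u\n", i, pick_vp(true, true));
	printf("util channel   -> vp %u\n", pick_vp(false, true));
	return 0;
}

Running this shows the perf channels cycling over VPs 1, 2, 3, 0, ... while the util channel stays on VP 0, which is exactly the static distribution the patch aims for.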
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 650c9f0b6642..253a74ba245c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/hyperv.h>
+#include <linux/export.h>
 #include <asm/hyperv.h>
 #include "hyperv_vmbus.h"
 
@@ -40,15 +41,99 @@ struct vmbus_connection vmbus_connection = {
 };
 
 /*
+ * Negotiated protocol version with the host.
+ */
+__u32 vmbus_proto_version;
+EXPORT_SYMBOL_GPL(vmbus_proto_version);
+
+static __u32 vmbus_get_next_version(__u32 current_version)
+{
+	switch (current_version) {
+	case (VERSION_WIN7):
+		return VERSION_WS2008;
+
+	case (VERSION_WIN8):
+		return VERSION_WIN7;
+
+	case (VERSION_WS2008):
+	default:
+		return VERSION_INVAL;
+	}
+}
+
+static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
+					__u32 version)
+{
+	int ret = 0;
+	struct vmbus_channel_initiate_contact *msg;
+	unsigned long flags;
+	int t;
+
+	init_completion(&msginfo->waitevent);
+
+	msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
+
+	msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
+	msg->vmbus_version_requested = version;
+	msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
+	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
+	msg->monitor_page2 = virt_to_phys(
+			(void *)((unsigned long)vmbus_connection.monitor_pages +
+				 PAGE_SIZE));
+
+	/*
+	 * Add to list before we send the request since we may
+	 * receive the response before returning from this routine
+	 */
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	list_add_tail(&msginfo->msglistentry,
+		      &vmbus_connection.chn_msg_list);
+
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+	ret = vmbus_post_msg(msg,
+			     sizeof(struct vmbus_channel_initiate_contact));
+	if (ret != 0) {
+		spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+		list_del(&msginfo->msglistentry);
+		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+					flags);
+		return ret;
+	}
+
+	/* Wait for the connection response */
+	t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
+	if (t == 0) {
+		spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
+				flags);
+		list_del(&msginfo->msglistentry);
+		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
+					flags);
+		return -ETIMEDOUT;
+	}
+
+	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
+	list_del(&msginfo->msglistentry);
+	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+
+	/* Check if successful */
+	if (msginfo->response.version_response.version_supported) {
+		vmbus_connection.conn_state = CONNECTED;
+	} else {
+		return -ECONNREFUSED;
+	}
+
+	return ret;
+}
+
+/*
  * vmbus_connect - Sends a connect request on the partition service connection
  */
 int vmbus_connect(void)
 {
 	int ret = 0;
-	int t;
 	struct vmbus_channel_msginfo *msginfo = NULL;
-	struct vmbus_channel_initiate_contact *msg;
-	unsigned long flags;
+	__u32 version;
 
 	/* Initialize the vmbus connection */
 	vmbus_connection.conn_state = CONNECTING;
@@ -99,69 +184,38 @@ int vmbus_connect(void)
 		goto cleanup;
 	}
 
-	init_completion(&msginfo->waitevent);
-
-	msg = (struct vmbus_channel_initiate_contact *)msginfo->msg;
-
-	msg->header.msgtype = CHANNELMSG_INITIATE_CONTACT;
-	msg->vmbus_version_requested = VMBUS_REVISION_NUMBER;
-	msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
-	msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages);
-	msg->monitor_page2 = virt_to_phys(
-			(void *)((unsigned long)vmbus_connection.monitor_pages +
-				 PAGE_SIZE));
-
 	/*
-	 * Add to list before we send the request since we may
-	 * receive the response before returning from this routine
+	 * Negotiate a compatible VMBUS version number with the
+	 * host. We start with the highest number we can support
+	 * and work our way down until we negotiate a compatible
+	 * version.
 	 */
-	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-	list_add_tail(&msginfo->msglistentry,
-		      &vmbus_connection.chn_msg_list);
 
-	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+	version = VERSION_CURRENT;
 
-	ret = vmbus_post_msg(msg,
-			     sizeof(struct vmbus_channel_initiate_contact));
-	if (ret != 0) {
-		spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-		list_del(&msginfo->msglistentry);
-		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
-					flags);
-		goto cleanup;
-	}
+	do {
+		ret = vmbus_negotiate_version(msginfo, version);
+		if (ret == 0)
+			break;
 
-	/* Wait for the connection response */
-	t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
-	if (t == 0) {
-		spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
-				flags);
-		list_del(&msginfo->msglistentry);
-		spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
-					flags);
-		ret = -ETIMEDOUT;
-		goto cleanup;
-	}
+		version = vmbus_get_next_version(version);
+	} while (version != VERSION_INVAL);
 
-	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
-	list_del(&msginfo->msglistentry);
-	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
-	/* Check if successful */
-	if (msginfo->response.version_response.version_supported) {
-		vmbus_connection.conn_state = CONNECTED;
-	} else {
-		pr_err("Unable to connect, "
-			"Version %d not supported by Hyper-V\n",
-			VMBUS_REVISION_NUMBER);
-		ret = -ECONNREFUSED;
+	if (version == VERSION_INVAL)
 		goto cleanup;
-	}
+
+	vmbus_proto_version = version;
+	pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
+		host_info_eax, host_info_ebx >> 16,
+		host_info_ebx & 0xFFFF, host_info_ecx,
+		host_info_edx >> 24, host_info_edx & 0xFFFFFF,
+		version >> 16, version & 0xFFFF);
 
 	kfree(msginfo);
 	return 0;
 
 cleanup:
+	pr_err("Unable to connect to host\n");
 	vmbus_connection.conn_state = DISCONNECTED;
 
 	if (vmbus_connection.work_queue)
@@ -212,6 +266,9 @@ static void process_chn_event(u32 relid)
 {
 	struct vmbus_channel *channel;
 	unsigned long flags;
+	void *arg;
+	bool read_state;
+	u32 bytes_to_read;
 
 	/*
 	 * Find the channel based on this relid and invokes the
@@ -234,10 +291,29 @@ static void process_chn_event(u32 relid)
 	 */
 
 	spin_lock_irqsave(&channel->inbound_lock, flags);
-	if (channel->onchannel_callback != NULL)
-		channel->onchannel_callback(channel->channel_callback_context);
-	else
+	if (channel->onchannel_callback != NULL) {
+		arg = channel->channel_callback_context;
+		read_state = channel->batched_reading;
+		/*
+		 * This callback reads the messages sent by the host.
+		 * We can optimize host to guest signaling by ensuring:
+		 * 1. While reading the channel, we disable interrupts from
+		 *    the host.
+		 * 2. Ensure that we process all posted messages from the host
+		 *    before returning from this callback.
+		 * 3. Once we return, enable signaling from the host. Once this
+		 *    state is set we check to see if additional packets are
+		 *    available to read. In this case we repeat the process.
+		 */
+
+		do {
+			hv_begin_read(&channel->inbound);
+			channel->onchannel_callback(arg);
+			bytes_to_read = hv_end_read(&channel->inbound);
+		} while (read_state && (bytes_to_read != 0));
+	} else {
 		pr_err("no channel callback for relid - %u\n", relid);
+	}
 
 	spin_unlock_irqrestore(&channel->inbound_lock, flags);
 }
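The do/while above closes a race: new packets can arrive between draining the ring and re-enabling host interrupts, so the reader re-checks after unmasking and loops if anything slipped in. A standalone model of just that loop (plain variables stand in for the shared ring; there is no real concurrency here, so the lines mirror the control flow only):

#include <stdio.h>

static unsigned int avail = 5;	/* packets the pretend host has queued */
static unsigned int interrupt_mask;

static void begin_read(void)
{
	interrupt_mask = 1;	/* host: don't interrupt while we drain */
}

static unsigned int end_read(void)
{
	interrupt_mask = 0;	/* re-enable, then re-check for races */
	return avail;
}

static void channel_callback(void)
{
	while (avail) {
		avail--;
		printf("consumed a packet\n");
	}
}

int main(void)
{
	unsigned int bytes_to_read;
	int batched_reading = 1;

	do {
		begin_read();
		channel_callback();
		bytes_to_read = end_read();
	} while (batched_reading && bytes_to_read != 0);

	printf("done: interrupt_mask=%u, avail=%u\n", interrupt_mask, avail);
	return 0;
}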
@@ -248,10 +324,32 @@ static void process_chn_event(u32 relid)
 void vmbus_on_event(unsigned long data)
 {
 	u32 dword;
-	u32 maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+	u32 maxdword;
 	int bit;
 	u32 relid;
-	u32 *recv_int_page = vmbus_connection.recv_int_page;
+	u32 *recv_int_page = NULL;
+	void *page_addr;
+	int cpu = smp_processor_id();
+	union hv_synic_event_flags *event;
+
+	if ((vmbus_proto_version == VERSION_WS2008) ||
+	    (vmbus_proto_version == VERSION_WIN7)) {
+		maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
+		recv_int_page = vmbus_connection.recv_int_page;
+	} else {
+		/*
+		 * When the host is win8 and beyond, the event page
+		 * can be directly checked to get the id of the channel
+		 * that has the interrupt pending.
+		 */
+		maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
+		page_addr = hv_context.synic_event_page[cpu];
+		event = (union hv_synic_event_flags *)page_addr +
+						VMBUS_MESSAGE_SINT;
+		recv_int_page = event->flags32;
+	}
+
+
 
 	/* Check events */
 	if (!recv_int_page)
@@ -307,12 +405,16 @@ int vmbus_post_msg(void *buffer, size_t buflen)
 /*
  * vmbus_set_event - Send an event notification to the parent
  */
-int vmbus_set_event(u32 child_relid)
+int vmbus_set_event(struct vmbus_channel *channel)
 {
-	/* Each u32 represents 32 channels */
-	sync_set_bit(child_relid & 31,
-		(unsigned long *)vmbus_connection.send_int_page +
-		(child_relid >> 5));
+	u32 child_relid = channel->offermsg.child_relid;
+
+	if (!channel->is_dedicated_interrupt) {
+		/* Each u32 represents 32 channels */
+		sync_set_bit(child_relid & 31,
+			(unsigned long *)vmbus_connection.send_int_page +
+			(child_relid >> 5));
+	}
 
-	return hv_signal_event();
+	return hv_signal_event(channel->sig_event);
 }
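The negotiation loop in vmbus_connect() walks down from the newest protocol version until the host accepts one. A userspace sketch of the walk-down (the version constants mirror the ones in hyperv.h; the host_accepts() stub standing in for the CHANNELMSG_INITIATE_CONTACT round trip is hypothetical):

#include <stdio.h>

#define VERSION_WS2008	((0 << 16) | 13)
#define VERSION_WIN7	((1 << 16) | 1)
#define VERSION_WIN8	((2 << 16) | 4)
#define VERSION_INVAL	(-1)

static int next_version(int v)
{
	switch (v) {
	case VERSION_WIN8:	return VERSION_WIN7;
	case VERSION_WIN7:	return VERSION_WS2008;
	default:		return VERSION_INVAL;
	}
}

/* Stand-in for the host's reply; pretend we run on a Win7-era host. */
static int host_accepts(int v)
{
	return v <= VERSION_WIN7;
}

int main(void)
{
	int version = VERSION_WIN8;	/* VERSION_CURRENT of this series */

	do {
		if (host_accepts(version))
			break;
		version = next_version(version);
	} while (version != VERSION_INVAL);

	if (version == VERSION_INVAL)
		printf("unable to connect to host\n");
	else
		printf("negotiated vmbus version %d.%d\n",
		       version >> 16, version & 0xFFFF);
	return 0;
}

Against the Win7 stub this prints "negotiated vmbus version 1.1", matching the Vmbus version line the kernel logs after a successful connect.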
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 3648f8f0f368..1c5481da6e4a 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -27,6 +27,7 @@
 #include <linux/vmalloc.h>
 #include <linux/hyperv.h>
 #include <linux/version.h>
+#include <linux/interrupt.h>
 #include <asm/hyperv.h>
 #include "hyperv_vmbus.h"
 
@@ -34,13 +35,16 @@
 struct hv_context hv_context = {
 	.synic_initialized	= false,
 	.hypercall_page		= NULL,
-	.signal_event_param	= NULL,
-	.signal_event_buffer	= NULL,
 };
 
 /*
  * query_hypervisor_info - Get version info of the windows hypervisor
  */
+unsigned int host_info_eax;
+unsigned int host_info_ebx;
+unsigned int host_info_ecx;
+unsigned int host_info_edx;
+
 static int query_hypervisor_info(void)
 {
 	unsigned int eax;
@@ -70,13 +74,10 @@ static int query_hypervisor_info(void)
 	edx = 0;
 	op = HVCPUID_VERSION;
 	cpuid(op, &eax, &ebx, &ecx, &edx);
-	pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
-		eax,
-		ebx >> 16,
-		ebx & 0xFFFF,
-		ecx,
-		edx >> 24,
-		edx & 0xFFFFFF);
+	host_info_eax = eax;
+	host_info_ebx = ebx;
+	host_info_ecx = ecx;
+	host_info_edx = edx;
 	}
 	return max_leaf;
 }
@@ -137,6 +138,10 @@ int hv_init(void)
 	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
 	memset(hv_context.synic_message_page, 0,
 	       sizeof(void *) * NR_CPUS);
+	memset(hv_context.vp_index, 0,
+	       sizeof(int) * NR_CPUS);
+	memset(hv_context.event_dpc, 0,
+	       sizeof(void *) * NR_CPUS);
 
 	max_leaf = query_hypervisor_info();
 
@@ -168,24 +173,6 @@ int hv_init(void)
 
 	hv_context.hypercall_page = virtaddr;
 
-	/* Setup the global signal event param for the signal event hypercall */
-	hv_context.signal_event_buffer =
-			kmalloc(sizeof(struct hv_input_signal_event_buffer),
-				GFP_KERNEL);
-	if (!hv_context.signal_event_buffer)
-		goto cleanup;
-
-	hv_context.signal_event_param =
-		(struct hv_input_signal_event *)
-			(ALIGN((unsigned long)
-				hv_context.signal_event_buffer,
-				HV_HYPERCALL_PARAM_ALIGN));
-	hv_context.signal_event_param->connectionid.asu32 = 0;
-	hv_context.signal_event_param->connectionid.u.id =
-		VMBUS_EVENT_CONNECTION_ID;
-	hv_context.signal_event_param->flag_number = 0;
-	hv_context.signal_event_param->rsvdz = 0;
-
 	return 0;
 
 cleanup:
@@ -213,10 +200,6 @@ void hv_cleanup(void)
 	/* Reset our OS id */
 	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
 
-	kfree(hv_context.signal_event_buffer);
-	hv_context.signal_event_buffer = NULL;
-	hv_context.signal_event_param = NULL;
-
 	if (hv_context.hypercall_page) {
 		hypercall_msr.as_uint64 = 0;
 		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
@@ -273,13 +256,12 @@ int hv_post_message(union hv_connection_id connection_id,
  *
  * This involves a hypercall.
  */
-u16 hv_signal_event(void)
+u16 hv_signal_event(void *con_id)
 {
 	u16 status;
 
-	status = do_hypercall(HVCALL_SIGNAL_EVENT,
-			       hv_context.signal_event_param,
-			       NULL) & 0xFFFF;
+	status = (do_hypercall(HVCALL_SIGNAL_EVENT, con_id, NULL) & 0xFFFF);
+
 	return status;
 }
 
@@ -297,6 +279,7 @@ void hv_synic_init(void *irqarg)
 	union hv_synic_siefp siefp;
 	union hv_synic_sint shared_sint;
 	union hv_synic_scontrol sctrl;
+	u64 vp_index;
 
 	u32 irq_vector = *((u32 *)(irqarg));
 	int cpu = smp_processor_id();
@@ -307,6 +290,15 @@ void hv_synic_init(void *irqarg)
 	/* Check the version */
 	rdmsrl(HV_X64_MSR_SVERSION, version);
 
+	hv_context.event_dpc[cpu] = (struct tasklet_struct *)
+					kmalloc(sizeof(struct tasklet_struct),
+						GFP_ATOMIC);
+	if (hv_context.event_dpc[cpu] == NULL) {
+		pr_err("Unable to allocate event dpc\n");
+		goto cleanup;
+	}
+	tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
+
 	hv_context.synic_message_page[cpu] =
 		(void *)get_zeroed_page(GFP_ATOMIC);
 
@@ -345,7 +337,7 @@ void hv_synic_init(void *irqarg)
 	shared_sint.as_uint64 = 0;
 	shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
 	shared_sint.masked = false;
-	shared_sint.auto_eoi = false;
+	shared_sint.auto_eoi = true;
 
 	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
 
@@ -356,6 +348,14 @@ void hv_synic_init(void *irqarg)
 	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
 
 	hv_context.synic_initialized = true;
+
+	/*
+	 * Setup the mapping between Hyper-V's notion
+	 * of cpuid and Linux' notion of cpuid.
+	 * This array will be indexed using Linux cpuid.
+	 */
+	rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
+	hv_context.vp_index[cpu] = (u32)vp_index;
 	return;
 
 cleanup:
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index dd289fd179ca..37873213e24f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -29,7 +29,6 @@
 #include <linux/memory_hotplug.h>
 #include <linux/memory.h>
 #include <linux/notifier.h>
-#include <linux/mman.h>
 #include <linux/percpu_counter.h>
 
 #include <linux/hyperv.h>
@@ -415,10 +414,17 @@ struct dm_info_msg {
 
 static bool hot_add;
 static bool do_hot_add;
+/*
+ * Delay reporting memory pressure by
+ * the specified number of seconds.
+ */
+static uint pressure_report_delay = 30;
 
 module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
 
+module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
 static atomic_t trans_id = ATOMIC_INIT(0);
 
 static int dm_ring_size = (5 * PAGE_SIZE);
@@ -517,6 +523,34 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 	}
 }
 
+unsigned long compute_balloon_floor(void)
+{
+	unsigned long min_pages;
+#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
+	/* Simple continuous piecewise linear function:
+	 *  max MiB -> min MiB	gradient
+	 *       0	   0
+	 *      16	  16
+	 *      32	  24
+	 *     128	  72	(1/2)
+	 *     512	 168	(1/4)
+	 *    2048	 360	(1/8)
+	 *    8192	 552	(1/32)
+	 *   32768	1320
+	 *  131072	4392
+	 */
+	if (totalram_pages < MB2PAGES(128))
+		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
+	else if (totalram_pages < MB2PAGES(512))
+		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
+	else if (totalram_pages < MB2PAGES(2048))
+		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
+	else
+		min_pages = MB2PAGES(296) + (totalram_pages >> 5);
+#undef MB2PAGES
+	return min_pages;
+}
+
 /*
  * Post our status as it relates memory pressure to the
  * host. Host expects the guests to post this status
@@ -530,15 +564,30 @@ static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
 static void post_status(struct hv_dynmem_device *dm)
 {
 	struct dm_status status;
+	struct sysinfo val;
 
-
+	if (pressure_report_delay > 0) {
+		--pressure_report_delay;
+		return;
+	}
+	si_meminfo(&val);
 	memset(&status, 0, sizeof(struct dm_status));
 	status.hdr.type = DM_STATUS_REPORT;
 	status.hdr.size = sizeof(struct dm_status);
 	status.hdr.trans_id = atomic_inc_return(&trans_id);
 
-
-	status.num_committed = vm_memory_committed();
+	/*
+	 * The host expects the guest to report free memory.
+	 * Further, the host expects the pressure information to
+	 * include the ballooned out pages.
+	 * For a given amount of memory that we are managing, we
+	 * need to compute a floor below which we should not balloon.
+	 * Compute this and add it to the pressure report.
+	 */
+	status.num_avail = val.freeram;
+	status.num_committed = vm_memory_committed() +
+			       dm->num_pages_ballooned +
+			       compute_balloon_floor();
 
 	vmbus_sendpacket(dm->dev->channel, &status,
 			 sizeof(struct dm_status),
@@ -547,8 +596,6 @@ static void post_status(struct hv_dynmem_device *dm)
 
 }
 
-
-
 static void free_balloon_pages(struct hv_dynmem_device *dm,
 			       union dm_mem_page_range *range_array)
 {
@@ -1013,9 +1060,7 @@ static int balloon_remove(struct hv_device *dev)
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Dynamic Memory Class ID */
 	/* 525074DC-8985-46e2-8057-A307DC18A502 */
-	{ VMBUS_DEVICE(0xdc, 0x74, 0x50, 0X52, 0x85, 0x89, 0xe2, 0x46,
-		       0x80, 0x57, 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02)
-	},
+	{ HV_DM_GUID, },
 	{ },
 };
 
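The floor function above is pure arithmetic, so it can be checked outside the kernel. This standalone program replays the piecewise-linear table from the comment (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))

static unsigned long balloon_floor(unsigned long totalram_pages)
{
	if (totalram_pages < MB2PAGES(128))
		return MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		return MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		return MB2PAGES(104) + (totalram_pages >> 3);
	else
		return MB2PAGES(296) + (totalram_pages >> 5);
}

int main(void)
{
	unsigned long sizes_mb[] = { 128, 512, 2048, 8192 };

	for (int i = 0; i < 4; i++) {
		unsigned long total = MB2PAGES(sizes_mb[i]);

		/* Convert the floor back from pages to MiB for display. */
		printf("%5lu MiB guest -> floor %4lu MiB\n", sizes_mb[i],
		       balloon_floor(total) >> (20 - PAGE_SHIFT));
	}
	return 0;
}

The output (72, 168, 360, 552 MiB) reproduces the gradient table in the patch comment, confirming the breakpoints and shift factors line up.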
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index a0667de7a04c..1d4cbd8e8261 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -49,6 +49,16 @@ static struct hv_util_service util_kvp = {
 	.util_deinit = hv_kvp_deinit,
 };
 
+static void perform_shutdown(struct work_struct *dummy)
+{
+	orderly_poweroff(true);
+}
+
+/*
+ * Perform the shutdown operation in a thread context.
+ */
+static DECLARE_WORK(shutdown_work, perform_shutdown);
+
 static void shutdown_onchannelcallback(void *context)
 {
 	struct vmbus_channel *channel = context;
@@ -106,7 +116,7 @@ static void shutdown_onchannelcallback(void *context)
 	}
 
 	if (execute_shutdown == true)
-		orderly_poweroff(true);
+		schedule_work(&shutdown_work);
 }
 
 /*
@@ -274,6 +284,16 @@ static int util_probe(struct hv_device *dev,
 		}
 	}
 
+	/*
+	 * The set of services managed by the util driver are not performance
+	 * critical and do not need batched reading. Furthermore, some services
+	 * such as KVP can only handle one message from the host at a time.
+	 * Turn off batched reading for all util drivers before we open the
+	 * channel.
+	 */
+
+	set_channel_read_state(dev->channel, false);
+
 	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
 			srv->util_cb, dev->channel);
 	if (ret)
@@ -304,21 +324,21 @@ static int util_remove(struct hv_device *dev)
 
 static const struct hv_vmbus_device_id id_table[] = {
 	/* Shutdown guid */
-	{ VMBUS_DEVICE(0x31, 0x60, 0x0B, 0X0E, 0x13, 0x52, 0x34, 0x49,
-		       0x81, 0x8B, 0x38, 0XD9, 0x0C, 0xED, 0x39, 0xDB)
-	  .driver_data = (unsigned long)&util_shutdown },
+	{ HV_SHUTDOWN_GUID,
+	  .driver_data = (unsigned long)&util_shutdown
+	},
 	/* Time synch guid */
-	{ VMBUS_DEVICE(0x30, 0xe6, 0x27, 0x95, 0xae, 0xd0, 0x7b, 0x49,
-		       0xad, 0xce, 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf)
-	  .driver_data = (unsigned long)&util_timesynch },
+	{ HV_TS_GUID,
+	  .driver_data = (unsigned long)&util_timesynch
+	},
 	/* Heartbeat guid */
-	{ VMBUS_DEVICE(0x39, 0x4f, 0x16, 0x57, 0x15, 0x91, 0x78, 0x4e,
-		       0xab, 0x55, 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d)
-	  .driver_data = (unsigned long)&util_heartbeat },
+	{ HV_HEART_BEAT_GUID,
+	  .driver_data = (unsigned long)&util_heartbeat
+	},
 	/* KVP guid */
-	{ VMBUS_DEVICE(0xe7, 0xf4, 0xa0, 0xa9, 0x45, 0x5a, 0x96, 0x4d,
-		       0xb8, 0x27, 0x8a, 0x84, 0x1e, 0x8c, 0x3, 0xe6)
-	  .driver_data = (unsigned long)&util_kvp },
+	{ HV_KVP_GUID,
+	  .driver_data = (unsigned long)&util_kvp
+	},
 	{ },
 };
 
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index d8d1fadb398a..12f2f9e989f7 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -101,15 +101,6 @@ enum hv_message_type {
 /* Define invalid partition identifier. */
 #define HV_PARTITION_ID_INVALID		((u64)0x0)
 
-/* Define connection identifier type. */
-union hv_connection_id {
-	u32 asu32;
-	struct {
-		u32 id:24;
-		u32 reserved:8;
-	} u;
-};
-
 /* Define port identifier type. */
 union hv_port_id {
 	u32 asu32;
@@ -338,13 +329,6 @@ struct hv_input_post_message {
 	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
 };
 
-/* Definition of the hv_signal_event hypercall input structure. */
-struct hv_input_signal_event {
-	union hv_connection_id connectionid;
-	u16 flag_number;
-	u16 rsvdz;
-};
-
 /*
  * Versioning definitions used for guests reporting themselves to the
  * hypervisor, and visa versa.
@@ -498,11 +482,6 @@ static const uuid_le VMBUS_SERVICE_ID = {
 
 
 
-struct hv_input_signal_event_buffer {
-	u64 align8;
-	struct hv_input_signal_event event;
-};
-
 struct hv_context {
 	/* We only support running on top of Hyper-V
 	 * So at this point this really can only contain the Hyper-V ID
@@ -513,16 +492,24 @@ struct hv_context {
 
 	bool synic_initialized;
 
-	/*
-	 * This is used as an input param to HvCallSignalEvent hypercall. The
-	 * input param is immutable in our usage and must be dynamic mem (vs
-	 * stack or global). */
-	struct hv_input_signal_event_buffer *signal_event_buffer;
-	/* 8-bytes aligned of the buffer above */
-	struct hv_input_signal_event *signal_event_param;
-
 	void *synic_message_page[NR_CPUS];
 	void *synic_event_page[NR_CPUS];
+	/*
+	 * Hypervisor's notion of virtual processor ID is different from
+	 * Linux' notion of CPU ID. This information can only be retrieved
+	 * in the context of the calling CPU. Setup a map for easy access
+	 * to this information:
+	 *
+	 * vp_index[a] is the Hyper-V's processor ID corresponding to
+	 * Linux cpuid 'a'.
+	 */
+	u32 vp_index[NR_CPUS];
+	/*
+	 * Starting with win8, we can take channel interrupts on any CPU;
+	 * we will manage the tasklet that handles events on a per CPU
+	 * basis.
+	 */
+	struct tasklet_struct *event_dpc[NR_CPUS];
 };
 
 extern struct hv_context hv_context;
@@ -538,12 +525,19 @@ extern int hv_post_message(union hv_connection_id connection_id,
 			   enum hv_message_type message_type,
 			   void *payload, size_t payload_size);
 
-extern u16 hv_signal_event(void);
+extern u16 hv_signal_event(void *con_id);
 
 extern void hv_synic_init(void *irqarg);
 
 extern void hv_synic_cleanup(void *arg);
 
+/*
+ * Host version information.
+ */
+extern unsigned int host_info_eax;
+extern unsigned int host_info_ebx;
+extern unsigned int host_info_ecx;
+extern unsigned int host_info_edx;
 
 /* Interface */
 
@@ -555,7 +549,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
 
 int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
 		    struct scatterlist *sglist,
-		    u32 sgcount);
+		    u32 sgcount, bool *signal);
 
 int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
 		   u32 buflen);
@@ -563,13 +557,16 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *ring_info, void *buffer,
 int hv_ringbuffer_read(struct hv_ring_buffer_info *ring_info,
 		   void *buffer,
 		   u32 buflen,
-		   u32 offset);
+		   u32 offset, bool *signal);
 
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *ring_info);
 
 void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
 			    struct hv_ring_buffer_debug_info *debug_info);
 
+void hv_begin_read(struct hv_ring_buffer_info *rbi);
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi);
+
 /*
  * Maximum channels is determined by the size of the interrupt page
  * which is PAGE_SIZE. 1/2 of PAGE_SIZE is for send endpoint interrupt
@@ -657,7 +654,7 @@ int vmbus_connect(void);
 
 int vmbus_post_msg(void *buffer, size_t buflen);
 
-int vmbus_set_event(u32 child_relid);
+int vmbus_set_event(struct vmbus_channel *channel);
 
 void vmbus_on_event(unsigned long data);
 
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 7233c88f01b8..cafa72ffdc30 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -29,6 +29,105 @@
 
 #include "hyperv_vmbus.h"
 
+void hv_begin_read(struct hv_ring_buffer_info *rbi)
+{
+	rbi->ring_buffer->interrupt_mask = 1;
+	smp_mb();
+}
+
+u32 hv_end_read(struct hv_ring_buffer_info *rbi)
+{
+	u32 read;
+	u32 write;
+
+	rbi->ring_buffer->interrupt_mask = 0;
+	smp_mb();
+
+	/*
+	 * Now check to see if the ring buffer is still empty.
+	 * If it is not, we raced and we need to process new
+	 * incoming messages.
+	 */
+	hv_get_ringbuffer_availbytes(rbi, &read, &write);
+
+	return read;
+}
+
+/*
+ * When we write to the ring buffer, check if the host needs to
+ * be signaled. Here are the details of this protocol:
+ *
+ *	1. The host guarantees that while it is draining the
+ *	   ring buffer, it will set the interrupt_mask to
+ *	   indicate it does not need to be interrupted when
+ *	   new data is placed.
+ *
+ *	2. The host guarantees that it will completely drain
+ *	   the ring buffer before exiting the read loop. Further,
+ *	   once the ring buffer is empty, it will clear the
+ *	   interrupt_mask and re-check to see if new data has
+ *	   arrived.
+ */
+
+static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
+{
+	if (rbi->ring_buffer->interrupt_mask)
+		return false;
+
+	/*
+	 * This is the only case we need to signal: when the
+	 * ring transitions from being empty to non-empty.
+	 */
+	if (old_write == rbi->ring_buffer->read_index)
+		return true;
+
+	return false;
+}
+
+/*
+ * To optimize the flow management on the send-side,
+ * when the sender is blocked because of lack of
+ * sufficient space in the ring buffer, potentially the
+ * consumer of the ring buffer can signal the producer.
+ * This is controlled by the following parameters:
+ *
+ * 1. pending_send_sz: This is the size in bytes that the
+ *    producer is trying to send.
+ * 2. The feature bit feat_pending_send_sz set to indicate if
+ *    the consumer of the ring will signal when the ring
+ *    state transitions from being full to a state where
+ *    there is room for the producer to send the pending packet.
+ */
+
+static bool hv_need_to_signal_on_read(u32 old_rd,
+				      struct hv_ring_buffer_info *rbi)
+{
+	u32 prev_write_sz;
+	u32 cur_write_sz;
+	u32 r_size;
+	u32 write_loc = rbi->ring_buffer->write_index;
+	u32 read_loc = rbi->ring_buffer->read_index;
+	u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+
+	/*
+	 * If the other end is not blocked on write don't bother.
+	 */
+	if (pending_sz == 0)
+		return false;
+
+	r_size = rbi->ring_datasize;
+	cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
+			read_loc - write_loc;
+
+	prev_write_sz = write_loc >= old_rd ? r_size - (write_loc - old_rd) :
+			old_rd - write_loc;
+
+
+	if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+		return true;
+
+	return false;
+}
 
 /*
  * hv_get_next_write_location()
@@ -239,19 +338,6 @@ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
 	}
 }
 
-
-/*
- *
- * hv_get_ringbuffer_interrupt_mask()
- *
- * Get the interrupt mask for the specified ring buffer
- *
- */
-u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi)
-{
-	return rbi->ring_buffer->interrupt_mask;
-}
-
 /*
  *
  * hv_ringbuffer_init()
@@ -298,7 +384,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
  *
  */
 int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
-		    struct scatterlist *sglist, u32 sgcount)
+		    struct scatterlist *sglist, u32 sgcount, bool *signal)
 {
 	int i = 0;
 	u32 bytes_avail_towrite;
@@ -307,6 +393,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 
 	struct scatterlist *sg;
 	u32 next_write_location;
+	u32 old_write;
 	u64 prev_indices = 0;
 	unsigned long flags;
 
@@ -335,6 +422,8 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 	/* Write to the ring buffer */
 	next_write_location = hv_get_next_write_location(outring_info);
 
+	old_write = next_write_location;
+
 	for_each_sg(sglist, sg, sgcount, i)
 	{
 		next_write_location = hv_copyto_ringbuffer(outring_info,
@@ -351,14 +440,16 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
 					     &prev_indices,
 					     sizeof(u64));
 
-	/* Make sure we flush all writes before updating the writeIndex */
-	smp_wmb();
+	/* Issue a full memory barrier before updating the write index */
+	smp_mb();
 
 	/* Now, update the write location */
 	hv_set_next_write_location(outring_info, next_write_location);
 
 
 	spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+
+	*signal = hv_need_to_signal(old_write, outring_info);
 	return 0;
 }
 
@@ -414,13 +505,14 @@ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info,
  *
  */
 int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
-		   u32 buflen, u32 offset)
+		   u32 buflen, u32 offset, bool *signal)
 {
 	u32 bytes_avail_towrite;
 	u32 bytes_avail_toread;
 	u32 next_read_location = 0;
 	u64 prev_indices = 0;
 	unsigned long flags;
+	u32 old_read;
 
 	if (buflen <= 0)
 		return -EINVAL;
@@ -431,6 +523,8 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
 			    &bytes_avail_toread,
 			    &bytes_avail_towrite);
 
+	old_read = bytes_avail_toread;
+
 	/* Make sure there is something to read */
 	if (bytes_avail_toread < buflen) {
 		spin_unlock_irqrestore(&inring_info->ring_lock, flags);
@@ -461,5 +555,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer,
 
 	spin_unlock_irqrestore(&inring_info->ring_lock, flags);
 
+	*signal = hv_need_to_signal_on_read(old_read, inring_info);
+
 	return 0;
 }
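Both signaling rules are small enough to exercise in user space. The sketch below reproduces only the decision logic: the writer signals on the empty-to-non-empty transition, and the reader signals a blocked writer once free space crosses pending_send_sz. The bare ring struct and the index values are made up for illustration; the kernel feeds these functions its own bookkeeping values.

#include <stdio.h>
#include <stdbool.h>

struct ring {
	unsigned int read_index;
	unsigned int write_index;
	unsigned int interrupt_mask;
	unsigned int pending_send_sz;
	unsigned int datasize;
};

/* Writer side: signal only when the ring goes empty -> non-empty. */
static bool need_to_signal(unsigned int old_write, const struct ring *r)
{
	if (r->interrupt_mask)
		return false;
	return old_write == r->read_index;
}

/* Reader side: wake a blocked writer once enough room has opened up. */
static bool need_to_signal_on_read(unsigned int old_read, const struct ring *r)
{
	unsigned int cur_write_sz, prev_write_sz;

	if (r->pending_send_sz == 0)
		return false;

	cur_write_sz = r->write_index >= r->read_index ?
		r->datasize - (r->write_index - r->read_index) :
		r->read_index - r->write_index;
	prev_write_sz = r->write_index >= old_read ?
		r->datasize - (r->write_index - old_read) :
		old_read - r->write_index;

	return prev_write_sz < r->pending_send_sz &&
	       cur_write_sz >= r->pending_send_sz;
}

int main(void)
{
	struct ring r = { 0, 0, 0, 0, 4096 };
	unsigned int old_write;

	old_write = r.write_index;	/* ring empty: must signal */
	r.write_index = 100;
	printf("first write signals:  %d\n", need_to_signal(old_write, &r));

	old_write = r.write_index;	/* host still draining: no signal */
	r.write_index = 200;
	printf("second write signals: %d\n", need_to_signal(old_write, &r));

	r.pending_send_sz = 512;	/* writer blocked, needs 512 bytes */
	r.write_index = 4000;
	r.read_index = 3600;		/* reader drained past the threshold */
	printf("reader wakes writer:  %d\n", need_to_signal_on_read(200, &r));
	return 0;
}

The run prints 1, 0, 1: only the transition writes and the threshold crossing generate hypercalls, which is the whole point of replacing the old unconditional interrupt-mask check.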
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 8e1a9ec53003..cf19dfa5ead1 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
 #include <acpi/acpi_bus.h>
 #include <linux/completion.h>
 #include <linux/hyperv.h>
+#include <linux/kernel_stat.h>
 #include <asm/hyperv.h>
 #include <asm/hypervisor.h>
 #include "hyperv_vmbus.h"
@@ -41,7 +42,6 @@
 static struct acpi_device *hv_acpi_dev;
 
 static struct tasklet_struct msg_dpc;
-static struct tasklet_struct event_dpc;
 static struct completion probe_event;
 static int irq;
 
@@ -454,21 +454,40 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
 	union hv_synic_event_flags *event;
 	bool handled = false;
 
+	page_addr = hv_context.synic_event_page[cpu];
+	if (page_addr == NULL)
+		return IRQ_NONE;
+
+	event = (union hv_synic_event_flags *)page_addr +
+					VMBUS_MESSAGE_SINT;
 	/*
 	 * Check for events before checking for messages. This is the order
 	 * in which events and messages are checked in Windows guests on
 	 * Hyper-V, and the Windows team suggested we do the same.
 	 */
 
-	page_addr = hv_context.synic_event_page[cpu];
-	event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT;
+	if ((vmbus_proto_version == VERSION_WS2008) ||
+	    (vmbus_proto_version == VERSION_WIN7)) {
 
-	/* Since we are a child, we only need to check bit 0 */
-	if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) {
+		/* Since we are a child, we only need to check bit 0 */
+		if (sync_test_and_clear_bit(0,
+			(unsigned long *) &event->flags32[0])) {
+			handled = true;
+		}
+	} else {
+		/*
+		 * Our host is win8 or above. The signaling mechanism
+		 * has changed and we can directly look at the event page.
+		 * If bit n is set then we have an interrupt on the channel
+		 * whose id is n.
+		 */
 		handled = true;
-		tasklet_schedule(&event_dpc);
 	}
 
+	if (handled)
+		tasklet_schedule(hv_context.event_dpc[cpu]);
+
+
 	page_addr = hv_context.synic_message_page[cpu];
 	msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
 
@@ -485,6 +504,19 @@ static irqreturn_t vmbus_isr(int irq, void *dev_id)
 }
 
 /*
+ * vmbus interrupt flow handler:
+ * vmbus interrupts can concurrently occur on multiple CPUs and
+ * can be handled concurrently.
+ */
+
+static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc)
+{
+	kstat_incr_irqs_this_cpu(irq, desc);
+
+	desc->action->handler(irq, desc->action->dev_id);
+}
+
+/*
  * vmbus_bus_init -Main vmbus driver initialization routine.
  *
  * Here, we
@@ -506,7 +538,6 @@ static int vmbus_bus_init(int irq)
 	}
 
 	tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0);
-	tasklet_init(&event_dpc, vmbus_on_event, 0);
 
 	ret = bus_register(&hv_bus);
 	if (ret)
@@ -520,6 +551,13 @@ static int vmbus_bus_init(int irq)
 		goto err_unregister;
 	}
 
+	/*
+	 * Vmbus interrupts can be handled concurrently on
+	 * different CPUs. Establish an appropriate interrupt flow
+	 * handler that can support this model.
+	 */
+	irq_set_handler(irq, vmbus_flow_handler);
+
 	vector = IRQ0_VECTOR + irq;
 
 	/*
@@ -575,8 +613,6 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
 
 	ret = driver_register(&hv_driver->driver);
 
-	vmbus_request_offers();
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(__vmbus_driver_register);