author    Linus Torvalds <torvalds@linux-foundation.org>  2019-09-24 15:36:31 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-09-24 15:36:31 -0400
commit    af5a7e99cce2a24e98487e70f99c8716643cc445 (patch)
tree      a5db067426726c9d7027898164e103dee9fb3ad3
parent    0b36c9eed232760fbf51921818f48b3699f1f1ca (diff)
parent    d8bd2d442bb2688b428ac7164e5dc6d95d4fa65b (diff)
Merge tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux
Pull Hyper-V updates from Sasha Levin:

 - first round of vmbus hibernation support (Dexuan Cui)

 - remove dependencies on PAGE_SIZE (Maya Nakamura)

 - move the hyper-v tools/ code into the tools build system (Andy Shevchenko)

 - hyper-v balloon cleanups (Dexuan Cui)

* tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
  Drivers: hv: vmbus: Resume after fixing up old primary channels
  Drivers: hv: vmbus: Suspend after cleaning up hv_sock and sub channels
  Drivers: hv: vmbus: Clean up hv_sock channels by force upon suspend
  Drivers: hv: vmbus: Suspend/resume the vmbus itself for hibernation
  Drivers: hv: vmbus: Ignore the offers when resuming from hibernation
  Drivers: hv: vmbus: Implement suspend/resume for VSC drivers for hibernation
  Drivers: hv: vmbus: Add a helper function is_sub_channel()
  Drivers: hv: vmbus: Suspend/resume the synic for hibernation
  Drivers: hv: vmbus: Break out synic enable and disable operations
  HID: hv: Remove dependencies on PAGE_SIZE for ring buffer
  Tools: hv: move to tools buildsystem
  hv_balloon: Reorganize the probe function
  hv_balloon: Use a static page for the balloon_up send buffer
-rw-r--r--  drivers/hid/hid-hyperv.c  |   4
-rw-r--r--  drivers/hv/channel_mgmt.c | 161
-rw-r--r--  drivers/hv/connection.c   |   8
-rw-r--r--  drivers/hv/hv.c           |  66
-rw-r--r--  drivers/hv/hv_balloon.c   | 143
-rw-r--r--  drivers/hv/hyperv_vmbus.h |  30
-rw-r--r--  drivers/hv/vmbus_drv.c    | 265
-rw-r--r--  include/linux/hyperv.h    |  16
-rw-r--r--  tools/hv/Build            |   3
-rw-r--r--  tools/hv/Makefile         |  51
10 files changed, 613 insertions(+), 134 deletions(-)
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 7795831d37c2..cc5b09b87ab0 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -104,8 +104,8 @@ struct synthhid_input_report {
 
 #pragma pack(pop)
 
-#define INPUTVSC_SEND_RING_BUFFER_SIZE (10*PAGE_SIZE)
-#define INPUTVSC_RECV_RING_BUFFER_SIZE (10*PAGE_SIZE)
+#define INPUTVSC_SEND_RING_BUFFER_SIZE (40 * 1024)
+#define INPUTVSC_RECV_RING_BUFFER_SIZE (40 * 1024)
 
 
 enum pipe_prot_msg_type {
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index addcef50df7a..8eb167540b4f 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -407,7 +407,15 @@ void hv_process_channel_removal(struct vmbus_channel *channel)
 		cpumask_clear_cpu(channel->target_cpu,
 				  &primary_channel->alloced_cpus_in_node);
 
-	vmbus_release_relid(channel->offermsg.child_relid);
+	/*
+	 * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
+	 * the relid is invalidated; after hibernation, when the user-space app
+	 * destroys the channel, the relid is INVALID_RELID, and in this case
+	 * it's unnecessary and unsafe to release the old relid, since the same
+	 * relid can refer to a completely different channel now.
+	 */
+	if (channel->offermsg.child_relid != INVALID_RELID)
+		vmbus_release_relid(channel->offermsg.child_relid);
 
 	free_channel(channel);
 }
@@ -545,6 +553,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
 
 	mutex_lock(&vmbus_connection.channel_mutex);
 
+	/* Remember the channels that should be cleaned up upon suspend. */
+	if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
+		atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);
+
 	/*
 	 * Now that we have acquired the channel_mutex,
 	 * we can release the potentially racing rescind thread.
@@ -847,6 +859,67 @@ void vmbus_initiate_unload(bool crash)
 	vmbus_wait_for_unload();
 }
 
+static void check_ready_for_resume_event(void)
+{
+	/*
+	 * If all the old primary channels have been fixed up, then it's safe
+	 * to resume.
+	 */
+	if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
+		complete(&vmbus_connection.ready_for_resume_event);
+}
+
+static void vmbus_setup_channel_state(struct vmbus_channel *channel,
+				      struct vmbus_channel_offer_channel *offer)
+{
+	/*
+	 * Setup state for signalling the host.
+	 */
+	channel->sig_event = VMBUS_EVENT_CONNECTION_ID;
+
+	if (vmbus_proto_version != VERSION_WS2008) {
+		channel->is_dedicated_interrupt =
+				(offer->is_dedicated_interrupt != 0);
+		channel->sig_event = offer->connection_id;
+	}
+
+	memcpy(&channel->offermsg, offer,
+	       sizeof(struct vmbus_channel_offer_channel));
+	channel->monitor_grp = (u8)offer->monitorid / 32;
+	channel->monitor_bit = (u8)offer->monitorid % 32;
+}
+
+/*
+ * find_primary_channel_by_offer - Get the channel object given the new offer.
+ * This is only used in the resume path of hibernation.
+ */
+static struct vmbus_channel *
+find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
+{
+	struct vmbus_channel *channel = NULL, *iter;
+	const guid_t *inst1, *inst2;
+
+	/* Ignore sub-channel offers. */
+	if (offer->offer.sub_channel_index != 0)
+		return NULL;
+
+	mutex_lock(&vmbus_connection.channel_mutex);
+
+	list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
+		inst1 = &iter->offermsg.offer.if_instance;
+		inst2 = &offer->offer.if_instance;
+
+		if (guid_equal(inst1, inst2)) {
+			channel = iter;
+			break;
+		}
+	}
+
+	mutex_unlock(&vmbus_connection.channel_mutex);
+
+	return channel;
+}
+
 /*
  * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
  *
@@ -854,12 +927,58 @@ void vmbus_initiate_unload(bool crash)
 static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 {
 	struct vmbus_channel_offer_channel *offer;
-	struct vmbus_channel *newchannel;
+	struct vmbus_channel *oldchannel, *newchannel;
+	size_t offer_sz;
 
 	offer = (struct vmbus_channel_offer_channel *)hdr;
 
 	trace_vmbus_onoffer(offer);
 
+	oldchannel = find_primary_channel_by_offer(offer);
+
+	if (oldchannel != NULL) {
+		atomic_dec(&vmbus_connection.offer_in_progress);
+
+		/*
+		 * We're resuming from hibernation: all the sub-channel and
+		 * hv_sock channels we had before the hibernation should have
+		 * been cleaned up, and now we must be seeing a re-offered
+		 * primary channel that we had before the hibernation.
+		 */
+
+		WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
+		/* Fix up the relid. */
+		oldchannel->offermsg.child_relid = offer->child_relid;
+
+		offer_sz = sizeof(*offer);
+		if (memcmp(offer, &oldchannel->offermsg, offer_sz) == 0) {
+			check_ready_for_resume_event();
+			return;
+		}
+
+		/*
+		 * This is not an error, since the host can also change the
+		 * other field(s) of the offer, e.g. on WS RS5 (Build 17763),
+		 * the offer->connection_id of the Mellanox VF vmbus device
+		 * can change when the host reoffers the device upon resume.
+		 */
+		pr_debug("vmbus offer changed: relid=%d\n",
+			 offer->child_relid);
+
+		print_hex_dump_debug("Old vmbus offer: ", DUMP_PREFIX_OFFSET,
+				     16, 4, &oldchannel->offermsg, offer_sz,
+				     false);
+		print_hex_dump_debug("New vmbus offer: ", DUMP_PREFIX_OFFSET,
+				     16, 4, offer, offer_sz, false);
+
+		/* Fix up the old channel. */
+		vmbus_setup_channel_state(oldchannel, offer);
+
+		check_ready_for_resume_event();
+
+		return;
+	}
+
 	/* Allocate the channel object and save this offer. */
 	newchannel = alloc_channel();
 	if (!newchannel) {
@@ -869,25 +988,21 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 		return;
 	}
 
-	/*
-	 * Setup state for signalling the host.
-	 */
-	newchannel->sig_event = VMBUS_EVENT_CONNECTION_ID;
-
-	if (vmbus_proto_version != VERSION_WS2008) {
-		newchannel->is_dedicated_interrupt =
-			(offer->is_dedicated_interrupt != 0);
-		newchannel->sig_event = offer->connection_id;
-	}
-
-	memcpy(&newchannel->offermsg, offer,
-	       sizeof(struct vmbus_channel_offer_channel));
-	newchannel->monitor_grp = (u8)offer->monitorid / 32;
-	newchannel->monitor_bit = (u8)offer->monitorid % 32;
+	vmbus_setup_channel_state(newchannel, offer);
 
 	vmbus_process_offer(newchannel);
 }
 
+static void check_ready_for_suspend_event(void)
+{
+	/*
+	 * If all the sub-channels or hv_sock channels have been cleaned up,
+	 * then it's safe to suspend.
+	 */
+	if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
+		complete(&vmbus_connection.ready_for_suspend_event);
+}
+
 /*
  * vmbus_onoffer_rescind - Rescind offer handler.
  *
@@ -898,6 +1013,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 	struct vmbus_channel_rescind_offer *rescind;
 	struct vmbus_channel *channel;
 	struct device *dev;
+	bool clean_up_chan_for_suspend;
 
 	rescind = (struct vmbus_channel_rescind_offer *)hdr;
 
@@ -937,6 +1053,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 		return;
 	}
 
+	clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
+				    is_sub_channel(channel);
 	/*
 	 * Before setting channel->rescind in vmbus_rescind_cleanup(), we
 	 * should make sure the channel callback is not running any more.
@@ -962,6 +1080,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 	if (channel->device_obj) {
 		if (channel->chn_rescind_callback) {
 			channel->chn_rescind_callback(channel);
+
+			if (clean_up_chan_for_suspend)
+				check_ready_for_suspend_event();
+
 			return;
 		}
 		/*
@@ -994,6 +1116,11 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
 		}
 		mutex_unlock(&vmbus_connection.channel_mutex);
 	}
+
+	/* The "channel" may have been freed. Do not access it any longer. */
+
+	if (clean_up_chan_for_suspend)
+		check_ready_for_suspend_event();
 }
 
 void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 09829e15d4a0..6e4c015783ff 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -26,6 +26,11 @@
 struct vmbus_connection vmbus_connection = {
 	.conn_state		= DISCONNECTED,
 	.next_gpadl_handle	= ATOMIC_INIT(0xE1E10),
+
+	.ready_for_suspend_event= COMPLETION_INITIALIZER(
+				  vmbus_connection.ready_for_suspend_event),
+	.ready_for_resume_event	= COMPLETION_INITIALIZER(
+				  vmbus_connection.ready_for_resume_event),
 };
 EXPORT_SYMBOL_GPL(vmbus_connection);
 
@@ -59,8 +64,7 @@ static __u32 vmbus_get_next_version(__u32 current_version)
 	}
 }
 
-static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
-				   __u32 version)
+int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
 {
 	int ret = 0;
 	unsigned int cur_cpu;
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 6188fb7dda42..fcc52797c169 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -154,7 +154,7 @@ void hv_synic_free(void)
  * retrieve the initialized message and event pages. Otherwise, we create and
  * initialize the message and event pages.
  */
-int hv_synic_init(unsigned int cpu)
+void hv_synic_enable_regs(unsigned int cpu)
 {
 	struct hv_per_cpu_context *hv_cpu
 		= per_cpu_ptr(hv_context.cpu_context, cpu);
@@ -196,6 +196,11 @@ int hv_synic_init(unsigned int cpu)
 	sctrl.enable = 1;
 
 	hv_set_synic_state(sctrl.as_uint64);
+}
+
+int hv_synic_init(unsigned int cpu)
+{
+	hv_synic_enable_regs(cpu);
 
 	hv_stimer_init(cpu);
 
@@ -205,20 +210,45 @@ int hv_synic_init(unsigned int cpu)
 /*
  * hv_synic_cleanup - Cleanup routine for hv_synic_init().
  */
-int hv_synic_cleanup(unsigned int cpu)
+void hv_synic_disable_regs(unsigned int cpu)
 {
 	union hv_synic_sint shared_sint;
 	union hv_synic_simp simp;
 	union hv_synic_siefp siefp;
 	union hv_synic_scontrol sctrl;
+
+	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+
+	shared_sint.masked = 1;
+
+	/* Need to correctly cleanup in the case of SMP!!! */
+	/* Disable the interrupt */
+	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
+
+	hv_get_simp(simp.as_uint64);
+	simp.simp_enabled = 0;
+	simp.base_simp_gpa = 0;
+
+	hv_set_simp(simp.as_uint64);
+
+	hv_get_siefp(siefp.as_uint64);
+	siefp.siefp_enabled = 0;
+	siefp.base_siefp_gpa = 0;
+
+	hv_set_siefp(siefp.as_uint64);
+
+	/* Disable the global synic bit */
+	hv_get_synic_state(sctrl.as_uint64);
+	sctrl.enable = 0;
+	hv_set_synic_state(sctrl.as_uint64);
+}
+
+int hv_synic_cleanup(unsigned int cpu)
+{
 	struct vmbus_channel *channel, *sc;
 	bool channel_found = false;
 	unsigned long flags;
 
-	hv_get_synic_state(sctrl.as_uint64);
-	if (sctrl.enable != 1)
-		return -EFAULT;
-
 	/*
 	 * Search for channels which are bound to the CPU we're about to
 	 * cleanup. In case we find one and vmbus is still connected we need to
@@ -249,29 +279,7 @@ int hv_synic_cleanup(unsigned int cpu)
 
 	hv_stimer_cleanup(cpu);
 
-	hv_get_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
-
-	shared_sint.masked = 1;
-
-	/* Need to correctly cleanup in the case of SMP!!! */
-	/* Disable the interrupt */
-	hv_set_synint_state(VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
-
-	hv_get_simp(simp.as_uint64);
-	simp.simp_enabled = 0;
-	simp.base_simp_gpa = 0;
-
-	hv_set_simp(simp.as_uint64);
-
-	hv_get_siefp(siefp.as_uint64);
-	siefp.siefp_enabled = 0;
-	siefp.base_siefp_gpa = 0;
-
-	hv_set_siefp(siefp.as_uint64);
-
-	/* Disable the global synic bit */
-	sctrl.enable = 0;
-	hv_set_synic_state(sctrl.as_uint64);
+	hv_synic_disable_regs(cpu);
 
 	return 0;
 }
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 6fb4ea5f0304..34bd73526afd 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -494,7 +494,7 @@ enum hv_dm_state {
 
 
 static __u8 recv_buffer[PAGE_SIZE];
-static __u8 *send_buffer;
+static __u8 balloon_up_send_buffer[PAGE_SIZE];
 #define PAGES_IN_2M 512
 #define HA_CHUNK (32 * 1024)
 
@@ -1292,8 +1292,8 @@ static void balloon_up(struct work_struct *dummy)
 	}
 
 	while (!done) {
-		bl_resp = (struct dm_balloon_response *)send_buffer;
-		memset(send_buffer, 0, PAGE_SIZE);
+		memset(balloon_up_send_buffer, 0, PAGE_SIZE);
+		bl_resp = (struct dm_balloon_response *)balloon_up_send_buffer;
 		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
 		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
 		bl_resp->more_pages = 1;
@@ -1564,58 +1564,18 @@ static void balloon_onchannelcallback(void *context)
 
 }
 
-static int balloon_probe(struct hv_device *dev,
-			 const struct hv_vmbus_device_id *dev_id)
+static int balloon_connect_vsp(struct hv_device *dev)
 {
-	int ret;
-	unsigned long t;
 	struct dm_version_request version_req;
 	struct dm_capabilities cap_msg;
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-	do_hot_add = hot_add;
-#else
-	do_hot_add = false;
-#endif
-
-	/*
-	 * First allocate a send buffer.
-	 */
-
-	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!send_buffer)
-		return -ENOMEM;
+	unsigned long t;
+	int ret;
 
 	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
 			 balloon_onchannelcallback, dev);
-
 	if (ret)
-		goto probe_error0;
+		return ret;
 
-	dm_device.dev = dev;
-	dm_device.state = DM_INITIALIZING;
-	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
-	init_completion(&dm_device.host_event);
-	init_completion(&dm_device.config_event);
-	INIT_LIST_HEAD(&dm_device.ha_region_list);
-	spin_lock_init(&dm_device.ha_lock);
-	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
-	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
-	dm_device.host_specified_ha_region = false;
-
-	dm_device.thread =
-		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
-	if (IS_ERR(dm_device.thread)) {
-		ret = PTR_ERR(dm_device.thread);
-		goto probe_error1;
-	}
-
-#ifdef CONFIG_MEMORY_HOTPLUG
-	set_online_page_callback(&hv_online_page);
-	register_memory_notifier(&hv_memory_nb);
-#endif
-
-	hv_set_drvdata(dev, &dm_device);
 	/*
 	 * Initiate the hand shake with the host and negotiate
 	 * a version that the host can support. We start with the
@@ -1631,16 +1591,15 @@ static int balloon_probe(struct hv_device *dev,
 	dm_device.version = version_req.version.version;
 
 	ret = vmbus_sendpacket(dev->channel, &version_req,
 			       sizeof(struct dm_version_request),
-			       (unsigned long)NULL,
-			       VM_PKT_DATA_INBAND, 0);
+			       (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
 	if (ret)
-		goto probe_error2;
+		goto out;
 
 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
 	if (t == 0) {
 		ret = -ETIMEDOUT;
-		goto probe_error2;
+		goto out;
 	}
 
 	/*
@@ -1648,8 +1607,8 @@ static int balloon_probe(struct hv_device *dev,
 	 * fail the probe function.
 	 */
 	if (dm_device.state == DM_INIT_ERROR) {
-		ret = -ETIMEDOUT;
-		goto probe_error2;
+		ret = -EPROTO;
+		goto out;
 	}
 
 	pr_info("Using Dynamic Memory protocol version %u.%u\n",
@@ -1682,16 +1641,15 @@ static int balloon_probe(struct hv_device *dev,
 	cap_msg.max_page_number = -1;
 
 	ret = vmbus_sendpacket(dev->channel, &cap_msg,
 			       sizeof(struct dm_capabilities),
-			       (unsigned long)NULL,
-			       VM_PKT_DATA_INBAND, 0);
+			       (unsigned long)NULL, VM_PKT_DATA_INBAND, 0);
 	if (ret)
-		goto probe_error2;
+		goto out;
 
 	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
 	if (t == 0) {
 		ret = -ETIMEDOUT;
-		goto probe_error2;
+		goto out;
 	}
 
 	/*
@@ -1699,25 +1657,65 @@ static int balloon_probe(struct hv_device *dev,
 	 * fail the probe function.
 	 */
 	if (dm_device.state == DM_INIT_ERROR) {
-		ret = -ETIMEDOUT;
-		goto probe_error2;
+		ret = -EPROTO;
+		goto out;
 	}
 
+	return 0;
+out:
+	vmbus_close(dev->channel);
+	return ret;
+}
+
+static int balloon_probe(struct hv_device *dev,
+			 const struct hv_vmbus_device_id *dev_id)
+{
+	int ret;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+	do_hot_add = hot_add;
+#else
+	do_hot_add = false;
+#endif
+	dm_device.dev = dev;
+	dm_device.state = DM_INITIALIZING;
+	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN8;
+	init_completion(&dm_device.host_event);
+	init_completion(&dm_device.config_event);
+	INIT_LIST_HEAD(&dm_device.ha_region_list);
+	spin_lock_init(&dm_device.ha_lock);
+	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
+	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
+	dm_device.host_specified_ha_region = false;
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+	set_online_page_callback(&hv_online_page);
+	register_memory_notifier(&hv_memory_nb);
+#endif
+
+	hv_set_drvdata(dev, &dm_device);
+
+	ret = balloon_connect_vsp(dev);
+	if (ret != 0)
+		return ret;
+
 	dm_device.state = DM_INITIALIZED;
-	last_post_time = jiffies;
+
+	dm_device.thread =
+		 kthread_run(dm_thread_func, &dm_device, "hv_balloon");
+	if (IS_ERR(dm_device.thread)) {
+		ret = PTR_ERR(dm_device.thread);
+		goto probe_error;
+	}
 
 	return 0;
 
-probe_error2:
+probe_error:
+	vmbus_close(dev->channel);
 #ifdef CONFIG_MEMORY_HOTPLUG
+	unregister_memory_notifier(&hv_memory_nb);
 	restore_online_page_callback(&hv_online_page);
 #endif
-	kthread_stop(dm_device.thread);
-
-probe_error1:
-	vmbus_close(dev->channel);
-probe_error0:
-	kfree(send_buffer);
 	return ret;
 }
 
@@ -1734,12 +1732,11 @@ static int balloon_remove(struct hv_device *dev)
 	cancel_work_sync(&dm->balloon_wrk.wrk);
 	cancel_work_sync(&dm->ha_wrk.wrk);
 
-	vmbus_close(dev->channel);
 	kthread_stop(dm->thread);
-	kfree(send_buffer);
+	vmbus_close(dev->channel);
 #ifdef CONFIG_MEMORY_HOTPLUG
-	restore_online_page_callback(&hv_online_page);
 	unregister_memory_notifier(&hv_memory_nb);
+	restore_online_page_callback(&hv_online_page);
 #endif
 	spin_lock_irqsave(&dm_device.ha_lock, flags);
 	list_for_each_entry_safe(has, tmp, &dm->ha_region_list, list) {
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 50eaa1fd6e45..af9379a3bf89 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -169,8 +169,10 @@ extern int hv_synic_alloc(void);
 
 extern void hv_synic_free(void);
 
+extern void hv_synic_enable_regs(unsigned int cpu);
 extern int hv_synic_init(unsigned int cpu);
 
+extern void hv_synic_disable_regs(unsigned int cpu);
 extern int hv_synic_cleanup(unsigned int cpu);
 
 /* Interface */
@@ -256,6 +258,32 @@ struct vmbus_connection {
 	struct workqueue_struct *work_queue;
 	struct workqueue_struct *handle_primary_chan_wq;
 	struct workqueue_struct *handle_sub_chan_wq;
+
+	/*
+	 * The number of sub-channels and hv_sock channels that should be
+	 * cleaned up upon suspend: sub-channels will be re-created upon
+	 * resume, and hv_sock channels should not survive suspend.
+	 */
+	atomic_t nr_chan_close_on_suspend;
+	/*
+	 * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
+	 * drop to zero.
+	 */
+	struct completion ready_for_suspend_event;
+
+	/*
+	 * The number of primary channels that should be "fixed up"
+	 * upon resume: these channels are re-offered upon resume, and some
+	 * fields of the channel offers (i.e. child_relid and connection_id)
+	 * can change, so the old offermsg must be fixed up, before the resume
+	 * callbacks of the VSC drivers start to further touch the channels.
+	 */
+	atomic_t nr_chan_fixup_on_resume;
+	/*
+	 * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
+	 * drop to zero.
+	 */
+	struct completion ready_for_resume_event;
 };
 
 
@@ -270,6 +298,8 @@ struct vmbus_msginfo {
 
 extern struct vmbus_connection vmbus_connection;
 
+int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);
+
 static inline void vmbus_send_interrupt(u32 relid)
 {
 	sync_set_bit(relid, vmbus_connection.send_int_page);
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index ebd35fc35290..391f0b225c9a 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -24,12 +24,14 @@
 #include <linux/sched/task_stack.h>
 
 #include <asm/mshyperv.h>
+#include <linux/delay.h>
 #include <linux/notifier.h>
 #include <linux/ptrace.h>
 #include <linux/screen_info.h>
 #include <linux/kdebug.h>
 #include <linux/efi.h>
 #include <linux/random.h>
+#include <linux/syscore_ops.h>
 #include <clocksource/hyperv_timer.h>
 #include "hyperv_vmbus.h"
 
@@ -910,6 +912,43 @@ static void vmbus_shutdown(struct device *child_device)
 		drv->shutdown(dev);
 }
 
+/*
+ * vmbus_suspend - Suspend a vmbus device
+ */
+static int vmbus_suspend(struct device *child_device)
+{
+	struct hv_driver *drv;
+	struct hv_device *dev = device_to_hv_device(child_device);
+
+	/* The device may not be attached yet */
+	if (!child_device->driver)
+		return 0;
+
+	drv = drv_to_hv_drv(child_device->driver);
+	if (!drv->suspend)
+		return -EOPNOTSUPP;
+
+	return drv->suspend(dev);
+}
+
+/*
+ * vmbus_resume - Resume a vmbus device
+ */
+static int vmbus_resume(struct device *child_device)
+{
+	struct hv_driver *drv;
+	struct hv_device *dev = device_to_hv_device(child_device);
+
+	/* The device may not be attached yet */
+	if (!child_device->driver)
+		return 0;
+
+	drv = drv_to_hv_drv(child_device->driver);
+	if (!drv->resume)
+		return -EOPNOTSUPP;
+
+	return drv->resume(dev);
+}
 
 /*
  * vmbus_device_release - Final callback release of the vmbus child device
@@ -925,6 +964,14 @@ static void vmbus_device_release(struct device *device)
 	kfree(hv_dev);
 }
 
+/*
+ * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
+ * SET_SYSTEM_SLEEP_PM_OPS: see the comment before vmbus_bus_pm.
+ */
+static const struct dev_pm_ops vmbus_pm = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_suspend, vmbus_resume)
+};
+
 /* The one and only one */
 static struct bus_type hv_bus = {
 	.name = "vmbus",
@@ -935,6 +982,7 @@ static struct bus_type hv_bus = {
 	.uevent = vmbus_uevent,
 	.dev_groups = vmbus_dev_groups,
 	.drv_groups = vmbus_drv_groups,
+	.pm = &vmbus_pm,
 };
 
 struct onmessage_work_context {
@@ -1022,6 +1070,41 @@ msg_handled:
 	vmbus_signal_eom(msg, message_type);
 }
 
+/*
+ * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
+ * hibernation, because hv_sock connections can not persist across hibernation.
+ */
+static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
+{
+	struct onmessage_work_context *ctx;
+	struct vmbus_channel_rescind_offer *rescind;
+
+	WARN_ON(!is_hvsock_channel(channel));
+
+	/*
+	 * sizeof(*ctx) is small and the allocation should really not fail,
+	 * otherwise the state of the hv_sock connections ends up in limbo.
+	 */
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL | __GFP_NOFAIL);
+
+	/*
+	 * So far, these are not really used by Linux. Just set them to the
+	 * reasonable values conforming to the definitions of the fields.
+	 */
+	ctx->msg.header.message_type = 1;
+	ctx->msg.header.payload_size = sizeof(*rescind);
+
+	/* These values are actually used by Linux. */
+	rescind = (struct vmbus_channel_rescind_offer *)ctx->msg.u.payload;
+	rescind->header.msgtype = CHANNELMSG_RESCIND_CHANNELOFFER;
+	rescind->child_relid = channel->offermsg.child_relid;
+
+	INIT_WORK(&ctx->work, vmbus_onmessage_work);
+
+	queue_work_on(vmbus_connection.connect_cpu,
+		      vmbus_connection.work_queue,
+		      &ctx->work);
+}
 
 /*
  * Direct callback for channels using other deferred processing
@@ -2042,6 +2125,129 @@ acpi_walk_err:
 	return ret_val;
 }
 
+static int vmbus_bus_suspend(struct device *dev)
+{
+	struct vmbus_channel *channel, *sc;
+	unsigned long flags;
+
+	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
+		/*
+		 * We wait here until the completion of any channel
+		 * offers that are currently in progress.
+		 */
+		msleep(1);
+	}
+
+	mutex_lock(&vmbus_connection.channel_mutex);
+	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+		if (!is_hvsock_channel(channel))
+			continue;
+
+		vmbus_force_channel_rescinded(channel);
+	}
+	mutex_unlock(&vmbus_connection.channel_mutex);
+
+	/*
+	 * Wait until all the sub-channels and hv_sock channels have been
+	 * cleaned up. Sub-channels should be destroyed upon suspend, otherwise
+	 * they would conflict with the new sub-channels that will be created
+	 * in the resume path. hv_sock channels should also be destroyed, but
+	 * a hv_sock channel of an established hv_sock connection can not be
+	 * really destroyed since it may still be referenced by the userspace
+	 * application, so we just force the hv_sock channel to be rescinded
+	 * by vmbus_force_channel_rescinded(), and the userspace application
+	 * will thoroughly destroy the channel after hibernation.
+	 *
+	 * Note: the counter nr_chan_close_on_suspend may never go above 0 if
+	 * the VM has no sub-channel and hv_sock channel, e.g. a 1-vCPU VM.
+	 */
+	if (atomic_read(&vmbus_connection.nr_chan_close_on_suspend) > 0)
+		wait_for_completion(&vmbus_connection.ready_for_suspend_event);
+
+	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) != 0);
+
+	mutex_lock(&vmbus_connection.channel_mutex);
+
+	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+		/*
+		 * Invalidate the field. Upon resume, vmbus_onoffer() will fix
+		 * up the field, and the other fields (if necessary).
+		 */
+		channel->offermsg.child_relid = INVALID_RELID;
+
+		if (is_hvsock_channel(channel)) {
+			if (!channel->rescind) {
+				pr_err("hv_sock channel not rescinded!\n");
+				WARN_ON_ONCE(1);
+			}
+			continue;
+		}
+
+		spin_lock_irqsave(&channel->lock, flags);
+		list_for_each_entry(sc, &channel->sc_list, sc_list) {
+			pr_err("Sub-channel not deleted!\n");
+			WARN_ON_ONCE(1);
+		}
+		spin_unlock_irqrestore(&channel->lock, flags);
+
+		atomic_inc(&vmbus_connection.nr_chan_fixup_on_resume);
+	}
+
+	mutex_unlock(&vmbus_connection.channel_mutex);
+
+	vmbus_initiate_unload(false);
+
+	vmbus_connection.conn_state = DISCONNECTED;
+
+	/* Reset the event for the next resume. */
+	reinit_completion(&vmbus_connection.ready_for_resume_event);
+
+	return 0;
+}
+
+static int vmbus_bus_resume(struct device *dev)
+{
+	struct vmbus_channel_msginfo *msginfo;
+	size_t msgsize;
+	int ret;
+
+	/*
+	 * We only use the 'vmbus_proto_version', which was in use before
+	 * hibernation, to re-negotiate with the host.
+	 */
+	if (vmbus_proto_version == VERSION_INVAL ||
+	    vmbus_proto_version == 0) {
+		pr_err("Invalid proto version = 0x%x\n", vmbus_proto_version);
+		return -EINVAL;
+	}
+
+	msgsize = sizeof(*msginfo) +
+		  sizeof(struct vmbus_channel_initiate_contact);
+
+	msginfo = kzalloc(msgsize, GFP_KERNEL);
+
+	if (msginfo == NULL)
+		return -ENOMEM;
+
+	ret = vmbus_negotiate_version(msginfo, vmbus_proto_version);
+
+	kfree(msginfo);
+
+	if (ret != 0)
+		return ret;
+
+	WARN_ON(atomic_read(&vmbus_connection.nr_chan_fixup_on_resume) == 0);
+
+	vmbus_request_offers();
+
+	wait_for_completion(&vmbus_connection.ready_for_resume_event);
+
+	/* Reset the event for the next suspend. */
+	reinit_completion(&vmbus_connection.ready_for_suspend_event);
+
+	return 0;
+}
+
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
 	{"VMBUS", 0},
 	{"VMBus", 0},
@@ -2049,6 +2255,19 @@ static const struct acpi_device_id vmbus_acpi_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids);
 
+/*
+ * Note: we must use SET_NOIRQ_SYSTEM_SLEEP_PM_OPS rather than
+ * SET_SYSTEM_SLEEP_PM_OPS, otherwise NIC SR-IOV can not work, because the
+ * "pci_dev_pm_ops" uses the "noirq" callbacks: in the resume path, the
+ * pci "noirq" restore callback runs before "non-noirq" callbacks (see
+ * resume_target_kernel() -> dpm_resume_start(), and hibernation_restore() ->
+ * dpm_resume_end()). This means vmbus_bus_resume() and the pci-hyperv's
+ * resume callback must also run via the "noirq" callbacks.
+ */
+static const struct dev_pm_ops vmbus_bus_pm = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(vmbus_bus_suspend, vmbus_bus_resume)
+};
+
 static struct acpi_driver vmbus_acpi_driver = {
 	.name = "vmbus",
 	.ids = vmbus_acpi_device_ids,
@@ -2056,6 +2275,7 @@ static struct acpi_driver vmbus_acpi_driver = {
 		.add = vmbus_acpi_add,
 		.remove = vmbus_acpi_remove,
 	},
+	.drv.pm = &vmbus_bus_pm,
 };
 
 static void hv_kexec_handler(void)
@@ -2086,6 +2306,47 @@ static void hv_crash_handler(struct pt_regs *regs)
 	hyperv_cleanup();
 };
 
+static int hv_synic_suspend(void)
+{
+	/*
+	 * When we reach here, all the non-boot CPUs have been offlined, and
+	 * the stimers on them have been unbound in hv_synic_cleanup() ->
+	 * hv_stimer_cleanup() -> clockevents_unbind_device().
+	 *
+	 * hv_synic_suspend() only runs on CPU0 with interrupts disabled. Here
+	 * we do not unbind the stimer on CPU0 because: 1) it's unnecessary
+	 * because the interrupts remain disabled between syscore_suspend()
+	 * and syscore_resume(): see create_image() and resume_target_kernel();
+	 * 2) the stimer on CPU0 is automatically disabled later by
+	 * syscore_suspend() -> timekeeping_suspend() -> tick_suspend() -> ...
+	 * -> clockevents_shutdown() -> ... -> hv_ce_shutdown(); 3) a warning
+	 * would be triggered if we call clockevents_unbind_device(), which
+	 * may sleep, in an interrupts-disabled context. So, we intentionally
+	 * don't call hv_stimer_cleanup(0) here.
+	 */
+
+	hv_synic_disable_regs(0);
+
+	return 0;
+}
+
+static void hv_synic_resume(void)
+{
+	hv_synic_enable_regs(0);
+
+	/*
+	 * Note: we don't need to call hv_stimer_init(0), because the timer
+	 * on CPU0 is not unbound in hv_synic_suspend(), and the timer is
+	 * automatically re-enabled in timekeeping_resume().
+	 */
+}
+
+/* The callbacks run only on CPU0, with irqs_disabled. */
+static struct syscore_ops hv_synic_syscore_ops = {
+	.suspend = hv_synic_suspend,
+	.resume = hv_synic_resume,
+};
+
 static int __init hv_acpi_init(void)
 {
 	int ret, t;
@@ -2116,6 +2377,8 @@ static int __init hv_acpi_init(void)
 	hv_setup_kexec_handler(hv_kexec_handler);
 	hv_setup_crash_handler(hv_crash_handler);
 
+	register_syscore_ops(&hv_synic_syscore_ops);
+
 	return 0;
 
 cleanup:
@@ -2128,6 +2391,8 @@ static void __exit vmbus_exit(void)
 {
 	int cpu;
 
+	unregister_syscore_ops(&hv_synic_syscore_ops);
+
 	hv_remove_kexec_handler();
 	hv_remove_crash_handler();
 	vmbus_connection.conn_state = DISCONNECTED;
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 2afe6fdc1dda..b4a017093b69 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -245,7 +245,10 @@ struct vmbus_channel_offer {
 		} pipe;
 	} u;
 	/*
-	 * The sub_channel_index is defined in win8.
+	 * The sub_channel_index is defined in Win8: a value of zero means a
+	 * primary channel and a value of non-zero means a sub-channel.
+	 *
+	 * Before Win8, the field is reserved, meaning it's always zero.
 	 */
 	u16 sub_channel_index;
 	u16 reserved3;
@@ -423,6 +426,9 @@ enum vmbus_channel_message_type {
 	CHANNELMSG_COUNT
 };
 
+/* Hyper-V supports about 2048 channels, and the RELIDs start with 1. */
+#define INVALID_RELID	U32_MAX
+
 struct vmbus_channel_message_header {
 	enum vmbus_channel_message_type msgtype;
 	u32 padding;
@@ -934,6 +940,11 @@ static inline bool is_hvsock_channel(const struct vmbus_channel *c)
 				  VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
 }
 
+static inline bool is_sub_channel(const struct vmbus_channel *c)
+{
+	return c->offermsg.offer.sub_channel_index != 0;
+}
+
 static inline void set_channel_affinity_state(struct vmbus_channel *c,
 					      enum hv_numa_policy policy)
 {
@@ -1149,6 +1160,9 @@ struct hv_driver {
 	int (*remove)(struct hv_device *);
 	void (*shutdown)(struct hv_device *);
 
+	int (*suspend)(struct hv_device *);
+	int (*resume)(struct hv_device *);
+
 };
 
 /* Base device object */
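The hunk above is the driver-facing half of the hibernation work: struct hv_driver gains optional suspend/resume callbacks, which the vmbus_suspend()/vmbus_resume() helpers added in the vmbus_drv.c hunk earlier in this diff invoke on each bound device (returning -EOPNOTSUPP when a driver provides neither). A minimal, hypothetical sketch of how a VSC driver might wire these up follows; the hv_dummy_* names and ring size are placeholders and not part of this series, while vmbus_open()/vmbus_close() are the same helpers used elsewhere in this diff.

/*
 * Hypothetical example, not part of this series: the general shape of a VSC
 * driver that opts in to hibernation via the new hooks.  All hv_dummy_*
 * names and the ring size are placeholders.
 */
#include <linux/hyperv.h>

#define HV_DUMMY_RING_SIZE	(16 * 1024)

static void hv_dummy_onchannelcallback(void *context)
{
	/* A real driver would drain its ring buffer here. */
}

static int hv_dummy_suspend(struct hv_device *dev)
{
	/* Quiesce the device, then close its vmbus channel. */
	vmbus_close(dev->channel);
	return 0;
}

static int hv_dummy_resume(struct hv_device *dev)
{
	/* Re-open the channel; its relid was fixed up by vmbus_onoffer(). */
	return vmbus_open(dev->channel, HV_DUMMY_RING_SIZE, HV_DUMMY_RING_SIZE,
			  NULL, 0, hv_dummy_onchannelcallback, dev);
}

static struct hv_driver hv_dummy_drv = {
	.name		= "hv_dummy",
	.suspend	= hv_dummy_suspend,	/* called from vmbus_suspend() */
	.resume		= hv_dummy_resume,	/* called from vmbus_resume() */
};

A complete driver would also fill in .probe, .remove and .id_table and register the structure with vmbus_driver_register(); those parts are omitted to keep the sketch focused on the new callbacks.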
diff --git a/tools/hv/Build b/tools/hv/Build
new file mode 100644
index 000000000000..6cf51fa4b306
--- /dev/null
+++ b/tools/hv/Build
@@ -0,0 +1,3 @@
+hv_kvp_daemon-y += hv_kvp_daemon.o
+hv_vss_daemon-y += hv_vss_daemon.o
+hv_fcopy_daemon-y += hv_fcopy_daemon.o
diff --git a/tools/hv/Makefile b/tools/hv/Makefile
index 5db5e62cebda..b57143d9459c 100644
--- a/tools/hv/Makefile
+++ b/tools/hv/Makefile
@@ -1,28 +1,55 @@
 # SPDX-License-Identifier: GPL-2.0
 # Makefile for Hyper-V tools
-
-WARNINGS = -Wall -Wextra
-CFLAGS = $(WARNINGS) -g $(shell getconf LFS_CFLAGS)
-
-CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+include ../scripts/Makefile.include
 
 sbindir ?= /usr/sbin
 libexecdir ?= /usr/libexec
 sharedstatedir ?= /var/lib
 
-ALL_PROGRAMS := hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour);
+MAKEFLAGS += -r
+
+override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ALL_TARGETS := hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
 
 ALL_SCRIPTS := hv_get_dhcp_info.sh hv_get_dns_info.sh hv_set_ifconfig.sh
 
 all: $(ALL_PROGRAMS)
 
-%: %.c
-	$(CC) $(CFLAGS) -o $@ $^
+export srctree OUTPUT CC LD CFLAGS
+include $(srctree)/tools/build/Makefile.include
+
+HV_KVP_DAEMON_IN := $(OUTPUT)hv_kvp_daemon-in.o
+$(HV_KVP_DAEMON_IN): FORCE
+	$(Q)$(MAKE) $(build)=hv_kvp_daemon
+$(OUTPUT)hv_kvp_daemon: $(HV_KVP_DAEMON_IN)
+	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
+HV_VSS_DAEMON_IN := $(OUTPUT)hv_vss_daemon-in.o
+$(HV_VSS_DAEMON_IN): FORCE
+	$(Q)$(MAKE) $(build)=hv_vss_daemon
+$(OUTPUT)hv_vss_daemon: $(HV_VSS_DAEMON_IN)
+	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
+HV_FCOPY_DAEMON_IN := $(OUTPUT)hv_fcopy_daemon-in.o
+$(HV_FCOPY_DAEMON_IN): FORCE
+	$(Q)$(MAKE) $(build)=hv_fcopy_daemon
+$(OUTPUT)hv_fcopy_daemon: $(HV_FCOPY_DAEMON_IN)
+	$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
 
 clean:
-	$(RM) hv_kvp_daemon hv_vss_daemon hv_fcopy_daemon
+	rm -f $(ALL_PROGRAMS)
+	find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
 
-install: all
+install: $(ALL_PROGRAMS)
 	install -d -m 755 $(DESTDIR)$(sbindir); \
 	install -d -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd; \
 	install -d -m 755 $(DESTDIR)$(sharedstatedir); \
@@ -33,3 +60,7 @@ install: all
 	for script in $(ALL_SCRIPTS); do \
 		install $$script -m 755 $(DESTDIR)$(libexecdir)/hypervkvpd/$${script%.sh}; \
 	done
+
+FORCE:
+
+.PHONY: all install clean FORCE prepare