author     Stephen Hemminger <stephen@networkplumber.org>  2018-09-14 12:10:17 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2018-09-25 14:33:24 -0400
commit     ae6935ed7d424ffa74d634da00767e7b03c98fd3 (patch)
tree       d26a4096301a8555277b50eb9741bcb978a308b2
parent     52a42c2a90226dc61c99bbd0cb096deeb52c334b (diff)
vmbus: split ring buffer allocation from open
The UIO driver needs the ring buffer to be persistent (reused)
across open/close. Split the allocation and setup of the ring
buffer out of vmbus_open. For normal vmbus_open/vmbus_close usage
there are no changes; this only affects uio_hv_generic, which needs
to keep the ring buffer memory and reuse it when the application
restarts.
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
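As a sketch of the consumer pattern this split enables (hypothetical driver code; the my_* names and simplified callback signatures are illustrative, not part of this patch), a driver can allocate the ring once at probe time, connect and disconnect around it as the application opens and closes the device, and free the memory only at remove:

	#include <linux/hyperv.h>

	#define MY_RING_SIZE	(16 * PAGE_SIZE)	/* ring sizes must be page aligned */

	static void my_chan_cb(void *context)
	{
		/* channel interrupt callback; would wake readers in a real driver */
	}

	static int my_probe(struct hv_device *dev)
	{
		/* allocate and map the pages once; they survive open/close cycles */
		return vmbus_alloc_ring(dev->channel, MY_RING_SIZE, MY_RING_SIZE);
	}

	static int my_open(struct hv_device *dev)
	{
		/* open the channel over the already-allocated ring; no allocation here */
		return vmbus_connect_ring(dev->channel, my_chan_cb, dev->channel);
	}

	static int my_release(struct hv_device *dev)
	{
		/* close the channel but keep the ring memory for the next open */
		return vmbus_disconnect_ring(dev->channel);
	}

	static int my_remove(struct hv_device *dev)
	{
		/* only now is the ring memory actually unmapped and freed */
		vmbus_free_ring(dev->channel);
		return 0;
	}

A classic driver keeps calling vmbus_open()/vmbus_close(), which after this patch simply compose these primitives.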
-rw-r--r--  drivers/hv/channel.c     | 267
-rw-r--r--  drivers/hv/ring_buffer.c |   1
-rw-r--r--  include/linux/hyperv.h   |   9
3 files changed, 162 insertions, 115 deletions
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 56ec0d96d876..ddadb7efd1cc 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -79,84 +79,96 @@ void vmbus_setevent(struct vmbus_channel *channel)
 }
 EXPORT_SYMBOL_GPL(vmbus_setevent);
 
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
-	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
-	       void (*onchannelcallback)(void *context), void *context)
+/* vmbus_free_ring - drop mapping of ring buffer */
+void vmbus_free_ring(struct vmbus_channel *channel)
 {
-	struct vmbus_channel_open_channel *open_msg;
-	struct vmbus_channel_msginfo *open_info = NULL;
-	unsigned long flags;
-	int ret, err = 0;
-	struct page *page;
-	unsigned int order;
+	hv_ringbuffer_cleanup(&channel->outbound);
+	hv_ringbuffer_cleanup(&channel->inbound);
 
-	if (send_ringbuffer_size % PAGE_SIZE ||
-	    recv_ringbuffer_size % PAGE_SIZE)
-		return -EINVAL;
+	if (channel->ringbuffer_page) {
+		__free_pages(channel->ringbuffer_page,
+			     get_order(channel->ringbuffer_pagecount
+				       << PAGE_SHIFT));
+		channel->ringbuffer_page = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(vmbus_free_ring);
 
-	order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
+/* vmbus_alloc_ring - allocate and map pages for ring buffer */
+int vmbus_alloc_ring(struct vmbus_channel *newchannel,
+		     u32 send_size, u32 recv_size)
+{
+	struct page *page;
+	int order;
 
-	spin_lock_irqsave(&newchannel->lock, flags);
-	if (newchannel->state == CHANNEL_OPEN_STATE) {
-		newchannel->state = CHANNEL_OPENING_STATE;
-	} else {
-		spin_unlock_irqrestore(&newchannel->lock, flags);
+	if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
 		return -EINVAL;
-	}
-	spin_unlock_irqrestore(&newchannel->lock, flags);
-
-	newchannel->onchannel_callback = onchannelcallback;
-	newchannel->channel_callback_context = context;
 
 	/* Allocate the ring buffer */
+	order = get_order(send_size + recv_size);
 	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
 				GFP_KERNEL|__GFP_ZERO, order);
 
 	if (!page)
 		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 
-	if (!page) {
-		err = -ENOMEM;
-		goto error_set_chnstate;
-	}
+	if (!page)
+		return -ENOMEM;
 
 	newchannel->ringbuffer_page = page;
-	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
-					   recv_ringbuffer_size) >> PAGE_SHIFT;
+	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
+	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
 
-	ret = hv_ringbuffer_init(&newchannel->outbound, page,
-				 send_ringbuffer_size >> PAGE_SHIFT);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
 
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
+static int __vmbus_open(struct vmbus_channel *newchannel,
+			void *userdata, u32 userdatalen,
+			void (*onchannelcallback)(void *context), void *context)
+{
+	struct vmbus_channel_open_channel *open_msg;
+	struct vmbus_channel_msginfo *open_info = NULL;
+	struct page *page = newchannel->ringbuffer_page;
+	u32 send_pages, recv_pages;
+	unsigned long flags;
+	int err;
 
-	ret = hv_ringbuffer_init(&newchannel->inbound,
-				 &page[send_ringbuffer_size >> PAGE_SHIFT],
-				 recv_ringbuffer_size >> PAGE_SHIFT);
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
+	if (userdatalen > MAX_USER_DEFINED_BYTES)
+		return -EINVAL;
+
+	send_pages = newchannel->ringbuffer_send_offset;
+	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
+	spin_lock_irqsave(&newchannel->lock, flags);
+	if (newchannel->state != CHANNEL_OPEN_STATE) {
+		spin_unlock_irqrestore(&newchannel->lock, flags);
+		return -EINVAL;
 	}
+	spin_unlock_irqrestore(&newchannel->lock, flags);
 
+	newchannel->state = CHANNEL_OPENING_STATE;
+	newchannel->onchannel_callback = onchannelcallback;
+	newchannel->channel_callback_context = context;
+
+	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+	if (err)
+		goto error_clean_ring;
+
+	err = hv_ringbuffer_init(&newchannel->inbound,
+				 &page[send_pages], recv_pages);
+	if (err)
+		goto error_clean_ring;
 
 	/* Establish the gpadl for the ring buffer */
 	newchannel->ringbuffer_gpadlhandle = 0;
 
-	ret = vmbus_establish_gpadl(newchannel,
-				    page_address(page),
-				    send_ringbuffer_size +
-				    recv_ringbuffer_size,
+	err = vmbus_establish_gpadl(newchannel,
+				    page_address(newchannel->ringbuffer_page),
+				    (send_pages + recv_pages) << PAGE_SHIFT,
 				    &newchannel->ringbuffer_gpadlhandle);
-
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
+	if (err)
+		goto error_clean_ring;
 
 	/* Create and init the channel open message */
 	open_info = kmalloc(sizeof(*open_info) +
@@ -175,15 +187,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	open_msg->openid = newchannel->offermsg.child_relid;
 	open_msg->child_relid = newchannel->offermsg.child_relid;
 	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
-	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
-						     PAGE_SHIFT;
+	open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
 	open_msg->target_vp = newchannel->target_vp;
 
-	if (userdatalen > MAX_USER_DEFINED_BYTES) {
-		err = -EINVAL;
-		goto error_free_gpadl;
-	}
-
 	if (userdatalen)
 		memcpy(open_msg->userdata, userdata, userdatalen);
 
@@ -194,18 +200,16 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	if (newchannel->rescind) {
 		err = -ENODEV;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
-	ret = vmbus_post_msg(open_msg,
+	err = vmbus_post_msg(open_msg,
 			     sizeof(struct vmbus_channel_open_channel), true);
 
-	trace_vmbus_open(open_msg, ret);
+	trace_vmbus_open(open_msg, err);
 
-	if (ret != 0) {
-		err = ret;
+	if (err != 0)
 		goto error_clean_msglist;
-	}
 
 	wait_for_completion(&open_info->waitevent);
 
@@ -215,12 +219,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	if (newchannel->rescind) {
 		err = -ENODEV;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
 	if (open_info->response.open_result.status) {
 		err = -EAGAIN;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
 	newchannel->state = CHANNEL_OPENED_STATE;
@@ -231,18 +235,50 @@ error_clean_msglist:
 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 	list_del(&open_info->msglistentry);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
+error_free_info:
+	kfree(open_info);
 error_free_gpadl:
 	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
-	kfree(open_info);
-error_free_pages:
+	newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
 	hv_ringbuffer_cleanup(&newchannel->outbound);
 	hv_ringbuffer_cleanup(&newchannel->inbound);
-	__free_pages(page, order);
-error_set_chnstate:
 	newchannel->state = CHANNEL_OPEN_STATE;
 	return err;
 }
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+		       void (*onchannelcallback)(void *context), void *context)
+{
+	return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+	       void *userdata, u32 userdatalen,
+	       void (*onchannelcallback)(void *context), void *context)
+{
+	int err;
+
+	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+			       recv_ringbuffer_size);
+	if (err)
+		return err;
+
+	err = __vmbus_open(newchannel, userdata, userdatalen,
+			   onchannelcallback, context);
+	if (err)
+		vmbus_free_ring(newchannel);
+
+	return err;
+}
 EXPORT_SYMBOL_GPL(vmbus_open);
 
 /* Used for Hyper-V Socket: a guest client's connect() to the host */
@@ -610,10 +646,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
 	 * here we should skip most of the below cleanup work.
 	 */
-	if (channel->state != CHANNEL_OPENED_STATE) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (channel->state != CHANNEL_OPENED_STATE)
+		return -EINVAL;
 
 	channel->state = CHANNEL_OPEN_STATE;
 
@@ -635,11 +669,10 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 		 * If we failed to post the close msg,
 		 * it is perhaps better to leak memory.
 		 */
-		goto out;
 	}
 
 	/* Tear down the gpadl for the channel's ring buffer */
-	if (channel->ringbuffer_gpadlhandle) {
+	else if (channel->ringbuffer_gpadlhandle) {
 		ret = vmbus_teardown_gpadl(channel,
 					   channel->ringbuffer_gpadlhandle);
 		if (ret) {
@@ -648,59 +681,63 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 			 * If we failed to teardown gpadl,
 			 * it is perhaps better to leak memory.
 			 */
-			goto out;
 		}
-	}
-
-	/* Cleanup the ring buffers for this channel */
-	hv_ringbuffer_cleanup(&channel->outbound);
-	hv_ringbuffer_cleanup(&channel->inbound);
 
-	__free_pages(channel->ringbuffer_page,
-		     get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
+		channel->ringbuffer_gpadlhandle = 0;
+	}
 
-out:
 	return ret;
 }
 
-/*
- * vmbus_close - Close the specified channel
- */
-void vmbus_close(struct vmbus_channel *channel)
+/* disconnect ring - close all channels */
+int vmbus_disconnect_ring(struct vmbus_channel *channel)
 {
-	struct list_head *cur, *tmp;
-	struct vmbus_channel *cur_channel;
+	struct vmbus_channel *cur_channel, *tmp;
+	unsigned long flags;
+	LIST_HEAD(list);
+	int ret;
 
-	if (channel->primary_channel != NULL) {
-		/*
-		 * We will only close sub-channels when
-		 * the primary is closed.
-		 */
-		return;
-	}
-	/*
-	 * Close all the sub-channels first and then close the
-	 * primary channel.
-	 */
-	list_for_each_safe(cur, tmp, &channel->sc_list) {
-		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-		if (cur_channel->rescind) {
+	if (channel->primary_channel != NULL)
+		return -EINVAL;
+
+	/* Snapshot the list of subchannels */
+	spin_lock_irqsave(&channel->lock, flags);
+	list_splice_init(&channel->sc_list, &list);
+	channel->num_sc = 0;
+	spin_unlock_irqrestore(&channel->lock, flags);
+
+	list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+		if (cur_channel->rescind)
 			wait_for_completion(&cur_channel->rescind_event);
-			mutex_lock(&vmbus_connection.channel_mutex);
-			vmbus_close_internal(cur_channel);
-			hv_process_channel_removal(cur_channel);
-		} else {
-			mutex_lock(&vmbus_connection.channel_mutex);
-			vmbus_close_internal(cur_channel);
+
+		mutex_lock(&vmbus_connection.channel_mutex);
+		if (vmbus_close_internal(cur_channel) == 0) {
+			vmbus_free_ring(cur_channel);
+
+			if (cur_channel->rescind)
+				hv_process_channel_removal(cur_channel);
 		}
 		mutex_unlock(&vmbus_connection.channel_mutex);
 	}
+
 	/*
 	 * Now close the primary.
 	 */
 	mutex_lock(&vmbus_connection.channel_mutex);
-	vmbus_close_internal(channel);
+	ret = vmbus_close_internal(channel);
 	mutex_unlock(&vmbus_connection.channel_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+	if (vmbus_disconnect_ring(channel) == 0)
+		vmbus_free_ring(channel);
 }
 EXPORT_SYMBOL_GPL(vmbus_close);
 
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 3e90eb91db45..64d0c85d5161 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -241,6 +241,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
 void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
 {
 	vunmap(ring_info->ring_buffer);
+	ring_info->ring_buffer = NULL;
 }
 
 /* Write to the ring buffer. */
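The ring_buffer.c change above is what makes repeated cleanup safe on the reuse path: once hv_ringbuffer_cleanup() resets ring_buffer to NULL, a second cleanup of the same ring is harmless, since vunmap(NULL) is a no-op. A minimal illustration (hypothetical call sequence, not from this patch):

	hv_ringbuffer_cleanup(&channel->outbound);	/* vunmap()s the mapping and NULLs the pointer */
	hv_ringbuffer_cleanup(&channel->outbound);	/* now a no-op: vunmap(NULL) does nothing */

This matters because cleanup can now run twice on the same ring info, e.g. in __vmbus_open()'s error path and again later from vmbus_free_ring().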
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index a6c32d2d090b..b3e24368930a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -741,6 +741,7 @@ struct vmbus_channel {
 	/* Allocated memory for ring buffer */
 	struct page *ringbuffer_page;
 	u32 ringbuffer_pagecount;
+	u32 ringbuffer_send_offset;
 	struct hv_ring_buffer_info outbound;	/* send to parent */
 	struct hv_ring_buffer_info inbound;	/* receive from parent */
 
@@ -1021,6 +1022,14 @@ struct vmbus_packet_mpb_array {
 	struct hv_mpb_array range;
 } __packed;
 
+int vmbus_alloc_ring(struct vmbus_channel *channel,
+		     u32 send_size, u32 recv_size);
+void vmbus_free_ring(struct vmbus_channel *channel);
+
+int vmbus_connect_ring(struct vmbus_channel *channel,
+		       void (*onchannel_callback)(void *context),
+		       void *context);
+int vmbus_disconnect_ring(struct vmbus_channel *channel);
 
 extern int vmbus_open(struct vmbus_channel *channel,
 		      u32 send_ringbuffersize,