author		K. Y. Srinivasan <kys@microsoft.com>	2014-04-08 21:45:54 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2014-05-03 19:24:26 -0400
commit		3a28fa35d6658703cd26f9c16aaea0eae06afd40 (patch)
tree		bcc8f07bbd9365e0a270c85af0e50ce3e1682987 /drivers/hv/channel_mgmt.c
parent		d3ba720dd58cdf6630fee4b89482c465d5ad0d0f (diff)
Drivers: hv: vmbus: Implement per-CPU mapping of relid to channel
Currently the mapping of the relID to channel is done under the protection of a single spin lock. Starting with ws2012, each channel is bound to a specific VCPU in the guest. Use this binding to eliminate the spin lock by setting up per-cpu state for mapping relId to the channel.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
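The payoff of the per-VCPU binding is on the interrupt path: the host signals a channel on the VCPU the channel was bound to, so each CPU only ever needs to search its own list, and no lock is required. That lookup lives in connection.c rather than in the channel_mgmt.c diff below; as a rough sketch of the idea (function name illustrative), using the percpu_list fields this patch introduces and the existing offermsg.child_relid field:

/*
 * Sketch only (the real lookup is in connection.c, not in this diff):
 * resolve a relid on the CPU the channel is bound to.  No spin lock is
 * needed because each CPU walks only hv_context.percpu_list[cpu] for
 * its own cpu, and channels are added/removed on that same CPU via
 * percpu_channel_enq()/percpu_channel_deq().
 */
static struct vmbus_channel *pcpu_relid2channel(u32 relid)
{
	struct vmbus_channel *channel;
	int cpu = smp_processor_id();

	list_for_each_entry(channel, &hv_context.percpu_list[cpu],
			    percpu_list) {
		if (channel->offermsg.child_relid == relid)
			return channel;
	}

	return NULL;
}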
Diffstat (limited to 'drivers/hv/channel_mgmt.c')
-rw-r--r--	drivers/hv/channel_mgmt.c | 41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 6f7fdd9a7e77..6c8b032cacba 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -149,6 +149,7 @@ static struct vmbus_channel *alloc_channel(void)
 	spin_lock_init(&channel->sc_lock);
 
 	INIT_LIST_HEAD(&channel->sc_list);
+	INIT_LIST_HEAD(&channel->percpu_list);
 
 	channel->controlwq = create_workqueue("hv_vmbus_ctl");
 	if (!channel->controlwq) {
@@ -188,7 +189,20 @@ static void free_channel(struct vmbus_channel *channel)
 	queue_work(vmbus_connection.work_queue, &channel->work);
 }
 
+static void percpu_channel_enq(void *arg)
+{
+	struct vmbus_channel *channel = arg;
+	int cpu = smp_processor_id();
+
+	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
+}
 
+static void percpu_channel_deq(void *arg)
+{
+	struct vmbus_channel *channel = arg;
+
+	list_del(&channel->percpu_list);
+}
 
 /*
  * vmbus_process_rescind_offer -
@@ -210,6 +224,12 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
 	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
 	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
 
+	if (channel->target_cpu != smp_processor_id())
+		smp_call_function_single(channel->target_cpu,
+					 percpu_channel_deq, channel, true);
+	else
+		percpu_channel_deq(channel);
+
 	if (channel->primary_channel == NULL) {
 		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
 		list_del(&channel->listentry);
@@ -245,6 +265,7 @@ static void vmbus_process_offer(struct work_struct *work)
 							work);
 	struct vmbus_channel *channel;
 	bool fnew = true;
+	bool enq = false;
 	int ret;
 	unsigned long flags;
 
@@ -264,12 +285,22 @@ static void vmbus_process_offer(struct work_struct *work)
 		}
 	}
 
-	if (fnew)
+	if (fnew) {
 		list_add_tail(&newchannel->listentry,
 			      &vmbus_connection.chn_list);
+		enq = true;
+	}
 
 	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
 
+	if (enq) {
+		if (newchannel->target_cpu != smp_processor_id())
+			smp_call_function_single(newchannel->target_cpu,
+						 percpu_channel_enq,
+						 newchannel, true);
+		else
+			percpu_channel_enq(newchannel);
+	}
 	if (!fnew) {
 		/*
 		 * Check to see if this is a sub-channel.
@@ -282,6 +313,14 @@ static void vmbus_process_offer(struct work_struct *work)
 		spin_lock_irqsave(&channel->sc_lock, flags);
 		list_add_tail(&newchannel->sc_list, &channel->sc_list);
 		spin_unlock_irqrestore(&channel->sc_lock, flags);
+
+		if (newchannel->target_cpu != smp_processor_id())
+			smp_call_function_single(newchannel->target_cpu,
+						 percpu_channel_enq,
+						 newchannel, true);
+		else
+			percpu_channel_enq(newchannel);
+
 		newchannel->state = CHANNEL_OPEN_STATE;
 		if (channel->sc_creation_callback != NULL)
 			channel->sc_creation_callback(newchannel);
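For completeness: the per-CPU list heads that percpu_channel_enq() appends to are not initialized in this file. In this commit that presumably happens in hv.c alongside the rest of the per-CPU synic setup; the helper below uses a hypothetical name and exists only to sketch that step:

/*
 * Sketch only: each CPU's channel list must start out empty before the
 * first offer is processed.  Run once per CPU during per-CPU setup.
 */
static void hv_percpu_list_init(unsigned int cpu)
{
	INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);
}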