about summary refs log tree commit diff stats
path: root/drivers/hv/channel_mgmt.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/hv/channel_mgmt.c')
-rw-r--r--  drivers/hv/channel_mgmt.c  52
1 file changed, 47 insertions(+), 5 deletions(-)
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index fa920469bf10..6c8b032cacba 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -149,6 +149,7 @@ static struct vmbus_channel *alloc_channel(void)
 	spin_lock_init(&channel->sc_lock);
 
 	INIT_LIST_HEAD(&channel->sc_list);
+	INIT_LIST_HEAD(&channel->percpu_list);
 
 	channel->controlwq = create_workqueue("hv_vmbus_ctl");
 	if (!channel->controlwq) {
@@ -188,7 +189,20 @@ static void free_channel(struct vmbus_channel *channel)
 	queue_work(vmbus_connection.work_queue, &channel->work);
 }
 
+static void percpu_channel_enq(void *arg)
+{
+	struct vmbus_channel *channel = arg;
+	int cpu = smp_processor_id();
+
+	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
+}
 
+static void percpu_channel_deq(void *arg)
+{
+	struct vmbus_channel *channel = arg;
+
+	list_del(&channel->percpu_list);
+}
 
 /*
  * vmbus_process_rescind_offer -
@@ -210,6 +224,12 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
 	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
 	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
 
+	if (channel->target_cpu != smp_processor_id())
+		smp_call_function_single(channel->target_cpu,
+					 percpu_channel_deq, channel, true);
+	else
+		percpu_channel_deq(channel);
+
 	if (channel->primary_channel == NULL) {
 		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
 		list_del(&channel->listentry);
@@ -245,6 +265,7 @@ static void vmbus_process_offer(struct work_struct *work)
 			work);
 	struct vmbus_channel *channel;
 	bool fnew = true;
+	bool enq = false;
 	int ret;
 	unsigned long flags;
 
@@ -264,12 +285,22 @@ static void vmbus_process_offer(struct work_struct *work)
 		}
 	}
 
-	if (fnew)
+	if (fnew) {
 		list_add_tail(&newchannel->listentry,
 			      &vmbus_connection.chn_list);
+		enq = true;
+	}
 
 	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
 
+	if (enq) {
+		if (newchannel->target_cpu != smp_processor_id())
+			smp_call_function_single(newchannel->target_cpu,
+						 percpu_channel_enq,
+						 newchannel, true);
+		else
+			percpu_channel_enq(newchannel);
+	}
 	if (!fnew) {
 		/*
 		 * Check to see if this is a sub-channel.
@@ -282,6 +313,14 @@ static void vmbus_process_offer(struct work_struct *work)
 		spin_lock_irqsave(&channel->sc_lock, flags);
 		list_add_tail(&newchannel->sc_list, &channel->sc_list);
 		spin_unlock_irqrestore(&channel->sc_lock, flags);
+
+		if (newchannel->target_cpu != smp_processor_id())
+			smp_call_function_single(newchannel->target_cpu,
+						 percpu_channel_enq,
+						 newchannel, true);
+		else
+			percpu_channel_enq(newchannel);
+
 		newchannel->state = CHANNEL_OPEN_STATE;
 		if (channel->sc_creation_callback != NULL)
 			channel->sc_creation_callback(newchannel);
@@ -365,7 +404,7 @@ static u32 next_vp;
  * performance critical channels (IDE, SCSI and Network) will be uniformly
  * distributed across all available CPUs.
  */
-static u32 get_vp_index(uuid_le *type_guid)
+static void init_vp_index(struct vmbus_channel *channel, uuid_le *type_guid)
 {
 	u32 cur_cpu;
 	int i;
@@ -387,10 +426,13 @@ static u32 get_vp_index(uuid_le *type_guid)
 	 * Also if the channel is not a performance critical
 	 * channel, bind it to cpu 0.
 	 */
-		return 0;
+		channel->target_cpu = 0;
+		channel->target_vp = 0;
+		return;
 	}
 	cur_cpu = (++next_vp % max_cpus);
-	return hv_context.vp_index[cur_cpu];
+	channel->target_cpu = cur_cpu;
+	channel->target_vp = hv_context.vp_index[cur_cpu];
 }
 
 /*
@@ -438,7 +480,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
 			offer->connection_id;
 	}
 
-	newchannel->target_vp = get_vp_index(&offer->offer.if_type);
+	init_vp_index(newchannel, &offer->offer.if_type);
 
 	memcpy(&newchannel->offermsg, offer,
 	       sizeof(struct vmbus_channel_offer_channel));