diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-08-04 11:49:08 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-08-04 11:49:08 -0400 |
commit | 1ddc6dd855f01977e9e1795037fca1c8be028d24 (patch) | |
tree | d4e2cfd4ae6cb1717ab5fb1633946b9138db87ba /drivers/xen | |
parent | ed8bbba0f617aca2c219a236019012784b22cf1f (diff) | |
parent | fcdf31a7c162de0c93a2bee51df4688ab0a348f8 (diff) |
Merge tag 'for-linus-4.2-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen bug fixes from David Vrabel:
- don't lose interrupts when offlining CPUs
- fix gntdev oops during unmap
- drop the balloon lock occasionally to allow domain create/destroy
* tag 'for-linus-4.2-rc5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
xen/events/fifo: Handle linked events when closing a port
xen: release lock occasionally during ballooning
xen/gntdev: Fix race condition in gntdev_release()
Diffstat (limited to 'drivers/xen')
-rw-r--r-- | drivers/xen/balloon.c | 15 | ||||
-rw-r--r-- | drivers/xen/events/events_base.c | 10 | ||||
-rw-r--r-- | drivers/xen/events/events_fifo.c | 45 | ||||
-rw-r--r-- | drivers/xen/events/events_internal.h | 7 | ||||
-rw-r--r-- | drivers/xen/gntdev.c | 2 |
5 files changed, 62 insertions, 17 deletions
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index fd933695f232..bf4a23c7c591 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp) | |||
472 | } | 472 | } |
473 | 473 | ||
474 | /* | 474 | /* |
475 | * We avoid multiple worker processes conflicting via the balloon mutex. | 475 | * As this is a work item it is guaranteed to run as a single instance only. |
476 | * We may of course race updates of the target counts (which are protected | 476 | * We may of course race updates of the target counts (which are protected |
477 | * by the balloon lock), or with changes to the Xen hard limit, but we will | 477 | * by the balloon lock), or with changes to the Xen hard limit, but we will |
478 | * recover from these in time. | 478 | * recover from these in time. |
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work) | |||
482 | enum bp_state state = BP_DONE; | 482 | enum bp_state state = BP_DONE; |
483 | long credit; | 483 | long credit; |
484 | 484 | ||
485 | mutex_lock(&balloon_mutex); | ||
486 | 485 | ||
487 | do { | 486 | do { |
487 | mutex_lock(&balloon_mutex); | ||
488 | |||
488 | credit = current_credit(); | 489 | credit = current_credit(); |
489 | 490 | ||
490 | if (credit > 0) { | 491 | if (credit > 0) { |
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work) | |||
499 | 500 | ||
500 | state = update_schedule(state); | 501 | state = update_schedule(state); |
501 | 502 | ||
502 | #ifndef CONFIG_PREEMPT | 503 | mutex_unlock(&balloon_mutex); |
503 | if (need_resched()) | 504 | |
504 | schedule(); | 505 | cond_resched(); |
505 | #endif | 506 | |
506 | } while (credit && state == BP_DONE); | 507 | } while (credit && state == BP_DONE); |
507 | 508 | ||
508 | /* Schedule more work if there is some still to be done. */ | 509 | /* Schedule more work if there is some still to be done. */ |
509 | if (state == BP_EAGAIN) | 510 | if (state == BP_EAGAIN) |
510 | schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); | 511 | schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); |
511 | |||
512 | mutex_unlock(&balloon_mutex); | ||
513 | } | 512 | } |
514 | 513 | ||
515 | /* Resets the Xen limit, sets new target, and kicks off processing. */ | 514 | /* Resets the Xen limit, sets new target, and kicks off processing. */ |
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 96093ae369a5..1495eccb1617 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c | |||
@@ -452,10 +452,12 @@ static void xen_free_irq(unsigned irq) | |||
452 | irq_free_desc(irq); | 452 | irq_free_desc(irq); |
453 | } | 453 | } |
454 | 454 | ||
455 | static void xen_evtchn_close(unsigned int port) | 455 | static void xen_evtchn_close(unsigned int port, unsigned int cpu) |
456 | { | 456 | { |
457 | struct evtchn_close close; | 457 | struct evtchn_close close; |
458 | 458 | ||
459 | xen_evtchn_op_close(port, cpu); | ||
460 | |||
459 | close.port = port; | 461 | close.port = port; |
460 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) | 462 | if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0) |
461 | BUG(); | 463 | BUG(); |
@@ -544,7 +546,7 @@ out: | |||
544 | 546 | ||
545 | err: | 547 | err: |
546 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); | 548 | pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); |
547 | xen_evtchn_close(evtchn); | 549 | xen_evtchn_close(evtchn, NR_CPUS); |
548 | return 0; | 550 | return 0; |
549 | } | 551 | } |
550 | 552 | ||
@@ -565,7 +567,7 @@ static void shutdown_pirq(struct irq_data *data) | |||
565 | return; | 567 | return; |
566 | 568 | ||
567 | mask_evtchn(evtchn); | 569 | mask_evtchn(evtchn); |
568 | xen_evtchn_close(evtchn); | 570 | xen_evtchn_close(evtchn, cpu_from_evtchn(evtchn)); |
569 | xen_irq_info_cleanup(info); | 571 | xen_irq_info_cleanup(info); |
570 | } | 572 | } |
571 | 573 | ||
@@ -609,7 +611,7 @@ static void __unbind_from_irq(unsigned int irq) | |||
609 | if (VALID_EVTCHN(evtchn)) { | 611 | if (VALID_EVTCHN(evtchn)) { |
610 | unsigned int cpu = cpu_from_irq(irq); | 612 | unsigned int cpu = cpu_from_irq(irq); |
611 | 613 | ||
612 | xen_evtchn_close(evtchn); | 614 | xen_evtchn_close(evtchn, cpu); |
613 | 615 | ||
614 | switch (type_from_irq(irq)) { | 616 | switch (type_from_irq(irq)) { |
615 | case IRQT_VIRQ: | 617 | case IRQT_VIRQ: |
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c index ed673e1acd61..6df8aac966b9 100644 --- a/drivers/xen/events/events_fifo.c +++ b/drivers/xen/events/events_fifo.c | |||
@@ -255,6 +255,12 @@ static void evtchn_fifo_unmask(unsigned port) | |||
255 | } | 255 | } |
256 | } | 256 | } |
257 | 257 | ||
258 | static bool evtchn_fifo_is_linked(unsigned port) | ||
259 | { | ||
260 | event_word_t *word = event_word_from_port(port); | ||
261 | return sync_test_bit(EVTCHN_FIFO_BIT(LINKED, word), BM(word)); | ||
262 | } | ||
263 | |||
258 | static uint32_t clear_linked(volatile event_word_t *word) | 264 | static uint32_t clear_linked(volatile event_word_t *word) |
259 | { | 265 | { |
260 | event_word_t new, old, w; | 266 | event_word_t new, old, w; |
@@ -281,7 +287,8 @@ static void handle_irq_for_port(unsigned port) | |||
281 | 287 | ||
282 | static void consume_one_event(unsigned cpu, | 288 | static void consume_one_event(unsigned cpu, |
283 | struct evtchn_fifo_control_block *control_block, | 289 | struct evtchn_fifo_control_block *control_block, |
284 | unsigned priority, unsigned long *ready) | 290 | unsigned priority, unsigned long *ready, |
291 | bool drop) | ||
285 | { | 292 | { |
286 | struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); | 293 | struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); |
287 | uint32_t head; | 294 | uint32_t head; |
@@ -313,13 +320,15 @@ static void consume_one_event(unsigned cpu, | |||
313 | if (head == 0) | 320 | if (head == 0) |
314 | clear_bit(priority, ready); | 321 | clear_bit(priority, ready); |
315 | 322 | ||
316 | if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) | 323 | if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) { |
317 | handle_irq_for_port(port); | 324 | if (likely(!drop)) |
325 | handle_irq_for_port(port); | ||
326 | } | ||
318 | 327 | ||
319 | q->head[priority] = head; | 328 | q->head[priority] = head; |
320 | } | 329 | } |
321 | 330 | ||
322 | static void evtchn_fifo_handle_events(unsigned cpu) | 331 | static void __evtchn_fifo_handle_events(unsigned cpu, bool drop) |
323 | { | 332 | { |
324 | struct evtchn_fifo_control_block *control_block; | 333 | struct evtchn_fifo_control_block *control_block; |
325 | unsigned long ready; | 334 | unsigned long ready; |
@@ -331,11 +340,16 @@ static void evtchn_fifo_handle_events(unsigned cpu) | |||
331 | 340 | ||
332 | while (ready) { | 341 | while (ready) { |
333 | q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); | 342 | q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES); |
334 | consume_one_event(cpu, control_block, q, &ready); | 343 | consume_one_event(cpu, control_block, q, &ready, drop); |
335 | ready |= xchg(&control_block->ready, 0); | 344 | ready |= xchg(&control_block->ready, 0); |
336 | } | 345 | } |
337 | } | 346 | } |
338 | 347 | ||
348 | static void evtchn_fifo_handle_events(unsigned cpu) | ||
349 | { | ||
350 | __evtchn_fifo_handle_events(cpu, false); | ||
351 | } | ||
352 | |||
339 | static void evtchn_fifo_resume(void) | 353 | static void evtchn_fifo_resume(void) |
340 | { | 354 | { |
341 | unsigned cpu; | 355 | unsigned cpu; |
@@ -371,6 +385,26 @@ static void evtchn_fifo_resume(void) | |||
371 | event_array_pages = 0; | 385 | event_array_pages = 0; |
372 | } | 386 | } |
373 | 387 | ||
388 | static void evtchn_fifo_close(unsigned port, unsigned int cpu) | ||
389 | { | ||
390 | if (cpu == NR_CPUS) | ||
391 | return; | ||
392 | |||
393 | get_online_cpus(); | ||
394 | if (cpu_online(cpu)) { | ||
395 | if (WARN_ON(irqs_disabled())) | ||
396 | goto out; | ||
397 | |||
398 | while (evtchn_fifo_is_linked(port)) | ||
399 | cpu_relax(); | ||
400 | } else { | ||
401 | __evtchn_fifo_handle_events(cpu, true); | ||
402 | } | ||
403 | |||
404 | out: | ||
405 | put_online_cpus(); | ||
406 | } | ||
407 | |||
374 | static const struct evtchn_ops evtchn_ops_fifo = { | 408 | static const struct evtchn_ops evtchn_ops_fifo = { |
375 | .max_channels = evtchn_fifo_max_channels, | 409 | .max_channels = evtchn_fifo_max_channels, |
376 | .nr_channels = evtchn_fifo_nr_channels, | 410 | .nr_channels = evtchn_fifo_nr_channels, |
@@ -384,6 +418,7 @@ static const struct evtchn_ops evtchn_ops_fifo = { | |||
384 | .unmask = evtchn_fifo_unmask, | 418 | .unmask = evtchn_fifo_unmask, |
385 | .handle_events = evtchn_fifo_handle_events, | 419 | .handle_events = evtchn_fifo_handle_events, |
386 | .resume = evtchn_fifo_resume, | 420 | .resume = evtchn_fifo_resume, |
421 | .close = evtchn_fifo_close, | ||
387 | }; | 422 | }; |
388 | 423 | ||
389 | static int evtchn_fifo_alloc_control_block(unsigned cpu) | 424 | static int evtchn_fifo_alloc_control_block(unsigned cpu) |
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h index 50c2050a1e32..d18e12315ec0 100644 --- a/drivers/xen/events/events_internal.h +++ b/drivers/xen/events/events_internal.h | |||
@@ -68,6 +68,7 @@ struct evtchn_ops { | |||
68 | bool (*test_and_set_mask)(unsigned port); | 68 | bool (*test_and_set_mask)(unsigned port); |
69 | void (*mask)(unsigned port); | 69 | void (*mask)(unsigned port); |
70 | void (*unmask)(unsigned port); | 70 | void (*unmask)(unsigned port); |
71 | void (*close)(unsigned port, unsigned cpu); | ||
71 | 72 | ||
72 | void (*handle_events)(unsigned cpu); | 73 | void (*handle_events)(unsigned cpu); |
73 | void (*resume)(void); | 74 | void (*resume)(void); |
@@ -145,6 +146,12 @@ static inline void xen_evtchn_resume(void) | |||
145 | evtchn_ops->resume(); | 146 | evtchn_ops->resume(); |
146 | } | 147 | } |
147 | 148 | ||
149 | static inline void xen_evtchn_op_close(unsigned port, unsigned cpu) | ||
150 | { | ||
151 | if (evtchn_ops->close) | ||
152 | return evtchn_ops->close(port, cpu); | ||
153 | } | ||
154 | |||
148 | void xen_evtchn_2l_init(void); | 155 | void xen_evtchn_2l_init(void); |
149 | int xen_evtchn_fifo_init(void); | 156 | int xen_evtchn_fifo_init(void); |
150 | 157 | ||
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 67b9163db718..0dbb222daaf1 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip) | |||
568 | 568 | ||
569 | pr_debug("priv %p\n", priv); | 569 | pr_debug("priv %p\n", priv); |
570 | 570 | ||
571 | mutex_lock(&priv->lock); | ||
571 | while (!list_empty(&priv->maps)) { | 572 | while (!list_empty(&priv->maps)) { |
572 | map = list_entry(priv->maps.next, struct grant_map, next); | 573 | map = list_entry(priv->maps.next, struct grant_map, next); |
573 | list_del(&map->next); | 574 | list_del(&map->next); |
574 | gntdev_put_map(NULL /* already removed */, map); | 575 | gntdev_put_map(NULL /* already removed */, map); |
575 | } | 576 | } |
576 | WARN_ON(!list_empty(&priv->freeable_maps)); | 577 | WARN_ON(!list_empty(&priv->freeable_maps)); |
578 | mutex_unlock(&priv->lock); | ||
577 | 579 | ||
578 | if (use_ptemod) | 580 | if (use_ptemod) |
579 | mmu_notifier_unregister(&priv->mn, priv->mm); | 581 | mmu_notifier_unregister(&priv->mn, priv->mm); |