author     Boris Ostrovsky <boris.ostrovsky@oracle.com>   2016-03-18 10:11:07 -0400
committer  David Vrabel <david.vrabel@citrix.com>         2016-04-04 06:18:00 -0400
commit     ff1e22e7a638a0782f54f81a6c9cb139aca2da35 (patch)
tree       6dad8a5e1792259b3ec1258d04fb35fd72987347 /drivers/xen/events
parent     85d1a29de8e4e5bce20ca02103acf1082a6e530a (diff)
xen/events: Mask a moving irq
Moving an unmasked irq may result in the irq handler being invoked on
both the source and the target CPU.
With 2-level this can happen as follows:

On source CPU:
        evtchn_2l_handle_events() ->
            generic_handle_irq() ->
                handle_edge_irq() ->
                    eoi_pirq():
                        irq_move_irq(data);

                        /***** WE ARE HERE *****/

                        if (VALID_EVTCHN(evtchn))
                            clear_evtchn(evtchn);
If at this moment the target processor is handling an unrelated event
in evtchn_2l_handle_events()'s loop, it may pick up our event, since the
target's cpu_evtchn_mask claims that this event belongs to it *and* the
event is unmasked and still pending. At the same time, the source CPU
will continue executing its own handle_edge_irq().
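
For reference, the per-CPU selection test behind that loop boils down to
pending AND unmasked AND bound-to-this-CPU. A sketch paraphrasing the
active_evtchns() helper from drivers/xen/events/events_2l.c (the exact
form may differ between kernel versions):

        /*
         * Paraphrase of the 2-level selection test: a CPU considers an
         * event deliverable when it is pending, not masked, and marked
         * as bound to this CPU in cpu_evtchn_mask.
         */
        static inline xen_ulong_t active_evtchns(unsigned int cpu,
                                                 struct shared_info *sh,
                                                 unsigned int idx)
        {
                return sh->evtchn_pending[idx] &
                       per_cpu(cpu_evtchn_mask, cpu)[idx] &
                       ~sh->evtchn_mask[idx];
        }

In the race window above all three terms are true on the target: the
event is still pending (clear_evtchn() has not run yet), it was never
masked, and the move has already pointed cpu_evtchn_mask at the target.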
With FIFO interrupts the scenario is similar: irq_move_irq() may result
in an EVTCHNOP_unmask hypercall which, in turn, may make the event
pending on the target CPU.
We can avoid this situation by moving and clearing the event while
keeping the event masked.
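
Concretely, both hunks of the diff below apply the same pattern:
remember the current mask state, ack and move while masked, then unmask
only if the channel was not masked to begin with:

        if (unlikely(irqd_is_setaffinity_pending(data))) {
                /* Mask the channel and remember whether it already was. */
                int masked = test_and_set_mask(evtchn);

                /* Ack while masked: the target cannot see it pending. */
                clear_evtchn(evtchn);

                /* Safe to migrate now that the event cannot fire. */
                irq_move_masked_irq(data);

                /* Restore the original mask state. */
                if (!masked)
                        unmask_evtchn(evtchn);
        } else
                clear_evtchn(evtchn);

Using test_and_set_mask() preserves any mask state set by another path,
so the final unmask_evtchn() never undoes a mask that this sequence did
not itself apply.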
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Diffstat (limited to 'drivers/xen/events')
-rw-r--r--   drivers/xen/events/events_base.c | 28 ++++++++++++++++++++++++----
 1 file changed, 24 insertions(+), 4 deletions(-)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 488017a0806a..cb7138c97c69 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data)
 	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
 	int rc = 0;
 
-	irq_move_irq(data);
+	if (!VALID_EVTCHN(evtchn))
+		return;
 
-	if (VALID_EVTCHN(evtchn))
+	if (unlikely(irqd_is_setaffinity_pending(data))) {
+		int masked = test_and_set_mask(evtchn);
+
+		clear_evtchn(evtchn);
+
+		irq_move_masked_irq(data);
+
+		if (!masked)
+			unmask_evtchn(evtchn);
+	} else
 		clear_evtchn(evtchn);
 
 	if (pirq_needs_eoi(data->irq)) {
@@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data)
 {
 	int evtchn = evtchn_from_irq(data->irq);
 
-	irq_move_irq(data);
+	if (!VALID_EVTCHN(evtchn))
+		return;
 
-	if (VALID_EVTCHN(evtchn))
+	if (unlikely(irqd_is_setaffinity_pending(data))) {
+		int masked = test_and_set_mask(evtchn);
+
+		clear_evtchn(evtchn);
+
+		irq_move_masked_irq(data);
+
+		if (!masked)
+			unmask_evtchn(evtchn);
+	} else
 		clear_evtchn(evtchn);
 }
 