author    Linus Torvalds <torvalds@linux-foundation.org>	2016-01-18 19:44:24 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>	2016-01-18 19:44:24 -0500
commit    a200dcb34693084e56496960d855afdeaaf9578f
tree      bf65e4350460b7f98247278469f7600d1808c3fc /arch/ia64
parent    d05d82f7110b08fd36178a641b69a1f206e1142b
parent    43e361f23c49dbddf74f56ddf6cdd85c5dbff6da
Merge tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost
Pull virtio barrier rework+fixes from Michael Tsirkin:
"This adds a new kind of barrier, and reworks virtio and xen to use it.
Plus some fixes here and there"
* tag 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost: (44 commits)
checkpatch: add virt barriers
checkpatch: check for __smp outside barrier.h
checkpatch.pl: add missing memory barriers
virtio: make find_vqs() checkpatch.pl-friendly
virtio_balloon: fix race between migration and ballooning
virtio_balloon: fix race by fill and leak
s390: more efficient smp barriers
s390: use generic memory barriers
xen/events: use virt_xxx barriers
xen/io: use virt_xxx barriers
xenbus: use virt_xxx barriers
virtio_ring: use virt_store_mb
sh: move xchg_cmpxchg to a header by itself
sh: support 1 and 2 byte xchg
virtio_ring: update weak barriers to use virt_xxx
Revert "virtio_ring: Update weak barriers to use dma_wmb/rmb"
asm-generic: implement virt_xxx memory barriers
x86: define __smp_xxx
xtensa: define __smp_xxx
tile: define __smp_xxx
...
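For orientation, the layering this series adds in include/asm-generic/barrier.h works roughly as sketched below: each architecture supplies __smp_xxx primitives that always emit a real barrier, the kernel-internal smp_xxx map onto them only on CONFIG_SMP builds, and the new virt_xxx barriers map onto __smp_xxx unconditionally, because even a uniprocessor guest must order its accesses against an SMP host. A condensed sketch, not the verbatim header:

/* Condensed sketch of the asm-generic/barrier.h layering (not verbatim). */

/* An arch may supply a cheaper __smp_mb(); default to a full barrier. */
#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

/* Kernel-internal SMP barriers compile down to a compiler barrier on UP... */
#ifdef CONFIG_SMP
#define smp_mb()	__smp_mb()
#else
#define smp_mb()	barrier()
#endif

/* ...but virt_mb() always emits the real barrier: a UP guest still has to
 * order its accesses against a hypervisor/host running on other CPUs. */
#define virt_mb()	__smp_mb()

The ia64 diff below is one instance of this conversion: the arch stops defining smp_* itself, defines only the __smp_* primitives, and includes the generic header for the rest.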
Diffstat (limited to 'arch/ia64')

 arch/ia64/include/asm/barrier.h | 24
 arch/ia64/kernel/iosapic.c      |  6
 2 files changed, 10 insertions(+), 20 deletions(-)
diff --git a/arch/ia64/include/asm/barrier.h b/arch/ia64/include/asm/barrier.h
index 209c4b817c95..588f1614cafc 100644
--- a/arch/ia64/include/asm/barrier.h
+++ b/arch/ia64/include/asm/barrier.h
@@ -42,34 +42,24 @@
 #define dma_rmb()	mb()
 #define dma_wmb()	mb()
 
-#ifdef CONFIG_SMP
-# define smp_mb()	mb()
-#else
-# define smp_mb()	barrier()
-#endif
+# define __smp_mb()	mb()
 
-#define smp_rmb()	smp_mb()
-#define smp_wmb()	smp_mb()
-
-#define read_barrier_depends()		do { } while (0)
-#define smp_read_barrier_depends()	do { } while (0)
-
-#define smp_mb__before_atomic()	barrier()
-#define smp_mb__after_atomic()	barrier()
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
 
 /*
  * IA64 GCC turns volatile stores into st.rel and volatile loads into ld.acq no
  * need for asm trickery!
  */
 
-#define smp_store_release(p, v)						\
+#define __smp_store_release(p, v)					\
 do {									\
 	compiletime_assert_atomic_type(*p);				\
 	barrier();							\
 	WRITE_ONCE(*p, v);						\
 } while (0)
 
-#define smp_load_acquire(p)						\
+#define __smp_load_acquire(p)						\
 ({									\
 	typeof(*p) ___p1 = READ_ONCE(*p);				\
 	compiletime_assert_atomic_type(*p);				\
@@ -77,12 +67,12 @@ do {									\
 	___p1;								\
 })
 
-#define smp_store_mb(var, value)	do { WRITE_ONCE(var, value); smp_mb(); } while (0)
-
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
  * that none of the previous instructions in the same group are
  * affected by the rsm/ssm.
  */
 
+#include <asm-generic/barrier.h>
+
 #endif /* _ASM_IA64_BARRIER_H */
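As the comment in the diff notes, ia64 GCC compiles volatile stores to st.rel and volatile loads to ld.acq, so WRITE_ONCE/READ_ONCE plus a compiler barrier are enough to implement release and acquire semantics. A hedged illustration of the pairing these macros provide; the flag/payload example is hypothetical, not code from this commit:

#include <asm/barrier.h>	/* smp_store_release(), smp_load_acquire() */

static int payload;
static int ready;

/* Producer: write the payload first, then raise the flag with release
 * semantics (an st.rel store on ia64), so the payload write cannot be
 * reordered past the flag write. */
static void publish(int v)
{
	payload = v;
	smp_store_release(&ready, 1);
}

/* Consumer: read the flag with acquire semantics (an ld.acq load on ia64);
 * if it observes ready == 1, the payload read below is guaranteed to
 * observe the value published above. */
static int consume(void)
{
	if (smp_load_acquire(&ready))
		return payload;
	return -1;	/* nothing published yet */
}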
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index d2fae054d988..90fde5b8669d 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -256,7 +256,7 @@ set_rte (unsigned int gsi, unsigned int irq, unsigned int dest, int mask)
 }
 
 static void
-nop (struct irq_data *data)
+iosapic_nop (struct irq_data *data)
 {
 	/* do nothing... */
 }
@@ -415,7 +415,7 @@ iosapic_unmask_level_irq (struct irq_data *data)
 #define iosapic_shutdown_level_irq	mask_irq
 #define iosapic_enable_level_irq	unmask_irq
 #define iosapic_disable_level_irq	mask_irq
-#define iosapic_ack_level_irq		nop
+#define iosapic_ack_level_irq		iosapic_nop
 
 static struct irq_chip irq_type_iosapic_level = {
 	.name =		"IO-SAPIC-level",
@@ -453,7 +453,7 @@ iosapic_ack_edge_irq (struct irq_data *data)
 }
 
 #define iosapic_enable_edge_irq		unmask_irq
-#define iosapic_disable_edge_irq	nop
+#define iosapic_disable_edge_irq	iosapic_nop
 
 static struct irq_chip irq_type_iosapic_edge = {
 	.name =		"IO-SAPIC-edge",
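The nop -> iosapic_nop rename is presumably needed because asm-generic/barrier.h, now pulled in by the ia64 barrier.h above, provides a function-like nop() fallback macro, which would mangle any definition of a function named nop. A minimal standalone demonstration of the collision; the macro mirrors the asm-generic fallback, the rest is illustrative:

/* Fallback macro as found in asm-generic/barrier.h. */
#ifndef nop
#define nop()	asm volatile ("nop")
#endif

struct irq_data;

/*
 * With the macro in scope, the old definition no longer compiles: the
 * preprocessor treats "nop (struct irq_data *data)" as a function-like
 * macro invocation with one argument, but nop() takes none.
 *
 *	static void nop (struct irq_data *data) { }
 *
 * Renaming the function keeps the preprocessor out of the way:
 */
static void iosapic_nop (struct irq_data *data)
{
	/* do nothing... */
	(void)data;
}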