path: root/include/asm-i386
Diffstat (limited to 'include/asm-i386')
-rw-r--r--  include/asm-i386/io.h       3
-rw-r--r--  include/asm-i386/io_apic.h  1
-rw-r--r--  include/asm-i386/system.h   5
3 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/include/asm-i386/io.h b/include/asm-i386/io.h
index 7b65b5b00034..e8e0bd641120 100644
--- a/include/asm-i386/io.h
+++ b/include/asm-i386/io.h
@@ -112,6 +112,9 @@ extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsign
  * writew/writel functions and the other mmio helpers. The returned
  * address is not guaranteed to be usable directly as a virtual
  * address.
+ *
+ * If the area you are trying to map is a PCI BAR you should have a
+ * look at pci_iomap().
  */
 
 static inline void __iomem * ioremap(unsigned long offset, unsigned long size)
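
For reference, a minimal sketch of the distinction the new comment draws: mapping a PCI BAR through pci_iomap() versus calling ioremap() on the raw resource. The helper names (my_read_id, my_read_id_raw) and the choice of BAR 0 are illustrative only and are not part of this patch.

#include <linux/pci.h>
#include <linux/errno.h>
#include <asm/io.h>

/*
 * Illustrative only: map BAR 0 with pci_iomap() and read one register.
 * pci_iomap() takes the start and length from the BAR itself and also
 * copes with I/O-port BARs, which is why the new comment points at it.
 */
static int my_read_id(struct pci_dev *pdev, u32 *id)
{
	void __iomem *regs = pci_iomap(pdev, 0, 0);

	if (!regs)
		return -ENOMEM;
	*id = ioread32(regs);
	pci_iounmap(pdev, regs);
	return 0;
}

/* The raw ioremap() equivalent, valid only for a memory-space BAR. */
static int my_read_id_raw(struct pci_dev *pdev, u32 *id)
{
	void __iomem *regs = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));

	if (!regs)
		return -ENOMEM;
	*id = readl(regs);
	iounmap(regs);
	return 0;
}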
diff --git a/include/asm-i386/io_apic.h b/include/asm-i386/io_apic.h
index 340764076d5f..dbe734ddf2af 100644
--- a/include/asm-i386/io_apic.h
+++ b/include/asm-i386/io_apic.h
@@ -150,7 +150,6 @@ extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 
 #else /* !CONFIG_X86_IO_APIC */
 #define io_apic_assign_pci_irqs 0
-static inline void disable_ioapic_setup(void) { }
 #endif
 
 #endif
diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 609756c61676..d69ba937e092 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -214,11 +214,6 @@ static inline unsigned long get_limit(unsigned long segment)
  */
 
 
-/*
- * Actually only lfence would be needed for mb() because all stores done
- * by the kernel should be already ordered. But keep a full barrier for now.
- */
-
 #define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
 #define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
 
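
The comment removed from system.h argued that lfence alone could serve as mb() because stores done by the kernel are already ordered, but the full barrier is kept. A minimal sketch of the pattern these barriers guard (the my_* names are invented for illustration, not taken from the patch): a writer publishes data and then a ready flag, and the reader orders its flag load before the data load with rmb().

#include <asm/system.h>
#include <asm/processor.h>

static int my_data;
static int my_ready;

static void my_publish(int v)
{
	my_data = v;
	mb();			/* data store must be visible before the flag store */
	my_ready = 1;
}

static int my_consume(void)
{
	while (!my_ready)
		cpu_relax();	/* spin until the writer sets the flag */
	rmb();			/* flag load must complete before the data load */
	return my_data;
}

On CPUs with X86_FEATURE_XMM2 the alternative() macro patches mb() to mfence and rmb() to lfence at boot; otherwise the locked add to the top of the stack acts as the barrier.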