Diffstat (limited to 'include')

 -rw-r--r--  include/asm-x86/amd_iommu_types.h |  8
 -rw-r--r--  include/asm-x86/i387.h            |  2
 -rw-r--r--  include/asm-x86/io.h              | 18
 -rw-r--r--  include/asm-x86/mmzone_32.h       |  6
 -rw-r--r--  include/asm-x86/pgtable_64.h      |  2
 -rw-r--r--  include/asm-x86/processor.h       | 23
 -rw-r--r--  include/asm-x86/spinlock.h        |  4

 7 files changed, 46 insertions, 17 deletions
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index 22aa58ca1991..dcc812067394 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -31,9 +31,6 @@
 #define ALIAS_TABLE_ENTRY_SIZE 2
 #define RLOOKUP_TABLE_ENTRY_SIZE (sizeof(void *))
 
-/* helper macros */
-#define LOW_U32(x) ((x) & ((1ULL << 32)-1))
-
 /* Length of the MMIO region for the AMD IOMMU */
 #define MMIO_REGION_LENGTH 0x4000
 
@@ -69,6 +66,9 @@
 #define MMIO_EVT_TAIL_OFFSET 0x2018
 #define MMIO_STATUS_OFFSET 0x2020
 
+/* MMIO status bits */
+#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
+
 /* feature control bits */
 #define CONTROL_IOMMU_EN 0x00ULL
 #define CONTROL_HT_TUN_EN 0x01ULL
@@ -89,6 +89,7 @@
 #define CMD_INV_IOMMU_PAGES 0x03
 
 #define CMD_COMPL_WAIT_STORE_MASK 0x01
+#define CMD_COMPL_WAIT_INT_MASK 0x02
 #define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
 #define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
 
@@ -99,6 +100,7 @@
 #define DEV_ENTRY_TRANSLATION 0x01
 #define DEV_ENTRY_IR 0x3d
 #define DEV_ENTRY_IW 0x3e
+#define DEV_ENTRY_NO_PAGE_FAULT 0x62
 #define DEV_ENTRY_EX 0x67
 #define DEV_ENTRY_SYSMGT1 0x68
 #define DEV_ENTRY_SYSMGT2 0x69
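The two new masks pair up: CMD_COMPL_WAIT_INT_MASK asks a COMPLETION_WAIT command to raise an interrupt when it retires, and MMIO_STATUS_COM_WAIT_INT_MASK is the bit the IOMMU then sets in its MMIO status register. A minimal sketch of how such bits are typically consumed follows; the struct layout, helper names, and which dword carries the flag are illustrative assumptions, not taken from the driver.

#include <stdint.h>

#define CMD_COMPL_WAIT_INT_MASK        0x02    /* from the hunk above */
#define MMIO_STATUS_COM_WAIT_INT_MASK  0x04    /* from the hunk above */
#define MMIO_STATUS_OFFSET             0x2020  /* already in the header */

/* Assumed layout: a 128-bit IOMMU command held as four 32-bit words. */
struct iommu_cmd_sketch {
	uint32_t data[4];
};

/* Request an interrupt on completion; which dword carries the flag is
 * an assumption here, the real command encoding lives in the driver. */
static void compl_wait_request_int(struct iommu_cmd_sketch *cmd)
{
	cmd->data[0] |= CMD_COMPL_WAIT_INT_MASK;
}

/* Poll the status register for the completion-wait interrupt bit;
 * acknowledging it is left to the caller. */
static int compl_wait_int_pending(volatile uint8_t *mmio_base)
{
	volatile uint32_t *status =
		(volatile uint32_t *)(mmio_base + MMIO_STATUS_OFFSET);

	return (*status & MMIO_STATUS_COM_WAIT_INT_MASK) != 0;
}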
diff --git a/include/asm-x86/i387.h b/include/asm-x86/i387.h
index 6d3b21063419..56d00e31aec0 100644
--- a/include/asm-x86/i387.h
+++ b/include/asm-x86/i387.h
@@ -63,8 +63,6 @@ static inline int restore_fpu_checking(struct i387_fxsave_struct *fx)
 #else
 		     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
 #endif
-	if (unlikely(err))
-		init_fpu(current);
 	return err;
 }
 
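With the init_fpu() fallback removed, restore_fpu_checking() now only reports whether FXRSTOR faulted and leaves the recovery decision to its callers. A sketch of one plausible caller-side arrangement, purely illustrative (the function below is hypothetical and not part of this patch):

/* Hypothetical caller: if restoring the saved FPU image faults, for
 * example because the image in a signal frame is corrupt, fall back
 * to a freshly initialised FPU state instead of continuing silently. */
static int restore_user_fpu(struct i387_fxsave_struct *fx)
{
	int err = restore_fpu_checking(fx);

	if (unlikely(err))
		init_fpu(current);	/* the policy the header used to apply */
	return err;
}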
diff --git a/include/asm-x86/io.h b/include/asm-x86/io.h
index bf5d629b3a39..0f954dc89cb3 100644
--- a/include/asm-x86/io.h
+++ b/include/asm-x86/io.h
@@ -21,7 +21,7 @@ extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
-{ type ret; asm volatile("mov" size " %1,%0":"=" reg (ret) \
+{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
 :"m" (*(volatile type __force *)addr) barrier); return ret; }
 
 #define build_mmio_write(name, size, type, reg, barrier) \
@@ -29,13 +29,13 @@ static inline void name(type val, volatile void __iomem *addr) \
 { asm volatile("mov" size " %0,%1": :reg (val), \
 "m" (*(volatile type __force *)addr) barrier); }
 
-build_mmio_read(readb, "b", unsigned char, "q", :"memory")
-build_mmio_read(readw, "w", unsigned short, "r", :"memory")
-build_mmio_read(readl, "l", unsigned int, "r", :"memory")
+build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
+build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
+build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
 
-build_mmio_read(__readb, "b", unsigned char, "q", )
-build_mmio_read(__readw, "w", unsigned short, "r", )
-build_mmio_read(__readl, "l", unsigned int, "r", )
+build_mmio_read(__readb, "b", unsigned char, "=q", )
+build_mmio_read(__readw, "w", unsigned short, "=r", )
+build_mmio_read(__readl, "l", unsigned int, "=r", )
 
 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
@@ -59,8 +59,8 @@ build_mmio_write(__writel, "l", unsigned int, "r", )
 #define mmiowb() barrier()
 
 #ifdef CONFIG_X86_64
-build_mmio_read(readq, "q", unsigned long, "r", :"memory")
-build_mmio_read(__readq, "q", unsigned long, "r", )
+build_mmio_read(readq, "q", unsigned long, "=r", :"memory")
+build_mmio_read(__readq, "q", unsigned long, "=r", )
 build_mmio_write(writeq, "q", unsigned long, "r", :"memory")
 build_mmio_write(__writeq, "q", unsigned long, "r", )
 
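The read macros previously hard-coded the leading "=" of the output constraint and glued it to the reg argument inside the macro body; after this change the caller passes the full constraint ("=q", "=r"), so build_mmio_read() and build_mmio_write() treat reg the same way. Expanded by hand the generated code is identical. A standalone approximation of what readl() expands to after the change, using plain user-space types and dropping the __iomem/__force annotations:

/* Roughly build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
 * after expansion: the output constraint now arrives as the single
 * token "=r" instead of "=" pasted next to "r" inside the macro. */
static inline unsigned int readl_sketch(const volatile void *addr)
{
	unsigned int ret;

	asm volatile("movl %1,%0"
		     : "=r" (ret)
		     : "m" (*(volatile unsigned int *)addr)
		     : "memory");
	return ret;
}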
diff --git a/include/asm-x86/mmzone_32.h b/include/asm-x86/mmzone_32.h
index b2298a227567..5862e6460658 100644
--- a/include/asm-x86/mmzone_32.h
+++ b/include/asm-x86/mmzone_32.h
@@ -97,10 +97,16 @@ static inline int pfn_valid(int pfn)
 	reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
 #define alloc_bootmem(x) \
 	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_nopanic(x) \
+	__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
+				__pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low(x) \
 	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_nopanic(x) \
+	__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
+				__pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_low_pages(x) \
 	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
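The new wrappers route to __alloc_bootmem_node_nopanic(), which returns NULL when boot memory cannot be found instead of panicking, so these 32-bit NUMA definitions gain the same _nopanic variants the generic bootmem interface already offers. A sketch of the calling pattern they enable; the function, message, and size below are made up for illustration:

/* Hypothetical early-boot caller: try to set up an optional table and
 * degrade gracefully if boot memory is tight, where the plain
 * alloc_bootmem() would have panicked instead. */
static void *setup_optional_table(unsigned long size)
{
	void *table = alloc_bootmem_nopanic(size);

	if (!table) {
		printk(KERN_WARNING "optional table disabled: no bootmem\n");
		return NULL;
	}
	memset(table, 0, size);
	return table;
}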
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index ac5fff4cc58a..549144d03d99 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -151,7 +151,7 @@ static inline void native_pgd_clear(pgd_t *pgd)
 #define VMALLOC_END _AC(0xffffe1ffffffffff, UL)
 #define VMEMMAP_START _AC(0xffffe20000000000, UL)
 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
-#define MODULES_END _AC(0xfffffffffff00000, UL)
+#define MODULES_END _AC(0xffffffffff000000, UL)
 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
 
 #ifndef __ASSEMBLY__
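Lowering MODULES_END from 0xfffffffffff00000 to 0xffffffffff000000 keeps the top 16 MiB of the address space out of the module mapping area and shrinks MODULES_LEN accordingly. The arithmetic, worked out only from the constants in this hunk:

#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t vaddr   = 0xffffffffa0000000ull;	/* MODULES_VADDR */
	uint64_t old_end = 0xfffffffffff00000ull;	/* MODULES_END before */
	uint64_t new_end = 0xffffffffff000000ull;	/* MODULES_END after */

	assert(old_end - vaddr == 0x5ff00000ull);	/* 1535 MiB */
	assert(new_end - vaddr == 0x5f000000ull);	/* 1520 MiB */
	assert(old_end - new_end == 0xf00000ull);	/* 15 MiB trimmed from the top */
	return 0;
}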
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index 5f58da401b43..4df3e2f6fb56 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -728,6 +728,29 @@ extern unsigned long boot_option_idle_override;
 extern unsigned long idle_halt;
 extern unsigned long idle_nomwait;
 
+/*
+ * On systems with caches, caches must be flushed as the absolute
+ * last instruction before going into a suspended halt.  Otherwise,
+ * dirty data can linger in the cache and become stale on resume,
+ * leading to strange errors.
+ *
+ * Perform a variety of operations to guarantee that the compiler
+ * will not reorder instructions.  wbinvd itself is serializing
+ * so the processor will not reorder.
+ *
+ * Systems without cache can just go into halt.
+ */
+static inline void wbinvd_halt(void)
+{
+	mb();
+	/* check for clflush to determine if wbinvd is legal */
+	if (cpu_has_clflush)
+		asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory");
+	else
+		while (1)
+			halt();
+}
+
 extern void enable_sep_cpu(void);
 extern int sysenter_setup(void);
 
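wbinvd_halt() disables interrupts, writes back and invalidates the caches, and then sits in a hlt loop, so the CPU parks with no dirty cache lines left behind; as the in-code comment says, the clflush feature check decides whether wbinvd is legal, and on the fallback path the helper simply halts. Neither branch ever returns. A hedged sketch of the sort of CPU-parking path that could use it; the function below is illustrative only and not taken from this patch:

/* Hypothetical "park this CPU" routine: after the offline handshake is
 * done, flush the caches and halt for good.  wbinvd_halt() loops in hlt
 * (or in halt() on CPUs without clflush), so control never comes back. */
static void play_dead_sketch(void)
{
	/* ... tell the rest of the system this CPU is going away ... */
	wbinvd_halt();
	/* not reached */
}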
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 4f9a9861799a..e39c790dbfd2 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -65,7 +65,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
-	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
+	return (((tmp >> 8) - tmp) & 0xff) > 1;
 }
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
@@ -127,7 +127,7 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->slock);
 
-	return (((tmp >> 16) & 0xffff) - (tmp & 0xffff)) > 1;
+	return (((tmp >> 16) - tmp) & 0xffff) > 1;
 }
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
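Masking after the subtraction makes the contention test wrap-safe. In the old form the two ticket fields were truncated before subtracting, so once the tail (next ticket) wraps past zero while the head has not, the difference goes negative and the lock is reported as uncontended even though waiters are queued; subtracting first and masking afterwards computes the ticket distance modulo 256 (or 65536 for the 16-bit variant). A self-contained user-space check of the 8-bit case, mirroring only the changed expression:

#include <assert.h>

/* Lock word layout as in the 8-bit ticket lock: (next << 8) | owner. */
static int contended_old(int tmp)
{
	return (((tmp >> 8) & 0xff) - (tmp & 0xff)) > 1;
}

static int contended_new(int tmp)
{
	return (((tmp >> 8) - tmp) & 0xff) > 1;
}

int main(void)
{
	/* Owner 0xfe holds the lock, ticket 0xff is waiting, and the next
	 * ticket to hand out has wrapped to 0x00: two tickets outstanding. */
	int tmp = (0x00 << 8) | 0xfe;

	assert(contended_new(tmp) == 1);	/* correctly reports contention */
	assert(contended_old(tmp) == 0);	/* misses it: 0x00 - 0xfe is negative */

	/* Without wraparound the two forms agree. */
	tmp = (0x05 << 8) | 0x03;
	assert(contended_old(tmp) == 1 && contended_new(tmp) == 1);
	return 0;
}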