Diffstat (limited to 'arch')
79 files changed, 569 insertions(+), 464 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 6e8f05c8a1c8..d7575554e407 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -215,7 +215,7 @@
215 | @ Slightly optimised to avoid incrementing the pointer twice | 215 | @ Slightly optimised to avoid incrementing the pointer twice |
216 | usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort | 216 | usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort |
217 | .if \rept == 2 | 217 | .if \rept == 2 |
218 | usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort | 218 | usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort |
219 | .endif | 219 | .endif |
220 | 220 | ||
221 | add\cond \ptr, #\rept * \inc | 221 | add\cond \ptr, #\rept * \inc |
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 08265993227f..48066ce9ea34 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -70,7 +70,8 @@ extern int kgdb_fault_expected;
70 | #define _GP_REGS 16 | 70 | #define _GP_REGS 16 |
71 | #define _FP_REGS 8 | 71 | #define _FP_REGS 8 |
72 | #define _EXTRA_REGS 2 | 72 | #define _EXTRA_REGS 2 |
73 | #define DBG_MAX_REG_NUM (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS) | 73 | #define GDB_MAX_REGS (_GP_REGS + (_FP_REGS * 3) + _EXTRA_REGS) |
74 | #define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS) | ||
74 | 75 | ||
75 | #define KGDB_MAX_NO_CPUS 1 | 76 | #define KGDB_MAX_NO_CPUS 1 |
76 | #define BUFMAX 400 | 77 | #define BUFMAX 400 |
@@ -93,7 +94,7 @@ extern int kgdb_fault_expected;
93 | #define _SPT 13 | 94 | #define _SPT 13 |
94 | #define _LR 14 | 95 | #define _LR 14 |
95 | #define _PC 15 | 96 | #define _PC 15 |
96 | #define _CPSR (DBG_MAX_REG_NUM - 1) | 97 | #define _CPSR (GDB_MAX_REGS - 1) |
97 | 98 | ||
98 | /* | 99 | /* |
99 | * So that we can denote the end of a frame for tracing, | 100 | * So that we can denote the end of a frame for tracing, |
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index d6e8b4d2e60d..778c2f7024ff 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -79,7 +79,7 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
79 | return; | 79 | return; |
80 | 80 | ||
81 | /* Initialize to zero */ | 81 | /* Initialize to zero */ |
82 | for (regno = 0; regno < DBG_MAX_REG_NUM; regno++) | 82 | for (regno = 0; regno < GDB_MAX_REGS; regno++) |
83 | gdb_regs[regno] = 0; | 83 | gdb_regs[regno] = 0; |
84 | 84 | ||
85 | /* Otherwise, we have only some registers from switch_to() */ | 85 | /* Otherwise, we have only some registers from switch_to() */ |
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 1e4cbd4e7be9..64f6bc1a9132 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
174 | */ | 174 | */ |
175 | .L_found: | 175 | .L_found: |
176 | #if __LINUX_ARM_ARCH__ >= 5 | 176 | #if __LINUX_ARM_ARCH__ >= 5 |
177 | rsb r1, r3, #0 | 177 | rsb r0, r3, #0 |
178 | and r3, r3, r1 | 178 | and r3, r3, r0 |
179 | clz r3, r3 | 179 | clz r3, r3 |
180 | rsb r3, r3, #31 | 180 | rsb r3, r3, #31 |
181 | add r0, r2, r3 | 181 | add r0, r2, r3 |
@@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be)
190 | addeq r2, r2, #1 | 190 | addeq r2, r2, #1 |
191 | mov r0, r2 | 191 | mov r0, r2 |
192 | #endif | 192 | #endif |
193 | cmp r1, r0 @ Clamp to maxbit | ||
194 | movlo r0, r1 | ||
193 | mov pc, lr | 195 | mov pc, lr |
194 | 196 | ||
diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/arch/arm/mach-at91/include/mach/at91_mci.h
index 57f8ee154943..27ac6f550fe3 100644
--- a/arch/arm/mach-at91/include/mach/at91_mci.h
+++ b/arch/arm/mach-at91/include/mach/at91_mci.h
@@ -74,6 +74,8 @@
74 | #define AT91_MCI_TRTYP_BLOCK (0 << 19) | 74 | #define AT91_MCI_TRTYP_BLOCK (0 << 19) |
75 | #define AT91_MCI_TRTYP_MULTIPLE (1 << 19) | 75 | #define AT91_MCI_TRTYP_MULTIPLE (1 << 19) |
76 | #define AT91_MCI_TRTYP_STREAM (2 << 19) | 76 | #define AT91_MCI_TRTYP_STREAM (2 << 19) |
77 | #define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19) | ||
78 | #define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19) | ||
77 | 79 | ||
78 | #define AT91_MCI_BLKR 0x18 /* Block Register */ | 80 | #define AT91_MCI_BLKR 0x18 /* Block Register */ |
79 | #define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */ | 81 | #define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */ |
diff --git a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
index d16ce7eb00e9..9b50442d4b9b 100644
--- a/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
+++ b/arch/arm/mach-cns3xxx/include/mach/debug-macro.S
@@ -10,7 +10,7 @@
10 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | .macro addruart,rx | 13 | .macro addruart,rx,rtmp |
14 | mrc p15, 0, \rx, c1, c0 | 14 | mrc p15, 0, \rx, c1, c0 |
15 | tst \rx, #1 @ MMU enabled? | 15 | tst \rx, #1 @ MMU enabled? |
16 | moveq \rx, #0x10000000 | 16 | moveq \rx, #0x10000000 |
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 38088c36936c..78defd71a829 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -369,7 +369,7 @@ static int __init cns3xxx_pcie_init(void)
369 | { | 369 | { |
370 | int i; | 370 | int i; |
371 | 371 | ||
372 | hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, | 372 | hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, 0, |
373 | "imprecise external abort"); | 373 | "imprecise external abort"); |
374 | 374 | ||
375 | for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) { | 375 | for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) { |
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 86aa689ef1aa..47010d8114b0 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -196,6 +196,10 @@ ENTRY(v6_flush_kern_dcache_area)
196 | * - end - virtual end address of region | 196 | * - end - virtual end address of region |
197 | */ | 197 | */ |
198 | v6_dma_inv_range: | 198 | v6_dma_inv_range: |
199 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
200 | ldrb r2, [r0] @ read for ownership | ||
201 | strb r2, [r0] @ write for ownership | ||
202 | #endif | ||
199 | tst r0, #D_CACHE_LINE_SIZE - 1 | 203 | tst r0, #D_CACHE_LINE_SIZE - 1 |
200 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 | 204 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 |
201 | #ifdef HARVARD_CACHE | 205 | #ifdef HARVARD_CACHE |
@@ -204,6 +208,10 @@ v6_dma_inv_range:
204 | mcrne p15, 0, r0, c7, c11, 1 @ clean unified line | 208 | mcrne p15, 0, r0, c7, c11, 1 @ clean unified line |
205 | #endif | 209 | #endif |
206 | tst r1, #D_CACHE_LINE_SIZE - 1 | 210 | tst r1, #D_CACHE_LINE_SIZE - 1 |
211 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
212 | ldrneb r2, [r1, #-1] @ read for ownership | ||
213 | strneb r2, [r1, #-1] @ write for ownership | ||
214 | #endif | ||
207 | bic r1, r1, #D_CACHE_LINE_SIZE - 1 | 215 | bic r1, r1, #D_CACHE_LINE_SIZE - 1 |
208 | #ifdef HARVARD_CACHE | 216 | #ifdef HARVARD_CACHE |
209 | mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line | 217 | mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line |
@@ -211,10 +219,6 @@ v6_dma_inv_range:
211 | mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line | 219 | mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line |
212 | #endif | 220 | #endif |
213 | 1: | 221 | 1: |
214 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
215 | ldr r2, [r0] @ read for ownership | ||
216 | str r2, [r0] @ write for ownership | ||
217 | #endif | ||
218 | #ifdef HARVARD_CACHE | 222 | #ifdef HARVARD_CACHE |
219 | mcr p15, 0, r0, c7, c6, 1 @ invalidate D line | 223 | mcr p15, 0, r0, c7, c6, 1 @ invalidate D line |
220 | #else | 224 | #else |
@@ -222,6 +226,10 @@ v6_dma_inv_range:
222 | #endif | 226 | #endif |
223 | add r0, r0, #D_CACHE_LINE_SIZE | 227 | add r0, r0, #D_CACHE_LINE_SIZE |
224 | cmp r0, r1 | 228 | cmp r0, r1 |
229 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
230 | ldrlo r2, [r0] @ read for ownership | ||
231 | strlo r2, [r0] @ write for ownership | ||
232 | #endif | ||
225 | blo 1b | 233 | blo 1b |
226 | mov r0, #0 | 234 | mov r0, #0 |
227 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | 235 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer |
@@ -256,12 +264,12 @@ v6_dma_clean_range:
256 | * - end - virtual end address of region | 264 | * - end - virtual end address of region |
257 | */ | 265 | */ |
258 | ENTRY(v6_dma_flush_range) | 266 | ENTRY(v6_dma_flush_range) |
259 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 | ||
260 | 1: | ||
261 | #ifdef CONFIG_DMA_CACHE_RWFO | 267 | #ifdef CONFIG_DMA_CACHE_RWFO |
262 | ldr r2, [r0] @ read for ownership | 268 | ldrb r2, [r0] @ read for ownership |
263 | str r2, [r0] @ write for ownership | 269 | strb r2, [r0] @ write for ownership |
264 | #endif | 270 | #endif |
271 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 | ||
272 | 1: | ||
265 | #ifdef HARVARD_CACHE | 273 | #ifdef HARVARD_CACHE |
266 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line | 274 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line |
267 | #else | 275 | #else |
@@ -269,6 +277,10 @@ ENTRY(v6_dma_flush_range)
269 | #endif | 277 | #endif |
270 | add r0, r0, #D_CACHE_LINE_SIZE | 278 | add r0, r0, #D_CACHE_LINE_SIZE |
271 | cmp r0, r1 | 279 | cmp r0, r1 |
280 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
281 | ldrlob r2, [r0] @ read for ownership | ||
282 | strlob r2, [r0] @ write for ownership | ||
283 | #endif | ||
272 | blo 1b | 284 | blo 1b |
273 | mov r0, #0 | 285 | mov r0, #0 |
274 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | 286 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer |
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 9b906dec1ca1..56036ff04deb 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -65,6 +65,30 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
65 | return ret; | 65 | return ret; |
66 | } | 66 | } |
67 | 67 | ||
68 | #if USE_SPLIT_PTLOCKS | ||
69 | /* | ||
70 | * If we are using split PTE locks, then we need to take the page | ||
71 | * lock here. Otherwise we are using shared mm->page_table_lock | ||
72 | * which is already locked, thus cannot take it. | ||
73 | */ | ||
74 | static inline void do_pte_lock(spinlock_t *ptl) | ||
75 | { | ||
76 | /* | ||
77 | * Use nested version here to indicate that we are already | ||
78 | * holding one similar spinlock. | ||
79 | */ | ||
80 | spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); | ||
81 | } | ||
82 | |||
83 | static inline void do_pte_unlock(spinlock_t *ptl) | ||
84 | { | ||
85 | spin_unlock(ptl); | ||
86 | } | ||
87 | #else /* !USE_SPLIT_PTLOCKS */ | ||
88 | static inline void do_pte_lock(spinlock_t *ptl) {} | ||
89 | static inline void do_pte_unlock(spinlock_t *ptl) {} | ||
90 | #endif /* USE_SPLIT_PTLOCKS */ | ||
91 | |||
68 | static int adjust_pte(struct vm_area_struct *vma, unsigned long address, | 92 | static int adjust_pte(struct vm_area_struct *vma, unsigned long address, |
69 | unsigned long pfn) | 93 | unsigned long pfn) |
70 | { | 94 | { |
@@ -89,11 +113,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
89 | */ | 113 | */ |
90 | ptl = pte_lockptr(vma->vm_mm, pmd); | 114 | ptl = pte_lockptr(vma->vm_mm, pmd); |
91 | pte = pte_offset_map_nested(pmd, address); | 115 | pte = pte_offset_map_nested(pmd, address); |
92 | spin_lock(ptl); | 116 | do_pte_lock(ptl); |
93 | 117 | ||
94 | ret = do_adjust_pte(vma, address, pfn, pte); | 118 | ret = do_adjust_pte(vma, address, pfn, pte); |
95 | 119 | ||
96 | spin_unlock(ptl); | 120 | do_pte_unlock(ptl); |
97 | pte_unmap_nested(pte); | 121 | pte_unmap_nested(pte); |
98 | 122 | ||
99 | return ret; | 123 | return ret; |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7185b00650fe..4e2f620de09b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -282,6 +282,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
282 | memblock_reserve(__pa(_stext), _end - _stext); | 282 | memblock_reserve(__pa(_stext), _end - _stext); |
283 | #endif | 283 | #endif |
284 | #ifdef CONFIG_BLK_DEV_INITRD | 284 | #ifdef CONFIG_BLK_DEV_INITRD |
285 | if (phys_initrd_size && | ||
286 | memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { | ||
287 | pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", | ||
288 | phys_initrd_start, phys_initrd_size); | ||
289 | phys_initrd_start = phys_initrd_size = 0; | ||
290 | } | ||
285 | if (phys_initrd_size) { | 291 | if (phys_initrd_size) { |
286 | memblock_reserve(phys_initrd_start, phys_initrd_size); | 292 | memblock_reserve(phys_initrd_start, phys_initrd_size); |
287 | 293 | ||
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index ec7eddf9e525..f5c5b8da9a87 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -30,6 +30,7 @@
30 | #include <linux/irq.h> | 30 | #include <linux/irq.h> |
31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
32 | #include <linux/slab.h> | 32 | #include <linux/slab.h> |
33 | #include <linux/delay.h> | ||
33 | 34 | ||
34 | #include <asm/system.h> | 35 | #include <asm/system.h> |
35 | #include <mach/hardware.h> | 36 | #include <mach/hardware.h> |
@@ -996,11 +997,17 @@ void omap_start_dma(int lch)
996 | l = dma_read(CCR(lch)); | 997 | l = dma_read(CCR(lch)); |
997 | 998 | ||
998 | /* | 999 | /* |
999 | * Errata: On ES2.0 BUFFERING disable must be set. | 1000 | * Errata: Inter Frame DMA buffering issue (All OMAP2420 and |
1000 | * This will always fail on ES1.0 | 1001 | * OMAP2430ES1.0): DMA will wrongly buffer elements if packing and |
1002 | * bursting is enabled. This might result in data gets stalled in | ||
1003 | * FIFO at the end of the block. | ||
1004 | * Workaround: DMA channels must have BUFFERING_DISABLED bit set to | ||
1005 | * guarantee no data will stay in the DMA FIFO in case inter frame | ||
1006 | * buffering occurs. | ||
1001 | */ | 1007 | */ |
1002 | if (cpu_is_omap24xx()) | 1008 | if (cpu_is_omap2420() || |
1003 | l |= OMAP_DMA_CCR_EN; | 1009 | (cpu_is_omap2430() && (omap_type() == OMAP2430_REV_ES1_0))) |
1010 | l |= OMAP_DMA_CCR_BUFFERING_DISABLE; | ||
1004 | 1011 | ||
1005 | l |= OMAP_DMA_CCR_EN; | 1012 | l |= OMAP_DMA_CCR_EN; |
1006 | dma_write(l, CCR(lch)); | 1013 | dma_write(l, CCR(lch)); |
@@ -1018,8 +1025,39 @@ void omap_stop_dma(int lch)
1018 | dma_write(0, CICR(lch)); | 1025 | dma_write(0, CICR(lch)); |
1019 | 1026 | ||
1020 | l = dma_read(CCR(lch)); | 1027 | l = dma_read(CCR(lch)); |
1021 | l &= ~OMAP_DMA_CCR_EN; | 1028 | /* OMAP3 Errata i541: sDMA FIFO draining does not finish */ |
1022 | dma_write(l, CCR(lch)); | 1029 | if (cpu_is_omap34xx() && (l & OMAP_DMA_CCR_SEL_SRC_DST_SYNC)) { |
1030 | int i = 0; | ||
1031 | u32 sys_cf; | ||
1032 | |||
1033 | /* Configure No-Standby */ | ||
1034 | l = dma_read(OCP_SYSCONFIG); | ||
1035 | sys_cf = l; | ||
1036 | l &= ~DMA_SYSCONFIG_MIDLEMODE_MASK; | ||
1037 | l |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE); | ||
1038 | dma_write(l , OCP_SYSCONFIG); | ||
1039 | |||
1040 | l = dma_read(CCR(lch)); | ||
1041 | l &= ~OMAP_DMA_CCR_EN; | ||
1042 | dma_write(l, CCR(lch)); | ||
1043 | |||
1044 | /* Wait for sDMA FIFO drain */ | ||
1045 | l = dma_read(CCR(lch)); | ||
1046 | while (i < 100 && (l & (OMAP_DMA_CCR_RD_ACTIVE | | ||
1047 | OMAP_DMA_CCR_WR_ACTIVE))) { | ||
1048 | udelay(5); | ||
1049 | i++; | ||
1050 | l = dma_read(CCR(lch)); | ||
1051 | } | ||
1052 | if (i >= 100) | ||
1053 | printk(KERN_ERR "DMA drain did not complete on " | ||
1054 | "lch %d\n", lch); | ||
1055 | /* Restore OCP_SYSCONFIG */ | ||
1056 | dma_write(sys_cf, OCP_SYSCONFIG); | ||
1057 | } else { | ||
1058 | l &= ~OMAP_DMA_CCR_EN; | ||
1059 | dma_write(l, CCR(lch)); | ||
1060 | } | ||
1023 | 1061 | ||
1024 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { | 1062 | if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { |
1025 | int next_lch, cur_lch = lch; | 1063 | int next_lch, cur_lch = lch; |
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index af3a03941add..cf66f85a011c 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -335,6 +335,10 @@
335 | #define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11) | 335 | #define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11) |
336 | 336 | ||
337 | #define OMAP_DMA_CCR_EN (1 << 7) | 337 | #define OMAP_DMA_CCR_EN (1 << 7) |
338 | #define OMAP_DMA_CCR_RD_ACTIVE (1 << 9) | ||
339 | #define OMAP_DMA_CCR_WR_ACTIVE (1 << 10) | ||
340 | #define OMAP_DMA_CCR_SEL_SRC_DST_SYNC (1 << 24) | ||
341 | #define OMAP_DMA_CCR_BUFFERING_DISABLE (1 << 25) | ||
338 | 342 | ||
339 | #define OMAP_DMA_DATA_TYPE_S8 0x00 | 343 | #define OMAP_DMA_DATA_TYPE_S8 0x00 |
340 | #define OMAP_DMA_DATA_TYPE_S16 0x01 | 344 | #define OMAP_DMA_DATA_TYPE_S16 0x01 |
diff --git a/arch/avr32/include/asm/syscalls.h b/arch/avr32/include/asm/syscalls.h
index ab608b70b24d..730a461c606f 100644
--- a/arch/avr32/include/asm/syscalls.h
+++ b/arch/avr32/include/asm/syscalls.h
@@ -16,18 +16,9 @@
16 | #include <linux/signal.h> | 16 | #include <linux/signal.h> |
17 | 17 | ||
18 | /* kernel/process.c */ | 18 | /* kernel/process.c */ |
19 | asmlinkage int sys_fork(struct pt_regs *); | ||
20 | asmlinkage int sys_clone(unsigned long, unsigned long, | 19 | asmlinkage int sys_clone(unsigned long, unsigned long, |
21 | unsigned long, unsigned long, | 20 | unsigned long, unsigned long, |
22 | struct pt_regs *); | 21 | struct pt_regs *); |
23 | asmlinkage int sys_vfork(struct pt_regs *); | ||
24 | asmlinkage int sys_execve(const char __user *, char __user *__user *, | ||
25 | char __user *__user *, struct pt_regs *); | ||
26 | |||
27 | /* kernel/signal.c */ | ||
28 | asmlinkage int sys_sigaltstack(const stack_t __user *, stack_t __user *, | ||
29 | struct pt_regs *); | ||
30 | asmlinkage int sys_rt_sigreturn(struct pt_regs *); | ||
31 | 22 | ||
32 | /* mm/cache.c */ | 23 | /* mm/cache.c */ |
33 | asmlinkage int sys_cacheflush(int, void __user *, size_t); | 24 | asmlinkage int sys_cacheflush(int, void __user *, size_t); |
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 592c7079de88..b07d990bbed8 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -72,12 +72,16 @@ export MMU DTB
72 | 72 | ||
73 | all: linux.bin | 73 | all: linux.bin |
74 | 74 | ||
75 | BOOT_TARGETS = linux.bin linux.bin.gz simpleImage.% | 75 | # With make 3.82 we cannot mix normal and wildcard targets |
76 | BOOT_TARGETS1 = linux.bin linux.bin.gz | ||
77 | BOOT_TARGETS2 = simpleImage.% | ||
76 | 78 | ||
77 | archclean: | 79 | archclean: |
78 | $(Q)$(MAKE) $(clean)=$(boot) | 80 | $(Q)$(MAKE) $(clean)=$(boot) |
79 | 81 | ||
80 | $(BOOT_TARGETS): vmlinux | 82 | $(BOOT_TARGETS1): vmlinux |
83 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | ||
84 | $(BOOT_TARGETS2): vmlinux | ||
81 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 85 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
82 | 86 | ||
83 | define archhelp | 87 | define archhelp |
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 5742bb4d78f4..5c0a3575877c 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -5,7 +5,7 @@
5 | * | 5 | * |
6 | * Copyright (c) 2009 Qi Hardware inc., | 6 | * Copyright (c) 2009 Qi Hardware inc., |
7 | * Author: Xiangfu Liu <xiangfu@qi-hardware.com> | 7 | * Author: Xiangfu Liu <xiangfu@qi-hardware.com> |
8 | * Copyright 2010, Lars-Petrer Clausen <lars@metafoo.de> | 8 | * Copyright 2010, Lars-Peter Clausen <lars@metafoo.de> |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 or later | 11 | * it under the terms of the GNU General Public License version 2 or later |
@@ -235,7 +235,7 @@ static const unsigned int qi_lb60_keypad_rows[] = {
235 | QI_LB60_GPIO_KEYIN(3), | 235 | QI_LB60_GPIO_KEYIN(3), |
236 | QI_LB60_GPIO_KEYIN(4), | 236 | QI_LB60_GPIO_KEYIN(4), |
237 | QI_LB60_GPIO_KEYIN(5), | 237 | QI_LB60_GPIO_KEYIN(5), |
238 | QI_LB60_GPIO_KEYIN(7), | 238 | QI_LB60_GPIO_KEYIN(6), |
239 | QI_LB60_GPIO_KEYIN8, | 239 | QI_LB60_GPIO_KEYIN8, |
240 | }; | 240 | }; |
241 | 241 | ||
diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
index df971fa0c32f..4896ed090585 100644
--- a/arch/parisc/kernel/firmware.c
+++ b/arch/parisc/kernel/firmware.c
@@ -1126,15 +1126,13 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
1126 | unsigned int i; | 1126 | unsigned int i; |
1127 | unsigned long flags; | 1127 | unsigned long flags; |
1128 | 1128 | ||
1129 | for (i = 0; i < count && i < 79;) { | 1129 | for (i = 0; i < count;) { |
1130 | switch(str[i]) { | 1130 | switch(str[i]) { |
1131 | case '\n': | 1131 | case '\n': |
1132 | iodc_dbuf[i+0] = '\r'; | 1132 | iodc_dbuf[i+0] = '\r'; |
1133 | iodc_dbuf[i+1] = '\n'; | 1133 | iodc_dbuf[i+1] = '\n'; |
1134 | i += 2; | 1134 | i += 2; |
1135 | goto print; | 1135 | goto print; |
1136 | case '\b': /* BS */ | ||
1137 | i--; /* overwrite last */ | ||
1138 | default: | 1136 | default: |
1139 | iodc_dbuf[i] = str[i]; | 1137 | iodc_dbuf[i] = str[i]; |
1140 | i++; | 1138 | i++; |
@@ -1142,15 +1140,6 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
1142 | } | 1140 | } |
1143 | } | 1141 | } |
1144 | 1142 | ||
1145 | /* if we're at the end of line, and not already inserting a newline, | ||
1146 | * insert one anyway. iodc console doesn't claim to support >79 char | ||
1147 | * lines. don't account for this in the return value. | ||
1148 | */ | ||
1149 | if (i == 79 && iodc_dbuf[i-1] != '\n') { | ||
1150 | iodc_dbuf[i+0] = '\r'; | ||
1151 | iodc_dbuf[i+1] = '\n'; | ||
1152 | } | ||
1153 | |||
1154 | print: | 1143 | print: |
1155 | spin_lock_irqsave(&pdc_lock, flags); | 1144 | spin_lock_irqsave(&pdc_lock, flags); |
1156 | real32_call(PAGE0->mem_cons.iodc_io, | 1145 | real32_call(PAGE0->mem_cons.iodc_io, |
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
index 8bcb10b92677..d086e0f8bbd3 100644
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ b/arch/powerpc/boot/dts/p1022ds.dts
@@ -280,13 +280,13 @@
280 | ranges = <0x0 0xc100 0x200>; | 280 | ranges = <0x0 0xc100 0x200>; |
281 | cell-index = <1>; | 281 | cell-index = <1>; |
282 | dma00: dma-channel@0 { | 282 | dma00: dma-channel@0 { |
283 | compatible = "fsl,eloplus-dma-channel"; | 283 | compatible = "fsl,ssi-dma-channel"; |
284 | reg = <0x0 0x80>; | 284 | reg = <0x0 0x80>; |
285 | cell-index = <0>; | 285 | cell-index = <0>; |
286 | interrupts = <76 2>; | 286 | interrupts = <76 2>; |
287 | }; | 287 | }; |
288 | dma01: dma-channel@80 { | 288 | dma01: dma-channel@80 { |
289 | compatible = "fsl,eloplus-dma-channel"; | 289 | compatible = "fsl,ssi-dma-channel"; |
290 | reg = <0x80 0x80>; | 290 | reg = <0x80 0x80>; |
291 | cell-index = <1>; | 291 | cell-index = <1>; |
292 | interrupts = <77 2>; | 292 | interrupts = <77 2>; |
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55cba4a8a959..f8cd9fba4d35 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -18,7 +18,7 @@
18 | #include <asm/mmu.h> | 18 | #include <asm/mmu.h> |
19 | 19 | ||
20 | _GLOBAL(__setup_cpu_603) | 20 | _GLOBAL(__setup_cpu_603) |
21 | mflr r4 | 21 | mflr r5 |
22 | BEGIN_MMU_FTR_SECTION | 22 | BEGIN_MMU_FTR_SECTION |
23 | li r10,0 | 23 | li r10,0 |
24 | mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ | 24 | mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ |
@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
27 | bl __init_fpu_registers | 27 | bl __init_fpu_registers |
28 | END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) | 28 | END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) |
29 | bl setup_common_caches | 29 | bl setup_common_caches |
30 | mtlr r4 | 30 | mtlr r5 |
31 | blr | 31 | blr |
32 | _GLOBAL(__setup_cpu_604) | 32 | _GLOBAL(__setup_cpu_604) |
33 | mflr r4 | 33 | mflr r5 |
34 | bl setup_common_caches | 34 | bl setup_common_caches |
35 | bl setup_604_hid0 | 35 | bl setup_604_hid0 |
36 | mtlr r4 | 36 | mtlr r5 |
37 | blr | 37 | blr |
38 | _GLOBAL(__setup_cpu_750) | 38 | _GLOBAL(__setup_cpu_750) |
39 | mflr r4 | 39 | mflr r5 |
40 | bl __init_fpu_registers | 40 | bl __init_fpu_registers |
41 | bl setup_common_caches | 41 | bl setup_common_caches |
42 | bl setup_750_7400_hid0 | 42 | bl setup_750_7400_hid0 |
43 | mtlr r4 | 43 | mtlr r5 |
44 | blr | 44 | blr |
45 | _GLOBAL(__setup_cpu_750cx) | 45 | _GLOBAL(__setup_cpu_750cx) |
46 | mflr r4 | 46 | mflr r5 |
47 | bl __init_fpu_registers | 47 | bl __init_fpu_registers |
48 | bl setup_common_caches | 48 | bl setup_common_caches |
49 | bl setup_750_7400_hid0 | 49 | bl setup_750_7400_hid0 |
50 | bl setup_750cx | 50 | bl setup_750cx |
51 | mtlr r4 | 51 | mtlr r5 |
52 | blr | 52 | blr |
53 | _GLOBAL(__setup_cpu_750fx) | 53 | _GLOBAL(__setup_cpu_750fx) |
54 | mflr r4 | 54 | mflr r5 |
55 | bl __init_fpu_registers | 55 | bl __init_fpu_registers |
56 | bl setup_common_caches | 56 | bl setup_common_caches |
57 | bl setup_750_7400_hid0 | 57 | bl setup_750_7400_hid0 |
58 | bl setup_750fx | 58 | bl setup_750fx |
59 | mtlr r4 | 59 | mtlr r5 |
60 | blr | 60 | blr |
61 | _GLOBAL(__setup_cpu_7400) | 61 | _GLOBAL(__setup_cpu_7400) |
62 | mflr r4 | 62 | mflr r5 |
63 | bl __init_fpu_registers | 63 | bl __init_fpu_registers |
64 | bl setup_7400_workarounds | 64 | bl setup_7400_workarounds |
65 | bl setup_common_caches | 65 | bl setup_common_caches |
66 | bl setup_750_7400_hid0 | 66 | bl setup_750_7400_hid0 |
67 | mtlr r4 | 67 | mtlr r5 |
68 | blr | 68 | blr |
69 | _GLOBAL(__setup_cpu_7410) | 69 | _GLOBAL(__setup_cpu_7410) |
70 | mflr r4 | 70 | mflr r5 |
71 | bl __init_fpu_registers | 71 | bl __init_fpu_registers |
72 | bl setup_7410_workarounds | 72 | bl setup_7410_workarounds |
73 | bl setup_common_caches | 73 | bl setup_common_caches |
74 | bl setup_750_7400_hid0 | 74 | bl setup_750_7400_hid0 |
75 | li r3,0 | 75 | li r3,0 |
76 | mtspr SPRN_L2CR2,r3 | 76 | mtspr SPRN_L2CR2,r3 |
77 | mtlr r4 | 77 | mtlr r5 |
78 | blr | 78 | blr |
79 | _GLOBAL(__setup_cpu_745x) | 79 | _GLOBAL(__setup_cpu_745x) |
80 | mflr r4 | 80 | mflr r5 |
81 | bl setup_common_caches | 81 | bl setup_common_caches |
82 | bl setup_745x_specifics | 82 | bl setup_745x_specifics |
83 | mtlr r4 | 83 | mtlr r5 |
84 | blr | 84 | blr |
85 | 85 | ||
86 | /* Enable caches for 603's, 604, 750 & 7400 */ | 86 | /* Enable caches for 603's, 604, 750 & 7400 */ |
@@ -194,10 +194,10 @@ setup_750cx:
194 | cror 4*cr0+eq,4*cr0+eq,4*cr1+eq | 194 | cror 4*cr0+eq,4*cr0+eq,4*cr1+eq |
195 | cror 4*cr0+eq,4*cr0+eq,4*cr2+eq | 195 | cror 4*cr0+eq,4*cr0+eq,4*cr2+eq |
196 | bnelr | 196 | bnelr |
197 | lwz r6,CPU_SPEC_FEATURES(r5) | 197 | lwz r6,CPU_SPEC_FEATURES(r4) |
198 | li r7,CPU_FTR_CAN_NAP | 198 | li r7,CPU_FTR_CAN_NAP |
199 | andc r6,r6,r7 | 199 | andc r6,r6,r7 |
200 | stw r6,CPU_SPEC_FEATURES(r5) | 200 | stw r6,CPU_SPEC_FEATURES(r4) |
201 | blr | 201 | blr |
202 | 202 | ||
203 | /* 750fx specific | 203 | /* 750fx specific |
@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
225 | andis. r11,r11,L3CR_L3E@h | 225 | andis. r11,r11,L3CR_L3E@h |
226 | beq 1f | 226 | beq 1f |
227 | END_FTR_SECTION_IFSET(CPU_FTR_L3CR) | 227 | END_FTR_SECTION_IFSET(CPU_FTR_L3CR) |
228 | lwz r6,CPU_SPEC_FEATURES(r5) | 228 | lwz r6,CPU_SPEC_FEATURES(r4) |
229 | andi. r0,r6,CPU_FTR_L3_DISABLE_NAP | 229 | andi. r0,r6,CPU_FTR_L3_DISABLE_NAP |
230 | beq 1f | 230 | beq 1f |
231 | li r7,CPU_FTR_CAN_NAP | 231 | li r7,CPU_FTR_CAN_NAP |
232 | andc r6,r6,r7 | 232 | andc r6,r6,r7 |
233 | stw r6,CPU_SPEC_FEATURES(r5) | 233 | stw r6,CPU_SPEC_FEATURES(r4) |
234 | 1: | 234 | 1: |
235 | mfspr r11,SPRN_HID0 | 235 | mfspr r11,SPRN_HID0 |
236 | 236 | ||
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index 8eff48e20dba..3fee685de4df 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -169,9 +169,11 @@ static int p970_marked_instr_event(u64 event)
169 | switch (unit) { | 169 | switch (unit) { |
170 | case PM_VPU: | 170 | case PM_VPU: |
171 | mask = 0x4c; /* byte 0 bits 2,3,6 */ | 171 | mask = 0x4c; /* byte 0 bits 2,3,6 */ |
172 | break; | ||
172 | case PM_LSU0: | 173 | case PM_LSU0: |
173 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ | 174 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ |
174 | mask = 0x085dff00; | 175 | mask = 0x085dff00; |
176 | break; | ||
175 | case PM_LSU1L: | 177 | case PM_LSU1L: |
176 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ | 178 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ |
177 | break; | 179 | break; |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 09dffe6efa46..1eb64ba43a08 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1122,7 +1122,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1122 | else | 1122 | else |
1123 | #endif /* CONFIG_PPC_HAS_HASH_64K */ | 1123 | #endif /* CONFIG_PPC_HAS_HASH_64K */ |
1124 | rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, | 1124 | rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, |
1125 | subpage_protection(pgdir, ea)); | 1125 | subpage_protection(mm, ea)); |
1126 | 1126 | ||
1127 | /* Dump some info in case of hash insertion failure, they should | 1127 | /* Dump some info in case of hash insertion failure, they should |
1128 | * never happen so it is really useful to know if/when they do | 1128 | * never happen so it is really useful to know if/when they do |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 002878ccf90b..1537ab31880b 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -181,7 +181,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
181 | dbg("removing cpu %lu from node %d\n", cpu, node); | 181 | dbg("removing cpu %lu from node %d\n", cpu, node); |
182 | 182 | ||
183 | if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { | 183 | if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { |
184 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); | 184 | cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); |
185 | } else { | 185 | } else { |
186 | printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", | 186 | printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", |
187 | cpu, node); | 187 | cpu, node); |
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index cf79b46d8f88..568b503d68bd 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -680,6 +680,13 @@ EXPORT_SYMBOL(arch_free_page);
680 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | 680 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ |
681 | extern long hcall_tracepoint_refcount; | 681 | extern long hcall_tracepoint_refcount; |
682 | 682 | ||
683 | /* | ||
684 | * Since the tracing code might execute hcalls we need to guard against | ||
685 | * recursion. One example of this are spinlocks calling H_YIELD on | ||
686 | * shared processor partitions. | ||
687 | */ | ||
688 | static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); | ||
689 | |||
683 | void hcall_tracepoint_regfunc(void) | 690 | void hcall_tracepoint_regfunc(void) |
684 | { | 691 | { |
685 | hcall_tracepoint_refcount++; | 692 | hcall_tracepoint_refcount++; |
@@ -692,12 +699,42 @@ void hcall_tracepoint_unregfunc(void)
692 | 699 | ||
693 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) | 700 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) |
694 | { | 701 | { |
702 | unsigned long flags; | ||
703 | unsigned int *depth; | ||
704 | |||
705 | local_irq_save(flags); | ||
706 | |||
707 | depth = &__get_cpu_var(hcall_trace_depth); | ||
708 | |||
709 | if (*depth) | ||
710 | goto out; | ||
711 | |||
712 | (*depth)++; | ||
695 | trace_hcall_entry(opcode, args); | 713 | trace_hcall_entry(opcode, args); |
714 | (*depth)--; | ||
715 | |||
716 | out: | ||
717 | local_irq_restore(flags); | ||
696 | } | 718 | } |
697 | 719 | ||
698 | void __trace_hcall_exit(long opcode, unsigned long retval, | 720 | void __trace_hcall_exit(long opcode, unsigned long retval, |
699 | unsigned long *retbuf) | 721 | unsigned long *retbuf) |
700 | { | 722 | { |
723 | unsigned long flags; | ||
724 | unsigned int *depth; | ||
725 | |||
726 | local_irq_save(flags); | ||
727 | |||
728 | depth = &__get_cpu_var(hcall_trace_depth); | ||
729 | |||
730 | if (*depth) | ||
731 | goto out; | ||
732 | |||
733 | (*depth)++; | ||
701 | trace_hcall_exit(opcode, retval, retbuf); | 734 | trace_hcall_exit(opcode, retval, retbuf); |
735 | (*depth)--; | ||
736 | |||
737 | out: | ||
738 | local_irq_restore(flags); | ||
702 | } | 739 | } |
703 | #endif | 740 | #endif |
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index 3017532319c8..b7f02a484e54 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -954,7 +954,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
954 | if (dsr & DOORBELL_DSR_QFI) { | 954 | if (dsr & DOORBELL_DSR_QFI) { |
955 | pr_info("RIO: doorbell queue full\n"); | 955 | pr_info("RIO: doorbell queue full\n"); |
956 | out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); | 956 | out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); |
957 | goto out; | ||
958 | } | 957 | } |
959 | 958 | ||
960 | /* XXX Need to check/dispatch until queue empty */ | 959 | /* XXX Need to check/dispatch until queue empty */ |
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index ac151399ef34..1995c1712fc8 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
95 | static int notrace s390_revalidate_registers(struct mci *mci) | 95 | static int notrace s390_revalidate_registers(struct mci *mci) |
96 | { | 96 | { |
97 | int kill_task; | 97 | int kill_task; |
98 | u64 tmpclock; | ||
99 | u64 zero; | 98 | u64 zero; |
100 | void *fpt_save_area, *fpt_creg_save_area; | 99 | void *fpt_save_area, *fpt_creg_save_area; |
101 | 100 | ||
@@ -214,11 +213,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
214 | : "0", "cc"); | 213 | : "0", "cc"); |
215 | #endif | 214 | #endif |
216 | /* Revalidate clock comparator register */ | 215 | /* Revalidate clock comparator register */ |
217 | asm volatile( | 216 | if (S390_lowcore.clock_comparator == -1) |
218 | " stck 0(%1)\n" | 217 | set_clock_comparator(S390_lowcore.mcck_clock); |
219 | " sckc 0(%1)" | 218 | else |
220 | : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); | 219 | set_clock_comparator(S390_lowcore.clock_comparator); |
221 | |||
222 | /* Check if old PSW is valid */ | 220 | /* Check if old PSW is valid */ |
223 | if (!mci->wp) | 221 | if (!mci->wp) |
224 | /* | 222 | /* |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 3479f1b0d4e0..c1e326cedea5 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -19,6 +19,7 @@
19 | #include <linux/kernel_stat.h> | 19 | #include <linux/kernel_stat.h> |
20 | #include <linux/rcupdate.h> | 20 | #include <linux/rcupdate.h> |
21 | #include <linux/posix-timers.h> | 21 | #include <linux/posix-timers.h> |
22 | #include <linux/cpu.h> | ||
22 | 23 | ||
23 | #include <asm/s390_ext.h> | 24 | #include <asm/s390_ext.h> |
24 | #include <asm/timer.h> | 25 | #include <asm/timer.h> |
@@ -565,6 +566,23 @@ void init_cpu_vtimer(void)
565 | __ctl_set_bit(0,10); | 566 | __ctl_set_bit(0,10); |
566 | } | 567 | } |
567 | 568 | ||
569 | static int __cpuinit s390_nohz_notify(struct notifier_block *self, | ||
570 | unsigned long action, void *hcpu) | ||
571 | { | ||
572 | struct s390_idle_data *idle; | ||
573 | long cpu = (long) hcpu; | ||
574 | |||
575 | idle = &per_cpu(s390_idle, cpu); | ||
576 | switch (action) { | ||
577 | case CPU_DYING: | ||
578 | case CPU_DYING_FROZEN: | ||
579 | idle->nohz_delay = 0; | ||
580 | default: | ||
581 | break; | ||
582 | } | ||
583 | return NOTIFY_OK; | ||
584 | } | ||
585 | |||
568 | void __init vtime_init(void) | 586 | void __init vtime_init(void) |
569 | { | 587 | { |
570 | /* request the cpu timer external interrupt */ | 588 | /* request the cpu timer external interrupt */ |
@@ -573,5 +591,6 @@ void __init vtime_init(void)
573 | 591 | ||
574 | /* Enable cpu timer interrupts on the boot cpu. */ | 592 | /* Enable cpu timer interrupts on the boot cpu. */ |
575 | init_cpu_vtimer(); | 593 | init_cpu_vtimer(); |
594 | cpu_notifier(s390_nohz_notify, 0); | ||
576 | } | 595 | } |
577 | 596 | ||
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 752b362bf651..7c37ec359ec2 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned long long usecs)
29 | { | 29 | { |
30 | unsigned long mask, cr0, cr0_saved; | 30 | unsigned long mask, cr0, cr0_saved; |
31 | u64 clock_saved; | 31 | u64 clock_saved; |
32 | u64 end; | ||
32 | 33 | ||
34 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | ||
35 | end = get_clock() + (usecs << 12); | ||
33 | clock_saved = local_tick_disable(); | 36 | clock_saved = local_tick_disable(); |
34 | set_clock_comparator(get_clock() + (usecs << 12)); | ||
35 | __ctl_store(cr0_saved, 0, 0); | 37 | __ctl_store(cr0_saved, 0, 0); |
36 | cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; | 38 | cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; |
37 | __ctl_load(cr0 , 0, 0); | 39 | __ctl_load(cr0 , 0, 0); |
38 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | ||
39 | lockdep_off(); | 40 | lockdep_off(); |
40 | trace_hardirqs_on(); | 41 | do { |
41 | __load_psw_mask(mask); | 42 | set_clock_comparator(end); |
42 | local_irq_disable(); | 43 | trace_hardirqs_on(); |
44 | __load_psw_mask(mask); | ||
45 | local_irq_disable(); | ||
46 | } while (get_clock() < end); | ||
43 | lockdep_on(); | 47 | lockdep_on(); |
44 | __ctl_load(cr0_saved, 0, 0); | 48 | __ctl_load(cr0_saved, 0, 0); |
45 | local_tick_enable(clock_saved); | 49 | local_tick_enable(clock_saved); |
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index b237d525d592..34ba197880dd 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -322,7 +322,15 @@ __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
322 | * mapping must be done by the PMB or by using page tables. | 322 | * mapping must be done by the PMB or by using page tables. |
323 | */ | 323 | */ |
324 | if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { | 324 | if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) { |
325 | if (unlikely(pgprot_val(prot) & _PAGE_CACHABLE)) | 325 | u64 flags = pgprot_val(prot); |
326 | |||
327 | /* | ||
328 | * Anything using the legacy PTEA space attributes needs | ||
329 | * to be kicked down to page table mappings. | ||
330 | */ | ||
331 | if (unlikely(flags & _PAGE_PCC_MASK)) | ||
332 | return NULL; | ||
333 | if (unlikely(flags & _PAGE_CACHABLE)) | ||
326 | return (void __iomem *)P1SEGADDR(offset); | 334 | return (void __iomem *)P1SEGADDR(offset); |
327 | 335 | ||
328 | return (void __iomem *)P2SEGADDR(offset); | 336 | return (void __iomem *)P2SEGADDR(offset); |
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index e172d696e52b..45743bffe865 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -76,6 +76,10 @@
76 | /* Wrapper for extended mode pgprot twiddling */ | 76 | /* Wrapper for extended mode pgprot twiddling */ |
77 | #define _PAGE_EXT(x) ((unsigned long long)(x) << 32) | 77 | #define _PAGE_EXT(x) ((unsigned long long)(x) << 32) |
78 | 78 | ||
79 | #ifdef CONFIG_X2TLB | ||
80 | #define _PAGE_PCC_MASK 0x00000000 /* No legacy PTEA support */ | ||
81 | #else | ||
82 | |||
79 | /* software: moves to PTEA.TC (Timing Control) */ | 83 | /* software: moves to PTEA.TC (Timing Control) */ |
80 | #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ | 84 | #define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */ |
81 | #define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ | 85 | #define _PAGE_PCC_AREA6 0x80000000 /* use BSC registers for area6 */ |
@@ -89,7 +93,8 @@
89 | #define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */ | 93 | #define _PAGE_PCC_ATR8 0x60000000 /* Attribute Memory space, 8 bit bus */ |
90 | #define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */ | 94 | #define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */ |
91 | 95 | ||
92 | #ifndef CONFIG_X2TLB | 96 | #define _PAGE_PCC_MASK 0xe0000001 |
97 | |||
93 | /* copy the ptea attributes */ | 98 | /* copy the ptea attributes */ |
94 | static inline unsigned long copy_ptea_attributes(unsigned long x) | 99 | static inline unsigned long copy_ptea_attributes(unsigned long x) |
95 | { | 100 | { |
diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h
index be201fdc97aa..ae717e3c26d6 100644
--- a/arch/sh/include/asm/syscalls_32.h
+++ b/arch/sh/include/asm/syscalls_32.h
@@ -19,9 +19,10 @@ asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
19 | asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, | 19 | asmlinkage int sys_vfork(unsigned long r4, unsigned long r5, |
20 | unsigned long r6, unsigned long r7, | 20 | unsigned long r6, unsigned long r7, |
21 | struct pt_regs __regs); | 21 | struct pt_regs __regs); |
22 | asmlinkage int sys_execve(const char __user *ufilename, char __user * __user *uargv, | 22 | asmlinkage int sys_execve(const char __user *ufilename, |
23 | char __user * __user *uenvp, unsigned long r7, | 23 | const char __user *const __user *uargv, |
24 | struct pt_regs __regs); | 24 | const char __user *const __user *uenvp, |
25 | unsigned long r7, struct pt_regs __regs); | ||
25 | asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5, | 26 | asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5, |
26 | unsigned long r6, unsigned long r7, | 27 | unsigned long r6, unsigned long r7, |
27 | struct pt_regs __regs); | 28 | struct pt_regs __regs); |
diff --git a/arch/sparc/include/asm/openprom.h b/arch/sparc/include/asm/openprom.h
index 963e1a45c35f..f61a5017878f 100644
--- a/arch/sparc/include/asm/openprom.h
+++ b/arch/sparc/include/asm/openprom.h
@@ -37,7 +37,7 @@ struct linux_dev_v2_funcs {
37 | int (*v2_dev_open)(char *devpath); | 37 | int (*v2_dev_open)(char *devpath); |
38 | void (*v2_dev_close)(int d); | 38 | void (*v2_dev_close)(int d); |
39 | int (*v2_dev_read)(int d, char *buf, int nbytes); | 39 | int (*v2_dev_read)(int d, char *buf, int nbytes); |
40 | int (*v2_dev_write)(int d, char *buf, int nbytes); | 40 | int (*v2_dev_write)(int d, const char *buf, int nbytes); |
41 | int (*v2_dev_seek)(int d, int hi, int lo); | 41 | int (*v2_dev_seek)(int d, int hi, int lo); |
42 | 42 | ||
43 | /* Never issued (multistage load support) */ | 43 | /* Never issued (multistage load support) */ |
diff --git a/arch/sparc/include/asm/oplib_32.h b/arch/sparc/include/asm/oplib_32.h
index 33e31ce6b31f..618a5bd4660d 100644
--- a/arch/sparc/include/asm/oplib_32.h
+++ b/arch/sparc/include/asm/oplib_32.h
@@ -60,25 +60,6 @@ extern char *prom_getbootargs(void);
60 | extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes); | 60 | extern char *prom_mapio(char *virt_hint, int io_space, unsigned int phys_addr, unsigned int num_bytes); |
61 | extern void prom_unmapio(char *virt_addr, unsigned int num_bytes); | 61 | extern void prom_unmapio(char *virt_addr, unsigned int num_bytes); |
62 | 62 | ||
63 | /* Device operations. */ | ||
64 | |||
65 | /* Open the device described by the passed string. Note, that the format | ||
66 | * of the string is different on V0 vs. V2->higher proms. The caller must | ||
67 | * know what he/she is doing! Returns the device descriptor, an int. | ||
68 | */ | ||
69 | extern int prom_devopen(char *device_string); | ||
70 | |||
71 | /* Close a previously opened device described by the passed integer | ||
72 | * descriptor. | ||
73 | */ | ||
74 | extern int prom_devclose(int device_handle); | ||
75 | |||
76 | /* Do a seek operation on the device described by the passed integer | ||
77 | * descriptor. | ||
78 | */ | ||
79 | extern void prom_seek(int device_handle, unsigned int seek_hival, | ||
80 | unsigned int seek_lowval); | ||
81 | |||
82 | /* Miscellaneous routines, don't really fit in any category per se. */ | 63 | /* Miscellaneous routines, don't really fit in any category per se. */ |
83 | 64 | ||
84 | /* Reboot the machine with the command line passed. */ | 65 | /* Reboot the machine with the command line passed. */ |
@@ -121,19 +102,8 @@ extern int prom_getrev(void);
121 | /* Get the prom firmware revision. */ | 102 | /* Get the prom firmware revision. */ |
122 | extern int prom_getprev(void); | 103 | extern int prom_getprev(void); |
123 | 104 | ||
124 | /* Character operations to/from the console.... */ | 105 | /* Write a buffer of characters to the console. */ |
125 | 106 | extern void prom_console_write_buf(const char *buf, int len); | |
126 | /* Non-blocking get character from console. */ | ||
127 | extern int prom_nbgetchar(void); | ||
128 | |||
129 | /* Non-blocking put character to console. */ | ||
130 | extern int prom_nbputchar(char character); | ||
131 | |||
132 | /* Blocking get character from console. */ | ||
133 | extern char prom_getchar(void); | ||
134 | |||
135 | /* Blocking put character to console. */ | ||
136 | extern void prom_putchar(char character); | ||
137 | 107 | ||
138 | /* Prom's internal routines, don't use in kernel/boot code. */ | 108 | /* Prom's internal routines, don't use in kernel/boot code. */ |
139 | extern void prom_printf(const char *fmt, ...); | 109 | extern void prom_printf(const char *fmt, ...); |
@@ -238,7 +208,6 @@ extern int prom_node_has_property(int node, char *property);
238 | extern int prom_setprop(int node, const char *prop_name, char *prop_value, | 208 | extern int prom_setprop(int node, const char *prop_name, char *prop_value, |
239 | int value_size); | 209 | int value_size); |
240 | 210 | ||
241 | extern int prom_pathtoinode(char *path); | ||
242 | extern int prom_inst2pkg(int); | 211 | extern int prom_inst2pkg(int); |
243 | 212 | ||
244 | /* Dorking with Bus ranges... */ | 213 | /* Dorking with Bus ranges... */ |
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
index 3e0b2d62303d..209463d62626 100644
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -67,27 +67,6 @@ extern void prom_init(void *cif_handler, void *cif_stack);
67 | /* Boot argument acquisition, returns the boot command line string. */ | 67 | /* Boot argument acquisition, returns the boot command line string. */ |
68 | extern char *prom_getbootargs(void); | 68 | extern char *prom_getbootargs(void); |
69 | 69 | ||
70 | /* Device utilities. */ | ||
71 | |||
72 | /* Device operations. */ | ||
73 | |||
74 | /* Open the device described by the passed string. Note, that the format | ||
75 | * of the string is different on V0 vs. V2->higher proms. The caller must | ||
76 | * know what he/she is doing! Returns the device descriptor, an int. | ||
77 | */ | ||
78 | extern int prom_devopen(const char *device_string); | ||
79 | |||
80 | /* Close a previously opened device described by the passed integer | ||
81 | * descriptor. | ||
82 | */ | ||
83 | extern int prom_devclose(int device_handle); | ||
84 | |||
85 | /* Do a seek operation on the device described by the passed integer | ||
86 | * descriptor. | ||
87 | */ | ||
88 | extern void prom_seek(int device_handle, unsigned int seek_hival, | ||
89 | unsigned int seek_lowval); | ||
90 | |||
91 | /* Miscellaneous routines, don't really fit in any category per se. */ | 70 | /* Miscellaneous routines, don't really fit in any category per se. */ |
92 | 71 | ||
93 | /* Reboot the machine with the command line passed. */ | 72 | /* Reboot the machine with the command line passed. */ |
@@ -109,33 +88,14 @@ extern void prom_halt(void) __attribute__ ((noreturn));
109 | /* Halt and power-off the machine. */ | 88 | /* Halt and power-off the machine. */ |
110 | extern void prom_halt_power_off(void) __attribute__ ((noreturn)); | 89 | extern void prom_halt_power_off(void) __attribute__ ((noreturn)); |
111 | 90 | ||
112 | /* Set the PROM 'sync' callback function to the passed function pointer. | ||
113 | * When the user gives the 'sync' command at the prom prompt while the | ||
114 | * kernel is still active, the prom will call this routine. | ||
115 | * | ||
116 | */ | ||
117 | typedef int (*callback_func_t)(long *cmd); | ||
118 | extern void prom_setcallback(callback_func_t func_ptr); | ||
119 | |||
120 | /* Acquire the IDPROM of the root node in the prom device tree. This | 91 | /* Acquire the IDPROM of the root node in the prom device tree. This |
121 | * gets passed a buffer where you would like it stuffed. The return value | 92 | * gets passed a buffer where you would like it stuffed. The return value |
122 | * is the format type of this idprom or 0xff on error. | 93 | * is the format type of this idprom or 0xff on error. |
123 | */ | 94 | */ |
124 | extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); | 95 | extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size); |
125 | 96 | ||
126 | /* Character operations to/from the console.... */ | 97 | /* Write a buffer of characters to the console. */ |
127 | 98 | extern void prom_console_write_buf(const char *buf, int len); | |
128 | /* Non-blocking get character from console. */ | ||
129 | extern int prom_nbgetchar(void); | ||
130 | |||
131 | /* Non-blocking put character to console. */ | ||
132 | extern int prom_nbputchar(char character); | ||
133 | |||
134 | /* Blocking get character from console. */ | ||
135 | extern char prom_getchar(void); | ||
136 | |||
137 | /* Blocking put character to console. */ | ||
138 | extern void prom_putchar(char character); | ||
139 | 99 | ||
140 | /* Prom's internal routines, don't use in kernel/boot code. */ | 100 | /* Prom's internal routines, don't use in kernel/boot code. */ |
141 | extern void prom_printf(const char *fmt, ...); | 101 | extern void prom_printf(const char *fmt, ...); |
@@ -278,9 +238,7 @@ extern int prom_finddevice(const char *name);
278 | extern int prom_setprop(int node, const char *prop_name, char *prop_value, | 238 | extern int prom_setprop(int node, const char *prop_name, char *prop_value, |
279 | int value_size); | 239 | int value_size); |
280 | 240 | ||
281 | extern int prom_pathtoinode(const char *path); | ||
282 | extern int prom_inst2pkg(int); | 241 | extern int prom_inst2pkg(int); |
283 | extern int prom_service_exists(const char *service_name); | ||
284 | extern void prom_sun4v_guest_soft_state(void); | 242 | extern void prom_sun4v_guest_soft_state(void); |
285 | 243 | ||
286 | extern int prom_ihandle2path(int handle, char *buffer, int bufsize); | 244 | extern int prom_ihandle2path(int handle, char *buffer, int bufsize); |
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 6a7b4dbc8e09..dcefd2211552 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -114,7 +114,7 @@ void __init leon_init_timers(irq_handler_t counter_fn)
114 | if (leon3_gptimer_regs && leon3_irqctrl_regs) { | 114 | if (leon3_gptimer_regs && leon3_irqctrl_regs) { |
115 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); | 115 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); |
116 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, | 116 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, |
117 | (((1000000 / 100) - 1))); | 117 | (((1000000 / HZ) - 1))); |
118 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); | 118 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); |
119 | 119 | ||
120 | #ifdef CONFIG_SMP | 120 | #ifdef CONFIG_SMP |
@@ -128,7 +128,7 @@ void __init leon_init_timers(irq_handler_t counter_fn)
128 | } | 128 | } |
129 | 129 | ||
130 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0); | 130 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0); |
131 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1))); | 131 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/HZ) - 1))); |
132 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0); | 132 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0); |
133 | # endif | 133 | # endif |
134 | 134 | ||
diff --git a/arch/sparc/prom/Makefile b/arch/sparc/prom/Makefile
index 1b8c073adb44..816c0fa12dc0 100644
--- a/arch/sparc/prom/Makefile
+++ b/arch/sparc/prom/Makefile
@@ -6,7 +6,6 @@ ccflags := -Werror
6 | 6 | ||
7 | lib-y := bootstr_$(BITS).o | 7 | lib-y := bootstr_$(BITS).o |
8 | lib-$(CONFIG_SPARC32) += devmap.o | 8 | lib-$(CONFIG_SPARC32) += devmap.o |
9 | lib-y += devops_$(BITS).o | ||
10 | lib-y += init_$(BITS).o | 9 | lib-y += init_$(BITS).o |
11 | lib-$(CONFIG_SPARC32) += memory.o | 10 | lib-$(CONFIG_SPARC32) += memory.o |
12 | lib-y += misc_$(BITS).o | 11 | lib-y += misc_$(BITS).o |
diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c
index 5340264b78f5..48863108a44c 100644
--- a/arch/sparc/prom/console_32.c
+++ b/arch/sparc/prom/console_32.c
@@ -16,63 +16,26 @@
16 | 16 | ||
17 | extern void restore_current(void); | 17 | extern void restore_current(void); |
18 | 18 | ||
19 | /* Non blocking get character from console input device, returns -1 | ||
20 | * if no input was taken. This can be used for polling. | ||
21 | */ | ||
22 | int | ||
23 | prom_nbgetchar(void) | ||
24 | { | ||
25 | static char inc; | ||
26 | int i = -1; | ||
27 | unsigned long flags; | ||
28 | |||
29 | spin_lock_irqsave(&prom_lock, flags); | ||
30 | switch(prom_vers) { | ||
31 | case PROM_V0: | ||
32 | i = (*(romvec->pv_nbgetchar))(); | ||
33 | break; | ||
34 | case PROM_V2: | ||
35 | case PROM_V3: | ||
36 | if( (*(romvec->pv_v2devops).v2_dev_read)(*romvec->pv_v2bootargs.fd_stdin , &inc, 0x1) == 1) { | ||
37 | i = inc; | ||
38 | } else { | ||
39 | i = -1; | ||
40 | } | ||
41 | break; | ||
42 | default: | ||
43 | i = -1; | ||
44 | break; | ||
45 | }; | ||
46 | restore_current(); | ||
47 | spin_unlock_irqrestore(&prom_lock, flags); | ||
48 | return i; /* Ugh, we could spin forever on unsupported proms ;( */ | ||
49 | } | ||
50 | |||
51 | /* Non blocking put character to console device, returns -1 if | 19 | /* Non blocking put character to console device, returns -1 if |
52 | * unsuccessful. | 20 | * unsuccessful. |
53 | */ | 21 | */ |
54 | int | 22 | static int prom_nbputchar(const char *buf) |
55 | prom_nbputchar(char c) | ||
56 | { | 23 | { |
57 | static char outc; | ||
58 | unsigned long flags; | 24 | unsigned long flags; |
59 | int i = -1; | 25 | int i = -1; |
60 | 26 | ||
61 | spin_lock_irqsave(&prom_lock, flags); | 27 | spin_lock_irqsave(&prom_lock, flags); |
62 | switch(prom_vers) { | 28 | switch(prom_vers) { |
63 | case PROM_V0: | 29 | case PROM_V0: |
64 | i = (*(romvec->pv_nbputchar))(c); | 30 | i = (*(romvec->pv_nbputchar))(*buf); |
65 | break; | 31 | break; |
66 | case PROM_V2: | 32 | case PROM_V2: |
67 | case PROM_V3: | 33 | case PROM_V3: |
68 | outc = c; | 34 | if ((*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, |
69 | if( (*(romvec->pv_v2devops).v2_dev_write)(*romvec->pv_v2bootargs.fd_stdout, &outc, 0x1) == 1) | 35 | buf, 0x1) == 1) |
70 | i = 0; | 36 | i = 0; |
71 | else | ||
72 | i = -1; | ||
73 | break; | 37 | break; |
74 | default: | 38 | default: |
75 | i = -1; | ||
76 | break; | 39 | break; |
77 | }; | 40 | }; |
78 | restore_current(); | 41 | restore_current(); |
@@ -80,18 +43,14 @@ prom_nbputchar(char c)
80 | return i; /* Ugh, we could spin forever on unsupported proms ;( */ | 43 | return i; /* Ugh, we could spin forever on unsupported proms ;( */ |
81 | } | 44 | } |
82 | 45 | ||
83 | /* Blocking version of get character routine above. */ | 46 | void prom_console_write_buf(const char *buf, int len) |
84 | char | ||
85 | prom_getchar(void) | ||
86 | { | 47 | { |
87 | int character; | 48 | while (len) { |
88 | while((character = prom_nbgetchar()) == -1) ; | 49 | int n = prom_nbputchar(buf); |
89 | return (char) character; | 50 | if (n) |
51 | continue; | ||
52 | len--; | ||
53 | buf++; | ||
54 | } | ||
90 | } | 55 | } |
91 | 56 | ||
92 | /* Blocking version of put character routine above. */ | ||
93 | void | ||
94 | prom_putchar(char c) | ||
95 | { | ||
96 | while(prom_nbputchar(c) == -1) ; | ||
97 | } | ||
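Note on the hunk above: the new 32-bit prom_console_write_buf() retries prom_nbputchar() until the PROM accepts each byte, and only then advances through the buffer. A minimal userspace sketch of that retry-and-advance pattern follows; put_one_byte() is a hypothetical stand-in for the PROM call (0 on success, -1 when the device is busy), not kernel code.

#include <stdio.h>

/* Hypothetical stand-in for prom_nbputchar(): 0 on success, -1 if busy. */
static int put_one_byte(const char *buf)
{
	return putchar(*buf) == EOF ? -1 : 0;
}

/* Same shape as the 32-bit prom_console_write_buf(): retry each byte
 * until it is accepted, then move on to the next one. */
static void write_buf(const char *buf, int len)
{
	while (len) {
		if (put_one_byte(buf))
			continue;	/* busy, try the same byte again */
		len--;
		buf++;
	}
}

int main(void)
{
	write_buf("hello\n", 6);
	return 0;
}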
diff --git a/arch/sparc/prom/console_64.c b/arch/sparc/prom/console_64.c index 10322dc2f557..ed39e75828bd 100644 --- a/arch/sparc/prom/console_64.c +++ b/arch/sparc/prom/console_64.c | |||
@@ -15,85 +15,34 @@ | |||
15 | 15 | ||
16 | extern int prom_stdin, prom_stdout; | 16 | extern int prom_stdin, prom_stdout; |
17 | 17 | ||
18 | /* Non blocking get character from console input device, returns -1 | 18 | static int __prom_console_write_buf(const char *buf, int len) |
19 | * if no input was taken. This can be used for polling. | ||
20 | */ | ||
21 | inline int | ||
22 | prom_nbgetchar(void) | ||
23 | { | ||
24 | unsigned long args[7]; | ||
25 | char inc; | ||
26 | |||
27 | args[0] = (unsigned long) "read"; | ||
28 | args[1] = 3; | ||
29 | args[2] = 1; | ||
30 | args[3] = (unsigned int) prom_stdin; | ||
31 | args[4] = (unsigned long) &inc; | ||
32 | args[5] = 1; | ||
33 | args[6] = (unsigned long) -1; | ||
34 | |||
35 | p1275_cmd_direct(args); | ||
36 | |||
37 | if (args[6] == 1) | ||
38 | return inc; | ||
39 | return -1; | ||
40 | } | ||
41 | |||
42 | /* Non blocking put character to console device, returns -1 if | ||
43 | * unsuccessful. | ||
44 | */ | ||
45 | inline int | ||
46 | prom_nbputchar(char c) | ||
47 | { | 19 | { |
48 | unsigned long args[7]; | 20 | unsigned long args[7]; |
49 | char outc; | 21 | int ret; |
50 | |||
51 | outc = c; | ||
52 | 22 | ||
53 | args[0] = (unsigned long) "write"; | 23 | args[0] = (unsigned long) "write"; |
54 | args[1] = 3; | 24 | args[1] = 3; |
55 | args[2] = 1; | 25 | args[2] = 1; |
56 | args[3] = (unsigned int) prom_stdout; | 26 | args[3] = (unsigned int) prom_stdout; |
57 | args[4] = (unsigned long) &outc; | 27 | args[4] = (unsigned long) buf; |
58 | args[5] = 1; | 28 | args[5] = (unsigned int) len; |
59 | args[6] = (unsigned long) -1; | 29 | args[6] = (unsigned long) -1; |
60 | 30 | ||
61 | p1275_cmd_direct(args); | 31 | p1275_cmd_direct(args); |
62 | 32 | ||
63 | if (args[6] == 1) | 33 | ret = (int) args[6]; |
64 | return 0; | 34 | if (ret < 0) |
65 | else | ||
66 | return -1; | 35 | return -1; |
36 | return ret; | ||
67 | } | 37 | } |
68 | 38 | ||
69 | /* Blocking version of get character routine above. */ | 39 | void prom_console_write_buf(const char *buf, int len) |
70 | char | ||
71 | prom_getchar(void) | ||
72 | { | ||
73 | int character; | ||
74 | while((character = prom_nbgetchar()) == -1) ; | ||
75 | return (char) character; | ||
76 | } | ||
77 | |||
78 | /* Blocking version of put character routine above. */ | ||
79 | void | ||
80 | prom_putchar(char c) | ||
81 | { | 40 | { |
82 | prom_nbputchar(c); | 41 | while (len) { |
83 | } | 42 | int n = __prom_console_write_buf(buf, len); |
84 | 43 | if (n < 0) | |
85 | void | 44 | continue; |
86 | prom_puts(const char *s, int len) | 45 | len -= n; |
87 | { | 46 | buf += len; |
88 | unsigned long args[7]; | 47 | } |
89 | |||
90 | args[0] = (unsigned long) "write"; | ||
91 | args[1] = 3; | ||
92 | args[2] = 1; | ||
93 | args[3] = (unsigned int) prom_stdout; | ||
94 | args[4] = (unsigned long) s; | ||
95 | args[5] = len; | ||
96 | args[6] = (unsigned long) -1; | ||
97 | |||
98 | p1275_cmd_direct(args); | ||
99 | } | 48 | } |
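Note on the hunk above: the 64-bit path hands the whole buffer to the PROM "write" service through a p1275 argument array, with args[6] preloaded to -1 and read back as the number of bytes the firmware consumed, so the caller can loop over partial writes. A hedged sketch of that "consume what was written" loop, with write_some() as a hypothetical stand-in that may accept only part of the buffer:

#include <stdio.h>

/* Hypothetical stand-in for __prom_console_write_buf(): accepts at most
 * two bytes per call and returns the count, or -1 on a transient error. */
static int write_some(const char *buf, int len)
{
	int n = len > 2 ? 2 : len;
	return fwrite(buf, 1, n, stdout) == (size_t)n ? n : -1;
}

/* Consume the buffer in chunks, retrying on transient failure. */
static void write_all(const char *buf, int len)
{
	while (len) {
		int n = write_some(buf, len);
		if (n < 0)
			continue;	/* transient failure, retry */
		len -= n;
		buf += n;	/* advance past the bytes just written */
	}
}

int main(void)
{
	write_all("console test\n", 13);
	return 0;
}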
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c index 6cb1581d6aef..2fdcebf9a5df 100644 --- a/arch/sparc/prom/misc_64.c +++ b/arch/sparc/prom/misc_64.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <asm/system.h> | 18 | #include <asm/system.h> |
19 | #include <asm/ldc.h> | 19 | #include <asm/ldc.h> |
20 | 20 | ||
21 | int prom_service_exists(const char *service_name) | 21 | static int prom_service_exists(const char *service_name) |
22 | { | 22 | { |
23 | unsigned long args[5]; | 23 | unsigned long args[5]; |
24 | 24 | ||
@@ -150,20 +150,6 @@ void prom_halt_power_off(void) | |||
150 | prom_halt(); | 150 | prom_halt(); |
151 | } | 151 | } |
152 | 152 | ||
153 | /* Set prom sync handler to call function 'funcp'. */ | ||
154 | void prom_setcallback(callback_func_t funcp) | ||
155 | { | ||
156 | unsigned long args[5]; | ||
157 | if (!funcp) | ||
158 | return; | ||
159 | args[0] = (unsigned long) "set-callback"; | ||
160 | args[1] = 1; | ||
161 | args[2] = 1; | ||
162 | args[3] = (unsigned long) funcp; | ||
163 | args[4] = (unsigned long) -1; | ||
164 | p1275_cmd_direct(args); | ||
165 | } | ||
166 | |||
167 | /* Get the idprom and stuff it into buffer 'idbuf'. Returns the | 153 | /* Get the idprom and stuff it into buffer 'idbuf'. Returns the |
168 | * format type. 'num_bytes' is the number of bytes that your idbuf | 154 | * format type. 'num_bytes' is the number of bytes that your idbuf |
169 | * has space for. Returns 0xff on error. | 155 | * has space for. Returns 0xff on error. |
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c index ca869266b9f3..d9682f06b3b0 100644 --- a/arch/sparc/prom/printf.c +++ b/arch/sparc/prom/printf.c | |||
@@ -15,22 +15,45 @@ | |||
15 | 15 | ||
16 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
17 | #include <linux/compiler.h> | 17 | #include <linux/compiler.h> |
18 | #include <linux/spinlock.h> | ||
18 | 19 | ||
19 | #include <asm/openprom.h> | 20 | #include <asm/openprom.h> |
20 | #include <asm/oplib.h> | 21 | #include <asm/oplib.h> |
21 | 22 | ||
23 | #define CONSOLE_WRITE_BUF_SIZE 1024 | ||
24 | |||
22 | static char ppbuf[1024]; | 25 | static char ppbuf[1024]; |
26 | static char console_write_buf[CONSOLE_WRITE_BUF_SIZE]; | ||
27 | static DEFINE_RAW_SPINLOCK(console_write_lock); | ||
23 | 28 | ||
24 | void notrace prom_write(const char *buf, unsigned int n) | 29 | void notrace prom_write(const char *buf, unsigned int n) |
25 | { | 30 | { |
26 | char ch; | 31 | unsigned int dest_len; |
32 | unsigned long flags; | ||
33 | char *dest; | ||
34 | |||
35 | dest = console_write_buf; | ||
36 | raw_spin_lock_irqsave(&console_write_lock, flags); | ||
27 | 37 | ||
28 | while (n != 0) { | 38 | dest_len = 0; |
29 | --n; | 39 | while (n-- != 0) { |
30 | if ((ch = *buf++) == '\n') | 40 | char ch = *buf++; |
31 | prom_putchar('\r'); | 41 | if (ch == '\n') { |
32 | prom_putchar(ch); | 42 | *dest++ = '\r'; |
43 | dest_len++; | ||
44 | } | ||
45 | *dest++ = ch; | ||
46 | dest_len++; | ||
47 | if (dest_len >= CONSOLE_WRITE_BUF_SIZE - 1) { | ||
48 | prom_console_write_buf(console_write_buf, dest_len); | ||
49 | dest = console_write_buf; | ||
50 | dest_len = 0; | ||
51 | } | ||
33 | } | 52 | } |
53 | if (dest_len) | ||
54 | prom_console_write_buf(console_write_buf, dest_len); | ||
55 | |||
56 | raw_spin_unlock_irqrestore(&console_write_lock, flags); | ||
34 | } | 57 | } |
35 | 58 | ||
36 | void notrace prom_printf(const char *fmt, ...) | 59 | void notrace prom_printf(const char *fmt, ...) |
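Note on the hunk above: prom_write() now stages output in a fixed buffer under a raw spinlock, expands "\n" to "\r\n" while staging, and flushes whenever the buffer is close to full, so the PROM is called once per chunk instead of once per character. A small userspace sketch of the same staging logic, assuming a tiny buffer so the flush path is exercised; flush() is a hypothetical stand-in for prom_console_write_buf().

#include <stdio.h>

#define STAGE_SIZE 16	/* small on purpose, to exercise the flush path */

static char stage[STAGE_SIZE];

/* Hypothetical stand-in for prom_console_write_buf(). */
static void flush(const char *buf, int len)
{
	fwrite(buf, 1, len, stdout);
}

/* Expand '\n' to "\r\n" while staging, flushing near-full buffers. */
static void stage_write(const char *buf, unsigned int n)
{
	int used = 0;

	while (n-- != 0) {
		char ch = *buf++;
		if (ch == '\n')
			stage[used++] = '\r';
		stage[used++] = ch;
		if (used >= STAGE_SIZE - 1) {
			flush(stage, used);
			used = 0;
		}
	}
	if (used)
		flush(stage, used);
}

int main(void)
{
	stage_write("line one\nline two\n", 18);
	return 0;
}

The flush threshold is SIZE - 1 rather than SIZE because a single newline can append two bytes in one iteration.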
diff --git a/arch/sparc/prom/tree_32.c b/arch/sparc/prom/tree_32.c index b21592f8e3fe..71e7f080a576 100644 --- a/arch/sparc/prom/tree_32.c +++ b/arch/sparc/prom/tree_32.c | |||
@@ -341,18 +341,3 @@ int prom_inst2pkg(int inst) | |||
341 | if (node == -1) return 0; | 341 | if (node == -1) return 0; |
342 | return node; | 342 | return node; |
343 | } | 343 | } |
344 | |||
345 | /* Return 'node' assigned to a particular prom 'path' | ||
346 | * FIXME: Should work for v0 as well | ||
347 | */ | ||
348 | int prom_pathtoinode(char *path) | ||
349 | { | ||
350 | int node, inst; | ||
351 | |||
352 | inst = prom_devopen (path); | ||
353 | if (inst == -1) return 0; | ||
354 | node = prom_inst2pkg (inst); | ||
355 | prom_devclose (inst); | ||
356 | if (node == -1) return 0; | ||
357 | return node; | ||
358 | } | ||
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c index 9d3f9137a43a..8327b1b68f4b 100644 --- a/arch/sparc/prom/tree_64.c +++ b/arch/sparc/prom/tree_64.c | |||
@@ -374,24 +374,6 @@ inline int prom_inst2pkg(int inst) | |||
374 | return node; | 374 | return node; |
375 | } | 375 | } |
376 | 376 | ||
377 | /* Return 'node' assigned to a particular prom 'path' | ||
378 | * FIXME: Should work for v0 as well | ||
379 | */ | ||
380 | int | ||
381 | prom_pathtoinode(const char *path) | ||
382 | { | ||
383 | int node, inst; | ||
384 | |||
385 | inst = prom_devopen (path); | ||
386 | if (inst == 0) | ||
387 | return 0; | ||
388 | node = prom_inst2pkg(inst); | ||
389 | prom_devclose(inst); | ||
390 | if (node == -1) | ||
391 | return 0; | ||
392 | return node; | ||
393 | } | ||
394 | |||
395 | int prom_ihandle2path(int handle, char *buffer, int bufsize) | 377 | int prom_ihandle2path(int handle, char *buffer, int bufsize) |
396 | { | 378 | { |
397 | unsigned long args[7]; | 379 | unsigned long args[7]; |
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 84c29111756c..aaf6282bacc3 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c | |||
@@ -212,6 +212,13 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, | |||
212 | childregs->sp = sp; /* override with new user stack pointer */ | 212 | childregs->sp = sp; /* override with new user stack pointer */ |
213 | 213 | ||
214 | /* | 214 | /* |
215 | * If CLONE_SETTLS is set, set "tp" in the new task to "r4", | ||
216 | * which is passed in as arg #5 to sys_clone(). | ||
217 | */ | ||
218 | if (clone_flags & CLONE_SETTLS) | ||
219 | childregs->tp = regs->regs[4]; | ||
220 | |||
221 | /* | ||
215 | * Copy the callee-saved registers from the passed pt_regs struct | 222 | * Copy the callee-saved registers from the passed pt_regs struct |
216 | * into the context-switch callee-saved registers area. | 223 | * into the context-switch callee-saved registers area. |
217 | * We have to restore the callee-saved registers since we may | 224 | * We have to restore the callee-saved registers since we may |
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c index 7f7338c90784..1664cce7b0ac 100644 --- a/arch/um/drivers/line.c +++ b/arch/um/drivers/line.c | |||
@@ -727,6 +727,9 @@ struct winch { | |||
727 | 727 | ||
728 | static void free_winch(struct winch *winch, int free_irq_ok) | 728 | static void free_winch(struct winch *winch, int free_irq_ok) |
729 | { | 729 | { |
730 | if (free_irq_ok) | ||
731 | free_irq(WINCH_IRQ, winch); | ||
732 | |||
730 | list_del(&winch->list); | 733 | list_del(&winch->list); |
731 | 734 | ||
732 | if (winch->pid != -1) | 735 | if (winch->pid != -1) |
@@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok) | |||
735 | os_close_file(winch->fd); | 738 | os_close_file(winch->fd); |
736 | if (winch->stack != 0) | 739 | if (winch->stack != 0) |
737 | free_stack(winch->stack, 0); | 740 | free_stack(winch->stack, 0); |
738 | if (free_irq_ok) | ||
739 | free_irq(WINCH_IRQ, winch); | ||
740 | kfree(winch); | 741 | kfree(winch); |
741 | } | 742 | } |
742 | 743 | ||
diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index ec6378550671..9a873d765615 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S | |||
@@ -22,7 +22,7 @@ SECTIONS | |||
22 | _text = .; | 22 | _text = .; |
23 | _stext = .; | 23 | _stext = .; |
24 | __init_begin = .; | 24 | __init_begin = .; |
25 | INIT_TEXT_SECTION(PAGE_SIZE) | 25 | INIT_TEXT_SECTION(0) |
26 | . = ALIGN(PAGE_SIZE); | 26 | . = ALIGN(PAGE_SIZE); |
27 | 27 | ||
28 | .text : | 28 | .text : |
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c index dec5678fc17f..6e3359d6a839 100644 --- a/arch/um/os-Linux/time.c +++ b/arch/um/os-Linux/time.c | |||
@@ -60,7 +60,7 @@ static inline long long timeval_to_ns(const struct timeval *tv) | |||
60 | long long disable_timer(void) | 60 | long long disable_timer(void) |
61 | { | 61 | { |
62 | struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } }); | 62 | struct itimerval time = ((struct itimerval) { { 0, 0 }, { 0, 0 } }); |
63 | int remain, max = UM_NSEC_PER_SEC / UM_HZ; | 63 | long long remain, max = UM_NSEC_PER_SEC / UM_HZ; |
64 | 64 | ||
65 | if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0) | 65 | if (setitimer(ITIMER_VIRTUAL, &time, &time) < 0) |
66 | printk(UM_KERN_ERR "disable_timer - setitimer failed, " | 66 | printk(UM_KERN_ERR "disable_timer - setitimer failed, " |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 3f76523589af..f857bd39cdfb 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -152,7 +152,7 @@ | |||
152 | #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ | 152 | #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ |
153 | #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ | 153 | #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ |
154 | #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ | 154 | #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ |
155 | #define X86_FEATURE_SSE5 (6*32+11) /* SSE-5 */ | 155 | #define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */ |
156 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ | 156 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ |
157 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ | 157 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ |
158 | #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ | 158 | #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ |
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 30a3e9776123..6a45ec41ec26 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h | |||
@@ -206,6 +206,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size) | |||
206 | 206 | ||
207 | extern void iounmap(volatile void __iomem *addr); | 207 | extern void iounmap(volatile void __iomem *addr); |
208 | 208 | ||
209 | extern void set_iounmap_nonlazy(void); | ||
209 | 210 | ||
210 | #ifdef __KERNEL__ | 211 | #ifdef __KERNEL__ |
211 | 212 | ||
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index c52e2eb40a1e..6986312bb670 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -79,7 +79,7 @@ | |||
79 | #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) | 79 | #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT) |
80 | #define KVM_MIN_FREE_MMU_PAGES 5 | 80 | #define KVM_MIN_FREE_MMU_PAGES 5 |
81 | #define KVM_REFILL_PAGES 25 | 81 | #define KVM_REFILL_PAGES 25 |
82 | #define KVM_MAX_CPUID_ENTRIES 40 | 82 | #define KVM_MAX_CPUID_ENTRIES 80 |
83 | #define KVM_NR_FIXED_MTRR_REGION 88 | 83 | #define KVM_NR_FIXED_MTRR_REGION 88 |
84 | #define KVM_NR_VAR_MTRR 8 | 84 | #define KVM_NR_VAR_MTRR 8 |
85 | 85 | ||
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 4a2d4e0c18d9..8b5393ec1080 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -36,8 +36,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
36 | unsigned cpu = smp_processor_id(); | 36 | unsigned cpu = smp_processor_id(); |
37 | 37 | ||
38 | if (likely(prev != next)) { | 38 | if (likely(prev != next)) { |
39 | /* stop flush ipis for the previous mm */ | ||
40 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); | ||
41 | #ifdef CONFIG_SMP | 39 | #ifdef CONFIG_SMP |
42 | percpu_write(cpu_tlbstate.state, TLBSTATE_OK); | 40 | percpu_write(cpu_tlbstate.state, TLBSTATE_OK); |
43 | percpu_write(cpu_tlbstate.active_mm, next); | 41 | percpu_write(cpu_tlbstate.active_mm, next); |
@@ -47,6 +45,9 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
47 | /* Re-load page tables */ | 45 | /* Re-load page tables */ |
48 | load_cr3(next->pgd); | 46 | load_cr3(next->pgd); |
49 | 47 | ||
48 | /* stop flush ipis for the previous mm */ | ||
49 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); | ||
50 | |||
50 | /* | 51 | /* |
51 | * load the LDT, if the LDT is different: | 52 | * load the LDT, if the LDT is different: |
52 | */ | 53 | */ |
diff --git a/arch/x86/include/asm/mrst.h b/arch/x86/include/asm/mrst.h index 16350740edf6..33fc2966beb7 100644 --- a/arch/x86/include/asm/mrst.h +++ b/arch/x86/include/asm/mrst.h | |||
@@ -26,7 +26,7 @@ enum mrst_cpu_type { | |||
26 | }; | 26 | }; |
27 | 27 | ||
28 | extern enum mrst_cpu_type __mrst_cpu_chip; | 28 | extern enum mrst_cpu_type __mrst_cpu_chip; |
29 | static enum mrst_cpu_type mrst_identify_cpu(void) | 29 | static inline enum mrst_cpu_type mrst_identify_cpu(void) |
30 | { | 30 | { |
31 | return __mrst_cpu_chip; | 31 | return __mrst_cpu_chip; |
32 | } | 32 | } |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index ebaa04a8d3af..37ea41c63b49 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -768,29 +768,6 @@ extern unsigned long idle_halt; | |||
768 | extern unsigned long idle_nomwait; | 768 | extern unsigned long idle_nomwait; |
769 | extern bool c1e_detected; | 769 | extern bool c1e_detected; |
770 | 770 | ||
771 | /* | ||
772 | * on systems with caches, caches must be flashed as the absolute | ||
772 | * on systems with caches, caches must be flushed as the absolute | ||
773 | * last instruction before going into a suspended halt. Otherwise, | ||
774 | * dirty data can linger in the cache and become stale on resume, | ||
775 | * leading to strange errors. | ||
776 | * | ||
777 | * perform a variety of operations to guarantee that the compiler | ||
778 | * will not reorder instructions. wbinvd itself is serializing | ||
779 | * so the processor will not reorder. | ||
780 | * | ||
781 | * Systems without cache can just go into halt. | ||
782 | */ | ||
783 | static inline void wbinvd_halt(void) | ||
784 | { | ||
785 | mb(); | ||
786 | /* check for clflush to determine if wbinvd is legal */ | ||
787 | if (cpu_has_clflush) | ||
788 | asm volatile("cli; wbinvd; 1: hlt; jmp 1b" : : : "memory"); | ||
789 | else | ||
790 | while (1) | ||
791 | halt(); | ||
792 | } | ||
793 | |||
794 | extern void enable_sep_cpu(void); | 771 | extern void enable_sep_cpu(void); |
795 | extern int sysenter_setup(void); | 772 | extern int sysenter_setup(void); |
796 | 773 | ||
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 4cfc90824068..4c2f63c7fc1b 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -50,7 +50,7 @@ struct smp_ops { | |||
50 | void (*smp_prepare_cpus)(unsigned max_cpus); | 50 | void (*smp_prepare_cpus)(unsigned max_cpus); |
51 | void (*smp_cpus_done)(unsigned max_cpus); | 51 | void (*smp_cpus_done)(unsigned max_cpus); |
52 | 52 | ||
53 | void (*smp_send_stop)(void); | 53 | void (*stop_other_cpus)(int wait); |
54 | void (*smp_send_reschedule)(int cpu); | 54 | void (*smp_send_reschedule)(int cpu); |
55 | 55 | ||
56 | int (*cpu_up)(unsigned cpu); | 56 | int (*cpu_up)(unsigned cpu); |
@@ -73,7 +73,12 @@ extern struct smp_ops smp_ops; | |||
73 | 73 | ||
74 | static inline void smp_send_stop(void) | 74 | static inline void smp_send_stop(void) |
75 | { | 75 | { |
76 | smp_ops.smp_send_stop(); | 76 | smp_ops.stop_other_cpus(0); |
77 | } | ||
78 | |||
79 | static inline void stop_other_cpus(void) | ||
80 | { | ||
81 | smp_ops.stop_other_cpus(1); | ||
77 | } | 82 | } |
78 | 83 | ||
79 | static inline void smp_prepare_boot_cpu(void) | 84 | static inline void smp_prepare_boot_cpu(void) |
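Note on the hunk above: smp_ops now carries a single flagged callback, so smp_send_stop() keeps the old fire-and-forget behaviour while the new stop_other_cpus() asks the implementation to wait for the other CPUs. A trivial sketch of wrapping one flagged callback two ways (struct and function names here are illustrative, not the kernel's):

#include <stdio.h>

/* Illustrative ops table with one flagged callback. */
struct ops {
	void (*stop_other_cpus)(int wait);
};

static void native_stop(int wait)
{
	printf("stopping others, wait=%d\n", wait);
}

static struct ops ops = { .stop_other_cpus = native_stop };

static void send_stop(void)     { ops.stop_other_cpus(0); }	/* fire and forget */
static void stop_and_wait(void) { ops.stop_other_cpus(1); }	/* wait for them   */

int main(void)
{
	send_stop();
	stop_and_wait();
	return 0;
}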
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index e3b534cda49a..e0f220e158c1 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -1340,6 +1340,14 @@ void __cpuinit end_local_APIC_setup(void) | |||
1340 | 1340 | ||
1341 | setup_apic_nmi_watchdog(NULL); | 1341 | setup_apic_nmi_watchdog(NULL); |
1342 | apic_pm_activate(); | 1342 | apic_pm_activate(); |
1343 | |||
1344 | /* | ||
1345 | * Now that local APIC setup is completed for BP, configure the fault | ||
1346 | * handling for interrupt remapping. | ||
1347 | */ | ||
1348 | if (!smp_processor_id() && intr_remapping_enabled) | ||
1349 | enable_drhd_fault_handling(); | ||
1350 | |||
1343 | } | 1351 | } |
1344 | 1352 | ||
1345 | #ifdef CONFIG_X86_X2APIC | 1353 | #ifdef CONFIG_X86_X2APIC |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 5c5b8f3dddb5..4d90327853b7 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -1397,6 +1397,7 @@ int setup_ioapic_entry(int apic_id, int irq, | |||
1397 | irte.dlvry_mode = apic->irq_delivery_mode; | 1397 | irte.dlvry_mode = apic->irq_delivery_mode; |
1398 | irte.vector = vector; | 1398 | irte.vector = vector; |
1399 | irte.dest_id = IRTE_DEST(destination); | 1399 | irte.dest_id = IRTE_DEST(destination); |
1400 | irte.redir_hint = 1; | ||
1400 | 1401 | ||
1401 | /* Set source-id of interrupt request */ | 1402 | /* Set source-id of interrupt request */ |
1402 | set_ioapic_sid(&irte, apic_id); | 1403 | set_ioapic_sid(&irte, apic_id); |
@@ -3348,6 +3349,7 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, | |||
3348 | irte.dlvry_mode = apic->irq_delivery_mode; | 3349 | irte.dlvry_mode = apic->irq_delivery_mode; |
3349 | irte.vector = cfg->vector; | 3350 | irte.vector = cfg->vector; |
3350 | irte.dest_id = IRTE_DEST(dest); | 3351 | irte.dest_id = IRTE_DEST(dest); |
3352 | irte.redir_hint = 1; | ||
3351 | 3353 | ||
3352 | /* Set source-id of interrupt request */ | 3354 | /* Set source-id of interrupt request */ |
3353 | if (pdev) | 3355 | if (pdev) |
@@ -3624,6 +3626,7 @@ static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask) | |||
3624 | msg.data |= MSI_DATA_VECTOR(cfg->vector); | 3626 | msg.data |= MSI_DATA_VECTOR(cfg->vector); |
3625 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; | 3627 | msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; |
3626 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); | 3628 | msg.address_lo |= MSI_ADDR_DEST_ID(dest); |
3629 | msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest); | ||
3627 | 3630 | ||
3628 | dmar_msi_write(irq, &msg); | 3631 | dmar_msi_write(irq, &msg); |
3629 | 3632 | ||
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c index 83e9be4778e2..fac49a845064 100644 --- a/arch/x86/kernel/apic/probe_64.c +++ b/arch/x86/kernel/apic/probe_64.c | |||
@@ -76,13 +76,6 @@ void __init default_setup_apic_routing(void) | |||
76 | /* need to update phys_pkg_id */ | 76 | /* need to update phys_pkg_id */ |
77 | apic->phys_pkg_id = apicid_phys_pkg_id; | 77 | apic->phys_pkg_id = apicid_phys_pkg_id; |
78 | } | 78 | } |
79 | |||
80 | /* | ||
81 | * Now that apic routing model is selected, configure the | ||
82 | * fault handling for intr remapping. | ||
83 | */ | ||
84 | if (intr_remapping_enabled) | ||
85 | enable_drhd_fault_handling(); | ||
86 | } | 79 | } |
87 | 80 | ||
88 | /* Same for both flat and physical. */ | 81 | /* Same for both flat and physical. */ |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ba5f62f45f01..81fa3cb12f39 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -305,8 +305,7 @@ static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | |||
305 | /* use socket ID also for last level cache */ | 305 | /* use socket ID also for last level cache */ |
306 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; | 306 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; |
307 | /* fixup topology information on multi-node processors */ | 307 | /* fixup topology information on multi-node processors */ |
308 | if ((c->x86 == 0x10) && (c->x86_model == 9)) | 308 | amd_fixup_dcm(c); |
309 | amd_fixup_dcm(c); | ||
310 | #endif | 309 | #endif |
311 | } | 310 | } |
312 | 311 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index cd8da247dda1..a2baafb2fe6d 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -701,6 +701,7 @@ static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) | |||
701 | per_cpu(acfreq_data, policy->cpu) = NULL; | 701 | per_cpu(acfreq_data, policy->cpu) = NULL; |
702 | acpi_processor_unregister_performance(data->acpi_data, | 702 | acpi_processor_unregister_performance(data->acpi_data, |
703 | policy->cpu); | 703 | policy->cpu); |
704 | kfree(data->freq_table); | ||
704 | kfree(data); | 705 | kfree(data); |
705 | } | 706 | } |
706 | 707 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index c5f59d071425..ac140c7be396 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -827,7 +827,7 @@ int __init amd_special_default_mtrr(void) | |||
827 | 827 | ||
828 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | 828 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) |
829 | return 0; | 829 | return 0; |
830 | if (boot_cpu_data.x86 < 0xf || boot_cpu_data.x86 > 0x11) | 830 | if (boot_cpu_data.x86 < 0xf) |
831 | return 0; | 831 | return 0; |
832 | /* In case some hypervisor doesn't pass SYSCFG through: */ | 832 | /* In case some hypervisor doesn't pass SYSCFG through: */ |
833 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) | 833 | if (rdmsr_safe(MSR_K8_SYSCFG, &l, &h) < 0) |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 01c0f3ee6cc3..bebabec5b448 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void) | |||
793 | } | 793 | } |
794 | 794 | ||
795 | /* | 795 | /* |
796 | * MTRR initialization for all AP's | 796 | * Delayed MTRR initialization for all AP's |
797 | */ | 797 | */ |
798 | void mtrr_aps_init(void) | 798 | void mtrr_aps_init(void) |
799 | { | 799 | { |
800 | if (!use_intel()) | 800 | if (!use_intel()) |
801 | return; | 801 | return; |
802 | 802 | ||
803 | /* | ||
804 | * Check if someone has requested the delay of AP MTRR initialization, | ||
805 | * by doing set_mtrr_aps_delayed_init(), prior to this point. If not, | ||
806 | * then we are done. | ||
807 | */ | ||
808 | if (!mtrr_aps_delayed_init) | ||
809 | return; | ||
810 | |||
803 | set_mtrr(~0U, 0, 0, 0); | 811 | set_mtrr(~0U, 0, 0, 0); |
804 | mtrr_aps_delayed_init = false; | 812 | mtrr_aps_delayed_init = false; |
805 | } | 813 | } |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index c2897b7b4a3b..46d58448c3af 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -52,7 +52,7 @@ static __initconst const u64 amd_hw_cache_event_ids | |||
52 | [ C(DTLB) ] = { | 52 | [ C(DTLB) ] = { |
53 | [ C(OP_READ) ] = { | 53 | [ C(OP_READ) ] = { |
54 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ | 54 | [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */ |
55 | [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DLTB Miss */ | 55 | [ C(RESULT_MISS) ] = 0x0746, /* L1_DTLB_AND_L2_DLTB_MISS.ALL */ |
56 | }, | 56 | }, |
57 | [ C(OP_WRITE) ] = { | 57 | [ C(OP_WRITE) ] = { |
58 | [ C(RESULT_ACCESS) ] = 0, | 58 | [ C(RESULT_ACCESS) ] = 0, |
@@ -66,7 +66,7 @@ static __initconst const u64 amd_hw_cache_event_ids | |||
66 | [ C(ITLB) ] = { | 66 | [ C(ITLB) ] = { |
67 | [ C(OP_READ) ] = { | 67 | [ C(OP_READ) ] = { |
68 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */ | 68 | [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */ |
69 | [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */ | 69 | [ C(RESULT_MISS) ] = 0x0385, /* L1_ITLB_AND_L2_ITLB_MISS.ALL */ |
70 | }, | 70 | }, |
71 | [ C(OP_WRITE) ] = { | 71 | [ C(OP_WRITE) ] = { |
72 | [ C(RESULT_ACCESS) ] = -1, | 72 | [ C(RESULT_ACCESS) ] = -1, |
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c index 045b36cada65..994828899e09 100644 --- a/arch/x86/kernel/crash_dump_64.c +++ b/arch/x86/kernel/crash_dump_64.c | |||
@@ -34,7 +34,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
34 | if (!csize) | 34 | if (!csize) |
35 | return 0; | 35 | return 0; |
36 | 36 | ||
37 | vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); | 37 | vaddr = ioremap_cache(pfn << PAGE_SHIFT, PAGE_SIZE); |
38 | if (!vaddr) | 38 | if (!vaddr) |
39 | return -ENOMEM; | 39 | return -ENOMEM; |
40 | 40 | ||
@@ -46,6 +46,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
46 | } else | 46 | } else |
47 | memcpy(buf, vaddr + offset, csize); | 47 | memcpy(buf, vaddr + offset, csize); |
48 | 48 | ||
49 | set_iounmap_nonlazy(); | ||
49 | iounmap(vaddr); | 50 | iounmap(vaddr); |
50 | return csize; | 51 | return csize; |
51 | } | 52 | } |
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index ff15c9dcc25d..42c594254507 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c | |||
@@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) | |||
433 | dr6_p = (unsigned long *)ERR_PTR(args->err); | 433 | dr6_p = (unsigned long *)ERR_PTR(args->err); |
434 | dr6 = *dr6_p; | 434 | dr6 = *dr6_p; |
435 | 435 | ||
436 | /* If it's a single step, TRAP bits are random */ | ||
437 | if (dr6 & DR_STEP) | ||
438 | return NOTIFY_DONE; | ||
439 | |||
436 | /* Do an early return if no trap bits are set in DR6 */ | 440 | /* Do an early return if no trap bits are set in DR6 */ |
437 | if ((dr6 & DR_TRAP_BITS) == 0) | 441 | if ((dr6 & DR_TRAP_BITS) == 0) |
438 | return NOTIFY_DONE; | 442 | return NOTIFY_DONE; |
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c index 356170262a93..2573689bda77 100644 --- a/arch/x86/kernel/microcode_intel.c +++ b/arch/x86/kernel/microcode_intel.c | |||
@@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
364 | 364 | ||
365 | /* For performance reasons, reuse mc area when possible */ | 365 | /* For performance reasons, reuse mc area when possible */ |
366 | if (!mc || mc_size > curr_mc_size) { | 366 | if (!mc || mc_size > curr_mc_size) { |
367 | if (mc) | 367 | vfree(mc); |
368 | vfree(mc); | ||
369 | mc = vmalloc(mc_size); | 368 | mc = vmalloc(mc_size); |
370 | if (!mc) | 369 | if (!mc) |
371 | break; | 370 | break; |
@@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
374 | 373 | ||
375 | if (get_ucode_data(mc, ucode_ptr, mc_size) || | 374 | if (get_ucode_data(mc, ucode_ptr, mc_size) || |
376 | microcode_sanity_check(mc) < 0) { | 375 | microcode_sanity_check(mc) < 0) { |
377 | vfree(mc); | ||
378 | break; | 376 | break; |
379 | } | 377 | } |
380 | 378 | ||
381 | if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) { | 379 | if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) { |
382 | if (new_mc) | 380 | vfree(new_mc); |
383 | vfree(new_mc); | ||
384 | new_rev = mc_header.rev; | 381 | new_rev = mc_header.rev; |
385 | new_mc = mc; | 382 | new_mc = mc; |
386 | mc = NULL; /* trigger new vmalloc */ | 383 | mc = NULL; /* trigger new vmalloc */ |
@@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
390 | leftover -= mc_size; | 387 | leftover -= mc_size; |
391 | } | 388 | } |
392 | 389 | ||
393 | if (mc) | 390 | vfree(mc); |
394 | vfree(mc); | ||
395 | 391 | ||
396 | if (leftover) { | 392 | if (leftover) { |
397 | if (new_mc) | 393 | vfree(new_mc); |
398 | vfree(new_mc); | ||
399 | state = UCODE_ERROR; | 394 | state = UCODE_ERROR; |
400 | goto out; | 395 | goto out; |
401 | } | 396 | } |
@@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, | |||
405 | goto out; | 400 | goto out; |
406 | } | 401 | } |
407 | 402 | ||
408 | if (uci->mc) | 403 | vfree(uci->mc); |
409 | vfree(uci->mc); | ||
410 | uci->mc = (struct microcode_intel *)new_mc; | 404 | uci->mc = (struct microcode_intel *)new_mc; |
411 | 405 | ||
412 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", | 406 | pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", |
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c index 0e0cdde519be..a2bd899b2b83 100644 --- a/arch/x86/kernel/olpc.c +++ b/arch/x86/kernel/olpc.c | |||
@@ -114,6 +114,7 @@ int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, | |||
114 | unsigned long flags; | 114 | unsigned long flags; |
115 | int ret = -EIO; | 115 | int ret = -EIO; |
116 | int i; | 116 | int i; |
117 | int restarts = 0; | ||
117 | 118 | ||
118 | spin_lock_irqsave(&ec_lock, flags); | 119 | spin_lock_irqsave(&ec_lock, flags); |
119 | 120 | ||
@@ -169,7 +170,9 @@ restart: | |||
169 | if (wait_on_obf(0x6c, 1)) { | 170 | if (wait_on_obf(0x6c, 1)) { |
170 | printk(KERN_ERR "olpc-ec: timeout waiting for" | 171 | printk(KERN_ERR "olpc-ec: timeout waiting for" |
171 | " EC to provide data!\n"); | 172 | " EC to provide data!\n"); |
172 | goto restart; | 173 | if (restarts++ < 10) |
174 | goto restart; | ||
175 | goto err; | ||
173 | } | 176 | } |
174 | outbuf[i] = inb(0x68); | 177 | outbuf[i] = inb(0x68); |
175 | pr_devel("olpc-ec: received 0x%x\n", outbuf[i]); | 178 | pr_devel("olpc-ec: received 0x%x\n", outbuf[i]); |
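Note on the hunk above: the EC read path now gives up after ten timeouts instead of jumping back to the restart label forever. A small sketch of that bounded goto-retry pattern; timed_out() is a hypothetical stand-in for wait_on_obf() reporting a timeout.

#include <stdio.h>

static int attempts_left = 2;

/* Hypothetical stand-in for wait_on_obf(): nonzero means timed out. */
static int timed_out(void)
{
	return attempts_left-- > 0;
}

static int read_with_bounded_retries(void)
{
	int restarts = 0;

restart:
	if (timed_out()) {
		if (restarts++ < 10)
			goto restart;
		return -1;	/* give up instead of spinning forever */
	}
	return 42;		/* pretend we read a byte */
}

int main(void)
{
	printf("result: %d\n", read_with_bounded_retries());
	return 0;
}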
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e3af342fe83a..76a0d715a031 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -641,7 +641,7 @@ void native_machine_shutdown(void) | |||
641 | /* O.K Now that I'm on the appropriate processor, | 641 | /* O.K Now that I'm on the appropriate processor, |
642 | * stop all of the others. | 642 | * stop all of the others. |
643 | */ | 643 | */ |
644 | smp_send_stop(); | 644 | stop_other_cpus(); |
645 | #endif | 645 | #endif |
646 | 646 | ||
647 | lapic_shutdown(); | 647 | lapic_shutdown(); |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 74cca6014c0e..96af3a8e7326 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -174,10 +174,10 @@ asmlinkage void smp_reboot_interrupt(void) | |||
174 | irq_exit(); | 174 | irq_exit(); |
175 | } | 175 | } |
176 | 176 | ||
177 | static void native_smp_send_stop(void) | 177 | static void native_stop_other_cpus(int wait) |
178 | { | 178 | { |
179 | unsigned long flags; | 179 | unsigned long flags; |
180 | unsigned long wait; | 180 | unsigned long timeout; |
181 | 181 | ||
182 | if (reboot_force) | 182 | if (reboot_force) |
183 | return; | 183 | return; |
@@ -194,9 +194,12 @@ static void native_smp_send_stop(void) | |||
194 | if (num_online_cpus() > 1) { | 194 | if (num_online_cpus() > 1) { |
195 | apic->send_IPI_allbutself(REBOOT_VECTOR); | 195 | apic->send_IPI_allbutself(REBOOT_VECTOR); |
196 | 196 | ||
197 | /* Don't wait longer than a second */ | 197 | /* |
198 | wait = USEC_PER_SEC; | 198 | * Don't wait longer than a second if the caller |
199 | while (num_online_cpus() > 1 && wait--) | 199 | * didn't ask us to wait. |
200 | */ | ||
201 | timeout = USEC_PER_SEC; | ||
202 | while (num_online_cpus() > 1 && (wait || timeout--)) | ||
200 | udelay(1); | 203 | udelay(1); |
201 | } | 204 | } |
202 | 205 | ||
@@ -254,7 +257,7 @@ struct smp_ops smp_ops = { | |||
254 | .smp_prepare_cpus = native_smp_prepare_cpus, | 257 | .smp_prepare_cpus = native_smp_prepare_cpus, |
255 | .smp_cpus_done = native_smp_cpus_done, | 258 | .smp_cpus_done = native_smp_cpus_done, |
256 | 259 | ||
257 | .smp_send_stop = native_smp_send_stop, | 260 | .stop_other_cpus = native_stop_other_cpus, |
258 | .smp_send_reschedule = native_smp_send_reschedule, | 261 | .smp_send_reschedule = native_smp_send_reschedule, |
259 | 262 | ||
260 | .cpu_up = native_cpu_up, | 263 | .cpu_up = native_cpu_up, |
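Note on the hunk above: native_stop_other_cpus() bounds the spin to roughly one second only when the caller did not ask to wait; with wait set, the || short-circuits and the timeout counter is never decremented. A standalone sketch of that conditional-timeout loop, with busy() and tick() as hypothetical stand-ins for num_online_cpus() > 1 and udelay(1):

#include <stdio.h>

static int pending = 3;

static int busy(void)  { return pending > 0; }	/* other CPUs still online */
static void tick(void) { pending--; }		/* stand-in for udelay(1) */

/* Spin until done; give up after 'timeout' ticks unless wait is set. */
static void stop_others(int wait)
{
	unsigned long timeout = 5;	/* stands in for USEC_PER_SEC */

	while (busy() && (wait || timeout--))
		tick();
	printf("wait=%d, still busy=%d\n", wait, busy());
}

int main(void)
{
	stop_others(0);
	pending = 3;
	stop_others(1);
	return 0;
}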
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 8b3bfc4dd708..016179e5ba09 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -1383,11 +1383,94 @@ void play_dead_common(void) | |||
1383 | local_irq_disable(); | 1383 | local_irq_disable(); |
1384 | } | 1384 | } |
1385 | 1385 | ||
1386 | #define MWAIT_SUBSTATE_MASK 0xf | ||
1387 | #define MWAIT_SUBSTATE_SIZE 4 | ||
1388 | |||
1389 | #define CPUID_MWAIT_LEAF 5 | ||
1390 | #define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1 | ||
1391 | |||
1392 | /* | ||
1393 | * We need to flush the caches before going to sleep, lest we have | ||
1394 | * dirty data in our caches when we come back up. | ||
1395 | */ | ||
1396 | static inline void mwait_play_dead(void) | ||
1397 | { | ||
1398 | unsigned int eax, ebx, ecx, edx; | ||
1399 | unsigned int highest_cstate = 0; | ||
1400 | unsigned int highest_subcstate = 0; | ||
1401 | int i; | ||
1402 | void *mwait_ptr; | ||
1403 | |||
1404 | if (!cpu_has(¤t_cpu_data, X86_FEATURE_MWAIT)) | ||
1405 | return; | ||
1406 | if (!cpu_has(¤t_cpu_data, X86_FEATURE_CLFLSH)) | ||
1407 | return; | ||
1408 | if (current_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) | ||
1409 | return; | ||
1410 | |||
1411 | eax = CPUID_MWAIT_LEAF; | ||
1412 | ecx = 0; | ||
1413 | native_cpuid(&eax, &ebx, &ecx, &edx); | ||
1414 | |||
1415 | /* | ||
1416 | * eax will be 0 if EDX enumeration is not valid. | ||
1417 | * Initialized below to cstate, sub_cstate value when EDX is valid. | ||
1418 | */ | ||
1419 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { | ||
1420 | eax = 0; | ||
1421 | } else { | ||
1422 | edx >>= MWAIT_SUBSTATE_SIZE; | ||
1423 | for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { | ||
1424 | if (edx & MWAIT_SUBSTATE_MASK) { | ||
1425 | highest_cstate = i; | ||
1426 | highest_subcstate = edx & MWAIT_SUBSTATE_MASK; | ||
1427 | } | ||
1428 | } | ||
1429 | eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | | ||
1430 | (highest_subcstate - 1); | ||
1431 | } | ||
1432 | |||
1433 | /* | ||
1434 | * This should be a memory location in a cache line which is | ||
1435 | * unlikely to be touched by other processors. The actual | ||
1436 | * content is immaterial as it is not actually modified in any way. | ||
1437 | */ | ||
1438 | mwait_ptr = ¤t_thread_info()->flags; | ||
1439 | |||
1440 | wbinvd(); | ||
1441 | |||
1442 | while (1) { | ||
1443 | /* | ||
1444 | * The CLFLUSH is a workaround for erratum AAI65 for | ||
1445 | * the Xeon 7400 series. It's not clear it is actually | ||
1446 | * needed, but it should be harmless in either case. | ||
1447 | * The WBINVD is insufficient due to the spurious-wakeup | ||
1448 | * case where we return around the loop. | ||
1449 | */ | ||
1450 | clflush(mwait_ptr); | ||
1451 | __monitor(mwait_ptr, 0, 0); | ||
1452 | mb(); | ||
1453 | __mwait(eax, 0); | ||
1454 | } | ||
1455 | } | ||
1456 | |||
1457 | static inline void hlt_play_dead(void) | ||
1458 | { | ||
1459 | if (current_cpu_data.x86 >= 4) | ||
1460 | wbinvd(); | ||
1461 | |||
1462 | while (1) { | ||
1463 | native_halt(); | ||
1464 | } | ||
1465 | } | ||
1466 | |||
1386 | void native_play_dead(void) | 1467 | void native_play_dead(void) |
1387 | { | 1468 | { |
1388 | play_dead_common(); | 1469 | play_dead_common(); |
1389 | tboot_shutdown(TB_SHUTDOWN_WFS); | 1470 | tboot_shutdown(TB_SHUTDOWN_WFS); |
1390 | wbinvd_halt(); | 1471 | |
1472 | mwait_play_dead(); /* Only returns on failure */ | ||
1473 | hlt_play_dead(); | ||
1391 | } | 1474 | } |
1392 | 1475 | ||
1393 | #else /* ... !CONFIG_HOTPLUG_CPU */ | 1476 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
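Note on the hunk above: mwait_play_dead() decodes CPUID leaf 5, where EDX packs the number of MWAIT sub-states for each C-state into successive 4-bit fields, and builds the MWAIT hint for the deepest advertised state. The sketch below mirrors just that decode step, factored into a hypothetical deepest_mwait_hint() helper and run against a made-up EDX value instead of a real CPUID instruction.

#include <stdio.h>

#define MWAIT_SUBSTATE_MASK	0xf
#define MWAIT_SUBSTATE_SIZE	4

/* Decode CPUID.5:EDX (4-bit sub-state count per C-state) and build the
 * MWAIT hint (EAX) for the deepest advertised state, as the hunk does. */
static unsigned int deepest_mwait_hint(unsigned int edx)
{
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	edx >>= MWAIT_SUBSTATE_SIZE;	/* skip the C0 field */
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	return (highest_cstate << MWAIT_SUBSTATE_SIZE) |
	       (highest_subcstate - 1);
}

int main(void)
{
	/* Made-up EDX: C1 has 2 sub-states, C2 has 2, C3 has 4. */
	unsigned int edx = 0x00004220;

	printf("mwait hint: 0x%x\n", deepest_mwait_hint(edx));	/* 0x23 */
	return 0;
}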
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 60788dee0f8a..9f4edeb21323 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -575,6 +575,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) | |||
575 | if (regs->flags & X86_VM_MASK) { | 575 | if (regs->flags & X86_VM_MASK) { |
576 | handle_vm86_trap((struct kernel_vm86_regs *) regs, | 576 | handle_vm86_trap((struct kernel_vm86_regs *) regs, |
577 | error_code, 1); | 577 | error_code, 1); |
578 | preempt_conditional_cli(regs); | ||
578 | return; | 579 | return; |
579 | } | 580 | } |
580 | 581 | ||
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index 5ffb5622f793..61fb98519622 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c | |||
@@ -551,8 +551,14 @@ cannot_handle: | |||
551 | int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) | 551 | int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno) |
552 | { | 552 | { |
553 | if (VMPI.is_vm86pus) { | 553 | if (VMPI.is_vm86pus) { |
554 | if ((trapno == 3) || (trapno == 1)) | 554 | if ((trapno == 3) || (trapno == 1)) { |
555 | return_to_32bit(regs, VM86_TRAP + (trapno << 8)); | 555 | KVM86->regs32->ax = VM86_TRAP + (trapno << 8); |
556 | /* setting this flag forces the code in entry_32.S to | ||
557 | call save_v86_state() and change the stack pointer | ||
558 | to KVM86->regs32 */ | ||
559 | set_thread_flag(TIF_IRET); | ||
560 | return 0; | ||
561 | } | ||
556 | do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); | 562 | do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs)); |
557 | return 0; | 563 | return 0; |
558 | } | 564 | } |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index 9c253bd65e24..547128546cc3 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -394,7 +394,8 @@ static void __init setup_xstate_init(void) | |||
394 | * Setup init_xstate_buf to represent the init state of | 394 | * Setup init_xstate_buf to represent the init state of |
395 | * all the features managed by the xsave | 395 | * all the features managed by the xsave |
396 | */ | 396 | */ |
397 | init_xstate_buf = alloc_bootmem(xstate_size); | 397 | init_xstate_buf = alloc_bootmem_align(xstate_size, |
398 | __alignof__(struct xsave_struct)); | ||
398 | init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; | 399 | init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; |
399 | 400 | ||
400 | clts(); | 401 | clts(); |
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 4b7b73ce2098..9f163e61283c 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
@@ -570,6 +570,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm) | |||
570 | s->pics[1].elcr_mask = 0xde; | 570 | s->pics[1].elcr_mask = 0xde; |
571 | s->pics[0].pics_state = s; | 571 | s->pics[0].pics_state = s; |
572 | s->pics[1].pics_state = s; | 572 | s->pics[1].pics_state = s; |
573 | s->pics[0].isr_ack = 0xff; | ||
574 | s->pics[1].isr_ack = 0xff; | ||
573 | 575 | ||
574 | /* | 576 | /* |
575 | * Initialize PIO device | 577 | * Initialize PIO device |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 311f6dad8951..7fed5b793faf 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2254,6 +2254,10 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu) | |||
2254 | return 0; | 2254 | return 0; |
2255 | } | 2255 | } |
2256 | direct = !is_paging(vcpu); | 2256 | direct = !is_paging(vcpu); |
2257 | |||
2258 | if (mmu_check_root(vcpu, root_gfn)) | ||
2259 | return 1; | ||
2260 | |||
2257 | for (i = 0; i < 4; ++i) { | 2261 | for (i = 0; i < 4; ++i) { |
2258 | hpa_t root = vcpu->arch.mmu.pae_root[i]; | 2262 | hpa_t root = vcpu->arch.mmu.pae_root[i]; |
2259 | 2263 | ||
@@ -2265,13 +2269,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu) | |||
2265 | continue; | 2269 | continue; |
2266 | } | 2270 | } |
2267 | root_gfn = pdptr >> PAGE_SHIFT; | 2271 | root_gfn = pdptr >> PAGE_SHIFT; |
2272 | if (mmu_check_root(vcpu, root_gfn)) | ||
2273 | return 1; | ||
2268 | } else if (vcpu->arch.mmu.root_level == 0) | 2274 | } else if (vcpu->arch.mmu.root_level == 0) |
2269 | root_gfn = 0; | 2275 | root_gfn = 0; |
2270 | if (mmu_check_root(vcpu, root_gfn)) | ||
2271 | return 1; | ||
2272 | if (tdp_enabled) { | 2276 | if (tdp_enabled) { |
2273 | direct = 1; | 2277 | direct = 1; |
2274 | root_gfn = i << 30; | 2278 | root_gfn = i << (30 - PAGE_SHIFT); |
2275 | } | 2279 | } |
2276 | spin_lock(&vcpu->kvm->mmu_lock); | 2280 | spin_lock(&vcpu->kvm->mmu_lock); |
2277 | kvm_mmu_free_some_pages(vcpu); | 2281 | kvm_mmu_free_some_pages(vcpu); |
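Note on the hunk above: each PAE root covers 1 GB of guest physical space, and root_gfn is a frame number, so shifting the index by 30 produced a byte offset where a gfn was expected; shifting by (30 - PAGE_SHIFT) expresses the same 1 GB boundary in pages. A tiny arithmetic sketch of the difference:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	int i;

	/* Each PAE root maps 1 GB; its first guest frame number is the
	 * 1 GB boundary expressed in 4 KB pages, i.e. i << (30 - PAGE_SHIFT). */
	for (i = 0; i < 4; i++) {
		unsigned long gfn = (unsigned long)i << (30 - PAGE_SHIFT);

		printf("root %d starts at gfn 0x%lx (address 0x%lx)\n",
		       i, gfn, gfn << PAGE_SHIFT);
	}
	return 0;
}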
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 8a3f9f64f86f..e7c3f3bd08fc 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -88,6 +88,14 @@ struct nested_state { | |||
88 | /* A VMEXIT is required but not yet emulated */ | 88 | /* A VMEXIT is required but not yet emulated */ |
89 | bool exit_required; | 89 | bool exit_required; |
90 | 90 | ||
91 | /* | ||
92 | * If we vmexit during an instruction emulation we need this to restore | ||
93 | * the l1 guest rip after the emulation | ||
94 | */ | ||
95 | unsigned long vmexit_rip; | ||
96 | unsigned long vmexit_rsp; | ||
97 | unsigned long vmexit_rax; | ||
98 | |||
91 | /* cache for intercepts of the guest */ | 99 | /* cache for intercepts of the guest */ |
92 | u16 intercept_cr_read; | 100 | u16 intercept_cr_read; |
93 | u16 intercept_cr_write; | 101 | u16 intercept_cr_write; |
@@ -1206,8 +1214,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
1206 | if (old == new) { | 1214 | if (old == new) { |
1207 | /* cr0 write with ts and mp unchanged */ | 1215 | /* cr0 write with ts and mp unchanged */ |
1208 | svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; | 1216 | svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; |
1209 | if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) | 1217 | if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) { |
1218 | svm->nested.vmexit_rip = kvm_rip_read(vcpu); | ||
1219 | svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); | ||
1220 | svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX); | ||
1210 | return; | 1221 | return; |
1222 | } | ||
1211 | } | 1223 | } |
1212 | } | 1224 | } |
1213 | 1225 | ||
@@ -2399,6 +2411,23 @@ static int emulate_on_interception(struct vcpu_svm *svm) | |||
2399 | return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE; | 2411 | return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE; |
2400 | } | 2412 | } |
2401 | 2413 | ||
2414 | static int cr0_write_interception(struct vcpu_svm *svm) | ||
2415 | { | ||
2416 | struct kvm_vcpu *vcpu = &svm->vcpu; | ||
2417 | int r; | ||
2418 | |||
2419 | r = emulate_instruction(&svm->vcpu, 0, 0, 0); | ||
2420 | |||
2421 | if (svm->nested.vmexit_rip) { | ||
2422 | kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip); | ||
2423 | kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp); | ||
2424 | kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax); | ||
2425 | svm->nested.vmexit_rip = 0; | ||
2426 | } | ||
2427 | |||
2428 | return r == EMULATE_DONE; | ||
2429 | } | ||
2430 | |||
2402 | static int cr8_write_interception(struct vcpu_svm *svm) | 2431 | static int cr8_write_interception(struct vcpu_svm *svm) |
2403 | { | 2432 | { |
2404 | struct kvm_run *kvm_run = svm->vcpu.run; | 2433 | struct kvm_run *kvm_run = svm->vcpu.run; |
@@ -2672,7 +2701,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = { | |||
2672 | [SVM_EXIT_READ_CR4] = emulate_on_interception, | 2701 | [SVM_EXIT_READ_CR4] = emulate_on_interception, |
2673 | [SVM_EXIT_READ_CR8] = emulate_on_interception, | 2702 | [SVM_EXIT_READ_CR8] = emulate_on_interception, |
2674 | [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, | 2703 | [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, |
2675 | [SVM_EXIT_WRITE_CR0] = emulate_on_interception, | 2704 | [SVM_EXIT_WRITE_CR0] = cr0_write_interception, |
2676 | [SVM_EXIT_WRITE_CR3] = emulate_on_interception, | 2705 | [SVM_EXIT_WRITE_CR3] = emulate_on_interception, |
2677 | [SVM_EXIT_WRITE_CR4] = emulate_on_interception, | 2706 | [SVM_EXIT_WRITE_CR4] = emulate_on_interception, |
2678 | [SVM_EXIT_WRITE_CR8] = cr8_write_interception, | 2707 | [SVM_EXIT_WRITE_CR8] = cr8_write_interception, |
@@ -3252,6 +3281,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
3252 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; | 3281 | vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; |
3253 | 3282 | ||
3254 | load_host_msrs(vcpu); | 3283 | load_host_msrs(vcpu); |
3284 | kvm_load_ldt(ldt_selector); | ||
3255 | loadsegment(fs, fs_selector); | 3285 | loadsegment(fs, fs_selector); |
3256 | #ifdef CONFIG_X86_64 | 3286 | #ifdef CONFIG_X86_64 |
3257 | load_gs_index(gs_selector); | 3287 | load_gs_index(gs_selector); |
@@ -3259,7 +3289,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu) | |||
3259 | #else | 3289 | #else |
3260 | loadsegment(gs, gs_selector); | 3290 | loadsegment(gs, gs_selector); |
3261 | #endif | 3291 | #endif |
3262 | kvm_load_ldt(ldt_selector); | ||
3263 | 3292 | ||
3264 | reload_tss(vcpu); | 3293 | reload_tss(vcpu); |
3265 | 3294 | ||
@@ -3354,6 +3383,14 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu) | |||
3354 | static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) | 3383 | static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) |
3355 | { | 3384 | { |
3356 | switch (func) { | 3385 | switch (func) { |
3386 | case 0x00000001: | ||
3387 | /* Mask out xsave bit as long as it is not supported by SVM */ | ||
3388 | entry->ecx &= ~(bit(X86_FEATURE_XSAVE)); | ||
3389 | break; | ||
3390 | case 0x80000001: | ||
3391 | if (nested) | ||
3392 | entry->ecx |= (1 << 2); /* Set SVM bit */ | ||
3393 | break; | ||
3357 | case 0x8000000A: | 3394 | case 0x8000000A: |
3358 | entry->eax = 1; /* SVM revision 1 */ | 3395 | entry->eax = 1; /* SVM revision 1 */ |
3359 | entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper | 3396 | entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 7bddfab12013..b3986fec7e68 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -828,10 +828,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu) | |||
828 | #endif | 828 | #endif |
829 | 829 | ||
830 | #ifdef CONFIG_X86_64 | 830 | #ifdef CONFIG_X86_64 |
831 | if (is_long_mode(&vmx->vcpu)) { | 831 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
832 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); | 832 | if (is_long_mode(&vmx->vcpu)) |
833 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | 833 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); |
834 | } | ||
835 | #endif | 834 | #endif |
836 | for (i = 0; i < vmx->save_nmsrs; ++i) | 835 | for (i = 0; i < vmx->save_nmsrs; ++i) |
837 | kvm_set_shared_msr(vmx->guest_msrs[i].index, | 836 | kvm_set_shared_msr(vmx->guest_msrs[i].index, |
@@ -846,23 +845,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) | |||
846 | 845 | ||
847 | ++vmx->vcpu.stat.host_state_reload; | 846 | ++vmx->vcpu.stat.host_state_reload; |
848 | vmx->host_state.loaded = 0; | 847 | vmx->host_state.loaded = 0; |
849 | if (vmx->host_state.fs_reload_needed) | 848 | #ifdef CONFIG_X86_64 |
850 | loadsegment(fs, vmx->host_state.fs_sel); | 849 | if (is_long_mode(&vmx->vcpu)) |
850 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | ||
851 | #endif | ||
851 | if (vmx->host_state.gs_ldt_reload_needed) { | 852 | if (vmx->host_state.gs_ldt_reload_needed) { |
852 | kvm_load_ldt(vmx->host_state.ldt_sel); | 853 | kvm_load_ldt(vmx->host_state.ldt_sel); |
853 | #ifdef CONFIG_X86_64 | 854 | #ifdef CONFIG_X86_64 |
854 | load_gs_index(vmx->host_state.gs_sel); | 855 | load_gs_index(vmx->host_state.gs_sel); |
855 | wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs); | ||
856 | #else | 856 | #else |
857 | loadsegment(gs, vmx->host_state.gs_sel); | 857 | loadsegment(gs, vmx->host_state.gs_sel); |
858 | #endif | 858 | #endif |
859 | } | 859 | } |
860 | if (vmx->host_state.fs_reload_needed) | ||
861 | loadsegment(fs, vmx->host_state.fs_sel); | ||
860 | reload_tss(); | 862 | reload_tss(); |
861 | #ifdef CONFIG_X86_64 | 863 | #ifdef CONFIG_X86_64 |
862 | if (is_long_mode(&vmx->vcpu)) { | 864 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); |
863 | rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); | ||
864 | wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); | ||
865 | } | ||
866 | #endif | 865 | #endif |
867 | if (current_thread_info()->status & TS_USEDFPU) | 866 | if (current_thread_info()->status & TS_USEDFPU) |
868 | clts(); | 867 | clts(); |
@@ -4249,11 +4248,6 @@ static int vmx_get_lpage_level(void) | |||
4249 | return PT_PDPE_LEVEL; | 4248 | return PT_PDPE_LEVEL; |
4250 | } | 4249 | } |
4251 | 4250 | ||
4252 | static inline u32 bit(int bitno) | ||
4253 | { | ||
4254 | return 1 << (bitno & 31); | ||
4255 | } | ||
4256 | |||
4257 | static void vmx_cpuid_update(struct kvm_vcpu *vcpu) | 4251 | static void vmx_cpuid_update(struct kvm_vcpu *vcpu) |
4258 | { | 4252 | { |
4259 | struct kvm_cpuid_entry2 *best; | 4253 | struct kvm_cpuid_entry2 *best; |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3a09c625d526..a5746de6f402 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -153,11 +153,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { | |||
153 | 153 | ||
154 | u64 __read_mostly host_xcr0; | 154 | u64 __read_mostly host_xcr0; |
155 | 155 | ||
156 | static inline u32 bit(int bitno) | ||
157 | { | ||
158 | return 1 << (bitno & 31); | ||
159 | } | ||
160 | |||
161 | static void kvm_on_user_return(struct user_return_notifier *urn) | 156 | static void kvm_on_user_return(struct user_return_notifier *urn) |
162 | { | 157 | { |
163 | unsigned slot; | 158 | unsigned slot; |
@@ -1994,9 +1989,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
1994 | 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX); | 1989 | 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX); |
1995 | /* cpuid 0x80000001.ecx */ | 1990 | /* cpuid 0x80000001.ecx */ |
1996 | const u32 kvm_supported_word6_x86_features = | 1991 | const u32 kvm_supported_word6_x86_features = |
1997 | F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ | | 1992 | F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ | |
1998 | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | | 1993 | F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) | |
1999 | F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) | | 1994 | F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(XOP) | |
2000 | 0 /* SKINIT */ | 0 /* WDT */; | 1995 | 0 /* SKINIT */ | 0 /* WDT */; |
2001 | 1996 | ||
2002 | /* all calls to cpuid_count() should be made on the same cpu */ | 1997 | /* all calls to cpuid_count() should be made on the same cpu */ |
@@ -2305,6 +2300,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, | |||
2305 | !kvm_exception_is_soft(vcpu->arch.exception.nr); | 2300 | !kvm_exception_is_soft(vcpu->arch.exception.nr); |
2306 | events->exception.nr = vcpu->arch.exception.nr; | 2301 | events->exception.nr = vcpu->arch.exception.nr; |
2307 | events->exception.has_error_code = vcpu->arch.exception.has_error_code; | 2302 | events->exception.has_error_code = vcpu->arch.exception.has_error_code; |
2303 | events->exception.pad = 0; | ||
2308 | events->exception.error_code = vcpu->arch.exception.error_code; | 2304 | events->exception.error_code = vcpu->arch.exception.error_code; |
2309 | 2305 | ||
2310 | events->interrupt.injected = | 2306 | events->interrupt.injected = |
@@ -2318,12 +2314,14 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, | |||
2318 | events->nmi.injected = vcpu->arch.nmi_injected; | 2314 | events->nmi.injected = vcpu->arch.nmi_injected; |
2319 | events->nmi.pending = vcpu->arch.nmi_pending; | 2315 | events->nmi.pending = vcpu->arch.nmi_pending; |
2320 | events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); | 2316 | events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); |
2317 | events->nmi.pad = 0; | ||
2321 | 2318 | ||
2322 | events->sipi_vector = vcpu->arch.sipi_vector; | 2319 | events->sipi_vector = vcpu->arch.sipi_vector; |
2323 | 2320 | ||
2324 | events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | 2321 | events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING |
2325 | | KVM_VCPUEVENT_VALID_SIPI_VECTOR | 2322 | | KVM_VCPUEVENT_VALID_SIPI_VECTOR |
2326 | | KVM_VCPUEVENT_VALID_SHADOW); | 2323 | | KVM_VCPUEVENT_VALID_SHADOW); |
2324 | memset(&events->reserved, 0, sizeof(events->reserved)); | ||
2327 | } | 2325 | } |
2328 | 2326 | ||
2329 | static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, | 2327 | static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, |
@@ -2366,6 +2364,7 @@ static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, | |||
2366 | dbgregs->dr6 = vcpu->arch.dr6; | 2364 | dbgregs->dr6 = vcpu->arch.dr6; |
2367 | dbgregs->dr7 = vcpu->arch.dr7; | 2365 | dbgregs->dr7 = vcpu->arch.dr7; |
2368 | dbgregs->flags = 0; | 2366 | dbgregs->flags = 0; |
2367 | memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); | ||
2369 | } | 2368 | } |
2370 | 2369 | ||
2371 | static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, | 2370 | static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, |
@@ -2849,6 +2848,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps) | |||
2849 | sizeof(ps->channels)); | 2848 | sizeof(ps->channels)); |
2850 | ps->flags = kvm->arch.vpit->pit_state.flags; | 2849 | ps->flags = kvm->arch.vpit->pit_state.flags; |
2851 | mutex_unlock(&kvm->arch.vpit->pit_state.lock); | 2850 | mutex_unlock(&kvm->arch.vpit->pit_state.lock); |
2851 | memset(&ps->reserved, 0, sizeof(ps->reserved)); | ||
2852 | return r; | 2852 | return r; |
2853 | } | 2853 | } |
2854 | 2854 | ||
@@ -2912,10 +2912,6 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
2912 | struct kvm_memslots *slots, *old_slots; | 2912 | struct kvm_memslots *slots, *old_slots; |
2913 | unsigned long *dirty_bitmap; | 2913 | unsigned long *dirty_bitmap; |
2914 | 2914 | ||
2915 | spin_lock(&kvm->mmu_lock); | ||
2916 | kvm_mmu_slot_remove_write_access(kvm, log->slot); | ||
2917 | spin_unlock(&kvm->mmu_lock); | ||
2918 | |||
2919 | r = -ENOMEM; | 2915 | r = -ENOMEM; |
2920 | dirty_bitmap = vmalloc(n); | 2916 | dirty_bitmap = vmalloc(n); |
2921 | if (!dirty_bitmap) | 2917 | if (!dirty_bitmap) |
@@ -2937,6 +2933,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | |||
2937 | dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; | 2933 | dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap; |
2938 | kfree(old_slots); | 2934 | kfree(old_slots); |
2939 | 2935 | ||
2936 | spin_lock(&kvm->mmu_lock); | ||
2937 | kvm_mmu_slot_remove_write_access(kvm, log->slot); | ||
2938 | spin_unlock(&kvm->mmu_lock); | ||
2939 | |||
2940 | r = -EFAULT; | 2940 | r = -EFAULT; |
2941 | if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { | 2941 | if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) { |
2942 | vfree(dirty_bitmap); | 2942 | vfree(dirty_bitmap); |
@@ -3229,6 +3229,7 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
3229 | now_ns = timespec_to_ns(&now); | 3229 | now_ns = timespec_to_ns(&now); |
3230 | user_ns.clock = kvm->arch.kvmclock_offset + now_ns; | 3230 | user_ns.clock = kvm->arch.kvmclock_offset + now_ns; |
3231 | user_ns.flags = 0; | 3231 | user_ns.flags = 0; |
3232 | memset(&user_ns.pad, 0, sizeof(user_ns.pad)); | ||
3232 | 3233 | ||
3233 | r = -EFAULT; | 3234 | r = -EFAULT; |
3234 | if (copy_to_user(argp, &user_ns, sizeof(user_ns))) | 3235 | if (copy_to_user(argp, &user_ns, sizeof(user_ns))) |
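
The pad/reserved zeroing added in the hunks above all follows one rule: any structure copied out to user space must have its padding and reserved fields explicitly cleared, otherwise uninitialized kernel stack bytes leak to the caller. Below is a minimal sketch of that pattern; the names (demo_events, demo_fill_events) are hypothetical stand-ins, not the real KVM ioctl structures.

#include <string.h>

/* Hypothetical stand-in for an ioctl result structure with ABI padding. */
struct demo_events {
	unsigned int injected;
	unsigned int flags;
	unsigned int pad;          /* must read back as zero */
	unsigned int reserved[8];  /* must read back as zero */
};

static void demo_fill_events(struct demo_events *ev, unsigned int injected)
{
	ev->injected = injected;
	ev->flags = 0;
	/* Clear everything the caller can see but we do not set. */
	ev->pad = 0;
	memset(&ev->reserved, 0, sizeof(ev->reserved));
}
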
@@ -5111,6 +5112,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |||
5111 | 5112 | ||
5112 | mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; | 5113 | mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; |
5113 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); | 5114 | kvm_x86_ops->set_cr4(vcpu, sregs->cr4); |
5115 | if (sregs->cr4 & X86_CR4_OSXSAVE) | ||
5116 | update_cpuid(vcpu); | ||
5114 | if (!is_long_mode(vcpu) && is_pae(vcpu)) { | 5117 | if (!is_long_mode(vcpu) && is_pae(vcpu)) { |
5115 | load_pdptrs(vcpu, vcpu->arch.cr3); | 5118 | load_pdptrs(vcpu, vcpu->arch.cr3); |
5116 | mmu_reset_needed = 1; | 5119 | mmu_reset_needed = 1; |
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index b7a404722d2b..0bf327453499 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
@@ -65,6 +65,11 @@ static inline int is_paging(struct kvm_vcpu *vcpu) | |||
65 | return kvm_read_cr0_bits(vcpu, X86_CR0_PG); | 65 | return kvm_read_cr0_bits(vcpu, X86_CR0_PG); |
66 | } | 66 | } |
67 | 67 | ||
68 | static inline u32 bit(int bitno) | ||
69 | { | ||
70 | return 1 << (bitno & 31); | ||
71 | } | ||
72 | |||
68 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); | 73 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); |
69 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); | 74 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); |
70 | 75 | ||
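
The bit() helper added to x86.h builds a 32-bit mask from a bit number, masking the shift count with "& 31" so it always stays in range. A small stand-alone sketch of how such a helper is used to build and test feature masks; the bit positions below are illustrative only, not taken from the KVM CPUID tables.

#include <stdio.h>

static inline unsigned int bit(int bitno)
{
	return 1u << (bitno & 31);   /* shift count always stays in 0..31 */
}

int main(void)
{
	unsigned int ecx = 0;

	ecx |= bit(26);              /* pretend the feature at bit 26 is advertised */

	if (ecx & bit(26))
		printf("feature at bit 26 present\n");
	if (!(ecx & bit(5)))
		printf("feature at bit 5 absent\n");
	return 0;
}
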
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c index b67a6b5aa8d4..42623310c968 100644 --- a/arch/x86/oprofile/op_model_amd.c +++ b/arch/x86/oprofile/op_model_amd.c | |||
@@ -484,21 +484,29 @@ static int __init_ibs_nmi(void) | |||
484 | return 0; | 484 | return 0; |
485 | } | 485 | } |
486 | 486 | ||
487 | /* initialize the APIC for the IBS interrupts if available */ | 487 | /* |
488 | * check and reserve APIC extended interrupt LVT offset for IBS if | ||
489 | * available | ||
490 | * | ||
491 | * init_ibs() performs implicitly cpu-local operations, so pin this | ||
492 | * thread to its current CPU | ||
493 | */ | ||
494 | |||
488 | static void init_ibs(void) | 495 | static void init_ibs(void) |
489 | { | 496 | { |
490 | ibs_caps = get_ibs_caps(); | 497 | preempt_disable(); |
491 | 498 | ||
499 | ibs_caps = get_ibs_caps(); | ||
492 | if (!ibs_caps) | 500 | if (!ibs_caps) |
493 | return; | 501 | goto out; |
494 | 502 | ||
495 | if (__init_ibs_nmi()) { | 503 | if (__init_ibs_nmi() < 0) |
496 | ibs_caps = 0; | 504 | ibs_caps = 0; |
497 | return; | 505 | else |
498 | } | 506 | printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps); |
499 | 507 | ||
500 | printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", | 508 | out: |
501 | (unsigned)ibs_caps); | 509 | preempt_enable(); |
502 | } | 510 | } |
503 | 511 | ||
504 | static int (*create_arch_files)(struct super_block *sb, struct dentry *root); | 512 | static int (*create_arch_files)(struct super_block *sb, struct dentry *root); |
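
The rewritten init_ibs() above pins the probe to one CPU by disabling preemption around it, so the implicitly CPU-local accesses cannot migrate mid-way, and the single exit label keeps the preempt count balanced on every path. A sketch of that shape, assuming kernel context; probe_hw_caps() and reserve_hw_irq() are hypothetical placeholders, not oprofile APIs.

#include <linux/preempt.h>
#include <linux/printk.h>

static unsigned int hw_caps;

static unsigned int probe_hw_caps(void);  /* hypothetical CPU-local probe */
static int reserve_hw_irq(void);          /* hypothetical interrupt reservation */

static void init_hw(void)
{
	preempt_disable();                /* pin this thread to its current CPU */

	hw_caps = probe_hw_caps();
	if (!hw_caps)
		goto out;

	if (reserve_hw_irq() < 0)
		hw_caps = 0;              /* reservation failed, disable the feature */
	else
		pr_info("hw caps detected (0x%08x)\n", hw_caps);
out:
	preempt_enable();                 /* single exit re-enables preemption */
}
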
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 4a2afa1bac51..b6552b189bcd 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile | |||
@@ -25,7 +25,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y) | |||
25 | 25 | ||
26 | export CPPFLAGS_vdso.lds += -P -C | 26 | export CPPFLAGS_vdso.lds += -P -C |
27 | 27 | ||
28 | VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \ | 28 | VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \ |
29 | -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 | 29 | -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 |
30 | 30 | ||
31 | $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so | 31 | $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so |
@@ -69,7 +69,7 @@ vdso32.so-$(VDSO32-y) += sysenter | |||
69 | vdso32-images = $(vdso32.so-y:%=vdso32-%.so) | 69 | vdso32-images = $(vdso32.so-y:%=vdso32-%.so) |
70 | 70 | ||
71 | CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) | 71 | CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds) |
72 | VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1 | 72 | VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1 |
73 | 73 | ||
74 | # This makes sure the $(obj) subdirectory exists even though vdso32/ | 74 | # This makes sure the $(obj) subdirectory exists even though vdso32/ |
75 | # is not a kbuild sub-make subdirectory. | 75 | # is not a kbuild sub-make subdirectory. |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 7d46c8441418..0f6cd146f1ee 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1017,10 +1017,6 @@ static void xen_reboot(int reason) | |||
1017 | { | 1017 | { |
1018 | struct sched_shutdown r = { .reason = reason }; | 1018 | struct sched_shutdown r = { .reason = reason }; |
1019 | 1019 | ||
1020 | #ifdef CONFIG_SMP | ||
1021 | smp_send_stop(); | ||
1022 | #endif | ||
1023 | |||
1024 | if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) | 1020 | if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) |
1025 | BUG(); | 1021 | BUG(); |
1026 | } | 1022 | } |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 25f232b18a82..f4d010031465 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -400,9 +400,9 @@ static void stop_self(void *v) | |||
400 | BUG(); | 400 | BUG(); |
401 | } | 401 | } |
402 | 402 | ||
403 | static void xen_smp_send_stop(void) | 403 | static void xen_stop_other_cpus(int wait) |
404 | { | 404 | { |
405 | smp_call_function(stop_self, NULL, 0); | 405 | smp_call_function(stop_self, NULL, wait); |
406 | } | 406 | } |
407 | 407 | ||
408 | static void xen_smp_send_reschedule(int cpu) | 408 | static void xen_smp_send_reschedule(int cpu) |
@@ -470,7 +470,7 @@ static const struct smp_ops xen_smp_ops __initdata = { | |||
470 | .cpu_disable = xen_cpu_disable, | 470 | .cpu_disable = xen_cpu_disable, |
471 | .play_dead = xen_play_dead, | 471 | .play_dead = xen_play_dead, |
472 | 472 | ||
473 | .smp_send_stop = xen_smp_send_stop, | 473 | .stop_other_cpus = xen_stop_other_cpus, |
474 | .smp_send_reschedule = xen_smp_send_reschedule, | 474 | .smp_send_reschedule = xen_smp_send_reschedule, |
475 | 475 | ||
476 | .send_call_func_ipi = xen_smp_send_call_function_ipi, | 476 | .send_call_func_ipi = xen_smp_send_call_function_ipi, |