Diffstat (limited to 'arch')
26 files changed, 110 insertions, 185 deletions
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 1bec96e85196..42ff90b46dfb 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -352,3 +352,4 @@ struct pci_bus * __init it8152_pci_scan_bus(int nr, struct pci_sys_data *sys)
 return pci_scan_bus(nr, &it8152_ops, sys);
 }
 
+EXPORT_SYMBOL(dma_set_coherent_mask);
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 21fa272301f8..b2f95c72287c 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -76,6 +76,7 @@ extern unsigned long it8152_base_address;
 IT8152_PD_IRQ(0) Audio controller (ACR)
 */
 #define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
+#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40)
 
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT 9
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index 1fc684e70ab6..7080e2c8fa62 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -25,9 +25,6 @@ extern void *kmap_high(struct page *page);
 extern void *kmap_high_get(struct page *page);
 extern void kunmap_high(struct page *page);
 
-extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
-extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
-
 /*
 * The following functions are already defined by <linux/highmem.h>
 * when CONFIG_HIGHMEM is not set.
diff --git a/arch/arm/include/asm/sizes.h b/arch/arm/include/asm/sizes.h
index 4fc1565e4f93..316bb2b2be3d 100644
--- a/arch/arm/include/asm/sizes.h
+++ b/arch/arm/include/asm/sizes.h
@@ -13,9 +13,6 @@
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
-/* DO NOT EDIT!! - this file automatically generated
- * from .s file by awk -f s2h.awk
- */
 /* Size definitions
 * Copyright (C) ARM Limited 1998. All rights reserved.
 */
@@ -25,6 +22,9 @@
 
 /* handy sizes */
 #define SZ_16 0x00000010
+#define SZ_32 0x00000020
+#define SZ_64 0x00000040
+#define SZ_128 0x00000080
 #define SZ_256 0x00000100
 #define SZ_512 0x00000200
 
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 1120f18a6b17..80025948b8ad 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -150,6 +150,7 @@ extern unsigned int user_debug;
 #define rmb() dmb()
 #define wmb() mb()
 #else
+#include <asm/memory.h>
 #define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
 #define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 8bfa98757cd2..80bf8cd88d7c 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -29,6 +29,9 @@ ret_fast_syscall:
 ldr r1, [tsk, #TI_FLAGS]
 tst r1, #_TIF_WORK_MASK
 bne fast_work_pending
+#if defined(CONFIG_IRQSOFF_TRACER)
+asm_trace_hardirqs_on
+#endif
 
 /* perform architecture specific actions before user return */
 arch_ret_to_user r1, lr
@@ -65,6 +68,9 @@ ret_slow_syscall:
 tst r1, #_TIF_WORK_MASK
 bne work_pending
 no_work_pending:
+#if defined(CONFIG_IRQSOFF_TRACER)
+asm_trace_hardirqs_on
+#endif
 /* perform architecture specific actions before user return */
 arch_ret_to_user r1, lr
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8c1959590252..9066473c0ebc 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -310,7 +310,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 * All kernel threads share the same mm context; grab a
 * reference and switch to it.
 */
-atomic_inc(&mm->mm_users);
 atomic_inc(&mm->mm_count);
 current->active_mm = mm;
 cpumask_set_cpu(cpu, mm_cpumask(mm));
diff --git a/arch/arm/mach-at91/include/mach/at91_mci.h b/arch/arm/mach-at91/include/mach/at91_mci.h
index 57f8ee154943..27ac6f550fe3 100644
--- a/arch/arm/mach-at91/include/mach/at91_mci.h
+++ b/arch/arm/mach-at91/include/mach/at91_mci.h
@@ -74,6 +74,8 @@
 #define AT91_MCI_TRTYP_BLOCK (0 << 19)
 #define AT91_MCI_TRTYP_MULTIPLE (1 << 19)
 #define AT91_MCI_TRTYP_STREAM (2 << 19)
+#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19)
+#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19)
 
 #define AT91_MCI_BLKR 0x18 /* Block Register */
 #define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index 24498a932ba6..a54b3db80366 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -513,4 +513,4 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 
 EXPORT_SYMBOL(ixp4xx_pci_read);
 EXPORT_SYMBOL(ixp4xx_pci_write);
-
+EXPORT_SYMBOL(dma_set_coherent_mask);
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index dd235ecc9d6c..c93e73d54dd1 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -540,6 +540,7 @@ config MACH_ICONTROL
 config ARCH_PXA_ESERIES
 bool "PXA based Toshiba e-series PDAs"
 select PXA25x
+select FB_W100
 
 config MACH_E330
 bool "Toshiba e330"
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S
index 52c30b01a671..ae008110db4e 100644
--- a/arch/arm/mach-pxa/sleep.S
+++ b/arch/arm/mach-pxa/sleep.S
@@ -353,8 +353,8 @@ resume_turn_on_mmu:
 
 @ Let us ensure we jump to resume_after_mmu only when the mcr above
 @ actually took effect. They call it the "cpwait" operation.
-mrc p15, 0, r1, c2, c0, 0 @ queue a dependency on CP15
-sub pc, r2, r1, lsr #32 @ jump to virtual addr
+mrc p15, 0, r0, c2, c0, 0 @ queue a dependency on CP15
+sub pc, r2, r0, lsr #32 @ jump to virtual addr
 nop
 nop
 nop
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 6e77c042d8e9..e0b0e7a4ec68 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,13 +13,9 @@
 */
 
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
-#include "mm.h"
 
 /*
 * Low-level cache maintenance operations.
@@ -39,27 +35,30 @@
 * between which we don't want to be preempted.
 */
 
-static inline unsigned long l2_start_va(unsigned long paddr)
+static inline unsigned long l2_get_va(unsigned long paddr)
 {
 #ifdef CONFIG_HIGHMEM
 /*
-* Let's do our own fixmap stuff in a minimal way here.
 * Because range ops can't be done on physical addresses,
 * we simply install a virtual mapping for it only for the
 * TLB lookup to occur, hence no need to flush the untouched
-* memory mapping. This is protected with the disabling of
-* interrupts by the caller.
+* memory mapping afterwards (note: a cache flush may happen
+* in some circumstances depending on the path taken in kunmap_atomic).
 */
-unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
-local_flush_tlb_kernel_page(vaddr);
-return vaddr + (paddr & ~PAGE_MASK);
+void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
+return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
 #else
 return __phys_to_virt(paddr);
 #endif
 }
 
+static inline void l2_put_va(unsigned long vaddr)
+{
+#ifdef CONFIG_HIGHMEM
+kunmap_atomic((void *)vaddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
 __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
@@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
 */
 BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-raw_local_irq_save(flags);
-va_start = l2_start_va(start);
+va_start = l2_get_va(start);
 va_end = va_start + (end - start);
+raw_local_irq_save(flags);
 __asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
 "mcr p15, 1, %1, c15, c9, 5"
 : : "r" (va_start), "r" (va_end));
 raw_local_irq_restore(flags);
+l2_put_va(va_start);
 }
 
 static inline void l2_clean_inv_pa(unsigned long addr)
@@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
 */
 BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-raw_local_irq_save(flags);
-va_start = l2_start_va(start);
+va_start = l2_get_va(start);
 va_end = va_start + (end - start);
+raw_local_irq_save(flags);
 __asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
 "mcr p15, 1, %1, c15, c11, 5"
 : : "r" (va_start), "r" (va_end));
 raw_local_irq_restore(flags);
+l2_put_va(va_start);
 }
 
 static inline void l2_inv_all(void)
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index c3154928bccd..5a32020471e3 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,14 +17,10 @@
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
 
 #define CR_L2 (1 << 26)
 
@@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
 dsb();
 }
 
+static inline void l2_unmap_va(unsigned long va)
+{
 #ifdef CONFIG_HIGHMEM
-#define l2_map_save_flags(x) raw_local_save_flags(x)
-#define l2_map_restore_flags(x) raw_local_irq_restore(x)
-#else
-#define l2_map_save_flags(x) ((x) = 0)
-#define l2_map_restore_flags(x) ((void)(x))
+if (va != -1)
+kunmap_atomic((void *)va);
 #endif
+}
 
-static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
-unsigned long flags)
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
 {
 #ifdef CONFIG_HIGHMEM
 unsigned long va = prev_va & PAGE_MASK;
@@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 /*
 * Switching to a new page. Because cache ops are
 * using virtual addresses only, we must put a mapping
-* in place for it. We also enable interrupts for a
-* short while and disable them again to protect this
-* mapping.
+* in place for it.
 */
-unsigned long idx;
-raw_local_irq_restore(flags);
-idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-raw_local_irq_restore(flags | PSR_I_BIT);
-set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
-local_flush_tlb_kernel_page(va);
+l2_unmap_va(prev_va);
+va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
 }
 return va + (pa_offset >> (32 - PAGE_SHIFT));
 #else
@@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
-unsigned long vaddr, flags;
+unsigned long vaddr;
 
 if (start == 0 && end == -1ul) {
 xsc3_l2_inv_all();
@@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 }
 
 vaddr = -1; /* to force the first mapping */
-l2_map_save_flags(flags);
 
 /*
 * Clean and invalidate partial first cache line.
 */
 if (start & (CACHE_LINE_SIZE - 1)) {
-vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
 xsc3_l2_clean_mva(vaddr);
 xsc3_l2_inv_mva(vaddr);
 start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 * Invalidate all full cache lines between 'start' and 'end'.
 */
 while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
-vaddr = l2_map_va(start, vaddr, flags);
+vaddr = l2_map_va(start, vaddr);
 xsc3_l2_inv_mva(vaddr);
 start += CACHE_LINE_SIZE;
 }
@@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 * Clean and invalidate partial last cache line.
 */
 if (start < end) {
-vaddr = l2_map_va(start, vaddr, flags);
+vaddr = l2_map_va(start, vaddr);
 xsc3_l2_clean_mva(vaddr);
 xsc3_l2_inv_mva(vaddr);
 }
 
-l2_map_restore_flags(flags);
+l2_unmap_va(vaddr);
 
 dsb();
 }
 
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
-unsigned long vaddr, flags;
+unsigned long vaddr;
 
 vaddr = -1; /* to force the first mapping */
-l2_map_save_flags(flags);
 
 start &= ~(CACHE_LINE_SIZE - 1);
 while (start < end) {
-vaddr = l2_map_va(start, vaddr, flags);
+vaddr = l2_map_va(start, vaddr);
 xsc3_l2_clean_mva(vaddr);
 start += CACHE_LINE_SIZE;
 }
 
-l2_map_restore_flags(flags);
+l2_unmap_va(vaddr);
 
 dsb();
 }
@@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
 
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
-unsigned long vaddr, flags;
+unsigned long vaddr;
 
 if (start == 0 && end == -1ul) {
 xsc3_l2_flush_all();
@@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 }
 
 vaddr = -1; /* to force the first mapping */
-l2_map_save_flags(flags);
 
 start &= ~(CACHE_LINE_SIZE - 1);
 while (start < end) {
-vaddr = l2_map_va(start, vaddr, flags);
+vaddr = l2_map_va(start, vaddr);
 xsc3_l2_clean_mva(vaddr);
 xsc3_l2_inv_mva(vaddr);
 start += CACHE_LINE_SIZE;
 }
 
-l2_map_restore_flags(flags);
+l2_unmap_va(vaddr);
 
 dsb();
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ac6a36142fcd..809f1bf9fa29 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 op(vaddr, len, dir);
 kunmap_high(page);
 } else if (cache_is_vipt()) {
-pte_t saved_pte;
-vaddr = kmap_high_l1_vipt(page, &saved_pte);
+/* unmapped pages might still be cached */
+vaddr = kmap_atomic(page);
 op(vaddr + offset, len, dir);
-kunmap_high_l1_vipt(page, saved_pte);
+kunmap_atomic(vaddr);
 }
 } else {
 vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 391ffae75098..c29f2839f1d2 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
 kunmap_high(page);
 } else if (cache_is_vipt()) {
-pte_t saved_pte;
-addr = kmap_high_l1_vipt(page, &saved_pte);
+/* unmapped pages might still be cached */
+addr = kmap_atomic(page);
 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-kunmap_high_l1_vipt(page, saved_pte);
+kunmap_atomic(addr);
 }
 }
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c435fd9e1da9..807c0573abbe 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
 pte = TOP_PTE(vaddr);
 return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
-* The VIVT cache of a highmem page is always flushed before the page
-* is unmapped. Hence unmapped highmem pages need no cache maintenance
-* in that case.
-*
-* However unmapped pages may still be cached with a VIPT cache, and
-* it is not possible to perform cache maintenance on them using physical
-* addresses unfortunately. So we have no choice but to set up a temporary
-* virtual mapping for that purpose.
-*
-* Yet this VIPT cache maintenance may be triggered from DMA support
-* functions which are possibly called from interrupt context. As we don't
-* want to keep interrupt disabled all the time when such maintenance is
-* taking place, we therefore allow for some reentrancy by preserving and
-* restoring the previous fixmap entry before the interrupted context is
-* resumed. If the reentrancy depth is 0 then there is no need to restore
-* the previous fixmap, and leaving the current one in place allow it to
-* be reused the next time without a TLB flush (common with DMA).
-*/
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-unsigned int idx, cpu;
-int *depth;
-unsigned long vaddr, flags;
-pte_t pte, *ptep;
-
-if (!in_interrupt())
-preempt_disable();
-
-cpu = smp_processor_id();
-depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-ptep = TOP_PTE(vaddr);
-pte = mk_pte(page, kmap_prot);
-
-raw_local_irq_save(flags);
-(*depth)++;
-if (pte_val(*ptep) == pte_val(pte)) {
-*saved_pte = pte;
-} else {
-*saved_pte = *ptep;
-set_pte_ext(ptep, pte, 0);
-local_flush_tlb_kernel_page(vaddr);
-}
-raw_local_irq_restore(flags);
-
-return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-unsigned int idx, cpu = smp_processor_id();
-int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-unsigned long vaddr, flags;
-pte_t pte, *ptep;
-
-idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-ptep = TOP_PTE(vaddr);
-pte = mk_pte(page, kmap_prot);
-
-BUG_ON(pte_val(*ptep) != pte_val(pte));
-BUG_ON(*depth <= 0);
-
-raw_local_irq_save(flags);
-(*depth)--;
-if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-set_pte_ext(ptep, saved_pte, 0);
-local_flush_tlb_kernel_page(vaddr);
-}
-raw_local_irq_restore(flags);
-
-if (!in_interrupt())
-preempt_enable();
-}
-
-#endif /* CONFIG_CPU_CACHE_VIPT */
diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c
index c2e44597c22b..ac11754ecec5 100644
--- a/arch/mn10300/kernel/irq.c
+++ b/arch/mn10300/kernel/irq.c
@@ -459,7 +459,7 @@ void migrate_irqs(void)
 tmp = CROSS_GxICR(irq, new);
 
 x &= GxICR_LEVEL | GxICR_ENABLE;
-if (GxICR(irq) & GxICR_REQUEST) {
+if (GxICR(irq) & GxICR_REQUEST)
 x |= GxICR_REQUEST | GxICR_DETECT;
 CROSS_GxICR(irq, new) = x;
 tmp = CROSS_GxICR(irq, new);
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index fea833e18ad5..e0d703c7fdf7 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -63,6 +63,7 @@
 #include <linux/of_gpio.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/fs.h>
 #include <linux/watchdog.h>
 #include <linux/miscdevice.h>
 #include <linux/uaccess.h>
diff --git a/arch/sh/boards/mach-se/7206/irq.c b/arch/sh/boards/mach-se/7206/irq.c
index d961949600fd..9070d7e60704 100644
--- a/arch/sh/boards/mach-se/7206/irq.c
+++ b/arch/sh/boards/mach-se/7206/irq.c
@@ -140,7 +140,7 @@ void __init init_se7206_IRQ(void)
 make_se7206_irq(IRQ1_IRQ); /* ATA */
 make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
 
-__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR); /* ICR1 */
+__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
 
 /* FPGA System register setup*/
 __raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
index b26264dc2aef..c509c40cba4b 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -34,7 +34,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
 
 static void master_clk_init(struct clk *clk)
 {
-return 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
+clk->rate = 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
 }
 
 static struct clk_ops sh7201_master_clk_ops = {
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index b601fa3978d1..6282a839e08e 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -81,8 +81,7 @@ static void shoc_clk_init(struct clk *clk)
 for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) {
 int divisor = frqcr3_divisors[i];
 
-if (clk->ops->set_rate(clk, clk->parent->rate /
-divisor, 0) == 0)
+if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0)
 break;
 }
 
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
index dcb65cc0a053..1a1b606d3e92 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/microcode_intel.c
@@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 
 /* For performance reasons, reuse mc area when possible */
 if (!mc || mc_size > curr_mc_size) {
-if (mc)
-vfree(mc);
+vfree(mc);
 mc = vmalloc(mc_size);
 if (!mc)
 break;
@@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 
 if (get_ucode_data(mc, ucode_ptr, mc_size) ||
 microcode_sanity_check(mc) < 0) {
-vfree(mc);
 break;
 }
 
 if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
-if (new_mc)
-vfree(new_mc);
+vfree(new_mc);
 new_rev = mc_header.rev;
 new_mc = mc;
 mc = NULL; /* trigger new vmalloc */
@@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 leftover -= mc_size;
 }
 
-if (mc)
-vfree(mc);
+vfree(mc);
 
 if (leftover) {
-if (new_mc)
-vfree(new_mc);
+vfree(new_mc);
 state = UCODE_ERROR;
 goto out;
 }
@@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 goto out;
 }
 
-if (uci->mc)
-vfree(uci->mc);
+vfree(uci->mc);
 uci->mc = (struct microcode_intel *)new_mc;
 
 pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 85268f8eadf6..a0f52af256a0 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -501,7 +501,18 @@ static inline unsigned long long get_total_mem(void)
 return total << PAGE_SHIFT;
 }
 
-#define DEFAULT_BZIMAGE_ADDR_MAX 0x37FFFFFF
+/*
+* Keep the crash kernel below this limit. On 32 bits earlier kernels
+* would limit the kernel to the low 512 MiB due to mapping restrictions.
+* On 64 bits, kexec-tools currently limits us to 896 MiB; increase this
+* limit once kexec-tools are fixed.
+*/
+#ifdef CONFIG_X86_32
+# define CRASH_KERNEL_ADDR_MAX (512 << 20)
+#else
+# define CRASH_KERNEL_ADDR_MAX (896 << 20)
+#endif
+
 static void __init reserve_crashkernel(void)
 {
 unsigned long long total_mem;
@@ -520,10 +531,10 @@ static void __init reserve_crashkernel(void)
 const unsigned long long alignment = 16<<20; /* 16M */
 
 /*
-* kexec want bzImage is below DEFAULT_BZIMAGE_ADDR_MAX
+* kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
 */
 crash_base = memblock_find_in_range(alignment,
-DEFAULT_BZIMAGE_ADDR_MAX, crash_size, alignment);
+CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
 
 if (crash_base == MEMBLOCK_ERROR) {
 pr_info("crashkernel reservation failed - No suitable area found.\n");
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index f628234fbeca..3cece05e4ac4 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -575,6 +575,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
 s->pics[1].elcr_mask = 0xde;
 s->pics[0].pics_state = s;
 s->pics[1].pics_state = s;
+s->pics[0].isr_ack = 0xff;
+s->pics[1].isr_ack = 0xff;
 
 /*
 * Initialize PIO device
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fb8b376bf28c..fbb04aee8301 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2394,7 +2394,8 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
 ASSERT(!VALID_PAGE(root));
 spin_lock(&vcpu->kvm->mmu_lock);
 kvm_mmu_free_some_pages(vcpu);
-sp = kvm_mmu_get_page(vcpu, i << 30, i << 30,
+sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
+i << 30,
 PT32_ROOT_LEVEL, 1, ACC_ALL,
 NULL);
 root = __pa(sp->spt);
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index a011bcc0f943..7d90d47655ba 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -630,21 +630,29 @@ static int __init_ibs_nmi(void)
 return 0;
 }
 
-/* initialize the APIC for the IBS interrupts if available */
+/*
+* check and reserve APIC extended interrupt LVT offset for IBS if
+* available
+*
+* init_ibs() preforms implicitly cpu-local operations, so pin this
+* thread to its current CPU
+*/
+
 static void init_ibs(void)
 {
-ibs_caps = get_ibs_caps();
+preempt_disable();
 
+ibs_caps = get_ibs_caps();
 if (!ibs_caps)
-return;
+goto out;
 
-if (__init_ibs_nmi()) {
+if (__init_ibs_nmi() < 0)
 ibs_caps = 0;
-return;
-}
+else
+printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
 
-printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
-(unsigned)ibs_caps);
+out:
+preempt_enable();
 }
 
 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);