47 files changed, 2534 insertions(+), 413 deletions(-)
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 5d7a16eab312..af71d38c8e41 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -189,9 +189,21 @@ callback_init(void * kernel_end)
 
 	if (alpha_using_srm) {
 		static struct vm_struct console_remap_vm;
-		unsigned long vaddr = VMALLOC_START;
+		unsigned long nr_pages = 0;
+		unsigned long vaddr;
 		unsigned long i, j;
 
+		/* calculate needed size */
+		for (i = 0; i < crb->map_entries; ++i)
+			nr_pages += crb->map[i].count;
+
+		/* register the vm area */
+		console_remap_vm.flags = VM_ALLOC;
+		console_remap_vm.size = nr_pages << PAGE_SHIFT;
+		vm_area_register_early(&console_remap_vm, PAGE_SIZE);
+
+		vaddr = (unsigned long)console_remap_vm.addr;
+
 		/* Set up the third level PTEs and update the virtual
 		   addresses of the CRB entries. */
 		for (i = 0; i < crb->map_entries; ++i) {
@@ -213,12 +225,6 @@ callback_init(void * kernel_end)
 				vaddr += PAGE_SIZE;
 			}
 		}
-
-		/* Let vmalloc know that we've allocated some space. */
-		console_remap_vm.flags = VM_ALLOC;
-		console_remap_vm.addr = (void *) VMALLOC_START;
-		console_remap_vm.size = vaddr - VMALLOC_START;
-		vmlist = &console_remap_vm;
 	}
 
 	callback_init_done = 1;
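
For context, the alpha hunk above switches callback_init() from open-coding a vmlist entry after the fact to computing the needed size first and letting the generic helper pick the address. A minimal sketch of that size-first pattern, assuming the mm/vmalloc.c helper this series relies on, vm_area_register_early(), which assigns vm->addr in the vmalloc range and links the area into vmlist (the function name register_early_area is hypothetical):

	/* Sketch only: size must be filled in before registration. */
	static struct vm_struct early_vm;

	static void __init register_early_area(unsigned long nr_pages)
	{
		early_vm.flags = VM_ALLOC;
		early_vm.size = nr_pages << PAGE_SHIFT;
		vm_area_register_early(&early_vm, PAGE_SIZE);
		/* early_vm.addr is now valid and the range is reserved */
	}

The old code could get the size wrong because it derived it from a vaddr cursor after the fact; registering up front makes the reservation visible to other early users of the vmalloc range.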
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index b189680d18b0..05fe3053dcae 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -181,7 +181,7 @@ source "kernel/Kconfig.preempt"
 config QUICKLIST
 	def_bool y
 
-config HAVE_ARCH_BOOTMEM_NODE
+config HAVE_ARCH_BOOTMEM
 	def_bool n
 
 config ARCH_HAVE_MEMORY_PRESENT
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 469f3450bf81..31758378bcd2 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -138,6 +138,9 @@ config ARCH_HAS_CACHE_LINE_SIZE
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
+config HAVE_DYNAMIC_PER_CPU_AREA
+	def_bool y
+
 config HAVE_CPUMASK_OF_CPU_MAP
 	def_bool X86_64_SMP
 
@@ -780,6 +783,11 @@ config X86_MCE_AMD
 	  Additional support for AMD specific MCE features such as
 	  the DRAM Error Threshold.
 
+config X86_MCE_THRESHOLD
+	depends on X86_MCE_AMD || X86_MCE_INTEL
+	bool
+	default y
+
 config X86_MCE_NONFATAL
 	tristate "Check for non-fatal errors on AMD Athlon/Duron / Intel Pentium 4"
 	depends on X86_32 && X86_MCE
@@ -1125,7 +1133,7 @@ config NODES_SHIFT
 	  Specify the maximum number of NUMA Nodes available on the target
 	  system.  Increases memory reserved to accomodate various tables.
 
-config HAVE_ARCH_BOOTMEM_NODE
+config HAVE_ARCH_BOOTMEM
 	def_bool y
 	depends on X86_32 && NUMA
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 63134e31e8b9..bc9514fb3b13 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -53,6 +53,7 @@
 #define APIC_ESR_SENDILL	0x00020
 #define APIC_ESR_RECVILL	0x00040
 #define APIC_ESR_ILLREGA	0x00080
+#define APIC_LVTCMCI	0x2f0
 #define APIC_ICR	0x300
 #define APIC_DEST_SELF	0x40000
 #define APIC_DEST_ALLINC	0x80000
diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
index 2f8466540fb5..5b301b7ff5f4 100644
--- a/arch/x86/include/asm/cacheflush.h
+++ b/arch/x86/include/asm/cacheflush.h
@@ -5,24 +5,43 @@
 #include <linux/mm.h>
 
 /* Caches aren't brain-dead on the intel. */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma, pg)		do { } while (0)
-#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
+static inline void flush_cache_all(void) { }
+static inline void flush_cache_mm(struct mm_struct *mm) { }
+static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end) { }
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr, unsigned long pfn) { }
+static inline void flush_dcache_page(struct page *page) { }
+static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
+static inline void flush_icache_range(unsigned long start,
+				      unsigned long end) { }
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page) { }
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+					   struct page *page,
+					   unsigned long addr,
+					   unsigned long len) { }
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
+static inline void flush_cache_vunmap(unsigned long start,
+				      unsigned long end) { }
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
-	memcpy((dst), (src), (len))
-#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
-	memcpy((dst), (src), (len))
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+				     struct page *page, unsigned long vaddr,
+				     void *dst, const void *src,
+				     unsigned long len)
+{
+	memcpy(dst, src, len);
+}
+
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+				       struct page *page, unsigned long vaddr,
+				       void *dst, const void *src,
+				       unsigned long len)
+{
+	memcpy(dst, src, len);
+}
 
 #define PG_non_WB				PG_arch_1
 PAGEFLAG(NonWB, non_WB)
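
The point of converting these no-op macros to empty static inline functions is argument type checking at zero runtime cost: the empty bodies still compile away, but a caller passing the wrong pointer type now fails to build instead of silently doing nothing. A sketch of the difference, with a hypothetical buggy caller:

	/*
	 * Illustration only. With the old macro the argument was never
	 * looked at, so this bug compiled cleanly:
	 *
	 *	struct mm_struct *mm = current->mm;
	 *	flush_dcache_page(mm);	// macro: expands to do { } while (0)
	 *
	 * With the static inline version the same call is rejected at
	 * compile time ("incompatible pointer type"), since a
	 * struct page * is required -- and the generated code is still
	 * empty.
	 */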
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index 48f0004db8c9..71c9e5183982 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -172,7 +172,13 @@ static inline void __save_init_fpu(struct task_struct *tsk)
 
 #else  /* CONFIG_X86_32 */
 
-extern void finit(void);
+#ifdef CONFIG_MATH_EMULATION
+extern void finit_task(struct task_struct *tsk);
+#else
+static inline void finit_task(struct task_struct *tsk)
+{
+}
+#endif
 
 static inline void tolerant_fwait(void)
 {
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 683d0b4c00fc..e5383e3d2f8c 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -172,8 +172,6 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 
 extern void iounmap(volatile void __iomem *addr);
 
-extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
-
 
 #ifdef CONFIG_X86_32
 # include "io_32.h"
@@ -198,7 +196,6 @@ extern void early_ioremap_reset(void);
 extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
 extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
-extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
 #define IO_SPACE_LIMIT 0xffff
 
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 32c6e17b960b..563933e06a35 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -11,6 +11,8 @@
  */
 
 #define MCG_CTL_P	 (1UL<<8)   /* MCG_CAP register available */
+#define MCG_EXT_P	 (1ULL<<9)  /* Extended registers available */
+#define MCG_CMCI_P	 (1ULL<<10) /* CMCI supported */
 
 #define MCG_STATUS_RIPV  (1UL<<0)   /* restart ip valid */
 #define MCG_STATUS_EIPV  (1UL<<1)   /* ip points to correct instruction */
@@ -90,14 +92,29 @@ extern int mce_disabled;
 
 #include <asm/atomic.h>
 
+void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, device_mce);
 extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
+/*
+ * To support more than 128 would need to escape the predefined
+ * Linux defined extended banks first.
+ */
+#define MAX_NR_BANKS (MCE_EXTENDED_BANK - 1)
+
 #ifdef CONFIG_X86_MCE_INTEL
 void mce_intel_feature_init(struct cpuinfo_x86 *c);
+void cmci_clear(void);
+void cmci_reenable(void);
+void cmci_rediscover(int dying);
+void cmci_recheck(void);
 #else
 static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
+static inline void cmci_clear(void) {}
+static inline void cmci_reenable(void) {}
+static inline void cmci_rediscover(int dying) {}
+static inline void cmci_recheck(void) {}
 #endif
 
 #ifdef CONFIG_X86_MCE_AMD
@@ -106,11 +123,23 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c);
 static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
 #endif
 
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status);
+extern int mce_available(struct cpuinfo_x86 *c);
+
+void mce_log_therm_throt_event(__u64 status);
 
 extern atomic_t mce_entry;
 
 extern void do_machine_check(struct pt_regs *, long);
+
+typedef DECLARE_BITMAP(mce_banks_t, MAX_NR_BANKS);
+DECLARE_PER_CPU(mce_banks_t, mce_poll_banks);
+
+enum mcp_flags {
+	MCP_TIMESTAMP = (1 << 0),	/* log time stamp */
+	MCP_UC = (1 << 1),		/* log uncorrected errors */
+};
+extern void machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
+
 extern int mce_notify_user(void);
 
 #endif /* !CONFIG_X86_32 */
@@ -120,8 +149,8 @@ extern void mcheck_init(struct cpuinfo_x86 *c);
 #else
 #define mcheck_init(c) do { } while (0)
 #endif
-extern void stop_mce(void);
-extern void restart_mce(void);
+
+extern void (*mce_threshold_vector)(void);
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_X86_MCE_H */
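
The new polling interface takes a bank bitmap, so different callers can scan different subsets of banks. A rough sketch of how a caller drives it, mirroring the boot-time "log events left over from before reset" use in mce_64.c further down (the wrapper name poll_all_banks_once is illustrative):

	/*
	 * Sketch: poll every bank once, logging even uncorrected
	 * leftovers (MCP_UC). Periodic polling would pass MCP_TIMESTAMP
	 * instead and typically a per-CPU subset of banks.
	 */
	static void poll_all_banks_once(void)
	{
		mce_banks_t all_banks;

		bitmap_fill(all_banks, MAX_NR_BANKS);
		machine_check_poll(MCP_UC, &all_banks);
	}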
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 105fb90a0635..ede6998bd92c 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -91,46 +91,9 @@ static inline int pfn_valid(int pfn)
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-
-/*
- * Following are macros that are specific to this numa platform.
- */
-#define reserve_bootmem(addr, size, flags) \
-	reserve_bootmem_node(NODE_DATA(0), (addr), (size), (flags))
-#define alloc_bootmem(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_nopanic(x) \
-	__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
-				__pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
-#define alloc_bootmem_pages(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_pages_nopanic(x) \
-	__alloc_bootmem_node_nopanic(NODE_DATA(0), (x), PAGE_SIZE, \
-				__pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
-#define alloc_bootmem_node(pgdat, x) \
-({ \
-	struct pglist_data  __maybe_unused \
-			*__alloc_bootmem_node__pgdat = (pgdat); \
-	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, \
-				__pa(MAX_DMA_ADDRESS)); \
-})
-#define alloc_bootmem_pages_node(pgdat, x) \
-({ \
-	struct pglist_data  __maybe_unused \
-			*__alloc_bootmem_node__pgdat = (pgdat); \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, \
-				__pa(MAX_DMA_ADDRESS)); \
-})
-#define alloc_bootmem_low_pages_node(pgdat, x) \
-({ \
-	struct pglist_data  __maybe_unused \
-			*__alloc_bootmem_node__pgdat = (pgdat); \
-	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0); \
-})
+/* always use node 0 for bootmem on this numa platform */
+#define bootmem_arch_preferred_node(__bdata, size, align, goal, limit)	\
+	(NODE_DATA(0)->bdata)
 #endif /* CONFIG_NEED_MULTIPLE_NODES */
 
 #endif /* _ASM_X86_MMZONE_32_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 358acc59ae04..2dbd2314139e 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -77,6 +77,11 @@
 #define MSR_IA32_MC0_ADDR		0x00000402
 #define MSR_IA32_MC0_MISC		0x00000403
 
+/* These are consecutive and not in the normal 4-MSRs-per-bank MCE block */
+#define MSR_IA32_MC0_CTL2		0x00000280
+#define CMCI_EN				(1ULL << 30)
+#define CMCI_THRESHOLD_MASK		0xffffULL
+
 #define MSR_P6_PERFCTR0			0x000000c1
 #define MSR_P6_PERFCTR1			0x000000c2
 #define MSR_P6_EVNTSEL0			0x00000186
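
Unlike CTL/STATUS/ADDR/MISC, which stride four MSRs per bank, the CTL2 registers sit in their own consecutive block starting at 0x280, so bank n's CTL2 is simply MSR_IA32_MC0_CTL2 + n. A hedged sketch of how enabling CMCI on one bank could look (the real discovery logic lives in mce_intel_64.c, which this series adds, and picks its own threshold; cmci_enable_bank is hypothetical):

	/* Sketch only: arm CMCI on bank n with a threshold of one event. */
	static void cmci_enable_bank(int n)
	{
		u64 val;

		rdmsrl(MSR_IA32_MC0_CTL2 + n, val);
		val &= ~CMCI_THRESHOLD_MASK;	/* clear old threshold field */
		val |= CMCI_EN | 1;		/* interrupt per corrected event */
		wrmsrl(MSR_IA32_MC0_CTL2 + n, val);
	}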
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index aee103b26d01..8f1d2fbec1d4 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -43,6 +43,14 @@
 #else /* ...!ASSEMBLY */
 
 #include <linux/stringify.h>
+#include <asm/sections.h>
+
+#define __addr_to_pcpu_ptr(addr)					\
+	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr	\
+		 + (unsigned long)__per_cpu_start)
+#define __pcpu_ptr_to_addr(ptr)						\
+	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr	\
+		 - (unsigned long)__per_cpu_start)
 
 #ifdef CONFIG_SMP
 #define __percpu_arg(x)		"%%"__stringify(__percpu_seg)":%P" #x
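
These two macros rebase an address between the dynamically allocated per-cpu chunk (based at pcpu_base_addr) and the canonical per-cpu pointer space anchored at __per_cpu_start, and are exact inverses of each other. A standalone sketch of the arithmetic with made-up base addresses (both constants below are assumptions, not real kernel values):

	#include <stdio.h>

	int main(void)
	{
		unsigned long pcpu_base_addr = 0x1000000UL;   /* assumed chunk base */
		unsigned long per_cpu_start  = 0x8000000UL;   /* assumed section start */
		unsigned long addr = pcpu_base_addr + 0x40;   /* object in the chunk */

		/* __addr_to_pcpu_ptr: rebase onto the per-cpu segment */
		unsigned long ptr = addr - pcpu_base_addr + per_cpu_start;
		/* __pcpu_ptr_to_addr undoes it exactly */
		unsigned long back = ptr + pcpu_base_addr - per_cpu_start;

		printf("offset 0x%lx, round-trip ok: %d\n",
		       ptr - per_cpu_start, back == addr);
		return 0;
	}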
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 1c097a3a6669..d0812e155f1d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -288,6 +288,8 @@ static inline int is_new_memtype_allowed(unsigned long flags,
 	return 1;
 }
 
+pmd_t *populate_extra_pmd(unsigned long vaddr);
+pte_t *populate_extra_pte(unsigned long vaddr);
 #endif	/* __ASSEMBLY__ */
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 4bd990ee43df..1a918dde46b5 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -164,6 +164,7 @@ static inline pte_t __pte_ma(pteval_t x)
 
 
 xmaddr_t arbitrary_virt_to_machine(void *address);
+unsigned long arbitrary_virt_to_mfn(void *vaddr);
 void make_lowmem_page_readonly(void *vaddr);
 void make_lowmem_page_readwrite(void *vaddr);
 
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 6907b8e85d52..4c80f1557433 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -414,9 +414,17 @@ void __init alternative_instructions(void)
 	   that might execute the to be patched code.
 	   Other CPUs are not running. */
 	stop_nmi();
-#ifdef CONFIG_X86_MCE
-	stop_mce();
-#endif
+
+	/*
+	 * Don't stop machine check exceptions while patching.
+	 * MCEs only happen when something got corrupted and in this
+	 * case we must do something about the corruption.
+	 * Ignoring it is worse than an unlikely patching race.
+	 * Also machine checks tend to be broadcast and if one CPU
+	 * goes into machine check the others follow quickly, so we don't
+	 * expect a machine check to cause undue problems during code
+	 * patching.
+	 */
 
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
@@ -456,9 +464,6 @@ void __init alternative_instructions(void)
 		(unsigned long)__smp_locks_end);
 
 	restart_nmi();
-#ifdef CONFIG_X86_MCE
-	restart_mce();
-#endif
 }
 
 /**
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f9cecdfd05c5..30909a258d0f 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -46,6 +46,7 @@
 #include <asm/idle.h>
 #include <asm/mtrr.h>
 #include <asm/smp.h>
+#include <asm/mce.h>
 
 unsigned int num_processors;
 
@@ -842,6 +843,14 @@ void clear_local_APIC(void)
 		apic_write(APIC_LVTTHMR, v | APIC_LVT_MASKED);
 	}
 #endif
+#ifdef CONFIG_X86_MCE_INTEL
+	if (maxlvt >= 6) {
+		v = apic_read(APIC_LVTCMCI);
+		if (!(v & APIC_LVT_MASKED))
+			apic_write(APIC_LVTCMCI, v | APIC_LVT_MASKED);
+	}
+#endif
+
 	/*
 	 * Clean APIC state for other OSs:
 	 */
@@ -1241,6 +1250,12 @@ void __cpuinit setup_local_APIC(void)
 	apic_write(APIC_LVT1, value);
 
 	preempt_enable();
+
+#ifdef CONFIG_X86_MCE_INTEL
+	/* Recheck CMCI information after local APIC is up on CPU #0 */
+	if (smp_processor_id() == 0)
+		cmci_recheck();
+#endif
 }
 
 void __cpuinit end_local_APIC_setup(void)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4b1c319d30c3..22590cf688ae 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -601,7 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	if (!data)
 		return -ENOMEM;
 
-	data->acpi_data = percpu_ptr(acpi_perf_data, cpu);
+	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
 	per_cpu(drv_data, cpu) = data;
 
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index d7d2323bbb69..b2f89829bbe8 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_X86_32)	+= k7.o p4.o p5.o p6.o winchip.o
 obj-$(CONFIG_X86_MCE_INTEL)	+= mce_intel_64.o
 obj-$(CONFIG_X86_MCE_AMD)	+= mce_amd_64.o
 obj-$(CONFIG_X86_MCE_NONFATAL)	+= non-fatal.o
+obj-$(CONFIG_X86_MCE_THRESHOLD) += threshold.o
diff --git a/arch/x86/kernel/cpu/mcheck/mce_32.c b/arch/x86/kernel/cpu/mcheck/mce_32.c
index dfaebce3633e..3552119b091d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_32.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_32.c
@@ -60,20 +60,6 @@ void mcheck_init(struct cpuinfo_x86 *c)
 	}
 }
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-	old_cr4 = read_cr4();
-	clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-	if (old_cr4 & X86_CR4_MCE)
-		set_in_cr4(X86_CR4_MCE);
-}
-
 static int __init mcheck_disable(char *str)
 {
 	mce_disabled = 1;
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index fe79985ce0f2..bfbd5323a635 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -3,6 +3,8 @@
  * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs.
  * Rest from unknown author(s).
  * 2004 Andi Kleen. Rewrote most of it.
+ * Copyright 2008 Intel Corporation
+ * Author: Andi Kleen
 */
 
 #include <linux/init.h>
@@ -24,6 +26,9 @@
 #include <linux/ctype.h>
 #include <linux/kmod.h>
 #include <linux/kdebug.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/ratelimit.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
@@ -32,7 +37,6 @@
 #include <asm/idle.h>
 
 #define MISC_MCELOG_MINOR 227
-#define NR_SYSFS_BANKS 6
 
 atomic_t mce_entry;
 
@@ -47,7 +51,7 @@ static int mce_dont_init;
  */
 static int tolerant = 1;
 static int banks;
-static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
+static u64 *bank;
 static unsigned long notify_user;
 static int rip_msr;
 static int mce_bootlog = -1;
@@ -58,6 +62,19 @@ static char *trigger_argv[2] = { trigger, NULL };
 
 static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
 
+/* MCA banks polled by the period polling timer for corrected events */
+DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
+	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
+};
+
+/* Do initial initialization of a struct mce */
+void mce_setup(struct mce *m)
+{
+	memset(m, 0, sizeof(struct mce));
+	m->cpu = smp_processor_id();
+	rdtscll(m->tsc);
+}
+
 /*
  * Lockless MCE logging infrastructure.
  * This avoids deadlocks on printk locks without having to break locks. Also
@@ -119,11 +136,11 @@ static void print_mce(struct mce *m)
 		print_symbol("{%s}", m->ip);
 		printk("\n");
 	}
-	printk(KERN_EMERG "TSC %Lx ", m->tsc);
+	printk(KERN_EMERG "TSC %llx ", m->tsc);
 	if (m->addr)
-		printk("ADDR %Lx ", m->addr);
+		printk("ADDR %llx ", m->addr);
 	if (m->misc)
-		printk("MISC %Lx ", m->misc);
+		printk("MISC %llx ", m->misc);
 	printk("\n");
 	printk(KERN_EMERG "This is not a software problem!\n");
 	printk(KERN_EMERG "Run through mcelog --ascii to decode "
@@ -149,8 +166,10 @@ static void mce_panic(char *msg, struct mce *backup, unsigned long start)
 	panic(msg);
 }
 
-static int mce_available(struct cpuinfo_x86 *c)
+int mce_available(struct cpuinfo_x86 *c)
 {
+	if (mce_dont_init)
+		return 0;
 	return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA);
 }
 
@@ -172,7 +191,77 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 }
 
 /*
- * The actual machine check handler
+ * Poll for corrected events or events that happened before reset.
+ * Those are just logged through /dev/mcelog.
+ *
+ * This is executed in standard interrupt context.
+ */
+void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
+{
+	struct mce m;
+	int i;
+
+	mce_setup(&m);
+
+	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
+	for (i = 0; i < banks; i++) {
+		if (!bank[i] || !test_bit(i, *b))
+			continue;
+
+		m.misc = 0;
+		m.addr = 0;
+		m.bank = i;
+		m.tsc = 0;
+
+		barrier();
+		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
+		if (!(m.status & MCI_STATUS_VAL))
+			continue;
+
+		/*
+		 * Uncorrected events are handled by the exception handler
+		 * when it is enabled. But when the exception is disabled log
+		 * everything.
+		 *
+		 * TBD do the same check for MCI_STATUS_EN here?
+		 */
+		if ((m.status & MCI_STATUS_UC) && !(flags & MCP_UC))
+			continue;
+
+		if (m.status & MCI_STATUS_MISCV)
+			rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc);
+		if (m.status & MCI_STATUS_ADDRV)
+			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
+
+		if (!(flags & MCP_TIMESTAMP))
+			m.tsc = 0;
+		/*
+		 * Don't get the IP here because it's unlikely to
+		 * have anything to do with the actual error location.
+		 */
+
+		mce_log(&m);
+		add_taint(TAINT_MACHINE_CHECK);
+
+		/*
+		 * Clear state for this bank.
+		 */
+		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+	}
+
+	/*
+	 * Don't clear MCG_STATUS here because it's only defined for
+	 * exceptions.
+	 */
+}
+
+/*
+ * The actual machine check handler. This only handles real
+ * exceptions when something got corrupted coming in through int 18.
+ *
+ * This is executed in NMI context not subject to normal locking rules. This
+ * implies that most kernel services cannot be safely used. Don't even
+ * think about putting a printk in there!
 */
 void do_machine_check(struct pt_regs * regs, long error_code)
 {
@@ -190,17 +279,18 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	 * error.
 	 */
 	int kill_it = 0;
+	DECLARE_BITMAP(toclear, MAX_NR_BANKS);
 
 	atomic_inc(&mce_entry);
 
-	if ((regs
-	     && notify_die(DIE_NMI, "machine check", regs, error_code,
+	if (notify_die(DIE_NMI, "machine check", regs, error_code,
 			   18, SIGKILL) == NOTIFY_STOP)
-	    || !banks)
+		goto out2;
+	if (!banks)
 		goto out2;
 
-	memset(&m, 0, sizeof(struct mce));
-	m.cpu = smp_processor_id();
+	mce_setup(&m);
+
 	rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus);
 	/* if the restart IP is not valid, we're done for */
 	if (!(m.mcgstatus & MCG_STATUS_RIPV))
@@ -210,18 +300,32 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	barrier();
 
 	for (i = 0; i < banks; i++) {
-		if (i < NR_SYSFS_BANKS && !bank[i])
+		__clear_bit(i, toclear);
+		if (!bank[i])
 			continue;
 
 		m.misc = 0;
 		m.addr = 0;
 		m.bank = i;
-		m.tsc = 0;
 
 		rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status);
 		if ((m.status & MCI_STATUS_VAL) == 0)
 			continue;
 
+		/*
+		 * Non uncorrected errors are handled by machine_check_poll.
+		 * Leave them alone.
+		 */
+		if ((m.status & MCI_STATUS_UC) == 0)
+			continue;
+
+		/*
+		 * Set taint even when machine check was not enabled.
+		 */
+		add_taint(TAINT_MACHINE_CHECK);
+
+		__set_bit(i, toclear);
+
 		if (m.status & MCI_STATUS_EN) {
 			/* if PCC was set, there's no way out */
 			no_way_out |= !!(m.status & MCI_STATUS_PCC);
@@ -235,6 +339,12 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 				no_way_out = 1;
 				kill_it = 1;
 			}
+		} else {
+			/*
+			 * Machine check event was not enabled. Clear, but
+			 * ignore.
+			 */
+			continue;
 		}
 
 		if (m.status & MCI_STATUS_MISCV)
@@ -243,10 +353,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 			rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr);
 
 		mce_get_rip(&m, regs);
-		if (error_code >= 0)
-			rdtscll(m.tsc);
-		if (error_code != -2)
-			mce_log(&m);
+		mce_log(&m);
 
 		/* Did this bank cause the exception? */
 		/* Assume that the bank with uncorrectable errors did it,
@@ -255,14 +362,8 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 			panicm = m;
 			panicm_found = 1;
 		}
-
-		add_taint(TAINT_MACHINE_CHECK);
 	}
 
-	/* Never do anything final in the polling timer */
-	if (!regs)
-		goto out;
-
 	/* If we didn't find an uncorrectable error, pick
 	   the last one (shouldn't happen, just being safe). */
 	if (!panicm_found)
@@ -309,10 +410,11 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	/* notify userspace ASAP */
 	set_thread_flag(TIF_MCE_NOTIFY);
 
- out:
 	/* the last thing we do is clear state */
-	for (i = 0; i < banks; i++)
-		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+	for (i = 0; i < banks; i++) {
+		if (test_bit(i, toclear))
+			wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
+	}
 	wrmsrl(MSR_IA32_MCG_STATUS, 0);
  out2:
 	atomic_dec(&mce_entry);
@@ -332,15 +434,13 @@ void do_machine_check(struct pt_regs * regs, long error_code)
  * and historically has been the register value of the
  * MSR_IA32_THERMAL_STATUS (Intel) msr.
  */
-void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
+void mce_log_therm_throt_event(__u64 status)
 {
 	struct mce m;
 
-	memset(&m, 0, sizeof(m));
-	m.cpu = cpu;
+	mce_setup(&m);
 	m.bank = MCE_THERMAL_BANK;
 	m.status = status;
-	rdtscll(m.tsc);
 	mce_log(&m);
 }
 #endif /* CONFIG_X86_MCE_INTEL */
@@ -353,18 +453,18 @@ void mce_log_therm_throt_event(unsigned int cpu, __u64 status)
 
 static int check_interval = 5 * 60; /* 5 minutes */
 static int next_interval; /* in jiffies */
-static void mcheck_timer(struct work_struct *work);
-static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer);
+static void mcheck_timer(unsigned long);
+static DEFINE_PER_CPU(struct timer_list, mce_timer);
 
-static void mcheck_check_cpu(void *info)
+static void mcheck_timer(unsigned long data)
 {
-	if (mce_available(&current_cpu_data))
-		do_machine_check(NULL, 0);
-}
+	struct timer_list *t = &per_cpu(mce_timer, data);
 
-static void mcheck_timer(struct work_struct *work)
-{
-	on_each_cpu(mcheck_check_cpu, NULL, 1);
+	WARN_ON(smp_processor_id() != data);
+
+	if (mce_available(&current_cpu_data))
+		machine_check_poll(MCP_TIMESTAMP,
+				&__get_cpu_var(mce_poll_banks));
 
 	/*
 	 * Alert userspace if needed. If we logged an MCE, reduce the
@@ -377,31 +477,41 @@ static void mcheck_timer(struct work_struct *work)
 			(int)round_jiffies_relative(check_interval*HZ));
 	}
 
-	schedule_delayed_work(&mcheck_work, next_interval);
+	t->expires = jiffies + next_interval;
+	add_timer(t);
+}
+
+static void mce_do_trigger(struct work_struct *work)
+{
+	call_usermodehelper(trigger, trigger_argv, NULL, UMH_NO_WAIT);
 }
 
+static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
+
 /*
- * This is only called from process context. This is where we do
- * anything we need to alert userspace about new MCEs. This is called
- * directly from the poller and also from entry.S and idle, thanks to
- * TIF_MCE_NOTIFY.
+ * Notify the user(s) about new machine check events.
+ * Can be called from interrupt context, but not from machine check/NMI
+ * context.
 */
 int mce_notify_user(void)
 {
+	/* Not more than two messages every minute */
+	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
 	clear_thread_flag(TIF_MCE_NOTIFY);
 	if (test_and_clear_bit(0, &notify_user)) {
-		static unsigned long last_print;
-		unsigned long now = jiffies;
-
 		wake_up_interruptible(&mce_wait);
-		if (trigger[0])
-			call_usermodehelper(trigger, trigger_argv, NULL,
-						UMH_NO_WAIT);
 
-		if (time_after_eq(now, last_print + (check_interval*HZ))) {
-			last_print = now;
+		/*
+		 * There is no risk of missing notifications because
+		 * work_pending is always cleared before the function is
+		 * executed.
+		 */
+		if (trigger[0] && !work_pending(&mce_trigger_work))
+			schedule_work(&mce_trigger_work);
+
+		if (__ratelimit(&ratelimit))
 			printk(KERN_INFO "Machine check events logged\n");
-		}
 
 		return 1;
 	}
@@ -425,63 +535,78 @@ static struct notifier_block mce_idle_notifier = {
 
 static __init int periodic_mcheck_init(void)
 {
-	next_interval = check_interval * HZ;
-	if (next_interval)
-		schedule_delayed_work(&mcheck_work,
-				      round_jiffies_relative(next_interval));
-	idle_notifier_register(&mce_idle_notifier);
-	return 0;
+	idle_notifier_register(&mce_idle_notifier);
+	return 0;
 }
 __initcall(periodic_mcheck_init);
 
-
 /*
  * Initialize Machine Checks for a CPU.
  */
-static void mce_init(void *dummy)
+static int mce_cap_init(void)
 {
 	u64 cap;
-	int i;
+	unsigned b;
 
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
-	banks = cap & 0xff;
-	if (banks > MCE_EXTENDED_BANK) {
-		banks = MCE_EXTENDED_BANK;
-		printk(KERN_INFO "MCE: warning: using only %d banks\n",
-		       MCE_EXTENDED_BANK);
+	b = cap & 0xff;
+	if (b > MAX_NR_BANKS) {
+		printk(KERN_WARNING
+		       "MCE: Using only %u machine check banks out of %u\n",
+			MAX_NR_BANKS, b);
+		b = MAX_NR_BANKS;
 	}
+
+	/* Don't support asymmetric configurations today */
+	WARN_ON(banks != 0 && b != banks);
+	banks = b;
+	if (!bank) {
+		bank = kmalloc(banks * sizeof(u64), GFP_KERNEL);
+		if (!bank)
+			return -ENOMEM;
+		memset(bank, 0xff, banks * sizeof(u64));
+	}
+
 	/* Use accurate RIP reporting if available. */
 	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
 		rip_msr = MSR_IA32_MCG_EIP;
 
-	/* Log the machine checks left over from the previous reset.
-	   This also clears all registers */
-	do_machine_check(NULL, mce_bootlog ? -1 : -2);
+	return 0;
+}
+
+static void mce_init(void *dummy)
+{
+	u64 cap;
+	int i;
+	mce_banks_t all_banks;
+
+	/*
+	 * Log the machine checks left over from the previous reset.
+	 */
+	bitmap_fill(all_banks, MAX_NR_BANKS);
+	machine_check_poll(MCP_UC, &all_banks);
 
 	set_in_cr4(X86_CR4_MCE);
 
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
 	if (cap & MCG_CTL_P)
 		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 
 	for (i = 0; i < banks; i++) {
-		if (i < NR_SYSFS_BANKS)
-			wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
-		else
-			wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);
-
+		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
 		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
 	}
 }
 
 /* Add per CPU specific workarounds here */
-static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
+static void mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
 	/* This should be disabled by the BIOS, but isn't always */
 	if (c->x86_vendor == X86_VENDOR_AMD) {
-		if(c->x86 == 15)
+		if (c->x86 == 15 && banks > 4)
 			/* disable GART TBL walk error reporting, which trips off
 			   incorrectly with the IOMMU & 3ware & Cerberus. */
-			clear_bit(10, &bank[4]);
+			clear_bit(10, (unsigned long *)&bank[4]);
 		if(c->x86 <= 17 && mce_bootlog < 0)
 			/* Lots of broken BIOS around that don't clear them
 			   by default and leave crap in there. Don't log. */
@@ -504,20 +629,38 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
 	}
 }
 
+static void mce_init_timer(void)
+{
+	struct timer_list *t = &__get_cpu_var(mce_timer);
+
+	/* data race harmless because everyone sets to the same value */
+	if (!next_interval)
+		next_interval = check_interval * HZ;
+	if (!next_interval)
+		return;
+	setup_timer(t, mcheck_timer, smp_processor_id());
+	t->expires = round_jiffies_relative(jiffies + next_interval);
+	add_timer(t);
+}
+
 /*
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off.
 */
 void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
-	mce_cpu_quirks(c);
+	if (!mce_available(c))
+		return;
 
-	if (mce_dont_init ||
-	    !mce_available(c))
+	if (mce_cap_init() < 0) {
+		mce_dont_init = 1;
 		return;
+	}
+	mce_cpu_quirks(c);
 
 	mce_init(NULL);
 	mce_cpu_features(c);
+	mce_init_timer();
 }
 
 /*
@@ -573,7 +716,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 {
 	unsigned long *cpu_tsc;
 	static DEFINE_MUTEX(mce_read_mutex);
-	unsigned next;
+	unsigned prev, next;
 	char __user *buf = ubuf;
 	int i, err;
 
@@ -592,25 +735,32 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize,
 	}
 
 	err = 0;
-	for (i = 0; i < next; i++) {
-		unsigned long start = jiffies;
-
-		while (!mcelog.entry[i].finished) {
-			if (time_after_eq(jiffies, start + 2)) {
-				memset(mcelog.entry + i,0, sizeof(struct mce));
-				goto timeout;
+	prev = 0;
+	do {
+		for (i = prev; i < next; i++) {
+			unsigned long start = jiffies;
+
+			while (!mcelog.entry[i].finished) {
+				if (time_after_eq(jiffies, start + 2)) {
+					memset(mcelog.entry + i, 0,
+					       sizeof(struct mce));
+					goto timeout;
+				}
+				cpu_relax();
 			}
-			cpu_relax();
+			smp_rmb();
+			err |= copy_to_user(buf, mcelog.entry + i,
+					    sizeof(struct mce));
+			buf += sizeof(struct mce);
+timeout:
+			;
 		}
-		smp_rmb();
-		err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce));
-		buf += sizeof(struct mce);
- timeout:
-		;
-	}
 
-	memset(mcelog.entry, 0, next * sizeof(struct mce));
-	mcelog.next = 0;
+		memset(mcelog.entry + prev, 0,
+		       (next - prev) * sizeof(struct mce));
+		prev = next;
+		next = cmpxchg(&mcelog.next, prev, 0);
+	} while (next != prev);
 
 	synchronize_sched();
 
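
The reworked read loop above closes a race with NMI-context writers: new entries can be appended to mcelog between the copy-out and the reset of mcelog.next. Instead of blindly zeroing the index, the reader atomically swaps it to 0 only if it still equals the count already consumed; if the cmpxchg returns a larger value, records arrived mid-drain and another pass picks them up. Schematically (a sketch of the pattern, not the kernel code; consume() and log_next are stand-ins):

	/*
	 * Drain loop sketch: consume(prev, next) copies out entries
	 * [prev, next). cmpxchg(&log_next, prev, 0) resets the index
	 * only when no writer has bumped it in the meantime; otherwise
	 * it returns the new count and the loop runs again.
	 */
	unsigned prev = 0, next = log_next;
	do {
		consume(prev, next);
		prev = next;
		next = cmpxchg(&log_next, prev, 0);
	} while (next != prev);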
@@ -680,20 +830,6 @@ static struct miscdevice mce_log_device = {
 	&mce_chrdev_ops,
 };
 
-static unsigned long old_cr4 __initdata;
-
-void __init stop_mce(void)
-{
-	old_cr4 = read_cr4();
-	clear_in_cr4(X86_CR4_MCE);
-}
-
-void __init restart_mce(void)
-{
-	if (old_cr4 & X86_CR4_MCE)
-		set_in_cr4(X86_CR4_MCE);
-}
-
 /*
  * Old style boot options parsing. Only for compatibility.
 */
@@ -703,8 +839,7 @@ static int __init mcheck_disable(char *str)
 	return 1;
 }
 
-/* mce=off disables machine check. Note you can re-enable it later
-   using sysfs.
+/* mce=off disables machine check.
    mce=TOLERANCELEVEL (number, see above)
    mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
    mce=nobootlog Don't log MCEs from before booting. */
@@ -728,6 +863,29 @@ __setup("mce=", mcheck_enable);
 * Sysfs support
 */
 
+/*
+ * Disable machine checks on suspend and shutdown. We can't really handle
+ * them later.
+ */
+static int mce_disable(void)
+{
+	int i;
+
+	for (i = 0; i < banks; i++)
+		wrmsrl(MSR_IA32_MC0_CTL + i*4, 0);
+	return 0;
+}
+
+static int mce_suspend(struct sys_device *dev, pm_message_t state)
+{
+	return mce_disable();
+}
+
+static int mce_shutdown(struct sys_device *dev)
+{
+	return mce_disable();
+}
+
 /* On resume clear all MCE state. Don't want to see leftovers from the BIOS.
    Only one CPU is active at this time, the others get readded later using
    CPU hotplug. */
@@ -738,20 +896,24 @@ static int mce_resume(struct sys_device *dev)
 	return 0;
 }
 
+static void mce_cpu_restart(void *data)
+{
+	del_timer_sync(&__get_cpu_var(mce_timer));
+	if (mce_available(&current_cpu_data))
+		mce_init(NULL);
+	mce_init_timer();
+}
+
 /* Reinit MCEs after user configuration changes */
 static void mce_restart(void)
 {
-	if (next_interval)
-		cancel_delayed_work(&mcheck_work);
-	/* Timer race is harmless here */
-	on_each_cpu(mce_init, NULL, 1);
 	next_interval = check_interval * HZ;
-	if (next_interval)
-		schedule_delayed_work(&mcheck_work,
-				      round_jiffies_relative(next_interval));
+	on_each_cpu(mce_cpu_restart, NULL, 1);
 }
 
 static struct sysdev_class mce_sysclass = {
+	.suspend = mce_suspend,
+	.shutdown = mce_shutdown,
 	.resume = mce_resume,
 	.name = "machinecheck",
 };
@@ -778,16 +940,26 @@ void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinit
 	}								\
 	static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
 
-/*
- * TBD should generate these dynamically based on number of available banks.
- * Have only 6 contol banks in /sysfs until then.
- */
-ACCESSOR(bank0ctl,bank[0],mce_restart())
-ACCESSOR(bank1ctl,bank[1],mce_restart())
-ACCESSOR(bank2ctl,bank[2],mce_restart())
-ACCESSOR(bank3ctl,bank[3],mce_restart())
-ACCESSOR(bank4ctl,bank[4],mce_restart())
-ACCESSOR(bank5ctl,bank[5],mce_restart())
+static struct sysdev_attribute *bank_attrs;
+
+static ssize_t show_bank(struct sys_device *s, struct sysdev_attribute *attr,
+			 char *buf)
+{
+	u64 b = bank[attr - bank_attrs];
+	return sprintf(buf, "%llx\n", b);
+}
+
+static ssize_t set_bank(struct sys_device *s, struct sysdev_attribute *attr,
+			const char *buf, size_t siz)
+{
+	char *end;
+	u64 new = simple_strtoull(buf, &end, 0);
+	if (end == buf)
+		return -EINVAL;
+	bank[attr - bank_attrs] = new;
+	mce_restart();
+	return end-buf;
+}
@@ -814,8 +986,6 @@ static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger);
 static SYSDEV_INT_ATTR(tolerant, 0644, tolerant);
 ACCESSOR(check_interval,check_interval,mce_restart())
 static struct sysdev_attribute *mce_attributes[] = {
-	&attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl,
-	&attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl,
 	&attr_tolerant.attr, &attr_check_interval, &attr_trigger,
 	NULL
 };
@@ -845,11 +1015,22 @@ static __cpuinit int mce_create_device(unsigned int cpu) | |||
845 | if (err) | 1015 | if (err) |
846 | goto error; | 1016 | goto error; |
847 | } | 1017 | } |
1018 | for (i = 0; i < banks; i++) { | ||
1019 | err = sysdev_create_file(&per_cpu(device_mce, cpu), | ||
1020 | &bank_attrs[i]); | ||
1021 | if (err) | ||
1022 | goto error2; | ||
1023 | } | ||
848 | cpu_set(cpu, mce_device_initialized); | 1024 | cpu_set(cpu, mce_device_initialized); |
849 | 1025 | ||
850 | return 0; | 1026 | return 0; |
1027 | error2: | ||
1028 | while (--i >= 0) { | ||
1029 | sysdev_remove_file(&per_cpu(device_mce, cpu), | ||
1030 | &bank_attrs[i]); | ||
1031 | } | ||
851 | error: | 1032 | error: |
852 | while (i--) { | 1033 | while (--i >= 0) { |
853 | sysdev_remove_file(&per_cpu(device_mce,cpu), | 1034 | sysdev_remove_file(&per_cpu(device_mce,cpu), |
854 | mce_attributes[i]); | 1035 | mce_attributes[i]); |
855 | } | 1036 | } |
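
Both unwind loops above visit indices i-1 down to 0 when entered with i equal to the number of files created so far; the change from while (i--) to while (--i >= 0) makes the two error paths read identically. Note the pre-decrement form is only safe with a signed index: with an unsigned one, --i at zero wraps around and the loop never terminates. A quick demonstration of the equivalence (plain C, illustrative only):

#include <stdio.h>

int main(void)
{
	int i;

	/* post-decrement: test i, then decrement; body sees 2, 1, 0 */
	for (i = 3; i--; )
		printf("post: %d\n", i);

	/* pre-decrement: decrement, then test; body also sees 2, 1, 0 */
	for (i = 3; --i >= 0; )
		printf("pre:  %d\n", i);

	return 0;
}
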
@@ -868,15 +1049,46 @@ static __cpuinit void mce_remove_device(unsigned int cpu) | |||
868 | for (i = 0; mce_attributes[i]; i++) | 1049 | for (i = 0; mce_attributes[i]; i++) |
869 | sysdev_remove_file(&per_cpu(device_mce,cpu), | 1050 | sysdev_remove_file(&per_cpu(device_mce,cpu), |
870 | mce_attributes[i]); | 1051 | mce_attributes[i]); |
1052 | for (i = 0; i < banks; i++) | ||
1053 | sysdev_remove_file(&per_cpu(device_mce, cpu), | ||
1054 | &bank_attrs[i]); | ||
871 | sysdev_unregister(&per_cpu(device_mce,cpu)); | 1055 | sysdev_unregister(&per_cpu(device_mce,cpu)); |
872 | cpu_clear(cpu, mce_device_initialized); | 1056 | cpu_clear(cpu, mce_device_initialized); |
873 | } | 1057 | } |
874 | 1058 | ||
1059 | /* Make sure there are no machine checks on offlined CPUs. */ | ||
1060 | static void mce_disable_cpu(void *h) | ||
1061 | { | ||
1062 | int i; | ||
1063 | unsigned long action = *(unsigned long *)h; | ||
1064 | |||
1065 | if (!mce_available(&current_cpu_data)) | ||
1066 | return; | ||
1067 | if (!(action & CPU_TASKS_FROZEN)) | ||
1068 | cmci_clear(); | ||
1069 | for (i = 0; i < banks; i++) | ||
1070 | wrmsrl(MSR_IA32_MC0_CTL + i*4, 0); | ||
1071 | } | ||
1072 | |||
1073 | static void mce_reenable_cpu(void *h) | ||
1074 | { | ||
1075 | int i; | ||
1076 | unsigned long action = *(unsigned long *)h; | ||
1077 | |||
1078 | if (!mce_available(&current_cpu_data)) | ||
1079 | return; | ||
1080 | if (!(action & CPU_TASKS_FROZEN)) | ||
1081 | cmci_reenable(); | ||
1082 | for (i = 0; i < banks; i++) | ||
1083 | wrmsrl(MSR_IA32_MC0_CTL + i*4, bank[i]); | ||
1084 | } | ||
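
The i*4 stride in the wrmsrl() calls above comes from the architectural machine-check bank layout: each bank owns four consecutive MSRs (CTL, STATUS, ADDR, MISC) starting at MSR_IA32_MC0_CTL (0x400). A small sketch of the address arithmetic; the bank count is arbitrary:

#include <stdio.h>

#define MSR_IA32_MC0_CTL	0x400	/* first bank's control MSR */

int main(void)
{
	int i;

	/* each bank owns CTL, STATUS, ADDR, MISC, hence the stride of 4 */
	for (i = 0; i < 3; i++)
		printf("bank %d: CTL=0x%x STATUS=0x%x\n", i,
		       MSR_IA32_MC0_CTL + i * 4,
		       MSR_IA32_MC0_CTL + i * 4 + 1);
	return 0;
}
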
1085 | |||
875 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 1086 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
876 | static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | 1087 | static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, |
877 | unsigned long action, void *hcpu) | 1088 | unsigned long action, void *hcpu) |
878 | { | 1089 | { |
879 | unsigned int cpu = (unsigned long)hcpu; | 1090 | unsigned int cpu = (unsigned long)hcpu; |
1091 | struct timer_list *t = &per_cpu(mce_timer, cpu); | ||
880 | 1092 | ||
881 | switch (action) { | 1093 | switch (action) { |
882 | case CPU_ONLINE: | 1094 | case CPU_ONLINE: |
@@ -891,6 +1103,21 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | |||
891 | threshold_cpu_callback(action, cpu); | 1103 | threshold_cpu_callback(action, cpu); |
892 | mce_remove_device(cpu); | 1104 | mce_remove_device(cpu); |
893 | break; | 1105 | break; |
1106 | case CPU_DOWN_PREPARE: | ||
1107 | case CPU_DOWN_PREPARE_FROZEN: | ||
1108 | del_timer_sync(t); | ||
1109 | smp_call_function_single(cpu, mce_disable_cpu, &action, 1); | ||
1110 | break; | ||
1111 | case CPU_DOWN_FAILED: | ||
1112 | case CPU_DOWN_FAILED_FROZEN: | ||
1113 | t->expires = round_jiffies_relative(jiffies + next_interval); | ||
1114 | add_timer_on(t, cpu); | ||
1115 | smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); | ||
1116 | break; | ||
1117 | case CPU_POST_DEAD: | ||
1118 | /* intentionally ignoring frozen here */ | ||
1119 | cmci_rediscover(cpu); | ||
1120 | break; | ||
894 | } | 1121 | } |
895 | return NOTIFY_OK; | 1122 | return NOTIFY_OK; |
896 | } | 1123 | } |
@@ -899,6 +1126,34 @@ static struct notifier_block mce_cpu_notifier __cpuinitdata = { | |||
899 | .notifier_call = mce_cpu_callback, | 1126 | .notifier_call = mce_cpu_callback, |
900 | }; | 1127 | }; |
901 | 1128 | ||
1129 | static __init int mce_init_banks(void) | ||
1130 | { | ||
1131 | int i; | ||
1132 | |||
1133 | bank_attrs = kzalloc(sizeof(struct sysdev_attribute) * banks, | ||
1134 | GFP_KERNEL); | ||
1135 | if (!bank_attrs) | ||
1136 | return -ENOMEM; | ||
1137 | |||
1138 | for (i = 0; i < banks; i++) { | ||
1139 | struct sysdev_attribute *a = &bank_attrs[i]; | ||
1140 | a->attr.name = kasprintf(GFP_KERNEL, "bank%d", i); | ||
1141 | if (!a->attr.name) | ||
1142 | goto nomem; | ||
1143 | a->attr.mode = 0644; | ||
1144 | a->show = show_bank; | ||
1145 | a->store = set_bank; | ||
1146 | } | ||
1147 | return 0; | ||
1148 | |||
1149 | nomem: | ||
1150 | while (--i >= 0) | ||
1151 | kfree(bank_attrs[i].attr.name); | ||
1152 | kfree(bank_attrs); | ||
1153 | bank_attrs = NULL; | ||
1154 | return -ENOMEM; | ||
1155 | } | ||
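
mce_init_banks() builds the sysfs attribute array at runtime, naming each entry with kasprintf() and unwinding all prior allocations if any name fails. A user-space analogue of the same allocate-name-unwind pattern, assuming strdup()/snprintf() in place of kasprintf():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct attr { char *name; };

/* build "bank0".."bankN-1" names, unwinding on failure as the
 * kernel's nomem path does with kfree() */
static struct attr *make_attrs(int n)
{
	struct attr *a = calloc(n, sizeof(*a));
	int i;

	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		char buf[16];

		snprintf(buf, sizeof(buf), "bank%d", i);
		a[i].name = strdup(buf);	/* kasprintf() analogue */
		if (!a[i].name)
			goto nomem;
	}
	return a;

nomem:
	while (--i >= 0)
		free(a[i].name);
	free(a);
	return NULL;
}

int main(void)
{
	struct attr *a = make_attrs(4);

	if (a)
		printf("%s %s\n", a[0].name, a[3].name);	/* bank0 bank3 */
	return 0;
}
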
1156 | |||
902 | static __init int mce_init_device(void) | 1157 | static __init int mce_init_device(void) |
903 | { | 1158 | { |
904 | int err; | 1159 | int err; |
@@ -906,6 +1161,11 @@ static __init int mce_init_device(void) | |||
906 | 1161 | ||
907 | if (!mce_available(&boot_cpu_data)) | 1162 | if (!mce_available(&boot_cpu_data)) |
908 | return -EIO; | 1163 | return -EIO; |
1164 | |||
1165 | err = mce_init_banks(); | ||
1166 | if (err) | ||
1167 | return err; | ||
1168 | |||
909 | err = sysdev_class_register(&mce_sysclass); | 1169 | err = sysdev_class_register(&mce_sysclass); |
910 | if (err) | 1170 | if (err) |
911 | return err; | 1171 | return err; |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 9817506dd469..c5a32f92d07e 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
@@ -79,6 +79,8 @@ static unsigned char shared_bank[NR_BANKS] = { | |||
79 | 79 | ||
80 | static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ | 80 | static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ |
81 | 81 | ||
82 | static void amd_threshold_interrupt(void); | ||
83 | |||
82 | /* | 84 | /* |
83 | * CPU Initialization | 85 | * CPU Initialization |
84 | */ | 86 | */ |
@@ -174,6 +176,8 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
174 | tr.reset = 0; | 176 | tr.reset = 0; |
175 | tr.old_limit = 0; | 177 | tr.old_limit = 0; |
176 | threshold_restart_bank(&tr); | 178 | threshold_restart_bank(&tr); |
179 | |||
180 | mce_threshold_vector = amd_threshold_interrupt; | ||
177 | } | 181 | } |
178 | } | 182 | } |
179 | } | 183 | } |
@@ -187,19 +191,13 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
187 | * the interrupt goes off when error_count reaches threshold_limit. | 191 | * the interrupt goes off when error_count reaches threshold_limit. |
188 | * the handler will simply log mcelog w/ software defined bank number. | 192 | * the handler will simply log mcelog w/ software defined bank number. |
189 | */ | 193 | */ |
190 | asmlinkage void mce_threshold_interrupt(void) | 194 | static void amd_threshold_interrupt(void) |
191 | { | 195 | { |
192 | unsigned int bank, block; | 196 | unsigned int bank, block; |
193 | struct mce m; | 197 | struct mce m; |
194 | u32 low = 0, high = 0, address = 0; | 198 | u32 low = 0, high = 0, address = 0; |
195 | 199 | ||
196 | ack_APIC_irq(); | 200 | mce_setup(&m); |
197 | exit_idle(); | ||
198 | irq_enter(); | ||
199 | |||
200 | memset(&m, 0, sizeof(m)); | ||
201 | rdtscll(m.tsc); | ||
202 | m.cpu = smp_processor_id(); | ||
203 | 201 | ||
204 | /* assume first bank caused it */ | 202 | /* assume first bank caused it */ |
205 | for (bank = 0; bank < NR_BANKS; ++bank) { | 203 | for (bank = 0; bank < NR_BANKS; ++bank) { |
@@ -233,7 +231,8 @@ asmlinkage void mce_threshold_interrupt(void) | |||
233 | 231 | ||
234 | /* Log the machine check that caused the threshold | 232 | /* Log the machine check that caused the threshold |
235 | event. */ | 233 | event. */ |
236 | do_machine_check(NULL, 0); | 234 | machine_check_poll(MCP_TIMESTAMP, |
235 | &__get_cpu_var(mce_poll_banks)); | ||
237 | 236 | ||
238 | if (high & MASK_OVERFLOW_HI) { | 237 | if (high & MASK_OVERFLOW_HI) { |
239 | rdmsrl(address, m.misc); | 238 | rdmsrl(address, m.misc); |
@@ -243,13 +242,10 @@ asmlinkage void mce_threshold_interrupt(void) | |||
243 | + bank * NR_BLOCKS | 242 | + bank * NR_BLOCKS |
244 | + block; | 243 | + block; |
245 | mce_log(&m); | 244 | mce_log(&m); |
246 | goto out; | 245 | return; |
247 | } | 246 | } |
248 | } | 247 | } |
249 | } | 248 | } |
250 | out: | ||
251 | inc_irq_stat(irq_threshold_count); | ||
252 | irq_exit(); | ||
253 | } | 249 | } |
254 | 250 | ||
255 | /* | 251 | /* |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c index aa5e287c98e0..aaa7d9730938 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c | |||
@@ -1,6 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Intel specific MCE features. | 2 | * Intel specific MCE features. |
3 | * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca> | 3 | * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca> |
4 | * Copyright (C) 2008, 2009 Intel Corporation | ||
5 | * Author: Andi Kleen | ||
4 | */ | 6 | */ |
5 | 7 | ||
6 | #include <linux/init.h> | 8 | #include <linux/init.h> |
@@ -13,6 +15,7 @@ | |||
13 | #include <asm/hw_irq.h> | 15 | #include <asm/hw_irq.h> |
14 | #include <asm/idle.h> | 16 | #include <asm/idle.h> |
15 | #include <asm/therm_throt.h> | 17 | #include <asm/therm_throt.h> |
18 | #include <asm/apic.h> | ||
16 | 19 | ||
17 | asmlinkage void smp_thermal_interrupt(void) | 20 | asmlinkage void smp_thermal_interrupt(void) |
18 | { | 21 | { |
@@ -25,7 +28,7 @@ asmlinkage void smp_thermal_interrupt(void) | |||
25 | 28 | ||
26 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); | 29 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); |
27 | if (therm_throt_process(msr_val & 1)) | 30 | if (therm_throt_process(msr_val & 1)) |
28 | mce_log_therm_throt_event(smp_processor_id(), msr_val); | 31 | mce_log_therm_throt_event(msr_val); |
29 | 32 | ||
30 | inc_irq_stat(irq_thermal_count); | 33 | inc_irq_stat(irq_thermal_count); |
31 | irq_exit(); | 34 | irq_exit(); |
@@ -85,7 +88,209 @@ static void intel_init_thermal(struct cpuinfo_x86 *c) | |||
85 | return; | 88 | return; |
86 | } | 89 | } |
87 | 90 | ||
91 | /* | ||
92 | * Support for Intel Corrected Machine Check Interrupts. This allows | ||
93 | * the CPU to raise an interrupt when a corrected machine check happened. | ||
94 | * Normally we pick those up using a regular polling timer. | ||
95 | * Also supports reliable discovery of shared banks. | ||
96 | */ | ||
97 | |||
98 | static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); | ||
99 | |||
100 | /* | ||
101 | * cmci_discover_lock protects against parallel discovery attempts | ||
102 | * which could race against each other. | ||
103 | */ | ||
104 | static DEFINE_SPINLOCK(cmci_discover_lock); | ||
105 | |||
106 | #define CMCI_THRESHOLD 1 | ||
107 | |||
108 | static int cmci_supported(int *banks) | ||
109 | { | ||
110 | u64 cap; | ||
111 | |||
112 | /* | ||
113 | * A vendor check is not strictly needed, but initialization | ||
114 | * is vendor-keyed and this makes sure none of the backdoors | ||
115 | * are entered otherwise. | ||
116 | */ | ||
117 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | ||
118 | return 0; | ||
119 | if (!cpu_has_apic || lapic_get_maxlvt() < 6) | ||
120 | return 0; | ||
121 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
122 | *banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff); | ||
123 | return !!(cap & MCG_CMCI_P); | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * The interrupt handler. This is called on every event. | ||
128 | * Just call the poller directly to log any events. | ||
129 | * This could in theory increase the threshold under high load, | ||
130 | * but doesn't for now. | ||
131 | */ | ||
132 | static void intel_threshold_interrupt(void) | ||
133 | { | ||
134 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | ||
135 | mce_notify_user(); | ||
136 | } | ||
137 | |||
138 | static void print_update(char *type, int *hdr, int num) | ||
139 | { | ||
140 | if (*hdr == 0) | ||
141 | printk(KERN_INFO "CPU %d MCA banks", smp_processor_id()); | ||
142 | *hdr = 1; | ||
143 | printk(KERN_CONT " %s:%d", type, num); | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks | ||
148 | * on this CPU. Use the algorithm recommended in the SDM to discover shared | ||
149 | * banks. | ||
150 | */ | ||
151 | static void cmci_discover(int banks, int boot) | ||
152 | { | ||
153 | unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned); | ||
154 | int hdr = 0; | ||
155 | int i; | ||
156 | |||
157 | spin_lock(&cmci_discover_lock); | ||
158 | for (i = 0; i < banks; i++) { | ||
159 | u64 val; | ||
160 | |||
161 | if (test_bit(i, owned)) | ||
162 | continue; | ||
163 | |||
164 | rdmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
165 | |||
166 | /* Already owned by someone else? */ | ||
167 | if (val & CMCI_EN) { | ||
168 | if (test_and_clear_bit(i, owned) || boot) | ||
169 | print_update("SHD", &hdr, i); | ||
170 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | ||
171 | continue; | ||
172 | } | ||
173 | |||
174 | val |= CMCI_EN | CMCI_THRESHOLD; | ||
175 | wrmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
176 | rdmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
177 | |||
178 | /* Did the enable bit stick? -- the bank supports CMCI */ | ||
179 | if (val & CMCI_EN) { | ||
180 | if (!test_and_set_bit(i, owned) || boot) | ||
181 | print_update("CMCI", &hdr, i); | ||
182 | __clear_bit(i, __get_cpu_var(mce_poll_banks)); | ||
183 | } else { | ||
184 | WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); | ||
185 | } | ||
186 | } | ||
187 | spin_unlock(&cmci_discover_lock); | ||
188 | if (hdr) | ||
189 | printk(KERN_CONT "\n"); | ||
190 | } | ||
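
cmci_discover() implements the claim protocol the comment above describes: write CMCI_EN, read the MSR back, and own the bank only if the bit stuck; a bank already enabled by a sibling is marked shared instead. A toy single-variable model of the did-the-bit-stick test (it cannot model hardware silently dropping the write on a non-CMCI bank, which is noted only in the comments):

#include <stdio.h>

#define CMCI_EN (1ULL << 30)	/* enable bit in MCi_CTL2 */

static unsigned long long mci_ctl2;	/* stands in for the shared MSR */

static int claim_bank(void)
{
	unsigned long long val = mci_ctl2;	/* rdmsrl */

	if (val & CMCI_EN)
		return 0;			/* owned by someone else */
	mci_ctl2 = val | CMCI_EN;		/* wrmsrl */
	/* re-read; on real hardware a non-CMCI bank drops the bit */
	return (mci_ctl2 & CMCI_EN) != 0;
}

int main(void)
{
	printf("first claim:  %d\n", claim_bank());	/* 1: we own it */
	printf("second claim: %d\n", claim_bank());	/* 0: shared */
	return 0;
}
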
191 | |||
192 | /* | ||
193 | * Just in case we missed an event during initialization, check | ||
194 | * all the CMCI-owned banks. | ||
195 | */ | ||
196 | void cmci_recheck(void) | ||
197 | { | ||
198 | unsigned long flags; | ||
199 | int banks; | ||
200 | |||
201 | if (!mce_available(&current_cpu_data) || !cmci_supported(&banks)) | ||
202 | return; | ||
203 | local_irq_save(flags); | ||
204 | machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); | ||
205 | local_irq_restore(flags); | ||
206 | } | ||
207 | |||
208 | /* | ||
209 | * Disable CMCI on this CPU for all banks it owns when it goes down. | ||
210 | * This allows other CPUs to claim the banks on rediscovery. | ||
211 | */ | ||
212 | void cmci_clear(void) | ||
213 | { | ||
214 | int i; | ||
215 | int banks; | ||
216 | u64 val; | ||
217 | |||
218 | if (!cmci_supported(&banks)) | ||
219 | return; | ||
220 | spin_lock(&cmci_discover_lock); | ||
221 | for (i = 0; i < banks; i++) { | ||
222 | if (!test_bit(i, __get_cpu_var(mce_banks_owned))) | ||
223 | continue; | ||
224 | /* Disable CMCI */ | ||
225 | rdmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
226 | val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK); | ||
227 | wrmsrl(MSR_IA32_MC0_CTL2 + i, val); | ||
228 | __clear_bit(i, __get_cpu_var(mce_banks_owned)); | ||
229 | } | ||
230 | spin_unlock(&cmci_discover_lock); | ||
231 | } | ||
232 | |||
233 | /* | ||
234 | * After a CPU went down, cycle through all the others and rediscover. | ||
235 | * Must run in process context. | ||
236 | */ | ||
237 | void cmci_rediscover(int dying) | ||
238 | { | ||
239 | int banks; | ||
240 | int cpu; | ||
241 | cpumask_var_t old; | ||
242 | |||
243 | if (!cmci_supported(&banks)) | ||
244 | return; | ||
245 | if (!alloc_cpumask_var(&old, GFP_KERNEL)) | ||
246 | return; | ||
247 | cpumask_copy(old, &current->cpus_allowed); | ||
248 | |||
249 | for_each_online_cpu (cpu) { | ||
250 | if (cpu == dying) | ||
251 | continue; | ||
252 | if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu))) | ||
253 | continue; | ||
254 | /* Recheck banks in case CPUs don't all support the same set */ | ||
255 | if (cmci_supported(&banks)) | ||
256 | cmci_discover(banks, 0); | ||
257 | } | ||
258 | |||
259 | set_cpus_allowed_ptr(current, old); | ||
260 | free_cpumask_var(old); | ||
261 | } | ||
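
cmci_rediscover() migrates the current task onto each surviving CPU in turn so that the rdmsrl/wrmsrl calls in cmci_discover() execute locally, then restores the original affinity. A user-space analogue of the hop-and-restore pattern using sched_setaffinity() (Linux-specific; the per-CPU work here is just a printf):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	cpu_set_t old, set;
	long cpu, ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	sched_getaffinity(0, sizeof(old), &old);	/* save old mask */
	for (cpu = 0; cpu < ncpus; cpu++) {
		CPU_ZERO(&set);
		CPU_SET(cpu, &set);
		if (sched_setaffinity(0, sizeof(set), &set))
			continue;			/* cpu unavailable */
		printf("now running on cpu %ld\n", cpu);
	}
	sched_setaffinity(0, sizeof(old), &old);	/* restore */
	return 0;
}
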
262 | |||
263 | /* | ||
264 | * Reenable CMCI on this CPU in case a CPU down failed. | ||
265 | */ | ||
266 | void cmci_reenable(void) | ||
267 | { | ||
268 | int banks; | ||
269 | if (cmci_supported(&banks)) | ||
270 | cmci_discover(banks, 0); | ||
271 | } | ||
272 | |||
273 | static __cpuinit void intel_init_cmci(void) | ||
274 | { | ||
275 | int banks; | ||
276 | |||
277 | if (!cmci_supported(&banks)) | ||
278 | return; | ||
279 | |||
280 | mce_threshold_vector = intel_threshold_interrupt; | ||
281 | cmci_discover(banks, 1); | ||
282 | /* | ||
283 | * For CPU #0 this runs with the APIC still disabled, but that's | ||
284 | * ok because only the vector is set up. We still do another | ||
285 | * check of the banks later for CPU #0 just to make sure | ||
286 | * we don't miss any events. | ||
287 | */ | ||
288 | apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED); | ||
289 | cmci_recheck(); | ||
290 | } | ||
291 | |||
88 | void mce_intel_feature_init(struct cpuinfo_x86 *c) | 292 | void mce_intel_feature_init(struct cpuinfo_x86 *c) |
89 | { | 293 | { |
90 | intel_init_thermal(c); | 294 | intel_init_thermal(c); |
295 | intel_init_cmci(); | ||
91 | } | 296 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c new file mode 100644 index 000000000000..23ee9e730f78 --- /dev/null +++ b/arch/x86/kernel/cpu/mcheck/threshold.c | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * Common corrected MCE threshold handler code: | ||
3 | */ | ||
4 | #include <linux/interrupt.h> | ||
5 | #include <linux/kernel.h> | ||
6 | |||
7 | #include <asm/irq_vectors.h> | ||
8 | #include <asm/apic.h> | ||
9 | #include <asm/idle.h> | ||
10 | #include <asm/mce.h> | ||
11 | |||
12 | static void default_threshold_interrupt(void) | ||
13 | { | ||
14 | printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n", | ||
15 | THRESHOLD_APIC_VECTOR); | ||
16 | } | ||
17 | |||
18 | void (*mce_threshold_vector)(void) = default_threshold_interrupt; | ||
19 | |||
20 | asmlinkage void mce_threshold_interrupt(void) | ||
21 | { | ||
22 | exit_idle(); | ||
23 | irq_enter(); | ||
24 | inc_irq_stat(irq_threshold_count); | ||
25 | mce_threshold_vector(); | ||
26 | irq_exit(); | ||
27 | /* Ack only at the end to avoid potential reentry */ | ||
28 | ack_APIC_irq(); | ||
29 | } | ||
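
The new threshold.c factors the interrupt entry/exit bookkeeping out of the vendor handlers: the common mce_threshold_interrupt() does the irq accounting and the final ack, while the AMD and Intel code merely repoint mce_threshold_vector at their own logic. A compact user-space sketch of that indirection, with printfs standing in for the kernel's bookkeeping:

#include <stdio.h>

static void default_threshold_interrupt(void)
{
	printf("unexpected threshold interrupt\n");
}

/* vendor init code repoints this at its own handler */
static void (*mce_threshold_vector)(void) = default_threshold_interrupt;

static void intel_threshold_interrupt(void)
{
	printf("polling CMCI-owned banks\n");
}

/* common entry: shared bookkeeping wraps the indirect call */
static void mce_threshold_interrupt(void)
{
	/* irq_enter() and statistics would go here in the kernel */
	mce_threshold_vector();
	/* irq_exit(), then the ack, to avoid re-entry */
}

int main(void)
{
	mce_threshold_interrupt();	/* default handler fires */
	mce_threshold_vector = intel_threshold_interrupt;
	mce_threshold_interrupt();	/* vendor handler fires */
	return 0;
}
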
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index b0f61f0dcd0a..f2f8540a7f3d 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -136,7 +136,7 @@ int init_fpu(struct task_struct *tsk) | |||
136 | #ifdef CONFIG_X86_32 | 136 | #ifdef CONFIG_X86_32 |
137 | if (!HAVE_HWFP) { | 137 | if (!HAVE_HWFP) { |
138 | memset(tsk->thread.xstate, 0, xstate_size); | 138 | memset(tsk->thread.xstate, 0, xstate_size); |
139 | finit(); | 139 | finit_task(tsk); |
140 | set_stopped_child_used_math(tsk); | 140 | set_stopped_child_used_math(tsk); |
141 | return 0; | 141 | return 0; |
142 | } | 142 | } |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 9dc6b2b24275..3b09634a5153 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/cpu.h> | 16 | #include <linux/cpu.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/uaccess.h> | 18 | #include <linux/uaccess.h> |
19 | #include <linux/percpu.h> | ||
19 | 20 | ||
20 | #include <asm/apic.h> | 21 | #include <asm/apic.h> |
21 | 22 | ||
@@ -55,13 +56,13 @@ static inline void print_stack_overflow(void) { } | |||
55 | union irq_ctx { | 56 | union irq_ctx { |
56 | struct thread_info tinfo; | 57 | struct thread_info tinfo; |
57 | u32 stack[THREAD_SIZE/sizeof(u32)]; | 58 | u32 stack[THREAD_SIZE/sizeof(u32)]; |
58 | }; | 59 | } __attribute__((aligned(PAGE_SIZE))); |
59 | 60 | ||
60 | static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly; | 61 | static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx); |
61 | static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly; | 62 | static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx); |
62 | 63 | ||
63 | static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; | 64 | static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, hardirq_stack); |
64 | static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss; | 65 | static DEFINE_PER_CPU_PAGE_ALIGNED(union irq_ctx, softirq_stack); |
65 | 66 | ||
66 | static void call_on_stack(void *func, void *stack) | 67 | static void call_on_stack(void *func, void *stack) |
67 | { | 68 | { |
@@ -81,7 +82,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
81 | u32 *isp, arg1, arg2; | 82 | u32 *isp, arg1, arg2; |
82 | 83 | ||
83 | curctx = (union irq_ctx *) current_thread_info(); | 84 | curctx = (union irq_ctx *) current_thread_info(); |
84 | irqctx = hardirq_ctx[smp_processor_id()]; | 85 | irqctx = __get_cpu_var(hardirq_ctx); |
85 | 86 | ||
86 | /* | 87 | /* |
87 | * this is where we switch to the IRQ stack. However, if we are | 88 | * this is where we switch to the IRQ stack. However, if we are |
@@ -125,34 +126,34 @@ void __cpuinit irq_ctx_init(int cpu) | |||
125 | { | 126 | { |
126 | union irq_ctx *irqctx; | 127 | union irq_ctx *irqctx; |
127 | 128 | ||
128 | if (hardirq_ctx[cpu]) | 129 | if (per_cpu(hardirq_ctx, cpu)) |
129 | return; | 130 | return; |
130 | 131 | ||
131 | irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE]; | 132 | irqctx = &per_cpu(hardirq_stack, cpu); |
132 | irqctx->tinfo.task = NULL; | 133 | irqctx->tinfo.task = NULL; |
133 | irqctx->tinfo.exec_domain = NULL; | 134 | irqctx->tinfo.exec_domain = NULL; |
134 | irqctx->tinfo.cpu = cpu; | 135 | irqctx->tinfo.cpu = cpu; |
135 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; | 136 | irqctx->tinfo.preempt_count = HARDIRQ_OFFSET; |
136 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 137 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
137 | 138 | ||
138 | hardirq_ctx[cpu] = irqctx; | 139 | per_cpu(hardirq_ctx, cpu) = irqctx; |
139 | 140 | ||
140 | irqctx = (union irq_ctx *) &softirq_stack[cpu*THREAD_SIZE]; | 141 | irqctx = &per_cpu(softirq_stack, cpu); |
141 | irqctx->tinfo.task = NULL; | 142 | irqctx->tinfo.task = NULL; |
142 | irqctx->tinfo.exec_domain = NULL; | 143 | irqctx->tinfo.exec_domain = NULL; |
143 | irqctx->tinfo.cpu = cpu; | 144 | irqctx->tinfo.cpu = cpu; |
144 | irqctx->tinfo.preempt_count = 0; | 145 | irqctx->tinfo.preempt_count = 0; |
145 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); | 146 | irqctx->tinfo.addr_limit = MAKE_MM_SEG(0); |
146 | 147 | ||
147 | softirq_ctx[cpu] = irqctx; | 148 | per_cpu(softirq_ctx, cpu) = irqctx; |
148 | 149 | ||
149 | printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", | 150 | printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n", |
150 | cpu, hardirq_ctx[cpu], softirq_ctx[cpu]); | 151 | cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu)); |
151 | } | 152 | } |
152 | 153 | ||
153 | void irq_ctx_exit(int cpu) | 154 | void irq_ctx_exit(int cpu) |
154 | { | 155 | { |
155 | hardirq_ctx[cpu] = NULL; | 156 | per_cpu(hardirq_ctx, cpu) = NULL; |
156 | } | 157 | } |
157 | 158 | ||
158 | asmlinkage void do_softirq(void) | 159 | asmlinkage void do_softirq(void) |
@@ -169,7 +170,7 @@ asmlinkage void do_softirq(void) | |||
169 | 170 | ||
170 | if (local_softirq_pending()) { | 171 | if (local_softirq_pending()) { |
171 | curctx = current_thread_info(); | 172 | curctx = current_thread_info(); |
172 | irqctx = softirq_ctx[smp_processor_id()]; | 173 | irqctx = __get_cpu_var(softirq_ctx); |
173 | irqctx->tinfo.task = curctx->task; | 174 | irqctx->tinfo.task = curctx->task; |
174 | irqctx->tinfo.previous_esp = current_stack_pointer; | 175 | irqctx->tinfo.previous_esp = current_stack_pointer; |
175 | 176 | ||
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 1cc18d439bbb..2aef36d8aca2 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -216,6 +216,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
216 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), | 216 | DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq"), |
217 | }, | 217 | }, |
218 | }, | 218 | }, |
219 | { /* Handle problems with rebooting on Dell XPS710 */ | ||
220 | .callback = set_bios_reboot, | ||
221 | .ident = "Dell XPS710", | ||
222 | .matches = { | ||
223 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
224 | DMI_MATCH(DMI_PRODUCT_NAME, "Dell XPS710"), | ||
225 | }, | ||
226 | }, | ||
219 | { } | 227 | { } |
220 | }; | 228 | }; |
221 | 229 | ||
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index d992e6cff730..c29f301d3885 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/crash_dump.h> | 7 | #include <linux/crash_dump.h> |
8 | #include <linux/smp.h> | 8 | #include <linux/smp.h> |
9 | #include <linux/topology.h> | 9 | #include <linux/topology.h> |
10 | #include <linux/pfn.h> | ||
10 | #include <asm/sections.h> | 11 | #include <asm/sections.h> |
11 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
12 | #include <asm/setup.h> | 13 | #include <asm/setup.h> |
@@ -41,6 +42,321 @@ unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { | |||
41 | }; | 42 | }; |
42 | EXPORT_SYMBOL(__per_cpu_offset); | 43 | EXPORT_SYMBOL(__per_cpu_offset); |
43 | 44 | ||
45 | /** | ||
46 | * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA | ||
47 | * | ||
48 | * If NUMA is not configured or there is only one NUMA node available, | ||
49 | * there is no reason to consider NUMA. This function determines | ||
50 | * whether percpu allocation should consider NUMA or not. | ||
51 | * | ||
52 | * RETURNS: | ||
53 | * true if NUMA should be considered; otherwise, false. | ||
54 | */ | ||
55 | static bool __init pcpu_need_numa(void) | ||
56 | { | ||
57 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
58 | pg_data_t *last = NULL; | ||
59 | unsigned int cpu; | ||
60 | |||
61 | for_each_possible_cpu(cpu) { | ||
62 | int node = early_cpu_to_node(cpu); | ||
63 | |||
64 | if (node_online(node) && NODE_DATA(node) && | ||
65 | last && last != NODE_DATA(node)) | ||
66 | return true; | ||
67 | |||
68 | last = NODE_DATA(node); | ||
69 | } | ||
70 | #endif | ||
71 | return false; | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu | ||
76 | * @cpu: cpu to allocate for | ||
77 | * @size: size of allocation in bytes | ||
78 | * @align: alignment | ||
79 | * | ||
80 | * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper | ||
81 | * does the right thing for NUMA regardless of the current | ||
82 | * configuration. | ||
83 | * | ||
84 | * RETURNS: | ||
85 | * Pointer to the allocated area on success, NULL on failure. | ||
86 | */ | ||
87 | static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size, | ||
88 | unsigned long align) | ||
89 | { | ||
90 | const unsigned long goal = __pa(MAX_DMA_ADDRESS); | ||
91 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
92 | int node = early_cpu_to_node(cpu); | ||
93 | void *ptr; | ||
94 | |||
95 | if (!node_online(node) || !NODE_DATA(node)) { | ||
96 | ptr = __alloc_bootmem_nopanic(size, align, goal); | ||
97 | pr_info("cpu %d has no node %d or node-local memory\n", | ||
98 | cpu, node); | ||
99 | pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n", | ||
100 | cpu, size, __pa(ptr)); | ||
101 | } else { | ||
102 | ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node), | ||
103 | size, align, goal); | ||
104 | pr_debug("per cpu data for cpu%d %lu bytes on node%d at " | ||
105 | "%016lx\n", cpu, size, node, __pa(ptr)); | ||
106 | } | ||
107 | return ptr; | ||
108 | #else | ||
109 | return __alloc_bootmem_nopanic(size, align, goal); | ||
110 | #endif | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * Remap allocator | ||
115 | * | ||
116 | * This allocator uses a PMD page as its unit. A PMD page is allocated | ||
117 | * for each cpu and remapped into the vmalloc area using a PMD mapping. | ||
118 | * As a PMD page is quite large, only part of it is used for the first | ||
119 | * chunk. The unused part is returned to the bootmem allocator. | ||
120 | * | ||
121 | * So, the PMD pages are mapped twice - once into the physical mapping | ||
122 | * and once into the vmalloc area for the first percpu chunk. The double | ||
123 | * mapping adds one more PMD TLB entry of pressure, but is still much | ||
124 | * better than using only 4k mappings while remaining NUMA friendly. | ||
125 | */ | ||
126 | #ifdef CONFIG_NEED_MULTIPLE_NODES | ||
127 | static size_t pcpur_size __initdata; | ||
128 | static void **pcpur_ptrs __initdata; | ||
129 | |||
130 | static struct page * __init pcpur_get_page(unsigned int cpu, int pageno) | ||
131 | { | ||
132 | size_t off = (size_t)pageno << PAGE_SHIFT; | ||
133 | |||
134 | if (off >= pcpur_size) | ||
135 | return NULL; | ||
136 | |||
137 | return virt_to_page(pcpur_ptrs[cpu] + off); | ||
138 | } | ||
139 | |||
140 | static ssize_t __init setup_pcpu_remap(size_t static_size) | ||
141 | { | ||
142 | static struct vm_struct vm; | ||
143 | pg_data_t *last; | ||
144 | size_t ptrs_size; | ||
145 | unsigned int cpu; | ||
146 | ssize_t ret; | ||
147 | |||
148 | /* | ||
149 | * If large pages aren't supported, there's no benefit in doing | ||
150 | * this. Also, on non-NUMA, embedding is better. | ||
151 | */ | ||
152 | if (!cpu_has_pse || pcpu_need_numa()) | ||
153 | return -EINVAL; | ||
154 | |||
155 | last = NULL; | ||
156 | for_each_possible_cpu(cpu) { | ||
157 | int node = early_cpu_to_node(cpu); | ||
158 | |||
159 | if (node_online(node) && NODE_DATA(node) && | ||
160 | last && last != NODE_DATA(node)) | ||
161 | goto proceed; | ||
162 | |||
163 | last = NODE_DATA(node); | ||
164 | } | ||
165 | return -EINVAL; | ||
166 | |||
167 | proceed: | ||
168 | /* | ||
169 | * Currently supports only a single page. Supporting multiple | ||
170 | * pages won't be too difficult if it ever becomes necessary. | ||
171 | */ | ||
172 | pcpur_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); | ||
173 | if (pcpur_size > PMD_SIZE) { | ||
174 | pr_warning("PERCPU: static data is larger than large page, " | ||
175 | "can't use large page\n"); | ||
176 | return -EINVAL; | ||
177 | } | ||
178 | |||
179 | /* allocate pointer array and alloc large pages */ | ||
180 | ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0])); | ||
181 | pcpur_ptrs = alloc_bootmem(ptrs_size); | ||
182 | |||
183 | for_each_possible_cpu(cpu) { | ||
184 | pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE); | ||
185 | if (!pcpur_ptrs[cpu]) | ||
186 | goto enomem; | ||
187 | |||
188 | /* | ||
189 | * Only use pcpur_size bytes and give back the rest. | ||
190 | * | ||
191 | * Ingo: The 2MB up-rounding bootmem is needed to make | ||
192 | * sure the partial 2MB page is still fully RAM - it's | ||
193 | * not well-specified to have a PAT-incompatible area | ||
194 | * (unmapped RAM, device memory, etc.) in that hole. | ||
195 | */ | ||
196 | free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size), | ||
197 | PMD_SIZE - pcpur_size); | ||
198 | |||
199 | memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size); | ||
200 | } | ||
201 | |||
202 | /* allocate address and map */ | ||
203 | vm.flags = VM_ALLOC; | ||
204 | vm.size = num_possible_cpus() * PMD_SIZE; | ||
205 | vm_area_register_early(&vm, PMD_SIZE); | ||
206 | |||
207 | for_each_possible_cpu(cpu) { | ||
208 | pmd_t *pmd; | ||
209 | |||
210 | pmd = populate_extra_pmd((unsigned long)vm.addr | ||
211 | + cpu * PMD_SIZE); | ||
212 | set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])), | ||
213 | PAGE_KERNEL_LARGE)); | ||
214 | } | ||
215 | |||
216 | /* we're ready, commit */ | ||
217 | pr_info("PERCPU: Remapped at %p with large pages, static data " | ||
218 | "%zu bytes\n", vm.addr, static_size); | ||
219 | |||
220 | ret = pcpu_setup_first_chunk(pcpur_get_page, static_size, PMD_SIZE, | ||
221 | pcpur_size - static_size, vm.addr, NULL); | ||
222 | goto out_free_ar; | ||
223 | |||
224 | enomem: | ||
225 | for_each_possible_cpu(cpu) | ||
226 | if (pcpur_ptrs[cpu]) | ||
227 | free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE); | ||
228 | ret = -ENOMEM; | ||
229 | out_free_ar: | ||
230 | free_bootmem(__pa(pcpur_ptrs), ptrs_size); | ||
231 | return ret; | ||
232 | } | ||
233 | #else | ||
234 | static ssize_t __init setup_pcpu_remap(size_t static_size) | ||
235 | { | ||
236 | return -EINVAL; | ||
237 | } | ||
238 | #endif | ||
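
To make the remap allocator's size arithmetic concrete: the per-cpu unit is the page-aligned static image plus the dynamic reserve, and whatever remains of each 2MB PMD page goes back to bootmem. The sizes below are invented for illustration:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(2UL * 1024 * 1024)	/* one 2MB large page */
#define PFN_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	/* invented sizes: static percpu image plus dynamic reserve */
	unsigned long static_size = 45000;
	unsigned long dyn_reserve = 20 * PAGE_SIZE;
	unsigned long pcpur_size = PFN_ALIGN(static_size + dyn_reserve);

	printf("unit occupies %lu bytes of each PMD page\n", pcpur_size);
	printf("%lu bytes per cpu handed back to bootmem\n",
	       PMD_SIZE - pcpur_size);
	return 0;
}
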
239 | |||
240 | /* | ||
241 | * Embedding allocator | ||
242 | * | ||
243 | * The first chunk is sized to just contain the static area plus | ||
244 | * PERCPU_DYNAMIC_RESERVE, allocated as a contiguous area using the | ||
245 | * bootmem allocator, and used as-is without being mapped into the | ||
246 | * vmalloc area. This enables the first chunk to piggy-back on the | ||
247 | * linear physical PMD mapping and adds no additional pressure to | ||
248 | * the TLB. | ||
249 | */ | ||
250 | static void *pcpue_ptr __initdata; | ||
251 | static size_t pcpue_unit_size __initdata; | ||
252 | |||
253 | static struct page * __init pcpue_get_page(unsigned int cpu, int pageno) | ||
254 | { | ||
255 | return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size | ||
256 | + ((size_t)pageno << PAGE_SHIFT)); | ||
257 | } | ||
258 | |||
259 | static ssize_t __init setup_pcpu_embed(size_t static_size) | ||
260 | { | ||
261 | unsigned int cpu; | ||
262 | |||
263 | /* | ||
264 | * If large pages aren't supported, there's no benefit in doing | ||
265 | * this. Also, embedding allocation doesn't play well with | ||
266 | * NUMA. | ||
267 | */ | ||
268 | if (!cpu_has_pse || pcpu_need_numa()) | ||
269 | return -EINVAL; | ||
270 | |||
271 | /* allocate and copy */ | ||
272 | pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE); | ||
273 | pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE); | ||
274 | pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size, | ||
275 | PAGE_SIZE); | ||
276 | if (!pcpue_ptr) | ||
277 | return -ENOMEM; | ||
278 | |||
279 | for_each_possible_cpu(cpu) | ||
280 | memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load, | ||
281 | static_size); | ||
282 | |||
283 | /* we're ready, commit */ | ||
284 | pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n", | ||
285 | pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size); | ||
286 | |||
287 | return pcpu_setup_first_chunk(pcpue_get_page, static_size, | ||
288 | pcpue_unit_size, | ||
289 | pcpue_unit_size - static_size, pcpue_ptr, | ||
290 | NULL); | ||
291 | } | ||
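
pcpue_get_page() above is pure address arithmetic over one contiguous block: cpu-major units of pcpue_unit_size bytes each, with page offsets inside a unit. A sketch of the same computation with made-up values:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* address backing (cpu, pageno) in a contiguous first chunk,
 * mirroring pcpue_get_page(); all values are illustrative */
static unsigned long chunk_addr(unsigned long base, unsigned long unit_size,
				unsigned int cpu, int pageno)
{
	return base + cpu * unit_size + (unsigned long)pageno * PAGE_SIZE;
}

int main(void)
{
	unsigned long base = 0x100000, unit = 16 * PAGE_SIZE;

	printf("cpu 1, page 2 lives at %#lx\n", chunk_addr(base, unit, 1, 2));
	return 0;
}
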
292 | |||
293 | /* | ||
294 | * 4k page allocator | ||
295 | * | ||
296 | * This is the basic allocator. The static percpu area is allocated | ||
297 | * page-by-page and most of the initialization is done by the generic | ||
298 | * setup function. | ||
299 | */ | ||
300 | static struct page **pcpu4k_pages __initdata; | ||
301 | static int pcpu4k_nr_static_pages __initdata; | ||
302 | |||
303 | static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno) | ||
304 | { | ||
305 | if (pageno < pcpu4k_nr_static_pages) | ||
306 | return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno]; | ||
307 | return NULL; | ||
308 | } | ||
309 | |||
310 | static void __init pcpu4k_populate_pte(unsigned long addr) | ||
311 | { | ||
312 | populate_extra_pte(addr); | ||
313 | } | ||
314 | |||
315 | static ssize_t __init setup_pcpu_4k(size_t static_size) | ||
316 | { | ||
317 | size_t pages_size; | ||
318 | unsigned int cpu; | ||
319 | int i, j; | ||
320 | ssize_t ret; | ||
321 | |||
322 | pcpu4k_nr_static_pages = PFN_UP(static_size); | ||
323 | |||
324 | /* unaligned allocations can't be freed, round up to page size */ | ||
325 | pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus() | ||
326 | * sizeof(pcpu4k_pages[0])); | ||
327 | pcpu4k_pages = alloc_bootmem(pages_size); | ||
328 | |||
329 | /* allocate and copy */ | ||
330 | j = 0; | ||
331 | for_each_possible_cpu(cpu) | ||
332 | for (i = 0; i < pcpu4k_nr_static_pages; i++) { | ||
333 | void *ptr; | ||
334 | |||
335 | ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE); | ||
336 | if (!ptr) | ||
337 | goto enomem; | ||
338 | |||
339 | memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE); | ||
340 | pcpu4k_pages[j++] = virt_to_page(ptr); | ||
341 | } | ||
342 | |||
343 | /* we're ready, commit */ | ||
344 | pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n", | ||
345 | pcpu4k_nr_static_pages, static_size); | ||
346 | |||
347 | ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL, | ||
348 | pcpu4k_populate_pte); | ||
349 | goto out_free_ar; | ||
350 | |||
351 | enomem: | ||
352 | while (--j >= 0) | ||
353 | free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE); | ||
354 | ret = -ENOMEM; | ||
355 | out_free_ar: | ||
356 | free_bootmem(__pa(pcpu4k_pages), pages_size); | ||
357 | return ret; | ||
358 | } | ||
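
pcpu4k_get_page() indexes a single flat array of page pointers laid out cpu-major, exactly the order the nested allocation loop above fills it in. A tiny sketch of the flattened 2D lookup (fake page ids, illustrative sizes):

#include <stdio.h>

int main(void)
{
	/* illustrative counts: 2 cpus, 3 static pages each */
	int nr_pages = 3, nr_cpus = 2;
	int pages[6];	/* nr_cpus * nr_pages, cpu-major */
	int cpu, pg, j = 0;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		for (pg = 0; pg < nr_pages; pg++)
			pages[j++] = cpu * 100 + pg;	/* fake page id */

	/* the lookup pcpu4k_get_page() performs for (cpu=1, pageno=2) */
	printf("%d\n", pages[1 * nr_pages + 2]);	/* prints 102 */
	return 0;
}
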
359 | |||
44 | static inline void setup_percpu_segment(int cpu) | 360 | static inline void setup_percpu_segment(int cpu) |
45 | { | 361 | { |
46 | #ifdef CONFIG_X86_32 | 362 | #ifdef CONFIG_X86_32 |
@@ -61,38 +377,35 @@ static inline void setup_percpu_segment(int cpu) | |||
61 | */ | 377 | */ |
62 | void __init setup_per_cpu_areas(void) | 378 | void __init setup_per_cpu_areas(void) |
63 | { | 379 | { |
64 | ssize_t size; | 380 | size_t static_size = __per_cpu_end - __per_cpu_start; |
65 | char *ptr; | 381 | unsigned int cpu; |
66 | int cpu; | 382 | unsigned long delta; |
67 | 383 | size_t pcpu_unit_size; | |
68 | /* Copy section for each CPU (we discard the original) */ | 384 | ssize_t ret; |
69 | size = roundup(PERCPU_ENOUGH_ROOM, PAGE_SIZE); | ||
70 | 385 | ||
71 | pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", | 386 | pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n", |
72 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); | 387 | NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids); |
73 | 388 | ||
74 | pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size); | 389 | /* |
390 | * Allocate percpu area. If PSE is supported, try to make use | ||
391 | * of large page mappings. Please read comments on top of | ||
392 | * each allocator for details. | ||
393 | */ | ||
394 | ret = setup_pcpu_remap(static_size); | ||
395 | if (ret < 0) | ||
396 | ret = setup_pcpu_embed(static_size); | ||
397 | if (ret < 0) | ||
398 | ret = setup_pcpu_4k(static_size); | ||
399 | if (ret < 0) | ||
400 | panic("cannot allocate static percpu area (%zu bytes, err=%zd)", | ||
401 | static_size, ret); | ||
75 | 402 | ||
76 | for_each_possible_cpu(cpu) { | 403 | pcpu_unit_size = ret; |
77 | #ifndef CONFIG_NEED_MULTIPLE_NODES | ||
78 | ptr = alloc_bootmem_pages(size); | ||
79 | #else | ||
80 | int node = early_cpu_to_node(cpu); | ||
81 | if (!node_online(node) || !NODE_DATA(node)) { | ||
82 | ptr = alloc_bootmem_pages(size); | ||
83 | pr_info("cpu %d has no node %d or node-local memory\n", | ||
84 | cpu, node); | ||
85 | pr_debug("per cpu data for cpu%d at %016lx\n", | ||
86 | cpu, __pa(ptr)); | ||
87 | } else { | ||
88 | ptr = alloc_bootmem_pages_node(NODE_DATA(node), size); | ||
89 | pr_debug("per cpu data for cpu%d on node%d at %016lx\n", | ||
90 | cpu, node, __pa(ptr)); | ||
91 | } | ||
92 | #endif | ||
93 | 404 | ||
94 | memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start); | 405 | /* alrighty, percpu areas up and running */ |
95 | per_cpu_offset(cpu) = ptr - __per_cpu_start; | 406 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; |
407 | for_each_possible_cpu(cpu) { | ||
408 | per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size; | ||
96 | per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); | 409 | per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); |
97 | per_cpu(cpu_number, cpu) = cpu; | 410 | per_cpu(cpu_number, cpu) = cpu; |
98 | setup_percpu_segment(cpu); | 411 | setup_percpu_segment(cpu); |
@@ -125,8 +438,6 @@ void __init setup_per_cpu_areas(void) | |||
125 | */ | 438 | */ |
126 | if (cpu == boot_cpu_id) | 439 | if (cpu == boot_cpu_id) |
127 | switch_to_new_gdt(cpu); | 440 | switch_to_new_gdt(cpu); |
128 | |||
129 | DBG("PERCPU: cpu %4d %p\n", cpu, ptr); | ||
130 | } | 441 | } |
131 | 442 | ||
132 | /* indicate the early static arrays will soon be gone */ | 443 | /* indicate the early static arrays will soon be gone */ |
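
The offset computation at the end of the new setup_per_cpu_areas() is worth spelling out: delta maps the linker's static percpu image onto the first chunk's base, and each cpu's copy sits one unit further along, so &per_cpu(var, cpu) == &var + per_cpu_offset(cpu). A sketch with invented addresses:

#include <stdio.h>

int main(void)
{
	/* all addresses below are illustrative, not real kernel values */
	unsigned long per_cpu_start = 0x1000;	/* linker symbol */
	unsigned long base_addr     = 0x200000;	/* first chunk base */
	unsigned long unit_size     = 0x8000;
	unsigned long var           = 0x1040;	/* some percpu variable */
	unsigned int cpu;

	unsigned long delta = base_addr - per_cpu_start;

	for (cpu = 0; cpu < 2; cpu++)
		printf("cpu%u copy of var at %#lx\n",
		       cpu, var + delta + cpu * unit_size);
	return 0;
}
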
diff --git a/arch/x86/math-emu/fpu_aux.c b/arch/x86/math-emu/fpu_aux.c index 491e737ce547..aa0987088774 100644 --- a/arch/x86/math-emu/fpu_aux.c +++ b/arch/x86/math-emu/fpu_aux.c | |||
@@ -30,20 +30,29 @@ static void fclex(void) | |||
30 | } | 30 | } |
31 | 31 | ||
32 | /* Needs to be externally visible */ | 32 | /* Needs to be externally visible */ |
33 | void finit(void) | 33 | void finit_task(struct task_struct *tsk) |
34 | { | 34 | { |
35 | control_word = 0x037f; | 35 | struct i387_soft_struct *soft = &tsk->thread.xstate->soft; |
36 | partial_status = 0; | 36 | struct address *oaddr, *iaddr; |
37 | top = 0; /* We don't keep top in the status word internally. */ | 37 | soft->cwd = 0x037f; |
38 | fpu_tag_word = 0xffff; | 38 | soft->swd = 0; |
39 | soft->ftop = 0; /* We don't keep top in the status word internally. */ | ||
40 | soft->twd = 0xffff; | ||
39 | /* The behaviour is different from that detailed in | 41 | /* The behaviour is different from that detailed in |
40 | Section 15.1.6 of the Intel manual */ | 42 | Section 15.1.6 of the Intel manual */ |
41 | operand_address.offset = 0; | 43 | oaddr = (struct address *)&soft->foo; |
42 | operand_address.selector = 0; | 44 | oaddr->offset = 0; |
43 | instruction_address.offset = 0; | 45 | oaddr->selector = 0; |
44 | instruction_address.selector = 0; | 46 | iaddr = (struct address *)&soft->fip; |
45 | instruction_address.opcode = 0; | 47 | iaddr->offset = 0; |
46 | no_ip_update = 1; | 48 | iaddr->selector = 0; |
49 | iaddr->opcode = 0; | ||
50 | soft->no_update = 1; | ||
51 | } | ||
52 | |||
53 | void finit(void) | ||
54 | { | ||
55 | finit_task(current); | ||
47 | } | 56 | } |
48 | 57 | ||
49 | /* | 58 | /* |
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index d57dfffb0213..2966c6b8d304 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c | |||
@@ -131,6 +131,23 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) | |||
131 | return pte_offset_kernel(pmd, 0); | 131 | return pte_offset_kernel(pmd, 0); |
132 | } | 132 | } |
133 | 133 | ||
134 | pmd_t * __init populate_extra_pmd(unsigned long vaddr) | ||
135 | { | ||
136 | int pgd_idx = pgd_index(vaddr); | ||
137 | int pmd_idx = pmd_index(vaddr); | ||
138 | |||
139 | return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx; | ||
140 | } | ||
141 | |||
142 | pte_t * __init populate_extra_pte(unsigned long vaddr) | ||
143 | { | ||
144 | int pte_idx = pte_index(vaddr); | ||
145 | pmd_t *pmd; | ||
146 | |||
147 | pmd = populate_extra_pmd(vaddr); | ||
148 | return one_page_table_init(pmd) + pte_idx; | ||
149 | } | ||
150 | |||
134 | static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, | 151 | static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, |
135 | unsigned long vaddr, pte_t *lastpte) | 152 | unsigned long vaddr, pte_t *lastpte) |
136 | { | 153 | { |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 7dd7ce49d69b..8a853bc3b287 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -161,34 +161,51 @@ static __ref void *spp_getpage(void) | |||
161 | return ptr; | 161 | return ptr; |
162 | } | 162 | } |
163 | 163 | ||
164 | void | 164 | static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr) |
165 | set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) | ||
166 | { | 165 | { |
167 | pud_t *pud; | 166 | if (pgd_none(*pgd)) { |
168 | pmd_t *pmd; | 167 | pud_t *pud = (pud_t *)spp_getpage(); |
169 | pte_t *pte; | 168 | pgd_populate(&init_mm, pgd, pud); |
169 | if (pud != pud_offset(pgd, 0)) | ||
170 | printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n", | ||
171 | pud, pud_offset(pgd, 0)); | ||
172 | } | ||
173 | return pud_offset(pgd, vaddr); | ||
174 | } | ||
170 | 175 | ||
171 | pud = pud_page + pud_index(vaddr); | 176 | static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr) |
177 | { | ||
172 | if (pud_none(*pud)) { | 178 | if (pud_none(*pud)) { |
173 | pmd = (pmd_t *) spp_getpage(); | 179 | pmd_t *pmd = (pmd_t *) spp_getpage(); |
174 | pud_populate(&init_mm, pud, pmd); | 180 | pud_populate(&init_mm, pud, pmd); |
175 | if (pmd != pmd_offset(pud, 0)) { | 181 | if (pmd != pmd_offset(pud, 0)) |
176 | printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n", | 182 | printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n", |
177 | pmd, pmd_offset(pud, 0)); | 183 | pmd, pmd_offset(pud, 0)); |
178 | return; | ||
179 | } | ||
180 | } | 184 | } |
181 | pmd = pmd_offset(pud, vaddr); | 185 | return pmd_offset(pud, vaddr); |
186 | } | ||
187 | |||
188 | static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr) | ||
189 | { | ||
182 | if (pmd_none(*pmd)) { | 190 | if (pmd_none(*pmd)) { |
183 | pte = (pte_t *) spp_getpage(); | 191 | pte_t *pte = (pte_t *) spp_getpage(); |
184 | pmd_populate_kernel(&init_mm, pmd, pte); | 192 | pmd_populate_kernel(&init_mm, pmd, pte); |
185 | if (pte != pte_offset_kernel(pmd, 0)) { | 193 | if (pte != pte_offset_kernel(pmd, 0)) |
186 | printk(KERN_ERR "PAGETABLE BUG #02!\n"); | 194 | printk(KERN_ERR "PAGETABLE BUG #02!\n"); |
187 | return; | ||
188 | } | ||
189 | } | 195 | } |
196 | return pte_offset_kernel(pmd, vaddr); | ||
197 | } | ||
198 | |||
199 | void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) | ||
200 | { | ||
201 | pud_t *pud; | ||
202 | pmd_t *pmd; | ||
203 | pte_t *pte; | ||
204 | |||
205 | pud = pud_page + pud_index(vaddr); | ||
206 | pmd = fill_pmd(pud, vaddr); | ||
207 | pte = fill_pte(pmd, vaddr); | ||
190 | 208 | ||
191 | pte = pte_offset_kernel(pmd, vaddr); | ||
192 | set_pte(pte, new_pte); | 209 | set_pte(pte, new_pte); |
193 | 210 | ||
194 | /* | 211 | /* |
@@ -198,8 +215,7 @@ set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte) | |||
198 | __flush_tlb_one(vaddr); | 215 | __flush_tlb_one(vaddr); |
199 | } | 216 | } |
200 | 217 | ||
201 | void | 218 | void set_pte_vaddr(unsigned long vaddr, pte_t pteval) |
202 | set_pte_vaddr(unsigned long vaddr, pte_t pteval) | ||
203 | { | 219 | { |
204 | pgd_t *pgd; | 220 | pgd_t *pgd; |
205 | pud_t *pud_page; | 221 | pud_t *pud_page; |
@@ -216,6 +232,24 @@ set_pte_vaddr(unsigned long vaddr, pte_t pteval) | |||
216 | set_pte_vaddr_pud(pud_page, vaddr, pteval); | 232 | set_pte_vaddr_pud(pud_page, vaddr, pteval); |
217 | } | 233 | } |
218 | 234 | ||
235 | pmd_t * __init populate_extra_pmd(unsigned long vaddr) | ||
236 | { | ||
237 | pgd_t *pgd; | ||
238 | pud_t *pud; | ||
239 | |||
240 | pgd = pgd_offset_k(vaddr); | ||
241 | pud = fill_pud(pgd, vaddr); | ||
242 | return fill_pmd(pud, vaddr); | ||
243 | } | ||
244 | |||
245 | pte_t * __init populate_extra_pte(unsigned long vaddr) | ||
246 | { | ||
247 | pmd_t *pmd; | ||
248 | |||
249 | pmd = populate_extra_pmd(vaddr); | ||
250 | return fill_pte(pmd, vaddr); | ||
251 | } | ||
252 | |||
219 | /* | 253 | /* |
220 | * Create large page table mappings for a range of physical addresses. | 254 | * Create large page table mappings for a range of physical addresses. |
221 | */ | 255 | */ |
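
The fill_pud()/fill_pmd()/fill_pte() helpers all follow one walk-or-allocate pattern: allocate the lower-level table only if the entry is empty, then return the offset into it, which is what lets populate_extra_pmd()/populate_extra_pte() reuse them. A two-level toy version of the pattern in user-space C (512-entry tables as on x86, everything else simplified):

#include <stdio.h>
#include <stdlib.h>

/* two toy levels of the walk-or-allocate "fill" pattern */
struct pte { int val; };
struct pmd { struct pte *ptes; };

static struct pte *fill_pte(struct pmd *pmd, int idx)
{
	if (!pmd->ptes)					/* pmd_none() */
		pmd->ptes = calloc(512, sizeof(struct pte));
	return &pmd->ptes[idx];				/* pte_offset */
}

int main(void)
{
	struct pmd pmd = { 0 };

	fill_pte(&pmd, 7)->val = 42;		/* allocates on first use */
	printf("%d\n", fill_pte(&pmd, 7)->val);	/* reuses table: 42 */
	free(pmd.ptes);
	return 0;
}
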
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index c52f4034c7fd..82cd39a6cbd3 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -103,7 +103,7 @@ static void xen_vcpu_setup(int cpu) | |||
103 | 103 | ||
104 | vcpup = &per_cpu(xen_vcpu_info, cpu); | 104 | vcpup = &per_cpu(xen_vcpu_info, cpu); |
105 | 105 | ||
106 | info.mfn = virt_to_mfn(vcpup); | 106 | info.mfn = arbitrary_virt_to_mfn(vcpup); |
107 | info.offset = offset_in_page(vcpup); | 107 | info.offset = offset_in_page(vcpup); |
108 | 108 | ||
109 | printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n", | 109 | printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n", |
@@ -301,8 +301,10 @@ static void xen_load_gdt(const struct desc_ptr *dtr) | |||
301 | frames = mcs.args; | 301 | frames = mcs.args; |
302 | 302 | ||
303 | for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { | 303 | for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) { |
304 | frames[f] = virt_to_mfn(va); | 304 | frames[f] = arbitrary_virt_to_mfn((void *)va); |
305 | |||
305 | make_lowmem_page_readonly((void *)va); | 306 | make_lowmem_page_readonly((void *)va); |
307 | make_lowmem_page_readonly(mfn_to_virt(frames[f])); | ||
306 | } | 308 | } |
307 | 309 | ||
308 | MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); | 310 | MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct)); |
@@ -314,7 +316,7 @@ static void load_TLS_descriptor(struct thread_struct *t, | |||
314 | unsigned int cpu, unsigned int i) | 316 | unsigned int cpu, unsigned int i) |
315 | { | 317 | { |
316 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); | 318 | struct desc_struct *gdt = get_cpu_gdt_table(cpu); |
317 | xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); | 319 | xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); |
318 | struct multicall_space mc = __xen_mc_entry(0); | 320 | struct multicall_space mc = __xen_mc_entry(0); |
319 | 321 | ||
320 | MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]); | 322 | MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]); |
@@ -488,7 +490,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry, | |||
488 | break; | 490 | break; |
489 | 491 | ||
490 | default: { | 492 | default: { |
491 | xmaddr_t maddr = virt_to_machine(&dt[entry]); | 493 | xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]); |
492 | 494 | ||
493 | xen_mc_flush(); | 495 | xen_mc_flush(); |
494 | if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc)) | 496 | if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc)) |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 319bd40a57c2..cb6afa4ec95c 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -276,6 +276,13 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
276 | p2m_top[topidx][idx] = mfn; | 276 | p2m_top[topidx][idx] = mfn; |
277 | } | 277 | } |
278 | 278 | ||
279 | unsigned long arbitrary_virt_to_mfn(void *vaddr) | ||
280 | { | ||
281 | xmaddr_t maddr = arbitrary_virt_to_machine(vaddr); | ||
282 | |||
283 | return PFN_DOWN(maddr.maddr); | ||
284 | } | ||
285 | |||
279 | xmaddr_t arbitrary_virt_to_machine(void *vaddr) | 286 | xmaddr_t arbitrary_virt_to_machine(void *vaddr) |
280 | { | 287 | { |
281 | unsigned long address = (unsigned long)vaddr; | 288 | unsigned long address = (unsigned long)vaddr; |
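
The new arbitrary_virt_to_mfn() is just arbitrary_virt_to_machine() composed with PFN_DOWN: a machine address becomes a machine frame number by shifting out the page offset. A one-liner sketch of the split (PAGE_SHIFT of 12 as on x86):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	/* machine address -> frame number plus in-page offset */
	unsigned long long maddr = 0x12345678ULL;

	printf("mfn %llx, page offset %llx\n",
	       PFN_DOWN(maddr), maddr & ((1ULL << PAGE_SHIFT) - 1));
	return 0;
}
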
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 035582ae815d..8d470562ffc9 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -219,6 +219,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) | |||
219 | { | 219 | { |
220 | struct vcpu_guest_context *ctxt; | 220 | struct vcpu_guest_context *ctxt; |
221 | struct desc_struct *gdt; | 221 | struct desc_struct *gdt; |
222 | unsigned long gdt_mfn; | ||
222 | 223 | ||
223 | if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) | 224 | if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) |
224 | return 0; | 225 | return 0; |
@@ -248,9 +249,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) | |||
248 | ctxt->ldt_ents = 0; | 249 | ctxt->ldt_ents = 0; |
249 | 250 | ||
250 | BUG_ON((unsigned long)gdt & ~PAGE_MASK); | 251 | BUG_ON((unsigned long)gdt & ~PAGE_MASK); |
252 | |||
253 | gdt_mfn = arbitrary_virt_to_mfn(gdt); | ||
251 | make_lowmem_page_readonly(gdt); | 254 | make_lowmem_page_readonly(gdt); |
255 | make_lowmem_page_readonly(mfn_to_virt(gdt_mfn)); | ||
252 | 256 | ||
253 | ctxt->gdt_frames[0] = virt_to_mfn(gdt); | 257 | ctxt->gdt_frames[0] = gdt_mfn; |
254 | ctxt->gdt_ents = GDT_ENTRIES; | 258 | ctxt->gdt_ents = GDT_ENTRIES; |
255 | 259 | ||
256 | ctxt->user_regs.cs = __KERNEL_CS; | 260 | ctxt->user_regs.cs = __KERNEL_CS; |
diff --git a/block/blktrace.c b/block/blktrace.c index 7cf9d1ff45a0..028120a0965a 100644 --- a/block/blktrace.c +++ b/block/blktrace.c | |||
@@ -363,7 +363,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | |||
363 | if (!bt->sequence) | 363 | if (!bt->sequence) |
364 | goto err; | 364 | goto err; |
365 | 365 | ||
366 | bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG); | 366 | bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); |
367 | if (!bt->msg_data) | 367 | if (!bt->msg_data) |
368 | goto err; | 368 | goto err; |
369 | 369 | ||
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 9cc769b587ff..68fd3d292799 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -516,12 +516,12 @@ int acpi_processor_preregister_performance( | |||
516 | continue; | 516 | continue; |
517 | } | 517 | } |
518 | 518 | ||
519 | if (!performance || !percpu_ptr(performance, i)) { | 519 | if (!performance || !per_cpu_ptr(performance, i)) { |
520 | retval = -EINVAL; | 520 | retval = -EINVAL; |
521 | continue; | 521 | continue; |
522 | } | 522 | } |
523 | 523 | ||
524 | pr->performance = percpu_ptr(performance, i); | 524 | pr->performance = per_cpu_ptr(performance, i); |
525 | cpumask_set_cpu(i, pr->performance->shared_cpu_map); | 525 | cpumask_set_cpu(i, pr->performance->shared_cpu_map); |
526 | if (acpi_processor_get_psd(pr)) { | 526 | if (acpi_processor_get_psd(pr)) { |
527 | retval = -EINVAL; | 527 | retval = -EINVAL; |
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 95837bfb5256..455d83219fae 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h | |||
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size); | |||
65 | #define BOOTMEM_DEFAULT 0 | 65 | #define BOOTMEM_DEFAULT 0 |
66 | #define BOOTMEM_EXCLUSIVE (1<<0) | 66 | #define BOOTMEM_EXCLUSIVE (1<<0) |
67 | 67 | ||
68 | extern int reserve_bootmem(unsigned long addr, | ||
69 | unsigned long size, | ||
70 | int flags); | ||
68 | extern int reserve_bootmem_node(pg_data_t *pgdat, | 71 | extern int reserve_bootmem_node(pg_data_t *pgdat, |
69 | unsigned long physaddr, | 72 | unsigned long physaddr, |
70 | unsigned long size, | 73 | unsigned long size, |
71 | int flags); | 74 | int flags); |
72 | #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE | ||
73 | extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags); | ||
74 | #endif | ||
75 | 75 | ||
76 | extern void *__alloc_bootmem_nopanic(unsigned long size, | 76 | extern void *__alloc_bootmem(unsigned long size, |
77 | unsigned long align, | 77 | unsigned long align, |
78 | unsigned long goal); | 78 | unsigned long goal); |
79 | extern void *__alloc_bootmem(unsigned long size, | 79 | extern void *__alloc_bootmem_nopanic(unsigned long size, |
80 | unsigned long align, | 80 | unsigned long align, |
81 | unsigned long goal); | 81 | unsigned long goal); |
82 | extern void *__alloc_bootmem_low(unsigned long size, | ||
83 | unsigned long align, | ||
84 | unsigned long goal); | ||
85 | extern void *__alloc_bootmem_node(pg_data_t *pgdat, | 82 | extern void *__alloc_bootmem_node(pg_data_t *pgdat, |
86 | unsigned long size, | 83 | unsigned long size, |
87 | unsigned long align, | 84 | unsigned long align, |
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, | |||
90 | unsigned long size, | 87 | unsigned long size, |
91 | unsigned long align, | 88 | unsigned long align, |
92 | unsigned long goal); | 89 | unsigned long goal); |
90 | extern void *__alloc_bootmem_low(unsigned long size, | ||
91 | unsigned long align, | ||
92 | unsigned long goal); | ||
93 | extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, | 93 | extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, |
94 | unsigned long size, | 94 | unsigned long size, |
95 | unsigned long align, | 95 | unsigned long align, |
96 | unsigned long goal); | 96 | unsigned long goal); |
97 | #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE | 97 | |
98 | #define alloc_bootmem(x) \ | 98 | #define alloc_bootmem(x) \ |
99 | __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) | 99 | __alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) |
100 | #define alloc_bootmem_nopanic(x) \ | 100 | #define alloc_bootmem_nopanic(x) \ |
101 | __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) | 101 | __alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) |
102 | #define alloc_bootmem_low(x) \ | ||
103 | __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) | ||
104 | #define alloc_bootmem_pages(x) \ | 102 | #define alloc_bootmem_pages(x) \ |
105 | __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | 103 | __alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) |
106 | #define alloc_bootmem_pages_nopanic(x) \ | 104 | #define alloc_bootmem_pages_nopanic(x) \ |
107 | __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | 105 | __alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) |
108 | #define alloc_bootmem_low_pages(x) \ | ||
109 | __alloc_bootmem_low(x, PAGE_SIZE, 0) | ||
110 | #define alloc_bootmem_node(pgdat, x) \ | 106 | #define alloc_bootmem_node(pgdat, x) \ |
111 | __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) | 107 | __alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS)) |
112 | #define alloc_bootmem_pages_node(pgdat, x) \ | 108 | #define alloc_bootmem_pages_node(pgdat, x) \ |
113 | __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | 109 | __alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) |
110 | #define alloc_bootmem_pages_node_nopanic(pgdat, x) \ | ||
111 | __alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) | ||
112 | |||
113 | #define alloc_bootmem_low(x) \ | ||
114 | __alloc_bootmem_low(x, SMP_CACHE_BYTES, 0) | ||
115 | #define alloc_bootmem_low_pages(x) \ | ||
116 | __alloc_bootmem_low(x, PAGE_SIZE, 0) | ||
114 | #define alloc_bootmem_low_pages_node(pgdat, x) \ | 117 | #define alloc_bootmem_low_pages_node(pgdat, x) \ |
115 | __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) | 118 | __alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0) |
116 | #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ | ||
117 | 119 | ||
118 | extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, | 120 | extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, |
119 | int flags); | 121 | int flags); |
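Besides reordering the declarations, the header grows alloc_bootmem_pages_node_nopanic(), a page-aligned, node-affine variant that returns NULL instead of panicking. A hedged usage sketch (setup_node_map() and its arguments are hypothetical):

    static void * __init setup_node_map(pg_data_t *pgdat, unsigned long nr_pages)
    {
            /* expands to __alloc_bootmem_node_nopanic(pgdat, size,
             * PAGE_SIZE, __pa(MAX_DMA_ADDRESS)) per the macro above;
             * the caller must handle a NULL return */
            return alloc_bootmem_pages_node_nopanic(pgdat, nr_pages * PAGE_SIZE);
    }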
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 3577ffd90d45..545b068bcb70 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -76,52 +76,98 @@ | |||
76 | 76 | ||
77 | #ifdef CONFIG_SMP | 77 | #ifdef CONFIG_SMP |
78 | 78 | ||
79 | #ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA | ||
80 | |||
81 | /* minimum unit size, also the maximum supported allocation size */ | ||
82 | #define PCPU_MIN_UNIT_SIZE (16UL << PAGE_SHIFT) | ||
83 | |||
84 | /* | ||
85 | * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy | ||
86 | * back on the first chunk if the arch is manually allocating and | ||
87 | * mapping it for faster access (as part of a large page mapping, | ||
88 | * for example). Note that the dynamic percpu allocator covers both | ||
89 | * static and dynamic areas, so these values are bigger than | ||
90 | * PERCPU_MODULE_RESERVE. | ||
91 | * | ||
92 | * On a typical configuration with modules, the following values | ||
93 | * leave about 8k of free space on the first chunk after boot on both | ||
94 | * x86_32 and x86_64. With module support disabled, it's much tighter. | ||
95 | */ | ||
96 | #ifndef PERCPU_DYNAMIC_RESERVE | ||
97 | # if BITS_PER_LONG > 32 | ||
98 | # ifdef CONFIG_MODULES | ||
99 | # define PERCPU_DYNAMIC_RESERVE (6 << PAGE_SHIFT) | ||
100 | # else | ||
101 | # define PERCPU_DYNAMIC_RESERVE (4 << PAGE_SHIFT) | ||
102 | # endif | ||
103 | # else | ||
104 | # ifdef CONFIG_MODULES | ||
105 | # define PERCPU_DYNAMIC_RESERVE (4 << PAGE_SHIFT) | ||
106 | # else | ||
107 | # define PERCPU_DYNAMIC_RESERVE (2 << PAGE_SHIFT) | ||
108 | # endif | ||
109 | # endif | ||
110 | #endif /* PERCPU_DYNAMIC_RESERVE */ | ||
111 | |||
112 | extern void *pcpu_base_addr; | ||
113 | |||
114 | typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno); | ||
115 | typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr); | ||
116 | |||
117 | extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, | ||
118 | size_t static_size, size_t unit_size, | ||
119 | size_t free_size, void *base_addr, | ||
120 | pcpu_populate_pte_fn_t populate_pte_fn); | ||
121 | |||
122 | /* | ||
123 | * Use this to get to a cpu's version of a dynamically allocated | ||
124 | * per-cpu object. Non-atomic access to the current CPU's | ||
125 | * version should probably be combined with get_cpu()/put_cpu(). | ||
126 | */ | ||
127 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) | ||
128 | |||
129 | #else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ | ||
130 | |||
79 | struct percpu_data { | 131 | struct percpu_data { |
80 | void *ptrs[1]; | 132 | void *ptrs[1]; |
81 | }; | 133 | }; |
82 | 134 | ||
83 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) | 135 | #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) |
84 | /* | 136 | |
85 | * Use this to get to a cpu's version of the per-cpu object dynamically | 137 | #define per_cpu_ptr(ptr, cpu) \ |
86 | * allocated. Non-atomic access to the current CPU's version should | 138 | ({ \ |
87 | * probably be combined with get_cpu()/put_cpu(). | 139 | struct percpu_data *__p = __percpu_disguise(ptr); \ |
88 | */ | 140 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ |
89 | #define percpu_ptr(ptr, cpu) \ | ||
90 | ({ \ | ||
91 | struct percpu_data *__p = __percpu_disguise(ptr); \ | ||
92 | (__typeof__(ptr))__p->ptrs[(cpu)]; \ | ||
93 | }) | 141 | }) |
94 | 142 | ||
95 | extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask); | 143 | #endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ |
96 | extern void percpu_free(void *__pdata); | 144 | |
145 | extern void *__alloc_percpu(size_t size, size_t align); | ||
146 | extern void free_percpu(void *__pdata); | ||
97 | 147 | ||
98 | #else /* CONFIG_SMP */ | 148 | #else /* CONFIG_SMP */ |
99 | 149 | ||
100 | #define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) | 150 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) |
101 | 151 | ||
102 | static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) | 152 | static inline void *__alloc_percpu(size_t size, size_t align) |
103 | { | 153 | { |
104 | return kzalloc(size, gfp); | 154 | /* |
155 | * Can't easily make larger alignment work with kmalloc. WARN | ||
156 | * on it. Larger alignment should only be used for module | ||
157 | * percpu sections on SMP for which this path isn't used. | ||
158 | */ | ||
159 | WARN_ON_ONCE(align > SMP_CACHE_BYTES); | ||
160 | return kzalloc(size, GFP_KERNEL); | ||
105 | } | 161 | } |
106 | 162 | ||
107 | static inline void percpu_free(void *__pdata) | 163 | static inline void free_percpu(void *p) |
108 | { | 164 | { |
109 | kfree(__pdata); | 165 | kfree(p); |
110 | } | 166 | } |
111 | 167 | ||
112 | #endif /* CONFIG_SMP */ | 168 | #endif /* CONFIG_SMP */ |
113 | 169 | ||
114 | #define percpu_alloc_mask(size, gfp, mask) \ | 170 | #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type), \ |
115 | __percpu_alloc_mask((size), (gfp), &(mask)) | 171 | __alignof__(type)) |
116 | |||
117 | #define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map) | ||
118 | |||
119 | /* (legacy) interface for use without CPU hotplug handling */ | ||
120 | |||
121 | #define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \ | ||
122 | cpu_possible_map) | ||
123 | #define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type)) | ||
124 | #define free_percpu(ptr) percpu_free((ptr)) | ||
125 | #define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu)) | ||
126 | 172 | ||
127 | #endif /* __LINUX_PERCPU_H */ | 173 | #endif /* __LINUX_PERCPU_H */ |
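Whichever branch of CONFIG_HAVE_DYNAMIC_PER_CPU_AREA is taken, callers now see a single API: alloc_percpu(type), per_cpu_ptr() and free_percpu(). A minimal sketch of the common pattern (struct hit_stats and stats_init() are illustrative):

    struct hit_stats {
            u64 hits;
            u64 misses;
    };

    static struct hit_stats *stats;

    static int stats_init(void)
    {
            int cpu;

            /* size and alignment are derived from the type */
            stats = alloc_percpu(struct hit_stats);
            if (!stats)
                    return -ENOMEM;

            /* per_cpu_ptr() works on both the dynamic allocator and
             * the allocpercpu.c fallback; areas come back zeroed */
            for_each_possible_cpu(cpu)
                    per_cpu_ptr(stats, cpu)->hits = 0;
            return 0;
    }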
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 9c0890c7a06a..a43ebec3a7b9 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr); | |||
95 | 95 | ||
96 | extern int map_vm_area(struct vm_struct *area, pgprot_t prot, | 96 | extern int map_vm_area(struct vm_struct *area, pgprot_t prot, |
97 | struct page ***pages); | 97 | struct page ***pages); |
98 | extern int map_kernel_range_noflush(unsigned long start, unsigned long size, | ||
99 | pgprot_t prot, struct page **pages); | ||
100 | extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); | ||
98 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); | 101 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); |
99 | 102 | ||
100 | /* Allocate/destroy a 'vmalloc' VM area. */ | 103 | /* Allocate/destroy a 'vmalloc' VM area. */ |
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count); | |||
110 | */ | 113 | */ |
111 | extern rwlock_t vmlist_lock; | 114 | extern rwlock_t vmlist_lock; |
112 | extern struct vm_struct *vmlist; | 115 | extern struct vm_struct *vmlist; |
116 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); | ||
113 | 117 | ||
114 | #endif /* _LINUX_VMALLOC_H */ | 118 | #endif /* _LINUX_VMALLOC_H */ |
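vm_area_register_early() lets boot code reserve a range of the vmalloc address space before the vmalloc allocator itself is running: the caller fills in vm->flags and vm->size, and the chosen address comes back in vm->addr. A sketch of the call pattern, modeled on the pcpu_setup_first_chunk() usage later in this patch (the vm_struct must live in static storage since nothing can be allocated yet; nr_pages is assumed known):

    static struct vm_struct early_vm;

    early_vm.flags = VM_ALLOC;
    early_vm.size = nr_pages << PAGE_SHIFT;
    vm_area_register_early(&early_vm, PAGE_SIZE);  /* PAGE_SIZE alignment */
    /* the reserved range now starts at early_vm.addr */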
diff --git a/kernel/module.c b/kernel/module.c index ba22484a987e..1f0657ae555b 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/tracepoint.h> | 51 | #include <linux/tracepoint.h> |
52 | #include <linux/ftrace.h> | 52 | #include <linux/ftrace.h> |
53 | #include <linux/async.h> | 53 | #include <linux/async.h> |
54 | #include <linux/percpu.h> | ||
54 | 55 | ||
55 | #if 0 | 56 | #if 0 |
56 | #define DEBUGP printk | 57 | #define DEBUGP printk |
@@ -366,6 +367,34 @@ static struct module *find_module(const char *name) | |||
366 | } | 367 | } |
367 | 368 | ||
368 | #ifdef CONFIG_SMP | 369 | #ifdef CONFIG_SMP |
370 | |||
371 | #ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA | ||
372 | |||
373 | static void *percpu_modalloc(unsigned long size, unsigned long align, | ||
374 | const char *name) | ||
375 | { | ||
376 | void *ptr; | ||
377 | |||
378 | if (align > PAGE_SIZE) { | ||
379 | printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", | ||
380 | name, align, PAGE_SIZE); | ||
381 | align = PAGE_SIZE; | ||
382 | } | ||
383 | |||
384 | ptr = __alloc_percpu(size, align); | ||
385 | if (!ptr) | ||
386 | printk(KERN_WARNING | ||
387 | "Could not allocate %lu bytes percpu data\n", size); | ||
388 | return ptr; | ||
389 | } | ||
390 | |||
391 | static void percpu_modfree(void *freeme) | ||
392 | { | ||
393 | free_percpu(freeme); | ||
394 | } | ||
395 | |||
396 | #else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ | ||
397 | |||
369 | /* Number of blocks used and allocated. */ | 398 | /* Number of blocks used and allocated. */ |
370 | static unsigned int pcpu_num_used, pcpu_num_allocated; | 399 | static unsigned int pcpu_num_used, pcpu_num_allocated; |
371 | /* Size of each block. -ve means used. */ | 400 | /* Size of each block. -ve means used. */ |
@@ -480,21 +509,6 @@ static void percpu_modfree(void *freeme) | |||
480 | } | 509 | } |
481 | } | 510 | } |
482 | 511 | ||
483 | static unsigned int find_pcpusec(Elf_Ehdr *hdr, | ||
484 | Elf_Shdr *sechdrs, | ||
485 | const char *secstrings) | ||
486 | { | ||
487 | return find_sec(hdr, sechdrs, secstrings, ".data.percpu"); | ||
488 | } | ||
489 | |||
490 | static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size) | ||
491 | { | ||
492 | int cpu; | ||
493 | |||
494 | for_each_possible_cpu(cpu) | ||
495 | memcpy(pcpudest + per_cpu_offset(cpu), from, size); | ||
496 | } | ||
497 | |||
498 | static int percpu_modinit(void) | 512 | static int percpu_modinit(void) |
499 | { | 513 | { |
500 | pcpu_num_used = 2; | 514 | pcpu_num_used = 2; |
@@ -513,7 +527,26 @@ static int percpu_modinit(void) | |||
513 | return 0; | 527 | return 0; |
514 | } | 528 | } |
515 | __initcall(percpu_modinit); | 529 | __initcall(percpu_modinit); |
530 | |||
531 | #endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */ | ||
532 | |||
533 | static unsigned int find_pcpusec(Elf_Ehdr *hdr, | ||
534 | Elf_Shdr *sechdrs, | ||
535 | const char *secstrings) | ||
536 | { | ||
537 | return find_sec(hdr, sechdrs, secstrings, ".data.percpu"); | ||
538 | } | ||
539 | |||
540 | static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size) | ||
541 | { | ||
542 | int cpu; | ||
543 | |||
544 | for_each_possible_cpu(cpu) | ||
545 | memcpy(pcpudest + per_cpu_offset(cpu), from, size); | ||
546 | } | ||
547 | |||
516 | #else /* ... !CONFIG_SMP */ | 548 | #else /* ... !CONFIG_SMP */ |
549 | |||
517 | static inline void *percpu_modalloc(unsigned long size, unsigned long align, | 550 | static inline void *percpu_modalloc(unsigned long size, unsigned long align, |
518 | const char *name) | 551 | const char *name) |
519 | { | 552 | { |
@@ -535,6 +568,7 @@ static inline void percpu_modcopy(void *pcpudst, const void *src, | |||
535 | /* pcpusec should be 0, and size of that section should be 0. */ | 568 | /* pcpusec should be 0, and size of that section should be 0. */ |
536 | BUG_ON(size != 0); | 569 | BUG_ON(size != 0); |
537 | } | 570 | } |
571 | |||
538 | #endif /* CONFIG_SMP */ | 572 | #endif /* CONFIG_SMP */ |
539 | 573 | ||
540 | #define MODINFO_ATTR(field) \ | 574 | #define MODINFO_ATTR(field) \ |
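Under CONFIG_HAVE_DYNAMIC_PER_CPU_AREA the module loader's percpu_modalloc()/percpu_modfree() become thin wrappers around the new allocator, while find_pcpusec() and percpu_modcopy() are now shared by both implementations. A simplified sketch of how the loader ties them together at load time (error handling omitted; mod, hdr, sechdrs and secstrings are the loader's usual locals):

    unsigned int pcpuindex = find_pcpusec(hdr, sechdrs, secstrings);

    if (pcpuindex) {
            /* alignment above PAGE_SIZE is clamped with a warning */
            void *percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
                                           sechdrs[pcpuindex].sh_addralign,
                                           mod->name);

            /* copy the module's initial percpu data to every cpu */
            percpu_modcopy(percpu, (void *)sechdrs[pcpuindex].sh_addr,
                           sechdrs[pcpuindex].sh_size);
    }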
diff --git a/kernel/sched.c b/kernel/sched.c index 4070cd34effd..0a76d0b6f215 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -9485,7 +9485,7 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9485 | 9485 | ||
9486 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | 9486 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) |
9487 | { | 9487 | { |
9488 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | 9488 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
9489 | u64 data; | 9489 | u64 data; |
9490 | 9490 | ||
9491 | #ifndef CONFIG_64BIT | 9491 | #ifndef CONFIG_64BIT |
@@ -9504,7 +9504,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | |||
9504 | 9504 | ||
9505 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) | 9505 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) |
9506 | { | 9506 | { |
9507 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | 9507 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
9508 | 9508 | ||
9509 | #ifndef CONFIG_64BIT | 9509 | #ifndef CONFIG_64BIT |
9510 | /* | 9510 | /* |
@@ -9600,7 +9600,7 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime) | |||
9600 | ca = task_ca(tsk); | 9600 | ca = task_ca(tsk); |
9601 | 9601 | ||
9602 | for (; ca; ca = ca->parent) { | 9602 | for (; ca; ca = ca->parent) { |
9603 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | 9603 | u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); |
9604 | *cpuusage += cputime; | 9604 | *cpuusage += cputime; |
9605 | } | 9605 | } |
9606 | } | 9606 | } |
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 0cd415ee62a2..74541ca49536 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c | |||
@@ -170,7 +170,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) | |||
170 | * doesn't hit this CPU until we're ready. */ | 170 | * doesn't hit this CPU until we're ready. */ |
171 | get_cpu(); | 171 | get_cpu(); |
172 | for_each_online_cpu(i) { | 172 | for_each_online_cpu(i) { |
173 | sm_work = percpu_ptr(stop_machine_work, i); | 173 | sm_work = per_cpu_ptr(stop_machine_work, i); |
174 | INIT_WORK(sm_work, stop_cpu); | 174 | INIT_WORK(sm_work, stop_cpu); |
175 | queue_work_on(i, stop_machine_wq, sm_work); | 175 | queue_work_on(i, stop_machine_wq, sm_work); |
176 | } | 176 | } |
diff --git a/mm/Makefile b/mm/Makefile index 72255be57f89..818569b68f46 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -30,6 +30,10 @@ obj-$(CONFIG_FAILSLAB) += failslab.o | |||
30 | obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o | 30 | obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o |
31 | obj-$(CONFIG_FS_XIP) += filemap_xip.o | 31 | obj-$(CONFIG_FS_XIP) += filemap_xip.o |
32 | obj-$(CONFIG_MIGRATION) += migrate.o | 32 | obj-$(CONFIG_MIGRATION) += migrate.o |
33 | ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA | ||
34 | obj-$(CONFIG_SMP) += percpu.o | ||
35 | else | ||
33 | obj-$(CONFIG_SMP) += allocpercpu.o | 36 | obj-$(CONFIG_SMP) += allocpercpu.o |
37 | endif | ||
34 | obj-$(CONFIG_QUICKLIST) += quicklist.o | 38 | obj-$(CONFIG_QUICKLIST) += quicklist.o |
35 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o | 39 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o |
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c index 4297bc41bfd2..3653c570232b 100644 --- a/mm/allocpercpu.c +++ b/mm/allocpercpu.c | |||
@@ -99,45 +99,51 @@ static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp, | |||
99 | __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) | 99 | __percpu_populate_mask((__pdata), (size), (gfp), &(mask)) |
100 | 100 | ||
101 | /** | 101 | /** |
102 | * percpu_alloc_mask - initial setup of per-cpu data | 102 | * alloc_percpu - initial setup of per-cpu data |
103 | * @size: size of per-cpu object | 103 | * @size: size of per-cpu object |
104 | * @gfp: may sleep or not etc. | 104 | * @align: alignment |
105 | * @mask: populate per-data for cpu's selected through mask bits | ||
106 | * | 105 | * |
107 | * Populating per-cpu data for all online cpu's would be a typical use case, | 106 | * Allocate dynamic percpu area. Percpu objects are populated with |
108 | * which is simplified by the percpu_alloc() wrapper. | 107 | * zeroed buffers. |
109 | * Per-cpu objects are populated with zeroed buffers. | ||
110 | */ | 108 | */ |
111 | void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask) | 109 | void *__alloc_percpu(size_t size, size_t align) |
112 | { | 110 | { |
113 | /* | 111 | /* |
114 | * We allocate whole cache lines to avoid false sharing | 112 | * We allocate whole cache lines to avoid false sharing |
115 | */ | 113 | */ |
116 | size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size()); | 114 | size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size()); |
117 | void *pdata = kzalloc(sz, gfp); | 115 | void *pdata = kzalloc(sz, GFP_KERNEL); |
118 | void *__pdata = __percpu_disguise(pdata); | 116 | void *__pdata = __percpu_disguise(pdata); |
119 | 117 | ||
118 | /* | ||
119 | * Can't easily make larger alignment work with kmalloc. WARN | ||
120 | * on it. Larger alignment should only be used for module | ||
121 | * percpu sections on SMP for which this path isn't used. | ||
122 | */ | ||
123 | WARN_ON_ONCE(align > __alignof__(unsigned long long)); | ||
124 | |||
120 | if (unlikely(!pdata)) | 125 | if (unlikely(!pdata)) |
121 | return NULL; | 126 | return NULL; |
122 | if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask))) | 127 | if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL, |
128 | &cpu_possible_map))) | ||
123 | return __pdata; | 129 | return __pdata; |
124 | kfree(pdata); | 130 | kfree(pdata); |
125 | return NULL; | 131 | return NULL; |
126 | } | 132 | } |
127 | EXPORT_SYMBOL_GPL(__percpu_alloc_mask); | 133 | EXPORT_SYMBOL_GPL(__alloc_percpu); |
128 | 134 | ||
129 | /** | 135 | /** |
130 | * percpu_free - final cleanup of per-cpu data | 136 | * free_percpu - final cleanup of per-cpu data |
131 | * @__pdata: object to clean up | 137 | * @__pdata: object to clean up |
132 | * | 138 | * |
133 | * We simply clean up any per-cpu object left. No need for the client to | 139 | * We simply clean up any per-cpu object left. No need for the client to |
134 | * track and specify through a bitmask which per-cpu objects are to be freed. | 140 |
135 | */ | 141 | */ |
136 | void percpu_free(void *__pdata) | 142 | void free_percpu(void *__pdata) |
137 | { | 143 | { |
138 | if (unlikely(!__pdata)) | 144 | if (unlikely(!__pdata)) |
139 | return; | 145 | return; |
140 | __percpu_depopulate_mask(__pdata, &cpu_possible_map); | 146 | __percpu_depopulate_mask(__pdata, &cpu_possible_map); |
141 | kfree(__percpu_disguise(__pdata)); | 147 | kfree(__percpu_disguise(__pdata)); |
142 | } | 148 | } |
143 | EXPORT_SYMBOL_GPL(percpu_free); | 149 | EXPORT_SYMBOL_GPL(free_percpu); |
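Note the alignment limit on this compatibility path: anything beyond __alignof__(unsigned long long) only trips the WARN_ON_ONCE, since kzalloc() cannot guarantee more. An illustration of what does and does not fit (struct cacheline_padded is made up):

    struct cacheline_padded {
            unsigned long val;
    } ____cacheline_aligned;

    u64 *ok = alloc_percpu(u64);   /* natural alignment, fine */

    /* __alignof__ here exceeds __alignof__(unsigned long long), so
     * this warns once on the allocpercpu.c path; the dynamic
     * allocator handles alignments up to PAGE_SIZE instead */
    struct cacheline_padded *warned = alloc_percpu(struct cacheline_padded);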
diff --git a/mm/bootmem.c b/mm/bootmem.c index 51a0ccf61e0e..daf92713f7de 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c | |||
@@ -382,7 +382,6 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | |||
382 | return mark_bootmem_node(pgdat->bdata, start, end, 1, flags); | 382 | return mark_bootmem_node(pgdat->bdata, start, end, 1, flags); |
383 | } | 383 | } |
384 | 384 | ||
385 | #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE | ||
386 | /** | 385 | /** |
387 | * reserve_bootmem - mark a page range as usable | 386 | * reserve_bootmem - mark a page range as usable |
388 | * @addr: starting address of the range | 387 | * @addr: starting address of the range |
@@ -403,7 +402,6 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size, | |||
403 | 402 | ||
404 | return mark_bootmem(start, end, 1, flags); | 403 | return mark_bootmem(start, end, 1, flags); |
405 | } | 404 | } |
406 | #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */ | ||
407 | 405 | ||
408 | static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx, | 406 | static unsigned long align_idx(struct bootmem_data *bdata, unsigned long idx, |
409 | unsigned long step) | 407 | unsigned long step) |
@@ -429,8 +427,8 @@ static unsigned long align_off(struct bootmem_data *bdata, unsigned long off, | |||
429 | } | 427 | } |
430 | 428 | ||
431 | static void * __init alloc_bootmem_core(struct bootmem_data *bdata, | 429 | static void * __init alloc_bootmem_core(struct bootmem_data *bdata, |
432 | unsigned long size, unsigned long align, | 430 | unsigned long size, unsigned long align, |
433 | unsigned long goal, unsigned long limit) | 431 | unsigned long goal, unsigned long limit) |
434 | { | 432 | { |
435 | unsigned long fallback = 0; | 433 | unsigned long fallback = 0; |
436 | unsigned long min, max, start, sidx, midx, step; | 434 | unsigned long min, max, start, sidx, midx, step; |
@@ -530,17 +528,34 @@ find_block: | |||
530 | return NULL; | 528 | return NULL; |
531 | } | 529 | } |
532 | 530 | ||
531 | static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata, | ||
532 | unsigned long size, unsigned long align, | ||
533 | unsigned long goal, unsigned long limit) | ||
534 | { | ||
535 | #ifdef CONFIG_HAVE_ARCH_BOOTMEM | ||
536 | bootmem_data_t *p_bdata; | ||
537 | |||
538 | p_bdata = bootmem_arch_preferred_node(bdata, size, align, goal, limit); | ||
539 | if (p_bdata) | ||
540 | return alloc_bootmem_core(p_bdata, size, align, goal, limit); | ||
541 | #endif | ||
542 | return NULL; | ||
543 | } | ||
544 | |||
533 | static void * __init ___alloc_bootmem_nopanic(unsigned long size, | 545 | static void * __init ___alloc_bootmem_nopanic(unsigned long size, |
534 | unsigned long align, | 546 | unsigned long align, |
535 | unsigned long goal, | 547 | unsigned long goal, |
536 | unsigned long limit) | 548 | unsigned long limit) |
537 | { | 549 | { |
538 | bootmem_data_t *bdata; | 550 | bootmem_data_t *bdata; |
551 | void *region; | ||
539 | 552 | ||
540 | restart: | 553 | restart: |
541 | list_for_each_entry(bdata, &bdata_list, list) { | 554 | region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit); |
542 | void *region; | 555 | if (region) |
556 | return region; | ||
543 | 557 | ||
558 | list_for_each_entry(bdata, &bdata_list, list) { | ||
544 | if (goal && bdata->node_low_pfn <= PFN_DOWN(goal)) | 559 | if (goal && bdata->node_low_pfn <= PFN_DOWN(goal)) |
545 | continue; | 560 | continue; |
546 | if (limit && bdata->node_min_pfn >= PFN_DOWN(limit)) | 561 | if (limit && bdata->node_min_pfn >= PFN_DOWN(limit)) |
@@ -618,6 +633,10 @@ static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata, | |||
618 | { | 633 | { |
619 | void *ptr; | 634 | void *ptr; |
620 | 635 | ||
636 | ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit); | ||
637 | if (ptr) | ||
638 | return ptr; | ||
639 | |||
621 | ptr = alloc_bootmem_core(bdata, size, align, goal, limit); | 640 | ptr = alloc_bootmem_core(bdata, size, align, goal, limit); |
622 | if (ptr) | 641 | if (ptr) |
623 | return ptr; | 642 | return ptr; |
@@ -674,6 +693,10 @@ void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, | |||
674 | { | 693 | { |
675 | void *ptr; | 694 | void *ptr; |
676 | 695 | ||
696 | ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0); | ||
697 | if (ptr) | ||
698 | return ptr; | ||
699 | |||
677 | ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0); | 700 | ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0); |
678 | if (ptr) | 701 | if (ptr) |
679 | return ptr; | 702 | return ptr; |
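alloc_arch_preferred_bootmem() gives architectures first shot at early allocations: when CONFIG_HAVE_ARCH_BOOTMEM is set, the arch supplies bootmem_arch_preferred_node() to nominate a bootmem_data_t to try before the generic node scan. A hypothetical arch-side implementation (the readiness flag and node policy are invented for illustration):

    bootmem_data_t * __init bootmem_arch_preferred_node(void *bdata,
                    unsigned long size, unsigned long align,
                    unsigned long goal, unsigned long limit)
    {
            if (!arch_bootmem_ready)        /* assumed arch state flag */
                    return NULL;            /* fall back to the generic scan */

            /* steer the allocation toward a preferred node */
            return NODE_DATA(preferred_nid)->bdata;
    }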
diff --git a/mm/percpu.c b/mm/percpu.c new file mode 100644 index 000000000000..3d0f5456827c --- /dev/null +++ b/mm/percpu.c | |||
@@ -0,0 +1,979 @@ | |||
1 | /* | ||
2 | * linux/mm/percpu.c - percpu memory allocator | ||
3 | * | ||
4 | * Copyright (C) 2009 SUSE Linux Products GmbH | ||
5 | * Copyright (C) 2009 Tejun Heo <tj@kernel.org> | ||
6 | * | ||
7 | * This file is released under the GPLv2. | ||
8 | * | ||
9 | * This is percpu allocator which can handle both static and dynamic | ||
10 | * areas. Percpu areas are allocated in chunks in vmalloc area. Each | ||
11 | * chunk consists of num_possible_cpus() units and the first chunk | ||
12 | * is used for static percpu variables in the kernel image (special | ||
13 | * boot time alloc/init handling necessary as these areas need to be | ||
14 | * brought up before allocation services are running). Units grow as | ||
15 | * necessary and all units grow or shrink in unison. When a chunk is | ||
16 | * filled up, another chunk is allocated, i.e. in the vmalloc area: | ||
17 | * | ||
18 | * c0 c1 c2 | ||
19 | * ------------------- ------------------- ------------ | ||
20 | * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u | ||
21 | * ------------------- ...... ------------------- .... ------------ | ||
22 | * | ||
23 | * Allocation is done in offset-size areas of single unit space. For | ||
24 | * example, an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0, | ||
25 | * c1:u1, c1:u2 and c1:u3. Percpu access can be done by configuring | ||
26 | * percpu base registers UNIT_SIZE apart. | ||
27 | * | ||
28 | * There are usually many small percpu allocations, many of them as | ||
29 | * small as 4 bytes. The allocator organizes chunks into lists | ||
30 | * according to free size and tries to allocate from the fullest one. | ||
31 | * Each chunk keeps the maximum contiguous area size hint which is | ||
32 | * guaranteed to be equal to or larger than the maximum contiguous | ||
33 | * area in the chunk. This helps the allocator not to iterate the | ||
34 | * chunk maps unnecessarily. | ||
35 | * | ||
36 | * Allocation state in each chunk is kept using an array of integers | ||
37 | * on chunk->map. A positive value in the map represents a free | ||
38 | * region and a negative one allocated. Allocation inside a chunk is done | ||
39 | * by scanning this map sequentially and serving the first matching | ||
40 | * entry. This is mostly copied from the percpu_modalloc() allocator. | ||
41 | * Chunks are also linked into a rb tree to ease address to chunk | ||
42 | * mapping during free. | ||
43 | * | ||
44 | * To use this allocator, arch code should do the following. | ||
45 | * | ||
46 | * - define CONFIG_HAVE_DYNAMIC_PER_CPU_AREA | ||
47 | * | ||
48 | * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate | ||
49 | * regular address to percpu pointer and back | ||
50 | * | ||
51 | * - use pcpu_setup_first_chunk() during percpu area initialization to | ||
52 | * setup the first chunk containing the kernel static percpu area | ||
53 | */ | ||
54 | |||
55 | #include <linux/bitmap.h> | ||
56 | #include <linux/bootmem.h> | ||
57 | #include <linux/list.h> | ||
58 | #include <linux/mm.h> | ||
59 | #include <linux/module.h> | ||
60 | #include <linux/mutex.h> | ||
61 | #include <linux/percpu.h> | ||
62 | #include <linux/pfn.h> | ||
63 | #include <linux/rbtree.h> | ||
64 | #include <linux/slab.h> | ||
65 | #include <linux/vmalloc.h> | ||
66 | |||
67 | #include <asm/cacheflush.h> | ||
68 | #include <asm/tlbflush.h> | ||
69 | |||
70 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ | ||
71 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ | ||
72 | |||
73 | struct pcpu_chunk { | ||
74 | struct list_head list; /* linked to pcpu_slot lists */ | ||
75 | struct rb_node rb_node; /* key is chunk->vm->addr */ | ||
76 | int free_size; /* free bytes in the chunk */ | ||
77 | int contig_hint; /* max contiguous size hint */ | ||
78 | struct vm_struct *vm; /* mapped vmalloc region */ | ||
79 | int map_used; /* # of map entries used */ | ||
80 | int map_alloc; /* # of map entries allocated */ | ||
81 | int *map; /* allocation map */ | ||
82 | bool immutable; /* no [de]population allowed */ | ||
83 | struct page *page[]; /* #cpus * UNIT_PAGES */ | ||
84 | }; | ||
85 | |||
86 | static int pcpu_unit_pages __read_mostly; | ||
87 | static int pcpu_unit_size __read_mostly; | ||
88 | static int pcpu_chunk_size __read_mostly; | ||
89 | static int pcpu_nr_slots __read_mostly; | ||
90 | static size_t pcpu_chunk_struct_size __read_mostly; | ||
91 | |||
92 | /* the address of the first chunk which starts with the kernel static area */ | ||
93 | void *pcpu_base_addr __read_mostly; | ||
94 | EXPORT_SYMBOL_GPL(pcpu_base_addr); | ||
95 | |||
96 | /* the size of kernel static area */ | ||
97 | static int pcpu_static_size __read_mostly; | ||
98 | |||
99 | /* | ||
100 | * One mutex to rule them all. | ||
101 | * | ||
102 | * The following mutex is grabbed in the outermost public alloc/free | ||
103 | * interface functions and released only when the operation is | ||
104 | * complete. As such, every function in this file other than the | ||
105 | * outermost ones is called under pcpu_mutex. | ||
106 | * | ||
107 | * It can easily be switched to use a spinlock such that only the area | ||
108 | * allocation and page population commit are protected with it doing | ||
109 | * actual [de]allocation without holding any lock. However, given | ||
110 | * what this allocator does, I think it's better to let them run | ||
111 | * sequentially. | ||
112 | */ | ||
113 | static DEFINE_MUTEX(pcpu_mutex); | ||
114 | |||
115 | static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ | ||
116 | static struct rb_root pcpu_addr_root = RB_ROOT; /* chunks by address */ | ||
117 | |||
118 | static int __pcpu_size_to_slot(int size) | ||
119 | { | ||
120 | int highbit = fls(size); /* size is in bytes */ | ||
121 | return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1); | ||
122 | } | ||
123 | |||
124 | static int pcpu_size_to_slot(int size) | ||
125 | { | ||
126 | if (size == pcpu_unit_size) | ||
127 | return pcpu_nr_slots - 1; | ||
128 | return __pcpu_size_to_slot(size); | ||
129 | } | ||
130 | |||
131 | static int pcpu_chunk_slot(const struct pcpu_chunk *chunk) | ||
132 | { | ||
133 | if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int)) | ||
134 | return 0; | ||
135 | |||
136 | return pcpu_size_to_slot(chunk->free_size); | ||
137 | } | ||
138 | |||
139 | static int pcpu_page_idx(unsigned int cpu, int page_idx) | ||
140 | { | ||
141 | return cpu * pcpu_unit_pages + page_idx; | ||
142 | } | ||
143 | |||
144 | static struct page **pcpu_chunk_pagep(struct pcpu_chunk *chunk, | ||
145 | unsigned int cpu, int page_idx) | ||
146 | { | ||
147 | return &chunk->page[pcpu_page_idx(cpu, page_idx)]; | ||
148 | } | ||
149 | |||
150 | static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, | ||
151 | unsigned int cpu, int page_idx) | ||
152 | { | ||
153 | return (unsigned long)chunk->vm->addr + | ||
154 | (pcpu_page_idx(cpu, page_idx) << PAGE_SHIFT); | ||
155 | } | ||
156 | |||
157 | static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk, | ||
158 | int page_idx) | ||
159 | { | ||
160 | return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL; | ||
161 | } | ||
162 | |||
163 | /** | ||
164 | * pcpu_realloc - versatile realloc | ||
165 | * @p: the current pointer (can be NULL for new allocations) | ||
166 | * @size: the current size in bytes (can be 0 for new allocations) | ||
167 | * @new_size: the wanted new size in bytes (can be 0 for free) | ||
168 | * | ||
169 | * More robust realloc which can be used to allocate, resize or free a | ||
170 | * memory area of arbitrary size. If the needed size goes over | ||
171 | * PAGE_SIZE, the kernel VM (vmalloc) is used. | ||
172 | * | ||
173 | * RETURNS: | ||
174 | * The new pointer on success, NULL on failure. | ||
175 | */ | ||
176 | static void *pcpu_realloc(void *p, size_t size, size_t new_size) | ||
177 | { | ||
178 | void *new; | ||
179 | |||
180 | if (new_size <= PAGE_SIZE) | ||
181 | new = kmalloc(new_size, GFP_KERNEL); | ||
182 | else | ||
183 | new = vmalloc(new_size); | ||
184 | if (new_size && !new) | ||
185 | return NULL; | ||
186 | |||
187 | memcpy(new, p, min(size, new_size)); | ||
188 | if (new_size > size) | ||
189 | memset(new + size, 0, new_size - size); | ||
190 | |||
191 | if (size <= PAGE_SIZE) | ||
192 | kfree(p); | ||
193 | else | ||
194 | vfree(p); | ||
195 | |||
196 | return new; | ||
197 | } | ||
198 | |||
199 | /** | ||
200 | * pcpu_chunk_relocate - put chunk in the appropriate chunk slot | ||
201 | * @chunk: chunk of interest | ||
202 | * @oslot: the previous slot it was on | ||
203 | * | ||
204 | * This function is called after an allocation or free changed @chunk. | ||
205 | * New slot according to the changed state is determined and @chunk is | ||
206 | * moved to the slot. | ||
207 | */ | ||
208 | static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot) | ||
209 | { | ||
210 | int nslot = pcpu_chunk_slot(chunk); | ||
211 | |||
212 | if (oslot != nslot) { | ||
213 | if (oslot < nslot) | ||
214 | list_move(&chunk->list, &pcpu_slot[nslot]); | ||
215 | else | ||
216 | list_move_tail(&chunk->list, &pcpu_slot[nslot]); | ||
217 | } | ||
218 | } | ||
219 | |||
220 | static struct rb_node **pcpu_chunk_rb_search(void *addr, | ||
221 | struct rb_node **parentp) | ||
222 | { | ||
223 | struct rb_node **p = &pcpu_addr_root.rb_node; | ||
224 | struct rb_node *parent = NULL; | ||
225 | struct pcpu_chunk *chunk; | ||
226 | |||
227 | while (*p) { | ||
228 | parent = *p; | ||
229 | chunk = rb_entry(parent, struct pcpu_chunk, rb_node); | ||
230 | |||
231 | if (addr < chunk->vm->addr) | ||
232 | p = &(*p)->rb_left; | ||
233 | else if (addr > chunk->vm->addr) | ||
234 | p = &(*p)->rb_right; | ||
235 | else | ||
236 | break; | ||
237 | } | ||
238 | |||
239 | if (parentp) | ||
240 | *parentp = parent; | ||
241 | return p; | ||
242 | } | ||
243 | |||
244 | /** | ||
245 | * pcpu_chunk_addr_search - search for chunk containing specified address | ||
246 | * @addr: address to search for | ||
247 | * | ||
248 | * Look for chunk which might contain @addr. More specifically, it | ||
249 | * searches for the chunk with the highest start address which isn't | ||
250 | * beyond @addr. | ||
251 | * | ||
252 | * RETURNS: | ||
253 | * The address of the found chunk. | ||
254 | */ | ||
255 | static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) | ||
256 | { | ||
257 | struct rb_node *n, *parent; | ||
258 | struct pcpu_chunk *chunk; | ||
259 | |||
260 | n = *pcpu_chunk_rb_search(addr, &parent); | ||
261 | if (!n) { | ||
262 | /* no exactly matching chunk, the parent is the closest */ | ||
263 | n = parent; | ||
264 | BUG_ON(!n); | ||
265 | } | ||
266 | chunk = rb_entry(n, struct pcpu_chunk, rb_node); | ||
267 | |||
268 | if (addr < chunk->vm->addr) { | ||
269 | /* the parent was the next one, look for the previous one */ | ||
270 | n = rb_prev(n); | ||
271 | BUG_ON(!n); | ||
272 | chunk = rb_entry(n, struct pcpu_chunk, rb_node); | ||
273 | } | ||
274 | |||
275 | return chunk; | ||
276 | } | ||
277 | |||
278 | /** | ||
279 | * pcpu_chunk_addr_insert - insert chunk into address rb tree | ||
280 | * @new: chunk to insert | ||
281 | * | ||
282 | * Insert @new into address rb tree. | ||
283 | */ | ||
284 | static void pcpu_chunk_addr_insert(struct pcpu_chunk *new) | ||
285 | { | ||
286 | struct rb_node **p, *parent; | ||
287 | |||
288 | p = pcpu_chunk_rb_search(new->vm->addr, &parent); | ||
289 | BUG_ON(*p); | ||
290 | rb_link_node(&new->rb_node, parent, p); | ||
291 | rb_insert_color(&new->rb_node, &pcpu_addr_root); | ||
292 | } | ||
293 | |||
294 | /** | ||
295 | * pcpu_split_block - split a map block | ||
296 | * @chunk: chunk of interest | ||
297 | * @i: index of map block to split | ||
298 | * @head: head size in bytes (can be 0) | ||
299 | * @tail: tail size in bytes (can be 0) | ||
300 | * | ||
301 | * Split the @i'th map block into two or three blocks. If @head is | ||
302 | * non-zero, a block of @head bytes is inserted before block @i, moving it | ||
303 | * to @i+1 and reducing its size by @head bytes. | ||
304 | * | ||
305 | * If @tail is non-zero, the target block, which can be @i or @i+1 | ||
306 | * depending on @head, is reduced by @tail bytes and a @tail-byte block | ||
307 | * is inserted after the target block. | ||
308 | * | ||
309 | * RETURNS: | ||
310 | * 0 on success, -errno on failure. | ||
311 | */ | ||
312 | static int pcpu_split_block(struct pcpu_chunk *chunk, int i, int head, int tail) | ||
313 | { | ||
314 | int nr_extra = !!head + !!tail; | ||
315 | int target = chunk->map_used + nr_extra; | ||
316 | |||
317 | /* reallocation required? */ | ||
318 | if (chunk->map_alloc < target) { | ||
319 | int new_alloc = chunk->map_alloc; | ||
320 | int *new; | ||
321 | |||
322 | while (new_alloc < target) | ||
323 | new_alloc *= 2; | ||
324 | |||
325 | new = pcpu_realloc(chunk->map, | ||
326 | chunk->map_alloc * sizeof(new[0]), | ||
327 | new_alloc * sizeof(new[0])); | ||
328 | if (!new) | ||
329 | return -ENOMEM; | ||
330 | |||
331 | chunk->map_alloc = new_alloc; | ||
332 | chunk->map = new; | ||
333 | } | ||
334 | |||
335 | /* insert a new subblock */ | ||
336 | memmove(&chunk->map[i + nr_extra], &chunk->map[i], | ||
337 | sizeof(chunk->map[0]) * (chunk->map_used - i)); | ||
338 | chunk->map_used += nr_extra; | ||
339 | |||
340 | if (head) { | ||
341 | chunk->map[i + 1] = chunk->map[i] - head; | ||
342 | chunk->map[i++] = head; | ||
343 | } | ||
344 | if (tail) { | ||
345 | chunk->map[i++] -= tail; | ||
346 | chunk->map[i] = tail; | ||
347 | } | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | /** | ||
352 | * pcpu_alloc_area - allocate area from a pcpu_chunk | ||
353 | * @chunk: chunk of interest | ||
354 | * @size: wanted size in bytes | ||
355 | * @align: wanted align | ||
356 | * | ||
357 | * Try to allocate @size bytes area aligned at @align from @chunk. | ||
358 | * Note that this function only allocates the offset. It doesn't | ||
359 | * populate or map the area. | ||
360 | * | ||
361 | * RETURNS: | ||
362 | * Allocated offset in @chunk on success, -errno on failure. | ||
363 | */ | ||
364 | static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align) | ||
365 | { | ||
366 | int oslot = pcpu_chunk_slot(chunk); | ||
367 | int max_contig = 0; | ||
368 | int i, off; | ||
369 | |||
370 | /* | ||
371 | * The static chunk initially doesn't have map attached | ||
372 | * because kmalloc wasn't available during init. Give it one. | ||
373 | */ | ||
374 | if (unlikely(!chunk->map)) { | ||
375 | chunk->map = pcpu_realloc(NULL, 0, | ||
376 | PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); | ||
377 | if (!chunk->map) | ||
378 | return -ENOMEM; | ||
379 | |||
380 | chunk->map_alloc = PCPU_DFL_MAP_ALLOC; | ||
381 | chunk->map[chunk->map_used++] = -pcpu_static_size; | ||
382 | if (chunk->free_size) | ||
383 | chunk->map[chunk->map_used++] = chunk->free_size; | ||
384 | } | ||
385 | |||
386 | for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) { | ||
387 | bool is_last = i + 1 == chunk->map_used; | ||
388 | int head, tail; | ||
389 | |||
390 | /* extra for alignment requirement */ | ||
391 | head = ALIGN(off, align) - off; | ||
392 | BUG_ON(i == 0 && head != 0); | ||
393 | |||
394 | if (chunk->map[i] < 0) | ||
395 | continue; | ||
396 | if (chunk->map[i] < head + size) { | ||
397 | max_contig = max(chunk->map[i], max_contig); | ||
398 | continue; | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * If head is small or the previous block is free, | ||
403 | * merge'em. Note that 'small' is defined as smaller | ||
404 | * than sizeof(int), which is very small but isn't too | ||
405 | * uncommon for percpu allocations. | ||
406 | */ | ||
407 | if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) { | ||
408 | if (chunk->map[i - 1] > 0) | ||
409 | chunk->map[i - 1] += head; | ||
410 | else { | ||
411 | chunk->map[i - 1] -= head; | ||
412 | chunk->free_size -= head; | ||
413 | } | ||
414 | chunk->map[i] -= head; | ||
415 | off += head; | ||
416 | head = 0; | ||
417 | } | ||
418 | |||
419 | /* if tail is small, just keep it around */ | ||
420 | tail = chunk->map[i] - head - size; | ||
421 | if (tail < sizeof(int)) | ||
422 | tail = 0; | ||
423 | |||
424 | /* split if warranted */ | ||
425 | if (head || tail) { | ||
426 | if (pcpu_split_block(chunk, i, head, tail)) | ||
427 | return -ENOMEM; | ||
428 | if (head) { | ||
429 | i++; | ||
430 | off += head; | ||
431 | max_contig = max(chunk->map[i - 1], max_contig); | ||
432 | } | ||
433 | if (tail) | ||
434 | max_contig = max(chunk->map[i + 1], max_contig); | ||
435 | } | ||
436 | |||
437 | /* update hint and mark allocated */ | ||
438 | if (is_last) | ||
439 | chunk->contig_hint = max_contig; /* fully scanned */ | ||
440 | else | ||
441 | chunk->contig_hint = max(chunk->contig_hint, | ||
442 | max_contig); | ||
443 | |||
444 | chunk->free_size -= chunk->map[i]; | ||
445 | chunk->map[i] = -chunk->map[i]; | ||
446 | |||
447 | pcpu_chunk_relocate(chunk, oslot); | ||
448 | return off; | ||
449 | } | ||
450 | |||
451 | chunk->contig_hint = max_contig; /* fully scanned */ | ||
452 | pcpu_chunk_relocate(chunk, oslot); | ||
453 | |||
454 | /* | ||
455 | * Tell the upper layer that this chunk has no area left. | ||
456 | * Note that this is not an error condition but a notification | ||
457 | * to upper layer that it needs to look at other chunks. | ||
458 | * -ENOSPC is chosen as it isn't used in the memory subsystem and | ||
459 | * matches the meaning in a way. | ||
460 | */ | ||
461 | return -ENOSPC; | ||
462 | } | ||
463 | |||
464 | /** | ||
465 | * pcpu_free_area - free area to a pcpu_chunk | ||
466 | * @chunk: chunk of interest | ||
467 | * @freeme: offset of area to free | ||
468 | * | ||
469 | * Free the area starting at @freeme in @chunk. Note that this function | ||
470 | * only modifies the allocation map. It doesn't depopulate or unmap | ||
471 | * the area. | ||
472 | */ | ||
473 | static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme) | ||
474 | { | ||
475 | int oslot = pcpu_chunk_slot(chunk); | ||
476 | int i, off; | ||
477 | |||
478 | for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) | ||
479 | if (off == freeme) | ||
480 | break; | ||
481 | BUG_ON(off != freeme); | ||
482 | BUG_ON(chunk->map[i] > 0); | ||
483 | |||
484 | chunk->map[i] = -chunk->map[i]; | ||
485 | chunk->free_size += chunk->map[i]; | ||
486 | |||
487 | /* merge with previous? */ | ||
488 | if (i > 0 && chunk->map[i - 1] >= 0) { | ||
489 | chunk->map[i - 1] += chunk->map[i]; | ||
490 | chunk->map_used--; | ||
491 | memmove(&chunk->map[i], &chunk->map[i + 1], | ||
492 | (chunk->map_used - i) * sizeof(chunk->map[0])); | ||
493 | i--; | ||
494 | } | ||
495 | /* merge with next? */ | ||
496 | if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) { | ||
497 | chunk->map[i] += chunk->map[i + 1]; | ||
498 | chunk->map_used--; | ||
499 | memmove(&chunk->map[i + 1], &chunk->map[i + 2], | ||
500 | (chunk->map_used - (i + 1)) * sizeof(chunk->map[0])); | ||
501 | } | ||
502 | |||
503 | chunk->contig_hint = max(chunk->map[i], chunk->contig_hint); | ||
504 | pcpu_chunk_relocate(chunk, oslot); | ||
505 | } | ||
506 | |||
507 | /** | ||
508 | * pcpu_unmap - unmap pages out of a pcpu_chunk | ||
509 | * @chunk: chunk of interest | ||
510 | * @page_start: page index of the first page to unmap | ||
511 | * @page_end: page index of the last page to unmap + 1 | ||
512 | * @flush: whether to flush cache and tlb or not | ||
513 | * | ||
514 | * For each cpu, unmap pages [@page_start,@page_end) out of @chunk. | ||
515 | * If @flush is true, vcache is flushed before unmapping and tlb | ||
516 | * after. | ||
517 | */ | ||
518 | static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end, | ||
519 | bool flush) | ||
520 | { | ||
521 | unsigned int last = num_possible_cpus() - 1; | ||
522 | unsigned int cpu; | ||
523 | |||
524 | /* unmap must not be done on immutable chunk */ | ||
525 | WARN_ON(chunk->immutable); | ||
526 | |||
527 | /* | ||
528 | * Each flushing trial can be very expensive, so issue the flush on | ||
529 | * the whole region at once rather than doing it for each cpu. | ||
530 | * This could be overkill but is more scalable. | ||
531 | */ | ||
532 | if (flush) | ||
533 | flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start), | ||
534 | pcpu_chunk_addr(chunk, last, page_end)); | ||
535 | |||
536 | for_each_possible_cpu(cpu) | ||
537 | unmap_kernel_range_noflush( | ||
538 | pcpu_chunk_addr(chunk, cpu, page_start), | ||
539 | (page_end - page_start) << PAGE_SHIFT); | ||
540 | |||
541 | /* ditto as flush_cache_vunmap() */ | ||
542 | if (flush) | ||
543 | flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start), | ||
544 | pcpu_chunk_addr(chunk, last, page_end)); | ||
545 | } | ||
546 | |||
547 | /** | ||
548 | * pcpu_depopulate_chunk - depopulate and unmap an area of a pcpu_chunk | ||
549 | * @chunk: chunk to depopulate | ||
550 | * @off: offset to the area to depopulate | ||
551 | * @size: size of the area to depopulate in bytes | ||
552 | * @flush: whether to flush cache and tlb or not | ||
553 | * | ||
554 | * For each cpu, depopulate and unmap pages [@page_start,@page_end) | ||
555 | * from @chunk. If @flush is true, vcache is flushed before unmapping | ||
556 | * and tlb after. | ||
557 | */ | ||
558 | static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size, | ||
559 | bool flush) | ||
560 | { | ||
561 | int page_start = PFN_DOWN(off); | ||
562 | int page_end = PFN_UP(off + size); | ||
563 | int unmap_start = -1; | ||
564 | int uninitialized_var(unmap_end); | ||
565 | unsigned int cpu; | ||
566 | int i; | ||
567 | |||
568 | for (i = page_start; i < page_end; i++) { | ||
569 | for_each_possible_cpu(cpu) { | ||
570 | struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i); | ||
571 | |||
572 | if (!*pagep) | ||
573 | continue; | ||
574 | |||
575 | __free_page(*pagep); | ||
576 | |||
577 | /* | ||
578 | * If it's partial depopulation, it might get | ||
579 | * populated or depopulated again. Mark the | ||
580 | * page gone. | ||
581 | */ | ||
582 | *pagep = NULL; | ||
583 | |||
584 | unmap_start = unmap_start < 0 ? i : unmap_start; | ||
585 | unmap_end = i + 1; | ||
586 | } | ||
587 | } | ||
588 | |||
589 | if (unmap_start >= 0) | ||
590 | pcpu_unmap(chunk, unmap_start, unmap_end, flush); | ||
591 | } | ||
592 | |||
593 | /** | ||
594 | * pcpu_map - map pages into a pcpu_chunk | ||
595 | * @chunk: chunk of interest | ||
596 | * @page_start: page index of the first page to map | ||
597 | * @page_end: page index of the last page to map + 1 | ||
598 | * | ||
599 | * For each cpu, map pages [@page_start,@page_end) into @chunk. | ||
600 | * vcache is flushed afterwards. | ||
601 | */ | ||
602 | static int pcpu_map(struct pcpu_chunk *chunk, int page_start, int page_end) | ||
603 | { | ||
604 | unsigned int last = num_possible_cpus() - 1; | ||
605 | unsigned int cpu; | ||
606 | int err; | ||
607 | |||
608 | /* map must not be done on immutable chunk */ | ||
609 | WARN_ON(chunk->immutable); | ||
610 | |||
611 | for_each_possible_cpu(cpu) { | ||
612 | err = map_kernel_range_noflush( | ||
613 | pcpu_chunk_addr(chunk, cpu, page_start), | ||
614 | (page_end - page_start) << PAGE_SHIFT, | ||
615 | PAGE_KERNEL, | ||
616 | pcpu_chunk_pagep(chunk, cpu, page_start)); | ||
617 | if (err < 0) | ||
618 | return err; | ||
619 | } | ||
620 | |||
621 | /* flush at once, please read comments in pcpu_unmap() */ | ||
622 | flush_cache_vmap(pcpu_chunk_addr(chunk, 0, page_start), | ||
623 | pcpu_chunk_addr(chunk, last, page_end)); | ||
624 | return 0; | ||
625 | } | ||
626 | |||
627 | /** | ||
628 | * pcpu_populate_chunk - populate and map an area of a pcpu_chunk | ||
629 | * @chunk: chunk of interest | ||
630 | * @off: offset to the area to populate | ||
631 | * @size: size of the area to populate in bytes | ||
632 | * | ||
633 | * For each cpu, populate and map pages [@page_start,@page_end) into | ||
634 | * @chunk. The area is cleared on return. | ||
635 | */ | ||
636 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) | ||
637 | { | ||
638 | const gfp_t alloc_mask = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; | ||
639 | int page_start = PFN_DOWN(off); | ||
640 | int page_end = PFN_UP(off + size); | ||
641 | int map_start = -1; | ||
642 | int uninitialized_var(map_end); | ||
643 | unsigned int cpu; | ||
644 | int i; | ||
645 | |||
646 | for (i = page_start; i < page_end; i++) { | ||
647 | if (pcpu_chunk_page_occupied(chunk, i)) { | ||
648 | if (map_start >= 0) { | ||
649 | if (pcpu_map(chunk, map_start, map_end)) | ||
650 | goto err; | ||
651 | map_start = -1; | ||
652 | } | ||
653 | continue; | ||
654 | } | ||
655 | |||
656 | map_start = map_start < 0 ? i : map_start; | ||
657 | map_end = i + 1; | ||
658 | |||
659 | for_each_possible_cpu(cpu) { | ||
660 | struct page **pagep = pcpu_chunk_pagep(chunk, cpu, i); | ||
661 | |||
662 | *pagep = alloc_pages_node(cpu_to_node(cpu), | ||
663 | alloc_mask, 0); | ||
664 | if (!*pagep) | ||
665 | goto err; | ||
666 | } | ||
667 | } | ||
668 | |||
669 | if (map_start >= 0 && pcpu_map(chunk, map_start, map_end)) | ||
670 | goto err; | ||
671 | |||
672 | for_each_possible_cpu(cpu) | ||
673 | memset(chunk->vm->addr + cpu * pcpu_unit_size + off, 0, | ||
674 | size); | ||
675 | |||
676 | return 0; | ||
677 | err: | ||
678 | /* likely under heavy memory pressure, give memory back */ | ||
679 | pcpu_depopulate_chunk(chunk, off, size, true); | ||
680 | return -ENOMEM; | ||
681 | } | ||
682 | |||
683 | static void free_pcpu_chunk(struct pcpu_chunk *chunk) | ||
684 | { | ||
685 | if (!chunk) | ||
686 | return; | ||
687 | if (chunk->vm) | ||
688 | free_vm_area(chunk->vm); | ||
689 | pcpu_realloc(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]), 0); | ||
690 | kfree(chunk); | ||
691 | } | ||
692 | |||
693 | static struct pcpu_chunk *alloc_pcpu_chunk(void) | ||
694 | { | ||
695 | struct pcpu_chunk *chunk; | ||
696 | |||
697 | chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL); | ||
698 | if (!chunk) | ||
699 | return NULL; | ||
700 | |||
701 | chunk->map = pcpu_realloc(NULL, 0, | ||
702 | PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0])); | ||
703 | chunk->map_alloc = PCPU_DFL_MAP_ALLOC; | ||
704 | chunk->map[chunk->map_used++] = pcpu_unit_size; | ||
705 | |||
706 | chunk->vm = get_vm_area(pcpu_chunk_size, GFP_KERNEL); | ||
707 | if (!chunk->vm) { | ||
708 | free_pcpu_chunk(chunk); | ||
709 | return NULL; | ||
710 | } | ||
711 | |||
712 | INIT_LIST_HEAD(&chunk->list); | ||
713 | chunk->free_size = pcpu_unit_size; | ||
714 | chunk->contig_hint = pcpu_unit_size; | ||
715 | |||
716 | return chunk; | ||
717 | } | ||
718 | |||
719 | /** | ||
720 | * __alloc_percpu - allocate percpu area | ||
721 | * @size: size of area to allocate in bytes | ||
722 | * @align: alignment of area (max PAGE_SIZE) | ||
723 | * | ||
724 | * Allocate percpu area of @size bytes aligned at @align. Might | ||
725 | * sleep. Might trigger writeouts. | ||
726 | * | ||
727 | * RETURNS: | ||
728 | * Percpu pointer to the allocated area on success, NULL on failure. | ||
729 | */ | ||
730 | void *__alloc_percpu(size_t size, size_t align) | ||
731 | { | ||
732 | void *ptr = NULL; | ||
733 | struct pcpu_chunk *chunk; | ||
734 | int slot, off; | ||
735 | |||
736 | if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) { | ||
737 | WARN(true, "illegal size (%zu) or align (%zu) for " | ||
738 | "percpu allocation\n", size, align); | ||
739 | return NULL; | ||
740 | } | ||
741 | |||
742 | mutex_lock(&pcpu_mutex); | ||
743 | |||
744 | /* allocate area */ | ||
745 | for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) { | ||
746 | list_for_each_entry(chunk, &pcpu_slot[slot], list) { | ||
747 | if (size > chunk->contig_hint) | ||
748 | continue; | ||
749 | off = pcpu_alloc_area(chunk, size, align); | ||
750 | if (off >= 0) | ||
751 | goto area_found; | ||
752 | if (off != -ENOSPC) | ||
753 | goto out_unlock; | ||
754 | } | ||
755 | } | ||
756 | |||
757 | /* hmmm... no space left, create a new chunk */ | ||
758 | chunk = alloc_pcpu_chunk(); | ||
759 | if (!chunk) | ||
760 | goto out_unlock; | ||
761 | pcpu_chunk_relocate(chunk, -1); | ||
762 | pcpu_chunk_addr_insert(chunk); | ||
763 | |||
764 | off = pcpu_alloc_area(chunk, size, align); | ||
765 | if (off < 0) | ||
766 | goto out_unlock; | ||
767 | |||
768 | area_found: | ||
769 | /* populate, map and clear the area */ | ||
770 | if (pcpu_populate_chunk(chunk, off, size)) { | ||
771 | pcpu_free_area(chunk, off); | ||
772 | goto out_unlock; | ||
773 | } | ||
774 | |||
775 | ptr = __addr_to_pcpu_ptr(chunk->vm->addr + off); | ||
776 | out_unlock: | ||
777 | mutex_unlock(&pcpu_mutex); | ||
778 | return ptr; | ||
779 | } | ||
780 | EXPORT_SYMBOL_GPL(__alloc_percpu); | ||
781 | |||
782 | static void pcpu_kill_chunk(struct pcpu_chunk *chunk) | ||
783 | { | ||
784 | WARN_ON(chunk->immutable); | ||
785 | pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size, false); | ||
786 | list_del(&chunk->list); | ||
787 | rb_erase(&chunk->rb_node, &pcpu_addr_root); | ||
788 | free_pcpu_chunk(chunk); | ||
789 | } | ||
790 | |||
791 | /** | ||
792 | * free_percpu - free percpu area | ||
793 | * @ptr: pointer to area to free | ||
794 | * | ||
795 | * Free percpu area @ptr. Might sleep. | ||
796 | */ | ||
797 | void free_percpu(void *ptr) | ||
798 | { | ||
799 | void *addr = __pcpu_ptr_to_addr(ptr); | ||
800 | struct pcpu_chunk *chunk; | ||
801 | int off; | ||
802 | |||
803 | if (!ptr) | ||
804 | return; | ||
805 | |||
806 | mutex_lock(&pcpu_mutex); | ||
807 | |||
808 | chunk = pcpu_chunk_addr_search(addr); | ||
809 | off = addr - chunk->vm->addr; | ||
810 | |||
811 | pcpu_free_area(chunk, off); | ||
812 | |||
813 | /* the chunk became fully free, kill one if there are other free ones */ | ||
814 | if (chunk->free_size == pcpu_unit_size) { | ||
815 | struct pcpu_chunk *pos; | ||
816 | |||
817 | list_for_each_entry(pos, | ||
818 | &pcpu_slot[pcpu_chunk_slot(chunk)], list) | ||
819 | if (pos != chunk) { | ||
820 | pcpu_kill_chunk(pos); | ||
821 | break; | ||
822 | } | ||
823 | } | ||
824 | |||
825 | mutex_unlock(&pcpu_mutex); | ||
826 | } | ||
827 | EXPORT_SYMBOL_GPL(free_percpu); | ||
828 | |||
829 | /** | ||
830 | * pcpu_setup_first_chunk - initialize the first percpu chunk | ||
831 | * @get_page_fn: callback to fetch page pointer | ||
832 | * @static_size: the size of static percpu area in bytes | ||
833 | * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE, 0 for auto | ||
834 | * @free_size: free size in bytes, 0 for auto | ||
835 | * @base_addr: mapped address, NULL for auto | ||
836 | * @populate_pte_fn: callback to allocate pagetable, NULL if unnecessary | ||
837 | * | ||
838 | * Initialize the first percpu chunk which contains the kernel static | ||
839 | * percpu area. This function is to be called from arch percpu area | ||
840 | * setup path. The first two parameters are mandatory. The rest are | ||
841 | * optional. | ||
842 | * | ||
843 | * @get_page_fn() should return a pointer to the percpu page given cpu | ||
844 | * number and page number. It should at least return enough pages to | ||
845 | * cover the static area. The returned pages for static area should | ||
846 | * have been initialized with valid data. If @unit_size is specified, | ||
847 | * it can also return pages after the static area. NULL return | ||
848 | * indicates end of pages for the cpu. Note that @get_page_fn() must | ||
849 | * return the same number of pages for all cpus. | ||
850 | * | ||
851 | * @unit_size, if non-zero, determines unit size and must be aligned | ||
852 | * to PAGE_SIZE and equal to or larger than @static_size + @free_size. | ||
853 | * | ||
854 | * @free_size determines the number of free bytes after the static | ||
855 | * area in the first chunk. If zero, whatever is left is made | ||
856 | * available. Specifying a non-zero value makes percpu leave the | ||
857 | * area after @static_size + @free_size alone. | ||
858 | * | ||
859 | * A non-NULL @base_addr means that the caller has already allocated | ||
860 | * the virtual region for the first chunk and mapped it; percpu must | ||
861 | * not touch the chunk. Note that @base_addr with a zero @unit_size | ||
862 | * or a non-NULL @populate_pte_fn doesn't make sense. | ||
863 | * | ||
864 | * @populate_pte_fn is used to populate the pagetable. NULL means the | ||
865 | * caller already populated the pagetable. | ||
866 | * | ||
867 | * RETURNS: | ||
868 | * The determined pcpu_unit_size which can be used to initialize | ||
869 | * percpu access. | ||
870 | */ | ||
871 | size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn, | ||
872 | size_t static_size, size_t unit_size, | ||
873 | size_t free_size, void *base_addr, | ||
874 | pcpu_populate_pte_fn_t populate_pte_fn) | ||
875 | { | ||
876 | static struct vm_struct static_vm; | ||
877 | struct pcpu_chunk *static_chunk; | ||
878 | unsigned int cpu; | ||
879 | int nr_pages; | ||
880 | int err, i; | ||
881 | |||
882 | /* sanity checks */ | ||
883 | BUG_ON(!static_size); | ||
884 | BUG_ON(!unit_size && free_size); | ||
885 | BUG_ON(unit_size && unit_size < static_size + free_size); | ||
886 | BUG_ON(unit_size & ~PAGE_MASK); | ||
887 | BUG_ON(base_addr && !unit_size); | ||
888 | BUG_ON(base_addr && populate_pte_fn); | ||
889 | |||
890 | if (unit_size) | ||
891 | pcpu_unit_pages = unit_size >> PAGE_SHIFT; | ||
892 | else | ||
893 | pcpu_unit_pages = max_t(int, PCPU_MIN_UNIT_SIZE >> PAGE_SHIFT, | ||
894 | PFN_UP(static_size)); | ||
895 | |||
896 | pcpu_static_size = static_size; | ||
897 | pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT; | ||
898 | pcpu_chunk_size = num_possible_cpus() * pcpu_unit_size; | ||
899 | pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) | ||
900 | + num_possible_cpus() * pcpu_unit_pages * sizeof(struct page *); | ||
901 | |||
902 | /* | ||
903 | * Allocate chunk slots. The additional last slot is for | ||
904 | * empty chunks. | ||
905 | */ | ||
906 | pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; | ||
907 | pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); | ||
908 | for (i = 0; i < pcpu_nr_slots; i++) | ||
909 | INIT_LIST_HEAD(&pcpu_slot[i]); | ||
910 | |||
911 | /* init static_chunk */ | ||
912 | static_chunk = alloc_bootmem(pcpu_chunk_struct_size); | ||
913 | INIT_LIST_HEAD(&static_chunk->list); | ||
914 | static_chunk->vm = &static_vm; | ||
915 | |||
916 | if (free_size) | ||
917 | static_chunk->free_size = free_size; | ||
918 | else | ||
919 | static_chunk->free_size = pcpu_unit_size - pcpu_static_size; | ||
920 | |||
921 | static_chunk->contig_hint = static_chunk->free_size; | ||
922 | |||
923 | /* allocate vm address */ | ||
924 | static_vm.flags = VM_ALLOC; | ||
925 | static_vm.size = pcpu_chunk_size; | ||
926 | |||
927 | if (!base_addr) | ||
928 | vm_area_register_early(&static_vm, PAGE_SIZE); | ||
929 | else { | ||
930 | /* | ||
931 | * Pages already mapped. No need to remap into | ||
932 | * vmalloc area. In this case the static chunk can't | ||
933 | * be mapped or unmapped by percpu and is marked | ||
934 | * immutable. | ||
935 | */ | ||
936 | static_vm.addr = base_addr; | ||
937 | static_chunk->immutable = true; | ||
938 | } | ||
939 | |||
940 | /* assign pages */ | ||
941 | nr_pages = -1; | ||
942 | for_each_possible_cpu(cpu) { | ||
943 | for (i = 0; i < pcpu_unit_pages; i++) { | ||
944 | struct page *page = get_page_fn(cpu, i); | ||
945 | |||
946 | if (!page) | ||
947 | break; | ||
948 | *pcpu_chunk_pagep(static_chunk, cpu, i) = page; | ||
949 | } | ||
950 | |||
951 | BUG_ON(i < PFN_UP(pcpu_static_size)); | ||
952 | |||
953 | if (nr_pages < 0) | ||
954 | nr_pages = i; | ||
955 | else | ||
956 | BUG_ON(nr_pages != i); | ||
957 | } | ||
958 | |||
959 | /* map them */ | ||
960 | if (populate_pte_fn) { | ||
961 | for_each_possible_cpu(cpu) | ||
962 | for (i = 0; i < nr_pages; i++) | ||
963 | populate_pte_fn(pcpu_chunk_addr(static_chunk, | ||
964 | cpu, i)); | ||
965 | |||
966 | err = pcpu_map(static_chunk, 0, nr_pages); | ||
967 | if (err) | ||
968 | panic("failed to setup static percpu area, err=%d\n", | ||
969 | err); | ||
970 | } | ||
971 | |||
972 | /* link static_chunk in */ | ||
973 | pcpu_chunk_relocate(static_chunk, -1); | ||
974 | pcpu_chunk_addr_insert(static_chunk); | ||
975 | |||
976 | /* we're done */ | ||
977 | pcpu_base_addr = (void *)pcpu_chunk_addr(static_chunk, 0, 0); | ||
978 | return pcpu_unit_size; | ||
979 | } | ||
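
[Editor's note: a hedged sketch of how an arch setup path might drive pcpu_setup_first_chunk() per the kerneldoc above; MY_UNIT_PAGES, the page array, and populate_extra_pte() are assumptions, not taken from this patch.]

    /* hypothetical arch-side first-chunk setup: pages were allocated
     * earlier from bootmem; unit size and base address are left for
     * percpu to determine (0 and NULL). */
    static struct page **my_pcpu_pages; /* [cpu][page], NULL-terminated per cpu */

    static struct page *my_get_page(unsigned int cpu, int pageno)
    {
            return my_pcpu_pages[cpu * MY_UNIT_PAGES + pageno];
    }

    static void my_populate_pte(unsigned long addr)
    {
            populate_extra_pte(addr);       /* assumed arch helper */
    }

    void __init setup_per_cpu_areas(void)
    {
            size_t static_size = __per_cpu_end - __per_cpu_start;
            size_t unit_size;

            unit_size = pcpu_setup_first_chunk(my_get_page, static_size,
                                               0, 0, NULL, my_populate_pte);
            /* use unit_size and pcpu_base_addr to set each cpu's
             * percpu offset */
    }
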
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 11a929872ebd..af58324c361a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/radix-tree.h> | 24 | #include <linux/radix-tree.h> |
25 | #include <linux/rcupdate.h> | 25 | #include <linux/rcupdate.h> |
26 | #include <linux/bootmem.h> | 26 | #include <linux/bootmem.h> |
27 | #include <linux/pfn.h> | ||
27 | 28 | ||
28 | #include <asm/atomic.h> | 29 | #include <asm/atomic.h> |
29 | #include <asm/uaccess.h> | 30 | #include <asm/uaccess.h> |
@@ -152,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr, | |||
152 | * | 153 | * |
153 | * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N] | 154 | * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N] |
154 | */ | 155 | */ |
155 | static int vmap_page_range(unsigned long start, unsigned long end, | 156 | static int vmap_page_range_noflush(unsigned long start, unsigned long end, |
156 | pgprot_t prot, struct page **pages) | 157 | pgprot_t prot, struct page **pages) |
157 | { | 158 | { |
158 | pgd_t *pgd; | 159 | pgd_t *pgd; |
159 | unsigned long next; | 160 | unsigned long next; |
@@ -169,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end, | |||
169 | if (err) | 170 | if (err) |
170 | break; | 171 | break; |
171 | } while (pgd++, addr = next, addr != end); | 172 | } while (pgd++, addr = next, addr != end); |
172 | flush_cache_vmap(start, end); | ||
173 | 173 | ||
174 | if (unlikely(err)) | 174 | if (unlikely(err)) |
175 | return err; | 175 | return err; |
176 | return nr; | 176 | return nr; |
177 | } | 177 | } |
178 | 178 | ||
179 | static int vmap_page_range(unsigned long start, unsigned long end, | ||
180 | pgprot_t prot, struct page **pages) | ||
181 | { | ||
182 | int ret; | ||
183 | |||
184 | ret = vmap_page_range_noflush(start, end, prot, pages); | ||
185 | flush_cache_vmap(start, end); | ||
186 | return ret; | ||
187 | } | ||
188 | |||
179 | static inline int is_vmalloc_or_module_addr(const void *x) | 189 | static inline int is_vmalloc_or_module_addr(const void *x) |
180 | { | 190 | { |
181 | /* | 191 | /* |
@@ -990,6 +1000,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro | |||
990 | } | 1000 | } |
991 | EXPORT_SYMBOL(vm_map_ram); | 1001 | EXPORT_SYMBOL(vm_map_ram); |
992 | 1002 | ||
1003 | /** | ||
1004 | * vm_area_register_early - register vmap area early during boot | ||
1005 | * @vm: vm_struct to register | ||
1006 | * @align: requested alignment | ||
1007 | * | ||
1008 | * This function is used to register a kernel vm area before | ||
1009 | * vmalloc_init() is called. @vm->size and @vm->flags should contain | ||
1010 | * proper values on entry and other fields should be zero. On return, | ||
1011 | * vm->addr contains the allocated address. | ||
1012 | * | ||
1013 | * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING. | ||
1014 | */ | ||
1015 | void __init vm_area_register_early(struct vm_struct *vm, size_t align) | ||
1016 | { | ||
1017 | static size_t vm_init_off __initdata; | ||
1018 | unsigned long addr; | ||
1019 | |||
1020 | addr = ALIGN(VMALLOC_START + vm_init_off, align); | ||
1021 | vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; | ||
1022 | |||
1023 | vm->addr = (void *)addr; | ||
1024 | |||
1025 | vm->next = vmlist; | ||
1026 | vmlist = vm; | ||
1027 | } | ||
1028 | |||
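
[Editor's note: a minimal sketch of the call pattern the kerneldoc above describes, with a hypothetical caller; only size and flags are filled in before the call.]

    /* hypothetical early-boot caller: reserve a page-aligned virtual
     * area before vmalloc_init() runs. */
    static struct vm_struct my_early_vm;

    void __init my_early_setup(unsigned long nr_pages)
    {
            my_early_vm.flags = VM_ALLOC;
            my_early_vm.size = nr_pages << PAGE_SHIFT;
            vm_area_register_early(&my_early_vm, PAGE_SIZE);
            /* my_early_vm.addr now holds the reserved address */
    }
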
993 | void __init vmalloc_init(void) | 1029 | void __init vmalloc_init(void) |
994 | { | 1030 | { |
995 | struct vmap_area *va; | 1031 | struct vmap_area *va; |
@@ -1017,6 +1053,58 @@ void __init vmalloc_init(void) | |||
1017 | vmap_initialized = true; | 1053 | vmap_initialized = true; |
1018 | } | 1054 | } |
1019 | 1055 | ||
1056 | /** | ||
1057 | * map_kernel_range_noflush - map kernel VM area with the specified pages | ||
1058 | * @addr: start of the VM area to map | ||
1059 | * @size: size of the VM area to map | ||
1060 | * @prot: page protection flags to use | ||
1061 | * @pages: pages to map | ||
1062 | * | ||
1063 | * Map PFN_UP(@size) pages at @addr. The VM area that @addr and | ||
1064 | * @size specify should have been allocated using get_vm_area() and | ||
1065 | * its friends. | ||
1066 | * | ||
1067 | * NOTE: | ||
1068 | * This function does NOT do any cache flushing. The caller is | ||
1069 | * responsible for calling flush_cache_vmap() on to-be-mapped areas | ||
1070 | * before calling this function. | ||
1071 | * | ||
1072 | * RETURNS: | ||
1073 | * The number of pages mapped on success, -errno on failure. | ||
1074 | */ | ||
1075 | int map_kernel_range_noflush(unsigned long addr, unsigned long size, | ||
1076 | pgprot_t prot, struct page **pages) | ||
1077 | { | ||
1078 | return vmap_page_range_noflush(addr, addr + size, prot, pages); | ||
1079 | } | ||
1080 | |||
1081 | /** | ||
1082 | * unmap_kernel_range_noflush - unmap kernel VM area | ||
1083 | * @addr: start of the VM area to unmap | ||
1084 | * @size: size of the VM area to unmap | ||
1085 | * | ||
1086 | * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and | ||
1087 | * @size specify should have been allocated using get_vm_area() and | ||
1088 | * its friends. | ||
1089 | * | ||
1090 | * NOTE: | ||
1091 | * This function does NOT do any cache flushing. The caller is | ||
1092 | * responsible for calling flush_cache_vunmap() on to-be-unmapped areas | ||
1093 | * before calling this function and flush_tlb_kernel_range() after. | ||
1094 | */ | ||
1095 | void unmap_kernel_range_noflush(unsigned long addr, unsigned long size) | ||
1096 | { | ||
1097 | vunmap_page_range(addr, addr + size); | ||
1098 | } | ||
1099 | |||
1100 | /** | ||
1101 | * unmap_kernel_range - unmap kernel VM area and flush cache and TLB | ||
1102 | * @addr: start of the VM area to unmap | ||
1103 | * @size: size of the VM area to unmap | ||
1104 | * | ||
1105 | * Similar to unmap_kernel_range_noflush() but flushes the cache before | ||
1106 | * the unmapping and the TLB after. | ||
1107 | */ | ||
1020 | void unmap_kernel_range(unsigned long addr, unsigned long size) | 1108 | void unmap_kernel_range(unsigned long addr, unsigned long size) |
1021 | { | 1109 | { |
1022 | unsigned long end = addr + size; | 1110 | unsigned long end = addr + size; |
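
[Editor's note: to make the noflush contract concrete, a hedged sketch of a caller pairing the new helpers with the flushes the kerneldoc requires; the wrapper names are illustrative.]

    /* sketch: honor the documented contract -- cache flush around the
     * noflush helpers, TLB flush after unmapping. */
    static int my_map(unsigned long addr, unsigned long size,
                      struct page **pages)
    {
            flush_cache_vmap(addr, addr + size);
            /* returns the number of pages mapped or -errno */
            return map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
    }

    static void my_unmap(unsigned long addr, unsigned long size)
    {
            flush_cache_vunmap(addr, addr + size);
            unmap_kernel_range_noflush(addr, size);
            flush_tlb_kernel_range(addr, addr + size);
    }
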
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 743f5542d65a..3a3dad801354 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1375,10 +1375,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field); | |||
1375 | int snmp_mib_init(void *ptr[2], size_t mibsize) | 1375 | int snmp_mib_init(void *ptr[2], size_t mibsize) |
1376 | { | 1376 | { |
1377 | BUG_ON(ptr == NULL); | 1377 | BUG_ON(ptr == NULL); |
1378 | ptr[0] = __alloc_percpu(mibsize); | 1378 | ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); |
1379 | if (!ptr[0]) | 1379 | if (!ptr[0]) |
1380 | goto err0; | 1380 | goto err0; |
1381 | ptr[1] = __alloc_percpu(mibsize); | 1381 | ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); |
1382 | if (!ptr[1]) | 1382 | if (!ptr[1]) |
1383 | goto err1; | 1383 | goto err1; |
1384 | return 0; | 1384 | return 0; |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 97f71153584f..bf895401218f 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -3376,7 +3376,7 @@ int __init ip_rt_init(void) | |||
3376 | int rc = 0; | 3376 | int rc = 0; |
3377 | 3377 | ||
3378 | #ifdef CONFIG_NET_CLS_ROUTE | 3378 | #ifdef CONFIG_NET_CLS_ROUTE |
3379 | ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct)); | 3379 | ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct)); |
3380 | if (!ip_rt_acct) | 3380 | if (!ip_rt_acct) |
3381 | panic("IP: failed to allocate ip_rt_acct\n"); | 3381 | panic("IP: failed to allocate ip_rt_acct\n"); |
3382 | #endif | 3382 | #endif |
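
[Editor's note: both network call sites above follow the same pattern under the two-argument __alloc_percpu(): size covers the whole allocation while alignment follows the element type. A hedged sketch with a hypothetical accounting struct.]

    /* sketch: percpu array of structs -- size spans the array,
     * alignment comes from the element type. */
    struct my_acct {                        /* hypothetical type */
            u32 o_bytes, o_packets;
            u32 i_bytes, i_packets;
    };

    static struct my_acct *my_acct_table;

    static int __init my_acct_init(void)
    {
            my_acct_table = __alloc_percpu(256 * sizeof(struct my_acct),
                                           __alignof__(struct my_acct));
            return my_acct_table ? 0 : -ENOMEM;
    }
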