Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/bitops.h                14
-rw-r--r--  arch/x86/include/asm/io.h                     1
-rw-r--r--  arch/x86/include/asm/msr-index.h             29
-rw-r--r--  arch/x86/include/asm/pgalloc.h                1
-rw-r--r--  arch/x86/include/asm/syscalls.h               2
-rw-r--r--  arch/x86/include/asm/timex.h                 13
-rw-r--r--  arch/x86/kernel/apic.c                        9
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c   35
-rw-r--r--  arch/x86/kernel/cpu/intel.c                  13
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c           12
-rw-r--r--  arch/x86/kernel/setup_percpu.c                2
-rw-r--r--  arch/x86/kernel/signal.c                     11
-rw-r--r--  arch/x86/kernel/tlb_uv.c                      1
-rw-r--r--  arch/x86/kernel/vmi_32.c                      2
-rw-r--r--  arch/x86/lib/usercopy_32.c                    4
-rw-r--r--  arch/x86/lib/usercopy_64.c                    4
-rw-r--r--  arch/x86/mm/init_32.c                        48
-rw-r--r--  arch/x86/mm/init_64.c                         2
-rw-r--r--  arch/x86/mm/iomap_32.c                       10
-rw-r--r--  arch/x86/mm/ioremap.c                        25
-rw-r--r--  arch/x86/mm/pageattr.c                       49
-rw-r--r--  arch/x86/mm/pat.c                            43
22 files changed, 223 insertions, 107 deletions
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index e02a359d2aa5..02b47a603fc8 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -3,6 +3,9 @@
 
 /*
  * Copyright 1992, Linus Torvalds.
+ *
+ * Note: inlines with more than a single statement should be marked
+ * __always_inline to avoid problems with older gcc's inlining heuristics.
  */
 
 #ifndef _LINUX_BITOPS_H
@@ -53,7 +56,8 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void set_bit(unsigned int nr, volatile unsigned long *addr)
+static __always_inline void
+set_bit(unsigned int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -90,7 +94,8 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static __always_inline void
+clear_bit(int nr, volatile unsigned long *addr)
 {
 	if (IS_IMMEDIATE(nr)) {
 		asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -204,7 +209,8 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
  *
  * This is the same as test_and_set_bit on x86.
  */
-static inline int test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+static __always_inline int
+test_and_set_bit_lock(int nr, volatile unsigned long *addr)
 {
 	return test_and_set_bit(nr, addr);
 }
@@ -300,7 +306,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
 	return oldbit;
 }
 
-static inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
 {
 	return ((1UL << (nr % BITS_PER_LONG)) &
 		(((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
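
The new header note is the point of this hunk: the IS_IMMEDIATE() dispatch inside set_bit()/clear_bit() only folds to the single immediate-operand instruction if the function body is really inlined into each caller, which older gcc would sometimes skip for multi-statement inlines. A standalone sketch of the mechanism (assuming IS_IMMEDIATE() is a __builtin_constant_p() wrapper as in this header; demo_set_bit() is an illustrative name, compiles in userspace with gcc -O2):

#include <stdio.h>

#define DEMO_BITS_PER_LONG	(8 * sizeof(unsigned long))
#define IS_IMMEDIATE(nr)	(__builtin_constant_p(nr))

static inline __attribute__((always_inline)) void
demo_set_bit(unsigned int nr, volatile unsigned long *addr)
{
	/* Resolved at compile time -- but only when this function is
	 * actually inlined into the caller: */
	if (IS_IMMEDIATE(nr))
		puts("constant nr: immediate-operand path");
	else
		puts("variable nr: generic path");
	addr[nr / DEMO_BITS_PER_LONG] |= 1UL << (nr % DEMO_BITS_PER_LONG);
}

int main(int argc, char **argv)
{
	volatile unsigned long word = 0;
	unsigned int n = (unsigned int)argc + 4;	/* not a compile-time constant */

	demo_set_bit(3, &word);		/* constant path */
	demo_set_bit(n, &word);		/* variable path */
	printf("word = %#lx\n", (unsigned long)word);	/* 0x28 when run with no args */
	return 0;
}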
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 05cfed4485fa..1dbbdf4be9b4 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -99,7 +99,6 @@ extern void __iomem *ioremap_wc(unsigned long offset, unsigned long size);
  * A boot-time mapping is currently limited to at most 16 pages.
  */
 extern void early_ioremap_init(void);
-extern void early_ioremap_clear(void);
 extern void early_ioremap_reset(void);
 extern void __iomem *early_ioremap(unsigned long offset, unsigned long size);
 extern void __iomem *early_memremap(unsigned long offset, unsigned long size);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index cb58643947b9..358acc59ae04 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -202,6 +202,35 @@
 #define MSR_IA32_THERM_STATUS		0x0000019c
 #define MSR_IA32_MISC_ENABLE		0x000001a0
 
+/* MISC_ENABLE bits: architectural */
+#define MSR_IA32_MISC_ENABLE_FAST_STRING	(1ULL << 0)
+#define MSR_IA32_MISC_ENABLE_TCC		(1ULL << 1)
+#define MSR_IA32_MISC_ENABLE_EMON		(1ULL << 7)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL	(1ULL << 11)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL	(1ULL << 12)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP	(1ULL << 16)
+#define MSR_IA32_MISC_ENABLE_MWAIT		(1ULL << 18)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID	(1ULL << 22)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE	(1ULL << 23)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE		(1ULL << 34)
+
+/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT		(1ULL << 2)
+#define MSR_IA32_MISC_ENABLE_TM1		(1ULL << 3)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE	(1ULL << 4)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE	(1ULL << 6)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK	(1ULL << 8)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE	(1ULL << 9)
+#define MSR_IA32_MISC_ENABLE_FERR		(1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX	(1ULL << 10)
+#define MSR_IA32_MISC_ENABLE_TM2		(1ULL << 13)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE	(1ULL << 19)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK	(1ULL << 20)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT	(1ULL << 24)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE	(1ULL << 37)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE	(1ULL << 38)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE	(1ULL << 39)
+
 /* Intel Model 6 */
 #define MSR_P6_EVNTSEL0			0x00000186
 #define MSR_P6_EVNTSEL1			0x00000187
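
These masks are meant to pair with the 64-bit MSR accessors; a minimal usage sketch (rdmsrl() is the existing x86 helper, the probe around it is illustrative):

	u64 misc_enable;

	rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

	/* e.g. check an architectural bit before relying on BTS: */
	if (misc_enable & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
		printk(KERN_INFO "branch trace store not supported\n");

The intel.c hunk below uses exactly this pattern to clear MSR_IA32_MISC_ENABLE_LIMIT_CPUID.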
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index cb7c151a8bff..dd14c54ac718 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -42,6 +42,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 
 static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
+	pgtable_page_dtor(pte);
 	__free_page(pte);
 }
 
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 9c6797c3e56c..c0b0bda754ee 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -40,7 +40,7 @@ asmlinkage int sys_sigaction(int, const struct old_sigaction __user *,
 			     struct old_sigaction __user *);
 asmlinkage int sys_sigaltstack(unsigned long);
 asmlinkage unsigned long sys_sigreturn(unsigned long);
-asmlinkage int sys_rt_sigreturn(struct pt_regs);
+asmlinkage int sys_rt_sigreturn(unsigned long);
 
 /* kernel/ioport.c */
 asmlinkage long sys_iopl(unsigned long);
diff --git a/arch/x86/include/asm/timex.h b/arch/x86/include/asm/timex.h
index 1287dc1347d6..b5c9d45c981f 100644
--- a/arch/x86/include/asm/timex.h
+++ b/arch/x86/include/asm/timex.h
@@ -1,18 +1,13 @@
-/* x86 architecture timex specifications */
 #ifndef _ASM_X86_TIMEX_H
 #define _ASM_X86_TIMEX_H
 
 #include <asm/processor.h>
 #include <asm/tsc.h>
 
-#ifdef CONFIG_X86_ELAN
-# define PIT_TICK_RATE 1189200 /* AMD Elan has different frequency! */
-#elif defined(CONFIG_X86_RDC321X)
-# define PIT_TICK_RATE 1041667 /* Underlying HZ for R8610 */
-#else
-# define PIT_TICK_RATE 1193182 /* Underlying HZ */
-#endif
-#define CLOCK_TICK_RATE PIT_TICK_RATE
+/* The PIT ticks at this frequency (in HZ): */
+#define PIT_TICK_RATE		1193182
+
+#define CLOCK_TICK_RATE		PIT_TICK_RATE
 
 #define ARCH_HAS_READ_CURRENT_TIMER
 
diff --git a/arch/x86/kernel/apic.c b/arch/x86/kernel/apic.c
index 0f830e4f5675..4b6df2469fe3 100644
--- a/arch/x86/kernel/apic.c
+++ b/arch/x86/kernel/apic.c
@@ -895,6 +895,10 @@ void disable_local_APIC(void)
 {
 	unsigned int value;
 
+	/* APIC hasn't been mapped yet */
+	if (!apic_phys)
+		return;
+
 	clear_local_APIC();
 
 	/*
@@ -1833,6 +1837,11 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	num_processors++;
 	cpu = cpumask_next_zero(-1, cpu_present_mask);
 
+	if (version != apic_version[boot_cpu_physical_apicid])
+		WARN_ONCE(1,
+			"ACPI: apic version mismatch, bootcpu: %x cpu %d: %x\n",
+			apic_version[boot_cpu_physical_apicid], cpu, version);
+
 	physid_set(apicid, phys_cpu_present_map);
 	if (apicid == boot_cpu_physical_apicid) {
 		/*
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 6f11e029e8c5..4b1c319d30c3 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -145,13 +145,14 @@ typedef union {
 
 struct drv_cmd {
 	unsigned int type;
-	cpumask_var_t mask;
+	const struct cpumask *mask;
 	drv_addr_union addr;
 	u32 val;
 };
 
-static void do_drv_read(struct drv_cmd *cmd)
+static long do_drv_read(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 h;
 
 	switch (cmd->type) {
@@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
-static void do_drv_write(struct drv_cmd *cmd)
+static long do_drv_write(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
 
 	switch (cmd->type) {
@@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
 
-	set_cpus_allowed_ptr(current, cmd->mask);
-	do_drv_read(cmd);
-	set_cpus_allowed_ptr(current, &saved_mask);
+	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
 	for_each_cpu(i, cmd->mask) {
-		set_cpus_allowed_ptr(current, cpumask_of(i));
-		do_drv_write(cmd);
+		work_on_cpu(i, do_drv_write, cmd);
 	}
-
-	set_cpus_allowed_ptr(current, &saved_mask);
-	return;
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -235,8 +231,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 		return 0;
 	}
 
-	cpumask_copy(cmd.mask, mask);
-
+	cmd.mask = mask;
 	drv_read(&cmd);
 
 	dprintk("get_cur_val = %u\n", cmd.val);
@@ -368,7 +363,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	return freq;
 }
 
-static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
+static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 				struct acpi_cpufreq_data *data)
 {
 	unsigned int cur_freq;
@@ -403,9 +398,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		return -ENODEV;
 	}
 
-	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
-		return -ENOMEM;
-
 	perf = data->acpi_data;
 	result = cpufreq_frequency_table_target(policy,
 						data->freq_table,
@@ -450,9 +442,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
 	/* cpufreq holds the hotplug lock, so we are safe from here on */
 	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-		cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
+		cmd.mask = policy->cpus;
 	else
-		cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
+		cmd.mask = cpumask_of(policy->cpu);
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
@@ -479,7 +471,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	perf->state = next_perf_state;
 
 out:
-	free_cpumask_var(cmd.mask);
 	return result;
 }
 
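
The conversion above replaces the set_cpus_allowed_ptr() affinity dance with work_on_cpu(), which queues the callback on a workqueue bound to the target CPU and so never touches the caller's own cpumask; that is why do_drv_read()/do_drv_write() now carry the long (*)(void *) signature. A sketch of the same pattern with made-up demo_* names (MSR_IA32_PERF_STATUS is a register this driver already reads):

static long demo_read_perf_status(void *arg)
{
	u32 *val = arg, h;

	rdmsr(MSR_IA32_PERF_STATUS, *val, h);	/* executes on the target CPU */
	return 0;
}

static u32 demo_get_perf_status(int cpu)
{
	u32 val = 0;

	work_on_cpu(cpu, demo_read_perf_status, &val);
	return val;
}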
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8ea6929e974c..549f2ada55f5 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,6 +29,19 @@
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
+	/* Unmask CPUID levels if masked: */
+	if (c->x86 == 6 && c->x86_model >= 15) {
+		u64 misc_enable;
+
+		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
+			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
+			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+			c->cpuid_level = cpuid_eax(0);
+		}
+	}
+
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b59ddcc88cd8..0c0a455fe95c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -33,11 +33,13 @@ u64 mtrr_tom2;
 struct mtrr_state_type mtrr_state = {};
 EXPORT_SYMBOL_GPL(mtrr_state);
 
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "mtrr."
-
-static int mtrr_show;
-module_param_named(show, mtrr_show, bool, 0);
+static int __initdata mtrr_show;
+static int __init mtrr_debug(char *opt)
+{
+	mtrr_show = 1;
+	return 0;
+}
+early_param("mtrr.show", mtrr_debug);
 
 /*
  * Returns the effective MTRR type for the region
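
Usage note: module_param_named() parsed the knob through the "mtrr." MODULE_PARAM_PREFIX, while early_param() matches the literal string during early boot, before module parameters are handled. So the printout is still enabled by booting with

	mtrr.show

on the kernel command line (the handler above ignores any "=value" part and simply sets the flag), and the flag itself can now be marked __initdata.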
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 55c46074eba0..01161077a49c 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -136,7 +136,7 @@ static void __init setup_cpu_pda_map(void)
 #ifdef CONFIG_X86_64
 
 /* correctly size the local cpu masks */
-static void setup_cpu_local_masks(void)
+static void __init setup_cpu_local_masks(void)
 {
 	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
 	alloc_bootmem_cpumask_var(&cpu_callin_mask);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 89bb7668041d..df0587f24c54 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -632,9 +632,16 @@ badframe:
 }
 
 #ifdef CONFIG_X86_32
-asmlinkage int sys_rt_sigreturn(struct pt_regs regs)
+/*
+ * Note: do not pass in pt_regs directly: with tail-call optimization,
+ * GCC will incorrectly stomp on the caller's frame and corrupt user-space
+ * register state:
+ */
+asmlinkage int sys_rt_sigreturn(unsigned long __unused)
 {
-	return do_rt_sigreturn(&regs);
+	struct pt_regs *regs = (struct pt_regs *)&__unused;
+
+	return do_rt_sigreturn(regs);
 }
 #else /* !CONFIG_X86_32 */
 asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
diff --git a/arch/x86/kernel/tlb_uv.c b/arch/x86/kernel/tlb_uv.c
index f885023167e0..6812b829ed83 100644
--- a/arch/x86/kernel/tlb_uv.c
+++ b/arch/x86/kernel/tlb_uv.c
@@ -200,6 +200,7 @@ static int uv_wait_completion(struct bau_desc *bau_desc,
 				destination_timeouts = 0;
 			}
 		}
+		cpu_relax();
 	}
 	return FLUSH_COMPLETE;
 }
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index 23206ba16874..1d3302cc2ddf 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -858,7 +858,7 @@ void __init vmi_init(void)
 #endif
 }
 
-void vmi_activate(void)
+void __init vmi_activate(void)
 {
 	unsigned long flags;
 
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 4a20b2f9a381..7c8ca91bb9ec 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -56,7 +56,7 @@ do { \
 		"	jmp 2b\n"					\
 		".previous\n"						\
 		_ASM_EXTABLE(0b,3b)					\
-		: "=d"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),	\
+		: "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),	\
 		  "=&D" (__d2)						\
 		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
 		: "memory");						\
@@ -218,7 +218,7 @@ long strnlen_user(const char __user *s, long n)
 		"	.align 4\n"
 		"	.long 0b,2b\n"
 		".previous"
-		:"=r" (n), "=D" (s), "=a" (res), "=c" (tmp)
+		:"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
 		:"0" (n), "1" (s), "2" (0), "3" (mask)
 		:"cc");
 	return res & mask;
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 64d6c84e6353..ec13cb5f17ed 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -32,7 +32,7 @@ do { \
 		"	jmp 2b\n"					\
 		".previous\n"						\
 		_ASM_EXTABLE(0b,3b)					\
-		: "=r"(res), "=c"(count), "=&a" (__d0), "=&S" (__d1),	\
+		: "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1),	\
 		  "=&D" (__d2)						\
 		: "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
 		: "memory");						\
@@ -86,7 +86,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 		".previous\n"
 		_ASM_EXTABLE(0b,3b)
 		_ASM_EXTABLE(1b,2b)
-		: [size8] "=c"(size), [dst] "=&D" (__d0)
+		: [size8] "=&c"(size), [dst] "=&D" (__d0)
 		: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
 		  [zero] "r" (0UL), [eight] "r" (8UL));
 	return size;
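
Both usercopy fixes are the same one-character change: adding the '&' (earlyclobber) modifier to outputs that the asm writes before it has consumed every input, so gcc may not allocate an input into the same register. A userspace sketch of the bug class being prevented (demo_add() is an illustrative function, gcc x86 AT&T syntax):

static inline unsigned long demo_add(unsigned long a, unsigned long b)
{
	unsigned long out;

	/* The first instruction writes %0 before %2 is read; without
	 * "=&r", gcc could legally place b in the same register as out,
	 * and the mov would destroy it before the add. */
	asm("mov %1, %0\n\t"
	    "add %2, %0"
	    : "=&r" (out)
	    : "r" (a), "r" (b));
	return out;
}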
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 88f1b10de3be..2cef05074413 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -138,6 +138,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	return pte_offset_kernel(pmd, 0);
 }
 
+static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					   unsigned long vaddr, pte_t *lastpte)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Something (early fixmap) may already have put a pte
+	 * page here, which causes the page table allocation
+	 * to become nonlinear. Attempt to fix it, and if it
+	 * is still nonlinear then we have to bug.
+	 */
+	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+
+	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
+	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
+	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
+	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
+		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+		pte_t *newpte;
+		int i;
+
+		BUG_ON(after_init_bootmem);
+		newpte = alloc_low_page();
+		for (i = 0; i < PTRS_PER_PTE; i++)
+			set_pte(newpte + i, pte[i]);
+
+		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
+		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
+		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
+		__flush_tlb_all();
+
+		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
+		pte = newpte;
+	}
+	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
+	       && vaddr > fix_to_virt(FIX_KMAP_END)
+	       && lastpte && lastpte + PTRS_PER_PTE != pte);
+#endif
+	return pte;
+}
+
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
@@ -154,6 +195,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 	unsigned long vaddr;
 	pgd_t *pgd;
 	pmd_t *pmd;
+	pte_t *pte = NULL;
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -165,7 +207,8 @@
 		pmd = pmd + pmd_index(vaddr);
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
 		     pmd++, pmd_idx++) {
-			one_page_table_init(pmd);
+			pte = page_table_kmap_check(one_page_table_init(pmd),
+						    pmd, vaddr, pte);
 
 			vaddr += PMD_SIZE;
 		}
@@ -508,7 +551,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
 	 * Fixed mappings, only the page table structure has to be
 	 * created - mappings will be set by set_fixmap():
 	 */
-	early_ioremap_clear();
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 	page_table_range_init(vaddr, end, pgd_base);
@@ -801,7 +843,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
 	/* for fixmap */
-	tables += PAGE_SIZE * 2;
+	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
 
 	/*
	 * RED-PEN putting page tables only on node 0 could
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 23f68e77ad1f..e6d36b490250 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -596,7 +596,7 @@ static void __init init_gbpages(void)
 		direct_gbpages = 0;
 }
 
-static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
 						unsigned long end,
 						unsigned long page_size_mask)
 {
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index d0151d8ce452..ca53224fc56c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -17,6 +17,7 @@
  */
 
 #include <asm/iomap.h>
+#include <asm/pat.h>
 #include <linux/module.h>
 
 /* Map 'pfn' using fixed map 'type' and protections 'prot'
@@ -29,6 +30,15 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 
 	pagefault_disable();
 
+	/*
+	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
+	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
+	 * MTRR is UC or WC. UC_MINUS gets the real intention of the
+	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
+	 */
+	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
+		prot = PAGE_KERNEL_UC_MINUS;
+
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
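
With this fallback a driver can request PAGE_KERNEL_WC unconditionally and degrade gracefully on !PAT hardware. A hedged sketch of the call pair (iounmap_atomic() is this file's existing counterpart; pfn and the KM slot are illustrative):

	void *vaddr;

	vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_WC);
	/* ... streaming stores to the mapped page ... */
	iounmap_atomic(vaddr, KM_USER0);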
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index bd85d42819e1..af750ab973b6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -557,34 +557,9 @@ void __init early_ioremap_init(void)
 	}
 }
 
-void __init early_ioremap_clear(void)
-{
-	pmd_t *pmd;
-
-	if (early_ioremap_debug)
-		printk(KERN_INFO "early_ioremap_clear()\n");
-
-	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-	pmd_clear(pmd);
-	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
-	__flush_tlb_all();
-}
-
 void __init early_ioremap_reset(void)
 {
-	enum fixed_addresses idx;
-	unsigned long addr, phys;
-	pte_t *pte;
-
 	after_paging_init = 1;
-	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-		addr = fix_to_virt(idx);
-		pte = early_ioremap_pte(addr);
-		if (pte_present(*pte)) {
-			phys = pte_val(*pte) & PAGE_MASK;
-			set_fixmap(idx, phys);
-		}
-	}
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e89d24815f26..84ba74820ad6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -534,6 +534,36 @@ out_unlock:
 	return 0;
 }
 
+static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+			       int primary)
+{
+	/*
+	 * Ignore all non primary paths.
+	 */
+	if (!primary)
+		return 0;
+
+	/*
+	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
+	 * to have holes.
+	 * Also set numpages to '1' indicating that we processed cpa req for
+	 * one virtual address page and its pfn. TBD: numpages can be set based
+	 * on the initial value and the level returned by lookup_address().
+	 */
+	if (within(vaddr, PAGE_OFFSET,
+		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
+		cpa->numpages = 1;
+		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
+		return 0;
+	} else {
+		WARN(1, KERN_WARNING "CPA: called for zero pte. "
+		     "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
+		     *cpa->vaddr);
+
+		return -EFAULT;
+	}
+}
+
 static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
 	unsigned long address;
@@ -549,17 +579,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
-		return 0;
+		return __cpa_process_fault(cpa, address, primary);
 
 	old_pte = *kpte;
-	if (!pte_val(old_pte)) {
-		if (!primary)
-			return 0;
-		WARN(1, KERN_WARNING "CPA: called for zero pte. "
-		     "vaddr = %lx cpa->vaddr = %lx\n", address,
-		     *cpa->vaddr);
-		return -EINVAL;
-	}
+	if (!pte_val(old_pte))
+		return __cpa_process_fault(cpa, address, primary);
 
 	if (level == PG_LEVEL_4K) {
 		pte_t new_pte;
@@ -657,12 +681,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 	vaddr = *cpa->vaddr;
 
 	if (!(within(vaddr, PAGE_OFFSET,
-		    PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
-#ifdef CONFIG_X86_64
-		|| within(vaddr, PAGE_OFFSET + (1UL<<32),
-		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
-#endif
-	)) {
+		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
 		alias_cpa = *cpa;
 		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 8b08fb955274..7b61036427df 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,11 +333,23 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 					      req_type & _PAGE_CACHE_MASK);
 	}
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return reserve_ram_pages_type(start, end, req_type, new_type);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	if (new_type)
+		*new_type = actual_type;
+
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region are treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return reserve_ram_pages_type(start, end, req_type,
+						      new_type);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	new  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
 	if (!new)
@@ -347,9 +359,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	new->end	= end;
 	new->type	= actual_type;
 
-	if (new_type)
-		*new_type = actual_type;
-
 	spin_lock(&memtype_lock);
 
 	if (cached_entry && start >= cached_start)
@@ -437,11 +446,19 @@ int free_memtype(u64 start, u64 end)
 	if (is_ISA_range(start, end - 1))
 		return 0;
 
-	is_range_ram = pagerange_is_ram(start, end);
-	if (is_range_ram == 1)
-		return free_ram_pages_type(start, end);
-	else if (is_range_ram < 0)
-		return -EINVAL;
+	/*
+	 * For legacy reasons, some parts of the physical address range in the
+	 * legacy 1MB region are treated as non-RAM (even when listed as RAM in
+	 * the e820 tables). So we will track the memory attributes of this
+	 * legacy 1MB region using the linear memtype_list always.
+	 */
+	if (end >= ISA_END_ADDRESS) {
+		is_range_ram = pagerange_is_ram(start, end);
+		if (is_range_ram == 1)
+			return free_ram_pages_type(start, end);
+		else if (is_range_ram < 0)
+			return -EINVAL;
+	}
 
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
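
Net effect of the two hunks: everything below ISA_END_ADDRESS (the legacy 1MB region) is always tracked via the linear memtype_list, the page-based tracker is only consulted for ranges above it, and *new_type is filled in before the RAM-pages path can return. A call-pattern sketch (addresses are illustrative; _PAGE_CACHE_WC is an existing request type):

	u64 start = 0xd0000000ULL, end = start + PAGE_SIZE;
	unsigned long new_type;
	int ret;

	ret = reserve_memtype(start, end, _PAGE_CACHE_WC, &new_type);
	if (ret)
		return ret;
	/* new_type holds the type actually granted (may differ from WC) */

	/* ... map and use the range ... */

	free_memtype(start, end);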