Diffstat (limited to 'arch/i386/kernel/cpu')
-rw-r--r--  arch/i386/kernel/cpu/common.c                        15
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c           1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c            1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/powernow-k8.c            1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c     1
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c               87
-rw-r--r--  arch/i386/kernel/cpu/mcheck/p6.c                     11
-rw-r--r--  arch/i386/kernel/cpu/mtrr/if.c                      119
-rw-r--r--  arch/i386/kernel/cpu/proc.c                           2
9 files changed, 142 insertions, 96 deletions
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 9ad43be9a01f..74145a33cb0f 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -573,6 +573,7 @@ void __devinit cpu_init(void)
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
 	struct thread_struct *thread = &current->thread;
+	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
 	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
@@ -594,24 +595,16 @@ void __devinit cpu_init(void)
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
 	 */
-	memcpy(&per_cpu(cpu_gdt_table, cpu), cpu_gdt_table,
-	       GDT_SIZE);
+	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
 
 	/* Set up GDT entry for 16bit stack */
-	*(__u64 *)&(per_cpu(cpu_gdt_table, cpu)[GDT_ENTRY_ESPFIX_SS]) |=
+	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
 		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
 		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
 		(CPU_16BIT_STACK_SIZE - 1);
 
 	cpu_gdt_descr[cpu].size = GDT_SIZE - 1;
-	cpu_gdt_descr[cpu].address =
-		(unsigned long)&per_cpu(cpu_gdt_table, cpu);
-
-	/*
-	 * Set up the per-thread TLS descriptor cache:
-	 */
-	memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
-	       GDT_ENTRY_TLS_ENTRIES * 8);
+	cpu_gdt_descr[cpu].address = (unsigned long)gdt;
 
 	load_gdt(&cpu_gdt_descr[cpu]);
 	load_idt(&idt_descr);
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 822c8ce9d1f1..caa9f7711343 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -32,6 +32,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/compiler.h>
+#include <linux/sched.h>	/* current */
 #include <asm/io.h>
 #include <asm/delay.h>
 #include <asm/uaccess.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index aa622d52c6e5..270f2188d68b 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -28,6 +28,7 @@
 #include <linux/cpufreq.h>
 #include <linux/slab.h>
 #include <linux/cpumask.h>
+#include <linux/sched.h>	/* current / set_cpus_allowed() */
 
 #include <asm/processor.h>
 #include <asm/msr.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 58ca98fdc2ca..2d5c9adba0cd 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/cpumask.h>
+#include <linux/sched.h>	/* for current / set_cpus_allowed() */
 
 #include <asm/msr.h>
 #include <asm/io.h>
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index c397b6220430..1465974256c9 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/cpufreq.h>
 #include <linux/config.h>
+#include <linux/sched.h>	/* current */
 #include <linux/delay.h>
 #include <linux/compiler.h>
 
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 9e0d5f83cb9f..4dc42a189ae5 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -3,6 +3,7 @@
  *
  *	Changes:
  *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
+ *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
  */
 
 #include <linux/init.h>
@@ -10,6 +11,7 @@
 #include <linux/device.h>
 #include <linux/compiler.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #include <asm/processor.h>
 #include <asm/smp.h>
@@ -28,7 +30,7 @@ struct _cache_table
 };
 
 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __devinitdata =
+static struct _cache_table cache_table[] __cpuinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
@@ -117,10 +119,9 @@ struct _cpuid4_info {
 	cpumask_t shared_cpu_map;
 };
 
-#define MAX_CACHE_LEAVES		4
 static unsigned short			num_cache_leaves;
 
-static int __devinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 {
 	unsigned int		eax, ebx, ecx, edx;
 	union _cpuid4_leaf_eax	cache_eax;
@@ -144,23 +145,18 @@ static int __init find_num_cache_leaves(void)
 {
 	unsigned int		eax, ebx, ecx, edx;
 	union _cpuid4_leaf_eax	cache_eax;
-	int			i;
-	int			retval;
+	int			i = -1;
 
-	retval = MAX_CACHE_LEAVES;
-	/* Do cpuid(4) loop to find out num_cache_leaves */
-	for (i = 0; i < MAX_CACHE_LEAVES; i++) {
-		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
-		cache_eax.full = eax;
-		if (cache_eax.split.type == CACHE_TYPE_NULL) {
-			retval = i;
-			break;
-		}
-	}
-	return retval;
+	do {
+		++i;
+		/* Do cpuid(4) loop to find out num_cache_leaves */
+		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
+		cache_eax.full = eax;
+	} while (cache_eax.split.type != CACHE_TYPE_NULL);
+	return i;
 }
 
-unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
@@ -284,13 +280,7 @@ unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	if ( l3 )
 		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
 
-	/*
-	 * This assumes the L3 cache is shared; it typically lives in
-	 * the northbridge.  The L1 caches are included by the L2
-	 * cache, and so should not be included for the purpose of
-	 * SMP switching weights.
-	 */
-	c->x86_cache_size = l2 ? l2 : (l1i+l1d);
+	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 	}
 
 	return l2;
@@ -301,7 +291,7 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
 #define CPUID4_INFO_IDX(x,y)    (&((cpuid4_info[x])[y]))
 
 #ifdef CONFIG_SMP
-static void __devinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info	*this_leaf;
 	unsigned long num_threads_sharing;
@@ -334,7 +324,7 @@ static void free_cache_attributes(unsigned int cpu)
 	cpuid4_info[cpu] = NULL;
 }
 
-static int __devinit detect_cache_attributes(unsigned int cpu)
+static int __cpuinit detect_cache_attributes(unsigned int cpu)
 {
 	struct _cpuid4_info	*this_leaf;
 	unsigned long		j;
@@ -511,7 +501,7 @@ static void cpuid4_cache_sysfs_exit(unsigned int cpu)
 	free_cache_attributes(cpu);
 }
 
-static int __devinit cpuid4_cache_sysfs_init(unsigned int cpu)
+static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
 {
 
 	if (num_cache_leaves == 0)
@@ -542,7 +532,7 @@ err_out:
 }
 
 /* Add/Remove cache interface for CPU device */
-static int __devinit cache_add_dev(struct sys_device * sys_dev)
+static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i, j;
@@ -579,7 +569,7 @@ static int __devinit cache_add_dev(struct sys_device * sys_dev)
 	return retval;
 }
 
-static int __devexit cache_remove_dev(struct sys_device * sys_dev)
+static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long i;
@@ -588,24 +578,49 @@ static int __devexit cache_remove_dev(struct sys_device * sys_dev)
 		kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
 	kobject_unregister(cache_kobject[cpu]);
 	cpuid4_cache_sysfs_exit(cpu);
-	return 0;
+	return;
+}
+
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
+					unsigned long action, void *hcpu)
+{
+	unsigned int cpu = (unsigned long)hcpu;
+	struct sys_device *sys_dev;
+
+	sys_dev = get_cpu_sysdev(cpu);
+	switch (action) {
+	case CPU_ONLINE:
+		cache_add_dev(sys_dev);
+		break;
+	case CPU_DEAD:
+		cache_remove_dev(sys_dev);
+		break;
+	}
+	return NOTIFY_OK;
 }
 
-static struct sysdev_driver cache_sysdev_driver = {
-	.add = cache_add_dev,
-	.remove = __devexit_p(cache_remove_dev),
+static struct notifier_block cacheinfo_cpu_notifier =
+{
+	.notifier_call = cacheinfo_cpu_callback,
 };
 
-/* Register/Unregister the cpu_cache driver */
-static int __devinit cache_register_driver(void)
+static int __cpuinit cache_sysfs_init(void)
 {
+	int i;
+
 	if (num_cache_leaves == 0)
 		return 0;
 
-	return sysdev_driver_register(&cpu_sysdev_class,&cache_sysdev_driver);
+	register_cpu_notifier(&cacheinfo_cpu_notifier);
+
+	for_each_online_cpu(i) {
+		cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
+				(void *)(long)i);
+	}
+
+	return 0;
 }
 
-device_initcall(cache_register_driver);
+device_initcall(cache_sysfs_init);
 
 #endif
-
diff --git a/arch/i386/kernel/cpu/mcheck/p6.c b/arch/i386/kernel/cpu/mcheck/p6.c
index 3c035b8fa3d9..979b18bc95c1 100644
--- a/arch/i386/kernel/cpu/mcheck/p6.c
+++ b/arch/i386/kernel/cpu/mcheck/p6.c
@@ -102,11 +102,16 @@ void __devinit intel_p6_mcheck_init(struct cpuinfo_x86 *c)
 		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 	nr_mce_banks = l & 0xff;
 
-	/* Don't enable bank 0 on intel P6 cores, it goes bang quickly. */
-	for (i=1; i<nr_mce_banks; i++) {
+	/*
+	 * Following the example in IA-32 SDM Vol 3:
+	 *  - MC0_CTL should not be written
+	 *  - Status registers on all banks should be cleared on reset
+	 */
+	for (i=1; i<nr_mce_banks; i++)
 		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
+
+	for (i=0; i<nr_mce_banks; i++)
 		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
-	}
 
 	set_in_cr4 (X86_CR4_MCE);
 	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
index 1923e0aed26a..cf39e205d33c 100644
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -149,60 +149,89 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
 	return -EINVAL;
 }
 
-static int
-mtrr_ioctl(struct inode *inode, struct file *file,
-	   unsigned int cmd, unsigned long __arg)
+static long
+mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
 {
-	int err;
+	int err = 0;
 	mtrr_type type;
 	struct mtrr_sentry sentry;
 	struct mtrr_gentry gentry;
 	void __user *arg = (void __user *) __arg;
 
 	switch (cmd) {
+	case MTRRIOC_ADD_ENTRY:
+	case MTRRIOC_SET_ENTRY:
+	case MTRRIOC_DEL_ENTRY:
+	case MTRRIOC_KILL_ENTRY:
+	case MTRRIOC_ADD_PAGE_ENTRY:
+	case MTRRIOC_SET_PAGE_ENTRY:
+	case MTRRIOC_DEL_PAGE_ENTRY:
+	case MTRRIOC_KILL_PAGE_ENTRY:
+		if (copy_from_user(&sentry, arg, sizeof sentry))
+			return -EFAULT;
+		break;
+	case MTRRIOC_GET_ENTRY:
+	case MTRRIOC_GET_PAGE_ENTRY:
+		if (copy_from_user(&gentry, arg, sizeof gentry))
+			return -EFAULT;
+		break;
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_ADD_ENTRY:
+	case MTRRIOC32_SET_ENTRY:
+	case MTRRIOC32_DEL_ENTRY:
+	case MTRRIOC32_KILL_ENTRY:
+	case MTRRIOC32_ADD_PAGE_ENTRY:
+	case MTRRIOC32_SET_PAGE_ENTRY:
+	case MTRRIOC32_DEL_PAGE_ENTRY:
+	case MTRRIOC32_KILL_PAGE_ENTRY: {
+		struct mtrr_sentry32 __user *s32 = (struct mtrr_sentry32 __user *)__arg;
+		err = get_user(sentry.base, &s32->base);
+		err |= get_user(sentry.size, &s32->size);
+		err |= get_user(sentry.type, &s32->type);
+		if (err)
+			return err;
+		break;
+	}
+	case MTRRIOC32_GET_ENTRY:
+	case MTRRIOC32_GET_PAGE_ENTRY: {
+		struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
+		err = get_user(gentry.regnum, &g32->regnum);
+		err |= get_user(gentry.base, &g32->base);
+		err |= get_user(gentry.size, &g32->size);
+		err |= get_user(gentry.type, &g32->type);
+		if (err)
+			return err;
+		break;
+	}
+#endif
+	}
+
+	switch (cmd) {
 	default:
 		return -ENOTTY;
 	case MTRRIOC_ADD_ENTRY:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		if (copy_from_user(&sentry, arg, sizeof sentry))
-			return -EFAULT;
 		err =
 		    mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
 				  file, 0);
-		if (err < 0)
-			return err;
 		break;
 	case MTRRIOC_SET_ENTRY:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		if (copy_from_user(&sentry, arg, sizeof sentry))
-			return -EFAULT;
 		err = mtrr_add(sentry.base, sentry.size, sentry.type, 0);
-		if (err < 0)
-			return err;
 		break;
 	case MTRRIOC_DEL_ENTRY:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		if (copy_from_user(&sentry, arg, sizeof sentry))
-			return -EFAULT;
 		err = mtrr_file_del(sentry.base, sentry.size, file, 0);
-		if (err < 0)
-			return err;
 		break;
 	case MTRRIOC_KILL_ENTRY:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		if (copy_from_user(&sentry, arg, sizeof sentry))
-			return -EFAULT;
 		err = mtrr_del(-1, sentry.base, sentry.size);
-		if (err < 0)
-			return err;
 		break;
 	case MTRRIOC_GET_ENTRY:
-		if (copy_from_user(&gentry, arg, sizeof gentry))
-			return -EFAULT;
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
 		mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
@@ -217,60 +246,59 @@ mtrr_ioctl(struct inode *inode, struct file *file,
 			gentry.type = type;
 		}
 
-		if (copy_to_user(arg, &gentry, sizeof gentry))
-			return -EFAULT;
 		break;
 	case MTRRIOC_ADD_PAGE_ENTRY:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		if (copy_from_user(&sentry, arg, sizeof sentry))
-			return -EFAULT;
 		err =
 		    mtrr_file_add(sentry.base, sentry.size, sentry.type, 1,
 				  file, 1);
-		if (err < 0)
-			return err;
 		break;
 	case MTRRIOC_SET_PAGE_ENTRY:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		if (copy_from_user(&sentry, arg, sizeof sentry))
-			return -EFAULT;
 		err = mtrr_add_page(sentry.base, sentry.size, sentry.type, 0);
-		if (err < 0)
-			return err;
 		break;
 	case MTRRIOC_DEL_PAGE_ENTRY:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		if (copy_from_user(&sentry, arg, sizeof sentry))
-			return -EFAULT;
 		err = mtrr_file_del(sentry.base, sentry.size, file, 1);
-		if (err < 0)
-			return err;
 		break;
 	case MTRRIOC_KILL_PAGE_ENTRY:
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
-		if (copy_from_user(&sentry, arg, sizeof sentry))
-			return -EFAULT;
 		err = mtrr_del_page(-1, sentry.base, sentry.size);
-		if (err < 0)
-			return err;
 		break;
 	case MTRRIOC_GET_PAGE_ENTRY:
-		if (copy_from_user(&gentry, arg, sizeof gentry))
-			return -EFAULT;
 		if (gentry.regnum >= num_var_ranges)
 			return -EINVAL;
 		mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type);
 		gentry.type = type;
+		break;
+	}
+
+	if (err)
+		return err;
 
+	switch(cmd) {
+	case MTRRIOC_GET_ENTRY:
+	case MTRRIOC_GET_PAGE_ENTRY:
 		if (copy_to_user(arg, &gentry, sizeof gentry))
-			return -EFAULT;
+			err = -EFAULT;
+		break;
+#ifdef CONFIG_COMPAT
+	case MTRRIOC32_GET_ENTRY:
+	case MTRRIOC32_GET_PAGE_ENTRY: {
+		struct mtrr_gentry32 __user *g32 = (struct mtrr_gentry32 __user *)__arg;
+		err = put_user(gentry.base, &g32->base);
+		err |= put_user(gentry.size, &g32->size);
+		err |= put_user(gentry.regnum, &g32->regnum);
+		err |= put_user(gentry.type, &g32->type);
 		break;
 	}
-	return 0;
+#endif
+	}
+	return err;
 }
 
 static int
@@ -310,7 +338,8 @@ static struct file_operations mtrr_fops = {
 	.read    = seq_read,
 	.llseek  = seq_lseek,
 	.write   = mtrr_write,
-	.ioctl   = mtrr_ioctl,
+	.unlocked_ioctl = mtrr_ioctl,
+	.compat_ioctl = mtrr_ioctl,
 	.release = mtrr_close,
 };
 
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 8bd77d948a84..41b871ecf4b3 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -44,7 +44,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 
 		/* Intel-defined (#2) */
-		"pni", NULL, NULL, "monitor", "ds_cpl", NULL, NULL, "est",
+		"pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
 		"tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 		NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,