Diffstat (limited to 'arch/i386/kernel/cpu')
-rw-r--r--  arch/i386/kernel/cpu/amd.c                         |   5
-rw-r--r--  arch/i386/kernel/cpu/common.c                      | 251
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Kconfig               |   6
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/Makefile              |   2
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c        | 810
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/gx-suspmod.c          |   4
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/longhaul.c            | 128
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/p4-clockmod.c         |  38
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/sc520_freq.c          |   7
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c  |  15
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.c       |  32
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-lib.h       |   1
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-smi.c       |   3
-rw-r--r--  arch/i386/kernel/cpu/cyrix.c                       |   2
-rw-r--r--  arch/i386/kernel/cpu/intel.c                       |  12
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c             |  11
-rw-r--r--  arch/i386/kernel/cpu/mcheck/non-fatal.c            |   6
-rw-r--r--  arch/i386/kernel/cpu/mcheck/therm_throt.c          |   3
-rw-r--r--  arch/i386/kernel/cpu/mtrr/Makefile                 |   4
-rw-r--r--  arch/i386/kernel/cpu/mtrr/amd.c                    |   2
-rw-r--r--  arch/i386/kernel/cpu/mtrr/centaur.c                |   9
-rw-r--r--  arch/i386/kernel/cpu/mtrr/cyrix.c                  |  25
-rw-r--r--  arch/i386/kernel/cpu/mtrr/generic.c                |  78
-rw-r--r--  arch/i386/kernel/cpu/mtrr/if.c                     |  31
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c                   |  71
-rw-r--r--  arch/i386/kernel/cpu/mtrr/mtrr.h                   |  25
-rw-r--r--  arch/i386/kernel/cpu/proc.c                        |   3
27 files changed, 1004 insertions(+), 580 deletions(-)
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index e4758095d87a..41cfea57232b 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -104,10 +104,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 				f_vide();
 			rdtscl(d2);
 			d = d2-d;
 
-			/* Knock these two lines out if it debugs out ok */
-			printk(KERN_INFO "AMD K6 stepping B detected - ");
-			/* -- cut here -- */
 			if (d > 20*K6_BUG_LOOP)
 				printk("system stability may be impaired when more than 32 MB are used.\n");
 			else
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index d9f3e3c31f05..8689d62abd4a 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -18,14 +18,15 @@
 #include <asm/apic.h>
 #include <mach_apic.h>
 #endif
+#include <asm/pda.h>
 
 #include "cpu.h"
 
 DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
 EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
 
-DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
-EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
+struct i386_pda *_cpu_pda[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(_cpu_pda);
 
 static int cachesize_override __cpuinitdata = -1;
 static int disable_x86_fxsr __cpuinitdata;
@@ -53,7 +54,7 @@ static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_init	= default_init,
 	.c_vendor = "Unknown",
 };
-static struct cpu_dev * this_cpu = &default_cpu;
+static struct cpu_dev * this_cpu __cpuinitdata = &default_cpu;
 
 static int __init cachesize_setup(char *str)
 {
@@ -235,29 +236,14 @@ static int __cpuinit have_cpuid_p(void)
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-/* Do minimum CPU detection early.
-   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
-   The others are not touched to avoid unwanted side effects.
-
-   WARNING: this function is only called on the BP.  Don't add code here
-   that is supposed to run on all CPUs. */
-static void __init early_cpu_detect(void)
+void __init cpu_detect(struct cpuinfo_x86 *c)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	c->x86_cache_alignment = 32;
-
-	if (!have_cpuid_p())
-		return;
-
 	/* Get vendor name */
 	cpuid(0x00000000, &c->cpuid_level,
 	      (int *)&c->x86_vendor_id[0],
 	      (int *)&c->x86_vendor_id[8],
 	      (int *)&c->x86_vendor_id[4]);
 
-	get_cpu_vendor(c, 1);
-
 	c->x86 = 4;
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
@@ -274,6 +260,26 @@ static void __init early_cpu_detect(void)
 	}
 }
 
+/* Do minimum CPU detection early.
+   Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
+   The others are not touched to avoid unwanted side effects.
+
+   WARNING: this function is only called on the BP.  Don't add code here
+   that is supposed to run on all CPUs. */
+static void __init early_cpu_detect(void)
+{
+	struct cpuinfo_x86 *c = &boot_cpu_data;
+
+	c->x86_cache_alignment = 32;
+
+	if (!have_cpuid_p())
+		return;
+
+	cpu_detect(c);
+
+	get_cpu_vendor(c, 1);
+}
+
 static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl;
@@ -308,6 +314,8 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 #else
 		c->apicid = (ebx >> 24) & 0xFF;
 #endif
+		if (c->x86_capability[0] & (1<<19))
+			c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
 	} else {
 		/* Have CPUID level 0 only - unheard of */
 		c->x86 = 4;
@@ -372,6 +380,7 @@ void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_vendor_id[0] = '\0'; /* Unset */
 	c->x86_model_id[0] = '\0';  /* Unset */
 	c->x86_max_cores = 1;
+	c->x86_clflush_size = 32;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
 	if (!have_cpuid_p()) {
@@ -591,42 +600,24 @@ void __init early_cpu_init(void)
 	disable_pse = 1;
 #endif
 }
-/*
- * cpu_init() initializes state that is per-CPU. Some data is already
- * initialized (naturally) in the bootstrap process, such as the GDT
- * and IDT. We reload them nevertheless, this function acts as a
- * 'CPU state barrier', nothing should get across.
- */
-void __cpuinit cpu_init(void)
+
+/* Make sure %gs is initialized properly in idle threads */
+struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
 {
-	int cpu = smp_processor_id();
-	struct tss_struct * t = &per_cpu(init_tss, cpu);
-	struct thread_struct *thread = &current->thread;
-	struct desc_struct *gdt;
-	__u32 stk16_off = (__u32)&per_cpu(cpu_16bit_stack, cpu);
-	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+	memset(regs, 0, sizeof(struct pt_regs));
+	regs->xgs = __KERNEL_PDA;
+	return regs;
+}
 
-	if (cpu_test_and_set(cpu, cpu_initialized)) {
-		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;) local_irq_enable();
-	}
-	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+static __cpuinit int alloc_gdt(int cpu)
+{
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+	struct desc_struct *gdt;
+	struct i386_pda *pda;
 
-	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
-		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
-	if (tsc_disable && cpu_has_tsc) {
-		printk(KERN_NOTICE "Disabling TSC...\n");
-		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
-		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
-		set_in_cr4(X86_CR4_TSD);
-	}
+	gdt = (struct desc_struct *)cpu_gdt_descr->address;
+	pda = cpu_pda(cpu);
 
-	/* The CPU hotplug case */
-	if (cpu_gdt_descr->address) {
-		gdt = (struct desc_struct *)cpu_gdt_descr->address;
-		memset(gdt, 0, PAGE_SIZE);
-		goto old_gdt;
-	}
 	/*
 	 * This is a horrible hack to allocate the GDT.  The problem
 	 * is that cpu_init() is called really early for the boot CPU
@@ -634,43 +625,130 @@ void __cpuinit cpu_init(void)
 	 * CPUs, when bootmem will have gone away
 	 */
 	if (NODE_DATA(0)->bdata->node_bootmem_map) {
-		gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
-		/* alloc_bootmem_pages panics on failure, so no check */
+		BUG_ON(gdt != NULL || pda != NULL);
+
+		gdt = alloc_bootmem_pages(PAGE_SIZE);
+		pda = alloc_bootmem(sizeof(*pda));
+		/* alloc_bootmem(_pages) panics on failure, so no check */
+
 		memset(gdt, 0, PAGE_SIZE);
+		memset(pda, 0, sizeof(*pda));
 	} else {
-		gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
-		if (unlikely(!gdt)) {
-			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
-			for (;;)
-				local_irq_enable();
+		/* GDT and PDA might already have been allocated if
+		   this is a CPU hotplug re-insertion. */
+		if (gdt == NULL)
+			gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
+
+		if (pda == NULL)
+			pda = kmalloc_node(sizeof(*pda), GFP_KERNEL, cpu_to_node(cpu));
+
+		if (unlikely(!gdt || !pda)) {
+			free_pages((unsigned long)gdt, 0);
+			kfree(pda);
+			return 0;
 		}
 	}
-old_gdt:
+
+	cpu_gdt_descr->address = (unsigned long)gdt;
+	cpu_pda(cpu) = pda;
+
+	return 1;
+}
+
+/* Initial PDA used by boot CPU */
+struct i386_pda boot_pda = {
+	._pda = &boot_pda,
+	.cpu_number = 0,
+	.pcurrent = &init_task,
+};
+
+static inline void set_kernel_gs(void)
+{
+	/* Set %gs for this CPU's PDA.  Memory clobber is to create a
+	   barrier with respect to any PDA operations, so the compiler
+	   doesn't move any before here. */
+	asm volatile ("mov %0, %%gs" : : "r" (__KERNEL_PDA) : "memory");
+}
+
+/* Initialize the CPU's GDT and PDA.  The boot CPU does this for
+   itself, but secondaries find this done for them. */
+__cpuinit int init_gdt(int cpu, struct task_struct *idle)
+{
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
+	struct desc_struct *gdt;
+	struct i386_pda *pda;
+
+	/* For non-boot CPUs, the GDT and PDA should already have been
+	   allocated. */
+	if (!alloc_gdt(cpu)) {
+		printk(KERN_CRIT "CPU%d failed to allocate GDT or PDA\n", cpu);
+		return 0;
+	}
+
+	gdt = (struct desc_struct *)cpu_gdt_descr->address;
+	pda = cpu_pda(cpu);
+
+	BUG_ON(gdt == NULL || pda == NULL);
+
 	/*
 	 * Initialize the per-CPU GDT with the boot GDT,
 	 * and set up the GDT descriptor:
 	 */
 	memcpy(gdt, cpu_gdt_table, GDT_SIZE);
+	cpu_gdt_descr->size = GDT_SIZE - 1;
 
-	/* Set up GDT entry for 16bit stack */
-	*(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
-		((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
-		((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
-		(CPU_16BIT_STACK_SIZE - 1);
+	pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
+			(u32 *)&gdt[GDT_ENTRY_PDA].b,
+			(unsigned long)pda, sizeof(*pda) - 1,
+			0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
 
-	cpu_gdt_descr->size = GDT_SIZE - 1;
-	cpu_gdt_descr->address = (unsigned long)gdt;
+	memset(pda, 0, sizeof(*pda));
+	pda->_pda = pda;
+	pda->cpu_number = cpu;
+	pda->pcurrent = idle;
+
+	return 1;
+}
+
+/* Common CPU init for both boot and secondary CPUs */
+static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+{
+	struct tss_struct * t = &per_cpu(init_tss, cpu);
+	struct thread_struct *thread = &curr->thread;
+	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
+	/* Reinit these anyway, even if they've already been done (on
+	   the boot CPU, this will transition from the boot gdt+pda to
+	   the real ones). */
 	load_gdt(cpu_gdt_descr);
+	set_kernel_gs();
+
+	if (cpu_test_and_set(cpu, cpu_initialized)) {
+		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
+		for (;;) local_irq_enable();
+	}
+
+	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+
+	if (cpu_has_vme || cpu_has_tsc || cpu_has_de)
+		clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
+	if (tsc_disable && cpu_has_tsc) {
+		printk(KERN_NOTICE "Disabling TSC...\n");
+		/**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
+		clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
+		set_in_cr4(X86_CR4_TSD);
+	}
+
 	load_idt(&idt_descr);
 
 	/*
 	 * Set up and load the per-CPU TSS and LDT
 	 */
 	atomic_inc(&init_mm.mm_count);
-	current->active_mm = &init_mm;
-	BUG_ON(current->mm);
-	enter_lazy_tlb(&init_mm, current);
+	curr->active_mm = &init_mm;
+	if (curr->mm)
+		BUG();
+	enter_lazy_tlb(&init_mm, curr);
 
 	load_esp0(t, thread);
 	set_tss_desc(cpu,t);
@@ -682,8 +760,8 @@ old_gdt:
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear %fs and %gs. */
-	asm volatile ("movl %0, %%fs; movl %0, %%gs" : : "r" (0));
+	/* Clear %fs. */
+	asm volatile ("mov %0, %%fs" : : "r" (0));
 
 	/* Clear all 6 debug registers: */
 	set_debugreg(0, 0);
@@ -701,6 +779,37 @@ old_gdt:
 	mxcsr_feature_mask_init();
 }
 
+/* Entrypoint to initialize secondary CPU */
+void __cpuinit secondary_cpu_init(void)
+{
+	int cpu = smp_processor_id();
+	struct task_struct *curr = current;
+
+	_cpu_init(cpu, curr);
+}
+
+/*
+ * cpu_init() initializes state that is per-CPU. Some data is already
+ * initialized (naturally) in the bootstrap process, such as the GDT
+ * and IDT. We reload them nevertheless, this function acts as a
+ * 'CPU state barrier', nothing should get across.
+ */
+void __cpuinit cpu_init(void)
+{
+	int cpu = smp_processor_id();
+	struct task_struct *curr = current;
+
+	/* Set up the real GDT and PDA, so we can transition from the
+	   boot versions. */
+	if (!init_gdt(cpu, curr)) {
+		/* failed to allocate something; not much we can do... */
+		for (;;)
+			local_irq_enable();
+	}
+
+	_cpu_init(cpu, curr);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void __cpuinit cpu_uninit(void)
 {
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index ccc1edff5c97..5299c5bf4454 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -17,6 +17,7 @@ config X86_ACPI_CPUFREQ
 	help
 	  This driver adds a CPUFreq driver which utilizes the ACPI
 	  Processor Performance States.
+	  This driver also supports Intel Enhanced Speedstep.
 
 	  For details, take a look at <file:Documentation/cpu-freq/>.
 
@@ -121,11 +122,14 @@ config X86_SPEEDSTEP_CENTRINO
 	  If in doubt, say N.
 
 config X86_SPEEDSTEP_CENTRINO_ACPI
-	bool "Use ACPI tables to decode valid frequency/voltage pairs"
+	bool "Use ACPI tables to decode valid frequency/voltage (deprecated)"
 	depends on X86_SPEEDSTEP_CENTRINO && ACPI_PROCESSOR
 	depends on !(X86_SPEEDSTEP_CENTRINO = y && ACPI_PROCESSOR = m)
 	default y
 	help
+	  This is deprecated and this functionality is now merged into
+	  acpi_cpufreq (X86_ACPI_CPUFREQ). Use that driver instead of
+	  speedstep_centrino.
 	  Use primarily the information provided in the BIOS ACPI tables
 	  to determine valid CPU frequency and voltage pairings. It is
 	  required for the driver to work on non-Banias CPUs.
diff --git a/arch/i386/kernel/cpu/cpufreq/Makefile b/arch/i386/kernel/cpu/cpufreq/Makefile
index 2e894f1c8910..8de3abe322a9 100644
--- a/arch/i386/kernel/cpu/cpufreq/Makefile
+++ b/arch/i386/kernel/cpu/cpufreq/Makefile
@@ -7,9 +7,9 @@ obj-$(CONFIG_SC520_CPUFREQ) += sc520_freq.o
 obj-$(CONFIG_X86_LONGRUN)		+= longrun.o
 obj-$(CONFIG_X86_GX_SUSPMOD)		+= gx-suspmod.o
 obj-$(CONFIG_X86_SPEEDSTEP_ICH)		+= speedstep-ich.o
-obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO)	+= speedstep-centrino.o
 obj-$(CONFIG_X86_SPEEDSTEP_LIB)		+= speedstep-lib.o
 obj-$(CONFIG_X86_SPEEDSTEP_SMI)		+= speedstep-smi.o
 obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o
+obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO)	+= speedstep-centrino.o
 obj-$(CONFIG_X86_P4_CLOCKMOD)		+= p4-clockmod.o
 obj-$(CONFIG_X86_CPUFREQ_NFORCE2)	+= cpufreq-nforce2.o
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 57c880bf0bd6..10baa3501ed3 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -1,9 +1,10 @@
 /*
- * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.3 $)
+ * acpi-cpufreq.c - ACPI Processor P-States Driver ($Revision: 1.4 $)
  *
  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
  *  Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
+ *  Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -27,202 +28,370 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
 #include <linux/cpufreq.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
 #include <linux/compiler.h>
-#include <linux/sched.h>	/* current */
 #include <linux/dmi.h>
-#include <asm/io.h>
-#include <asm/delay.h>
-#include <asm/uaccess.h>
 
 #include <linux/acpi.h>
 #include <acpi/processor.h>
 
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/delay.h>
+#include <asm/uaccess.h>
+
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "acpi-cpufreq", msg)
 
 MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
+enum {
+	UNDEFINED_CAPABLE = 0,
+	SYSTEM_INTEL_MSR_CAPABLE,
+	SYSTEM_IO_CAPABLE,
+};
+
+#define INTEL_MSR_RANGE		(0xffff)
+#define CPUID_6_ECX_APERFMPERF_CAPABILITY	(0x1)
 
-struct cpufreq_acpi_io {
+struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
-	unsigned int resume;
+	unsigned int max_freq;
+	unsigned int resume;
+	unsigned int cpu_feature;
 };
 
-static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
+static struct acpi_cpufreq_data *drv_data[NR_CPUS];
 static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
 
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
 
-static int
-acpi_processor_write_port(
-	u16	port,
-	u8	bit_width,
-	u32	value)
+static int check_est_cpu(unsigned int cpuid)
 {
-	if (bit_width <= 8) {
-		outb(value, port);
-	} else if (bit_width <= 16) {
-		outw(value, port);
-	} else if (bit_width <= 32) {
-		outl(value, port);
-	} else {
-		return -ENODEV;
+	struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
+
+	if (cpu->x86_vendor != X86_VENDOR_INTEL ||
+	    !cpu_has(cpu, X86_FEATURE_EST))
+		return 0;
+
+	return 1;
+}
+
+static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
+{
+	struct acpi_processor_performance *perf;
+	int i;
+
+	perf = data->acpi_data;
+
+	for (i=0; i<perf->state_count; i++) {
+		if (value == perf->states[i].status)
+			return data->freq_table[i].frequency;
 	}
 	return 0;
 }
 
-static int
-acpi_processor_read_port(
-	u16	port,
-	u8	bit_width,
-	u32	*ret)
+static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 {
-	*ret = 0;
-	if (bit_width <= 8) {
-		*ret = inb(port);
-	} else if (bit_width <= 16) {
-		*ret = inw(port);
-	} else if (bit_width <= 32) {
-		*ret = inl(port);
-	} else {
-		return -ENODEV;
+	int i;
+	struct acpi_processor_performance *perf;
+
+	msr &= INTEL_MSR_RANGE;
+	perf = data->acpi_data;
+
+	for (i=0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		if (msr == perf->states[data->freq_table[i].index].status)
+			return data->freq_table[i].frequency;
 	}
-	return 0;
+	return data->freq_table[0].frequency;
 }
 
-static int
-acpi_processor_set_performance (
-	struct cpufreq_acpi_io	*data,
-	unsigned int		cpu,
-	int			state)
+static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 {
-	u16			port = 0;
-	u8			bit_width = 0;
-	int			i = 0;
-	int			ret = 0;
-	u32			value = 0;
-	int			retval;
-	struct acpi_processor_performance	*perf;
-
-	dprintk("acpi_processor_set_performance\n");
-
-	retval = 0;
-	perf = data->acpi_data;
-	if (state == perf->state) {
-		if (unlikely(data->resume)) {
-			dprintk("Called after resume, resetting to P%d\n", state);
-			data->resume = 0;
-		} else {
-			dprintk("Already at target state (P%d)\n", state);
-			return (retval);
-		}
+	switch (data->cpu_feature) {
+	case SYSTEM_INTEL_MSR_CAPABLE:
+		return extract_msr(val, data);
+	case SYSTEM_IO_CAPABLE:
+		return extract_io(val, data);
+	default:
+		return 0;
 	}
+}
+
+struct msr_addr {
+	u32 reg;
+};
 
-	dprintk("Transitioning from P%d to P%d\n", perf->state, state);
+struct io_addr {
+	u16 port;
+	u8 bit_width;
+};
 
-	/*
-	 * First we write the target state's 'control' value to the
-	 * control_register.
-	 */
+typedef union {
+	struct msr_addr msr;
+	struct io_addr io;
+} drv_addr_union;
 
-	port = perf->control_register.address;
-	bit_width = perf->control_register.bit_width;
-	value = (u32) perf->states[state].control;
+struct drv_cmd {
+	unsigned int type;
+	cpumask_t mask;
+	drv_addr_union addr;
+	u32 val;
+};
 
-	dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
+static void do_drv_read(struct drv_cmd *cmd)
+{
+	u32 h;
+
+	switch (cmd->type) {
+	case SYSTEM_INTEL_MSR_CAPABLE:
+		rdmsr(cmd->addr.msr.reg, cmd->val, h);
+		break;
+	case SYSTEM_IO_CAPABLE:
+		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
+				  &cmd->val,
+				  (u32)cmd->addr.io.bit_width);
+		break;
+	default:
+		break;
+	}
+}
 
-	ret = acpi_processor_write_port(port, bit_width, value);
-	if (ret) {
-		dprintk("Invalid port width 0x%04x\n", bit_width);
-		return (ret);
+static void do_drv_write(struct drv_cmd *cmd)
+{
+	u32 h = 0;
+
+	switch (cmd->type) {
+	case SYSTEM_INTEL_MSR_CAPABLE:
+		wrmsr(cmd->addr.msr.reg, cmd->val, h);
+		break;
+	case SYSTEM_IO_CAPABLE:
+		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
+				   cmd->val,
+				   (u32)cmd->addr.io.bit_width);
+		break;
+	default:
+		break;
 	}
+}
 
+static void drv_read(struct drv_cmd *cmd)
+{
+	cpumask_t saved_mask = current->cpus_allowed;
+	cmd->val = 0;
+
+	set_cpus_allowed(current, cmd->mask);
+	do_drv_read(cmd);
+	set_cpus_allowed(current, saved_mask);
+}
+
+static void drv_write(struct drv_cmd *cmd)
+{
+	cpumask_t saved_mask = current->cpus_allowed;
+	unsigned int i;
+
+	for_each_cpu_mask(i, cmd->mask) {
+		set_cpus_allowed(current, cpumask_of_cpu(i));
+		do_drv_write(cmd);
+	}
+
+	set_cpus_allowed(current, saved_mask);
+	return;
+}
+
+static u32 get_cur_val(cpumask_t mask)
+{
+	struct acpi_processor_performance *perf;
+	struct drv_cmd cmd;
+
+	if (unlikely(cpus_empty(mask)))
+		return 0;
+
+	switch (drv_data[first_cpu(mask)]->cpu_feature) {
+	case SYSTEM_INTEL_MSR_CAPABLE:
+		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
+		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
+		break;
+	case SYSTEM_IO_CAPABLE:
+		cmd.type = SYSTEM_IO_CAPABLE;
+		perf = drv_data[first_cpu(mask)]->acpi_data;
+		cmd.addr.io.port = perf->control_register.address;
+		cmd.addr.io.bit_width = perf->control_register.bit_width;
+		break;
+	default:
+		return 0;
+	}
+
+	cmd.mask = mask;
+
+	drv_read(&cmd);
+
+	dprintk("get_cur_val = %u\n", cmd.val);
+
+	return cmd.val;
+}
+
+/*
+ * Return the measured active (C0) frequency on this CPU since last call
+ * to this function.
+ * Input: cpu number
+ * Return: Average CPU frequency in terms of max frequency (zero on error)
+ *
+ * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
+ * over a period of time, while CPU is in C0 state.
+ * IA32_MPERF counts at the rate of max advertised frequency
+ * IA32_APERF counts at the rate of actual CPU frequency
+ * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
+ * no meaning should be associated with absolute values of these MSRs.
+ */
+static unsigned int get_measured_perf(unsigned int cpu)
+{
+	union {
+		struct {
+			u32 lo;
+			u32 hi;
+		} split;
+		u64 whole;
+	} aperf_cur, mperf_cur;
+
+	cpumask_t saved_mask;
+	unsigned int perf_percent;
+	unsigned int retval;
+
+	saved_mask = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(cpu));
+	if (get_cpu() != cpu) {
+		/* We were not able to run on requested processor */
+		put_cpu();
+		return 0;
+	}
+
+	rdmsr(MSR_IA32_APERF, aperf_cur.split.lo, aperf_cur.split.hi);
+	rdmsr(MSR_IA32_MPERF, mperf_cur.split.lo, mperf_cur.split.hi);
+
+	wrmsr(MSR_IA32_APERF, 0,0);
+	wrmsr(MSR_IA32_MPERF, 0,0);
+
+#ifdef __i386__
 	/*
-	 * Assume the write went through when acpi_pstate_strict is not used.
-	 * As read status_register is an expensive operation and there
-	 * are no specific error cases where an IO port write will fail.
+	 * We dont want to do 64 bit divide with 32 bit kernel
+	 * Get an approximate value. Return failure in case we cannot get
+	 * an approximate value.
 	 */
-	if (acpi_pstate_strict) {
-		/* Then we read the 'status_register' and compare the value
-		 * with the target state's 'status' to make sure the
-		 * transition was successful.
-		 * Note that we'll poll for up to 1ms (100 cycles of 10us)
-		 * before giving up.
-		 */
-
-		port = perf->status_register.address;
-		bit_width = perf->status_register.bit_width;
+	if (unlikely(aperf_cur.split.hi || mperf_cur.split.hi)) {
+		int shift_count;
+		u32 h;
+
+		h = max_t(u32, aperf_cur.split.hi, mperf_cur.split.hi);
+		shift_count = fls(h);
+
+		aperf_cur.whole >>= shift_count;
+		mperf_cur.whole >>= shift_count;
+	}
 
-		dprintk("Looking for 0x%08x from port 0x%04x\n",
-			(u32) perf->states[state].status, port);
-
-		for (i = 0; i < 100; i++) {
-			ret = acpi_processor_read_port(port, bit_width, &value);
-			if (ret) {
-				dprintk("Invalid port width 0x%04x\n", bit_width);
-				return (ret);
-			}
-			if (value == (u32) perf->states[state].status)
-				break;
-			udelay(10);
-		}
-	} else {
-		value = (u32) perf->states[state].status;
+	if (((unsigned long)(-1) / 100) < aperf_cur.split.lo) {
+		int shift_count = 7;
+		aperf_cur.split.lo >>= shift_count;
+		mperf_cur.split.lo >>= shift_count;
+	}
+
+	if (aperf_cur.split.lo && mperf_cur.split.lo)
+		perf_percent = (aperf_cur.split.lo * 100) / mperf_cur.split.lo;
+	else
+		perf_percent = 0;
+
+#else
+	if (unlikely(((unsigned long)(-1) / 100) < aperf_cur.whole)) {
+		int shift_count = 7;
+		aperf_cur.whole >>= shift_count;
+		mperf_cur.whole >>= shift_count;
 	}
 
-	if (unlikely(value != (u32) perf->states[state].status)) {
-		printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
-		retval = -ENODEV;
-		return (retval);
+	if (aperf_cur.whole && mperf_cur.whole)
+		perf_percent = (aperf_cur.whole * 100) / mperf_cur.whole;
+	else
+		perf_percent = 0;
+
+#endif
+
+	retval = drv_data[cpu]->max_freq * perf_percent / 100;
+
+	put_cpu();
+	set_cpus_allowed(current, saved_mask);
+
+	dprintk("cpu %d: performance percent %d\n", cpu, perf_percent);
+	return retval;
+}
+
+static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
+{
+	struct acpi_cpufreq_data *data = drv_data[cpu];
+	unsigned int freq;
+
+	dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
+
+	if (unlikely(data == NULL ||
+		     data->acpi_data == NULL || data->freq_table == NULL)) {
+		return 0;
 	}
 
-	dprintk("Transition successful after %d microseconds\n", i * 10);
+	freq = extract_freq(get_cur_val(cpumask_of_cpu(cpu)), data);
+	dprintk("cur freq = %u\n", freq);
 
-	perf->state = state;
-	return (retval);
+	return freq;
 }
 
+static unsigned int check_freqs(cpumask_t mask, unsigned int freq,
+				struct acpi_cpufreq_data *data)
+{
+	unsigned int cur_freq;
+	unsigned int i;
+
+	for (i=0; i<100; i++) {
+		cur_freq = extract_freq(get_cur_val(mask), data);
+		if (cur_freq == freq)
+			return 1;
+		udelay(10);
+	}
+	return 0;
+}
 
-static int
-acpi_cpufreq_target (
-	struct cpufreq_policy   *policy,
-	unsigned int target_freq,
-	unsigned int relation)
+static int acpi_cpufreq_target(struct cpufreq_policy *policy,
+			       unsigned int target_freq, unsigned int relation)
 {
-	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
 	struct acpi_processor_performance *perf;
 	struct cpufreq_freqs freqs;
 	cpumask_t online_policy_cpus;
-	cpumask_t saved_mask;
-	cpumask_t set_mask;
-	cpumask_t covered_cpus;
-	unsigned int cur_state = 0;
-	unsigned int next_state = 0;
-	unsigned int result = 0;
-	unsigned int j;
-	unsigned int tmp;
+	struct drv_cmd cmd;
+	unsigned int msr;
+	unsigned int next_state = 0; /* Index into freq_table */
+	unsigned int next_perf_state = 0; /* Index into perf table */
+	unsigned int i;
+	int result = 0;
 
-	dprintk("acpi_cpufreq_setpolicy\n");
+	dprintk("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);
 
-	result = cpufreq_frequency_table_target(policy,
-			data->freq_table,
-			target_freq,
-			relation,
-			&next_state);
-	if (unlikely(result))
-		return (result);
+	if (unlikely(data == NULL ||
+	     data->acpi_data == NULL || data->freq_table == NULL)) {
+		return -ENODEV;
+	}
 
 	perf = data->acpi_data;
-	cur_state = perf->state;
-	freqs.old = data->freq_table[cur_state].frequency;
-	freqs.new = data->freq_table[next_state].frequency;
+	result = cpufreq_frequency_table_target(policy,
+						data->freq_table,
+						target_freq,
+						relation, &next_state);
+	if (unlikely(result))
+		return -ENODEV;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	/* cpufreq holds the hotplug lock, so we are safe from here on */
@@ -231,106 +400,85 @@ acpi_cpufreq_target (
 	online_policy_cpus = policy->cpus;
 #endif
 
-	for_each_cpu_mask(j, online_policy_cpus) {
-		freqs.cpu = j;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	next_perf_state = data->freq_table[next_state].index;
+	if (perf->state == next_perf_state) {
+		if (unlikely(data->resume)) {
+			dprintk("Called after resume, resetting to P%d\n",
+				next_perf_state);
+			data->resume = 0;
+		} else {
+			dprintk("Already at target state (P%d)\n",
+				next_perf_state);
+			return 0;
+		}
 	}
 
-	/*
-	 * We need to call driver->target() on all or any CPU in
-	 * policy->cpus, depending on policy->shared_type.
-	 */
-	saved_mask = current->cpus_allowed;
-	cpus_clear(covered_cpus);
-	for_each_cpu_mask(j, online_policy_cpus) {
-		/*
-		 * Support for SMP systems.
-		 * Make sure we are running on CPU that wants to change freq
-		 */
-		cpus_clear(set_mask);
-		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			cpus_or(set_mask, set_mask, online_policy_cpus);
-		else
-			cpu_set(j, set_mask);
-
-		set_cpus_allowed(current, set_mask);
-		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
-			dprintk("couldn't limit to CPUs in this domain\n");
-			result = -EAGAIN;
-			break;
-		}
+	switch (data->cpu_feature) {
+	case SYSTEM_INTEL_MSR_CAPABLE:
+		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
+		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
+		msr =
+		    (u32) perf->states[next_perf_state].
+		    control & INTEL_MSR_RANGE;
+		cmd.val = get_cur_val(online_policy_cpus);
+		cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
+		break;
+	case SYSTEM_IO_CAPABLE:
+		cmd.type = SYSTEM_IO_CAPABLE;
+		cmd.addr.io.port = perf->control_register.address;
+		cmd.addr.io.bit_width = perf->control_register.bit_width;
+		cmd.val = (u32) perf->states[next_perf_state].control;
+		break;
+	default:
+		return -ENODEV;
+	}
 
-		result = acpi_processor_set_performance (data, j, next_state);
-		if (result) {
-			result = -EAGAIN;
-			break;
-		}
+	cpus_clear(cmd.mask);
 
-		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
-			break;
-
-		cpu_set(j, covered_cpus);
-	}
+	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
+		cmd.mask = online_policy_cpus;
+	else
+		cpu_set(policy->cpu, cmd.mask);
 
-	for_each_cpu_mask(j, online_policy_cpus) {
-		freqs.cpu = j;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	freqs.old = perf->states[perf->state].core_frequency * 1000;
+	freqs.new = data->freq_table[next_state].frequency;
+	for_each_cpu_mask(i, cmd.mask) {
+		freqs.cpu = i;
+		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	}
 
-	if (unlikely(result)) {
-		/*
-		 * We have failed halfway through the frequency change.
-		 * We have sent callbacks to online_policy_cpus and
-		 * acpi_processor_set_performance() has been called on
-		 * coverd_cpus. Best effort undo..
-		 */
-
-		if (!cpus_empty(covered_cpus)) {
-			for_each_cpu_mask(j, covered_cpus) {
-				policy->cpu = j;
-				acpi_processor_set_performance (data,
-						j,
-						cur_state);
-			}
-		}
+	drv_write(&cmd);
 
-		tmp = freqs.new;
-		freqs.new = freqs.old;
-		freqs.old = tmp;
-		for_each_cpu_mask(j, online_policy_cpus) {
-			freqs.cpu = j;
-			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	if (acpi_pstate_strict) {
+		if (!check_freqs(cmd.mask, freqs.new, data)) {
+			dprintk("acpi_cpufreq_target failed (%d)\n",
+				policy->cpu);
+			return -EAGAIN;
 		}
 	}
 
-	set_cpus_allowed(current, saved_mask);
-	return (result);
-}
+	for_each_cpu_mask(i, cmd.mask) {
+		freqs.cpu = i;
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	}
+	perf->state = next_perf_state;
 
+	return result;
+}
 
-static int
-acpi_cpufreq_verify (
-	struct cpufreq_policy   *policy)
+static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
 {
-	unsigned int result = 0;
-	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
 
 	dprintk("acpi_cpufreq_verify\n");
 
-	result = cpufreq_frequency_table_verify(policy,
-			data->freq_table);
-
-	return (result);
+	return cpufreq_frequency_table_verify(policy, data->freq_table);
 }
 
-
 static unsigned long
-acpi_cpufreq_guess_freq (
-	struct cpufreq_acpi_io	*data,
-	unsigned int		cpu)
+acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 {
 	struct acpi_processor_performance *perf = data->acpi_data;
 
 	if (cpu_khz) {
 		/* search the closest match to cpu_khz */
@@ -338,16 +486,16 @@ acpi_cpufreq_guess_freq (
 		unsigned long freq;
 		unsigned long freqn = perf->states[0].core_frequency * 1000;
 
-		for (i = 0; i < (perf->state_count - 1); i++) {
+		for (i=0; i<(perf->state_count-1); i++) {
 			freq = freqn;
 			freqn = perf->states[i+1].core_frequency * 1000;
 			if ((2 * cpu_khz) > (freqn + freq)) {
 				perf->state = i;
-				return (freq);
+				return freq;
 			}
 		}
-		perf->state = perf->state_count - 1;
-		return (freqn);
+		perf->state = perf->state_count-1;
+		return freqn;
 	} else {
 		/* assume CPU is at P0... */
 		perf->state = 0;
@@ -355,7 +503,6 @@ acpi_cpufreq_guess_freq (
 	}
 }
 
-
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -364,30 +511,34 @@ acpi_cpufreq_guess_freq (
  * do _PDC and _PSD and find out the processor dependency for the
  * actual init that will happen later...
  */
-static int acpi_cpufreq_early_init_acpi(void)
+static int acpi_cpufreq_early_init(void)
 {
 	struct acpi_processor_performance *data;
-	unsigned int i, j;
+	cpumask_t covered;
+	unsigned int i, j;
 
 	dprintk("acpi_cpufreq_early_init\n");
 
 	for_each_possible_cpu(i) {
 		data = kzalloc(sizeof(struct acpi_processor_performance),
 			       GFP_KERNEL);
 		if (!data) {
-			for_each_possible_cpu(j) {
+			for_each_cpu_mask(j, covered) {
 				kfree(acpi_perf_data[j]);
 				acpi_perf_data[j] = NULL;
 			}
-			return (-ENOMEM);
+			return -ENOMEM;
 		}
 		acpi_perf_data[i] = data;
+		cpu_set(i, covered);
 	}
 
 	/* Do initialization in ACPI core */
-	return acpi_processor_preregister_performance(acpi_perf_data);
+	acpi_processor_preregister_performance(acpi_perf_data);
+	return 0;
 }
 
+#ifdef CONFIG_SMP
 /*
  * Some BIOSes do SW_ANY coordination internally, either set it up in hw
  * or do it in BIOS firmware and won't inform about it to OS. If not
@@ -414,39 +565,42 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
 	},
 	{ }
 };
+#endif
 
-static int
-acpi_cpufreq_cpu_init (
-	struct cpufreq_policy   *policy)
+static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
 	unsigned int i;
-	unsigned int cpu = policy->cpu;
-	struct cpufreq_acpi_io	*data;
-	unsigned int result = 0;
+	unsigned int valid_states = 0;
+	unsigned int cpu = policy->cpu;
+	struct acpi_cpufreq_data *data;
+	unsigned int result = 0;
 	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
 	struct acpi_processor_performance *perf;
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
 	if (!acpi_perf_data[cpu])
-		return (-ENODEV);
+		return -ENODEV;
 
-	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
 	if (!data)
-		return (-ENOMEM);
+		return -ENOMEM;
 
 	data->acpi_data = acpi_perf_data[cpu];
-	acpi_io_data[cpu] = data;
+	drv_data[cpu] = data;
 
-	result = acpi_processor_register_performance(data->acpi_data, cpu);
+	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
+		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
 
+	result = acpi_processor_register_performance(data->acpi_data, cpu);
 	if (result)
 		goto err_free;
 
 	perf = data->acpi_data;
 	policy->shared_type = perf->shared_type;
+
 	/*
 	 * Will let policy->cpus know about dependency only when software
 	 * coordination is required.
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
@@ -462,10 +616,6 @@ acpi_cpufreq_cpu_init (
 	}
 #endif
 
-	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
-		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
-	}
-
 	/* capability check */
 	if (perf->state_count <= 1) {
 		dprintk("No P-States\n");
@@ -473,17 +623,33 @@ acpi_cpufreq_cpu_init (
 		goto err_unreg;
 	}
 
-	if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
-	    (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
-		dprintk("Unsupported address space [%d, %d]\n",
-			(u32) (perf->control_register.space_id),
-			(u32) (perf->status_register.space_id));
+	if (perf->control_register.space_id != perf->status_register.space_id) {
 		result = -ENODEV;
 		goto err_unreg;
 	}
 
-	/* alloc freq_table */
-	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
+	switch (perf->control_register.space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+		dprintk("SYSTEM IO addr space\n");
+		data->cpu_feature = SYSTEM_IO_CAPABLE;
+		break;
+	case ACPI_ADR_SPACE_FIXED_HARDWARE:
+		dprintk("HARDWARE addr space\n");
+		if (!check_est_cpu(cpu)) {
+			result = -ENODEV;
+			goto err_unreg;
+		}
+		data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+		break;
+	default:
+		dprintk("Unknown addr space %d\n",
+			(u32) (perf->control_register.space_id));
+		result = -ENODEV;
+		goto err_unreg;
+	}
+
+	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+		    (perf->state_count+1), GFP_KERNEL);
 	if (!data->freq_table) {
 		result = -ENOMEM;
 		goto err_unreg;
@@ -492,129 +658,141 @@ acpi_cpufreq_cpu_init (
 	/* detect transition latency */
 	policy->cpuinfo.transition_latency = 0;
 	for (i=0; i<perf->state_count; i++) {
-		if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
-			policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
+		if ((perf->states[i].transition_latency * 1000) >
+		    policy->cpuinfo.transition_latency)
+			policy->cpuinfo.transition_latency =
+			    perf->states[i].transition_latency * 1000;
 	}
 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 
-	/* The current speed is unknown and not detectable by ACPI...  */
-	policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
-
+	data->max_freq = perf->states[0].core_frequency * 1000;
 	/* table init */
-	for (i=0; i<=perf->state_count; i++)
-	{
-		data->freq_table[i].index = i;
-		if (i<perf->state_count)
-			data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
-		else
-			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
+	for (i=0; i<perf->state_count; i++) {
+		if (i>0 && perf->states[i].core_frequency ==
+		    perf->states[i-1].core_frequency)
+			continue;
+
+		data->freq_table[valid_states].index = i;
+		data->freq_table[valid_states].frequency =
+		    perf->states[i].core_frequency * 1000;
+		valid_states++;
 	}
+	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+	perf->state = 0;
 
 	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
-	if (result) {
+	if (result)
 		goto err_freqfree;
+
+	switch (perf->control_register.space_id) {
+	case ACPI_ADR_SPACE_SYSTEM_IO:
+		/* Current speed is unknown and not detectable by IO port */
+		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
+		break;
+	case ACPI_ADR_SPACE_FIXED_HARDWARE:
+		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
+		policy->cur = get_cur_freq_on_cpu(cpu);
+		break;
+	default:
+		break;
 	}
 
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
-	printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
-	       cpu);
+	/* Check for APERF/MPERF support in hardware */
+	if (c->x86_vendor == X86_VENDOR_INTEL && c->cpuid_level >= 6) {
+		unsigned int ecx;
+		ecx = cpuid_ecx(6);
+		if (ecx & CPUID_6_ECX_APERFMPERF_CAPABILITY)
+			acpi_cpufreq_driver.getavg = get_measured_perf;
+	}
+
+	dprintk("CPU%u - ACPI performance management activated.\n", cpu);
 	for (i = 0; i < perf->state_count; i++)
 		dprintk("     %cP%d: %d MHz, %d mW, %d uS\n",
-			(i == perf->state?'*':' '), i,
+			(i == perf->state ? '*' : ' '), i,
 			(u32) perf->states[i].core_frequency,
 			(u32) perf->states[i].power,
 			(u32) perf->states[i].transition_latency);
 
 	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
 
 	/*
 	 * the first call to ->target() should result in us actually
 	 * writing something to the appropriate registers.
 	 */
 	data->resume = 1;
-
-	return (result);
 
- err_freqfree:
+	return result;
+
+err_freqfree:
 	kfree(data->freq_table);
- err_unreg:
+err_unreg:
 	acpi_processor_unregister_performance(perf, cpu);
- err_free:
+err_free:
 	kfree(data);
-	acpi_io_data[cpu] = NULL;
+	drv_data[cpu] = NULL;
 
-	return (result);
+	return result;
 }
 
-
-static int
-acpi_cpufreq_cpu_exit (
-	struct cpufreq_policy   *policy)
+static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
+	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
 
 	dprintk("acpi_cpufreq_cpu_exit\n");
 
 	if (data) {
 		cpufreq_frequency_table_put_attr(policy->cpu);
-		acpi_io_data[policy->cpu] = NULL;
-		acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
+		drv_data[policy->cpu] = NULL;
+		acpi_processor_unregister_performance(data->acpi_data,
+						      policy->cpu);
 		kfree(data);
 	}
 
-	return (0);
+	return 0;
 }
 
-static int
-acpi_cpufreq_resume (
-	struct cpufreq_policy   *policy)
+static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 {
-	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
-
+	struct acpi_cpufreq_data *data = drv_data[policy->cpu];
 
 	dprintk("acpi_cpufreq_resume\n");
 
 	data->resume = 1;
 
-	return (0);
+	return 0;
 }
 
-
-static struct freq_attr* acpi_cpufreq_attr[] = {
+static struct freq_attr *acpi_cpufreq_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
 	NULL,
 };
 
 static struct cpufreq_driver acpi_cpufreq_driver = {
 	.verify		= acpi_cpufreq_verify,
 	.target		= acpi_cpufreq_target,
 	.init		= acpi_cpufreq_cpu_init,
 	.exit		= acpi_cpufreq_cpu_exit,
 	.resume		= acpi_cpufreq_resume,
 	.name		= "acpi-cpufreq",
 	.owner		= THIS_MODULE,
 	.attr		= acpi_cpufreq_attr,
 };
 
-
-static int __init
-acpi_cpufreq_init (void)
+static int __init acpi_cpufreq_init(void)
 {
 	dprintk("acpi_cpufreq_init\n");
 
-	acpi_cpufreq_early_init_acpi();
+	acpi_cpufreq_early_init();
 
 	return cpufreq_register_driver(&acpi_cpufreq_driver);
 }
 
-
-static void __exit
-acpi_cpufreq_exit (void)
+static void __exit acpi_cpufreq_exit(void)
 {
 	unsigned int i;
 	dprintk("acpi_cpufreq_exit\n");
 
 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
@@ -627,7 +805,9 @@ acpi_cpufreq_exit (void)
 }
 
 module_param(acpi_pstate_strict, uint, 0644);
-MODULE_PARM_DESC(acpi_pstate_strict, "value 0 or non-zero. non-zero -> strict ACPI checks are performed during frequency changes.");
+MODULE_PARM_DESC(acpi_pstate_strict,
+	"value 0 or non-zero. non-zero -> strict ACPI checks are "
+	"performed during frequency changes.");
 
 late_initcall(acpi_cpufreq_init);
 module_exit(acpi_cpufreq_exit);
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index 92afa3bc84f1..6667e9cceb9f 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -447,7 +447,6 @@ static int __init cpufreq_gx_init(void)
 	int ret;
 	struct gxfreq_params *params;
 	struct pci_dev *gx_pci;
-	u32 class_rev;
 
 	/* Test if we have the right hardware */
 	if ((gx_pci = gx_detect_chipset()) == NULL)
@@ -472,8 +471,7 @@ static int __init cpufreq_gx_init(void)
 	pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2));
 	pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration));
 	pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration));
-	pci_read_config_dword(params->cs55x0, PCI_CLASS_REVISION, &class_rev);
-	params->pci_rev = class_rev && 0xff;
+	pci_read_config_byte(params->cs55x0, PCI_REVISION_ID, &params->pci_rev);
 
 	if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) {
 		kfree(params);
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index 7233abe5d695..e940e00b96c9 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -52,6 +52,10 @@
 #define CPU_EZRA_T	4
 #define CPU_NEHEMIAH	5
 
+/* Flags */
+#define USE_ACPI_C3		(1 << 1)
+#define USE_NORTHBRIDGE		(1 << 2)
+
 static int cpu_model;
 static unsigned int numscales=16;
 static unsigned int fsb;
@@ -68,7 +72,7 @@ static unsigned int minmult, maxmult;
 static int can_scale_voltage;
 static struct acpi_processor *pr = NULL;
 static struct acpi_processor_cx *cx = NULL;
-static int port22_en;
+static u8 longhaul_flags;
 
 /* Module parameters */
 static int scale_voltage;
@@ -80,7 +84,6 @@ static int ignore_latency;
80/* Clock ratios multiplied by 10 */ 84/* Clock ratios multiplied by 10 */
81static int clock_ratio[32]; 85static int clock_ratio[32];
82static int eblcr_table[32]; 86static int eblcr_table[32];
83static unsigned int highest_speed, lowest_speed; /* kHz */
84static int longhaul_version; 87static int longhaul_version;
85static struct cpufreq_frequency_table *longhaul_table; 88static struct cpufreq_frequency_table *longhaul_table;
86 89
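Note: replacing the single port22_en int with a flags byte lets the driver record the transition method as independent bits. A minimal sketch of the selection logic that follows further down (helper name hypothetical, kernel context assumed):

	#define USE_ACPI_C3     (1 << 1)	/* idle via the ACPI C3 port read */
	#define USE_NORTHBRIDGE (1 << 2)	/* gate arbiters via port 0x22    */

	static u8 longhaul_flags;

	static void choose_method(int c3_usable, int nb_friendly)
	{
		if (c3_usable)
			longhaul_flags |= USE_ACPI_C3;
		else if (nb_friendly)
			longhaul_flags |= USE_NORTHBRIDGE;
	}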
@@ -178,7 +181,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
178 safe_halt(); 181 safe_halt();
179 /* Change frequency on next halt or sleep */ 182 /* Change frequency on next halt or sleep */
180 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val); 183 wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
181 if (port22_en) { 184 if (!cx_address) {
182 ACPI_FLUSH_CPU_CACHE(); 185 ACPI_FLUSH_CPU_CACHE();
183 /* Invoke C1 */ 186 /* Invoke C1 */
184 halt(); 187 halt();
@@ -189,7 +192,6 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
189 /* Dummy op - must do something useless after P_LVL3 read */ 192 /* Dummy op - must do something useless after P_LVL3 read */
190 t = inl(acpi_fadt.xpm_tmr_blk.address); 193 t = inl(acpi_fadt.xpm_tmr_blk.address);
191 } 194 }
192
193 /* Disable bus ratio bit */ 195 /* Disable bus ratio bit */
194 local_irq_disable(); 196 local_irq_disable();
195 longhaul.bits.RevisionKey = longhaul.bits.RevisionID; 197 longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
@@ -243,15 +245,14 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
243 outb(0xFF,0xA1); /* Overkill */ 245 outb(0xFF,0xA1); /* Overkill */
244 outb(0xFE,0x21); /* TMR0 only */ 246 outb(0xFE,0x21); /* TMR0 only */
245 247
246 if (pr->flags.bm_control) { 248 if (longhaul_flags & USE_NORTHBRIDGE) {
249 /* Disable AGP and PCI arbiters */
250 outb(3, 0x22);
251 } else if ((pr != NULL) && pr->flags.bm_control) {
247 /* Disable bus master arbitration */ 252 /* Disable bus master arbitration */
248 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, 253 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1,
249 ACPI_MTX_DO_NOT_LOCK); 254 ACPI_MTX_DO_NOT_LOCK);
250 } else if (port22_en) {
251 /* Disable AGP and PCI arbiters */
252 outb(3, 0x22);
253 } 255 }
254
255 switch (longhaul_version) { 256 switch (longhaul_version) {
256 257
257 /* 258 /*
@@ -278,22 +279,25 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
278 * to work in practice. 279 * to work in practice.
279 */ 280 */
280 case TYPE_POWERSAVER: 281 case TYPE_POWERSAVER:
281 /* Don't allow wakeup */ 282 if (longhaul_flags & USE_ACPI_C3) {
282 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, 283 /* Don't allow wakeup */
283 ACPI_MTX_DO_NOT_LOCK); 284 acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0,
284 do_powersaver(cx->address, clock_ratio_index); 285 ACPI_MTX_DO_NOT_LOCK);
286 do_powersaver(cx->address, clock_ratio_index);
287 } else {
288 do_powersaver(0, clock_ratio_index);
289 }
285 break; 290 break;
286 } 291 }
287 292
288 if (pr->flags.bm_control) { 293 if (longhaul_flags & USE_NORTHBRIDGE) {
294 /* Enable arbiters */
295 outb(0, 0x22);
296 } else if ((pr != NULL) && pr->flags.bm_control) {
289 /* Enable bus master arbitration */ 297 /* Enable bus master arbitration */
290 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, 298 acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0,
291 ACPI_MTX_DO_NOT_LOCK); 299 ACPI_MTX_DO_NOT_LOCK);
292 } else if (port22_en) {
293 /* Enable arbiters */
294 outb(0, 0x22);
295 } 300 }
296
297 outb(pic2_mask,0xA1); /* restore mask */ 301 outb(pic2_mask,0xA1); /* restore mask */
298 outb(pic1_mask,0x21); 302 outb(pic1_mask,0x21);
299 303
@@ -314,12 +318,12 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
314 318
315#define ROUNDING 0xf 319#define ROUNDING 0xf
316 320
317static int _guess(int guess) 321static int _guess(int guess, int mult)
318{ 322{
319 int target; 323 int target;
320 324
321 target = ((maxmult/10)*guess); 325 target = ((mult/10)*guess);
322 if (maxmult%10 != 0) 326 if (mult%10 != 0)
323 target += (guess/2); 327 target += (guess/2);
324 target += ROUNDING/2; 328 target += ROUNDING/2;
325 target &= ~ROUNDING; 329 target &= ~ROUNDING;
@@ -327,17 +331,17 @@ static int _guess(int guess)
327} 331}
328 332
329 333
330static int guess_fsb(void) 334static int guess_fsb(int mult)
331{ 335{
332 int speed = (cpu_khz/1000); 336 int speed = (cpu_khz/1000);
333 int i; 337 int i;
334 int speeds[3] = { 66, 100, 133 }; 338 int speeds[] = { 66, 100, 133, 200 };
335 339
336 speed += ROUNDING/2; 340 speed += ROUNDING/2;
337 speed &= ~ROUNDING; 341 speed &= ~ROUNDING;
338 342
339 for (i=0; i<3; i++) { 343 for (i=0; i<4; i++) {
340 if (_guess(speeds[i]) == speed) 344 if (_guess(speeds[i], mult) == speed)
341 return speeds[i]; 345 return speeds[i];
342 } 346 }
343 return 0; 347 return 0;
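Note: _guess() scales a candidate FSB by the multiplier (stored times 10) and snaps the product to a 16 MHz grid; guess_fsb() then picks the candidate whose rounded product matches the rounded measured speed. A worked restatement with made-up numbers:

	#define ROUNDING 0xf

	/* mult is the multiplier times 10, e.g. 135 for a 13.5x part */
	static int guess_one(int fsb, int mult)
	{
		int target = (mult / 10) * fsb;
		if (mult % 10 != 0)
			target += fsb / 2;	/* add the missing .5x     */
		target += ROUNDING / 2;
		return target & ~ROUNDING;	/* snap to the 16 MHz grid */
	}
	/* guess_one(133, 135) == 1792; a 1795 MHz cpu_khz reading also
	 * rounds to (1795 + 7) & ~0xf == 1792, so 133 is returned.      */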
@@ -354,9 +358,7 @@ static int __init longhaul_get_ranges(void)
354 130, 150, 160, 140, -1, 155, -1, 145 }; 358 130, 150, 160, 140, -1, 155, -1, 145 };
355 unsigned int j, k = 0; 359 unsigned int j, k = 0;
356 union msr_longhaul longhaul; 360 union msr_longhaul longhaul;
357 unsigned long lo, hi; 361 int mult = 0;
358 unsigned int eblcr_fsb_table_v1[] = { 66, 133, 100, -1 };
359 unsigned int eblcr_fsb_table_v2[] = { 133, 100, -1, 66 };
360 362
361 switch (longhaul_version) { 363 switch (longhaul_version) {
362 case TYPE_LONGHAUL_V1: 364 case TYPE_LONGHAUL_V1:
@@ -364,30 +366,18 @@ static int __init longhaul_get_ranges(void)
364 /* Ugh, Longhaul v1 didn't have the min/max MSRs. 366 /* Ugh, Longhaul v1 didn't have the min/max MSRs.
365 Assume min=3.0x & max = whatever we booted at. */ 367 Assume min=3.0x & max = whatever we booted at. */
366 minmult = 30; 368 minmult = 30;
367 maxmult = longhaul_get_cpu_mult(); 369 maxmult = mult = longhaul_get_cpu_mult();
368 rdmsr (MSR_IA32_EBL_CR_POWERON, lo, hi);
369 invalue = (lo & (1<<18|1<<19)) >>18;
370 if (cpu_model==CPU_SAMUEL || cpu_model==CPU_SAMUEL2)
371 fsb = eblcr_fsb_table_v1[invalue];
372 else
373 fsb = guess_fsb();
374 break; 370 break;
375 371
376 case TYPE_POWERSAVER: 372 case TYPE_POWERSAVER:
377 /* Ezra-T */ 373 /* Ezra-T */
378 if (cpu_model==CPU_EZRA_T) { 374 if (cpu_model==CPU_EZRA_T) {
375 minmult = 30;
379 rdmsrl (MSR_VIA_LONGHAUL, longhaul.val); 376 rdmsrl (MSR_VIA_LONGHAUL, longhaul.val);
380 invalue = longhaul.bits.MaxMHzBR; 377 invalue = longhaul.bits.MaxMHzBR;
381 if (longhaul.bits.MaxMHzBR4) 378 if (longhaul.bits.MaxMHzBR4)
382 invalue += 16; 379 invalue += 16;
383 maxmult=ezra_t_multipliers[invalue]; 380 maxmult = mult = ezra_t_multipliers[invalue];
384
385 invalue = longhaul.bits.MinMHzBR;
386 if (longhaul.bits.MinMHzBR4 == 1)
387 minmult = 30;
388 else
389 minmult = ezra_t_multipliers[invalue];
390 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
391 break; 381 break;
392 } 382 }
393 383
@@ -407,21 +397,16 @@ static int __init longhaul_get_ranges(void)
407 * But it works, so we don't grumble. 397 * But it works, so we don't grumble.
408 */ 398 */
409 minmult=40; 399 minmult=40;
410 maxmult=longhaul_get_cpu_mult(); 400 maxmult = mult = longhaul_get_cpu_mult();
411
412 /* Starting with the 1.2GHz parts, theres a 200MHz bus. */
413 if ((cpu_khz/1000) > 1200)
414 fsb = 200;
415 else
416 fsb = eblcr_fsb_table_v2[longhaul.bits.MaxMHzFSB];
417 break; 401 break;
418 } 402 }
419 } 403 }
404 fsb = guess_fsb(mult);
420 405
421 dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n", 406 dprintk ("MinMult:%d.%dx MaxMult:%d.%dx\n",
422 minmult/10, minmult%10, maxmult/10, maxmult%10); 407 minmult/10, minmult%10, maxmult/10, maxmult%10);
423 408
424 if (fsb == -1) { 409 if (fsb == 0) {
425 printk (KERN_INFO PFX "Invalid (reserved) FSB!\n"); 410 printk (KERN_INFO PFX "Invalid (reserved) FSB!\n");
426 return -EINVAL; 411 return -EINVAL;
427 } 412 }
@@ -583,6 +568,10 @@ static int enable_arbiter_disable(void)
583 if (dev == NULL) { 568 if (dev == NULL) {
584 reg = 0x76; 569 reg = 0x76;
585 dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL); 570 dev = pci_find_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_862X_0, NULL);
571 /* Find CN400 V-Link host bridge */
572 if (dev == NULL)
573 dev = pci_find_device(PCI_VENDOR_ID_VIA, 0x7259, NULL);
574
586 } 575 }
587 if (dev != NULL) { 576 if (dev != NULL) {
588 /* Enable access to port 0x22 */ 577 /* Enable access to port 0x22 */
@@ -687,27 +676,32 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
687 /* Find ACPI data for processor */ 676 /* Find ACPI data for processor */
688 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, 677 acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX,
689 &longhaul_walk_callback, NULL, (void *)&pr); 678 &longhaul_walk_callback, NULL, (void *)&pr);
690 if (pr == NULL)
691 goto err_acpi;
692 679
693 if (longhaul_version == TYPE_POWERSAVER) { 680 /* Check ACPI support for C3 state */
694 /* Check ACPI support for C3 state */ 681 if ((pr != NULL) && (longhaul_version == TYPE_POWERSAVER)) {
695 cx = &pr->power.states[ACPI_STATE_C3]; 682 cx = &pr->power.states[ACPI_STATE_C3];
696 if (cx->address > 0 && 683 if (cx->address > 0 &&
697 (cx->latency <= 1000 || ignore_latency != 0) ) { 684 (cx->latency <= 1000 || ignore_latency != 0) ) {
685 longhaul_flags |= USE_ACPI_C3;
698 goto print_support_type; 686 goto print_support_type;
699 } 687 }
700 } 688 }
689 /* Check if northbridge is friendly */
690 if (enable_arbiter_disable()) {
691 longhaul_flags |= USE_NORTHBRIDGE;
692 goto print_support_type;
693 }
694
695 /* No ACPI C3 or we can't use it */
701 /* Check ACPI support for bus master arbiter disable */ 696 /* Check ACPI support for bus master arbiter disable */
702 if (!pr->flags.bm_control) { 697 if ((pr == NULL) || !(pr->flags.bm_control)) {
703 if (enable_arbiter_disable()) { 698 printk(KERN_ERR PFX
704 port22_en = 1; 699 "No ACPI support. Unsupported northbridge.\n");
705 } else { 700 return -ENODEV;
706 goto err_acpi;
707 }
708 } 701 }
702
709print_support_type: 703print_support_type:
710 if (!port22_en) { 704 if (!(longhaul_flags & USE_NORTHBRIDGE)) {
711 printk (KERN_INFO PFX "Using ACPI support.\n"); 705 printk (KERN_INFO PFX "Using ACPI support.\n");
712 } else { 706 } else {
713 printk (KERN_INFO PFX "Using northbridge support.\n"); 707 printk (KERN_INFO PFX "Using northbridge support.\n");
@@ -732,10 +726,6 @@ print_support_type:
732 cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu); 726 cpufreq_frequency_table_get_attr(longhaul_table, policy->cpu);
733 727
734 return 0; 728 return 0;
735
736err_acpi:
737 printk(KERN_ERR PFX "No ACPI support. No VT8601 or VT8623 northbridge. Aborting.\n");
738 return -ENODEV;
739} 729}
740 730
741static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy) 731static int __devexit longhaul_cpu_exit(struct cpufreq_policy *policy)
@@ -770,8 +760,8 @@ static int __init longhaul_init(void)
770 760
771#ifdef CONFIG_SMP 761#ifdef CONFIG_SMP
772 if (num_online_cpus() > 1) { 762 if (num_online_cpus() > 1) {
773 return -ENODEV;
774 printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n"); 763 printk(KERN_ERR PFX "More than 1 CPU detected, longhaul disabled.\n");
764 return -ENODEV;
775 } 765 }
776#endif 766#endif
777#ifdef CONFIG_X86_IO_APIC 767#ifdef CONFIG_X86_IO_APIC
@@ -783,8 +773,10 @@ static int __init longhaul_init(void)
783 switch (c->x86_model) { 773 switch (c->x86_model) {
784 case 6 ... 9: 774 case 6 ... 9:
785 return cpufreq_register_driver(&longhaul_driver); 775 return cpufreq_register_driver(&longhaul_driver);
776 case 10:
777 printk(KERN_ERR PFX "Use acpi-cpufreq driver for VIA C7\n");
786 default: 778 default:
787 printk (KERN_INFO PFX "Unknown VIA CPU. Contact davej@codemonkey.org.uk\n"); 779 ;;
788 } 780 }
789 781
790 return -ENODEV; 782 return -ENODEV;
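Note: two closing cleanups in this file: the SMP guard used to return before its printk, leaving the message as dead code, and the model switch leans on the GCC case-range extension, with model 10 (VIA C7) printing advice and deliberately falling into the default. A sketch of the dispatch shape (function name hypothetical):

	static int dispatch(int model)
	{
		switch (model) {
		case 6 ... 9:	/* GCC case-range extension: 6,7,8,9 */
			return 0;
		case 10:	/* no break: falls through on purpose */
		default:
			return -ENODEV;
		}
	}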
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 304d2eaa4a1b..bec50170b75a 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -163,29 +163,27 @@ static int cpufreq_p4_verify(struct cpufreq_policy *policy)
163 163
164static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) 164static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
165{ 165{
166 if ((c->x86 == 0x06) && (c->x86_model == 0x09)) { 166 if (c->x86 == 0x06) {
167 /* Pentium M (Banias) */ 167 if (cpu_has(c, X86_FEATURE_EST))
168 printk(KERN_WARNING PFX "Warning: Pentium M detected. " 168 printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. "
169 "The speedstep_centrino module offers voltage scaling" 169 "The acpi-cpufreq module offers voltage scaling"
170 " in addition of frequency scaling. You should use " 170 " in addition of frequency scaling. You should use "
171 "that instead of p4-clockmod, if possible.\n"); 171 "that instead of p4-clockmod, if possible.\n");
172 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM); 172 switch (c->x86_model) {
173 } 173 case 0x0E: /* Core */
174 174 case 0x0F: /* Core Duo */
175 if ((c->x86 == 0x06) && (c->x86_model == 0x0D)) { 175 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
176 /* Pentium M (Dothan) */ 176 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE);
177 printk(KERN_WARNING PFX "Warning: Pentium M detected. " 177 case 0x0D: /* Pentium M (Dothan) */
178 "The speedstep_centrino module offers voltage scaling" 178 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
179 " in addition of frequency scaling. You should use " 179 /* fall through */
180 "that instead of p4-clockmod, if possible.\n"); 180 case 0x09: /* Pentium M (Banias) */
181 /* on P-4s, the TSC runs with constant frequency independent whether 181 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
182 * throttling is active or not. */ 182 }
183 p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
184 return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
185 } 183 }
186 184
187 if (c->x86 != 0xF) { 185 if (c->x86 != 0xF) {
188 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <linux@brodo.de>\n"); 186 printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. Please send an e-mail to <cpufreq@lists.linux.org.uk>\n");
189 return 0; 187 return 0;
190 } 188 }
191 189
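Note: the rewritten probe keys the warning off the EST feature flag instead of two hard-coded Pentium M models, and tags constant-TSC parts with CPUFREQ_CONST_LOOPS so cpufreq knows loops_per_jiffy does not scale with throttling. The dispatch, trimmed from the hunk above:

	switch (c->x86_model) {
	case 0x0E:			/* Core */
	case 0x0F:			/* Core Duo */
		p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
		return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE);
	case 0x0D:			/* Dothan: constant TSC too, then... */
		p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
		/* fall through */
	case 0x09:			/* Banias */
		return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
	}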
diff --git a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
index ef457d50f4ac..b8fb4b521c62 100644
--- a/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
+++ b/arch/i386/kernel/cpu/cpufreq/sc520_freq.c
@@ -153,6 +153,7 @@ static struct cpufreq_driver sc520_freq_driver = {
153static int __init sc520_freq_init(void) 153static int __init sc520_freq_init(void)
154{ 154{
155 struct cpuinfo_x86 *c = cpu_data; 155 struct cpuinfo_x86 *c = cpu_data;
156 int err;
156 157
157 /* Test if we have the right hardware */ 158 /* Test if we have the right hardware */
158 if(c->x86_vendor != X86_VENDOR_AMD || 159 if(c->x86_vendor != X86_VENDOR_AMD ||
@@ -166,7 +167,11 @@ static int __init sc520_freq_init(void)
166 return -ENOMEM; 167 return -ENOMEM;
167 } 168 }
168 169
169 return cpufreq_register_driver(&sc520_freq_driver); 170 err = cpufreq_register_driver(&sc520_freq_driver);
171 if (err)
172 iounmap(cpuctl);
173
174 return err;
170} 175}
171 176
172 177
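Note: the new err path closes a leak: cpuctl is ioremap()ed earlier in the probe, and a failed cpufreq_register_driver() previously returned without releasing that mapping. The unwind pattern, as it now reads:

	err = cpufreq_register_driver(&sc520_freq_driver);
	if (err)
		iounmap(cpuctl);	/* registration failed: drop the MMCR mapping */

	return err;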
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index e8993baf3d14..f43b987f952b 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -36,6 +36,7 @@
36 36
37#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg) 37#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "speedstep-centrino", msg)
38 38
39#define INTEL_MSR_RANGE (0xffff)
39 40
40struct cpu_id 41struct cpu_id
41{ 42{
@@ -379,6 +380,7 @@ static int centrino_cpu_early_init_acpi(void)
379} 380}
380 381
381 382
383#ifdef CONFIG_SMP
382/* 384/*
383 * Some BIOSes do SW_ANY coordination internally, either set it up in hw 385 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
384 * or do it in BIOS firmware and won't inform about it to OS. If not 386 * or do it in BIOS firmware and won't inform about it to OS. If not
@@ -392,7 +394,6 @@ static int sw_any_bug_found(struct dmi_system_id *d)
392 return 0; 394 return 0;
393} 395}
394 396
395
396static struct dmi_system_id sw_any_bug_dmi_table[] = { 397static struct dmi_system_id sw_any_bug_dmi_table[] = {
397 { 398 {
398 .callback = sw_any_bug_found, 399 .callback = sw_any_bug_found,
@@ -405,7 +406,7 @@ static struct dmi_system_id sw_any_bug_dmi_table[] = {
405 }, 406 },
406 { } 407 { }
407}; 408};
408 409#endif
409 410
410/* 411/*
411 * centrino_cpu_init_acpi - register with ACPI P-States library 412 * centrino_cpu_init_acpi - register with ACPI P-States library
@@ -463,8 +464,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
463 } 464 }
464 465
465 for (i=0; i<p->state_count; i++) { 466 for (i=0; i<p->state_count; i++) {
466 if (p->states[i].control != p->states[i].status) { 467 if ((p->states[i].control & INTEL_MSR_RANGE) !=
467 dprintk("Different control (%llu) and status values (%llu)\n", 468 (p->states[i].status & INTEL_MSR_RANGE)) {
469 dprintk("Different MSR bits in control (%llu) and status (%llu)\n",
468 p->states[i].control, p->states[i].status); 470 p->states[i].control, p->states[i].status);
469 result = -EINVAL; 471 result = -EINVAL;
470 goto err_unreg; 472 goto err_unreg;
@@ -500,7 +502,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
500 } 502 }
501 503
502 for (i=0; i<p->state_count; i++) { 504 for (i=0; i<p->state_count; i++) {
503 centrino_model[cpu]->op_points[i].index = p->states[i].control; 505 centrino_model[cpu]->op_points[i].index = p->states[i].control & INTEL_MSR_RANGE;
504 centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000; 506 centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
505 dprintk("adding state %i with frequency %u and control value %04x\n", 507 dprintk("adding state %i with frequency %u and control value %04x\n",
506 i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index); 508 i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
@@ -531,6 +533,9 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
531 533
532 /* notify BIOS that we exist */ 534 /* notify BIOS that we exist */
533 acpi_processor_notify_smm(THIS_MODULE); 535 acpi_processor_notify_smm(THIS_MODULE);
536 printk("speedstep-centrino with X86_SPEEDSTEP_CENTRINO_ACPI "
537 "config is deprecated.\n "
538 "Use X86_ACPI_CPUFREQ (acpi-cpufreq) instead.\n" );
534 539
535 return 0; 540 return 0;
536 541
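Note: only the low 16 bits of the _PSS control and status words are what actually reaches the performance-control MSR; higher bits may legitimately differ between the two, which the old full-width compare rejected. A sketch with made-up values:

	#define INTEL_MSR_RANGE (0xffff)

	static int states_match(u64 control, u64 status)
	{
		/* e.g. control 0x060e0a1f vs status 0x00000a1f: the full
		 * compare fails, the masked compare passes (both 0x0a1f) */
		return (control & INTEL_MSR_RANGE) == (status & INTEL_MSR_RANGE);
	}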
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
index 4f46cac155c4..d59277c00911 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.c
@@ -123,6 +123,36 @@ static unsigned int pentiumM_get_frequency(void)
123 return (msr_tmp * 100 * 1000); 123 return (msr_tmp * 100 * 1000);
124} 124}
125 125
126static unsigned int pentium_core_get_frequency(void)
127{
128 u32 fsb = 0;
129 u32 msr_lo, msr_tmp;
130
131 rdmsr(MSR_FSB_FREQ, msr_lo, msr_tmp);
132 /* see table B-2 of 25366920.pdf */
133 switch (msr_lo & 0x07) {
134 case 5:
135 fsb = 100000;
136 break;
137 case 1:
138 fsb = 133333;
139 break;
140 case 3:
141 fsb = 166667;
142 break;
143 default:
144 printk(KERN_ERR "PCORE - MSR_FSB_FREQ undefined value");
145 }
146
147 rdmsr(MSR_IA32_EBL_CR_POWERON, msr_lo, msr_tmp);
148 dprintk("PCORE - MSR_IA32_EBL_CR_POWERON: 0x%x 0x%x\n", msr_lo, msr_tmp);
149
150 msr_tmp = (msr_lo >> 22) & 0x1f;
151 dprintk("bits 22-26 are 0x%x, speed is %u\n", msr_tmp, (msr_tmp * fsb));
152
153 return (msr_tmp * fsb);
154}
155
126 156
127static unsigned int pentium4_get_frequency(void) 157static unsigned int pentium4_get_frequency(void)
128{ 158{
@@ -174,6 +204,8 @@ static unsigned int pentium4_get_frequency(void)
174unsigned int speedstep_get_processor_frequency(unsigned int processor) 204unsigned int speedstep_get_processor_frequency(unsigned int processor)
175{ 205{
176 switch (processor) { 206 switch (processor) {
207 case SPEEDSTEP_PROCESSOR_PCORE:
208 return pentium_core_get_frequency();
177 case SPEEDSTEP_PROCESSOR_PM: 209 case SPEEDSTEP_PROCESSOR_PM:
178 return pentiumM_get_frequency(); 210 return pentiumM_get_frequency();
179 case SPEEDSTEP_PROCESSOR_P4D: 211 case SPEEDSTEP_PROCESSOR_P4D:
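Note: pentium_core_get_frequency() multiplies the FSB decoded from MSR_FSB_FREQ bits 2:0 by the ratio in EBL_CR_POWERON bits 26:22. A worked decode with made-up register contents:

	static unsigned int decode_example(void)
	{
		unsigned int fsb_khz = 133333;	/* MSR_FSB_FREQ & 7 == 1, per the table */
		unsigned int ratio   = 13;	/* EBL_CR_POWERON bits 26:22, made up   */

		return ratio * fsb_khz;		/* 1733329 kHz, i.e. about 1.73 GHz */
	}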
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
index b735429c50b4..b11bcc608cac 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-lib.h
@@ -22,6 +22,7 @@
22 * the speedstep_get_processor_frequency() call. */ 22 * the speedstep_get_processor_frequency() call. */
23#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */ 23#define SPEEDSTEP_PROCESSOR_PM 0xFFFFFF03 /* Pentium M */
24#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */ 24#define SPEEDSTEP_PROCESSOR_P4D 0xFFFFFF04 /* desktop P4 */
25#define SPEEDSTEP_PROCESSOR_PCORE 0xFFFFFF05 /* Core */
25 26
26/* speedstep states -- only two of them */ 27/* speedstep states -- only two of them */
27 28
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
index c28333d53646..ff0d89806114 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-smi.c
@@ -360,9 +360,6 @@ static int __init speedstep_init(void)
360 case SPEEDSTEP_PROCESSOR_PIII_C: 360 case SPEEDSTEP_PROCESSOR_PIII_C:
361 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY: 361 case SPEEDSTEP_PROCESSOR_PIII_C_EARLY:
362 break; 362 break;
363 case SPEEDSTEP_PROCESSOR_P4M:
364 printk(KERN_INFO "speedstep-smi: you're trying to use this cpufreq driver on a Pentium 4-based CPU. Most likely it will not work.\n");
365 break;
366 default: 363 default:
367 speedstep_processor = 0; 364 speedstep_processor = 0;
368 } 365 }
diff --git a/arch/i386/kernel/cpu/cyrix.c b/arch/i386/kernel/cpu/cyrix.c
index c0c3b59de32c..abcff92f994c 100644
--- a/arch/i386/kernel/cpu/cyrix.c
+++ b/arch/i386/kernel/cpu/cyrix.c
@@ -173,7 +173,7 @@ static void __cpuinit geode_configure(void)
173 ccr4 = getCx86(CX86_CCR4); 173 ccr4 = getCx86(CX86_CCR4);
174 ccr4 |= 0x38; /* FPU fast, DTE cache, Mem bypass */ 174 ccr4 |= 0x38; /* FPU fast, DTE cache, Mem bypass */
175 175
176 setCx86(CX86_CCR3, ccr3); 176 setCx86(CX86_CCR4, ccr4);
177 177
178 set_cx86_memwb(); 178 set_cx86_memwb();
179 set_cx86_reorder(); 179 set_cx86_reorder();
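Note: this one-liner fixes a classic read-modify-write slip: ccr4 was read and modified, but the old code then wrote the ccr3 local back to CX86_CCR3, so the FPU-fast/DTE-cache/mem-bypass bits never reached CCR4. The corrected sequence:

	u8 ccr4 = getCx86(CX86_CCR4);	/* read                         */
	ccr4 |= 0x38;			/* modify                       */
	setCx86(CX86_CCR4, ccr4);	/* write back the SAME register */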
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 94a95aa5227e..56fe26584957 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -107,7 +107,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
107 * Note that the workaround only should be initialized once... 107 * Note that the workaround only should be initialized once...
108 */ 108 */
109 c->f00f_bug = 0; 109 c->f00f_bug = 0;
110 if ( c->x86 == 5 ) { 110 if (!paravirt_enabled() && c->x86 == 5) {
111 static int f00f_workaround_enabled = 0; 111 static int f00f_workaround_enabled = 0;
112 112
113 c->f00f_bug = 1; 113 c->f00f_bug = 1;
@@ -195,8 +195,16 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
195 if ((c->x86 == 0xf && c->x86_model >= 0x03) || 195 if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
196 (c->x86 == 0x6 && c->x86_model >= 0x0e)) 196 (c->x86 == 0x6 && c->x86_model >= 0x0e))
197 set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability); 197 set_bit(X86_FEATURE_CONSTANT_TSC, c->x86_capability);
198}
199 198
199 if (cpu_has_ds) {
200 unsigned int l1;
201 rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
202 if (!(l1 & (1<<11)))
203 set_bit(X86_FEATURE_BTS, c->x86_capability);
204 if (!(l1 & (1<<12)))
205 set_bit(X86_FEATURE_PEBS, c->x86_capability);
206 }
207}
200 208
201static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size) 209static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
202{ 210{
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 5c43be47587f..80b4c5d421b1 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -480,12 +480,10 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu)
480 if (num_cache_leaves == 0) 480 if (num_cache_leaves == 0)
481 return -ENOENT; 481 return -ENOENT;
482 482
483 cpuid4_info[cpu] = kmalloc( 483 cpuid4_info[cpu] = kzalloc(
484 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL); 484 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
485 if (unlikely(cpuid4_info[cpu] == NULL)) 485 if (unlikely(cpuid4_info[cpu] == NULL))
486 return -ENOMEM; 486 return -ENOMEM;
487 memset(cpuid4_info[cpu], 0,
488 sizeof(struct _cpuid4_info) * num_cache_leaves);
489 487
490 oldmask = current->cpus_allowed; 488 oldmask = current->cpus_allowed;
491 retval = set_cpus_allowed(current, cpumask_of_cpu(cpu)); 489 retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
@@ -658,17 +656,14 @@ static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
658 return -ENOENT; 656 return -ENOENT;
659 657
660 /* Allocate all required memory */ 658 /* Allocate all required memory */
661 cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL); 659 cache_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
662 if (unlikely(cache_kobject[cpu] == NULL)) 660 if (unlikely(cache_kobject[cpu] == NULL))
663 goto err_out; 661 goto err_out;
664 memset(cache_kobject[cpu], 0, sizeof(struct kobject));
665 662
666 index_kobject[cpu] = kmalloc( 663 index_kobject[cpu] = kzalloc(
667 sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL); 664 sizeof(struct _index_kobject ) * num_cache_leaves, GFP_KERNEL);
668 if (unlikely(index_kobject[cpu] == NULL)) 665 if (unlikely(index_kobject[cpu] == NULL))
669 goto err_out; 666 goto err_out;
670 memset(index_kobject[cpu], 0,
671 sizeof(struct _index_kobject) * num_cache_leaves);
672 667
673 return 0; 668 return 0;
674 669
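Note: both hunks are the mechanical kmalloc()+memset() to kzalloc() conversion; behavior is identical, one call shorter, and the zeroing can no longer be forgotten. The shape of the change, with p and len standing in for any of the call sites above:

	/* kzalloc(len, flags) is kmalloc(len, flags) followed by
	 * memset(p, 0, len), minus the chance to skip the memset */
	p = kzalloc(len, GFP_KERNEL);
	if (!p)
		return -ENOMEM;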
diff --git a/arch/i386/kernel/cpu/mcheck/non-fatal.c b/arch/i386/kernel/cpu/mcheck/non-fatal.c
index 1f9153ae5b03..6b5d3518a1c0 100644
--- a/arch/i386/kernel/cpu/mcheck/non-fatal.c
+++ b/arch/i386/kernel/cpu/mcheck/non-fatal.c
@@ -51,10 +51,10 @@ static void mce_checkregs (void *info)
51 } 51 }
52} 52}
53 53
54static void mce_work_fn(void *data); 54static void mce_work_fn(struct work_struct *work);
55static DECLARE_WORK(mce_work, mce_work_fn, NULL); 55static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);
56 56
57static void mce_work_fn(void *data) 57static void mce_work_fn(struct work_struct *work)
58{ 58{
59 on_each_cpu(mce_checkregs, NULL, 1, 1); 59 on_each_cpu(mce_checkregs, NULL, 1, 1);
60 schedule_delayed_work(&mce_work, MCE_RATE); 60 schedule_delayed_work(&mce_work, MCE_RATE);
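Note: this tracks the 2.6.20 workqueue rework: handlers now receive the work item itself instead of an opaque void *, and periodic work is declared with DECLARE_DELAYED_WORK. A self-rearming sketch in the new style; the container_of() line shows how the embedding structure is recovered when a handler needs it:

	static void mce_work_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(mce_work, mce_work_fn);

	static void mce_work_fn(struct work_struct *work)
	{
		struct delayed_work *dw =
			container_of(work, struct delayed_work, work);

		(void)dw;	/* unused here; shown for the recovery idiom */
		schedule_delayed_work(&mce_work, MCE_RATE);	/* re-arm */
	}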
diff --git a/arch/i386/kernel/cpu/mcheck/therm_throt.c b/arch/i386/kernel/cpu/mcheck/therm_throt.c
index 2d8703b7ce65..065005c3f168 100644
--- a/arch/i386/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/i386/kernel/cpu/mcheck/therm_throt.c
@@ -20,6 +20,7 @@
20#include <linux/cpu.h> 20#include <linux/cpu.h>
21#include <asm/cpu.h> 21#include <asm/cpu.h>
22#include <linux/notifier.h> 22#include <linux/notifier.h>
23#include <linux/jiffies.h>
23#include <asm/therm_throt.h> 24#include <asm/therm_throt.h>
24 25
25/* How long to wait between reporting thermal events */ 26/* How long to wait between reporting thermal events */
@@ -115,7 +116,6 @@ static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
115 return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group); 116 return sysfs_create_group(&sys_dev->kobj, &thermal_throttle_attr_group);
116} 117}
117 118
118#ifdef CONFIG_HOTPLUG_CPU
119static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev) 119static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
120{ 120{
121 return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group); 121 return sysfs_remove_group(&sys_dev->kobj, &thermal_throttle_attr_group);
@@ -152,7 +152,6 @@ static struct notifier_block thermal_throttle_cpu_notifier =
152{ 152{
153 .notifier_call = thermal_throttle_cpu_callback, 153 .notifier_call = thermal_throttle_cpu_callback,
154}; 154};
155#endif /* CONFIG_HOTPLUG_CPU */
156 155
157static __init int thermal_throttle_init_device(void) 156static __init int thermal_throttle_init_device(void)
158{ 157{
diff --git a/arch/i386/kernel/cpu/mtrr/Makefile b/arch/i386/kernel/cpu/mtrr/Makefile
index a25b701ab84e..191fc0533649 100644
--- a/arch/i386/kernel/cpu/mtrr/Makefile
+++ b/arch/i386/kernel/cpu/mtrr/Makefile
@@ -1,5 +1,3 @@
1obj-y := main.o if.o generic.o state.o 1obj-y := main.o if.o generic.o state.o
2obj-y += amd.o 2obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
3obj-y += cyrix.o
4obj-y += centaur.o
5 3
diff --git a/arch/i386/kernel/cpu/mtrr/amd.c b/arch/i386/kernel/cpu/mtrr/amd.c
index 1a1e04b6fd00..0949cdbf848a 100644
--- a/arch/i386/kernel/cpu/mtrr/amd.c
+++ b/arch/i386/kernel/cpu/mtrr/amd.c
@@ -7,7 +7,7 @@
7 7
8static void 8static void
9amd_get_mtrr(unsigned int reg, unsigned long *base, 9amd_get_mtrr(unsigned int reg, unsigned long *base,
10 unsigned int *size, mtrr_type * type) 10 unsigned long *size, mtrr_type * type)
11{ 11{
12 unsigned long low, high; 12 unsigned long low, high;
13 13
diff --git a/arch/i386/kernel/cpu/mtrr/centaur.c b/arch/i386/kernel/cpu/mtrr/centaur.c
index 33f00ac314ef..cb9aa3a7a7ab 100644
--- a/arch/i386/kernel/cpu/mtrr/centaur.c
+++ b/arch/i386/kernel/cpu/mtrr/centaur.c
@@ -17,7 +17,7 @@ static u8 centaur_mcr_type; /* 0 for winchip, 1 for winchip2 */
17 */ 17 */
18 18
19static int 19static int
20centaur_get_free_region(unsigned long base, unsigned long size) 20centaur_get_free_region(unsigned long base, unsigned long size, int replace_reg)
21/* [SUMMARY] Get a free MTRR. 21/* [SUMMARY] Get a free MTRR.
22 <base> The starting (base) address of the region. 22 <base> The starting (base) address of the region.
23 <size> The size (in bytes) of the region. 23 <size> The size (in bytes) of the region.
@@ -26,10 +26,11 @@ centaur_get_free_region(unsigned long base, unsigned long size)
26{ 26{
27 int i, max; 27 int i, max;
28 mtrr_type ltype; 28 mtrr_type ltype;
29 unsigned long lbase; 29 unsigned long lbase, lsize;
30 unsigned int lsize;
31 30
32 max = num_var_ranges; 31 max = num_var_ranges;
32 if (replace_reg >= 0 && replace_reg < max)
33 return replace_reg;
33 for (i = 0; i < max; ++i) { 34 for (i = 0; i < max; ++i) {
34 if (centaur_mcr_reserved & (1 << i)) 35 if (centaur_mcr_reserved & (1 << i))
35 continue; 36 continue;
@@ -49,7 +50,7 @@ mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
49 50
50static void 51static void
51centaur_get_mcr(unsigned int reg, unsigned long *base, 52centaur_get_mcr(unsigned int reg, unsigned long *base,
52 unsigned int *size, mtrr_type * type) 53 unsigned long *size, mtrr_type * type)
53{ 54{
54 *base = centaur_mcr[reg].high >> PAGE_SHIFT; 55 *base = centaur_mcr[reg].high >> PAGE_SHIFT;
55 *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT; 56 *size = -(centaur_mcr[reg].low & 0xfffff000) >> PAGE_SHIFT;
diff --git a/arch/i386/kernel/cpu/mtrr/cyrix.c b/arch/i386/kernel/cpu/mtrr/cyrix.c
index 9027a987006b..0737a596db43 100644
--- a/arch/i386/kernel/cpu/mtrr/cyrix.c
+++ b/arch/i386/kernel/cpu/mtrr/cyrix.c
@@ -9,7 +9,7 @@ int arr3_protected;
9 9
10static void 10static void
11cyrix_get_arr(unsigned int reg, unsigned long *base, 11cyrix_get_arr(unsigned int reg, unsigned long *base,
12 unsigned int *size, mtrr_type * type) 12 unsigned long *size, mtrr_type * type)
13{ 13{
14 unsigned long flags; 14 unsigned long flags;
15 unsigned char arr, ccr3, rcr, shift; 15 unsigned char arr, ccr3, rcr, shift;
@@ -77,7 +77,7 @@ cyrix_get_arr(unsigned int reg, unsigned long *base,
77} 77}
78 78
79static int 79static int
80cyrix_get_free_region(unsigned long base, unsigned long size) 80cyrix_get_free_region(unsigned long base, unsigned long size, int replace_reg)
81/* [SUMMARY] Get a free ARR. 81/* [SUMMARY] Get a free ARR.
82 <base> The starting (base) address of the region. 82 <base> The starting (base) address of the region.
83 <size> The size (in bytes) of the region. 83 <size> The size (in bytes) of the region.
@@ -86,9 +86,24 @@ cyrix_get_free_region(unsigned long base, unsigned long size)
86{ 86{
87 int i; 87 int i;
88 mtrr_type ltype; 88 mtrr_type ltype;
89 unsigned long lbase; 89 unsigned long lbase, lsize;
90 unsigned int lsize;
91 90
91 switch (replace_reg) {
92 case 7:
93 if (size < 0x40)
94 break;
95 case 6:
96 case 5:
97 case 4:
98 return replace_reg;
99 case 3:
100 if (arr3_protected)
101 break;
102 case 2:
103 case 1:
104 case 0:
105 return replace_reg;
106 }
92 /* If we are to set up a region >32M then look at ARR7 immediately */ 107 /* If we are to set up a region >32M then look at ARR7 immediately */
93 if (size > 0x2000) { 108 if (size > 0x2000) {
94 cyrix_get_arr(7, &lbase, &lsize, &ltype); 109 cyrix_get_arr(7, &lbase, &lsize, &ltype);
@@ -214,7 +229,7 @@ static void cyrix_set_arr(unsigned int reg, unsigned long base,
214 229
215typedef struct { 230typedef struct {
216 unsigned long base; 231 unsigned long base;
217 unsigned int size; 232 unsigned long size;
218 mtrr_type type; 233 mtrr_type type;
219} arr_state_t; 234} arr_state_t;
220 235
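Note: the fall-throughs in the new switch are deliberate reuse rules: ARRs 4-6 can always be replaced in place, ARR7 only for regions of at least 0x40 pages (256 KB with 4 KB pages), and ARR3 only when it is not protected (it can be claimed for SMM use). Restated with the intent spelled out (helper name hypothetical):

	static int may_reuse(int replace_reg, unsigned long size)
	{
		switch (replace_reg) {
		case 7:
			if (size < 0x40)	/* ARR7 serves large regions only */
				break;
			/* fall through */
		case 6: case 5: case 4:
			return 1;
		case 3:
			if (arr3_protected)
				break;
			/* fall through */
		case 2: case 1: case 0:
			return 1;
		}
		return 0;	/* fall back to the normal free-ARR search */
	}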
diff --git a/arch/i386/kernel/cpu/mtrr/generic.c b/arch/i386/kernel/cpu/mtrr/generic.c
index 0b61eed8bbd8..f77fc53db654 100644
--- a/arch/i386/kernel/cpu/mtrr/generic.c
+++ b/arch/i386/kernel/cpu/mtrr/generic.c
@@ -3,6 +3,7 @@
3#include <linux/init.h> 3#include <linux/init.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/module.h>
6#include <asm/io.h> 7#include <asm/io.h>
7#include <asm/mtrr.h> 8#include <asm/mtrr.h>
8#include <asm/msr.h> 9#include <asm/msr.h>
@@ -15,12 +16,19 @@ struct mtrr_state {
15 struct mtrr_var_range *var_ranges; 16 struct mtrr_var_range *var_ranges;
16 mtrr_type fixed_ranges[NUM_FIXED_RANGES]; 17 mtrr_type fixed_ranges[NUM_FIXED_RANGES];
17 unsigned char enabled; 18 unsigned char enabled;
19 unsigned char have_fixed;
18 mtrr_type def_type; 20 mtrr_type def_type;
19}; 21};
20 22
21static unsigned long smp_changes_mask; 23static unsigned long smp_changes_mask;
22static struct mtrr_state mtrr_state = {}; 24static struct mtrr_state mtrr_state = {};
23 25
26#undef MODULE_PARAM_PREFIX
27#define MODULE_PARAM_PREFIX "mtrr."
28
29static __initdata int mtrr_show;
30module_param_named(show, mtrr_show, bool, 0);
31
24/* Get the MSR pair relating to a var range */ 32/* Get the MSR pair relating to a var range */
25static void __init 33static void __init
26get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr) 34get_mtrr_var_range(unsigned int index, struct mtrr_var_range *vr)
@@ -43,6 +51,14 @@ get_fixed_ranges(mtrr_type * frs)
43 rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]); 51 rdmsr(MTRRfix4K_C0000_MSR + i, p[6 + i * 2], p[7 + i * 2]);
44} 52}
45 53
54static void __init print_fixed(unsigned base, unsigned step, const mtrr_type*types)
55{
56 unsigned i;
57
58 for (i = 0; i < 8; ++i, ++types, base += step)
59 printk(KERN_INFO "MTRR %05X-%05X %s\n", base, base + step - 1, mtrr_attrib_to_str(*types));
60}
61
46/* Grab all of the MTRR state for this CPU into *state */ 62/* Grab all of the MTRR state for this CPU into *state */
47void __init get_mtrr_state(void) 63void __init get_mtrr_state(void)
48{ 64{
@@ -58,13 +74,49 @@ void __init get_mtrr_state(void)
58 } 74 }
59 vrs = mtrr_state.var_ranges; 75 vrs = mtrr_state.var_ranges;
60 76
77 rdmsr(MTRRcap_MSR, lo, dummy);
78 mtrr_state.have_fixed = (lo >> 8) & 1;
79
61 for (i = 0; i < num_var_ranges; i++) 80 for (i = 0; i < num_var_ranges; i++)
62 get_mtrr_var_range(i, &vrs[i]); 81 get_mtrr_var_range(i, &vrs[i]);
63 get_fixed_ranges(mtrr_state.fixed_ranges); 82 if (mtrr_state.have_fixed)
83 get_fixed_ranges(mtrr_state.fixed_ranges);
64 84
65 rdmsr(MTRRdefType_MSR, lo, dummy); 85 rdmsr(MTRRdefType_MSR, lo, dummy);
66 mtrr_state.def_type = (lo & 0xff); 86 mtrr_state.def_type = (lo & 0xff);
67 mtrr_state.enabled = (lo & 0xc00) >> 10; 87 mtrr_state.enabled = (lo & 0xc00) >> 10;
88
89 if (mtrr_show) {
90 int high_width;
91
92 printk(KERN_INFO "MTRR default type: %s\n", mtrr_attrib_to_str(mtrr_state.def_type));
93 if (mtrr_state.have_fixed) {
94 printk(KERN_INFO "MTRR fixed ranges %sabled:\n",
95 mtrr_state.enabled & 1 ? "en" : "dis");
96 print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
97 for (i = 0; i < 2; ++i)
98 print_fixed(0x80000 + i * 0x20000, 0x04000, mtrr_state.fixed_ranges + (i + 1) * 8);
99 for (i = 0; i < 8; ++i)
100 print_fixed(0xC0000 + i * 0x08000, 0x01000, mtrr_state.fixed_ranges + (i + 3) * 8);
101 }
102 printk(KERN_INFO "MTRR variable ranges %sabled:\n",
103 mtrr_state.enabled & 2 ? "en" : "dis");
104 high_width = ((size_or_mask ? ffs(size_or_mask) - 1 : 32) - (32 - PAGE_SHIFT) + 3) / 4;
105 for (i = 0; i < num_var_ranges; ++i) {
106 if (mtrr_state.var_ranges[i].mask_lo & (1 << 11))
107 printk(KERN_INFO "MTRR %u base %0*X%05X000 mask %0*X%05X000 %s\n",
108 i,
109 high_width,
110 mtrr_state.var_ranges[i].base_hi,
111 mtrr_state.var_ranges[i].base_lo >> 12,
112 high_width,
113 mtrr_state.var_ranges[i].mask_hi,
114 mtrr_state.var_ranges[i].mask_lo >> 12,
115 mtrr_attrib_to_str(mtrr_state.var_ranges[i].base_lo & 0xff));
116 else
117 printk(KERN_INFO "MTRR %u disabled\n", i);
118 }
119 }
68} 120}
69 121
70/* Some BIOS's are fucked and don't set all MTRRs the same! */ 122/* Some BIOS's are fucked and don't set all MTRRs the same! */
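Note: redefining MODULE_PARAM_PREFIX before module_param_named() names the parameter mtrr.show even though this code is always built in, so it is picked up from the kernel command line; booting with mtrr.show=1 prints the fixed- and variable-range decode added above. The pattern in isolation:

	#undef  MODULE_PARAM_PREFIX
	#define MODULE_PARAM_PREFIX "mtrr."	/* parameters become "mtrr.<name>" */

	static __initdata int mtrr_show;
	module_param_named(show, mtrr_show, bool, 0);	/* boot: mtrr.show=1 */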
@@ -95,7 +147,7 @@ void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
95 smp_processor_id(), msr, a, b); 147 smp_processor_id(), msr, a, b);
96} 148}
97 149
98int generic_get_free_region(unsigned long base, unsigned long size) 150int generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
99/* [SUMMARY] Get a free MTRR. 151/* [SUMMARY] Get a free MTRR.
100 <base> The starting (base) address of the region. 152 <base> The starting (base) address of the region.
101 <size> The size (in bytes) of the region. 153 <size> The size (in bytes) of the region.
@@ -104,10 +156,11 @@ int generic_get_free_region(unsigned long base, unsigned long size)
104{ 156{
105 int i, max; 157 int i, max;
106 mtrr_type ltype; 158 mtrr_type ltype;
107 unsigned long lbase; 159 unsigned long lbase, lsize;
108 unsigned lsize;
109 160
110 max = num_var_ranges; 161 max = num_var_ranges;
162 if (replace_reg >= 0 && replace_reg < max)
163 return replace_reg;
111 for (i = 0; i < max; ++i) { 164 for (i = 0; i < max; ++i) {
112 mtrr_if->get(i, &lbase, &lsize, &ltype); 165 mtrr_if->get(i, &lbase, &lsize, &ltype);
113 if (lsize == 0) 166 if (lsize == 0)
@@ -117,7 +170,7 @@ int generic_get_free_region(unsigned long base, unsigned long size)
117} 170}
118 171
119static void generic_get_mtrr(unsigned int reg, unsigned long *base, 172static void generic_get_mtrr(unsigned int reg, unsigned long *base,
120 unsigned int *size, mtrr_type * type) 173 unsigned long *size, mtrr_type *type)
121{ 174{
122 unsigned int mask_lo, mask_hi, base_lo, base_hi; 175 unsigned int mask_lo, mask_hi, base_lo, base_hi;
123 176
@@ -202,7 +255,9 @@ static int set_mtrr_var_ranges(unsigned int index, struct mtrr_var_range *vr)
202 return changed; 255 return changed;
203} 256}
204 257
205static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi) 258static u32 deftype_lo, deftype_hi;
259
260static unsigned long set_mtrr_state(void)
206/* [SUMMARY] Set the MTRR state for this CPU. 261/* [SUMMARY] Set the MTRR state for this CPU.
207 <state> The MTRR state information to read. 262 <state> The MTRR state information to read.
208 <ctxt> Some relevant CPU context. 263 <ctxt> Some relevant CPU context.
@@ -217,14 +272,14 @@ static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
217 if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i])) 272 if (set_mtrr_var_ranges(i, &mtrr_state.var_ranges[i]))
218 change_mask |= MTRR_CHANGE_MASK_VARIABLE; 273 change_mask |= MTRR_CHANGE_MASK_VARIABLE;
219 274
220 if (set_fixed_ranges(mtrr_state.fixed_ranges)) 275 if (mtrr_state.have_fixed && set_fixed_ranges(mtrr_state.fixed_ranges))
221 change_mask |= MTRR_CHANGE_MASK_FIXED; 276 change_mask |= MTRR_CHANGE_MASK_FIXED;
222 277
223 /* Set_mtrr_restore restores the old value of MTRRdefType, 278 /* Set_mtrr_restore restores the old value of MTRRdefType,
224 so to set it we fiddle with the saved value */ 279 so to set it we fiddle with the saved value */
225 if ((deftype_lo & 0xff) != mtrr_state.def_type 280 if ((deftype_lo & 0xff) != mtrr_state.def_type
226 || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) { 281 || ((deftype_lo & 0xc00) >> 10) != mtrr_state.enabled) {
227 deftype_lo |= (mtrr_state.def_type | mtrr_state.enabled << 10); 282 deftype_lo = (deftype_lo & ~0xcff) | mtrr_state.def_type | (mtrr_state.enabled << 10);
228 change_mask |= MTRR_CHANGE_MASK_DEFTYPE; 283 change_mask |= MTRR_CHANGE_MASK_DEFTYPE;
229 } 284 }
230 285
@@ -233,7 +288,6 @@ static unsigned long set_mtrr_state(u32 deftype_lo, u32 deftype_hi)
233 288
234 289
235static unsigned long cr4 = 0; 290static unsigned long cr4 = 0;
236static u32 deftype_lo, deftype_hi;
237static DEFINE_SPINLOCK(set_atomicity_lock); 291static DEFINE_SPINLOCK(set_atomicity_lock);
238 292
239/* 293/*
@@ -271,7 +325,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
271 rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi); 325 rdmsr(MTRRdefType_MSR, deftype_lo, deftype_hi);
272 326
273 /* Disable MTRRs, and set the default type to uncached */ 327 /* Disable MTRRs, and set the default type to uncached */
274 mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & 0xf300UL, deftype_hi); 328 mtrr_wrmsr(MTRRdefType_MSR, deftype_lo & ~0xcff, deftype_hi);
275} 329}
276 330
277static void post_set(void) __releases(set_atomicity_lock) 331static void post_set(void) __releases(set_atomicity_lock)
@@ -300,7 +354,7 @@ static void generic_set_all(void)
300 prepare_set(); 354 prepare_set();
301 355
302 /* Actually set the state */ 356 /* Actually set the state */
303 mask = set_mtrr_state(deftype_lo,deftype_hi); 357 mask = set_mtrr_state();
304 358
305 post_set(); 359 post_set();
306 local_irq_restore(flags); 360 local_irq_restore(flags);
@@ -366,7 +420,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size, unsigned i
366 printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base); 420 printk(KERN_WARNING "mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
367 return -EINVAL; 421 return -EINVAL;
368 } 422 }
369 if (!(base + size < 0x70000000 || base > 0x7003FFFF) && 423 if (!(base + size < 0x70000 || base > 0x7003F) &&
370 (type == MTRR_TYPE_WRCOMB 424 (type == MTRR_TYPE_WRCOMB
371 || type == MTRR_TYPE_WRBACK)) { 425 || type == MTRR_TYPE_WRBACK)) {
372 printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n"); 426 printk(KERN_WARNING "mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
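Note: two masking fixes travel together in this file: set_mtrr_state() used to OR the desired type/enable bits over whatever was already there, so stale bits could survive, and prepare_set() disabled MTRRs with an AND against 0xf300UL, which also zeroed every reserved bit above bit 15. Clearing with ~0xcff touches exactly the defined fields. Sketch:

	/* MTRRdefType: bits 7:0 = default type, bit 10 = fixed-range
	 * enable, bit 11 = MTRR enable; 0xcff covers exactly these   */
	static u32 deftype_apply(u32 deftype_lo, u8 def_type, u8 enabled)
	{
		/* enabled holds bits 11:10 as read back, hence << 10 */
		return (deftype_lo & ~0xcff) | def_type | (enabled << 10);
	}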
diff --git a/arch/i386/kernel/cpu/mtrr/if.c b/arch/i386/kernel/cpu/mtrr/if.c
index 5ac051bb9d55..5ae1705eafa6 100644
--- a/arch/i386/kernel/cpu/mtrr/if.c
+++ b/arch/i386/kernel/cpu/mtrr/if.c
@@ -17,7 +17,7 @@ extern unsigned int *usage_table;
17 17
18#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private) 18#define FILE_FCOUNT(f) (((struct seq_file *)((f)->private_data))->private)
19 19
20static char *mtrr_strings[MTRR_NUM_TYPES] = 20static const char *const mtrr_strings[MTRR_NUM_TYPES] =
21{ 21{
22 "uncachable", /* 0 */ 22 "uncachable", /* 0 */
23 "write-combining", /* 1 */ 23 "write-combining", /* 1 */
@@ -28,7 +28,7 @@ static char *mtrr_strings[MTRR_NUM_TYPES] =
28 "write-back", /* 6 */ 28 "write-back", /* 6 */
29}; 29};
30 30
31char *mtrr_attrib_to_str(int x) 31const char *mtrr_attrib_to_str(int x)
32{ 32{
33 return (x <= 6) ? mtrr_strings[x] : "?"; 33 return (x <= 6) ? mtrr_strings[x] : "?";
34} 34}
@@ -44,10 +44,9 @@ mtrr_file_add(unsigned long base, unsigned long size,
44 44
45 max = num_var_ranges; 45 max = num_var_ranges;
46 if (fcount == NULL) { 46 if (fcount == NULL) {
47 fcount = kmalloc(max * sizeof *fcount, GFP_KERNEL); 47 fcount = kzalloc(max * sizeof *fcount, GFP_KERNEL);
48 if (!fcount) 48 if (!fcount)
49 return -ENOMEM; 49 return -ENOMEM;
50 memset(fcount, 0, max * sizeof *fcount);
51 FILE_FCOUNT(file) = fcount; 50 FILE_FCOUNT(file) = fcount;
52 } 51 }
53 if (!page) { 52 if (!page) {
@@ -155,6 +154,7 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
155{ 154{
156 int err = 0; 155 int err = 0;
157 mtrr_type type; 156 mtrr_type type;
157 unsigned long size;
158 struct mtrr_sentry sentry; 158 struct mtrr_sentry sentry;
159 struct mtrr_gentry gentry; 159 struct mtrr_gentry gentry;
160 void __user *arg = (void __user *) __arg; 160 void __user *arg = (void __user *) __arg;
@@ -235,15 +235,15 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
235 case MTRRIOC_GET_ENTRY: 235 case MTRRIOC_GET_ENTRY:
236 if (gentry.regnum >= num_var_ranges) 236 if (gentry.regnum >= num_var_ranges)
237 return -EINVAL; 237 return -EINVAL;
238 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); 238 mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
239 239
240 /* Hide entries that go above 4GB */ 240 /* Hide entries that go above 4GB */
241 if (gentry.base + gentry.size > 0x100000 241 if (gentry.base + size - 1 >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT))
242 || gentry.size == 0x100000) 242 || size >= (1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT)))
243 gentry.base = gentry.size = gentry.type = 0; 243 gentry.base = gentry.size = gentry.type = 0;
244 else { 244 else {
245 gentry.base <<= PAGE_SHIFT; 245 gentry.base <<= PAGE_SHIFT;
246 gentry.size <<= PAGE_SHIFT; 246 gentry.size = size << PAGE_SHIFT;
247 gentry.type = type; 247 gentry.type = type;
248 } 248 }
249 249
@@ -273,8 +273,14 @@ mtrr_ioctl(struct file *file, unsigned int cmd, unsigned long __arg)
273 case MTRRIOC_GET_PAGE_ENTRY: 273 case MTRRIOC_GET_PAGE_ENTRY:
274 if (gentry.regnum >= num_var_ranges) 274 if (gentry.regnum >= num_var_ranges)
275 return -EINVAL; 275 return -EINVAL;
276 mtrr_if->get(gentry.regnum, &gentry.base, &gentry.size, &type); 276 mtrr_if->get(gentry.regnum, &gentry.base, &size, &type);
277 gentry.type = type; 277 /* Hide entries that would overflow */
278 if (size != (__typeof__(gentry.size))size)
279 gentry.base = gentry.size = gentry.type = 0;
280 else {
281 gentry.size = size;
282 gentry.type = type;
283 }
278 break; 284 break;
279 } 285 }
280 286
@@ -353,8 +359,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
353 char factor; 359 char factor;
354 int i, max, len; 360 int i, max, len;
355 mtrr_type type; 361 mtrr_type type;
356 unsigned long base; 362 unsigned long base, size;
357 unsigned int size;
358 363
359 len = 0; 364 len = 0;
360 max = num_var_ranges; 365 max = num_var_ranges;
@@ -373,7 +378,7 @@ static int mtrr_seq_show(struct seq_file *seq, void *offset)
373 } 378 }
374 /* RED-PEN: base can be > 32bit */ 379 /* RED-PEN: base can be > 32bit */
375 len += seq_printf(seq, 380 len += seq_printf(seq,
376 "reg%02i: base=0x%05lx000 (%4liMB), size=%4i%cB: %s, count=%d\n", 381 "reg%02i: base=0x%05lx000 (%4luMB), size=%4lu%cB: %s, count=%d\n",
377 i, base, base >> (20 - PAGE_SHIFT), size, factor, 382 i, base, base >> (20 - PAGE_SHIFT), size, factor,
378 mtrr_attrib_to_str(type), usage_table[i]); 383 mtrr_attrib_to_str(type), usage_table[i]);
379 } 384 }
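Note: the new bound makes the hide-above-4GB check overflow-proof and self-documenting: gentry.size is a 32-bit byte count, so the largest representable region is 1UL << (8 * sizeof(gentry.size) - PAGE_SHIFT) pages, i.e. 1 << 20 pages = 4 GB with 4 KB pages, and comparing base + size - 1 avoids wrapping at the very top. As a predicate:

	static int fits_in_u32_bytes(unsigned long base, unsigned long size)
	{
		unsigned long limit = 1UL << (32 - PAGE_SHIFT);	/* pages per 4 GB */

		return size < limit && base + size - 1 < limit;
	}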
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index fff90bda4733..16bb7ea87145 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -59,7 +59,11 @@ struct mtrr_ops * mtrr_if = NULL;
59static void set_mtrr(unsigned int reg, unsigned long base, 59static void set_mtrr(unsigned int reg, unsigned long base,
60 unsigned long size, mtrr_type type); 60 unsigned long size, mtrr_type type);
61 61
62#ifndef CONFIG_X86_64
62extern int arr3_protected; 63extern int arr3_protected;
64#else
65#define arr3_protected 0
66#endif
63 67
64void set_mtrr_ops(struct mtrr_ops * ops) 68void set_mtrr_ops(struct mtrr_ops * ops)
65{ 69{
@@ -168,6 +172,13 @@ static void ipi_handler(void *info)
168 172
169#endif 173#endif
170 174
175static inline int types_compatible(mtrr_type type1, mtrr_type type2) {
176 return type1 == MTRR_TYPE_UNCACHABLE ||
177 type2 == MTRR_TYPE_UNCACHABLE ||
178 (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
179 (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
180}
181
171/** 182/**
172 * set_mtrr - update mtrrs on all processors 183 * set_mtrr - update mtrrs on all processors
173 * @reg: mtrr in question 184 * @reg: mtrr in question
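Note: types_compatible() encodes the overlap cases where the architecture defines the combined result: UC together with anything acts as UC, and WT overlapping WB acts as WT; any other mix (say WRCOMB over WRBACK) remains an error. Annotated restatement:

	static int types_compatible(mtrr_type type1, mtrr_type type2)
	{
		/* UC + anything -> UC;  WT + WB -> WT (either order) */
		return type1 == MTRR_TYPE_UNCACHABLE ||
		       type2 == MTRR_TYPE_UNCACHABLE ||
		       (type1 == MTRR_TYPE_WRTHROUGH && type2 == MTRR_TYPE_WRBACK) ||
		       (type1 == MTRR_TYPE_WRBACK && type2 == MTRR_TYPE_WRTHROUGH);
	}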
@@ -263,8 +274,8 @@ static void set_mtrr(unsigned int reg, unsigned long base,
263 274
264/** 275/**
265 * mtrr_add_page - Add a memory type region 276 * mtrr_add_page - Add a memory type region
266 * @base: Physical base address of region in pages (4 KB) 277 * @base: Physical base address of region in pages (in units of 4 kB!)
267 * @size: Physical size of region in pages (4 KB) 278 * @size: Physical size of region in pages (4 kB)
268 * @type: Type of MTRR desired 279 * @type: Type of MTRR desired
269 * @increment: If this is true do usage counting on the region 280 * @increment: If this is true do usage counting on the region
270 * 281 *
@@ -300,11 +311,9 @@ static void set_mtrr(unsigned int reg, unsigned long base,
300int mtrr_add_page(unsigned long base, unsigned long size, 311int mtrr_add_page(unsigned long base, unsigned long size,
301 unsigned int type, char increment) 312 unsigned int type, char increment)
302{ 313{
303 int i; 314 int i, replace, error;
304 mtrr_type ltype; 315 mtrr_type ltype;
305 unsigned long lbase; 316 unsigned long lbase, lsize;
306 unsigned int lsize;
307 int error;
308 317
309 if (!mtrr_if) 318 if (!mtrr_if)
310 return -ENXIO; 319 return -ENXIO;
@@ -324,12 +333,18 @@ int mtrr_add_page(unsigned long base, unsigned long size,
324 return -ENOSYS; 333 return -ENOSYS;
325 } 334 }
326 335
336 if (!size) {
337 printk(KERN_WARNING "mtrr: zero sized request\n");
338 return -EINVAL;
339 }
340
327 if (base & size_or_mask || size & size_or_mask) { 341 if (base & size_or_mask || size & size_or_mask) {
328 printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n"); 342 printk(KERN_WARNING "mtrr: base or size exceeds the MTRR width\n");
329 return -EINVAL; 343 return -EINVAL;
330 } 344 }
331 345
332 error = -EINVAL; 346 error = -EINVAL;
347 replace = -1;
333 348
334 /* No CPU hotplug when we change MTRR entries */ 349 /* No CPU hotplug when we change MTRR entries */
335 lock_cpu_hotplug(); 350 lock_cpu_hotplug();
@@ -337,21 +352,28 @@ int mtrr_add_page(unsigned long base, unsigned long size,
337 mutex_lock(&mtrr_mutex); 352 mutex_lock(&mtrr_mutex);
338 for (i = 0; i < num_var_ranges; ++i) { 353 for (i = 0; i < num_var_ranges; ++i) {
339 mtrr_if->get(i, &lbase, &lsize, &ltype); 354 mtrr_if->get(i, &lbase, &lsize, &ltype);
340 if (base >= lbase + lsize) 355 if (!lsize || base > lbase + lsize - 1 || base + size - 1 < lbase)
341 continue;
342 if ((base < lbase) && (base + size <= lbase))
343 continue; 356 continue;
344 /* At this point we know there is some kind of overlap/enclosure */ 357 /* At this point we know there is some kind of overlap/enclosure */
345 if ((base < lbase) || (base + size > lbase + lsize)) { 358 if (base < lbase || base + size - 1 > lbase + lsize - 1) {
359 if (base <= lbase && base + size - 1 >= lbase + lsize - 1) {
360 /* New region encloses an existing region */
361 if (type == ltype) {
362 replace = replace == -1 ? i : -2;
363 continue;
364 }
365 else if (types_compatible(type, ltype))
366 continue;
367 }
346 printk(KERN_WARNING 368 printk(KERN_WARNING
347 "mtrr: 0x%lx000,0x%lx000 overlaps existing" 369 "mtrr: 0x%lx000,0x%lx000 overlaps existing"
348 " 0x%lx000,0x%x000\n", base, size, lbase, 370 " 0x%lx000,0x%lx000\n", base, size, lbase,
349 lsize); 371 lsize);
350 goto out; 372 goto out;
351 } 373 }
352 /* New region is enclosed by an existing region */ 374 /* New region is enclosed by an existing region */
353 if (ltype != type) { 375 if (ltype != type) {
354 if (type == MTRR_TYPE_UNCACHABLE) 376 if (types_compatible(type, ltype))
355 continue; 377 continue;
356 printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n", 378 printk (KERN_WARNING "mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
357 base, size, mtrr_attrib_to_str(ltype), 379 base, size, mtrr_attrib_to_str(ltype),
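Note: the reworked overlap test in the hunk above closes two holes at once: empty slots (lsize == 0) are skipped explicitly, and the comparisons use the inclusive last page, since lbase + lsize can wrap to zero for a range ending at the top of the address space. As a standalone predicate:

	static int ranges_disjoint(unsigned long base, unsigned long size,
				   unsigned long lbase, unsigned long lsize)
	{
		/* all quantities in pages; base/size are nonzero after the
		 * earlier zero-size rejection in mtrr_add_page()           */
		return !lsize || base > lbase + lsize - 1 || base + size - 1 < lbase;
	}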
@@ -364,10 +386,18 @@ int mtrr_add_page(unsigned long base, unsigned long size,
364 goto out; 386 goto out;
365 } 387 }
366 /* Search for an empty MTRR */ 388 /* Search for an empty MTRR */
367 i = mtrr_if->get_free_region(base, size); 389 i = mtrr_if->get_free_region(base, size, replace);
368 if (i >= 0) { 390 if (i >= 0) {
369 set_mtrr(i, base, size, type); 391 set_mtrr(i, base, size, type);
370 usage_table[i] = 1; 392 if (likely(replace < 0))
393 usage_table[i] = 1;
394 else {
395 usage_table[i] = usage_table[replace] + !!increment;
396 if (unlikely(replace != i)) {
397 set_mtrr(replace, 0, 0, 0);
398 usage_table[replace] = 0;
399 }
400 }
371 } else 401 } else
372 printk(KERN_INFO "mtrr: no more MTRRs available\n"); 402 printk(KERN_INFO "mtrr: no more MTRRs available\n");
373 error = i; 403 error = i;
@@ -455,8 +485,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
455{ 485{
456 int i, max; 486 int i, max;
457 mtrr_type ltype; 487 mtrr_type ltype;
458 unsigned long lbase; 488 unsigned long lbase, lsize;
459 unsigned int lsize;
460 int error = -EINVAL; 489 int error = -EINVAL;
461 490
462 if (!mtrr_if) 491 if (!mtrr_if)
@@ -544,9 +573,11 @@ extern void centaur_init_mtrr(void);
544 573
545static void __init init_ifs(void) 574static void __init init_ifs(void)
546{ 575{
576#ifndef CONFIG_X86_64
547 amd_init_mtrr(); 577 amd_init_mtrr();
548 cyrix_init_mtrr(); 578 cyrix_init_mtrr();
549 centaur_init_mtrr(); 579 centaur_init_mtrr();
580#endif
550} 581}
551 582
552/* The suspend/resume methods are only for CPU without MTRR. CPU using generic 583/* The suspend/resume methods are only for CPU without MTRR. CPU using generic
@@ -555,7 +586,7 @@ static void __init init_ifs(void)
555struct mtrr_value { 586struct mtrr_value {
556 mtrr_type ltype; 587 mtrr_type ltype;
557 unsigned long lbase; 588 unsigned long lbase;
558 unsigned int lsize; 589 unsigned long lsize;
559}; 590};
560 591
561static struct mtrr_value * mtrr_state; 592static struct mtrr_value * mtrr_state;
@@ -565,10 +596,8 @@ static int mtrr_save(struct sys_device * sysdev, pm_message_t state)
565 int i; 596 int i;
566 int size = num_var_ranges * sizeof(struct mtrr_value); 597 int size = num_var_ranges * sizeof(struct mtrr_value);
567 598
568 mtrr_state = kmalloc(size,GFP_ATOMIC); 599 mtrr_state = kzalloc(size,GFP_ATOMIC);
569 if (mtrr_state) 600 if (!mtrr_state)
570 memset(mtrr_state,0,size);
571 else
572 return -ENOMEM; 601 return -ENOMEM;
573 602
574 for (i = 0; i < num_var_ranges; i++) { 603 for (i = 0; i < num_var_ranges; i++) {
diff --git a/arch/i386/kernel/cpu/mtrr/mtrr.h b/arch/i386/kernel/cpu/mtrr/mtrr.h
index 99c9f2682041..d61ea9db6cfe 100644
--- a/arch/i386/kernel/cpu/mtrr/mtrr.h
+++ b/arch/i386/kernel/cpu/mtrr/mtrr.h
@@ -43,15 +43,16 @@ struct mtrr_ops {
43 void (*set_all)(void); 43 void (*set_all)(void);
44 44
45 void (*get)(unsigned int reg, unsigned long *base, 45 void (*get)(unsigned int reg, unsigned long *base,
46 unsigned int *size, mtrr_type * type); 46 unsigned long *size, mtrr_type * type);
47 int (*get_free_region) (unsigned long base, unsigned long size); 47 int (*get_free_region)(unsigned long base, unsigned long size,
48 48 int replace_reg);
49 int (*validate_add_page)(unsigned long base, unsigned long size, 49 int (*validate_add_page)(unsigned long base, unsigned long size,
50 unsigned int type); 50 unsigned int type);
51 int (*have_wrcomb)(void); 51 int (*have_wrcomb)(void);
52}; 52};
53 53
54extern int generic_get_free_region(unsigned long base, unsigned long size); 54extern int generic_get_free_region(unsigned long base, unsigned long size,
55 int replace_reg);
55extern int generic_validate_add_page(unsigned long base, unsigned long size, 56extern int generic_validate_add_page(unsigned long base, unsigned long size,
56 unsigned int type); 57 unsigned int type);
57 58
@@ -62,17 +63,17 @@ extern int positive_have_wrcomb(void);
62/* library functions for processor-specific routines */ 63/* library functions for processor-specific routines */
63struct set_mtrr_context { 64struct set_mtrr_context {
64 unsigned long flags; 65 unsigned long flags;
65 unsigned long deftype_lo;
66 unsigned long deftype_hi;
67 unsigned long cr4val; 66 unsigned long cr4val;
68 unsigned long ccr3; 67 u32 deftype_lo;
68 u32 deftype_hi;
69 u32 ccr3;
69}; 70};
70 71
71struct mtrr_var_range { 72struct mtrr_var_range {
72 unsigned long base_lo; 73 u32 base_lo;
73 unsigned long base_hi; 74 u32 base_hi;
74 unsigned long mask_lo; 75 u32 mask_lo;
75 unsigned long mask_hi; 76 u32 mask_hi;
76}; 77};
77 78
78void set_mtrr_done(struct set_mtrr_context *ctxt); 79void set_mtrr_done(struct set_mtrr_context *ctxt);
@@ -92,6 +93,6 @@ extern struct mtrr_ops * mtrr_if;
92extern unsigned int num_var_ranges; 93extern unsigned int num_var_ranges;
93 94
94void mtrr_state_warn(void); 95void mtrr_state_warn(void);
95char *mtrr_attrib_to_str(int x); 96const char *mtrr_attrib_to_str(int x);
96void mtrr_wrmsr(unsigned, unsigned, unsigned); 97void mtrr_wrmsr(unsigned, unsigned, unsigned);
97 98
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 76aac088a323..6624d8583c42 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -152,9 +152,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
152 seq_printf(m, " [%d]", i); 152 seq_printf(m, " [%d]", i);
153 } 153 }
154 154
155 seq_printf(m, "\nbogomips\t: %lu.%02lu\n\n", 155 seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
156 c->loops_per_jiffy/(500000/HZ), 156 c->loops_per_jiffy/(500000/HZ),
157 (c->loops_per_jiffy/(5000/HZ)) % 100); 157 (c->loops_per_jiffy/(5000/HZ)) % 100);
158 seq_printf(m, "clflush size\t: %u\n\n", c->x86_clflush_size);
158 159
159 return 0; 160 return 0;
160} 161}
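Note: with the extra seq_printf(), each per-CPU block in /proc/cpuinfo now ends with the cache-line flush size, and the blank record separator moves after it. Illustrative tail of one record, numbers made up:

	bogomips        : 3989.76
	clflush size    : 64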