diff options
author | Dave Jones <davej@redhat.com> | 2008-05-22 18:54:32 -0400 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2008-05-30 18:46:29 -0400 |
commit | a82fbe31cb387bb246e2d3b3c177f551bb991135 (patch) | |
tree | 8e7480b39805ff6c608d557aa92eeb47b18b229b /arch/x86/kernel | |
parent | 4d285878564bb46cf64e54be18eeffe33ca583a0 (diff) |
x86: Move the 64-bit Intel specific parts out of setup_64.c
Create a separate intel_64.c file in the cpu/ dir for
the useful parts to live in.
Signed-off-by: Dave Jones <davej@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/cpu/Makefile | 1 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/intel_64.c | 97 | ||||
-rw-r--r-- | arch/x86/kernel/setup_64.c | 93 |
3 files changed, 100 insertions(+), 91 deletions(-)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index ef065c1a2e1a..b7a11924fed7 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -12,6 +12,7 @@ obj-$(CONFIG_X86_32) += cyrix.o | |||
12 | obj-$(CONFIG_X86_32) += centaur.o | 12 | obj-$(CONFIG_X86_32) += centaur.o |
13 | obj-$(CONFIG_X86_32) += transmeta.o | 13 | obj-$(CONFIG_X86_32) += transmeta.o |
14 | obj-$(CONFIG_X86_32) += intel.o | 14 | obj-$(CONFIG_X86_32) += intel.o |
15 | obj-$(CONFIG_X86_64) += intel_64.o | ||
15 | obj-$(CONFIG_X86_32) += umc.o | 16 | obj-$(CONFIG_X86_32) += umc.o |
16 | 17 | ||
17 | obj-$(CONFIG_X86_MCE) += mcheck/ | 18 | obj-$(CONFIG_X86_MCE) += mcheck/ |
diff --git a/arch/x86/kernel/cpu/intel_64.c b/arch/x86/kernel/cpu/intel_64.c new file mode 100644 index 000000000000..e5f929f6c3d4 --- /dev/null +++ b/arch/x86/kernel/cpu/intel_64.c | |||
@@ -0,0 +1,97 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/smp.h> | ||
3 | #include <asm/processor.h> | ||
4 | #include <asm/ptrace.h> | ||
5 | #include <asm/topology.h> | ||
6 | #include <asm/numa_64.h> | ||
7 | |||
8 | void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | ||
9 | { | ||
10 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | ||
11 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | ||
12 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
13 | } | ||
14 | |||
15 | /* | ||
16 | * find out the number of processor cores on the die | ||
17 | */ | ||
18 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | ||
19 | { | ||
20 | unsigned int eax, t; | ||
21 | |||
22 | if (c->cpuid_level < 4) | ||
23 | return 1; | ||
24 | |||
25 | cpuid_count(4, 0, &eax, &t, &t, &t); | ||
26 | |||
27 | if (eax & 0x1f) | ||
28 | return ((eax >> 26) + 1); | ||
29 | else | ||
30 | return 1; | ||
31 | } | ||
32 | |||
33 | static void __cpuinit srat_detect_node(void) | ||
34 | { | ||
35 | #ifdef CONFIG_NUMA | ||
36 | unsigned node; | ||
37 | int cpu = smp_processor_id(); | ||
38 | int apicid = hard_smp_processor_id(); | ||
39 | |||
40 | /* Don't do the funky fallback heuristics the AMD version employs | ||
41 | for now. */ | ||
42 | node = apicid_to_node[apicid]; | ||
43 | if (node == NUMA_NO_NODE || !node_online(node)) | ||
44 | node = first_node(node_online_map); | ||
45 | numa_set_node(cpu, node); | ||
46 | |||
47 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
48 | #endif | ||
49 | } | ||
50 | |||
51 | void __cpuinit init_intel(struct cpuinfo_x86 *c) | ||
52 | { | ||
53 | /* Cache sizes */ | ||
54 | unsigned n; | ||
55 | |||
56 | init_intel_cacheinfo(c); | ||
57 | if (c->cpuid_level > 9) { | ||
58 | unsigned eax = cpuid_eax(10); | ||
59 | /* Check for version and the number of counters */ | ||
60 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) | ||
61 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); | ||
62 | } | ||
63 | |||
64 | if (cpu_has_ds) { | ||
65 | unsigned int l1, l2; | ||
66 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | ||
67 | if (!(l1 & (1<<11))) | ||
68 | set_cpu_cap(c, X86_FEATURE_BTS); | ||
69 | if (!(l1 & (1<<12))) | ||
70 | set_cpu_cap(c, X86_FEATURE_PEBS); | ||
71 | } | ||
72 | |||
73 | |||
74 | if (cpu_has_bts) | ||
75 | ds_init_intel(c); | ||
76 | |||
77 | n = c->extended_cpuid_level; | ||
78 | if (n >= 0x80000008) { | ||
79 | unsigned eax = cpuid_eax(0x80000008); | ||
80 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
81 | c->x86_phys_bits = eax & 0xff; | ||
82 | /* CPUID workaround for Intel 0F34 CPU */ | ||
83 | if (c->x86_vendor == X86_VENDOR_INTEL && | ||
84 | c->x86 == 0xF && c->x86_model == 0x3 && | ||
85 | c->x86_mask == 0x4) | ||
86 | c->x86_phys_bits = 36; | ||
87 | } | ||
88 | |||
89 | if (c->x86 == 15) | ||
90 | c->x86_cache_alignment = c->x86_clflush_size * 2; | ||
91 | if (c->x86 == 6) | ||
92 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
93 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
94 | c->x86_max_cores = intel_num_cpu_cores(c); | ||
95 | |||
96 | srat_detect_node(); | ||
97 | } | ||
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c index b07b1997ed97..c4e6a0b6c303 100644 --- a/arch/x86/kernel/setup_64.c +++ b/arch/x86/kernel/setup_64.c | |||
@@ -622,97 +622,6 @@ out: | |||
622 | #endif | 622 | #endif |
623 | } | 623 | } |
624 | 624 | ||
625 | /* | ||
626 | * find out the number of processor cores on the die | ||
627 | */ | ||
628 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | ||
629 | { | ||
630 | unsigned int eax, t; | ||
631 | |||
632 | if (c->cpuid_level < 4) | ||
633 | return 1; | ||
634 | |||
635 | cpuid_count(4, 0, &eax, &t, &t, &t); | ||
636 | |||
637 | if (eax & 0x1f) | ||
638 | return ((eax >> 26) + 1); | ||
639 | else | ||
640 | return 1; | ||
641 | } | ||
642 | |||
643 | static void __cpuinit srat_detect_node(void) | ||
644 | { | ||
645 | #ifdef CONFIG_NUMA | ||
646 | unsigned node; | ||
647 | int cpu = smp_processor_id(); | ||
648 | int apicid = hard_smp_processor_id(); | ||
649 | |||
650 | /* Don't do the funky fallback heuristics the AMD version employs | ||
651 | for now. */ | ||
652 | node = apicid_to_node[apicid]; | ||
653 | if (node == NUMA_NO_NODE || !node_online(node)) | ||
654 | node = first_node(node_online_map); | ||
655 | numa_set_node(cpu, node); | ||
656 | |||
657 | printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node); | ||
658 | #endif | ||
659 | } | ||
660 | |||
661 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | ||
662 | { | ||
663 | if ((c->x86 == 0xf && c->x86_model >= 0x03) || | ||
664 | (c->x86 == 0x6 && c->x86_model >= 0x0e)) | ||
665 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | ||
666 | } | ||
667 | |||
668 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) | ||
669 | { | ||
670 | /* Cache sizes */ | ||
671 | unsigned n; | ||
672 | |||
673 | init_intel_cacheinfo(c); | ||
674 | if (c->cpuid_level > 9) { | ||
675 | unsigned eax = cpuid_eax(10); | ||
676 | /* Check for version and the number of counters */ | ||
677 | if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) | ||
678 | set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON); | ||
679 | } | ||
680 | |||
681 | if (cpu_has_ds) { | ||
682 | unsigned int l1, l2; | ||
683 | rdmsr(MSR_IA32_MISC_ENABLE, l1, l2); | ||
684 | if (!(l1 & (1<<11))) | ||
685 | set_cpu_cap(c, X86_FEATURE_BTS); | ||
686 | if (!(l1 & (1<<12))) | ||
687 | set_cpu_cap(c, X86_FEATURE_PEBS); | ||
688 | } | ||
689 | |||
690 | |||
691 | if (cpu_has_bts) | ||
692 | ds_init_intel(c); | ||
693 | |||
694 | n = c->extended_cpuid_level; | ||
695 | if (n >= 0x80000008) { | ||
696 | unsigned eax = cpuid_eax(0x80000008); | ||
697 | c->x86_virt_bits = (eax >> 8) & 0xff; | ||
698 | c->x86_phys_bits = eax & 0xff; | ||
699 | /* CPUID workaround for Intel 0F34 CPU */ | ||
700 | if (c->x86_vendor == X86_VENDOR_INTEL && | ||
701 | c->x86 == 0xF && c->x86_model == 0x3 && | ||
702 | c->x86_mask == 0x4) | ||
703 | c->x86_phys_bits = 36; | ||
704 | } | ||
705 | |||
706 | if (c->x86 == 15) | ||
707 | c->x86_cache_alignment = c->x86_clflush_size * 2; | ||
708 | if (c->x86 == 6) | ||
709 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
710 | set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC); | ||
711 | c->x86_max_cores = intel_num_cpu_cores(c); | ||
712 | |||
713 | srat_detect_node(); | ||
714 | } | ||
715 | |||
716 | static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) | 625 | static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) |
717 | { | 626 | { |
718 | if (c->x86 == 0x6 && c->x86_model >= 0xf) | 627 | if (c->x86 == 0x6 && c->x86_model >= 0xf) |
@@ -756,6 +665,8 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | |||
756 | // FIXME: Needs to use cpu_vendor_dev_register | 665 | // FIXME: Needs to use cpu_vendor_dev_register |
757 | extern void __cpuinit early_init_amd(struct cpuinfo_x86 *c); | 666 | extern void __cpuinit early_init_amd(struct cpuinfo_x86 *c); |
758 | extern void __cpuinit init_amd(struct cpuinfo_x86 *c); | 667 | extern void __cpuinit init_amd(struct cpuinfo_x86 *c); |
668 | extern void __cpuinit early_init_intel(struct cpuinfo_x86 *c); | ||
669 | extern void __cpuinit init_intel(struct cpuinfo_x86 *c); | ||
759 | 670 | ||
760 | /* Do some early cpuid on the boot CPU to get some parameter that are | 671 | /* Do some early cpuid on the boot CPU to get some parameter that are |
761 | needed before check_bugs. Everything advanced is in identify_cpu | 672 | needed before check_bugs. Everything advanced is in identify_cpu |