author     David S. Miller <davem@sunset.davemloft.net>   2007-05-25 18:49:59 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-05-29 05:49:41 -0400
commit     5cbc30737398b49f62ae8603129ce43ac7db1a41 (patch)
tree       45d01a686865e6fd9c32b670f77af1e37db03008 /arch
parent     e01c0d6d8cf29c1c11725837b265598cab687952 (diff)
[SPARC64]: Use machine description and OBP properly for cpu probing.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/kernel/Makefile      |    4
-rw-r--r--  arch/sparc64/kernel/devices.c     |  196
-rw-r--r--  arch/sparc64/kernel/entry.S       |    9
-rw-r--r--  arch/sparc64/kernel/irq.c         |   83
-rw-r--r--  arch/sparc64/kernel/mdesc.c       |  619
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c   |    7
-rw-r--r--  arch/sparc64/kernel/prom.c        |  148
-rw-r--r--  arch/sparc64/kernel/setup.c       |   18
-rw-r--r--  arch/sparc64/kernel/smp.c         |  136
-rw-r--r--  arch/sparc64/kernel/sun4v_ivec.S  |   30
-rw-r--r--  arch/sparc64/kernel/time.c        |    9
-rw-r--r--  arch/sparc64/kernel/traps.c       |   27
-rw-r--r--  arch/sparc64/mm/init.c            |   17
13 files changed, 923 insertions, 380 deletions
diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile
index 18e31a8db017..d8d19093d12f 100644
--- a/arch/sparc64/kernel/Makefile
+++ b/arch/sparc64/kernel/Makefile
@@ -8,11 +8,11 @@ EXTRA_CFLAGS := -Werror | |||
8 | extra-y := head.o init_task.o vmlinux.lds | 8 | extra-y := head.o init_task.o vmlinux.lds |
9 | 9 | ||
10 | obj-y := process.o setup.o cpu.o idprom.o \ | 10 | obj-y := process.o setup.o cpu.o idprom.o \ |
11 | traps.o devices.o auxio.o una_asm.o \ | 11 | traps.o auxio.o una_asm.o \ |
12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ | 12 | irq.o ptrace.o time.o sys_sparc.o signal.o \ |
13 | unaligned.o central.o pci.o starfire.o semaphore.o \ | 13 | unaligned.o central.o pci.o starfire.o semaphore.o \ |
14 | power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ | 14 | power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ |
15 | visemul.o prom.o of_device.o hvapi.o sstate.o | 15 | visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o |
16 | 16 | ||
17 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 17 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
18 | obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ | 18 | obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \ |
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
deleted file mode 100644
index 0e03c8e218cd..000000000000
--- a/arch/sparc64/kernel/devices.c
+++ /dev/null
@@ -1,196 +0,0 @@ | |||
1 | /* devices.c: Initial scan of the prom device tree for important | ||
2 | * Sparc device nodes which we need to find. | ||
3 | * | ||
4 | * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/threads.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/ioport.h> | ||
11 | #include <linux/string.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/bootmem.h> | ||
15 | |||
16 | #include <asm/page.h> | ||
17 | #include <asm/oplib.h> | ||
18 | #include <asm/system.h> | ||
19 | #include <asm/smp.h> | ||
20 | #include <asm/spitfire.h> | ||
21 | #include <asm/timer.h> | ||
22 | #include <asm/cpudata.h> | ||
23 | |||
24 | /* Used to synchronize accesses to NatSemi SUPER I/O chip configure | ||
25 | * operations in asm/ns87303.h | ||
26 | */ | ||
27 | DEFINE_SPINLOCK(ns87303_lock); | ||
28 | |||
29 | extern void cpu_probe(void); | ||
30 | extern void central_probe(void); | ||
31 | |||
32 | static const char *cpu_mid_prop(void) | ||
33 | { | ||
34 | if (tlb_type == spitfire) | ||
35 | return "upa-portid"; | ||
36 | return "portid"; | ||
37 | } | ||
38 | |||
39 | static int get_cpu_mid(struct device_node *dp) | ||
40 | { | ||
41 | struct property *prop; | ||
42 | |||
43 | if (tlb_type == hypervisor) { | ||
44 | struct linux_prom64_registers *reg; | ||
45 | int len; | ||
46 | |||
47 | prop = of_find_property(dp, "cpuid", &len); | ||
48 | if (prop && len == 4) | ||
49 | return *(int *) prop->value; | ||
50 | |||
51 | prop = of_find_property(dp, "reg", NULL); | ||
52 | reg = prop->value; | ||
53 | return (reg[0].phys_addr >> 32) & 0x0fffffffUL; | ||
54 | } else { | ||
55 | const char *prop_name = cpu_mid_prop(); | ||
56 | |||
57 | prop = of_find_property(dp, prop_name, NULL); | ||
58 | if (prop) | ||
59 | return *(int *) prop->value; | ||
60 | return 0; | ||
61 | } | ||
62 | } | ||
63 | |||
64 | static int check_cpu_node(struct device_node *dp, int *cur_inst, | ||
65 | int (*compare)(struct device_node *, int, void *), | ||
66 | void *compare_arg, | ||
67 | struct device_node **dev_node, int *mid) | ||
68 | { | ||
69 | if (!compare(dp, *cur_inst, compare_arg)) { | ||
70 | if (dev_node) | ||
71 | *dev_node = dp; | ||
72 | if (mid) | ||
73 | *mid = get_cpu_mid(dp); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | (*cur_inst)++; | ||
78 | |||
79 | return -ENODEV; | ||
80 | } | ||
81 | |||
82 | static int __cpu_find_by(int (*compare)(struct device_node *, int, void *), | ||
83 | void *compare_arg, | ||
84 | struct device_node **dev_node, int *mid) | ||
85 | { | ||
86 | struct device_node *dp; | ||
87 | int cur_inst; | ||
88 | |||
89 | cur_inst = 0; | ||
90 | for_each_node_by_type(dp, "cpu") { | ||
91 | int err = check_cpu_node(dp, &cur_inst, | ||
92 | compare, compare_arg, | ||
93 | dev_node, mid); | ||
94 | if (err == 0) | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | return -ENODEV; | ||
99 | } | ||
100 | |||
101 | static int cpu_instance_compare(struct device_node *dp, int instance, void *_arg) | ||
102 | { | ||
103 | int desired_instance = (int) (long) _arg; | ||
104 | |||
105 | if (instance == desired_instance) | ||
106 | return 0; | ||
107 | return -ENODEV; | ||
108 | } | ||
109 | |||
110 | int cpu_find_by_instance(int instance, struct device_node **dev_node, int *mid) | ||
111 | { | ||
112 | return __cpu_find_by(cpu_instance_compare, (void *)(long)instance, | ||
113 | dev_node, mid); | ||
114 | } | ||
115 | |||
116 | static int cpu_mid_compare(struct device_node *dp, int instance, void *_arg) | ||
117 | { | ||
118 | int desired_mid = (int) (long) _arg; | ||
119 | int this_mid; | ||
120 | |||
121 | this_mid = get_cpu_mid(dp); | ||
122 | if (this_mid == desired_mid) | ||
123 | return 0; | ||
124 | return -ENODEV; | ||
125 | } | ||
126 | |||
127 | int cpu_find_by_mid(int mid, struct device_node **dev_node) | ||
128 | { | ||
129 | return __cpu_find_by(cpu_mid_compare, (void *)(long)mid, | ||
130 | dev_node, NULL); | ||
131 | } | ||
132 | |||
133 | void __init device_scan(void) | ||
134 | { | ||
135 | /* FIX ME FAST... -DaveM */ | ||
136 | ioport_resource.end = 0xffffffffffffffffUL; | ||
137 | |||
138 | prom_printf("Booting Linux...\n"); | ||
139 | |||
140 | #ifndef CONFIG_SMP | ||
141 | { | ||
142 | struct device_node *dp; | ||
143 | int err, def; | ||
144 | |||
145 | err = cpu_find_by_instance(0, &dp, NULL); | ||
146 | if (err) { | ||
147 | prom_printf("No cpu nodes, cannot continue\n"); | ||
148 | prom_halt(); | ||
149 | } | ||
150 | cpu_data(0).clock_tick = | ||
151 | of_getintprop_default(dp, "clock-frequency", 0); | ||
152 | |||
153 | def = ((tlb_type == hypervisor) ? | ||
154 | (8 * 1024) : | ||
155 | (16 * 1024)); | ||
156 | cpu_data(0).dcache_size = of_getintprop_default(dp, | ||
157 | "dcache-size", | ||
158 | def); | ||
159 | |||
160 | def = 32; | ||
161 | cpu_data(0).dcache_line_size = | ||
162 | of_getintprop_default(dp, "dcache-line-size", def); | ||
163 | |||
164 | def = 16 * 1024; | ||
165 | cpu_data(0).icache_size = of_getintprop_default(dp, | ||
166 | "icache-size", | ||
167 | def); | ||
168 | |||
169 | def = 32; | ||
170 | cpu_data(0).icache_line_size = | ||
171 | of_getintprop_default(dp, "icache-line-size", def); | ||
172 | |||
173 | def = ((tlb_type == hypervisor) ? | ||
174 | (3 * 1024 * 1024) : | ||
175 | (4 * 1024 * 1024)); | ||
176 | cpu_data(0).ecache_size = of_getintprop_default(dp, | ||
177 | "ecache-size", | ||
178 | def); | ||
179 | |||
180 | def = 64; | ||
181 | cpu_data(0).ecache_line_size = | ||
182 | of_getintprop_default(dp, "ecache-line-size", def); | ||
183 | printk("CPU[0]: Caches " | ||
184 | "D[sz(%d):line_sz(%d)] " | ||
185 | "I[sz(%d):line_sz(%d)] " | ||
186 | "E[sz(%d):line_sz(%d)]\n", | ||
187 | cpu_data(0).dcache_size, cpu_data(0).dcache_line_size, | ||
188 | cpu_data(0).icache_size, cpu_data(0).icache_line_size, | ||
189 | cpu_data(0).ecache_size, cpu_data(0).ecache_line_size); | ||
190 | } | ||
191 | #endif | ||
192 | |||
193 | central_probe(); | ||
194 | |||
195 | cpu_probe(); | ||
196 | } | ||
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index b5dbd5709155..f8cc3c0731c7 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1949,3 +1949,12 @@ sun4v_mach_set_soft_state: | |||
1949 | ta HV_FAST_TRAP | 1949 | ta HV_FAST_TRAP |
1950 | retl | 1950 | retl |
1951 | nop | 1951 | nop |
1952 | |||
1953 | .globl sun4v_mach_desc | ||
1954 | sun4v_mach_desc: | ||
1955 | mov %o2, %o4 | ||
1956 | mov HV_FAST_MACH_DESC, %o5 | ||
1957 | ta HV_FAST_TRAP | ||
1958 | stx %o1, [%o4] | ||
1959 | retl | ||
1960 | nop | ||
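The new sun4v_mach_desc stub passes a buffer physical address in %o0, the buffer length in %o1, and a pointer in %o2 that receives the length the hypervisor reports back; the return value is the hypervisor status. sun4v_mdesc_init() in mdesc.c (added below) calls it twice, first with a zero-sized buffer to learn the required size, then with a real buffer. A minimal C sketch of that calling pattern follows; the fetch helper and its error handling are illustrative, not part of the patch.

```c
/* Hypothetical helper mirroring sun4v_mdesc_init() below; the
 * prototype of sun4v_mach_desc() is inferred from its use in
 * mdesc.c and from the assembly stub above.
 */
extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
				     unsigned long buf_len,
				     unsigned long *real_buf_len);

static void * __init fetch_mdesc_sketch(unsigned long *lenp)
{
	unsigned long len, real_len, status;
	void *buf;

	/* Pass 1: zero buffer, the hypervisor reports the MD size. */
	(void) sun4v_mach_desc(0UL, 0UL, &len);

	/* Pass 2: copy the MD into a real, physically addressed buffer. */
	buf = mdesc_early_alloc(len);
	status = sun4v_mach_desc(__pa(buf), len, &real_len);
	if (status != HV_EOK || real_len > len)
		return NULL;	/* sun4v_mdesc_init() prom_halt()s instead */

	*lenp = real_len;
	return buf;
}
```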
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3edc18e1b818..a36f8dd0c021 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -171,8 +171,6 @@ skip: | |||
171 | return 0; | 171 | return 0; |
172 | } | 172 | } |
173 | 173 | ||
174 | extern unsigned long real_hard_smp_processor_id(void); | ||
175 | |||
176 | static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) | 174 | static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid) |
177 | { | 175 | { |
178 | unsigned int tid; | 176 | unsigned int tid; |
@@ -694,9 +692,20 @@ void init_irqwork_curcpu(void) | |||
694 | trap_block[cpu].irq_worklist = 0; | 692 | trap_block[cpu].irq_worklist = 0; |
695 | } | 693 | } |
696 | 694 | ||
697 | static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type) | 695 | /* Please be very careful with register_one_mondo() and |
696 | * sun4v_register_mondo_queues(). | ||
697 | * | ||
698 | * On SMP this gets invoked from the CPU trampoline before | ||
699 | * the cpu has fully taken over the trap table from OBP, | ||
700 | * and it's kernel stack + %g6 thread register state is | ||
701 | * not fully cooked yet. | ||
702 | * | ||
703 | * Therefore you cannot make any OBP calls, not even prom_printf, | ||
704 | * from these two routines. | ||
705 | */ | ||
706 | static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) | ||
698 | { | 707 | { |
699 | unsigned long num_entries = 128; | 708 | unsigned long num_entries = (qmask + 1) / 64; |
700 | unsigned long status; | 709 | unsigned long status; |
701 | 710 | ||
702 | status = sun4v_cpu_qconf(type, paddr, num_entries); | 711 | status = sun4v_cpu_qconf(type, paddr, num_entries); |
@@ -711,44 +720,58 @@ static void __cpuinit sun4v_register_mondo_queues(int this_cpu) | |||
711 | { | 720 | { |
712 | struct trap_per_cpu *tb = &trap_block[this_cpu]; | 721 | struct trap_per_cpu *tb = &trap_block[this_cpu]; |
713 | 722 | ||
714 | register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO); | 723 | register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO, |
715 | register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO); | 724 | tb->cpu_mondo_qmask); |
716 | register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR); | 725 | register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO, |
717 | register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR); | 726 | tb->dev_mondo_qmask); |
727 | register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR, | ||
728 | tb->resum_qmask); | ||
729 | register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR, | ||
730 | tb->nonresum_qmask); | ||
718 | } | 731 | } |
719 | 732 | ||
720 | static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, int use_bootmem) | 733 | static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem) |
721 | { | 734 | { |
722 | void *page; | 735 | unsigned long size = PAGE_ALIGN(qmask + 1); |
736 | unsigned long order = get_order(size); | ||
737 | void *p = NULL; | ||
723 | 738 | ||
724 | if (use_bootmem) | 739 | if (use_bootmem) { |
725 | page = alloc_bootmem_low_pages(PAGE_SIZE); | 740 | p = __alloc_bootmem_low(size, size, 0); |
726 | else | 741 | } else { |
727 | page = (void *) get_zeroed_page(GFP_ATOMIC); | 742 | struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order); |
743 | if (page) | ||
744 | p = page_address(page); | ||
745 | } | ||
728 | 746 | ||
729 | if (!page) { | 747 | if (!p) { |
730 | prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); | 748 | prom_printf("SUN4V: Error, cannot allocate mondo queue.\n"); |
731 | prom_halt(); | 749 | prom_halt(); |
732 | } | 750 | } |
733 | 751 | ||
734 | *pa_ptr = __pa(page); | 752 | *pa_ptr = __pa(p); |
735 | } | 753 | } |
736 | 754 | ||
737 | static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, int use_bootmem) | 755 | static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem) |
738 | { | 756 | { |
739 | void *page; | 757 | unsigned long size = PAGE_ALIGN(qmask + 1); |
758 | unsigned long order = get_order(size); | ||
759 | void *p = NULL; | ||
740 | 760 | ||
741 | if (use_bootmem) | 761 | if (use_bootmem) { |
742 | page = alloc_bootmem_low_pages(PAGE_SIZE); | 762 | p = __alloc_bootmem_low(size, size, 0); |
743 | else | 763 | } else { |
744 | page = (void *) get_zeroed_page(GFP_ATOMIC); | 764 | struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order); |
765 | if (page) | ||
766 | p = page_address(page); | ||
767 | } | ||
745 | 768 | ||
746 | if (!page) { | 769 | if (!p) { |
747 | prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); | 770 | prom_printf("SUN4V: Error, cannot allocate kbuf page.\n"); |
748 | prom_halt(); | 771 | prom_halt(); |
749 | } | 772 | } |
750 | 773 | ||
751 | *pa_ptr = __pa(page); | 774 | *pa_ptr = __pa(p); |
752 | } | 775 | } |
753 | 776 | ||
754 | static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem) | 777 | static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem) |
@@ -779,12 +802,12 @@ void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int | |||
779 | struct trap_per_cpu *tb = &trap_block[cpu]; | 802 | struct trap_per_cpu *tb = &trap_block[cpu]; |
780 | 803 | ||
781 | if (alloc) { | 804 | if (alloc) { |
782 | alloc_one_mondo(&tb->cpu_mondo_pa, use_bootmem); | 805 | alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem); |
783 | alloc_one_mondo(&tb->dev_mondo_pa, use_bootmem); | 806 | alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem); |
784 | alloc_one_mondo(&tb->resum_mondo_pa, use_bootmem); | 807 | alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem); |
785 | alloc_one_kbuf(&tb->resum_kernel_buf_pa, use_bootmem); | 808 | alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem); |
786 | alloc_one_mondo(&tb->nonresum_mondo_pa, use_bootmem); | 809 | alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem); |
787 | alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, use_bootmem); | 810 | alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem); |
788 | 811 | ||
789 | init_cpu_send_mondo_info(tb, use_bootmem); | 812 | init_cpu_send_mondo_info(tb, use_bootmem); |
790 | } | 813 | } |
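The qmask values now threaded through these allocators come from the machine description (see get_one_mondo_bits() in mdesc.c below): a queue advertised as N bits holds 2^N entries of 64 bytes each, and the qmask is that byte size minus one. A small sketch of the arithmetic, shown only to connect the new code to the old hard-coded constants (128 entries and the 8192 - 1 head/tail mask, i.e. the 7-bit default):

```c
/* Illustrative arithmetic, not part of the patch.  Mondo queue
 * entries are 64 bytes, which is where both factors of 64 come from.
 */
static unsigned long qmask_from_bits(unsigned long nbits)
{
	/* get_one_mondo_bits(): ((1 << nbits) * 64) - 1 */
	return ((1UL << nbits) * 64UL) - 1UL;
}

/* Defaults used by get_mondo_data() when the MD lacks a property:
 *
 *   cpu/dev mondo:  7 bits -> 128 entries -> 8192 bytes -> qmask 8191
 *   resumable:      6 bits ->  64 entries -> 4096 bytes -> qmask 4095
 *   non-resumable:  2 bits ->   4 entries ->  256 bytes -> qmask  255
 *
 * register_one_mondo() recovers the entry count for sun4v_cpu_qconf()
 * as (qmask + 1) / 64, and sun4v_ivec.S masks the queue head with the
 * same qmask instead of the old hard-coded 8192 - 1.
 */
```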
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
new file mode 100644
index 000000000000..9246c2cf9574
--- /dev/null
+++ b/arch/sparc64/kernel/mdesc.c
@@ -0,0 +1,619 @@ | |||
1 | /* mdesc.c: Sun4V machine description handling. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/bootmem.h> | ||
8 | #include <linux/log2.h> | ||
9 | |||
10 | #include <asm/hypervisor.h> | ||
11 | #include <asm/mdesc.h> | ||
12 | #include <asm/prom.h> | ||
13 | #include <asm/oplib.h> | ||
14 | #include <asm/smp.h> | ||
15 | |||
16 | /* Unlike the OBP device tree, the machine description is a full-on | ||
17 | * DAG. An arbitrary number of ARCs are possible from one | ||
18 | * node to other nodes and thus we can't use the OBP device_node | ||
19 | * data structure to represent these nodes inside of the kernel. | ||
20 | * | ||
21 | * Actually, it isn't even a DAG, because there are back pointers | ||
22 | * which create cycles in the graph. | ||
23 | * | ||
24 | * mdesc_hdr and mdesc_elem describe the layout of the data structure | ||
25 | * we get from the Hypervisor. | ||
26 | */ | ||
27 | struct mdesc_hdr { | ||
28 | u32 version; /* Transport version */ | ||
29 | u32 node_sz; /* node block size */ | ||
30 | u32 name_sz; /* name block size */ | ||
31 | u32 data_sz; /* data block size */ | ||
32 | }; | ||
33 | |||
34 | struct mdesc_elem { | ||
35 | u8 tag; | ||
36 | #define MD_LIST_END 0x00 | ||
37 | #define MD_NODE 0x4e | ||
38 | #define MD_NODE_END 0x45 | ||
39 | #define MD_NOOP 0x20 | ||
40 | #define MD_PROP_ARC 0x61 | ||
41 | #define MD_PROP_VAL 0x76 | ||
42 | #define MD_PROP_STR 0x73 | ||
43 | #define MD_PROP_DATA 0x64 | ||
44 | u8 name_len; | ||
45 | u16 resv; | ||
46 | u32 name_offset; | ||
47 | union { | ||
48 | struct { | ||
49 | u32 data_len; | ||
50 | u32 data_offset; | ||
51 | } data; | ||
52 | u64 val; | ||
53 | } d; | ||
54 | }; | ||
55 | |||
56 | static struct mdesc_hdr *main_mdesc; | ||
57 | static struct mdesc_node *allnodes; | ||
58 | |||
59 | static struct mdesc_node *allnodes_tail; | ||
60 | static unsigned int unique_id; | ||
61 | |||
62 | static struct mdesc_node **mdesc_hash; | ||
63 | static unsigned int mdesc_hash_size; | ||
64 | |||
65 | static inline unsigned int node_hashfn(u64 node) | ||
66 | { | ||
67 | return ((unsigned int) (node ^ (node >> 8) ^ (node >> 16))) | ||
68 | & (mdesc_hash_size - 1); | ||
69 | } | ||
70 | |||
71 | static inline void hash_node(struct mdesc_node *mp) | ||
72 | { | ||
73 | struct mdesc_node **head = &mdesc_hash[node_hashfn(mp->node)]; | ||
74 | |||
75 | mp->hash_next = *head; | ||
76 | *head = mp; | ||
77 | |||
78 | if (allnodes_tail) { | ||
79 | allnodes_tail->allnodes_next = mp; | ||
80 | allnodes_tail = mp; | ||
81 | } else { | ||
82 | allnodes = allnodes_tail = mp; | ||
83 | } | ||
84 | } | ||
85 | |||
86 | static struct mdesc_node *find_node(u64 node) | ||
87 | { | ||
88 | struct mdesc_node *mp = mdesc_hash[node_hashfn(node)]; | ||
89 | |||
90 | while (mp) { | ||
91 | if (mp->node == node) | ||
92 | return mp; | ||
93 | |||
94 | mp = mp->hash_next; | ||
95 | } | ||
96 | return NULL; | ||
97 | } | ||
98 | |||
99 | struct property *md_find_property(const struct mdesc_node *mp, | ||
100 | const char *name, | ||
101 | int *lenp) | ||
102 | { | ||
103 | struct property *pp; | ||
104 | |||
105 | for (pp = mp->properties; pp != 0; pp = pp->next) { | ||
106 | if (strcasecmp(pp->name, name) == 0) { | ||
107 | if (lenp) | ||
108 | *lenp = pp->length; | ||
109 | break; | ||
110 | } | ||
111 | } | ||
112 | return pp; | ||
113 | } | ||
114 | EXPORT_SYMBOL(md_find_property); | ||
115 | |||
116 | /* | ||
117 | * Find a property with a given name for a given node | ||
118 | * and return the value. | ||
119 | */ | ||
120 | const void *md_get_property(const struct mdesc_node *mp, const char *name, | ||
121 | int *lenp) | ||
122 | { | ||
123 | struct property *pp = md_find_property(mp, name, lenp); | ||
124 | return pp ? pp->value : NULL; | ||
125 | } | ||
126 | EXPORT_SYMBOL(md_get_property); | ||
127 | |||
128 | struct mdesc_node *md_find_node_by_name(struct mdesc_node *from, | ||
129 | const char *name) | ||
130 | { | ||
131 | struct mdesc_node *mp; | ||
132 | |||
133 | mp = from ? from->allnodes_next : allnodes; | ||
134 | for (; mp != NULL; mp = mp->allnodes_next) { | ||
135 | if (strcmp(mp->name, name) == 0) | ||
136 | break; | ||
137 | } | ||
138 | return mp; | ||
139 | } | ||
140 | EXPORT_SYMBOL(md_find_node_by_name); | ||
141 | |||
142 | static unsigned int mdesc_early_allocated; | ||
143 | |||
144 | static void * __init mdesc_early_alloc(unsigned long size) | ||
145 | { | ||
146 | void *ret; | ||
147 | |||
148 | ret = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL); | ||
149 | if (ret == NULL) { | ||
150 | prom_printf("MDESC: alloc of %lu bytes failed.\n", size); | ||
151 | prom_halt(); | ||
152 | } | ||
153 | |||
154 | memset(ret, 0, size); | ||
155 | |||
156 | mdesc_early_allocated += size; | ||
157 | |||
158 | return ret; | ||
159 | } | ||
160 | |||
161 | static unsigned int __init count_arcs(struct mdesc_elem *ep) | ||
162 | { | ||
163 | unsigned int ret = 0; | ||
164 | |||
165 | ep++; | ||
166 | while (ep->tag != MD_NODE_END) { | ||
167 | if (ep->tag == MD_PROP_ARC) | ||
168 | ret++; | ||
169 | ep++; | ||
170 | } | ||
171 | return ret; | ||
172 | } | ||
173 | |||
174 | static void __init mdesc_node_alloc(u64 node, struct mdesc_elem *ep, const char *names) | ||
175 | { | ||
176 | unsigned int num_arcs = count_arcs(ep); | ||
177 | struct mdesc_node *mp; | ||
178 | |||
179 | mp = mdesc_early_alloc(sizeof(*mp) + | ||
180 | (num_arcs * sizeof(struct mdesc_arc))); | ||
181 | mp->name = names + ep->name_offset; | ||
182 | mp->node = node; | ||
183 | mp->unique_id = unique_id++; | ||
184 | mp->num_arcs = num_arcs; | ||
185 | |||
186 | hash_node(mp); | ||
187 | } | ||
188 | |||
189 | static inline struct mdesc_elem *node_block(struct mdesc_hdr *mdesc) | ||
190 | { | ||
191 | return (struct mdesc_elem *) (mdesc + 1); | ||
192 | } | ||
193 | |||
194 | static inline void *name_block(struct mdesc_hdr *mdesc) | ||
195 | { | ||
196 | return ((void *) node_block(mdesc)) + mdesc->node_sz; | ||
197 | } | ||
198 | |||
199 | static inline void *data_block(struct mdesc_hdr *mdesc) | ||
200 | { | ||
201 | return ((void *) name_block(mdesc)) + mdesc->name_sz; | ||
202 | } | ||
203 | |||
204 | /* In order to avoid recursion (the graph can be very deep) we use a | ||
205 | * two pass algorithm. First we allocate all the nodes and hash them. | ||
206 | * Then we iterate over each node, filling in the arcs and properties. | ||
207 | */ | ||
208 | static void __init build_all_nodes(struct mdesc_hdr *mdesc) | ||
209 | { | ||
210 | struct mdesc_elem *start, *ep; | ||
211 | struct mdesc_node *mp; | ||
212 | const char *names; | ||
213 | void *data; | ||
214 | u64 last_node; | ||
215 | |||
216 | start = ep = node_block(mdesc); | ||
217 | last_node = mdesc->node_sz / 16; | ||
218 | |||
219 | names = name_block(mdesc); | ||
220 | |||
221 | while (1) { | ||
222 | u64 node = ep - start; | ||
223 | |||
224 | if (ep->tag == MD_LIST_END) | ||
225 | break; | ||
226 | |||
227 | if (ep->tag != MD_NODE) { | ||
228 | prom_printf("MDESC: Inconsistent element list.\n"); | ||
229 | prom_halt(); | ||
230 | } | ||
231 | |||
232 | mdesc_node_alloc(node, ep, names); | ||
233 | |||
234 | if (ep->d.val >= last_node) { | ||
235 | printk("MDESC: Warning, early break out of node scan.\n"); | ||
236 | printk("MDESC: Next node [%lu] last_node [%lu].\n", | ||
237 | node, last_node); | ||
238 | break; | ||
239 | } | ||
240 | |||
241 | ep = start + ep->d.val; | ||
242 | } | ||
243 | |||
244 | data = data_block(mdesc); | ||
245 | for (mp = allnodes; mp; mp = mp->allnodes_next) { | ||
246 | struct mdesc_elem *ep = start + mp->node; | ||
247 | struct property **link = &mp->properties; | ||
248 | unsigned int this_arc = 0; | ||
249 | |||
250 | ep++; | ||
251 | while (ep->tag != MD_NODE_END) { | ||
252 | switch (ep->tag) { | ||
253 | case MD_PROP_ARC: { | ||
254 | struct mdesc_node *target; | ||
255 | |||
256 | if (this_arc >= mp->num_arcs) { | ||
257 | prom_printf("MDESC: ARC overrun [%u:%u]\n", | ||
258 | this_arc, mp->num_arcs); | ||
259 | prom_halt(); | ||
260 | } | ||
261 | target = find_node(ep->d.val); | ||
262 | if (!target) { | ||
263 | printk("MDESC: Warning, arc points to " | ||
264 | "missing node, ignoring.\n"); | ||
265 | break; | ||
266 | } | ||
267 | mp->arcs[this_arc].name = | ||
268 | (names + ep->name_offset); | ||
269 | mp->arcs[this_arc].arc = target; | ||
270 | this_arc++; | ||
271 | break; | ||
272 | } | ||
273 | |||
274 | case MD_PROP_VAL: | ||
275 | case MD_PROP_STR: | ||
276 | case MD_PROP_DATA: { | ||
277 | struct property *p = mdesc_early_alloc(sizeof(*p)); | ||
278 | |||
279 | p->unique_id = unique_id++; | ||
280 | p->name = (char *) names + ep->name_offset; | ||
281 | if (ep->tag == MD_PROP_VAL) { | ||
282 | p->value = &ep->d.val; | ||
283 | p->length = 8; | ||
284 | } else { | ||
285 | p->value = data + ep->d.data.data_offset; | ||
286 | p->length = ep->d.data.data_len; | ||
287 | } | ||
288 | *link = p; | ||
289 | link = &p->next; | ||
290 | break; | ||
291 | } | ||
292 | |||
293 | case MD_NOOP: | ||
294 | break; | ||
295 | |||
296 | default: | ||
297 | printk("MDESC: Warning, ignoring unknown tag type %02x\n", | ||
298 | ep->tag); | ||
299 | } | ||
300 | ep++; | ||
301 | } | ||
302 | } | ||
303 | } | ||
304 | |||
305 | static unsigned int __init count_nodes(struct mdesc_hdr *mdesc) | ||
306 | { | ||
307 | struct mdesc_elem *ep = node_block(mdesc); | ||
308 | struct mdesc_elem *end; | ||
309 | unsigned int cnt = 0; | ||
310 | |||
311 | end = ((void *)ep) + mdesc->node_sz; | ||
312 | while (ep < end) { | ||
313 | if (ep->tag == MD_NODE) | ||
314 | cnt++; | ||
315 | ep++; | ||
316 | } | ||
317 | return cnt; | ||
318 | } | ||
319 | |||
320 | static void __init report_platform_properties(void) | ||
321 | { | ||
322 | struct mdesc_node *pn = md_find_node_by_name(NULL, "platform"); | ||
323 | const char *s; | ||
324 | const u64 *v; | ||
325 | |||
326 | if (!pn) { | ||
327 | prom_printf("No platform node in machine-description.\n"); | ||
328 | prom_halt(); | ||
329 | } | ||
330 | |||
331 | s = md_get_property(pn, "banner-name", NULL); | ||
332 | printk("PLATFORM: banner-name [%s]\n", s); | ||
333 | s = md_get_property(pn, "name", NULL); | ||
334 | printk("PLATFORM: name [%s]\n", s); | ||
335 | |||
336 | v = md_get_property(pn, "hostid", NULL); | ||
337 | if (v) | ||
338 | printk("PLATFORM: hostid [%08lx]\n", *v); | ||
339 | v = md_get_property(pn, "serial#", NULL); | ||
340 | if (v) | ||
341 | printk("PLATFORM: serial# [%08lx]\n", *v); | ||
342 | v = md_get_property(pn, "stick-frequency", NULL); | ||
343 | printk("PLATFORM: stick-frequency [%08lx]\n", *v); | ||
344 | v = md_get_property(pn, "mac-address", NULL); | ||
345 | if (v) | ||
346 | printk("PLATFORM: mac-address [%lx]\n", *v); | ||
347 | v = md_get_property(pn, "watchdog-resolution", NULL); | ||
348 | if (v) | ||
349 | printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v); | ||
350 | v = md_get_property(pn, "watchdog-max-timeout", NULL); | ||
351 | if (v) | ||
352 | printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v); | ||
353 | v = md_get_property(pn, "max-cpus", NULL); | ||
354 | if (v) | ||
355 | printk("PLATFORM: max-cpus [%lu]\n", *v); | ||
356 | } | ||
357 | |||
358 | static int inline find_in_proplist(const char *list, const char *match, int len) | ||
359 | { | ||
360 | while (len > 0) { | ||
361 | int l; | ||
362 | |||
363 | if (!strcmp(list, match)) | ||
364 | return 1; | ||
365 | l = strlen(list) + 1; | ||
366 | list += l; | ||
367 | len -= l; | ||
368 | } | ||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | static void __init fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_node *mp) | ||
373 | { | ||
374 | const u64 *level = md_get_property(mp, "level", NULL); | ||
375 | const u64 *size = md_get_property(mp, "size", NULL); | ||
376 | const u64 *line_size = md_get_property(mp, "line-size", NULL); | ||
377 | const char *type; | ||
378 | int type_len; | ||
379 | |||
380 | type = md_get_property(mp, "type", &type_len); | ||
381 | |||
382 | switch (*level) { | ||
383 | case 1: | ||
384 | if (find_in_proplist(type, "instn", type_len)) { | ||
385 | c->icache_size = *size; | ||
386 | c->icache_line_size = *line_size; | ||
387 | } else if (find_in_proplist(type, "data", type_len)) { | ||
388 | c->dcache_size = *size; | ||
389 | c->dcache_line_size = *line_size; | ||
390 | } | ||
391 | break; | ||
392 | |||
393 | case 2: | ||
394 | c->ecache_size = *size; | ||
395 | c->ecache_line_size = *line_size; | ||
396 | break; | ||
397 | |||
398 | default: | ||
399 | break; | ||
400 | } | ||
401 | |||
402 | if (*level == 1) { | ||
403 | unsigned int i; | ||
404 | |||
405 | for (i = 0; i < mp->num_arcs; i++) { | ||
406 | struct mdesc_node *t = mp->arcs[i].arc; | ||
407 | |||
408 | if (strcmp(mp->arcs[i].name, "fwd")) | ||
409 | continue; | ||
410 | |||
411 | if (!strcmp(t->name, "cache")) | ||
412 | fill_in_one_cache(c, t); | ||
413 | } | ||
414 | } | ||
415 | } | ||
416 | |||
417 | static void __init mark_core_ids(struct mdesc_node *mp, int core_id) | ||
418 | { | ||
419 | unsigned int i; | ||
420 | |||
421 | for (i = 0; i < mp->num_arcs; i++) { | ||
422 | struct mdesc_node *t = mp->arcs[i].arc; | ||
423 | const u64 *id; | ||
424 | |||
425 | if (strcmp(mp->arcs[i].name, "back")) | ||
426 | continue; | ||
427 | |||
428 | if (!strcmp(t->name, "cpu")) { | ||
429 | id = md_get_property(t, "id", NULL); | ||
430 | if (*id < NR_CPUS) | ||
431 | cpu_data(*id).core_id = core_id; | ||
432 | } else { | ||
433 | unsigned int j; | ||
434 | |||
435 | for (j = 0; j < t->num_arcs; j++) { | ||
436 | struct mdesc_node *n = t->arcs[j].arc; | ||
437 | |||
438 | if (strcmp(t->arcs[j].name, "back")) | ||
439 | continue; | ||
440 | |||
441 | if (strcmp(n->name, "cpu")) | ||
442 | continue; | ||
443 | |||
444 | id = md_get_property(n, "id", NULL); | ||
445 | if (*id < NR_CPUS) | ||
446 | cpu_data(*id).core_id = core_id; | ||
447 | } | ||
448 | } | ||
449 | } | ||
450 | } | ||
451 | |||
452 | static void __init set_core_ids(void) | ||
453 | { | ||
454 | struct mdesc_node *mp; | ||
455 | int idx; | ||
456 | |||
457 | idx = 1; | ||
458 | md_for_each_node_by_name(mp, "cache") { | ||
459 | const u64 *level = md_get_property(mp, "level", NULL); | ||
460 | const char *type; | ||
461 | int len; | ||
462 | |||
463 | if (*level != 1) | ||
464 | continue; | ||
465 | |||
466 | type = md_get_property(mp, "type", &len); | ||
467 | if (!find_in_proplist(type, "instn", len)) | ||
468 | continue; | ||
469 | |||
470 | mark_core_ids(mp, idx); | ||
471 | |||
472 | idx++; | ||
473 | } | ||
474 | } | ||
475 | |||
476 | static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def) | ||
477 | { | ||
478 | u64 val; | ||
479 | |||
480 | if (!p) | ||
481 | goto use_default; | ||
482 | val = *p; | ||
483 | |||
484 | if (!val || val >= 64) | ||
485 | goto use_default; | ||
486 | |||
487 | *mask = ((1U << val) * 64U) - 1U; | ||
488 | return; | ||
489 | |||
490 | use_default: | ||
491 | *mask = ((1U << def) * 64U) - 1U; | ||
492 | } | ||
493 | |||
494 | static void __init get_mondo_data(struct mdesc_node *mp, struct trap_per_cpu *tb) | ||
495 | { | ||
496 | const u64 *val; | ||
497 | |||
498 | val = md_get_property(mp, "q-cpu-mondo-#bits", NULL); | ||
499 | get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7); | ||
500 | |||
501 | val = md_get_property(mp, "q-dev-mondo-#bits", NULL); | ||
502 | get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7); | ||
503 | |||
504 | val = md_get_property(mp, "q-resumable-#bits", NULL); | ||
505 | get_one_mondo_bits(val, &tb->resum_qmask, 6); | ||
506 | |||
507 | val = md_get_property(mp, "q-nonresumable-#bits", NULL); | ||
508 | get_one_mondo_bits(val, &tb->nonresum_qmask, 2); | ||
509 | } | ||
510 | |||
511 | static void __init mdesc_fill_in_cpu_data(void) | ||
512 | { | ||
513 | struct mdesc_node *mp; | ||
514 | |||
515 | ncpus_probed = 0; | ||
516 | md_for_each_node_by_name(mp, "cpu") { | ||
517 | const u64 *id = md_get_property(mp, "id", NULL); | ||
518 | const u64 *cfreq = md_get_property(mp, "clock-frequency", NULL); | ||
519 | struct trap_per_cpu *tb; | ||
520 | cpuinfo_sparc *c; | ||
521 | unsigned int i; | ||
522 | int cpuid; | ||
523 | |||
524 | ncpus_probed++; | ||
525 | |||
526 | cpuid = *id; | ||
527 | |||
528 | #ifdef CONFIG_SMP | ||
529 | if (cpuid >= NR_CPUS) | ||
530 | continue; | ||
531 | #else | ||
532 | /* On uniprocessor we only want the values for the | ||
533 | * real physical cpu the kernel booted onto, however | ||
534 | * cpu_data() only has one entry at index 0. | ||
535 | */ | ||
536 | if (cpuid != real_hard_smp_processor_id()) | ||
537 | continue; | ||
538 | cpuid = 0; | ||
539 | #endif | ||
540 | |||
541 | c = &cpu_data(cpuid); | ||
542 | c->clock_tick = *cfreq; | ||
543 | |||
544 | tb = &trap_block[cpuid]; | ||
545 | get_mondo_data(mp, tb); | ||
546 | |||
547 | for (i = 0; i < mp->num_arcs; i++) { | ||
548 | struct mdesc_node *t = mp->arcs[i].arc; | ||
549 | unsigned int j; | ||
550 | |||
551 | if (strcmp(mp->arcs[i].name, "fwd")) | ||
552 | continue; | ||
553 | |||
554 | if (!strcmp(t->name, "cache")) { | ||
555 | fill_in_one_cache(c, t); | ||
556 | continue; | ||
557 | } | ||
558 | |||
559 | for (j = 0; j < t->num_arcs; j++) { | ||
560 | struct mdesc_node *n; | ||
561 | |||
562 | n = t->arcs[j].arc; | ||
563 | if (strcmp(t->arcs[j].name, "fwd")) | ||
564 | continue; | ||
565 | |||
566 | if (!strcmp(n->name, "cache")) | ||
567 | fill_in_one_cache(c, n); | ||
568 | } | ||
569 | } | ||
570 | |||
571 | #ifdef CONFIG_SMP | ||
572 | cpu_set(cpuid, cpu_present_map); | ||
573 | cpu_set(cpuid, phys_cpu_present_map); | ||
574 | #endif | ||
575 | |||
576 | c->core_id = 0; | ||
577 | } | ||
578 | |||
579 | set_core_ids(); | ||
580 | |||
581 | smp_fill_in_sib_core_maps(); | ||
582 | } | ||
583 | |||
584 | void __init sun4v_mdesc_init(void) | ||
585 | { | ||
586 | unsigned long len, real_len, status; | ||
587 | |||
588 | (void) sun4v_mach_desc(0UL, 0UL, &len); | ||
589 | |||
590 | printk("MDESC: Size is %lu bytes.\n", len); | ||
591 | |||
592 | main_mdesc = mdesc_early_alloc(len); | ||
593 | |||
594 | status = sun4v_mach_desc(__pa(main_mdesc), len, &real_len); | ||
595 | if (status != HV_EOK || real_len > len) { | ||
596 | prom_printf("sun4v_mach_desc fails, err(%lu), " | ||
597 | "len(%lu), real_len(%lu)\n", | ||
598 | status, len, real_len); | ||
599 | prom_halt(); | ||
600 | } | ||
601 | |||
602 | len = count_nodes(main_mdesc); | ||
603 | printk("MDESC: %lu nodes.\n", len); | ||
604 | |||
605 | len = roundup_pow_of_two(len); | ||
606 | |||
607 | mdesc_hash = mdesc_early_alloc(len * sizeof(struct mdesc_node *)); | ||
608 | mdesc_hash_size = len; | ||
609 | |||
610 | printk("MDESC: Hash size %lu entries.\n", len); | ||
611 | |||
612 | build_all_nodes(main_mdesc); | ||
613 | |||
614 | printk("MDESC: Built graph with %u bytes of memory.\n", | ||
615 | mdesc_early_allocated); | ||
616 | |||
617 | report_platform_properties(); | ||
618 | mdesc_fill_in_cpu_data(); | ||
619 | } | ||
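A hedged usage sketch for the accessors this file adds (md_find_node_by_name(), md_get_property(), and the "fwd" arcs leading from a cpu node to its cache nodes), written against the structures shown above; the helper itself and the exact properties printed are illustrative only, and the md_for_each_node_by_name() iterator used in this file is assumed to wrap the same md_find_node_by_name() lookup.

```c
/* Hypothetical walker, mirroring mdesc_fill_in_cpu_data() above. */
static void __init dump_cpu_caches_sketch(void)
{
	struct mdesc_node *mp;

	for (mp = md_find_node_by_name(NULL, "cpu");
	     mp;
	     mp = md_find_node_by_name(mp, "cpu")) {
		const u64 *id = md_get_property(mp, "id", NULL);
		unsigned int i;

		for (i = 0; i < mp->num_arcs; i++) {
			struct mdesc_node *t = mp->arcs[i].arc;
			const u64 *level, *size;

			/* Only follow forward arcs that land on cache nodes. */
			if (strcmp(mp->arcs[i].name, "fwd") ||
			    strcmp(t->name, "cache"))
				continue;

			level = md_get_property(t, "level", NULL);
			size = md_get_property(t, "size", NULL);
			printk("cpu %lu: L%lu cache of %lu bytes\n",
			       *id, *level, *size);
		}
	}
}
```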
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index e2377796de89..323d6c278518 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -762,9 +762,10 @@ void sabre_init(struct device_node *dp, char *model_name) | |||
762 | /* Of course, Sun has to encode things a thousand | 762 | /* Of course, Sun has to encode things a thousand |
763 | * different ways, inconsistently. | 763 | * different ways, inconsistently. |
764 | */ | 764 | */ |
765 | cpu_find_by_instance(0, &dp, NULL); | 765 | for_each_node_by_type(dp, "cpu") { |
766 | if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) | 766 | if (!strcmp(dp->name, "SUNW,UltraSPARC-IIe")) |
767 | hummingbird_p = 1; | 767 | hummingbird_p = 1; |
768 | } | ||
768 | } | 769 | } |
769 | } | 770 | } |
770 | 771 | ||
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index 02830e4671f5..dad4b3ba705f 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/irq.h> | 28 | #include <asm/irq.h> |
29 | #include <asm/asi.h> | 29 | #include <asm/asi.h> |
30 | #include <asm/upa.h> | 30 | #include <asm/upa.h> |
31 | #include <asm/smp.h> | ||
31 | 32 | ||
32 | static struct device_node *allnodes; | 33 | static struct device_node *allnodes; |
33 | 34 | ||
@@ -1665,6 +1666,150 @@ static struct device_node * __init build_tree(struct device_node *parent, phandl | |||
1665 | return ret; | 1666 | return ret; |
1666 | } | 1667 | } |
1667 | 1668 | ||
1669 | static const char *get_mid_prop(void) | ||
1670 | { | ||
1671 | return (tlb_type == spitfire ? "upa-portid" : "portid"); | ||
1672 | } | ||
1673 | |||
1674 | struct device_node *of_find_node_by_cpuid(int cpuid) | ||
1675 | { | ||
1676 | struct device_node *dp; | ||
1677 | const char *mid_prop = get_mid_prop(); | ||
1678 | |||
1679 | for_each_node_by_type(dp, "cpu") { | ||
1680 | int id = of_getintprop_default(dp, mid_prop, -1); | ||
1681 | const char *this_mid_prop = mid_prop; | ||
1682 | |||
1683 | if (id < 0) { | ||
1684 | this_mid_prop = "cpuid"; | ||
1685 | id = of_getintprop_default(dp, this_mid_prop, -1); | ||
1686 | } | ||
1687 | |||
1688 | if (id < 0) { | ||
1689 | prom_printf("OF: Serious problem, cpu lacks " | ||
1690 | "%s property", this_mid_prop); | ||
1691 | prom_halt(); | ||
1692 | } | ||
1693 | if (cpuid == id) | ||
1694 | return dp; | ||
1695 | } | ||
1696 | return NULL; | ||
1697 | } | ||
1698 | |||
1699 | static void __init of_fill_in_cpu_data(void) | ||
1700 | { | ||
1701 | struct device_node *dp; | ||
1702 | const char *mid_prop = get_mid_prop(); | ||
1703 | |||
1704 | ncpus_probed = 0; | ||
1705 | for_each_node_by_type(dp, "cpu") { | ||
1706 | int cpuid = of_getintprop_default(dp, mid_prop, -1); | ||
1707 | const char *this_mid_prop = mid_prop; | ||
1708 | struct device_node *portid_parent; | ||
1709 | int portid = -1; | ||
1710 | |||
1711 | portid_parent = NULL; | ||
1712 | if (cpuid < 0) { | ||
1713 | this_mid_prop = "cpuid"; | ||
1714 | cpuid = of_getintprop_default(dp, this_mid_prop, -1); | ||
1715 | if (cpuid >= 0) { | ||
1716 | int limit = 2; | ||
1717 | |||
1718 | portid_parent = dp; | ||
1719 | while (limit--) { | ||
1720 | portid_parent = portid_parent->parent; | ||
1721 | if (!portid_parent) | ||
1722 | break; | ||
1723 | portid = of_getintprop_default(portid_parent, | ||
1724 | "portid", -1); | ||
1725 | if (portid >= 0) | ||
1726 | break; | ||
1727 | } | ||
1728 | } | ||
1729 | } | ||
1730 | |||
1731 | if (cpuid < 0) { | ||
1732 | prom_printf("OF: Serious problem, cpu lacks " | ||
1733 | "%s property", this_mid_prop); | ||
1734 | prom_halt(); | ||
1735 | } | ||
1736 | |||
1737 | ncpus_probed++; | ||
1738 | |||
1739 | #ifdef CONFIG_SMP | ||
1740 | if (cpuid >= NR_CPUS) | ||
1741 | continue; | ||
1742 | #else | ||
1743 | /* On uniprocessor we only want the values for the | ||
1744 | * real physical cpu the kernel booted onto, however | ||
1745 | * cpu_data() only has one entry at index 0. | ||
1746 | */ | ||
1747 | if (cpuid != real_hard_smp_processor_id()) | ||
1748 | continue; | ||
1749 | cpuid = 0; | ||
1750 | #endif | ||
1751 | |||
1752 | cpu_data(cpuid).clock_tick = | ||
1753 | of_getintprop_default(dp, "clock-frequency", 0); | ||
1754 | |||
1755 | if (portid_parent) { | ||
1756 | cpu_data(cpuid).dcache_size = | ||
1757 | of_getintprop_default(dp, "l1-dcache-size", | ||
1758 | 16 * 1024); | ||
1759 | cpu_data(cpuid).dcache_line_size = | ||
1760 | of_getintprop_default(dp, "l1-dcache-line-size", | ||
1761 | 32); | ||
1762 | cpu_data(cpuid).icache_size = | ||
1763 | of_getintprop_default(dp, "l1-icache-size", | ||
1764 | 8 * 1024); | ||
1765 | cpu_data(cpuid).icache_line_size = | ||
1766 | of_getintprop_default(dp, "l1-icache-line-size", | ||
1767 | 32); | ||
1768 | cpu_data(cpuid).ecache_size = | ||
1769 | of_getintprop_default(dp, "l2-cache-size", 0); | ||
1770 | cpu_data(cpuid).ecache_line_size = | ||
1771 | of_getintprop_default(dp, "l2-cache-line-size", 0); | ||
1772 | if (!cpu_data(cpuid).ecache_size || | ||
1773 | !cpu_data(cpuid).ecache_line_size) { | ||
1774 | cpu_data(cpuid).ecache_size = | ||
1775 | of_getintprop_default(portid_parent, | ||
1776 | "l2-cache-size", | ||
1777 | (4 * 1024 * 1024)); | ||
1778 | cpu_data(cpuid).ecache_line_size = | ||
1779 | of_getintprop_default(portid_parent, | ||
1780 | "l2-cache-line-size", 64); | ||
1781 | } | ||
1782 | |||
1783 | cpu_data(cpuid).core_id = portid + 1; | ||
1784 | } else { | ||
1785 | cpu_data(cpuid).dcache_size = | ||
1786 | of_getintprop_default(dp, "dcache-size", 16 * 1024); | ||
1787 | cpu_data(cpuid).dcache_line_size = | ||
1788 | of_getintprop_default(dp, "dcache-line-size", 32); | ||
1789 | |||
1790 | cpu_data(cpuid).icache_size = | ||
1791 | of_getintprop_default(dp, "icache-size", 16 * 1024); | ||
1792 | cpu_data(cpuid).icache_line_size = | ||
1793 | of_getintprop_default(dp, "icache-line-size", 32); | ||
1794 | |||
1795 | cpu_data(cpuid).ecache_size = | ||
1796 | of_getintprop_default(dp, "ecache-size", | ||
1797 | (4 * 1024 * 1024)); | ||
1798 | cpu_data(cpuid).ecache_line_size = | ||
1799 | of_getintprop_default(dp, "ecache-line-size", 64); | ||
1800 | |||
1801 | cpu_data(cpuid).core_id = 0; | ||
1802 | } | ||
1803 | |||
1804 | #ifdef CONFIG_SMP | ||
1805 | cpu_set(cpuid, cpu_present_map); | ||
1806 | cpu_set(cpuid, phys_cpu_present_map); | ||
1807 | #endif | ||
1808 | } | ||
1809 | |||
1810 | smp_fill_in_sib_core_maps(); | ||
1811 | } | ||
1812 | |||
1668 | void __init prom_build_devicetree(void) | 1813 | void __init prom_build_devicetree(void) |
1669 | { | 1814 | { |
1670 | struct device_node **nextp; | 1815 | struct device_node **nextp; |
@@ -1679,4 +1824,7 @@ void __init prom_build_devicetree(void) | |||
1679 | &nextp); | 1824 | &nextp); |
1680 | printk("PROM: Built device tree with %u bytes of memory.\n", | 1825 | printk("PROM: Built device tree with %u bytes of memory.\n", |
1681 | prom_early_allocated); | 1826 | prom_early_allocated); |
1827 | |||
1828 | if (tlb_type != hypervisor) | ||
1829 | of_fill_in_cpu_data(); | ||
1682 | } | 1830 | } |
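When a cpu node lacks "portid"/"upa-portid" and falls back to "cpuid", of_fill_in_cpu_data() walks at most two parents looking for a "portid" property and derives core_id = portid + 1, so hardware threads of one core share a non-zero core id. A small sketch of just that walk; the surrounding device-tree shape is a hypothetical example, the code only relies on finding "portid" within two parent levels.

```c
/* Illustrative only; mirrors the limit-2 parent walk above. */
static int core_id_sketch(struct device_node *cpu_dp)
{
	struct device_node *p = cpu_dp;
	int limit = 2, portid = -1;

	while (limit--) {
		p = p->parent;
		if (!p)
			break;
		portid = of_getintprop_default(p, "portid", -1);
		if (portid >= 0)
			break;
	}

	/* core_id 0 means "no sibling information"; see
	 * smp_fill_in_sib_core_maps() in smp.c below.
	 */
	return (portid >= 0) ? portid + 1 : 0;
}
```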
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index dea9c3c9ec5f..de9b4c13f1c7 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -46,11 +46,17 @@ | |||
46 | #include <asm/sections.h> | 46 | #include <asm/sections.h> |
47 | #include <asm/setup.h> | 47 | #include <asm/setup.h> |
48 | #include <asm/mmu.h> | 48 | #include <asm/mmu.h> |
49 | #include <asm/ns87303.h> | ||
49 | 50 | ||
50 | #ifdef CONFIG_IP_PNP | 51 | #ifdef CONFIG_IP_PNP |
51 | #include <net/ipconfig.h> | 52 | #include <net/ipconfig.h> |
52 | #endif | 53 | #endif |
53 | 54 | ||
55 | /* Used to synchronize accesses to NatSemi SUPER I/O chip configure | ||
56 | * operations in asm/ns87303.h | ||
57 | */ | ||
58 | DEFINE_SPINLOCK(ns87303_lock); | ||
59 | |||
54 | struct screen_info screen_info = { | 60 | struct screen_info screen_info = { |
55 | 0, 0, /* orig-x, orig-y */ | 61 | 0, 0, /* orig-x, orig-y */ |
56 | 0, /* unused */ | 62 | 0, /* unused */ |
@@ -370,8 +376,6 @@ void __init setup_arch(char **cmdline_p) | |||
370 | init_cur_cpu_trap(current_thread_info()); | 376 | init_cur_cpu_trap(current_thread_info()); |
371 | 377 | ||
372 | paging_init(); | 378 | paging_init(); |
373 | |||
374 | smp_setup_cpu_possible_map(); | ||
375 | } | 379 | } |
376 | 380 | ||
377 | static int __init set_preferred_console(void) | 381 | static int __init set_preferred_console(void) |
@@ -424,7 +428,7 @@ extern void mmu_info(struct seq_file *); | |||
424 | unsigned int dcache_parity_tl1_occurred; | 428 | unsigned int dcache_parity_tl1_occurred; |
425 | unsigned int icache_parity_tl1_occurred; | 429 | unsigned int icache_parity_tl1_occurred; |
426 | 430 | ||
427 | static int ncpus_probed; | 431 | int ncpus_probed; |
428 | 432 | ||
429 | static int show_cpuinfo(struct seq_file *m, void *__unused) | 433 | static int show_cpuinfo(struct seq_file *m, void *__unused) |
430 | { | 434 | { |
@@ -516,14 +520,6 @@ static int __init topology_init(void) | |||
516 | 520 | ||
517 | err = -ENOMEM; | 521 | err = -ENOMEM; |
518 | 522 | ||
519 | /* Count the number of physically present processors in | ||
520 | * the machine, even on uniprocessor, so that /proc/cpuinfo | ||
521 | * output is consistent with 2.4.x | ||
522 | */ | ||
523 | ncpus_probed = 0; | ||
524 | while (!cpu_find_by_instance(ncpus_probed, NULL, NULL)) | ||
525 | ncpus_probed++; | ||
526 | |||
527 | for_each_possible_cpu(i) { | 523 | for_each_possible_cpu(i) { |
528 | struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); | 524 | struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); |
529 | if (p) { | 525 | if (p) { |
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 24fdf1d0adc5..f7fa873c800d 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -40,6 +40,7 @@ | |||
40 | #include <asm/tlb.h> | 40 | #include <asm/tlb.h> |
41 | #include <asm/sections.h> | 41 | #include <asm/sections.h> |
42 | #include <asm/prom.h> | 42 | #include <asm/prom.h> |
43 | #include <asm/mdesc.h> | ||
43 | 44 | ||
44 | extern void calibrate_delay(void); | 45 | extern void calibrate_delay(void); |
45 | 46 | ||
@@ -75,53 +76,6 @@ void smp_bogo(struct seq_file *m) | |||
75 | i, cpu_data(i).clock_tick); | 76 | i, cpu_data(i).clock_tick); |
76 | } | 77 | } |
77 | 78 | ||
78 | void __init smp_store_cpu_info(int id) | ||
79 | { | ||
80 | struct device_node *dp; | ||
81 | int def; | ||
82 | |||
83 | cpu_data(id).udelay_val = loops_per_jiffy; | ||
84 | |||
85 | cpu_find_by_mid(id, &dp); | ||
86 | cpu_data(id).clock_tick = | ||
87 | of_getintprop_default(dp, "clock-frequency", 0); | ||
88 | |||
89 | def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024)); | ||
90 | cpu_data(id).dcache_size = | ||
91 | of_getintprop_default(dp, "dcache-size", def); | ||
92 | |||
93 | def = 32; | ||
94 | cpu_data(id).dcache_line_size = | ||
95 | of_getintprop_default(dp, "dcache-line-size", def); | ||
96 | |||
97 | def = 16 * 1024; | ||
98 | cpu_data(id).icache_size = | ||
99 | of_getintprop_default(dp, "icache-size", def); | ||
100 | |||
101 | def = 32; | ||
102 | cpu_data(id).icache_line_size = | ||
103 | of_getintprop_default(dp, "icache-line-size", def); | ||
104 | |||
105 | def = ((tlb_type == hypervisor) ? | ||
106 | (3 * 1024 * 1024) : | ||
107 | (4 * 1024 * 1024)); | ||
108 | cpu_data(id).ecache_size = | ||
109 | of_getintprop_default(dp, "ecache-size", def); | ||
110 | |||
111 | def = 64; | ||
112 | cpu_data(id).ecache_line_size = | ||
113 | of_getintprop_default(dp, "ecache-line-size", def); | ||
114 | |||
115 | printk("CPU[%d]: Caches " | ||
116 | "D[sz(%d):line_sz(%d)] " | ||
117 | "I[sz(%d):line_sz(%d)] " | ||
118 | "E[sz(%d):line_sz(%d)]\n", | ||
119 | id, | ||
120 | cpu_data(id).dcache_size, cpu_data(id).dcache_line_size, | ||
121 | cpu_data(id).icache_size, cpu_data(id).icache_line_size, | ||
122 | cpu_data(id).ecache_size, cpu_data(id).ecache_line_size); | ||
123 | } | ||
124 | |||
125 | extern void setup_sparc64_timer(void); | 79 | extern void setup_sparc64_timer(void); |
126 | 80 | ||
127 | static volatile unsigned long callin_flag = 0; | 81 | static volatile unsigned long callin_flag = 0; |
@@ -145,7 +99,7 @@ void __init smp_callin(void) | |||
145 | local_irq_enable(); | 99 | local_irq_enable(); |
146 | 100 | ||
147 | calibrate_delay(); | 101 | calibrate_delay(); |
148 | smp_store_cpu_info(cpuid); | 102 | cpu_data(cpuid).udelay_val = loops_per_jiffy; |
149 | callin_flag = 1; | 103 | callin_flag = 1; |
150 | __asm__ __volatile__("membar #Sync\n\t" | 104 | __asm__ __volatile__("membar #Sync\n\t" |
151 | "flush %%g6" : : : "memory"); | 105 | "flush %%g6" : : : "memory"); |
@@ -340,9 +294,8 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu) | |||
340 | 294 | ||
341 | prom_startcpu_cpuid(cpu, entry, cookie); | 295 | prom_startcpu_cpuid(cpu, entry, cookie); |
342 | } else { | 296 | } else { |
343 | struct device_node *dp; | 297 | struct device_node *dp = of_find_node_by_cpuid(cpu); |
344 | 298 | ||
345 | cpu_find_by_mid(cpu, &dp); | ||
346 | prom_startcpu(dp->node, entry, cookie); | 299 | prom_startcpu(dp->node, entry, cookie); |
347 | } | 300 | } |
348 | 301 | ||
@@ -1191,23 +1144,14 @@ int setup_profiling_timer(unsigned int multiplier) | |||
1191 | 1144 | ||
1192 | static void __init smp_tune_scheduling(void) | 1145 | static void __init smp_tune_scheduling(void) |
1193 | { | 1146 | { |
1194 | struct device_node *dp; | 1147 | unsigned int smallest = ~0U; |
1195 | int instance; | 1148 | int i; |
1196 | unsigned int def, smallest = ~0U; | ||
1197 | |||
1198 | def = ((tlb_type == hypervisor) ? | ||
1199 | (3 * 1024 * 1024) : | ||
1200 | (4 * 1024 * 1024)); | ||
1201 | 1149 | ||
1202 | instance = 0; | 1150 | for (i = 0; i < NR_CPUS; i++) { |
1203 | while (!cpu_find_by_instance(instance, &dp, NULL)) { | 1151 | unsigned int val = cpu_data(i).ecache_size; |
1204 | unsigned int val; | ||
1205 | 1152 | ||
1206 | val = of_getintprop_default(dp, "ecache-size", def); | 1153 | if (val && val < smallest) |
1207 | if (val < smallest) | ||
1208 | smallest = val; | 1154 | smallest = val; |
1209 | |||
1210 | instance++; | ||
1211 | } | 1155 | } |
1212 | 1156 | ||
1213 | /* Any value less than 256K is nonsense. */ | 1157 | /* Any value less than 256K is nonsense. */ |
@@ -1230,58 +1174,42 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
1230 | int i; | 1174 | int i; |
1231 | 1175 | ||
1232 | if (num_possible_cpus() > max_cpus) { | 1176 | if (num_possible_cpus() > max_cpus) { |
1233 | int instance, mid; | 1177 | for_each_possible_cpu(i) { |
1234 | 1178 | if (i != boot_cpu_id) { | |
1235 | instance = 0; | 1179 | cpu_clear(i, phys_cpu_present_map); |
1236 | while (!cpu_find_by_instance(instance, NULL, &mid)) { | 1180 | cpu_clear(i, cpu_present_map); |
1237 | if (mid != boot_cpu_id) { | ||
1238 | cpu_clear(mid, phys_cpu_present_map); | ||
1239 | cpu_clear(mid, cpu_present_map); | ||
1240 | if (num_possible_cpus() <= max_cpus) | 1181 | if (num_possible_cpus() <= max_cpus) |
1241 | break; | 1182 | break; |
1242 | } | 1183 | } |
1243 | instance++; | ||
1244 | } | ||
1245 | } | ||
1246 | |||
1247 | for_each_possible_cpu(i) { | ||
1248 | if (tlb_type == hypervisor) { | ||
1249 | int j; | ||
1250 | |||
1251 | /* XXX get this mapping from machine description */ | ||
1252 | for_each_possible_cpu(j) { | ||
1253 | if ((j >> 2) == (i >> 2)) | ||
1254 | cpu_set(j, cpu_sibling_map[i]); | ||
1255 | } | ||
1256 | } else { | ||
1257 | cpu_set(i, cpu_sibling_map[i]); | ||
1258 | } | 1184 | } |
1259 | } | 1185 | } |
1260 | 1186 | ||
1261 | smp_store_cpu_info(boot_cpu_id); | 1187 | cpu_data(boot_cpu_id).udelay_val = loops_per_jiffy; |
1262 | smp_tune_scheduling(); | 1188 | smp_tune_scheduling(); |
1263 | } | 1189 | } |
1264 | 1190 | ||
1265 | /* Set this up early so that things like the scheduler can init | 1191 | void __devinit smp_prepare_boot_cpu(void) |
1266 | * properly. We use the same cpu mask for both the present and | ||
1267 | * possible cpu map. | ||
1268 | */ | ||
1269 | void __init smp_setup_cpu_possible_map(void) | ||
1270 | { | 1192 | { |
1271 | int instance, mid; | ||
1272 | |||
1273 | instance = 0; | ||
1274 | while (!cpu_find_by_instance(instance, NULL, &mid)) { | ||
1275 | if (mid < NR_CPUS) { | ||
1276 | cpu_set(mid, phys_cpu_present_map); | ||
1277 | cpu_set(mid, cpu_present_map); | ||
1278 | } | ||
1279 | instance++; | ||
1280 | } | ||
1281 | } | 1193 | } |
1282 | 1194 | ||
1283 | void __devinit smp_prepare_boot_cpu(void) | 1195 | void __devinit smp_fill_in_sib_core_maps(void) |
1284 | { | 1196 | { |
1197 | unsigned int i; | ||
1198 | |||
1199 | for_each_possible_cpu(i) { | ||
1200 | unsigned int j; | ||
1201 | |||
1202 | if (cpu_data(i).core_id == 0) { | ||
1203 | cpu_set(i, cpu_sibling_map[i]); | ||
1204 | continue; | ||
1205 | } | ||
1206 | |||
1207 | for_each_possible_cpu(j) { | ||
1208 | if (cpu_data(i).core_id == | ||
1209 | cpu_data(j).core_id) | ||
1210 | cpu_set(j, cpu_sibling_map[i]); | ||
1211 | } | ||
1212 | } | ||
1285 | } | 1213 | } |
1286 | 1214 | ||
1287 | int __cpuinit __cpu_up(unsigned int cpu) | 1215 | int __cpuinit __cpu_up(unsigned int cpu) |
@@ -1337,7 +1265,7 @@ unsigned long __per_cpu_shift __read_mostly; | |||
1337 | EXPORT_SYMBOL(__per_cpu_base); | 1265 | EXPORT_SYMBOL(__per_cpu_base); |
1338 | EXPORT_SYMBOL(__per_cpu_shift); | 1266 | EXPORT_SYMBOL(__per_cpu_shift); |
1339 | 1267 | ||
1340 | void __init setup_per_cpu_areas(void) | 1268 | void __init real_setup_per_cpu_areas(void) |
1341 | { | 1269 | { |
1342 | unsigned long goal, size, i; | 1270 | unsigned long goal, size, i; |
1343 | char *ptr; | 1271 | char *ptr; |
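For reference, a worked example of what the new smp_fill_in_sib_core_maps() above produces; the cpu numbering and topology are hypothetical.

```c
/* Hypothetical topology: core_id 1 on cpus 0-3, core_id 2 on cpus 4-7.
 *
 *   cpu_sibling_map[0..3] = { 0, 1, 2, 3 }
 *   cpu_sibling_map[4..7] = { 4, 5, 6, 7 }
 *
 * A cpu whose core_id is still 0 (no topology data, e.g. a sun4u cpu
 * node without a portid parent) is made its own only sibling.
 */
```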
diff --git a/arch/sparc64/kernel/sun4v_ivec.S b/arch/sparc64/kernel/sun4v_ivec.S
index 405855dd886b..574bc248bca6 100644
--- a/arch/sparc64/kernel/sun4v_ivec.S
+++ b/arch/sparc64/kernel/sun4v_ivec.S
@@ -22,12 +22,12 @@ sun4v_cpu_mondo: | |||
22 | be,pn %xcc, sun4v_cpu_mondo_queue_empty | 22 | be,pn %xcc, sun4v_cpu_mondo_queue_empty |
23 | nop | 23 | nop |
24 | 24 | ||
25 | /* Get &trap_block[smp_processor_id()] into %g3. */ | 25 | /* Get &trap_block[smp_processor_id()] into %g4. */ |
26 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | 26 | ldxa [%g0] ASI_SCRATCHPAD, %g4 |
27 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | 27 | sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 |
28 | 28 | ||
29 | /* Get CPU mondo queue base phys address into %g7. */ | 29 | /* Get CPU mondo queue base phys address into %g7. */ |
30 | ldx [%g3 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 | 30 | ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7 |
31 | 31 | ||
32 | /* Now get the cross-call arguments and handler PC, same | 32 | /* Now get the cross-call arguments and handler PC, same |
33 | * layout as sun4u: | 33 | * layout as sun4u: |
@@ -47,8 +47,7 @@ sun4v_cpu_mondo: | |||
47 | add %g2, 0x40 - 0x8 - 0x8, %g2 | 47 | add %g2, 0x40 - 0x8 - 0x8, %g2 |
48 | 48 | ||
49 | /* Update queue head pointer. */ | 49 | /* Update queue head pointer. */ |
50 | sethi %hi(8192 - 1), %g4 | 50 | lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4 |
51 | or %g4, %lo(8192 - 1), %g4 | ||
52 | and %g2, %g4, %g2 | 51 | and %g2, %g4, %g2 |
53 | 52 | ||
54 | mov INTRQ_CPU_MONDO_HEAD, %g4 | 53 | mov INTRQ_CPU_MONDO_HEAD, %g4 |
@@ -71,12 +70,12 @@ sun4v_dev_mondo: | |||
71 | be,pn %xcc, sun4v_dev_mondo_queue_empty | 70 | be,pn %xcc, sun4v_dev_mondo_queue_empty |
72 | nop | 71 | nop |
73 | 72 | ||
74 | /* Get &trap_block[smp_processor_id()] into %g3. */ | 73 | /* Get &trap_block[smp_processor_id()] into %g4. */ |
75 | ldxa [%g0] ASI_SCRATCHPAD, %g3 | 74 | ldxa [%g0] ASI_SCRATCHPAD, %g4 |
76 | sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3 | 75 | sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4 |
77 | 76 | ||
78 | /* Get DEV mondo queue base phys address into %g5. */ | 77 | /* Get DEV mondo queue base phys address into %g5. */ |
79 | ldx [%g3 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 | 78 | ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5 |
80 | 79 | ||
81 | /* Load IVEC into %g3. */ | 80 | /* Load IVEC into %g3. */ |
82 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 | 81 | ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3 |
@@ -90,8 +89,7 @@ sun4v_dev_mondo: | |||
90 | */ | 89 | */ |
91 | 90 | ||
92 | /* Update queue head pointer, this frees up some registers. */ | 91 | /* Update queue head pointer, this frees up some registers. */ |
93 | sethi %hi(8192 - 1), %g4 | 92 | lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4 |
94 | or %g4, %lo(8192 - 1), %g4 | ||
95 | and %g2, %g4, %g2 | 93 | and %g2, %g4, %g2 |
96 | 94 | ||
97 | mov INTRQ_DEVICE_MONDO_HEAD, %g4 | 95 | mov INTRQ_DEVICE_MONDO_HEAD, %g4 |
@@ -143,6 +141,8 @@ sun4v_res_mondo: | |||
143 | brnz,pn %g1, sun4v_res_mondo_queue_full | 141 | brnz,pn %g1, sun4v_res_mondo_queue_full |
144 | nop | 142 | nop |
145 | 143 | ||
144 | lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4 | ||
145 | |||
146 | /* Remember this entry's offset in %g1. */ | 146 | /* Remember this entry's offset in %g1. */ |
147 | mov %g2, %g1 | 147 | mov %g2, %g1 |
148 | 148 | ||
@@ -173,8 +173,6 @@ sun4v_res_mondo: | |||
173 | add %g2, 0x08, %g2 | 173 | add %g2, 0x08, %g2 |
174 | 174 | ||
175 | /* Update queue head pointer. */ | 175 | /* Update queue head pointer. */ |
176 | sethi %hi(8192 - 1), %g4 | ||
177 | or %g4, %lo(8192 - 1), %g4 | ||
178 | and %g2, %g4, %g2 | 176 | and %g2, %g4, %g2 |
179 | 177 | ||
180 | mov INTRQ_RESUM_MONDO_HEAD, %g4 | 178 | mov INTRQ_RESUM_MONDO_HEAD, %g4 |
@@ -254,6 +252,8 @@ sun4v_nonres_mondo: | |||
254 | brnz,pn %g1, sun4v_nonres_mondo_queue_full | 252 | brnz,pn %g1, sun4v_nonres_mondo_queue_full |
255 | nop | 253 | nop |
256 | 254 | ||
255 | lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4 | ||
256 | |||
257 | /* Remember this entry's offset in %g1. */ | 257 | /* Remember this entry's offset in %g1. */ |
258 | mov %g2, %g1 | 258 | mov %g2, %g1 |
259 | 259 | ||
@@ -284,8 +284,6 @@ sun4v_nonres_mondo: | |||
284 | add %g2, 0x08, %g2 | 284 | add %g2, 0x08, %g2 |
285 | 285 | ||
286 | /* Update queue head pointer. */ | 286 | /* Update queue head pointer. */ |
287 | sethi %hi(8192 - 1), %g4 | ||
288 | or %g4, %lo(8192 - 1), %g4 | ||
289 | and %g2, %g4, %g2 | 287 | and %g2, %g4, %g2 |
290 | 288 | ||
291 | mov INTRQ_NONRESUM_MONDO_HEAD, %g4 | 289 | mov INTRQ_NONRESUM_MONDO_HEAD, %g4 |
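All four sun4v mondo handlers (CPU, device, resumable and non-resumable error) previously wrapped their queue head pointer with a hard-coded 8 KB mask built by a sethi/or pair; they now pick the mask up from the trap_per_cpu block through the new TRAP_PER_CPU_*_QMASK offsets, so the queue sizes can follow whatever the hypervisor reports instead of being assumed. A minimal C sketch of the resulting wrap, under the assumption that the mask is stored as "size in bytes minus one"; the struct and helper names are illustrative, the real logic is the assembly above:

/* Sketch only: the *_QMASK idea is from the patch, everything else is
 * illustrative naming.
 */
struct mondo_queue {
	unsigned long base_pa;	/* queue base physical address (e.g. cpu_mondo_pa) */
	unsigned int  qmask;	/* queue size in bytes minus one (e.g. cpu_mondo_qmask) */
};

static unsigned long mondo_next_head(const struct mondo_queue *q,
				     unsigned long head,
				     unsigned long entry_bytes)
{
	head += entry_bytes;	/* step past the entry just consumed */
	return head & q->qmask;	/* wrap; the old code did "head & (8192 - 1)" */
}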
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c index 2d63d7689962..0f62ea82953c 100644 --- a/arch/sparc64/kernel/time.c +++ b/arch/sparc64/kernel/time.c | |||
@@ -862,7 +862,6 @@ fs_initcall(clock_init); | |||
862 | static unsigned long sparc64_init_timers(void) | 862 | static unsigned long sparc64_init_timers(void) |
863 | { | 863 | { |
864 | struct device_node *dp; | 864 | struct device_node *dp; |
865 | struct property *prop; | ||
866 | unsigned long clock; | 865 | unsigned long clock; |
867 | #ifdef CONFIG_SMP | 866 | #ifdef CONFIG_SMP |
868 | extern void smp_tick_init(void); | 867 | extern void smp_tick_init(void); |
@@ -879,17 +878,15 @@ static unsigned long sparc64_init_timers(void) | |||
879 | if (manuf == 0x17 && impl == 0x13) { | 878 | if (manuf == 0x17 && impl == 0x13) { |
880 | /* Hummingbird, aka Ultra-IIe */ | 879 | /* Hummingbird, aka Ultra-IIe */ |
881 | tick_ops = &hbtick_operations; | 880 | tick_ops = &hbtick_operations; |
882 | prop = of_find_property(dp, "stick-frequency", NULL); | 881 | clock = of_getintprop_default(dp, "stick-frequency", 0); |
883 | } else { | 882 | } else { |
884 | tick_ops = &tick_operations; | 883 | tick_ops = &tick_operations; |
885 | cpu_find_by_instance(0, &dp, NULL); | 884 | clock = local_cpu_data().clock_tick; |
886 | prop = of_find_property(dp, "clock-frequency", NULL); | ||
887 | } | 885 | } |
888 | } else { | 886 | } else { |
889 | tick_ops = &stick_operations; | 887 | tick_ops = &stick_operations; |
890 | prop = of_find_property(dp, "stick-frequency", NULL); | 888 | clock = of_getintprop_default(dp, "stick-frequency", 0); |
891 | } | 889 | } |
892 | clock = *(unsigned int *) prop->value; | ||
893 | 890 | ||
894 | #ifdef CONFIG_SMP | 891 | #ifdef CONFIG_SMP |
895 | smp_tick_init(); | 892 | smp_tick_init(); |
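sparc64_init_timers() also stops dereferencing a property pointer it never checked: the tick source frequency is now read with of_getintprop_default(), and on pre-Hummingbird UltraSPARC chips the clock comes from the already-probed per-cpu data instead of a second OBP lookup. A small sketch of the before/after pattern, assuming dp is the node the function already located; of_getintprop_default() is the existing sparc64 helper the patch uses:

/* Minimal sketch; the wrapper exists only so the fragment is self-contained. */
static unsigned long read_stick_freq(struct device_node *dp)
{
#if 0	/* before (removed above): dereferences prop->value unconditionally */
	struct property *prop = of_find_property(dp, "stick-frequency", NULL);
	return *(unsigned int *) prop->value;	/* faults if the property is missing */
#else	/* after: explicit fallback when the property is absent */
	return of_getintprop_default(dp, "stick-frequency", 0);
#endif
}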
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c index d0fde36395b4..00a9e3286c83 100644 --- a/arch/sparc64/kernel/traps.c +++ b/arch/sparc64/kernel/traps.c | |||
@@ -795,8 +795,7 @@ extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector | |||
795 | void __init cheetah_ecache_flush_init(void) | 795 | void __init cheetah_ecache_flush_init(void) |
796 | { | 796 | { |
797 | unsigned long largest_size, smallest_linesize, order, ver; | 797 | unsigned long largest_size, smallest_linesize, order, ver; |
798 | struct device_node *dp; | 798 | int i, sz; |
799 | int i, instance, sz; | ||
800 | 799 | ||
801 | /* Scan all cpu device tree nodes, note two values: | 800 | /* Scan all cpu device tree nodes, note two values: |
802 | * 1) largest E-cache size | 801 | * 1) largest E-cache size |
@@ -805,18 +804,20 @@ void __init cheetah_ecache_flush_init(void) | |||
805 | largest_size = 0UL; | 804 | largest_size = 0UL; |
806 | smallest_linesize = ~0UL; | 805 | smallest_linesize = ~0UL; |
807 | 806 | ||
808 | instance = 0; | 807 | for (i = 0; i < NR_CPUS; i++) { |
809 | while (!cpu_find_by_instance(instance, &dp, NULL)) { | ||
810 | unsigned long val; | 808 | unsigned long val; |
811 | 809 | ||
812 | val = of_getintprop_default(dp, "ecache-size", | 810 | val = cpu_data(i).ecache_size; |
813 | (2 * 1024 * 1024)); | 811 | if (!val) |
812 | continue; | ||
813 | |||
814 | if (val > largest_size) | 814 | if (val > largest_size) |
815 | largest_size = val; | 815 | largest_size = val; |
816 | val = of_getintprop_default(dp, "ecache-line-size", 64); | 816 | |
817 | val = cpu_data(i).ecache_line_size; | ||
817 | if (val < smallest_linesize) | 818 | if (val < smallest_linesize) |
818 | smallest_linesize = val; | 819 | smallest_linesize = val; |
819 | instance++; | 820 | |
820 | } | 821 | } |
821 | 822 | ||
822 | if (largest_size == 0UL || smallest_linesize == ~0UL) { | 823 | if (largest_size == 0UL || smallest_linesize == ~0UL) { |
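cheetah_ecache_flush_init() no longer walks OBP "cpu" nodes with cpu_find_by_instance(); it scans the per-cpu data that the new probing code filled in earlier, skipping any slot whose ecache_size is still zero. A hedged sketch of the fields this relies on; the real cpuinfo_sparc lives in asm-sparc64/cpudata.h and is reached through cpu_data(i), and only the members named by this patch are shown:

/* Illustrative subset of the per-cpu data consulted by the new loop. */
typedef struct {
	unsigned long clock_tick;	/* also consumed by sparc64_init_timers() above */
	unsigned int  ecache_size;	/* zero until the CPU has been probed */
	unsigned int  ecache_line_size;
	/* ... remaining per-cpu fields elided ... */
} cpuinfo_sparc;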
@@ -2564,7 +2565,15 @@ void __init trap_init(void) | |||
2564 | (TRAP_PER_CPU_TSB_HUGE_TEMP != | 2565 | (TRAP_PER_CPU_TSB_HUGE_TEMP != |
2565 | offsetof(struct trap_per_cpu, tsb_huge_temp)) || | 2566 | offsetof(struct trap_per_cpu, tsb_huge_temp)) || |
2566 | (TRAP_PER_CPU_IRQ_WORKLIST != | 2567 | (TRAP_PER_CPU_IRQ_WORKLIST != |
2567 | offsetof(struct trap_per_cpu, irq_worklist))) | 2568 | offsetof(struct trap_per_cpu, irq_worklist)) || |
2569 | (TRAP_PER_CPU_CPU_MONDO_QMASK != | ||
2570 | offsetof(struct trap_per_cpu, cpu_mondo_qmask)) || | ||
2571 | (TRAP_PER_CPU_DEV_MONDO_QMASK != | ||
2572 | offsetof(struct trap_per_cpu, dev_mondo_qmask)) || | ||
2573 | (TRAP_PER_CPU_RESUM_QMASK != | ||
2574 | offsetof(struct trap_per_cpu, resum_qmask)) || | ||
2575 | (TRAP_PER_CPU_NONRESUM_QMASK != | ||
2576 | offsetof(struct trap_per_cpu, nonresum_qmask))) | ||
2568 | trap_per_cpu_offsets_are_bolixed_dave(); | 2577 | trap_per_cpu_offsets_are_bolixed_dave(); |
2569 | 2578 | ||
2570 | if ((TSB_CONFIG_TSB != | 2579 | if ((TSB_CONFIG_TSB != |
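trap_init() extends its existing offset check so the four new assembler constants stay in step with struct trap_per_cpu; if they ever drift, the loudly named failure routine fires at boot instead of the mondo handlers loading a bogus mask. The lduw in sun4v_ivec.S implies 32-bit fields, so the additions presumably look like the sketch below; the real definition is in asm-sparc64/cpudata.h and the placement of the members is illustrative:

struct trap_per_cpu {
	/* ... existing members: fault info, mondo block PAs, TSB temporaries,
	 *     irq_worklist, ...
	 */
	unsigned int	cpu_mondo_qmask;	/* TRAP_PER_CPU_CPU_MONDO_QMASK */
	unsigned int	dev_mondo_qmask;	/* TRAP_PER_CPU_DEV_MONDO_QMASK */
	unsigned int	resum_qmask;		/* TRAP_PER_CPU_RESUM_QMASK */
	unsigned int	nonresum_qmask;		/* TRAP_PER_CPU_NONRESUM_QMASK */
};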
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c index 0c9995c3b8ed..977698269d3a 100644 --- a/arch/sparc64/mm/init.c +++ b/arch/sparc64/mm/init.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/kprobes.h> | 23 | #include <linux/kprobes.h> |
24 | #include <linux/cache.h> | 24 | #include <linux/cache.h> |
25 | #include <linux/sort.h> | 25 | #include <linux/sort.h> |
26 | #include <linux/percpu.h> | ||
26 | 27 | ||
27 | #include <asm/head.h> | 28 | #include <asm/head.h> |
28 | #include <asm/system.h> | 29 | #include <asm/system.h> |
@@ -44,8 +45,7 @@ | |||
44 | #include <asm/hypervisor.h> | 45 | #include <asm/hypervisor.h> |
45 | #include <asm/prom.h> | 46 | #include <asm/prom.h> |
46 | #include <asm/sstate.h> | 47 | #include <asm/sstate.h> |
47 | 48 | #include <asm/mdesc.h> | |
48 | extern void device_scan(void); | ||
49 | 49 | ||
50 | #define MAX_PHYS_ADDRESS (1UL << 42UL) | 50 | #define MAX_PHYS_ADDRESS (1UL << 42UL) |
51 | #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) | 51 | #define KPTE_BITMAP_CHUNK_SZ (256UL * 1024UL * 1024UL) |
@@ -1335,6 +1335,9 @@ void __cpuinit sun4v_ktsb_register(void) | |||
1335 | extern void cheetah_ecache_flush_init(void); | 1335 | extern void cheetah_ecache_flush_init(void); |
1336 | extern void sun4v_patch_tlb_handlers(void); | 1336 | extern void sun4v_patch_tlb_handlers(void); |
1337 | 1337 | ||
1338 | extern void cpu_probe(void); | ||
1339 | extern void central_probe(void); | ||
1340 | |||
1338 | static unsigned long last_valid_pfn; | 1341 | static unsigned long last_valid_pfn; |
1339 | pgd_t swapper_pg_dir[2048]; | 1342 | pgd_t swapper_pg_dir[2048]; |
1340 | 1343 | ||
@@ -1419,8 +1422,13 @@ void __init paging_init(void) | |||
1419 | 1422 | ||
1420 | kernel_physical_mapping_init(); | 1423 | kernel_physical_mapping_init(); |
1421 | 1424 | ||
1425 | real_setup_per_cpu_areas(); | ||
1426 | |||
1422 | prom_build_devicetree(); | 1427 | prom_build_devicetree(); |
1423 | 1428 | ||
1429 | if (tlb_type == hypervisor) | ||
1430 | sun4v_mdesc_init(); | ||
1431 | |||
1424 | { | 1432 | { |
1425 | unsigned long zones_size[MAX_NR_ZONES]; | 1433 | unsigned long zones_size[MAX_NR_ZONES]; |
1426 | unsigned long zholes_size[MAX_NR_ZONES]; | 1434 | unsigned long zholes_size[MAX_NR_ZONES]; |
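paging_init() now sets up the per-cpu areas before the firmware device tree is built and, on sun4v only, before the machine description is scanned; the ordering matters, presumably, because those two steps are where the per-cpu CPU data gets filled in under this patch. A condensed sketch of the sequence; the function names are taken from the hunk above, while the rationale in the comments is an inference rather than something the patch states:

static void __init probe_cpus_sketch(void)
{
	real_setup_per_cpu_areas();	/* per-cpu storage must exist first ... */
	prom_build_devicetree();	/* ... because the OBP scan records per-cpu properties */
	if (tlb_type == hypervisor)	/* sun4v: properties come from the machine description */
		sun4v_mdesc_init();
}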
@@ -1437,7 +1445,10 @@ void __init paging_init(void) | |||
1437 | zholes_size); | 1445 | zholes_size); |
1438 | } | 1446 | } |
1439 | 1447 | ||
1440 | device_scan(); | 1448 | prom_printf("Booting Linux...\n"); |
1449 | |||
1450 | central_probe(); | ||
1451 | cpu_probe(); | ||
1441 | } | 1452 | } |
1442 | 1453 | ||
1443 | static void __init taint_real_pages(void) | 1454 | static void __init taint_real_pages(void) |
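device_scan() goes away together with devices.c (deleted at the top of this commit): paging_init() prints the boot banner itself and calls central_probe() and cpu_probe() directly, while the OBP walk device_scan() used to do for per-cpu data is superseded by the probing above. A condensed, hedged view of the replacement; the calls are the ones added in the hunk, and the one-line descriptions of the probes are my reading of the surrounding code, not something the patch spells out:

extern void cpu_probe(void);		/* identify the CPU/FPU type */
extern void central_probe(void);	/* scan the Enterprise "central"/FHC bus, if present */

static void __init finish_boot_sketch(void)
{
	prom_printf("Booting Linux...\n");	/* banner formerly printed by device_scan() */
	central_probe();
	cpu_probe();
}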