diff options
Diffstat (limited to 'arch/blackfin/kernel/setup.c')
-rw-r--r-- | arch/blackfin/kernel/setup.c | 218 |
1 files changed, 148 insertions, 70 deletions
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 71a9a8c53cea..b2a811347b65 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/bootmem.h> | 13 | #include <linux/bootmem.h> |
14 | #include <linux/seq_file.h> | 14 | #include <linux/seq_file.h> |
15 | #include <linux/cpu.h> | 15 | #include <linux/cpu.h> |
16 | #include <linux/mm.h> | ||
16 | #include <linux/module.h> | 17 | #include <linux/module.h> |
17 | #include <linux/tty.h> | 18 | #include <linux/tty.h> |
18 | #include <linux/pfn.h> | 19 | #include <linux/pfn.h> |
@@ -26,11 +27,10 @@ | |||
26 | #include <asm/blackfin.h> | 27 | #include <asm/blackfin.h> |
27 | #include <asm/cplbinit.h> | 28 | #include <asm/cplbinit.h> |
28 | #include <asm/div64.h> | 29 | #include <asm/div64.h> |
30 | #include <asm/cpu.h> | ||
29 | #include <asm/fixed_code.h> | 31 | #include <asm/fixed_code.h> |
30 | #include <asm/early_printk.h> | 32 | #include <asm/early_printk.h> |
31 | 33 | ||
32 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | ||
33 | |||
34 | u16 _bfin_swrst; | 34 | u16 _bfin_swrst; |
35 | EXPORT_SYMBOL(_bfin_swrst); | 35 | EXPORT_SYMBOL(_bfin_swrst); |
36 | 36 | ||
@@ -79,27 +79,68 @@ static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata; | |||
79 | static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata; | 79 | static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata; |
80 | static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata; | 80 | static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata; |
81 | 81 | ||
82 | void __init bfin_cache_init(void) | 82 | DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data); |
83 | { | 83 | |
84 | static int early_init_clkin_hz(char *buf); | ||
85 | |||
84 | #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) | 86 | #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) |
85 | generate_cplb_tables(); | 87 | void __init generate_cplb_tables(void) |
88 | { | ||
89 | unsigned int cpu; | ||
90 | |||
91 | generate_cplb_tables_all(); | ||
92 | /* Generate per-CPU I&D CPLB tables */ | ||
93 | for (cpu = 0; cpu < num_possible_cpus(); ++cpu) | ||
94 | generate_cplb_tables_cpu(cpu); | ||
95 | } | ||
86 | #endif | 96 | #endif |
87 | 97 | ||
98 | void __cpuinit bfin_setup_caches(unsigned int cpu) | ||
99 | { | ||
88 | #ifdef CONFIG_BFIN_ICACHE | 100 | #ifdef CONFIG_BFIN_ICACHE |
89 | bfin_icache_init(); | 101 | bfin_icache_init(icplb_tbl[cpu]); |
90 | printk(KERN_INFO "Instruction Cache Enabled\n"); | ||
91 | #endif | 102 | #endif |
92 | 103 | ||
93 | #ifdef CONFIG_BFIN_DCACHE | 104 | #ifdef CONFIG_BFIN_DCACHE |
94 | bfin_dcache_init(); | 105 | bfin_dcache_init(dcplb_tbl[cpu]); |
95 | printk(KERN_INFO "Data Cache Enabled" | 106 | #endif |
107 | |||
108 | /* | ||
109 | * In cache coherence emulation mode, we need to have the | ||
110 | * D-cache enabled before running any atomic operation which | ||
111 | * might involve cache invalidation (i.e. spinlock, rwlock). | ||
112 | * So printk's are deferred until then. | ||
113 | */ | ||
114 | #ifdef CONFIG_BFIN_ICACHE | ||
115 | printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu); | ||
116 | #endif | ||
117 | #ifdef CONFIG_BFIN_DCACHE | ||
118 | printk(KERN_INFO "Data Cache Enabled for CPU%u" | ||
96 | # if defined CONFIG_BFIN_WB | 119 | # if defined CONFIG_BFIN_WB |
97 | " (write-back)" | 120 | " (write-back)" |
98 | # elif defined CONFIG_BFIN_WT | 121 | # elif defined CONFIG_BFIN_WT |
99 | " (write-through)" | 122 | " (write-through)" |
100 | # endif | 123 | # endif |
101 | "\n"); | 124 | "\n", cpu); |
125 | #endif | ||
126 | } | ||
127 | |||
128 | void __cpuinit bfin_setup_cpudata(unsigned int cpu) | ||
129 | { | ||
130 | struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu); | ||
131 | |||
132 | cpudata->idle = current; | ||
133 | cpudata->loops_per_jiffy = loops_per_jiffy; | ||
134 | cpudata->imemctl = bfin_read_IMEM_CONTROL(); | ||
135 | cpudata->dmemctl = bfin_read_DMEM_CONTROL(); | ||
136 | } | ||
137 | |||
138 | void __init bfin_cache_init(void) | ||
139 | { | ||
140 | #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) | ||
141 | generate_cplb_tables(); | ||
102 | #endif | 142 | #endif |
143 | bfin_setup_caches(0); | ||
103 | } | 144 | } |
104 | 145 | ||
105 | void __init bfin_relocate_l1_mem(void) | 146 | void __init bfin_relocate_l1_mem(void) |
@@ -109,6 +150,8 @@ void __init bfin_relocate_l1_mem(void) | |||
109 | unsigned long l1_data_b_length; | 150 | unsigned long l1_data_b_length; |
110 | unsigned long l2_length; | 151 | unsigned long l2_length; |
111 | 152 | ||
153 | blackfin_dma_early_init(); | ||
154 | |||
112 | l1_code_length = _etext_l1 - _stext_l1; | 155 | l1_code_length = _etext_l1 - _stext_l1; |
113 | if (l1_code_length > L1_CODE_LENGTH) | 156 | if (l1_code_length > L1_CODE_LENGTH) |
114 | panic("L1 Instruction SRAM Overflow\n"); | 157 | panic("L1 Instruction SRAM Overflow\n"); |
@@ -230,7 +273,7 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map) | |||
230 | /* record all known change-points (starting and ending addresses), | 273 | /* record all known change-points (starting and ending addresses), |
231 | omitting those that are for empty memory regions */ | 274 | omitting those that are for empty memory regions */ |
232 | chgidx = 0; | 275 | chgidx = 0; |
233 | for (i = 0; i < old_nr; i++) { | 276 | for (i = 0; i < old_nr; i++) { |
234 | if (map[i].size != 0) { | 277 | if (map[i].size != 0) { |
235 | change_point[chgidx]->addr = map[i].addr; | 278 | change_point[chgidx]->addr = map[i].addr; |
236 | change_point[chgidx++]->pentry = &map[i]; | 279 | change_point[chgidx++]->pentry = &map[i]; |
@@ -238,13 +281,13 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map) | |||
238 | change_point[chgidx++]->pentry = &map[i]; | 281 | change_point[chgidx++]->pentry = &map[i]; |
239 | } | 282 | } |
240 | } | 283 | } |
241 | chg_nr = chgidx; /* true number of change-points */ | 284 | chg_nr = chgidx; /* true number of change-points */ |
242 | 285 | ||
243 | /* sort change-point list by memory addresses (low -> high) */ | 286 | /* sort change-point list by memory addresses (low -> high) */ |
244 | still_changing = 1; | 287 | still_changing = 1; |
245 | while (still_changing) { | 288 | while (still_changing) { |
246 | still_changing = 0; | 289 | still_changing = 0; |
247 | for (i = 1; i < chg_nr; i++) { | 290 | for (i = 1; i < chg_nr; i++) { |
248 | /* if <current_addr> > <last_addr>, swap */ | 291 | /* if <current_addr> > <last_addr>, swap */ |
249 | /* or, if current=<start_addr> & last=<end_addr>, swap */ | 292 | /* or, if current=<start_addr> & last=<end_addr>, swap */ |
250 | if ((change_point[i]->addr < change_point[i-1]->addr) || | 293 | if ((change_point[i]->addr < change_point[i-1]->addr) || |
@@ -261,10 +304,10 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map) | |||
261 | } | 304 | } |
262 | 305 | ||
263 | /* create a new memmap, removing overlaps */ | 306 | /* create a new memmap, removing overlaps */ |
264 | overlap_entries = 0; /* number of entries in the overlap table */ | 307 | overlap_entries = 0; /* number of entries in the overlap table */ |
265 | new_entry = 0; /* index for creating new memmap entries */ | 308 | new_entry = 0; /* index for creating new memmap entries */ |
266 | last_type = 0; /* start with undefined memory type */ | 309 | last_type = 0; /* start with undefined memory type */ |
267 | last_addr = 0; /* start with 0 as last starting address */ | 310 | last_addr = 0; /* start with 0 as last starting address */ |
268 | /* loop through change-points, determining effect on the new memmap */ | 311 | /* loop through change-points, determining effect on the new memmap */ |
269 | for (chgidx = 0; chgidx < chg_nr; chgidx++) { | 312 | for (chgidx = 0; chgidx < chg_nr; chgidx++) { |
270 | /* keep track of all overlapping memmap entries */ | 313 | /* keep track of all overlapping memmap entries */ |
@@ -286,14 +329,14 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map) | |||
286 | if (overlap_list[i]->type > current_type) | 329 | if (overlap_list[i]->type > current_type) |
287 | current_type = overlap_list[i]->type; | 330 | current_type = overlap_list[i]->type; |
288 | /* continue building up new memmap based on this information */ | 331 | /* continue building up new memmap based on this information */ |
289 | if (current_type != last_type) { | 332 | if (current_type != last_type) { |
290 | if (last_type != 0) { | 333 | if (last_type != 0) { |
291 | new_map[new_entry].size = | 334 | new_map[new_entry].size = |
292 | change_point[chgidx]->addr - last_addr; | 335 | change_point[chgidx]->addr - last_addr; |
293 | /* move forward only if the new size was non-zero */ | 336 | /* move forward only if the new size was non-zero */ |
294 | if (new_map[new_entry].size != 0) | 337 | if (new_map[new_entry].size != 0) |
295 | if (++new_entry >= BFIN_MEMMAP_MAX) | 338 | if (++new_entry >= BFIN_MEMMAP_MAX) |
296 | break; /* no more space left for new entries */ | 339 | break; /* no more space left for new entries */ |
297 | } | 340 | } |
298 | if (current_type != 0) { | 341 | if (current_type != 0) { |
299 | new_map[new_entry].addr = change_point[chgidx]->addr; | 342 | new_map[new_entry].addr = change_point[chgidx]->addr; |
@@ -303,9 +346,9 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map) | |||
303 | last_type = current_type; | 346 | last_type = current_type; |
304 | } | 347 | } |
305 | } | 348 | } |
306 | new_nr = new_entry; /* retain count for new entries */ | 349 | new_nr = new_entry; /* retain count for new entries */ |
307 | 350 | ||
308 | /* copy new mapping into original location */ | 351 | /* copy new mapping into original location */ |
309 | memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry)); | 352 | memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry)); |
310 | *pnr_map = new_nr; | 353 | *pnr_map = new_nr; |
311 | 354 | ||
@@ -361,7 +404,6 @@ static __init int parse_memmap(char *arg) | |||
361 | * - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region | 404 | * - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region |
362 | * @ from <start> to <start>+<mem>, type RAM | 405 | * @ from <start> to <start>+<mem>, type RAM |
363 | * $ from <start> to <start>+<mem>, type RESERVED | 406 | * $ from <start> to <start>+<mem>, type RESERVED |
364 | * | ||
365 | */ | 407 | */ |
366 | static __init void parse_cmdline_early(char *cmdline_p) | 408 | static __init void parse_cmdline_early(char *cmdline_p) |
367 | { | 409 | { |
@@ -383,14 +425,15 @@ static __init void parse_cmdline_early(char *cmdline_p) | |||
383 | if (*to != ' ') { | 425 | if (*to != ' ') { |
384 | if (*to == '$' | 426 | if (*to == '$' |
385 | || *(to + 1) == '$') | 427 | || *(to + 1) == '$') |
386 | reserved_mem_dcache_on = | 428 | reserved_mem_dcache_on = 1; |
387 | 1; | ||
388 | if (*to == '#' | 429 | if (*to == '#' |
389 | || *(to + 1) == '#') | 430 | || *(to + 1) == '#') |
390 | reserved_mem_icache_on = | 431 | reserved_mem_icache_on = 1; |
391 | 1; | ||
392 | } | 432 | } |
393 | } | 433 | } |
434 | } else if (!memcmp(to, "clkin_hz=", 9)) { | ||
435 | to += 9; | ||
436 | early_init_clkin_hz(to); | ||
394 | } else if (!memcmp(to, "earlyprintk=", 12)) { | 437 | } else if (!memcmp(to, "earlyprintk=", 12)) { |
395 | to += 12; | 438 | to += 12; |
396 | setup_early_printk(to); | 439 | setup_early_printk(to); |
@@ -417,9 +460,8 @@ static __init void parse_cmdline_early(char *cmdline_p) | |||
417 | * [_ramend - DMA_UNCACHED_REGION, | 460 | * [_ramend - DMA_UNCACHED_REGION, |
418 | * _ramend]: uncached DMA region | 461 | * _ramend]: uncached DMA region |
419 | * [_ramend, physical_mem_end]: memory not managed by kernel | 462 | * [_ramend, physical_mem_end]: memory not managed by kernel |
420 | * | ||
421 | */ | 463 | */ |
422 | static __init void memory_setup(void) | 464 | static __init void memory_setup(void) |
423 | { | 465 | { |
424 | #ifdef CONFIG_MTD_UCLINUX | 466 | #ifdef CONFIG_MTD_UCLINUX |
425 | unsigned long mtd_phys = 0; | 467 | unsigned long mtd_phys = 0; |
@@ -436,7 +478,7 @@ static __init void memory_setup(void) | |||
436 | memory_end = _ramend - DMA_UNCACHED_REGION; | 478 | memory_end = _ramend - DMA_UNCACHED_REGION; |
437 | 479 | ||
438 | #ifdef CONFIG_MPU | 480 | #ifdef CONFIG_MPU |
439 | /* Round up to multiple of 4MB. */ | 481 | /* Round up to multiple of 4MB */ |
440 | memory_start = (_ramstart + 0x3fffff) & ~0x3fffff; | 482 | memory_start = (_ramstart + 0x3fffff) & ~0x3fffff; |
441 | #else | 483 | #else |
442 | memory_start = PAGE_ALIGN(_ramstart); | 484 | memory_start = PAGE_ALIGN(_ramstart); |
@@ -616,7 +658,7 @@ static __init void setup_bootmem_allocator(void) | |||
616 | end_pfn = memory_end >> PAGE_SHIFT; | 658 | end_pfn = memory_end >> PAGE_SHIFT; |
617 | 659 | ||
618 | /* | 660 | /* |
619 | * give all the memory to the bootmap allocator, tell it to put the | 661 | * give all the memory to the bootmap allocator, tell it to put the |
620 | * boot mem_map at the start of memory. | 662 | * boot mem_map at the start of memory. |
621 | */ | 663 | */ |
622 | bootmap_size = init_bootmem_node(NODE_DATA(0), | 664 | bootmap_size = init_bootmem_node(NODE_DATA(0), |
@@ -791,7 +833,11 @@ void __init setup_arch(char **cmdline_p) | |||
791 | bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT); | 833 | bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT); |
792 | #endif | 834 | #endif |
793 | 835 | ||
836 | #ifdef CONFIG_SMP | ||
837 | if (_bfin_swrst & SWRST_DBL_FAULT_A) { | ||
838 | #else | ||
794 | if (_bfin_swrst & RESET_DOUBLE) { | 839 | if (_bfin_swrst & RESET_DOUBLE) { |
840 | #endif | ||
795 | printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n"); | 841 | printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n"); |
796 | #ifdef CONFIG_DEBUG_DOUBLEFAULT | 842 | #ifdef CONFIG_DEBUG_DOUBLEFAULT |
797 | /* We assume the crashing kernel, and the current symbol table match */ | 843 | /* We assume the crashing kernel, and the current symbol table match */ |
@@ -823,9 +869,12 @@ void __init setup_arch(char **cmdline_p) | |||
823 | if (bfin_compiled_revid() == -1) | 869 | if (bfin_compiled_revid() == -1) |
824 | printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n", | 870 | printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n", |
825 | bfin_revid()); | 871 | bfin_revid()); |
826 | else if (bfin_compiled_revid() != 0xffff) | 872 | else if (bfin_compiled_revid() != 0xffff) { |
827 | printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n", | 873 | printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n", |
828 | bfin_compiled_revid(), bfin_revid()); | 874 | bfin_compiled_revid(), bfin_revid()); |
875 | if (bfin_compiled_revid() > bfin_revid()) | ||
876 | panic("Error: you are missing anomaly workarounds for this rev\n"); | ||
877 | } | ||
829 | } | 878 | } |
830 | if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX) | 879 | if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX) |
831 | printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n", | 880 | printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n", |
@@ -835,7 +884,7 @@ void __init setup_arch(char **cmdline_p) | |||
835 | printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n"); | 884 | printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n"); |
836 | 885 | ||
837 | printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n", | 886 | printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n", |
838 | cclk / 1000000, sclk / 1000000); | 887 | cclk / 1000000, sclk / 1000000); |
839 | 888 | ||
840 | if (ANOMALY_05000273 && (cclk >> 1) <= sclk) | 889 | if (ANOMALY_05000273 && (cclk >> 1) <= sclk) |
841 | printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n"); | 890 | printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n"); |
@@ -867,18 +916,21 @@ void __init setup_arch(char **cmdline_p) | |||
867 | BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start | 916 | BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start |
868 | != SAFE_USER_INSTRUCTION - FIXED_CODE_START); | 917 | != SAFE_USER_INSTRUCTION - FIXED_CODE_START); |
869 | 918 | ||
919 | #ifdef CONFIG_SMP | ||
920 | platform_init_cpus(); | ||
921 | #endif | ||
870 | init_exception_vectors(); | 922 | init_exception_vectors(); |
871 | bfin_cache_init(); | 923 | bfin_cache_init(); /* Initialize caches for the boot CPU */ |
872 | } | 924 | } |
873 | 925 | ||
874 | static int __init topology_init(void) | 926 | static int __init topology_init(void) |
875 | { | 927 | { |
876 | int cpu; | 928 | unsigned int cpu; |
929 | /* Record CPU-private information for the boot processor. */ | ||
930 | bfin_setup_cpudata(0); | ||
877 | 931 | ||
878 | for_each_possible_cpu(cpu) { | 932 | for_each_possible_cpu(cpu) { |
879 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 933 | register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu); |
880 | |||
881 | register_cpu(c, cpu); | ||
882 | } | 934 | } |
883 | 935 | ||
884 | return 0; | 936 | return 0; |
@@ -886,36 +938,54 @@ static int __init topology_init(void) | |||
886 | 938 | ||
887 | subsys_initcall(topology_init); | 939 | subsys_initcall(topology_init); |
888 | 940 | ||
941 | /* Get the input clock frequency */ | ||
942 | static u_long cached_clkin_hz = CONFIG_CLKIN_HZ; | ||
943 | static u_long get_clkin_hz(void) | ||
944 | { | ||
945 | return cached_clkin_hz; | ||
946 | } | ||
947 | static int __init early_init_clkin_hz(char *buf) | ||
948 | { | ||
949 | cached_clkin_hz = simple_strtoul(buf, NULL, 0); | ||
950 | #ifdef BFIN_KERNEL_CLOCK | ||
951 | if (cached_clkin_hz != CONFIG_CLKIN_HZ) | ||
952 | panic("cannot change clkin_hz when reprogramming clocks"); | ||
953 | #endif | ||
954 | return 1; | ||
955 | } | ||
956 | early_param("clkin_hz=", early_init_clkin_hz); | ||
957 | |||
889 | /* Get the voltage input multiplier */ | 958 | /* Get the voltage input multiplier */ |
890 | static u_long cached_vco_pll_ctl, cached_vco; | ||
891 | static u_long get_vco(void) | 959 | static u_long get_vco(void) |
892 | { | 960 | { |
893 | u_long msel; | 961 | static u_long cached_vco; |
962 | u_long msel, pll_ctl; | ||
894 | 963 | ||
895 | u_long pll_ctl = bfin_read_PLL_CTL(); | 964 | /* The assumption here is that VCO never changes at runtime. |
896 | if (pll_ctl == cached_vco_pll_ctl) | 965 | * If, someday, we support that, then we'll have to change this. |
966 | */ | ||
967 | if (cached_vco) | ||
897 | return cached_vco; | 968 | return cached_vco; |
898 | else | ||
899 | cached_vco_pll_ctl = pll_ctl; | ||
900 | 969 | ||
970 | pll_ctl = bfin_read_PLL_CTL(); | ||
901 | msel = (pll_ctl >> 9) & 0x3F; | 971 | msel = (pll_ctl >> 9) & 0x3F; |
902 | if (0 == msel) | 972 | if (0 == msel) |
903 | msel = 64; | 973 | msel = 64; |
904 | 974 | ||
905 | cached_vco = CONFIG_CLKIN_HZ; | 975 | cached_vco = get_clkin_hz(); |
906 | cached_vco >>= (1 & pll_ctl); /* DF bit */ | 976 | cached_vco >>= (1 & pll_ctl); /* DF bit */ |
907 | cached_vco *= msel; | 977 | cached_vco *= msel; |
908 | return cached_vco; | 978 | return cached_vco; |
909 | } | 979 | } |
910 | 980 | ||
911 | /* Get the Core clock */ | 981 | /* Get the Core clock */ |
912 | static u_long cached_cclk_pll_div, cached_cclk; | ||
913 | u_long get_cclk(void) | 982 | u_long get_cclk(void) |
914 | { | 983 | { |
984 | static u_long cached_cclk_pll_div, cached_cclk; | ||
915 | u_long csel, ssel; | 985 | u_long csel, ssel; |
916 | 986 | ||
917 | if (bfin_read_PLL_STAT() & 0x1) | 987 | if (bfin_read_PLL_STAT() & 0x1) |
918 | return CONFIG_CLKIN_HZ; | 988 | return get_clkin_hz(); |
919 | 989 | ||
920 | ssel = bfin_read_PLL_DIV(); | 990 | ssel = bfin_read_PLL_DIV(); |
921 | if (ssel == cached_cclk_pll_div) | 991 | if (ssel == cached_cclk_pll_div) |
@@ -934,21 +1004,21 @@ u_long get_cclk(void) | |||
934 | EXPORT_SYMBOL(get_cclk); | 1004 | EXPORT_SYMBOL(get_cclk); |
935 | 1005 | ||
936 | /* Get the System clock */ | 1006 | /* Get the System clock */ |
937 | static u_long cached_sclk_pll_div, cached_sclk; | ||
938 | u_long get_sclk(void) | 1007 | u_long get_sclk(void) |
939 | { | 1008 | { |
1009 | static u_long cached_sclk; | ||
940 | u_long ssel; | 1010 | u_long ssel; |
941 | 1011 | ||
942 | if (bfin_read_PLL_STAT() & 0x1) | 1012 | /* The assumption here is that SCLK never changes at runtime. |
943 | return CONFIG_CLKIN_HZ; | 1013 | * If, someday, we support that, then we'll have to change this. |
944 | 1014 | */ | |
945 | ssel = bfin_read_PLL_DIV(); | 1015 | if (cached_sclk) |
946 | if (ssel == cached_sclk_pll_div) | ||
947 | return cached_sclk; | 1016 | return cached_sclk; |
948 | else | ||
949 | cached_sclk_pll_div = ssel; | ||
950 | 1017 | ||
951 | ssel &= 0xf; | 1018 | if (bfin_read_PLL_STAT() & 0x1) |
1019 | return get_clkin_hz(); | ||
1020 | |||
1021 | ssel = bfin_read_PLL_DIV() & 0xf; | ||
952 | if (0 == ssel) { | 1022 | if (0 == ssel) { |
953 | printk(KERN_WARNING "Invalid System Clock\n"); | 1023 | printk(KERN_WARNING "Invalid System Clock\n"); |
954 | ssel = 1; | 1024 | ssel = 1; |
@@ -982,17 +1052,18 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
982 | { | 1052 | { |
983 | char *cpu, *mmu, *fpu, *vendor, *cache; | 1053 | char *cpu, *mmu, *fpu, *vendor, *cache; |
984 | uint32_t revid; | 1054 | uint32_t revid; |
985 | 1055 | int cpu_num = *(unsigned int *)v; | |
986 | u_long cclk = 0, sclk = 0; | 1056 | u_long sclk, cclk; |
987 | u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0; | 1057 | u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0; |
1058 | struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num); | ||
988 | 1059 | ||
989 | cpu = CPU; | 1060 | cpu = CPU; |
990 | mmu = "none"; | 1061 | mmu = "none"; |
991 | fpu = "none"; | 1062 | fpu = "none"; |
992 | revid = bfin_revid(); | 1063 | revid = bfin_revid(); |
993 | 1064 | ||
994 | cclk = get_cclk(); | ||
995 | sclk = get_sclk(); | 1065 | sclk = get_sclk(); |
1066 | cclk = get_cclk(); | ||
996 | 1067 | ||
997 | switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) { | 1068 | switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) { |
998 | case 0xca: | 1069 | case 0xca: |
@@ -1003,10 +1074,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1003 | break; | 1074 | break; |
1004 | } | 1075 | } |
1005 | 1076 | ||
1006 | seq_printf(m, "processor\t: %d\n" | 1077 | seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor); |
1007 | "vendor_id\t: %s\n", | ||
1008 | *(unsigned int *)v, | ||
1009 | vendor); | ||
1010 | 1078 | ||
1011 | if (CPUID == bfin_cpuid()) | 1079 | if (CPUID == bfin_cpuid()) |
1012 | seq_printf(m, "cpu family\t: 0x%04x\n", CPUID); | 1080 | seq_printf(m, "cpu family\t: 0x%04x\n", CPUID); |
@@ -1029,12 +1097,12 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1029 | sclk/1000000, sclk%1000000); | 1097 | sclk/1000000, sclk%1000000); |
1030 | seq_printf(m, "bogomips\t: %lu.%02lu\n" | 1098 | seq_printf(m, "bogomips\t: %lu.%02lu\n" |
1031 | "Calibration\t: %lu loops\n", | 1099 | "Calibration\t: %lu loops\n", |
1032 | (loops_per_jiffy * HZ) / 500000, | 1100 | (cpudata->loops_per_jiffy * HZ) / 500000, |
1033 | ((loops_per_jiffy * HZ) / 5000) % 100, | 1101 | ((cpudata->loops_per_jiffy * HZ) / 5000) % 100, |
1034 | (loops_per_jiffy * HZ)); | 1102 | (cpudata->loops_per_jiffy * HZ)); |
1035 | 1103 | ||
1036 | /* Check Cache configuration */ | 1104 | /* Check Cache configuration */ |
1037 | switch (bfin_read_DMEM_CONTROL() & (1 << DMC0_P | 1 << DMC1_P)) { | 1105 | switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) { |
1038 | case ACACHE_BSRAM: | 1106 | case ACACHE_BSRAM: |
1039 | cache = "dbank-A/B\t: cache/sram"; | 1107 | cache = "dbank-A/B\t: cache/sram"; |
1040 | dcache_size = 16; | 1108 | dcache_size = 16; |
@@ -1058,10 +1126,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1058 | } | 1126 | } |
1059 | 1127 | ||
1060 | /* Is it turned on? */ | 1128 | /* Is it turned on? */ |
1061 | if ((bfin_read_DMEM_CONTROL() & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE)) | 1129 | if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE)) |
1062 | dcache_size = 0; | 1130 | dcache_size = 0; |
1063 | 1131 | ||
1064 | if ((bfin_read_IMEM_CONTROL() & (IMC | ENICPLB)) != (IMC | ENICPLB)) | 1132 | if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB)) |
1065 | icache_size = 0; | 1133 | icache_size = 0; |
1066 | 1134 | ||
1067 | seq_printf(m, "cache size\t: %d KB(L1 icache) " | 1135 | seq_printf(m, "cache size\t: %d KB(L1 icache) " |
@@ -1086,8 +1154,11 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1086 | "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n", | 1154 | "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n", |
1087 | dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, | 1155 | dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, |
1088 | BFIN_DLINES); | 1156 | BFIN_DLINES); |
1157 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
1158 | seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); | ||
1159 | #endif | ||
1089 | #ifdef CONFIG_BFIN_ICACHE_LOCK | 1160 | #ifdef CONFIG_BFIN_ICACHE_LOCK |
1090 | switch ((bfin_read_IMEM_CONTROL() >> 3) & WAYALL_L) { | 1161 | switch ((cpudata->imemctl >> 3) & WAYALL_L) { |
1091 | case WAY0_L: | 1162 | case WAY0_L: |
1092 | seq_printf(m, "Way0 Locked-Down\n"); | 1163 | seq_printf(m, "Way0 Locked-Down\n"); |
1093 | break; | 1164 | break; |
@@ -1137,6 +1208,12 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1137 | seq_printf(m, "No Ways are locked\n"); | 1208 | seq_printf(m, "No Ways are locked\n"); |
1138 | } | 1209 | } |
1139 | #endif | 1210 | #endif |
1211 | |||
1212 | if (cpu_num != num_possible_cpus() - 1) | ||
1213 | return 0; | ||
1214 | |||
1215 | if (L2_LENGTH) | ||
1216 | seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400); | ||
1140 | seq_printf(m, "board name\t: %s\n", bfin_board_name); | 1217 | seq_printf(m, "board name\t: %s\n", bfin_board_name); |
1141 | seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", | 1218 | seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", |
1142 | physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); | 1219 | physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); |
@@ -1144,6 +1221,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1144 | ((int)memory_end - (int)_stext) >> 10, | 1221 | ((int)memory_end - (int)_stext) >> 10, |
1145 | _stext, | 1222 | _stext, |
1146 | (void *)memory_end); | 1223 | (void *)memory_end); |
1224 | seq_printf(m, "\n"); | ||
1147 | 1225 | ||
1148 | return 0; | 1226 | return 0; |
1149 | } | 1227 | } |