path: root/arch/blackfin/kernel/setup.c
author:    Graf Yang <graf.yang@analog.com>  2008-11-18 04:48:22 -0500
committer: Bryan Wu <cooloney@kernel.org>  2008-11-18 04:48:22 -0500
commit:    8f65873e47784a390949f0d61e5692dbf2a8253e
tree:      4d9509bf5e52ebac190d79de04b783829d44f49e /arch/blackfin/kernel/setup.c
parent:    b8a989893cbdeb6c97a7b5af5f38fb0e480235f9
Blackfin arch: SMP supporting patchset: Blackfin kernel and memory management code
The Blackfin dual-core BF561 processor can support SMP-like features:
https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

In this patch, we extend the Blackfin kernel and memory management code with SMP support.

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
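The recurring pattern on the memory-management side of this patch is per-CPU bookkeeping: values the old code read from globals or MMRs at report time (cclk, loops_per_jiffy, IMEM_CONTROL/DMEM_CONTROL) are now recorded once per core in a per-CPU struct blackfin_cpudata and read back later, e.g. by show_cpuinfo(). Below is a minimal sketch of that pattern; the struct and function names are hypothetical stand-ins, the real definition lives in <asm/cpu.h>, which this patch starts including.

/* Illustration only -- not part of the patch. A simplified stand-in for
 * struct blackfin_cpudata, holding the fields this patch records per core. */
#include <linux/delay.h>	/* loops_per_jiffy */
#include <linux/percpu.h>
#include <linux/sched.h>	/* current, struct task_struct */

struct cpudata_sketch {
	struct task_struct *idle;	/* idle task running on this core */
	unsigned long loops_per_jiffy;	/* per-core delay calibration */
	unsigned long cclk;		/* core clock frequency of this core */
	unsigned int imemctl;		/* cached IMEM_CONTROL value */
	unsigned int dmemctl;		/* cached DMEM_CONTROL value */
};

static DEFINE_PER_CPU(struct cpudata_sketch, cpudata_sketch);

/* Each core fills in its own slot as it boots, so later readers
 * (e.g. show_cpuinfo) report per-core values without touching another
 * core's MMRs. In the real bfin_setup_cpudata(), cclk/imemctl/dmemctl
 * come from get_cclk() and the Blackfin bfin_read_*_CONTROL() reads. */
static void record_cpudata(unsigned int cpu)
{
	struct cpudata_sketch *d = &per_cpu(cpudata_sketch, cpu);

	d->idle = current;
	d->loops_per_jiffy = loops_per_jiffy;
}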
Diffstat (limited to 'arch/blackfin/kernel/setup.c')
-rw-r--r--  arch/blackfin/kernel/setup.c  163
1 file changed, 111 insertions, 52 deletions
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 71a9a8c53cea..c644d234a02e 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -26,11 +26,10 @@
26#include <asm/blackfin.h> 26#include <asm/blackfin.h>
27#include <asm/cplbinit.h> 27#include <asm/cplbinit.h>
28#include <asm/div64.h> 28#include <asm/div64.h>
29#include <asm/cpu.h>
29#include <asm/fixed_code.h> 30#include <asm/fixed_code.h>
30#include <asm/early_printk.h> 31#include <asm/early_printk.h>
31 32
32static DEFINE_PER_CPU(struct cpu, cpu_devices);
33
34u16 _bfin_swrst; 33u16 _bfin_swrst;
35EXPORT_SYMBOL(_bfin_swrst); 34EXPORT_SYMBOL(_bfin_swrst);
36 35
@@ -79,29 +78,76 @@ static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
79static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata; 78static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
80static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata; 79static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
81 80
82void __init bfin_cache_init(void) 81DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);
83{ 82
84#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) 83#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
85 generate_cplb_tables(); 84void __init generate_cplb_tables(void)
85{
86 unsigned int cpu;
87
88 /* Generate per-CPU I&D CPLB tables */
89 for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
90 generate_cplb_tables_cpu(cpu);
91}
86#endif 92#endif
87 93
94void __cpuinit bfin_setup_caches(unsigned int cpu)
95{
88#ifdef CONFIG_BFIN_ICACHE 96#ifdef CONFIG_BFIN_ICACHE
89 bfin_icache_init(); 97#ifdef CONFIG_MPU
90 printk(KERN_INFO "Instruction Cache Enabled\n"); 98 bfin_icache_init(icplb_tbl[cpu]);
99#else
100 bfin_icache_init(icplb_tables[cpu]);
101#endif
91#endif 102#endif
92 103
93#ifdef CONFIG_BFIN_DCACHE 104#ifdef CONFIG_BFIN_DCACHE
94 bfin_dcache_init(); 105#ifdef CONFIG_MPU
95 printk(KERN_INFO "Data Cache Enabled" 106 bfin_dcache_init(dcplb_tbl[cpu]);
107#else
108 bfin_dcache_init(dcplb_tables[cpu]);
109#endif
110#endif
111
112 /*
113 * In cache coherence emulation mode, we need to have the
114 * D-cache enabled before running any atomic operation which
115 * might invove cache invalidation (i.e. spinlock, rwlock).
116 * So printk's are deferred until then.
117 */
118#ifdef CONFIG_BFIN_ICACHE
119 printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
120#endif
121#ifdef CONFIG_BFIN_DCACHE
122 printk(KERN_INFO "Data Cache Enabled for CPU%u"
96# if defined CONFIG_BFIN_WB 123# if defined CONFIG_BFIN_WB
97 " (write-back)" 124 " (write-back)"
98# elif defined CONFIG_BFIN_WT 125# elif defined CONFIG_BFIN_WT
99 " (write-through)" 126 " (write-through)"
100# endif 127# endif
101 "\n"); 128 "\n", cpu);
102#endif 129#endif
103} 130}
104 131
132void __cpuinit bfin_setup_cpudata(unsigned int cpu)
133{
134 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
135
136 cpudata->idle = current;
137 cpudata->loops_per_jiffy = loops_per_jiffy;
138 cpudata->cclk = get_cclk();
139 cpudata->imemctl = bfin_read_IMEM_CONTROL();
140 cpudata->dmemctl = bfin_read_DMEM_CONTROL();
141}
142
143void __init bfin_cache_init(void)
144{
145#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
146 generate_cplb_tables();
147#endif
148 bfin_setup_caches(0);
149}
150
105void __init bfin_relocate_l1_mem(void) 151void __init bfin_relocate_l1_mem(void)
106{ 152{
107 unsigned long l1_code_length; 153 unsigned long l1_code_length;
@@ -230,7 +276,7 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
230 /* record all known change-points (starting and ending addresses), 276 /* record all known change-points (starting and ending addresses),
231 omitting those that are for empty memory regions */ 277 omitting those that are for empty memory regions */
232 chgidx = 0; 278 chgidx = 0;
233 for (i = 0; i < old_nr; i++) { 279 for (i = 0; i < old_nr; i++) {
234 if (map[i].size != 0) { 280 if (map[i].size != 0) {
235 change_point[chgidx]->addr = map[i].addr; 281 change_point[chgidx]->addr = map[i].addr;
236 change_point[chgidx++]->pentry = &map[i]; 282 change_point[chgidx++]->pentry = &map[i];
@@ -238,13 +284,13 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
238 change_point[chgidx++]->pentry = &map[i]; 284 change_point[chgidx++]->pentry = &map[i];
239 } 285 }
240 } 286 }
241 chg_nr = chgidx; /* true number of change-points */ 287 chg_nr = chgidx; /* true number of change-points */
242 288
243 /* sort change-point list by memory addresses (low -> high) */ 289 /* sort change-point list by memory addresses (low -> high) */
244 still_changing = 1; 290 still_changing = 1;
245 while (still_changing) { 291 while (still_changing) {
246 still_changing = 0; 292 still_changing = 0;
247 for (i = 1; i < chg_nr; i++) { 293 for (i = 1; i < chg_nr; i++) {
248 /* if <current_addr> > <last_addr>, swap */ 294 /* if <current_addr> > <last_addr>, swap */
249 /* or, if current=<start_addr> & last=<end_addr>, swap */ 295 /* or, if current=<start_addr> & last=<end_addr>, swap */
250 if ((change_point[i]->addr < change_point[i-1]->addr) || 296 if ((change_point[i]->addr < change_point[i-1]->addr) ||
@@ -261,10 +307,10 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
261 } 307 }
262 308
263 /* create a new memmap, removing overlaps */ 309 /* create a new memmap, removing overlaps */
264 overlap_entries = 0; /* number of entries in the overlap table */ 310 overlap_entries = 0; /* number of entries in the overlap table */
265 new_entry = 0; /* index for creating new memmap entries */ 311 new_entry = 0; /* index for creating new memmap entries */
266 last_type = 0; /* start with undefined memory type */ 312 last_type = 0; /* start with undefined memory type */
267 last_addr = 0; /* start with 0 as last starting address */ 313 last_addr = 0; /* start with 0 as last starting address */
268 /* loop through change-points, determining affect on the new memmap */ 314 /* loop through change-points, determining affect on the new memmap */
269 for (chgidx = 0; chgidx < chg_nr; chgidx++) { 315 for (chgidx = 0; chgidx < chg_nr; chgidx++) {
270 /* keep track of all overlapping memmap entries */ 316 /* keep track of all overlapping memmap entries */
@@ -286,14 +332,14 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
286 if (overlap_list[i]->type > current_type) 332 if (overlap_list[i]->type > current_type)
287 current_type = overlap_list[i]->type; 333 current_type = overlap_list[i]->type;
288 /* continue building up new memmap based on this information */ 334 /* continue building up new memmap based on this information */
289 if (current_type != last_type) { 335 if (current_type != last_type) {
290 if (last_type != 0) { 336 if (last_type != 0) {
291 new_map[new_entry].size = 337 new_map[new_entry].size =
292 change_point[chgidx]->addr - last_addr; 338 change_point[chgidx]->addr - last_addr;
293 /* move forward only if the new size was non-zero */ 339 /* move forward only if the new size was non-zero */
294 if (new_map[new_entry].size != 0) 340 if (new_map[new_entry].size != 0)
295 if (++new_entry >= BFIN_MEMMAP_MAX) 341 if (++new_entry >= BFIN_MEMMAP_MAX)
296 break; /* no more space left for new entries */ 342 break; /* no more space left for new entries */
297 } 343 }
298 if (current_type != 0) { 344 if (current_type != 0) {
299 new_map[new_entry].addr = change_point[chgidx]->addr; 345 new_map[new_entry].addr = change_point[chgidx]->addr;
@@ -303,9 +349,9 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
303 last_type = current_type; 349 last_type = current_type;
304 } 350 }
305 } 351 }
306 new_nr = new_entry; /* retain count for new entries */ 352 new_nr = new_entry; /* retain count for new entries */
307 353
308 /* copy new mapping into original location */ 354 /* copy new mapping into original location */
309 memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry)); 355 memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
310 *pnr_map = new_nr; 356 *pnr_map = new_nr;
311 357
@@ -361,7 +407,6 @@ static __init int parse_memmap(char *arg)
361 * - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region 407 * - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
362 * @ from <start> to <start>+<mem>, type RAM 408 * @ from <start> to <start>+<mem>, type RAM
363 * $ from <start> to <start>+<mem>, type RESERVED 409 * $ from <start> to <start>+<mem>, type RESERVED
364 *
365 */ 410 */
366static __init void parse_cmdline_early(char *cmdline_p) 411static __init void parse_cmdline_early(char *cmdline_p)
367{ 412{
@@ -383,12 +428,10 @@ static __init void parse_cmdline_early(char *cmdline_p)
383 if (*to != ' ') { 428 if (*to != ' ') {
384 if (*to == '$' 429 if (*to == '$'
385 || *(to + 1) == '$') 430 || *(to + 1) == '$')
386 reserved_mem_dcache_on = 431 reserved_mem_dcache_on = 1;
387 1;
388 if (*to == '#' 432 if (*to == '#'
389 || *(to + 1) == '#') 433 || *(to + 1) == '#')
390 reserved_mem_icache_on = 434 reserved_mem_icache_on = 1;
391 1;
392 } 435 }
393 } 436 }
394 } else if (!memcmp(to, "earlyprintk=", 12)) { 437 } else if (!memcmp(to, "earlyprintk=", 12)) {
@@ -417,9 +460,8 @@ static __init void parse_cmdline_early(char *cmdline_p)
417 * [_ramend - DMA_UNCACHED_REGION, 460 * [_ramend - DMA_UNCACHED_REGION,
418 * _ramend]: uncached DMA region 461 * _ramend]: uncached DMA region
419 * [_ramend, physical_mem_end]: memory not managed by kernel 462 * [_ramend, physical_mem_end]: memory not managed by kernel
420 *
421 */ 463 */
422static __init void memory_setup(void) 464static __init void memory_setup(void)
423{ 465{
424#ifdef CONFIG_MTD_UCLINUX 466#ifdef CONFIG_MTD_UCLINUX
425 unsigned long mtd_phys = 0; 467 unsigned long mtd_phys = 0;
@@ -436,7 +478,7 @@ static __init void memory_setup(void)
436 memory_end = _ramend - DMA_UNCACHED_REGION; 478 memory_end = _ramend - DMA_UNCACHED_REGION;
437 479
438#ifdef CONFIG_MPU 480#ifdef CONFIG_MPU
439 /* Round up to multiple of 4MB. */ 481 /* Round up to multiple of 4MB */
440 memory_start = (_ramstart + 0x3fffff) & ~0x3fffff; 482 memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
441#else 483#else
442 memory_start = PAGE_ALIGN(_ramstart); 484 memory_start = PAGE_ALIGN(_ramstart);
@@ -616,7 +658,7 @@ static __init void setup_bootmem_allocator(void)
616 end_pfn = memory_end >> PAGE_SHIFT; 658 end_pfn = memory_end >> PAGE_SHIFT;
617 659
618 /* 660 /*
619 * give all the memory to the bootmap allocator, tell it to put the 661 * give all the memory to the bootmap allocator, tell it to put the
620 * boot mem_map at the start of memory. 662 * boot mem_map at the start of memory.
621 */ 663 */
622 bootmap_size = init_bootmem_node(NODE_DATA(0), 664 bootmap_size = init_bootmem_node(NODE_DATA(0),
@@ -791,7 +833,11 @@ void __init setup_arch(char **cmdline_p)
791 bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT); 833 bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
792#endif 834#endif
793 835
836#ifdef CONFIG_SMP
837 if (_bfin_swrst & SWRST_DBL_FAULT_A) {
838#else
794 if (_bfin_swrst & RESET_DOUBLE) { 839 if (_bfin_swrst & RESET_DOUBLE) {
840#endif
795 printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n"); 841 printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
796#ifdef CONFIG_DEBUG_DOUBLEFAULT 842#ifdef CONFIG_DEBUG_DOUBLEFAULT
797 /* We assume the crashing kernel, and the current symbol table match */ 843 /* We assume the crashing kernel, and the current symbol table match */
@@ -835,7 +881,7 @@ void __init setup_arch(char **cmdline_p)
835 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n"); 881 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
836 882
837 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n", 883 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
838 cclk / 1000000, sclk / 1000000); 884 cclk / 1000000, sclk / 1000000);
839 885
840 if (ANOMALY_05000273 && (cclk >> 1) <= sclk) 886 if (ANOMALY_05000273 && (cclk >> 1) <= sclk)
841 printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n"); 887 printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n");
@@ -867,18 +913,21 @@ void __init setup_arch(char **cmdline_p)
867 BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start 913 BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
868 != SAFE_USER_INSTRUCTION - FIXED_CODE_START); 914 != SAFE_USER_INSTRUCTION - FIXED_CODE_START);
869 915
916#ifdef CONFIG_SMP
917 platform_init_cpus();
918#endif
870 init_exception_vectors(); 919 init_exception_vectors();
871 bfin_cache_init(); 920 bfin_cache_init(); /* Initialize caches for the boot CPU */
872} 921}
873 922
874static int __init topology_init(void) 923static int __init topology_init(void)
875{ 924{
876 int cpu; 925 unsigned int cpu;
926 /* Record CPU-private information for the boot processor. */
927 bfin_setup_cpudata(0);
877 928
878 for_each_possible_cpu(cpu) { 929 for_each_possible_cpu(cpu) {
879 struct cpu *c = &per_cpu(cpu_devices, cpu); 930 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
880
881 register_cpu(c, cpu);
882 } 931 }
883 932
884 return 0; 933 return 0;
@@ -983,15 +1032,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
983 char *cpu, *mmu, *fpu, *vendor, *cache; 1032 char *cpu, *mmu, *fpu, *vendor, *cache;
984 uint32_t revid; 1033 uint32_t revid;
985 1034
986 u_long cclk = 0, sclk = 0; 1035 u_long sclk = 0;
987 u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0; 1036 u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
1037 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, *(unsigned int *)v);
988 1038
989 cpu = CPU; 1039 cpu = CPU;
990 mmu = "none"; 1040 mmu = "none";
991 fpu = "none"; 1041 fpu = "none";
992 revid = bfin_revid(); 1042 revid = bfin_revid();
993 1043
994 cclk = get_cclk();
995 sclk = get_sclk(); 1044 sclk = get_sclk();
996 1045
997 switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) { 1046 switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
@@ -1003,10 +1052,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1003 break; 1052 break;
1004 } 1053 }
1005 1054
1006 seq_printf(m, "processor\t: %d\n" 1055 seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n",
1007 "vendor_id\t: %s\n", 1056 *(unsigned int *)v, vendor);
1008 *(unsigned int *)v,
1009 vendor);
1010 1057
1011 if (CPUID == bfin_cpuid()) 1058 if (CPUID == bfin_cpuid())
1012 seq_printf(m, "cpu family\t: 0x%04x\n", CPUID); 1059 seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
@@ -1016,7 +1063,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1016 1063
1017 seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n" 1064 seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
1018 "stepping\t: %d\n", 1065 "stepping\t: %d\n",
1019 cpu, cclk/1000000, sclk/1000000, 1066 cpu, cpudata->cclk/1000000, sclk/1000000,
1020#ifdef CONFIG_MPU 1067#ifdef CONFIG_MPU
1021 "mpu on", 1068 "mpu on",
1022#else 1069#else
@@ -1025,16 +1072,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1025 revid); 1072 revid);
1026 1073
1027 seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", 1074 seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
1028 cclk/1000000, cclk%1000000, 1075 cpudata->cclk/1000000, cpudata->cclk%1000000,
1029 sclk/1000000, sclk%1000000); 1076 sclk/1000000, sclk%1000000);
1030 seq_printf(m, "bogomips\t: %lu.%02lu\n" 1077 seq_printf(m, "bogomips\t: %lu.%02lu\n"
1031 "Calibration\t: %lu loops\n", 1078 "Calibration\t: %lu loops\n",
1032 (loops_per_jiffy * HZ) / 500000, 1079 (cpudata->loops_per_jiffy * HZ) / 500000,
1033 ((loops_per_jiffy * HZ) / 5000) % 100, 1080 ((cpudata->loops_per_jiffy * HZ) / 5000) % 100,
1034 (loops_per_jiffy * HZ)); 1081 (cpudata->loops_per_jiffy * HZ));
1035 1082
1036 /* Check Cache configutation */ 1083 /* Check Cache configutation */
1037 switch (bfin_read_DMEM_CONTROL() & (1 << DMC0_P | 1 << DMC1_P)) { 1084 switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
1038 case ACACHE_BSRAM: 1085 case ACACHE_BSRAM:
1039 cache = "dbank-A/B\t: cache/sram"; 1086 cache = "dbank-A/B\t: cache/sram";
1040 dcache_size = 16; 1087 dcache_size = 16;
@@ -1058,10 +1105,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1058 } 1105 }
1059 1106
1060 /* Is it turned on? */ 1107 /* Is it turned on? */
1061 if ((bfin_read_DMEM_CONTROL() & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE)) 1108 if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
1062 dcache_size = 0; 1109 dcache_size = 0;
1063 1110
1064 if ((bfin_read_IMEM_CONTROL() & (IMC | ENICPLB)) != (IMC | ENICPLB)) 1111 if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
1065 icache_size = 0; 1112 icache_size = 0;
1066 1113
1067 seq_printf(m, "cache size\t: %d KB(L1 icache) " 1114 seq_printf(m, "cache size\t: %d KB(L1 icache) "
@@ -1086,8 +1133,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1086 "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n", 1133 "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
1087 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, 1134 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
1088 BFIN_DLINES); 1135 BFIN_DLINES);
1136#ifdef __ARCH_SYNC_CORE_DCACHE
1137 seq_printf(m,
1138 "SMP Dcache Flushes\t: %lu\n\n",
1139 per_cpu(cpu_data, *(unsigned int *)v).dcache_invld_count);
1140#endif
1089#ifdef CONFIG_BFIN_ICACHE_LOCK 1141#ifdef CONFIG_BFIN_ICACHE_LOCK
1090 switch ((bfin_read_IMEM_CONTROL() >> 3) & WAYALL_L) { 1142 switch ((cpudata->imemctl >> 3) & WAYALL_L) {
1091 case WAY0_L: 1143 case WAY0_L:
1092 seq_printf(m, "Way0 Locked-Down\n"); 1144 seq_printf(m, "Way0 Locked-Down\n");
1093 break; 1145 break;
@@ -1137,6 +1189,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1137 seq_printf(m, "No Ways are locked\n"); 1189 seq_printf(m, "No Ways are locked\n");
1138 } 1190 }
1139#endif 1191#endif
1192 if (*(unsigned int *)v != NR_CPUS-1)
1193 return 0;
1194
1195#if L2_LENGTH
1196 seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
1197#endif
1140 seq_printf(m, "board name\t: %s\n", bfin_board_name); 1198 seq_printf(m, "board name\t: %s\n", bfin_board_name);
1141 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", 1199 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
1142 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); 1200 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
@@ -1144,6 +1202,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1144 ((int)memory_end - (int)_stext) >> 10, 1202 ((int)memory_end - (int)_stext) >> 10,
1145 _stext, 1203 _stext,
1146 (void *)memory_end); 1204 (void *)memory_end);
1205 seq_printf(m, "\n");
1147 1206
1148 return 0; 1207 return 0;
1149} 1208}