author    Bernd Schmidt <bernds_cb1@t-online.de>  2009-01-07 10:14:38 -0500
committer Bryan Wu <cooloney@kernel.org>          2009-01-07 10:14:38 -0500
commit    dbdf20db537a5369c65330f878ad4905020a8bfa (patch)
tree      c7fa553755e2d75a6e98d3f32fbe41fab9f72609 /arch/blackfin
parent    6651ece9e257302ee695ee76e69a4427f7033235 (diff)
Blackfin arch: Faster C implementation of no-MPU CPLB handler
This is a mixture of Michael McTernan's patch and the existing cplb-mpu code.

We ditch the old cplb-nompu implementation, which is a good example of why a
good algorithm in a HLL is preferable to a bad algorithm written in assembly.
Rather than try to construct a table of all possible CPLBs and search it, we
just create a (smaller) table of memory regions and their attributes.

Some of the data structures are now unified for both the mpu and nompu cases.
A lot of needless complexity in cplbinit.c is removed.

Further optimizations:
  * compile cplbmgr.c with a lot of -ffixed-reg options, and omit saving
    these registers on the stack when entering a CPLB exception.
  * lose cli/nop/nop/sti sequences for some workarounds - these don't
    make sense in an exception context

Additional code unification should be possible after this.

[Mike Frysinger <vapier.adi@gmail.com>:
  - convert CPP if statements to C if statements
  - remove redundant statements
  - use a do...while loop rather than a for loop to get slightly better
    optimization and to avoid gcc "may be used uninitialized" warnings ...
    we know that the [id]cplb_nr_bounds variables will never be 0, so this
    is OK
  - the no-mpu code was the last user of MAX_MEM_SIZE and with that
    rewritten, we can punt it
  - add some BUG_ON() checks to make sure we don't overflow the small
    cplb_bounds array
  - add i/d cplb entries for the bootrom because there are functions/data
    in there we want to access
  - we do not need a NULL trailing entry as any time we access the bounds
    arrays, we use the nr_bounds variable]

Signed-off-by: Michael McTernan <mmcternan@airvana.com>
Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bernd Schmidt <bernds_cb1@t-online.de>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
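The heart of the new scheme is the boundary table declared in the cplbinit.h
hunk below: a short, address-ordered list of region end addresses together
with the CPLB data value to install for each region, where a data value of 0
marks an unpopulated addressing hole. The miss handlers in the new
cplb-nompu/cplbmgr.c simply walk that list to classify a faulting address.
A minimal sketch of the lookup follows; the standalone helper name is
illustrative only and not part of the patch:

/*
 * Sketch of the region-boundary lookup used by the new no-MPU CPLB
 * miss handlers.  Each entry names the end address of a region and
 * the CPLB data (attribute) bits for addresses below it.
 */
struct cplb_boundary {
	unsigned long eaddr;	/* End of this region. */
	unsigned long data;	/* CPLB data value; 0 marks a hole. */
};

/* Illustrative helper: return the CPLB data covering 'addr', or 0 when
 * the address falls in a hole or beyond the last boundary (the real
 * handlers treat both cases as CPLB_NO_ADDR_MATCH).
 */
static unsigned long lookup_cplb_data(const struct cplb_boundary *bounds,
				      int nr_bounds, unsigned long addr)
{
	int idx;

	for (idx = 0; idx < nr_bounds; idx++)
		if (addr < bounds[idx].eaddr)
			return bounds[idx].data;

	return 0;
}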
Diffstat (limited to 'arch/blackfin')
-rw-r--r--  arch/blackfin/Kconfig                          8
-rw-r--r--  arch/blackfin/include/asm/context.S           39
-rw-r--r--  arch/blackfin/include/asm/cplb-mpu.h          62
-rw-r--r--  arch/blackfin/include/asm/cplb.h               4
-rw-r--r--  arch/blackfin/include/asm/cplbinit.h         108
-rw-r--r--  arch/blackfin/include/asm/entry.h              2
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/Makefile         5
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c       4
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c        9
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/Makefile       7
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cacheinit.c   26
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbhdlr.S   130
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c   498
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbmgr.S    648
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbmgr.c    283
-rw-r--r--  arch/blackfin/kernel/cplbinfo.c               84
-rw-r--r--  arch/blackfin/kernel/setup.c                   9
-rw-r--r--  arch/blackfin/mach-common/entry.S             21
-rw-r--r--  arch/blackfin/mm/init.c                       10
19 files changed, 515 insertions, 1442 deletions
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index b58af2b1bffe..bbfeecae6403 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -524,14 +524,6 @@ config MEM_SDGCTL
 	  default 0x0
 endmenu
 
-config MAX_MEM_SIZE
-	int "Max SDRAM Memory Size in MBytes"
-	depends on !MPU
-	default 512
-	help
-	  This is the max memory size that the kernel will create CPLB
-	  tables for.  Your system will not be able to handle any more.
-
 #
 # Max & Min Speeds for various Chips
 #
diff --git a/arch/blackfin/include/asm/context.S b/arch/blackfin/include/asm/context.S
index 9ce21f68e914..16561ab18b38 100644
--- a/arch/blackfin/include/asm/context.S
+++ b/arch/blackfin/include/asm/context.S
@@ -357,3 +357,42 @@
 	SYSCFG = [sp++];
 	csync;
 .endm
+
+.macro save_context_cplb
+	[--sp] = (R7:0, P5:0);
+	[--sp] = fp;
+
+	[--sp] = a0.x;
+	[--sp] = a0.w;
+	[--sp] = a1.x;
+	[--sp] = a1.w;
+
+	[--sp] = LC0;
+	[--sp] = LC1;
+	[--sp] = LT0;
+	[--sp] = LT1;
+	[--sp] = LB0;
+	[--sp] = LB1;
+
+	[--sp] = RETS;
+.endm
+
+.macro restore_context_cplb
+	RETS = [sp++];
+
+	LB1 = [sp++];
+	LB0 = [sp++];
+	LT1 = [sp++];
+	LT0 = [sp++];
+	LC1 = [sp++];
+	LC0 = [sp++];
+
+	a1.w = [sp++];
+	a1.x = [sp++];
+	a0.w = [sp++];
+	a0.x = [sp++];
+
+	fp = [sp++];
+
+	(R7:0, P5:0) = [SP++];
+.endm
diff --git a/arch/blackfin/include/asm/cplb-mpu.h b/arch/blackfin/include/asm/cplb-mpu.h
deleted file mode 100644
index 80680ad7a378..000000000000
--- a/arch/blackfin/include/asm/cplb-mpu.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * File:         include/asm-blackfin/cplbinit.h
- * Based on:
- * Author:
- *
- * Created:
- * Description:
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __ASM_BFIN_CPLB_MPU_H
-#define __ASM_BFIN_CPLB_MPU_H
-#include <linux/threads.h>
-
-struct cplb_entry {
-	unsigned long data, addr;
-};
-
-struct mem_region {
-	unsigned long start, end;
-	unsigned long dcplb_data;
-	unsigned long icplb_data;
-};
-
-extern struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
-extern struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
-extern int first_switched_icplb;
-extern int first_mask_dcplb;
-extern int first_switched_dcplb;
-
-extern int nr_dcplb_miss[], nr_icplb_miss[], nr_icplb_supv_miss[];
-extern int nr_dcplb_prot[], nr_cplb_flush[];
-
-extern int page_mask_order;
-extern int page_mask_nelts;
-
-extern unsigned long *current_rwx_mask[NR_CPUS];
-
-extern void flush_switched_cplbs(unsigned int);
-extern void set_mask_dcplbs(unsigned long *, unsigned int);
-
-extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
-
-#endif /* __ASM_BFIN_CPLB_MPU_H */
diff --git a/arch/blackfin/include/asm/cplb.h b/arch/blackfin/include/asm/cplb.h
index 5f7545d06200..ad566ff9ad16 100644
--- a/arch/blackfin/include/asm/cplb.h
+++ b/arch/blackfin/include/asm/cplb.h
@@ -116,4 +116,8 @@
 #define CPLB_INOCACHE	CPLB_USER_RD | CPLB_VALID
 #define CPLB_IDOCACHE	CPLB_INOCACHE | CPLB_L1_CHBL
 
+#define FAULT_RW	(1 << 16)
+#define FAULT_USERSUPV	(1 << 17)
+#define FAULT_CPLBBITS	0x0000ffff
+
 #endif /* _CPLB_H */
diff --git a/arch/blackfin/include/asm/cplbinit.h b/arch/blackfin/include/asm/cplbinit.h
index 2aeec87d24e1..05b14a631d0c 100644
--- a/arch/blackfin/include/asm/cplbinit.h
+++ b/arch/blackfin/include/asm/cplbinit.h
@@ -32,96 +32,56 @@
32 32
33#include <asm/blackfin.h> 33#include <asm/blackfin.h>
34#include <asm/cplb.h> 34#include <asm/cplb.h>
35#include <linux/threads.h>
35 36
36#ifdef CONFIG_MPU 37#ifdef CONFIG_CPLB_SWITCH_TAB_L1
37 38# define PDT_ATTR __attribute__((l1_data))
38#include <asm/cplb-mpu.h>
39extern void bfin_icache_init(struct cplb_entry *icplb_tbl);
40extern void bfin_dcache_init(struct cplb_entry *icplb_tbl);
41
42#else 39#else
40# define PDT_ATTR
41#endif
43 42
44#define INITIAL_T 0x1 43struct cplb_entry {
45#define SWITCH_T 0x2 44 unsigned long data, addr;
46#define I_CPLB 0x4 45};
47#define D_CPLB 0x8
48 46
49#define ASYNC_MEMORY_CPLB_COVERAGE ((ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \ 47struct cplb_boundary {
50 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE) / SIZE_4M) 48 unsigned long eaddr; /* End of this region. */
49 unsigned long data; /* CPLB data value. */
50};
51 51
52#define CPLB_MEM CONFIG_MAX_MEM_SIZE 52extern struct cplb_boundary dcplb_bounds[];
53extern struct cplb_boundary icplb_bounds[];
54extern int dcplb_nr_bounds, icplb_nr_bounds;
53 55
54/* 56extern struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
55* Number of required data CPLB switchtable entries 57extern struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
56* MEMSIZE / 4 (we mostly install 4M page size CPLBs 58extern int first_switched_icplb;
57* approx 16 for smaller 1MB page size CPLBs for allignment purposes 59extern int first_switched_dcplb;
58* 1 for L1 Data Memory
59* possibly 1 for L2 Data Memory
60* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
61* 1 for ASYNC Memory
62*/
63#define MAX_SWITCH_D_CPLBS (((CPLB_MEM / 4) + 16 + 1 + 1 + 1 \
64 + ASYNC_MEMORY_CPLB_COVERAGE) * 2)
65 60
66/* 61extern int nr_dcplb_miss[], nr_icplb_miss[], nr_icplb_supv_miss[];
67* Number of required instruction CPLB switchtable entries 62extern int nr_dcplb_prot[], nr_cplb_flush[];
68* MEMSIZE / 4 (we mostly install 4M page size CPLBs
69* approx 12 for smaller 1MB page size CPLBs for allignment purposes
70* 1 for L1 Instruction Memory
71* possibly 1 for L2 Instruction Memory
72* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
73*/
74#define MAX_SWITCH_I_CPLBS (((CPLB_MEM / 4) + 12 + 1 + 1 + 1) * 2)
75
76/* Number of CPLB table entries, used for cplb-nompu. */
77#define CPLB_TBL_ENTRIES (16 * 4)
78
79enum {
80 ZERO_P, L1I_MEM, L1D_MEM, L2_MEM, SDRAM_KERN, SDRAM_RAM_MTD, SDRAM_DMAZ,
81 RES_MEM, ASYNC_MEM, OCB_ROM
82};
83 63
84struct cplb_desc { 64#ifdef CONFIG_MPU
85 u32 start; /* start address */
86 u32 end; /* end address */
87 u32 psize; /* prefered size if any otherwise 1MB or 4MB*/
88 u16 attr;/* attributes */
89 u16 i_conf;/* I-CPLB DATA */
90 u16 d_conf;/* D-CPLB DATA */
91 u16 valid;/* valid */
92 const s8 name[30];/* name */
93};
94 65
95struct cplb_tab { 66extern int first_mask_dcplb;
96 u_long *tab;
97 u16 pos;
98 u16 size;
99};
100 67
101extern u_long icplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1]; 68extern int page_mask_order;
102extern u_long dcplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1]; 69extern int page_mask_nelts;
103 70
104/* Till here we are discussing about the static memory management model. 71extern unsigned long *current_rwx_mask[NR_CPUS];
105 * However, the operating envoronments commonly define more CPLB
106 * descriptors to cover the entire addressable memory than will fit into
107 * the available on-chip 16 CPLB MMRs. When this happens, the below table
108 * will be used which will hold all the potentially required CPLB descriptors
109 *
110 * This is how Page descriptor Table is implemented in uClinux/Blackfin.
111 */
112 72
113extern u_long ipdt_tables[NR_CPUS][MAX_SWITCH_I_CPLBS+1]; 73extern void flush_switched_cplbs(unsigned int);
114extern u_long dpdt_tables[NR_CPUS][MAX_SWITCH_D_CPLBS+1]; 74extern void set_mask_dcplbs(unsigned long *, unsigned int);
115#ifdef CONFIG_CPLB_INFO 75
116extern u_long ipdt_swapcount_tables[NR_CPUS][MAX_SWITCH_I_CPLBS]; 76extern void __noreturn panic_cplb_error(int seqstat, struct pt_regs *);
117extern u_long dpdt_swapcount_tables[NR_CPUS][MAX_SWITCH_D_CPLBS];
118#endif
119extern void bfin_icache_init(u_long icplbs[]);
120extern void bfin_dcache_init(u_long dcplbs[]);
121 77
122#endif /* CONFIG_MPU */ 78#endif /* CONFIG_MPU */
123 79
80extern void bfin_icache_init(struct cplb_entry *icplb_tbl);
81extern void bfin_dcache_init(struct cplb_entry *icplb_tbl);
82
124#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) 83#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
84extern void generate_cplb_tables_all(void);
125extern void generate_cplb_tables_cpu(unsigned int cpu); 85extern void generate_cplb_tables_cpu(unsigned int cpu);
126#endif 86#endif
127#endif 87#endif
diff --git a/arch/blackfin/include/asm/entry.h b/arch/blackfin/include/asm/entry.h
index c4f721e0d00d..d94e4f5139d2 100644
--- a/arch/blackfin/include/asm/entry.h
+++ b/arch/blackfin/include/asm/entry.h
@@ -53,9 +53,11 @@
 /* This one pushes RETI without using CLI.  Interrupts are enabled. */
 #define SAVE_CONTEXT_SYSCALL	save_context_syscall
 #define SAVE_CONTEXT		save_context_with_interrupts
+#define SAVE_CONTEXT_CPLB	save_context_cplb
 
 #define RESTORE_ALL_SYS		restore_context_no_interrupts
 #define RESTORE_CONTEXT		restore_context_with_interrupts
+#define RESTORE_CONTEXT_CPLB	restore_context_cplb
 
 #endif /* __ASSEMBLY__ */
 #endif /* __BFIN_ENTRY_H */
diff --git a/arch/blackfin/kernel/cplb-mpu/Makefile b/arch/blackfin/kernel/cplb-mpu/Makefile
index bd92301a704b..7d70d3bf3212 100644
--- a/arch/blackfin/kernel/cplb-mpu/Makefile
+++ b/arch/blackfin/kernel/cplb-mpu/Makefile
@@ -3,3 +3,8 @@
 #
 
 obj-y := cplbinit.o cacheinit.o cplbmgr.o
+
+CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
+		    -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
+		    -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
+		    -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index 1ea7c18435ae..bdb958486e76 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -107,3 +107,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 	while (i_i < MAX_CPLBS)
 		icplb_tbl[cpu][i_i++].data = 0;
 }
+
+void generate_cplb_tables_all(void)
+{
+}
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 76bd99177de5..5ef5d1a787fc 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -25,8 +25,13 @@
 #include <asm/cplbinit.h>
 #include <asm/mmu_context.h>
 
-#define FAULT_RW	(1 << 16)
-#define FAULT_USERSUPV	(1 << 17)
+/*
+ * WARNING
+ *
+ * This file is compiled with certain -ffixed-reg options.  We have to
+ * make sure not to call any functions here that could clobber these
+ * registers.
+ */
 
 int page_mask_nelts;
 int page_mask_order;
diff --git a/arch/blackfin/kernel/cplb-nompu/Makefile b/arch/blackfin/kernel/cplb-nompu/Makefile
index 4010eca1c6c2..7d70d3bf3212 100644
--- a/arch/blackfin/kernel/cplb-nompu/Makefile
+++ b/arch/blackfin/kernel/cplb-nompu/Makefile
@@ -2,4 +2,9 @@
 # arch/blackfin/kernel/cplb-nompu/Makefile
 #
 
-obj-y := cplbinit.o cacheinit.o cplbhdlr.o cplbmgr.o
+obj-y := cplbinit.o cacheinit.o cplbmgr.o
+
+CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
+		    -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
+		    -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
+		    -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
diff --git a/arch/blackfin/kernel/cplb-nompu/cacheinit.c b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
index 3a385aec67d5..c6ff947f9d37 100644
--- a/arch/blackfin/kernel/cplb-nompu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
@@ -25,19 +25,15 @@
 #include <asm/cplbinit.h>
 
 #if defined(CONFIG_BFIN_ICACHE)
-void __cpuinit bfin_icache_init(u_long icplb[])
+void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
 {
-	unsigned long *table = icplb;
 	unsigned long ctrl;
 	int i;
 
+	SSYNC();
 	for (i = 0; i < MAX_CPLBS; i++) {
-		unsigned long addr = *table++;
-		unsigned long data = *table++;
-		if (addr == (unsigned long)-1)
-			break;
-		bfin_write32(ICPLB_ADDR0 + i * 4, addr);
-		bfin_write32(ICPLB_DATA0 + i * 4, data);
+		bfin_write32(ICPLB_ADDR0 + i * 4, icplb_tbl[i].addr);
+		bfin_write32(ICPLB_DATA0 + i * 4, icplb_tbl[i].data);
 	}
 	ctrl = bfin_read_IMEM_CONTROL();
 	ctrl |= IMC | ENICPLB;
@@ -47,24 +43,20 @@ void __cpuinit bfin_icache_init(u_long icplb[])
 #endif
 
 #if defined(CONFIG_BFIN_DCACHE)
-void __cpuinit bfin_dcache_init(u_long dcplb[])
+void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
 {
-	unsigned long *table = dcplb;
 	unsigned long ctrl;
 	int i;
 
+	SSYNC();
 	for (i = 0; i < MAX_CPLBS; i++) {
-		unsigned long addr = *table++;
-		unsigned long data = *table++;
-		if (addr == (unsigned long)-1)
-			break;
-		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
-		bfin_write32(DCPLB_DATA0 + i * 4, data);
+		bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
+		bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
 	}
+
 	ctrl = bfin_read_DMEM_CONTROL();
 	ctrl |= DMEM_CNTR;
 	bfin_write_DMEM_CONTROL(ctrl);
-
 	SSYNC();
 }
 #endif
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S b/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S
deleted file mode 100644
index ecbabc0a1fed..000000000000
--- a/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S
+++ /dev/null
@@ -1,130 +0,0 @@
1/*
2 * File: arch/blackfin/mach-common/cplbhdlr.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created: ?
7 * Description: CPLB exception handler
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/linkage.h>
31#include <asm/cplb.h>
32#include <asm/entry.h>
33
34#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
35.section .l1.text
36#else
37.text
38#endif
39
40.type _cplb_mgr, STT_FUNC;
41.type _panic_cplb_error, STT_FUNC;
42
43.align 2
44
45ENTRY(__cplb_hdr)
46 R2 = SEQSTAT;
47
48 /* Mask the contents of SEQSTAT and leave only EXCAUSE in R2 */
49 R2 <<= 26;
50 R2 >>= 26;
51
52 R1 = 0x23; /* Data access CPLB protection violation */
53 CC = R2 == R1;
54 IF !CC JUMP .Lnot_data_write;
55 R0 = 2; /* is a write to data space*/
56 JUMP .Lis_icplb_miss;
57
58.Lnot_data_write:
59 R1 = 0x2C; /* CPLB miss on an instruction fetch */
60 CC = R2 == R1;
61 R0 = 0; /* is_data_miss == False*/
62 IF CC JUMP .Lis_icplb_miss;
63
64 R1 = 0x26;
65 CC = R2 == R1;
66 IF !CC JUMP .Lunknown;
67
68 R0 = 1; /* is_data_miss == True*/
69
70.Lis_icplb_miss:
71
72#if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE)
73# if defined(CONFIG_BFIN_ICACHE) && !defined(CONFIG_BFIN_DCACHE)
74 R1 = CPLB_ENABLE_ICACHE;
75# endif
76# if !defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE)
77 R1 = CPLB_ENABLE_DCACHE;
78# endif
79# if defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE)
80 R1 = CPLB_ENABLE_DCACHE | CPLB_ENABLE_ICACHE;
81# endif
82#else
83 R1 = 0;
84#endif
85
86 [--SP] = RETS;
87 CALL _cplb_mgr;
88 RETS = [SP++];
89 CC = R0 == 0;
90 IF !CC JUMP .Lnot_replaced;
91 RTS;
92
93/*
94 * Diagnostic exception handlers
95 */
96.Lunknown:
97 R0 = CPLB_UNKNOWN_ERR;
98 JUMP .Lcplb_error;
99
100.Lnot_replaced:
101 CC = R0 == CPLB_NO_UNLOCKED;
102 IF !CC JUMP .Lnext_check;
103 R0 = CPLB_NO_UNLOCKED;
104 JUMP .Lcplb_error;
105
106.Lnext_check:
107 CC = R0 == CPLB_NO_ADDR_MATCH;
108 IF !CC JUMP .Lnext_check2;
109 R0 = CPLB_NO_ADDR_MATCH;
110 JUMP .Lcplb_error;
111
112.Lnext_check2:
113 CC = R0 == CPLB_PROT_VIOL;
114 IF !CC JUMP .Lstrange_return_from_cplb_mgr;
115 R0 = CPLB_PROT_VIOL;
116 JUMP .Lcplb_error;
117
118.Lstrange_return_from_cplb_mgr:
119 IDLE;
120 CSYNC;
121 JUMP .Lstrange_return_from_cplb_mgr;
122
123.Lcplb_error:
124 R1 = sp;
125 SP += -12;
126 call _panic_cplb_error;
127 SP += 12;
128 JUMP.L _handle_bad_cplb;
129
130ENDPROC(__cplb_hdr)
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 4c010ba50a80..0e28f7595733 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -29,417 +29,143 @@
29#include <asm/cplbinit.h> 29#include <asm/cplbinit.h>
30#include <asm/mem_map.h> 30#include <asm/mem_map.h>
31 31
32u_long icplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1]; 32struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
33u_long dcplb_tables[NR_CPUS][CPLB_TBL_ENTRIES+1]; 33struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
34 34
35#ifdef CONFIG_CPLB_SWITCH_TAB_L1 35int first_switched_icplb PDT_ATTR;
36#define PDT_ATTR __attribute__((l1_data)) 36int first_switched_dcplb PDT_ATTR;
37#else
38#define PDT_ATTR
39#endif
40
41u_long ipdt_tables[NR_CPUS][MAX_SWITCH_I_CPLBS+1] PDT_ATTR;
42u_long dpdt_tables[NR_CPUS][MAX_SWITCH_D_CPLBS+1] PDT_ATTR;
43#ifdef CONFIG_CPLB_INFO
44u_long ipdt_swapcount_tables[NR_CPUS][MAX_SWITCH_I_CPLBS] PDT_ATTR;
45u_long dpdt_swapcount_tables[NR_CPUS][MAX_SWITCH_D_CPLBS] PDT_ATTR;
46#endif
47 37
48struct s_cplb { 38struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
49 struct cplb_tab init_i; 39struct cplb_boundary icplb_bounds[7] PDT_ATTR;
50 struct cplb_tab init_d;
51 struct cplb_tab switch_i;
52 struct cplb_tab switch_d;
53};
54 40
55#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) 41int icplb_nr_bounds PDT_ATTR;
56static struct cplb_desc cplb_data[] = { 42int dcplb_nr_bounds PDT_ATTR;
57 {
58 .start = 0,
59 .end = SIZE_1K,
60 .psize = SIZE_1K,
61 .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
62 .i_conf = SDRAM_OOPS,
63 .d_conf = SDRAM_OOPS,
64#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO)
65 .valid = 1,
66#else
67 .valid = 0,
68#endif
69 .name = "Zero Pointer Guard Page",
70 },
71 {
72 .start = 0, /* dyanmic */
73 .end = 0, /* dynamic */
74 .psize = SIZE_4M,
75 .attr = INITIAL_T | SWITCH_T | I_CPLB,
76 .i_conf = L1_IMEMORY,
77 .d_conf = 0,
78 .valid = 1,
79 .name = "L1 I-Memory",
80 },
81 {
82 .start = 0, /* dynamic */
83 .end = 0, /* dynamic */
84 .psize = SIZE_4M,
85 .attr = INITIAL_T | SWITCH_T | D_CPLB,
86 .i_conf = 0,
87 .d_conf = L1_DMEMORY,
88#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0))
89 .valid = 1,
90#else
91 .valid = 0,
92#endif
93 .name = "L1 D-Memory",
94 },
95 {
96 .start = L2_START,
97 .end = L2_START + L2_LENGTH,
98 .psize = SIZE_1M,
99 .attr = L2_ATTR,
100 .i_conf = L2_IMEMORY,
101 .d_conf = L2_DMEMORY,
102 .valid = (L2_LENGTH > 0),
103 .name = "L2 Memory",
104 },
105 {
106 .start = 0,
107 .end = 0, /* dynamic */
108 .psize = 0,
109 .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
110 .i_conf = SDRAM_IGENERIC,
111 .d_conf = SDRAM_DGENERIC,
112 .valid = 1,
113 .name = "Kernel Memory",
114 },
115 {
116 .start = 0, /* dynamic */
117 .end = 0, /* dynamic */
118 .psize = 0,
119 .attr = INITIAL_T | SWITCH_T | D_CPLB,
120 .i_conf = SDRAM_IGENERIC,
121 .d_conf = SDRAM_DNON_CHBL,
122 .valid = 1,
123 .name = "uClinux MTD Memory",
124 },
125 {
126 .start = 0, /* dynamic */
127 .end = 0, /* dynamic */
128 .psize = SIZE_1M,
129 .attr = INITIAL_T | SWITCH_T | D_CPLB,
130 .d_conf = SDRAM_DNON_CHBL,
131 .valid = 1,
132 .name = "Uncached DMA Zone",
133 },
134 {
135 .start = 0, /* dynamic */
136 .end = 0, /* dynamic */
137 .psize = 0,
138 .attr = SWITCH_T | D_CPLB,
139 .i_conf = 0, /* dynamic */
140 .d_conf = 0, /* dynamic */
141 .valid = 1,
142 .name = "Reserved Memory",
143 },
144 {
145 .start = ASYNC_BANK0_BASE,
146 .end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE,
147 .psize = 0,
148 .attr = SWITCH_T | D_CPLB,
149 .d_conf = SDRAM_EBIU,
150 .valid = 1,
151 .name = "Asynchronous Memory Banks",
152 },
153 {
154 .start = BOOT_ROM_START,
155 .end = BOOT_ROM_START + BOOT_ROM_LENGTH,
156 .psize = SIZE_1M,
157 .attr = SWITCH_T | I_CPLB | D_CPLB,
158 .i_conf = SDRAM_IGENERIC,
159 .d_conf = SDRAM_DGENERIC,
160 .valid = 1,
161 .name = "On-Chip BootROM",
162 },
163};
164 43
165static bool __init lock_kernel_check(u32 start, u32 end) 44void __init generate_cplb_tables_cpu(unsigned int cpu)
166{ 45{
167 if (start >= (u32)__init_begin || end <= (u32)_stext) 46 int i_d, i_i;
168 return false; 47 unsigned long addr;
169
170 /* This cplb block overlapped with kernel area. */
171 return true;
172}
173 48
174static void __init 49 struct cplb_entry *d_tbl = dcplb_tbl[cpu];
175fill_cplbtab(struct cplb_tab *table, 50 struct cplb_entry *i_tbl = icplb_tbl[cpu];
176 unsigned long start, unsigned long end,
177 unsigned long block_size, unsigned long cplb_data)
178{
179 int i;
180 51
181 switch (block_size) { 52 printk(KERN_INFO "NOMPU: setting up cplb tables\n");
182 case SIZE_4M:
183 i = 3;
184 break;
185 case SIZE_1M:
186 i = 2;
187 break;
188 case SIZE_4K:
189 i = 1;
190 break;
191 case SIZE_1K:
192 default:
193 i = 0;
194 break;
195 }
196 53
197 cplb_data = (cplb_data & ~(3 << 16)) | (i << 16); 54 i_d = i_i = 0;
198 55
199 while ((start < end) && (table->pos < table->size)) { 56 /* Set up the zero page. */
57 d_tbl[i_d].addr = 0;
58 d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
200 59
201 table->tab[table->pos++] = start; 60 /* Cover kernel memory with 4M pages. */
61 addr = 0;
202 62
203 if (lock_kernel_check(start, start + block_size)) 63 for (; addr < memory_start; addr += 4 * 1024 * 1024) {
204 table->tab[table->pos++] = 64 d_tbl[i_d].addr = addr;
205 cplb_data | CPLB_LOCK | CPLB_DIRTY; 65 d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
206 else 66 i_tbl[i_i].addr = addr;
207 table->tab[table->pos++] = cplb_data; 67 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
68 }
208 69
209 start += block_size; 70 /* Cover L1 memory. One 4M area for code and data each is enough. */
71 if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
72 d_tbl[i_d].addr = L1_DATA_A_START;
73 d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
210 } 74 }
211} 75 i_tbl[i_i].addr = L1_CODE_START;
76 i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
212 77
213static void __init close_cplbtab(struct cplb_tab *table) 78 first_switched_dcplb = i_d;
214{ 79 first_switched_icplb = i_i;
215 while (table->pos < table->size)
216 table->tab[table->pos++] = 0;
217}
218 80
219/* helper function */ 81 BUG_ON(first_switched_dcplb > MAX_CPLBS);
220static void __init 82 BUG_ON(first_switched_icplb > MAX_CPLBS);
221__fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
222{
223 if (cplb_data[i].psize) {
224 fill_cplbtab(t,
225 cplb_data[i].start,
226 cplb_data[i].end,
227 cplb_data[i].psize,
228 cplb_data[i].i_conf);
229 } else {
230#if defined(CONFIG_BFIN_ICACHE)
231 if (ANOMALY_05000263 && i == SDRAM_KERN) {
232 fill_cplbtab(t,
233 cplb_data[i].start,
234 cplb_data[i].end,
235 SIZE_4M,
236 cplb_data[i].i_conf);
237 } else
238#endif
239 {
240 fill_cplbtab(t,
241 cplb_data[i].start,
242 a_start,
243 SIZE_1M,
244 cplb_data[i].i_conf);
245 fill_cplbtab(t,
246 a_start,
247 a_end,
248 SIZE_4M,
249 cplb_data[i].i_conf);
250 fill_cplbtab(t, a_end,
251 cplb_data[i].end,
252 SIZE_1M,
253 cplb_data[i].i_conf);
254 }
255 }
256}
257 83
258static void __init 84 while (i_d < MAX_CPLBS)
259__fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end) 85 d_tbl[i_d++].data = 0;
260{ 86 while (i_i < MAX_CPLBS)
261 if (cplb_data[i].psize) { 87 i_tbl[i_i++].data = 0;
262 fill_cplbtab(t,
263 cplb_data[i].start,
264 cplb_data[i].end,
265 cplb_data[i].psize,
266 cplb_data[i].d_conf);
267 } else {
268 fill_cplbtab(t,
269 cplb_data[i].start,
270 a_start, SIZE_1M,
271 cplb_data[i].d_conf);
272 fill_cplbtab(t, a_start,
273 a_end, SIZE_4M,
274 cplb_data[i].d_conf);
275 fill_cplbtab(t, a_end,
276 cplb_data[i].end,
277 SIZE_1M,
278 cplb_data[i].d_conf);
279 }
280} 88}
281 89
282void __init generate_cplb_tables_cpu(unsigned int cpu) 90void __init generate_cplb_tables_all(void)
283{ 91{
92 int i_d, i_i;
284 93
285 u16 i, j, process; 94 i_d = 0;
286 u32 a_start, a_end, as, ae, as_1m; 95 /* Normal RAM, including MTD FS. */
287
288 struct cplb_tab *t_i = NULL;
289 struct cplb_tab *t_d = NULL;
290 struct s_cplb cplb;
291
292 printk(KERN_INFO "NOMPU: setting up cplb tables for global access\n");
293
294 cplb.init_i.size = CPLB_TBL_ENTRIES;
295 cplb.init_d.size = CPLB_TBL_ENTRIES;
296 cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
297 cplb.switch_d.size = MAX_SWITCH_D_CPLBS;
298
299 cplb.init_i.pos = 0;
300 cplb.init_d.pos = 0;
301 cplb.switch_i.pos = 0;
302 cplb.switch_d.pos = 0;
303
304 cplb.init_i.tab = icplb_tables[cpu];
305 cplb.init_d.tab = dcplb_tables[cpu];
306 cplb.switch_i.tab = ipdt_tables[cpu];
307 cplb.switch_d.tab = dpdt_tables[cpu];
308
309 cplb_data[L1I_MEM].start = get_l1_code_start_cpu(cpu);
310 cplb_data[L1I_MEM].end = cplb_data[L1I_MEM].start + L1_CODE_LENGTH;
311 cplb_data[L1D_MEM].start = get_l1_data_a_start_cpu(cpu);
312 cplb_data[L1D_MEM].end = get_l1_data_b_start_cpu(cpu) + L1_DATA_B_LENGTH;
313 cplb_data[SDRAM_KERN].end = memory_end;
314
315#ifdef CONFIG_MTD_UCLINUX 96#ifdef CONFIG_MTD_UCLINUX
316 cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start; 97 dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size;
317 cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size;
318 cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0;
319# if defined(CONFIG_ROMFS_FS)
320 cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB;
321
322 /*
323 * The ROMFS_FS size is often not multiple of 1MB.
324 * This can cause multiple CPLB sets covering the same memory area.
325 * This will then cause multiple CPLB hit exceptions.
326 * Workaround: We ensure a contiguous memory area by extending the kernel
327 * memory section over the mtd section.
328 * For ROMFS_FS memory must be covered with ICPLBs anyways.
329 * So there is no difference between kernel and mtd memory setup.
330 */
331
332 cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;;
333 cplb_data[SDRAM_RAM_MTD].valid = 0;
334
335# endif
336#else 98#else
337 cplb_data[SDRAM_RAM_MTD].valid = 0; 99 dcplb_bounds[i_d].eaddr = memory_end;
338#endif 100#endif
101 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
102 /* DMA uncached region. */
103 if (DMA_UNCACHED_REGION) {
104 dcplb_bounds[i_d].eaddr = _ramend;
105 dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
106 }
107 if (_ramend != physical_mem_end) {
108 /* Reserved memory. */
109 dcplb_bounds[i_d].eaddr = physical_mem_end;
110 dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
111 SDRAM_DGENERIC : SDRAM_DNON_CHBL);
112 }
113 /* Addressing hole up to the async bank. */
114 dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
115 dcplb_bounds[i_d++].data = 0;
116 /* ASYNC banks. */
117 dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
118 dcplb_bounds[i_d++].data = SDRAM_EBIU;
119 /* Addressing hole up to BootROM. */
120 dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
121 dcplb_bounds[i_d++].data = 0;
122 /* BootROM -- largest one should be less than 1 meg. */
123 dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
124 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
125 if (L2_LENGTH) {
126 /* Addressing hole up to L2 SRAM. */
127 dcplb_bounds[i_d].eaddr = L2_START;
128 dcplb_bounds[i_d++].data = 0;
129 /* L2 SRAM. */
130 dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
131 dcplb_bounds[i_d++].data = L2_DMEMORY;
132 }
133 dcplb_nr_bounds = i_d;
134 BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));
339 135
340 cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION; 136 i_i = 0;
341 cplb_data[SDRAM_DMAZ].end = _ramend; 137 /* Normal RAM, including MTD FS. */
342
343 cplb_data[RES_MEM].start = _ramend;
344 cplb_data[RES_MEM].end = physical_mem_end;
345
346 if (reserved_mem_dcache_on)
347 cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC;
348 else
349 cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL;
350
351 if (reserved_mem_icache_on)
352 cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC;
353 else
354 cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL;
355
356 for (i = ZERO_P; i < ARRAY_SIZE(cplb_data); ++i) {
357 if (!cplb_data[i].valid)
358 continue;
359
360 as_1m = cplb_data[i].start % SIZE_1M;
361
362 /* We need to make sure all sections are properly 1M aligned
363 * However between Kernel Memory and the Kernel mtd section, depending on the
364 * rootfs size, there can be overlapping memory areas.
365 */
366
367 if (as_1m && i != L1I_MEM && i != L1D_MEM) {
368#ifdef CONFIG_MTD_UCLINUX 138#ifdef CONFIG_MTD_UCLINUX
369 if (i == SDRAM_RAM_MTD) { 139 icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size;
370 if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start) 140#else
371 cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M; 141 icplb_bounds[i_i].eaddr = memory_end;
372 else
373 cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M));
374 } else
375#endif 142#endif
376 printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n", 143 icplb_bounds[i_i++].data = SDRAM_IGENERIC;
377 cplb_data[i].name, cplb_data[i].start); 144 /* DMA uncached region. */
378 } 145 if (DMA_UNCACHED_REGION) {
379 146 icplb_bounds[i_i].eaddr = _ramend;
380 as = cplb_data[i].start % SIZE_4M; 147 icplb_bounds[i_i++].data = 0;
381 ae = cplb_data[i].end % SIZE_4M;
382
383 if (as)
384 a_start = cplb_data[i].start + (SIZE_4M - (as));
385 else
386 a_start = cplb_data[i].start;
387
388 a_end = cplb_data[i].end - ae;
389
390 for (j = INITIAL_T; j <= SWITCH_T; j++) {
391
392 switch (j) {
393 case INITIAL_T:
394 if (cplb_data[i].attr & INITIAL_T) {
395 t_i = &cplb.init_i;
396 t_d = &cplb.init_d;
397 process = 1;
398 } else
399 process = 0;
400 break;
401 case SWITCH_T:
402 if (cplb_data[i].attr & SWITCH_T) {
403 t_i = &cplb.switch_i;
404 t_d = &cplb.switch_d;
405 process = 1;
406 } else
407 process = 0;
408 break;
409 default:
410 process = 0;
411 break;
412 }
413
414 if (!process)
415 continue;
416 if (cplb_data[i].attr & I_CPLB)
417 __fill_code_cplbtab(t_i, i, a_start, a_end);
418
419 if (cplb_data[i].attr & D_CPLB)
420 __fill_data_cplbtab(t_d, i, a_start, a_end);
421 }
422 } 148 }
423 149 if (_ramend != physical_mem_end) {
424 /* make sure we locked the kernel start */ 150 /* Reserved memory. */
425 BUG_ON(cplb.init_i.pos < 2 + cplb_data[ZERO_P].valid); 151 icplb_bounds[i_i].eaddr = physical_mem_end;
426 BUG_ON(cplb.init_d.pos < 1 + cplb_data[ZERO_P].valid + cplb_data[L1D_MEM].valid); 152 icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
427 153 SDRAM_IGENERIC : SDRAM_INON_CHBL);
428 /* make sure we didnt overflow the table */ 154 }
429 BUG_ON(cplb.init_i.size < cplb.init_i.pos); 155 /* Addressing hole up to BootROM. */
430 BUG_ON(cplb.init_d.size < cplb.init_d.pos); 156 icplb_bounds[i_i].eaddr = BOOT_ROM_START;
431 BUG_ON(cplb.switch_i.size < cplb.switch_i.pos); 157 icplb_bounds[i_i++].data = 0;
432 BUG_ON(cplb.switch_d.size < cplb.switch_d.pos); 158 /* BootROM -- largest one should be less than 1 meg. */
433 159 icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
434 /* close tables */ 160 icplb_bounds[i_i++].data = SDRAM_IGENERIC;
435 close_cplbtab(&cplb.init_i); 161 if (L2_LENGTH) {
436 close_cplbtab(&cplb.init_d); 162 /* Addressing hole up to L2 SRAM, including the async bank. */
437 163 icplb_bounds[i_i].eaddr = L2_START;
438 cplb.init_i.tab[cplb.init_i.pos] = -1; 164 icplb_bounds[i_i++].data = 0;
439 cplb.init_d.tab[cplb.init_d.pos] = -1; 165 /* L2 SRAM. */
440 cplb.switch_i.tab[cplb.switch_i.pos] = -1; 166 icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
441 cplb.switch_d.tab[cplb.switch_d.pos] = -1; 167 icplb_bounds[i_i++].data = L2_IMEMORY;
442 168 }
169 icplb_nr_bounds = i_i;
170 BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
443} 171}
444
445#endif
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.S b/arch/blackfin/kernel/cplb-nompu/cplbmgr.S
deleted file mode 100644
index f4ca76c72394..000000000000
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.S
+++ /dev/null
@@ -1,648 +0,0 @@
1/*
2 * File: arch/blackfin/mach-common/cplbmgtr.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created: ?
7 * Description: CPLB replacement routine for CPLB mismatch
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30/* Usage: int _cplb_mgr(is_data_miss,int enable_cache)
31 * is_data_miss==2 => Mark as Dirty, write to the clean data page
32 * is_data_miss==1 => Replace a data CPLB.
33 * is_data_miss==0 => Replace an instruction CPLB.
34 *
35 * Returns:
36 * CPLB_RELOADED => Successfully updated CPLB table.
37 * CPLB_NO_UNLOCKED => All CPLBs are locked, so cannot be evicted.
38 * This indicates that the CPLBs in the configuration
39 * tablei are badly configured, as this should never
40 * occur.
41 * CPLB_NO_ADDR_MATCH => The address being accessed, that triggered the
42 * exception, is not covered by any of the CPLBs in
43 * the configuration table. The application is
44 * presumably misbehaving.
45 * CPLB_PROT_VIOL => The address being accessed, that triggered the
46 * exception, was not a first-write to a clean Write
47 * Back Data page, and so presumably is a genuine
48 * violation of the page's protection attributes.
49 * The application is misbehaving.
50 */
51
52#include <linux/linkage.h>
53#include <asm/blackfin.h>
54#include <asm/cplb.h>
55#include <asm/asm-offsets.h>
56
57#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
58.section .l1.text
59#else
60.text
61#endif
62
63.align 2;
64ENTRY(_cplb_mgr)
65
66 [--SP]=( R7:4,P5:3 );
67
68 CC = R0 == 2;
69 IF CC JUMP .Ldcplb_write;
70
71 CC = R0 == 0;
72 IF !CC JUMP .Ldcplb_miss_compare;
73
74 /* ICPLB Miss Exception. We need to choose one of the
75 * currently-installed CPLBs, and replace it with one
76 * from the configuration table.
77 */
78
79 /* A multi-word instruction can cross a page boundary. This means the
80 * first part of the instruction can be in a valid page, but the
81 * second part is not, and hence generates the instruction miss.
82 * However, the fault address is for the start of the instruction,
83 * not the part that's in the bad page. Therefore, we have to check
84 * whether the fault address applies to a page that is already present
85 * in the table.
86 */
87
88 P4.L = LO(ICPLB_FAULT_ADDR);
89 P4.H = HI(ICPLB_FAULT_ADDR);
90
91 P1 = 16;
92 P5.L = _page_size_table;
93 P5.H = _page_size_table;
94
95 P0.L = LO(ICPLB_DATA0);
96 P0.H = HI(ICPLB_DATA0);
97 R4 = [P4]; /* Get faulting address*/
98 R6 = 64; /* Advance past the fault address, which*/
99 R6 = R6 + R4; /* we'll use if we find a match*/
100 R3 = ((16 << 8) | 2); /* Extract mask, two bits at posn 16 */
101
102 R5 = 0;
103.Lisearch:
104
105 R1 = [P0-0x100]; /* Address for this CPLB */
106
107 R0 = [P0++]; /* Info for this CPLB*/
108 CC = BITTST(R0,0); /* Is the CPLB valid?*/
109 IF !CC JUMP .Lnomatch; /* Skip it, if not.*/
110 CC = R4 < R1(IU); /* If fault address less than page start*/
111 IF CC JUMP .Lnomatch; /* then skip this one.*/
112 R2 = EXTRACT(R0,R3.L) (Z); /* Get page size*/
113 P1 = R2;
114 P1 = P5 + (P1<<2); /* index into page-size table*/
115 R2 = [P1]; /* Get the page size*/
116 R1 = R1 + R2; /* and add to page start, to get page end*/
117 CC = R4 < R1(IU); /* and see whether fault addr is in page.*/
118 IF !CC R4 = R6; /* If so, advance the address and finish loop.*/
119 IF !CC JUMP .Lisearch_done;
120.Lnomatch:
121 /* Go around again*/
122 R5 += 1;
123 CC = BITTST(R5, 4); /* i.e CC = R5 >= 16*/
124 IF !CC JUMP .Lisearch;
125
126.Lisearch_done:
127 I0 = R4; /* Fault address we'll search for*/
128
129 /* set up pointers */
130 P0.L = LO(ICPLB_DATA0);
131 P0.H = HI(ICPLB_DATA0);
132
133 /* The replacement procedure for ICPLBs */
134
135 P4.L = LO(IMEM_CONTROL);
136 P4.H = HI(IMEM_CONTROL);
137
138 /* Turn off CPLBs while we work, necessary according to HRM before
139 * modifying CPLB descriptors
140 */
141 R5 = [P4]; /* Control Register*/
142 BITCLR(R5,ENICPLB_P);
143 CLI R1;
144 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
145 .align 8;
146 [P4] = R5;
147 SSYNC;
148 STI R1;
149
150 R1 = -1; /* end point comparison */
151 R3 = 16; /* counter */
152
153 /* Search through CPLBs for first non-locked entry */
154 /* Overwrite it by moving everyone else up by 1 */
155.Licheck_lock:
156 R0 = [P0++];
157 R3 = R3 + R1;
158 CC = R3 == R1;
159 IF CC JUMP .Lall_locked;
160 CC = BITTST(R0, 0); /* an invalid entry is good */
161 IF !CC JUMP .Lifound_victim;
162 CC = BITTST(R0,1); /* but a locked entry isn't */
163 IF CC JUMP .Licheck_lock;
164
165.Lifound_victim:
166#ifdef CONFIG_CPLB_INFO
167 R7 = [P0 - 0x104];
168 GET_PDA(P2, R2);
169 P3 = [P2 + PDA_IPDT_SWAPCOUNT];
170 P2 = [P2 + PDA_IPDT];
171 P3 += -4;
172.Licount:
173 R2 = [P2]; /* address from config table */
174 P2 += 8;
175 P3 += 8;
176 CC = R2==-1;
177 IF CC JUMP .Licount_done;
178 CC = R7==R2;
179 IF !CC JUMP .Licount;
180 R7 = [P3];
181 R7 += 1;
182 [P3] = R7;
183 CSYNC;
184.Licount_done:
185#endif
186 LC0=R3;
187 LSETUP(.Lis_move,.Lie_move) LC0;
188.Lis_move:
189 R0 = [P0];
190 [P0 - 4] = R0;
191 R0 = [P0 - 0x100];
192 [P0-0x104] = R0;
193.Lie_move:
194 P0+=4;
195
196 /* Clear ICPLB_DATA15, in case we don't find a replacement
197 * otherwise, we would have a duplicate entry, and will crash
198 */
199 R0 = 0;
200 [P0 - 4] = R0;
201
202 /* We've made space in the ICPLB table, so that ICPLB15
203 * is now free to be overwritten. Next, we have to determine
204 * which CPLB we need to install, from the configuration
205 * table. This is a matter of getting the start-of-page
206 * addresses and page-lengths from the config table, and
207 * determining whether the fault address falls within that
208 * range.
209 */
210
211 GET_PDA(P3, R0);
212 P2 = [P3 + PDA_IPDT];
213#ifdef CONFIG_CPLB_INFO
214 P3 = [P3 + PDA_IPDT_SWAPCOUNT];
215 P3 += -8;
216#endif
217 P0.L = _page_size_table;
218 P0.H = _page_size_table;
219
220 /* Retrieve our fault address (which may have been advanced
221 * because the faulting instruction crossed a page boundary).
222 */
223
224 R0 = I0;
225
226 /* An extraction pattern, to get the page-size bits from
227 * the CPLB data entry. Bits 16-17, so two bits at posn 16.
228 */
229
230 R1 = ((16<<8)|2);
231.Linext: R4 = [P2++]; /* address from config table */
232 R2 = [P2++]; /* data from config table */
233#ifdef CONFIG_CPLB_INFO
234 P3 += 8;
235#endif
236
237 CC = R4 == -1; /* End of config table*/
238 IF CC JUMP .Lno_page_in_table;
239
240 /* See if failed address > start address */
241 CC = R4 <= R0(IU);
242 IF !CC JUMP .Linext;
243
244 /* extract page size (17:16)*/
245 R3 = EXTRACT(R2, R1.L) (Z);
246
247 /* add page size to addr to get range */
248
249 P5 = R3;
250 P5 = P0 + (P5 << 2); /* scaled, for int access*/
251 R3 = [P5];
252 R3 = R3 + R4;
253
254 /* See if failed address < (start address + page size) */
255 CC = R0 < R3(IU);
256 IF !CC JUMP .Linext;
257
258 /* We've found a CPLB in the config table that covers
259 * the faulting address, so install this CPLB into the
260 * last entry of the table.
261 */
262
263 P1.L = LO(ICPLB_DATA15); /* ICPLB_DATA15 */
264 P1.H = HI(ICPLB_DATA15);
265 [P1] = R2;
266 [P1-0x100] = R4;
267#ifdef CONFIG_CPLB_INFO
268 R3 = [P3];
269 R3 += 1;
270 [P3] = R3;
271#endif
272
273 /* P4 points to IMEM_CONTROL, and R5 contains its old
274 * value, after we disabled ICPLBS. Re-enable them.
275 */
276
277 BITSET(R5,ENICPLB_P);
278 CLI R2;
279 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
280 .align 8;
281 [P4] = R5;
282 SSYNC;
283 STI R2;
284
285 ( R7:4,P5:3 ) = [SP++];
286 R0 = CPLB_RELOADED;
287 RTS;
288
289/* FAILED CASES*/
290.Lno_page_in_table:
291 R0 = CPLB_NO_ADDR_MATCH;
292 JUMP .Lfail_ret;
293
294.Lall_locked:
295 R0 = CPLB_NO_UNLOCKED;
296 JUMP .Lfail_ret;
297
298.Lprot_violation:
299 R0 = CPLB_PROT_VIOL;
300
301.Lfail_ret:
302 /* Make sure we turn protection/cache back on, even in the failing case */
303 BITSET(R5,ENICPLB_P);
304 CLI R2;
305 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
306 .align 8;
307 [P4] = R5;
308 SSYNC;
309 STI R2;
310
311 ( R7:4,P5:3 ) = [SP++];
312 RTS;
313
314.Ldcplb_write:
315
316 /* if a DCPLB is marked as write-back (CPLB_WT==0), and
317 * it is clean (CPLB_DIRTY==0), then a write to the
318 * CPLB's page triggers a protection violation. We have to
319 * mark the CPLB as dirty, to indicate that there are
320 * pending writes associated with the CPLB.
321 */
322
323 P4.L = LO(DCPLB_STATUS);
324 P4.H = HI(DCPLB_STATUS);
325 P3.L = LO(DCPLB_DATA0);
326 P3.H = HI(DCPLB_DATA0);
327 R5 = [P4];
328
329 /* A protection violation can be caused by more than just writes
330 * to a clean WB page, so we have to ensure that:
331 * - It's a write
332 * - to a clean WB page
333 * - and is allowed in the mode the access occurred.
334 */
335
336 CC = BITTST(R5, 16); /* ensure it was a write*/
337 IF !CC JUMP .Lprot_violation;
338
339 /* to check the rest, we have to retrieve the DCPLB.*/
340
341 /* The low half of DCPLB_STATUS is a bit mask*/
342
343 R2 = R5.L (Z); /* indicating which CPLB triggered the event.*/
344 R3 = 30; /* so we can use this to determine the offset*/
345 R2.L = SIGNBITS R2;
346 R2 = R2.L (Z); /* into the DCPLB table.*/
347 R3 = R3 - R2;
348 P4 = R3;
349 P3 = P3 + (P4<<2);
350 R3 = [P3]; /* Retrieve the CPLB*/
351
352 /* Now we can check whether it's a clean WB page*/
353
354 CC = BITTST(R3, 14); /* 0==WB, 1==WT*/
355 IF CC JUMP .Lprot_violation;
356 CC = BITTST(R3, 7); /* 0 == clean, 1 == dirty*/
357 IF CC JUMP .Lprot_violation;
358
359 /* Check whether the write is allowed in the mode that was active.*/
360
361 R2 = 1<<3; /* checking write in user mode*/
362 CC = BITTST(R5, 17); /* 0==was user, 1==was super*/
363 R5 = CC;
364 R2 <<= R5; /* if was super, check write in super mode*/
365 R2 = R3 & R2;
366 CC = R2 == 0;
367 IF CC JUMP .Lprot_violation;
368
369 /* It's a genuine write-to-clean-page.*/
370
371 BITSET(R3, 7); /* mark as dirty*/
372 [P3] = R3; /* and write back.*/
373 NOP;
374 CSYNC;
375 ( R7:4,P5:3 ) = [SP++];
376 R0 = CPLB_RELOADED;
377 RTS;
378
379.Ldcplb_miss_compare:
380
381 /* Data CPLB Miss event. We need to choose a CPLB to
382 * evict, and then locate a new CPLB to install from the
383 * config table, that covers the faulting address.
384 */
385
386 P1.L = LO(DCPLB_DATA15);
387 P1.H = HI(DCPLB_DATA15);
388
389 P4.L = LO(DCPLB_FAULT_ADDR);
390 P4.H = HI(DCPLB_FAULT_ADDR);
391 R4 = [P4];
392 I0 = R4;
393
394 /* The replacement procedure for DCPLBs*/
395
396 R6 = R1; /* Save for later*/
397
398 /* Turn off CPLBs while we work.*/
399 P4.L = LO(DMEM_CONTROL);
400 P4.H = HI(DMEM_CONTROL);
401 R5 = [P4];
402 BITCLR(R5,ENDCPLB_P);
403 CLI R0;
404 SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
405 .align 8;
406 [P4] = R5;
407 SSYNC;
408 STI R0;
409
410 /* Start looking for a CPLB to evict. Our order of preference
411 * is: invalid CPLBs, clean CPLBs, dirty CPLBs. Locked CPLBs
412 * are no good.
413 */
414
415 I1.L = LO(DCPLB_DATA0);
416 I1.H = HI(DCPLB_DATA0);
417 P1 = 2;
418 P2 = 16;
419 I2.L = _dcplb_preference;
420 I2.H = _dcplb_preference;
421 LSETUP(.Lsdsearch1, .Ledsearch1) LC0 = P1;
422.Lsdsearch1:
423 R0 = [I2++]; /* Get the bits we're interested in*/
424 P0 = I1; /* Go back to start of table*/
425 LSETUP (.Lsdsearch2, .Ledsearch2) LC1 = P2;
426.Lsdsearch2:
427 R1 = [P0++]; /* Fetch each installed CPLB in turn*/
428 R2 = R1 & R0; /* and test for interesting bits.*/
429 CC = R2 == 0; /* If none are set, it'll do.*/
430 IF !CC JUMP .Lskip_stack_check;
431
432 R2 = [P0 - 0x104]; /* R2 - PageStart */
433 P3.L = _page_size_table; /* retrieve end address */
434 P3.H = _page_size_table; /* retrieve end address */
435 R3 = 0x1002; /* 16th - position, 2 bits -length */
436#if ANOMALY_05000209
437 nop; /* Anomaly 05000209 */
438#endif
439 R7 = EXTRACT(R1,R3.l);
440 R7 = R7 << 2; /* Page size index offset */
441 P5 = R7;
442 P3 = P3 + P5;
443 R7 = [P3]; /* page size in bytes */
444
445 R7 = R2 + R7; /* R7 - PageEnd */
446 R4 = SP; /* Test SP is in range */
447
448 CC = R7 < R4; /* if PageEnd < SP */
449 IF CC JUMP .Ldfound_victim;
450 R3 = 0x284; /* stack length from start of trap till
451 * the point.
452 * 20 stack locations for future modifications
453 */
454 R4 = R4 + R3;
455 CC = R4 < R2; /* if SP + stacklen < PageStart */
456 IF CC JUMP .Ldfound_victim;
457.Lskip_stack_check:
458
459.Ledsearch2: NOP;
460.Ledsearch1: NOP;
461
462 /* If we got here, we didn't find a DCPLB we considered
463 * replacable, which means all of them were locked.
464 */
465
466 JUMP .Lall_locked;
467.Ldfound_victim:
468
469#ifdef CONFIG_CPLB_INFO
470 R7 = [P0 - 0x104];
471 GET_PDA(P2, R2);
472 P3 = [P2 + PDA_DPDT_SWAPCOUNT];
473 P2 = [P2 + PDA_DPDT];
474 P3 += -4;
475.Ldicount:
476 R2 = [P2];
477 P2 += 8;
478 P3 += 8;
479 CC = R2==-1;
480 IF CC JUMP .Ldicount_done;
481 CC = R7==R2;
482 IF !CC JUMP .Ldicount;
483 R7 = [P3];
484 R7 += 1;
485 [P3] = R7;
486.Ldicount_done:
487#endif
488
489 /* Clean down the hardware loops*/
490 R2 = 0;
491 LC1 = R2;
492 LC0 = R2;
493
494 /* There's a suitable victim in [P0-4] (because we've
495 * advanced already).
496 */
497
498.LDdoverwrite:
499
500 /* [P0-4] is a suitable victim CPLB, so we want to
501 * overwrite it by moving all the following CPLBs
502 * one space closer to the start.
503 */
504
505 R1.L = LO(DCPLB_DATA16); /* DCPLB_DATA15 + 4 */
506 R1.H = HI(DCPLB_DATA16);
507 R0 = P0;
508
509 /* If the victim happens to be in DCPLB15,
510 * we don't need to move anything.
511 */
512
513 CC = R1 == R0;
514 IF CC JUMP .Lde_moved;
515 R1 = R1 - R0;
516 R1 >>= 2;
517 P1 = R1;
518 LSETUP(.Lds_move, .Lde_move) LC0=P1;
519.Lds_move:
520 R0 = [P0++]; /* move data */
521 [P0 - 8] = R0;
522 R0 = [P0-0x104] /* move address */
523.Lde_move:
524 [P0-0x108] = R0;
525
526.Lde_moved:
527 NOP;
528
529 /* Clear DCPLB_DATA15, in case we don't find a replacement
530 * otherwise, we would have a duplicate entry, and will crash
531 */
532 R0 = 0;
533 [P0 - 0x4] = R0;
534
535 /* We've now made space in DCPLB15 for the new CPLB to be
536 * installed. The next stage is to locate a CPLB in the
537 * config table that covers the faulting address.
538 */
539
540 R0 = I0; /* Our faulting address */
541
542 GET_PDA(P3, R1);
543 P2 = [P3 + PDA_DPDT];
544#ifdef CONFIG_CPLB_INFO
545 P3 = [P3 + PDA_DPDT_SWAPCOUNT];
546 P3 += -8;
547#endif
548
549 P1.L = _page_size_table;
550 P1.H = _page_size_table;
551
552 /* An extraction pattern, to retrieve bits 17:16.*/
553
554 R1 = (16<<8)|2;
555.Ldnext: R4 = [P2++]; /* address */
556 R2 = [P2++]; /* data */
557#ifdef CONFIG_CPLB_INFO
558 P3 += 8;
559#endif
560
561 CC = R4 == -1;
562 IF CC JUMP .Lno_page_in_table;
563
564 /* See if failed address > start address */
565 CC = R4 <= R0(IU);
566 IF !CC JUMP .Ldnext;
567
568 /* extract page size (17:16)*/
569 R3 = EXTRACT(R2, R1.L) (Z);
570
571 /* add page size to addr to get range */
572
573 P5 = R3;
574 P5 = P1 + (P5 << 2);
575 R3 = [P5];
576 R3 = R3 + R4;
577
578 /* See if failed address < (start address + page size) */
579 CC = R0 < R3(IU);
580 IF !CC JUMP .Ldnext;
581
582 /* We've found the CPLB that should be installed, so
583 * write it into CPLB15, masking off any caching bits
584 * if necessary.
585 */
586
587 P1.L = LO(DCPLB_DATA15);
588 P1.H = HI(DCPLB_DATA15);
589
590 /* If the DCPLB has cache bits set, but caching hasn't
591 * been enabled, then we want to mask off the cache-in-L1
592 * bit before installing. Moreover, if caching is off, we
593 * also want to ensure that the DCPLB has WT mode set, rather
594 * than WB, since WB pages still trigger first-write exceptions
595 * even when not caching is off, and the page isn't marked as
596 * cachable. Finally, we could mark the page as clean, not dirty,
597 * but we choose to leave that decision to the user; if the user
598 * chooses to have a CPLB pre-defined as dirty, then they always
599 * pay the cost of flushing during eviction, but don't pay the
600 * cost of first-write exceptions to mark the page as dirty.
601 */
602
603#ifdef CONFIG_BFIN_WT
604 BITSET(R6, 14); /* Set WT*/
605#endif
606
607 [P1] = R2;
608 [P1-0x100] = R4;
609#ifdef CONFIG_CPLB_INFO
610 R3 = [P3];
611 R3 += 1;
612 [P3] = R3;
613#endif
614
615 /* We've installed the CPLB, so re-enable CPLBs. P4
616 * points to DMEM_CONTROL, and R5 is the value we
617 * last wrote to it, when we were disabling CPLBs.
618 */
619
620 BITSET(R5,ENDCPLB_P);
621 CLI R2;
622 .align 8;
623 [P4] = R5;
624 SSYNC;
625 STI R2;
626
627 ( R7:4,P5:3 ) = [SP++];
628 R0 = CPLB_RELOADED;
629 RTS;
630ENDPROC(_cplb_mgr)
631
632#ifdef CONFIG_CPLB_SWITCH_TAB_L1
633.section .l1.data
634#else
635.data
636#endif
637
638ENTRY(_page_size_table)
639.byte4 0x00000400; /* 1K */
640.byte4 0x00001000; /* 4K */
641.byte4 0x00100000; /* 1M */
642.byte4 0x00400000; /* 4M */
643END(_page_size_table)
644
645ENTRY(_dcplb_preference)
646.byte4 0x00000001; /* valid bit */
647.byte4 0x00000002; /* lock bit */
648END(_dcplb_preference)
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
new file mode 100644
index 000000000000..376249ab2694
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -0,0 +1,283 @@
1/*
2 * File: arch/blackfin/kernel/cplb-nompu-c/cplbmgr.c
3 * Based on: arch/blackfin/kernel/cplb-mpu/cplbmgr.c
4 * Author: Michael McTernan <mmcternan@airvana.com>
5 *
6 * Created: 01Nov2008
7 * Description: CPLB miss handler.
8 *
9 * Modified:
10 * Copyright 2008 Airvana Inc.
11 * Copyright 2004-2007 Analog Devices Inc.
12 *
13 * Bugs: Enter bugs at http://blackfin.uclinux.org/
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 */
25
26#include <linux/kernel.h>
27#include <asm/blackfin.h>
28#include <asm/cplbinit.h>
29#include <asm/cplb.h>
30#include <asm/mmu_context.h>
31
32/*
33 * WARNING
34 *
35 * This file is compiled with certain -ffixed-reg options. We have to
36 * make sure not to call any functions here that could clobber these
37 * registers.
38 */
39
40int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
41int nr_dcplb_supv_miss[NR_CPUS], nr_icplb_supv_miss[NR_CPUS];
42int nr_cplb_flush[NR_CPUS], nr_dcplb_prot[NR_CPUS];
43
44#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
45#define MGR_ATTR __attribute__((l1_text))
46#else
47#define MGR_ATTR
48#endif
49
50/*
51 * We're in an exception handler. The normal cli nop nop workaround
52 * isn't going to do very much, as the only thing that can interrupt
53 * us is an NMI, and the cli isn't going to stop that.
54 */
55#define NOWA_SSYNC __asm__ __volatile__ ("ssync;")
56
57/* Anomaly handlers provide SSYNCs, so avoid extra if anomaly is present */
58#if ANOMALY_05000125
59
60#define bfin_write_DMEM_CONTROL_SSYNC(v) bfin_write_DMEM_CONTROL(v)
61#define bfin_write_IMEM_CONTROL_SSYNC(v) bfin_write_IMEM_CONTROL(v)
62
63#else
64
65#define bfin_write_DMEM_CONTROL_SSYNC(v) \
66 do { NOWA_SSYNC; bfin_write_DMEM_CONTROL(v); NOWA_SSYNC; } while (0)
67#define bfin_write_IMEM_CONTROL_SSYNC(v) \
68 do { NOWA_SSYNC; bfin_write_IMEM_CONTROL(v); NOWA_SSYNC; } while (0)
69
70#endif
71
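/*
 * Install one DCPLB entry: CPLBs are switched off in DMEM_CONTROL while
 * the entry's address/data MMRs are rewritten, then switched back on.
 */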
72static inline void write_dcplb_data(int cpu, int idx, unsigned long data,
73 unsigned long addr)
74{
75 unsigned long ctrl = bfin_read_DMEM_CONTROL();
76 bfin_write_DMEM_CONTROL_SSYNC(ctrl & ~ENDCPLB);
77 bfin_write32(DCPLB_DATA0 + idx * 4, data);
78 bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
79 bfin_write_DMEM_CONTROL_SSYNC(ctrl);
80
81#ifdef CONFIG_CPLB_INFO
82 dcplb_tbl[cpu][idx].addr = addr;
83 dcplb_tbl[cpu][idx].data = data;
84#endif
85}
86
87static inline void write_icplb_data(int cpu, int idx, unsigned long data,
88 unsigned long addr)
89{
90 unsigned long ctrl = bfin_read_IMEM_CONTROL();
91
92 bfin_write_IMEM_CONTROL_SSYNC(ctrl & ~ENICPLB);
93 bfin_write32(ICPLB_DATA0 + idx * 4, data);
94 bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
95 bfin_write_IMEM_CONTROL_SSYNC(ctrl);
96
97#ifdef CONFIG_CPLB_INFO
98 icplb_tbl[cpu][idx].addr = addr;
99 icplb_tbl[cpu][idx].data = data;
100#endif
101}
102
103/*
104 * Given the contents of the status register, return the index of the
105 * CPLB that caused the fault.
106 */
107static inline int faulting_cplb_index(int status)
108{
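	/*
	 * The low 16 status bits hold one FAULT flag per CPLB entry; norm
	 * returns the number of redundant sign bits, so 30 - norm is the
	 * bit number of the set flag, i.e. the index of the faulting entry.
	 */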
109 int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
110 return 30 - signbits;
111}
112
113/*
114 * Given the contents of the status register and the DCPLB_DATA contents,
115 * return true if a write access should be permitted.
116 */
117static inline int write_permitted(int status, unsigned long data)
118{
119 if (status & FAULT_USERSUPV)
120 return !!(data & CPLB_SUPV_WR);
121 else
122 return !!(data & CPLB_USER_WR);
123}
124
125/* Counters to implement round-robin replacement. */
126static int icplb_rr_index[NR_CPUS] PDT_ATTR;
127static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
128
129/*
130 * Find an ICPLB entry to be evicted and return its index.
131 */
132static int evict_one_icplb(int cpu)
133{
134 int i = first_switched_icplb + icplb_rr_index[cpu];
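	/* Wrap back into the switched range [first_switched_icplb, MAX_CPLBS)
	 * once the round-robin counter runs past the last entry.
	 */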
135 if (i >= MAX_CPLBS) {
136 i -= MAX_CPLBS - first_switched_icplb;
137 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
138 }
139 icplb_rr_index[cpu]++;
140 return i;
141}
142
143static int evict_one_dcplb(int cpu)
144{
145 int i = first_switched_dcplb + dcplb_rr_index[cpu];
146 if (i >= MAX_CPLBS) {
147 i -= MAX_CPLBS - first_switched_dcplb;
148 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
149 }
150 dcplb_rr_index[cpu]++;
151 return i;
152}
153
154MGR_ATTR static int icplb_miss(int cpu)
155{
156 unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
157 int status = bfin_read_ICPLB_STATUS();
158 int idx;
159 unsigned long i_data, base, addr1, eaddr;
160
161 nr_icplb_miss[cpu]++;
162 if (unlikely(status & FAULT_USERSUPV))
163 nr_icplb_supv_miss[cpu]++;
164
165 base = 0;
166 for (idx = 0; idx < icplb_nr_bounds; idx++) {
167 eaddr = icplb_bounds[idx].eaddr;
168 if (addr < eaddr)
169 break;
170 base = eaddr;
171 }
172 if (unlikely(idx == icplb_nr_bounds))
173 return CPLB_NO_ADDR_MATCH;
174
175 i_data = icplb_bounds[idx].data;
176 if (unlikely(i_data == 0))
177 return CPLB_NO_ADDR_MATCH;
178
179 addr1 = addr & ~(SIZE_4M - 1);
180 addr &= ~(SIZE_1M - 1);
181 i_data |= PAGE_SIZE_1MB;
182 if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
183 /*
184 * This works because the 1MB size bits are a subset of the 4MB ones
185 * ((PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB), so the OR below
186 * upgrades the size field from 1MB to 4MB. */
187 i_data |= PAGE_SIZE_4MB;
188 addr = addr1;
189 }
190
191 /* Pick entry to evict */
192 idx = evict_one_icplb(cpu);
193
194 write_icplb_data(cpu, idx, i_data, addr);
195
196 return CPLB_RELOADED;
197}
198
199MGR_ATTR static int dcplb_miss(int cpu)
200{
201 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
202 int status = bfin_read_DCPLB_STATUS();
203 int idx;
204 unsigned long d_data, base, addr1, eaddr;
205
206 nr_dcplb_miss[cpu]++;
207 if (unlikely(status & FAULT_USERSUPV))
208 nr_dcplb_supv_miss[cpu]++;
209
210 base = 0;
211 for (idx = 0; idx < dcplb_nr_bounds; idx++) {
212 eaddr = dcplb_bounds[idx].eaddr;
213 if (addr < eaddr)
214 break;
215 base = eaddr;
216 }
217 if (unlikely(idx == dcplb_nr_bounds))
218 return CPLB_NO_ADDR_MATCH;
219
220 d_data = dcplb_bounds[idx].data;
221 if (unlikely(d_data == 0))
222 return CPLB_NO_ADDR_MATCH;
223
224 addr1 = addr & ~(SIZE_4M - 1);
225 addr &= ~(SIZE_1M - 1);
226 d_data |= PAGE_SIZE_1MB;
227 if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
228 /*
229 * This works because the 1MB size bits are a subset of the 4MB ones
230 * ((PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB), so the OR below
231 * upgrades the size field from 1MB to 4MB. */
232 d_data |= PAGE_SIZE_4MB;
233 addr = addr1;
234 }
235
236 /* Pick entry to evict */
237 idx = evict_one_dcplb(cpu);
238
239 write_dcplb_data(cpu, idx, d_data, addr);
240
241 return CPLB_RELOADED;
242}
243
244MGR_ATTR static noinline int dcplb_protection_fault(int cpu)
245{
246 int status = bfin_read_DCPLB_STATUS();
247
248 nr_dcplb_prot[cpu]++;
249
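	/* FAULT_RW means the faulting access was a write; only a permitted
	 * write to a clean, write-back page is fixed up here, anything else
	 * is reported as a protection violation.
	 */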
250 if (likely(status & FAULT_RW)) {
251 int idx = faulting_cplb_index(status);
252 unsigned long regaddr = DCPLB_DATA0 + idx * 4;
253 unsigned long data = bfin_read32(regaddr);
254
255 /* Check if fault is to dirty a clean page */
256 if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
257 write_permitted(status, data)) {
258
259 dcplb_tbl[cpu][idx].data = data;
260 bfin_write32(regaddr, data);
261 return CPLB_RELOADED;
262 }
263 }
264
265 return CPLB_PROT_VIOL;
266}
267
268MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
269{
270 int cause = seqstat & 0x3f;
271 unsigned int cpu = smp_processor_id();
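	/* EXCAUSE values: 0x2C = instruction fetch CPLB miss, 0x26 = data
	 * access CPLB miss, 0x23 = data access CPLB protection violation.
	 */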
272 switch (cause) {
273 case 0x2C:
274 return icplb_miss(cpu);
275 case 0x26:
276 return dcplb_miss(cpu);
277 default:
278 if (unlikely(cause == 0x23))
279 return dcplb_protection_fault(cpu);
280
281 return CPLB_UNKNOWN_ERR;
282 }
283}
diff --git a/arch/blackfin/kernel/cplbinfo.c b/arch/blackfin/kernel/cplbinfo.c
index 1d3bbec1a195..64d78300dd08 100644
--- a/arch/blackfin/kernel/cplbinfo.c
+++ b/arch/blackfin/kernel/cplbinfo.c
@@ -20,8 +20,6 @@ static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" };
20#define page(flags) (((flags) & 0x30000) >> 16) 20#define page(flags) (((flags) & 0x30000) >> 16)
21#define strpage(flags) page_strtbl[page(flags)] 21#define strpage(flags) page_strtbl[page(flags)]
22 22
23#ifdef CONFIG_MPU
24
25struct cplbinfo_data { 23struct cplbinfo_data {
26 loff_t pos; 24 loff_t pos;
27 char cplb_type; 25 char cplb_type;
@@ -75,88 +73,6 @@ static void cplbinfo_seq_init(struct cplbinfo_data *cdata, unsigned int cpu)
75 } 73 }
76} 74}
77 75
78#else
79
80struct cplbinfo_data {
81 loff_t pos;
82 char cplb_type;
83 u32 mem_control;
84 unsigned long *pdt_tables, *pdt_swapcount;
85 unsigned long cplb_addr, cplb_data;
86};
87
88extern int page_size_table[];
89
90static int cplb_find_entry(unsigned long addr_tbl, unsigned long data_tbl,
91 unsigned long addr_find, unsigned long data_find)
92{
93 int i;
94
95 for (i = 0; i < 16; ++i) {
96 unsigned long cplb_addr = bfin_read32(addr_tbl + i * 4);
97 unsigned long cplb_data = bfin_read32(data_tbl + i * 4);
98 if (addr_find >= cplb_addr &&
99 addr_find < cplb_addr + page_size_table[page(cplb_data)] &&
100 cplb_data == data_find)
101 return i;
102 }
103
104 return -1;
105}
106
107static void cplbinfo_print_header(struct seq_file *m)
108{
109 seq_printf(m, "Address\t\tData\tSize\tValid\tLocked\tSwapin\tiCount\toCount\n");
110}
111
112static int cplbinfo_nomore(struct cplbinfo_data *cdata)
113{
114 return cdata->pdt_tables[cdata->pos * 2] == 0xffffffff;
115}
116
117static int cplbinfo_show(struct seq_file *m, void *p)
118{
119 struct cplbinfo_data *cdata;
120 unsigned long data, addr;
121 int entry;
122 loff_t pos;
123
124 cdata = p;
125 pos = cdata->pos * 2;
126 addr = cdata->pdt_tables[pos];
127 data = cdata->pdt_tables[pos + 1];
128 entry = cplb_find_entry(cdata->cplb_addr, cdata->cplb_data, addr, data);
129
130 seq_printf(m,
131 "0x%08lx\t0x%05lx\t%s\t%c\t%c\t%2d\t%ld\t%ld\n",
132 addr, data, strpage(data),
133 (data & CPLB_VALID) ? 'Y' : 'N',
134 (data & CPLB_LOCK) ? 'Y' : 'N', entry,
135 cdata->pdt_swapcount[pos],
136 cdata->pdt_swapcount[pos + 1]);
137
138 return 0;
139}
140
141static void cplbinfo_seq_init(struct cplbinfo_data *cdata, unsigned int cpu)
142{
143 if (cdata->cplb_type == 'I') {
144 cdata->mem_control = bfin_read_IMEM_CONTROL();
145 cdata->pdt_tables = ipdt_tables[cpu];
146 cdata->pdt_swapcount = ipdt_swapcount_tables[cpu];
147 cdata->cplb_addr = ICPLB_ADDR0;
148 cdata->cplb_data = ICPLB_DATA0;
149 } else {
150 cdata->mem_control = bfin_read_DMEM_CONTROL();
151 cdata->pdt_tables = dpdt_tables[cpu];
152 cdata->pdt_swapcount = dpdt_swapcount_tables[cpu];
153 cdata->cplb_addr = DCPLB_ADDR0;
154 cdata->cplb_data = DCPLB_DATA0;
155 }
156}
157
158#endif
159
160static void *cplbinfo_start(struct seq_file *m, loff_t *pos) 76static void *cplbinfo_start(struct seq_file *m, loff_t *pos)
161{ 77{
162 struct cplbinfo_data *cdata = m->private; 78 struct cplbinfo_data *cdata = m->private;
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index d3d37e7f465b..20d04a1bc861 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -88,6 +88,7 @@ void __init generate_cplb_tables(void)
88{ 88{
89 unsigned int cpu; 89 unsigned int cpu;
90 90
91 generate_cplb_tables_all();
91 /* Generate per-CPU I&D CPLB tables */ 92 /* Generate per-CPU I&D CPLB tables */
92 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) 93 for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
93 generate_cplb_tables_cpu(cpu); 94 generate_cplb_tables_cpu(cpu);
@@ -97,19 +98,11 @@ void __init generate_cplb_tables(void)
97void __cpuinit bfin_setup_caches(unsigned int cpu) 98void __cpuinit bfin_setup_caches(unsigned int cpu)
98{ 99{
99#ifdef CONFIG_BFIN_ICACHE 100#ifdef CONFIG_BFIN_ICACHE
100#ifdef CONFIG_MPU
101 bfin_icache_init(icplb_tbl[cpu]); 101 bfin_icache_init(icplb_tbl[cpu]);
102#else
103 bfin_icache_init(icplb_tables[cpu]);
104#endif
105#endif 102#endif
106 103
107#ifdef CONFIG_BFIN_DCACHE 104#ifdef CONFIG_BFIN_DCACHE
108#ifdef CONFIG_MPU
109 bfin_dcache_init(dcplb_tbl[cpu]); 105 bfin_dcache_init(dcplb_tbl[cpu]);
110#else
111 bfin_dcache_init(dcplb_tables[cpu]);
112#endif
113#endif 106#endif
114 107
115 /* 108 /*
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 0efd5f5b7ba0..fae774651374 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -112,24 +112,21 @@ ENTRY(_ex_dcplb_viol)
112ENTRY(_ex_dcplb_miss) 112ENTRY(_ex_dcplb_miss)
113ENTRY(_ex_icplb_miss) 113ENTRY(_ex_icplb_miss)
114 (R7:6,P5:4) = [sp++]; 114 (R7:6,P5:4) = [sp++];
115 ASTAT = [sp++]; 115 /* We leave the previously pushed ASTAT on the stack. */
116 SAVE_ALL_SYS 116 SAVE_CONTEXT_CPLB
117#ifdef CONFIG_MPU 117
118 /* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that 118 /* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
119 * will change the stack pointer. */ 119 * will change the stack pointer. */
120 R0 = SEQSTAT; 120 R0 = SEQSTAT;
121 R1 = SP; 121 R1 = SP;
122#endif 122
123 DEBUG_HWTRACE_SAVE(p5, r7) 123 DEBUG_HWTRACE_SAVE(p5, r7)
124#ifdef CONFIG_MPU 124
125 sp += -12; 125 sp += -12;
126 call _cplb_hdr; 126 call _cplb_hdr;
127 sp += 12; 127 sp += 12;
128 CC = R0 == 0; 128 CC = R0 == 0;
129 IF !CC JUMP _handle_bad_cplb; 129 IF !CC JUMP _handle_bad_cplb;
130#else
131 call __cplb_hdr;
132#endif
133 130
134#ifdef CONFIG_DEBUG_DOUBLEFAULT 131#ifdef CONFIG_DEBUG_DOUBLEFAULT
135 /* While we were processing this, did we double fault? */ 132 /* While we were processing this, did we double fault? */
@@ -143,7 +140,8 @@ ENTRY(_ex_icplb_miss)
143#endif 140#endif
144 141
145 DEBUG_HWTRACE_RESTORE(p5, r7) 142 DEBUG_HWTRACE_RESTORE(p5, r7)
146 RESTORE_ALL_SYS 143 RESTORE_CONTEXT_CPLB
144 ASTAT = [SP++];
147 SP = EX_SCRATCH_REG; 145 SP = EX_SCRATCH_REG;
148 rtx; 146 rtx;
149ENDPROC(_ex_icplb_miss) 147ENDPROC(_ex_icplb_miss)
@@ -298,9 +296,8 @@ ENTRY(_handle_bad_cplb)
298 * the stack to get ready so, we can fall through - we 296 * the stack to get ready so, we can fall through - we
299 * need to make a CPLB exception look like a normal exception 297 * need to make a CPLB exception look like a normal exception
300 */ 298 */
301 299 RESTORE_CONTEXT_CPLB
302 RESTORE_ALL_SYS 300 /* ASTAT is still on the stack, where it is needed. */
303 [--sp] = ASTAT;
304 [--sp] = (R7:6,P5:4); 301 [--sp] = (R7:6,P5:4);
305 302
306ENTRY(_ex_replaceable) 303ENTRY(_ex_replaceable)
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index 57d306b9c56d..d0532b72bba5 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -119,16 +119,6 @@ asmlinkage void init_pda(void)
119 119
120 cpu_pda[cpu].ex_stack = exception_stack[cpu + 1]; 120 cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
121 121
122#ifdef CONFIG_MPU
123#else
124 cpu_pda[cpu].ipdt = ipdt_tables[cpu];
125 cpu_pda[cpu].dpdt = dpdt_tables[cpu];
126#ifdef CONFIG_CPLB_INFO
127 cpu_pda[cpu].ipdt_swapcount = ipdt_swapcount_tables[cpu];
128 cpu_pda[cpu].dpdt_swapcount = dpdt_swapcount_tables[cpu];
129#endif
130#endif
131
132#ifdef CONFIG_SMP 122#ifdef CONFIG_SMP
133 cpu_pda[cpu].imask = 0x1f; 123 cpu_pda[cpu].imask = 0x1f;
134#endif 124#endif