Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--   arch/blackfin/mach-common/Makefile               12
-rw-r--r--   arch/blackfin/mach-common/cache.S               253
-rw-r--r--   arch/blackfin/mach-common/cacheinit.S           137
-rw-r--r--   arch/blackfin/mach-common/cplbhdlr.S            130
-rw-r--r--   arch/blackfin/mach-common/cplbinfo.c            211
-rw-r--r--   arch/blackfin/mach-common/cplbmgr.S             607
-rw-r--r--   arch/blackfin/mach-common/dpmc.S                418
-rw-r--r--   arch/blackfin/mach-common/entry.S              1207
-rw-r--r--   arch/blackfin/mach-common/interrupt.S           253
-rw-r--r--   arch/blackfin/mach-common/ints-priority-dc.c    476
-rw-r--r--   arch/blackfin/mach-common/ints-priority-sc.c    577
-rw-r--r--   arch/blackfin/mach-common/irqpanic.c            194
-rw-r--r--   arch/blackfin/mach-common/lock.S                204
-rw-r--r--   arch/blackfin/mach-common/pm.c                  181
14 files changed, 4860 insertions, 0 deletions
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
new file mode 100644
index 000000000000..d3a49073d196
--- /dev/null
+++ b/arch/blackfin/mach-common/Makefile
@@ -0,0 +1,12 @@
1#
2# arch/blackfin/mach-common/Makefile
3#
4
5obj-y := \
6 cache.o cacheinit.o cplbhdlr.o cplbmgr.o entry.o \
7 interrupt.o lock.o dpmc.o irqpanic.o
8
9obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
10obj-$(CONFIG_BFIN_SINGLE_CORE) += ints-priority-sc.o
11obj-$(CONFIG_BFIN_DUAL_CORE) += ints-priority-dc.o
12obj-$(CONFIG_PM) += pm.o
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
new file mode 100644
index 000000000000..bb9446ef66ef
--- /dev/null
+++ b/arch/blackfin/mach-common/cache.S
@@ -0,0 +1,253 @@
1/*
2 * File: arch/blackfin/mach-common/cache.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created:
7 * Description: cache control support
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/linkage.h>
31#include <asm/cplb.h>
32#include <asm/entry.h>
33#include <asm/blackfin.h>
34#include <asm/cache.h>
35
36.text
37.align 2
38ENTRY(_cache_invalidate)
39
40 /*
41 * Icache or DcacheA or DcacheB Invalidation
42 * or any combination thereof
43 * R0 has bits
44 * CPLB_ENABLE_ICACHE_P,CPLB_ENABLE_DCACHE_P,CPLB_ENABLE_DCACHE2_P
45 * set as required
46 */
47 [--SP] = R7;
48
49 R7 = R0;
50 CC = BITTST(R7,CPLB_ENABLE_ICACHE_P);
51 IF !CC JUMP .Lno_icache;
52 [--SP] = RETS;
53 CALL _icache_invalidate;
54 RETS = [SP++];
55.Lno_icache:
56 CC = BITTST(R7,CPLB_ENABLE_DCACHE_P);
57 IF !CC JUMP .Lno_dcache_a;
58 R0 = 0; /* specifies bank A */
59 [--SP] = RETS;
60 CALL _dcache_invalidate;
61 RETS = [SP++];
62.Lno_dcache_a:
63 CC = BITTST(R7,CPLB_ENABLE_DCACHE2_P);
64 IF !CC JUMP .Lno_dcache_b;
65 R0 = 0;
66 BITSET(R0, 23); /* specifies bank B */
67 [--SP] = RETS;
68 CALL _dcache_invalidate;
69 RETS = [SP++];
70.Lno_dcache_b:
71 R7 = [SP++];
72 RTS;
73
 74/* Invalidate the entire instruction cache by
 75 * disabling the IMC bit
 76 */
77ENTRY(_icache_invalidate)
78ENTRY(_invalidate_entire_icache)
79 [--SP] = ( R7:5);
80
81 P0.L = (IMEM_CONTROL & 0xFFFF);
82 P0.H = (IMEM_CONTROL >> 16);
83 R7 = [P0];
84
 85 /* Clear the IMC bit; all valid bits in the instruction
 86 * cache are set to the invalid state
87 */
88 BITCLR(R7,IMC_P);
89 CLI R6;
90 SSYNC; /* SSYNC required before invalidating cache. */
91 .align 8;
92 [P0] = R7;
93 SSYNC;
94 STI R6;
95
 96 /* Configure the instruction cache again */
97 R6 = (IMC | ENICPLB);
98 R7 = R7 | R6;
99
100 CLI R6;
101 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
102 .align 8;
103 [P0] = R7;
104 SSYNC;
105 STI R6;
106
107 ( R7:5) = [SP++];
108 RTS;
109
110/*
111 * blackfin_icache_flush_range(start, end)
112 * Invalidate all cache lines associated with this
113 * area of memory.
114 *
115 * start: Start address
116 * end: End address
117 */
118ENTRY(_blackfin_icache_flush_range)
119 R2 = -L1_CACHE_BYTES;
120 R2 = R0 & R2;
121 P0 = R2;
122 P1 = R1;
123 CSYNC;
124 IFLUSH [P0];
1251:
126 IFLUSH [P0++];
127 CC = P0 < P1 (iu);
128 IF CC JUMP 1b (bp);
129 IFLUSH [P0];
130 SSYNC;
131 RTS;
132
133/*
134 * blackfin_icache_dcache_flush_range(start, end)
135 * Flush all cache lines associated with this
136 * area of memory.
137 *
138 * start: Start address
139 * end: End address
140 */
141
142ENTRY(_blackfin_icache_dcache_flush_range)
143 R2 = -L1_CACHE_BYTES;
144 R2 = R0 & R2;
145 P0 = R2;
146 P1 = R1;
147 CSYNC;
148 IFLUSH [P0];
1491:
150 FLUSH [P0];
151 IFLUSH [P0++];
152 CC = P0 < P1 (iu);
153 IF CC JUMP 1b (bp);
154 IFLUSH [P0];
155 FLUSH [P0];
156 SSYNC;
157 RTS;
158
159/* Throw away all D-cached data in the specified region without any obligation
160 * to write it back. However, we must clean the D-cached entries around the
161 * boundaries if the start and/or end address is not cache aligned.
162 *
163 * Start: start address,
164 * end : end address.
165 */
166
167ENTRY(_blackfin_dcache_invalidate_range)
168 R2 = -L1_CACHE_BYTES;
169 R2 = R0 & R2;
170 P0 = R2;
171 P1 = R1;
172 CSYNC;
173 FLUSHINV[P0];
1741:
175 FLUSHINV[P0++];
176 CC = P0 < P1 (iu);
177 IF CC JUMP 1b (bp);
178
179 /* If the data crosses a cache line, then we'll be pointing to
180 * the last cache line, but won't have flushed/invalidated it yet,
181 * so do one more.
182 */
183 FLUSHINV[P0];
184 SSYNC;
185 RTS;
186
187/* Invalidate the entire data cache by
188 * clearing the DMC[1:0] bits
189 */
190ENTRY(_invalidate_entire_dcache)
191ENTRY(_dcache_invalidate)
192 [--SP] = ( R7:6);
193
194 P0.L = (DMEM_CONTROL & 0xFFFF);
195 P0.H = (DMEM_CONTROL >> 16);
196 R7 = [P0];
197
198 /* Clear the DMC[1:0] bits; all valid bits in the data
199 * cache are set to the invalid state
200 */
201 BITCLR(R7,DMC0_P);
202 BITCLR(R7,DMC1_P);
203 CLI R6;
204 SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
205 .align 8;
206 [P0] = R7;
207 SSYNC;
208 STI R6;
209
210 /* Configures the data cache again */
211
212 R6 = DMEM_CNTR;
213 R7 = R7 | R6;
214
215 CLI R6;
216 SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
217 .align 8;
218 [P0] = R7;
219 SSYNC;
220 STI R6;
221
222 ( R7:6) = [SP++];
223 RTS;
224
225ENTRY(_blackfin_dcache_flush_range)
226 R2 = -L1_CACHE_BYTES;
227 R2 = R0 & R2;
228 P0 = R2;
229 P1 = R1;
230 CSYNC;
231 FLUSH[P0];
2321:
233 FLUSH[P0++];
234 CC = P0 < P1 (iu);
235 IF CC JUMP 1b (bp);
236
237 /* If the data crosses a cache line, then we'll be pointing to
238 * the last cache line, but won't have flushed it yet, so do
239 * one more.
240 */
241 FLUSH[P0];
242 SSYNC;
243 RTS;
244
245ENTRY(_blackfin_dflush_page)
246 P1 = 1 << (PAGE_SHIFT - L1_CACHE_SHIFT);
247 P0 = R0;
248 CSYNC;
249 FLUSH[P0];
250 LSETUP (.Lfl1, .Lfl1) LC0 = P1;
251.Lfl1: FLUSH [P0++];
252 SSYNC;
253 RTS;
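
All of the range routines above follow the same pattern: round the start address down to a cache-line boundary, issue one IFLUSH/FLUSH/FLUSHINV per line until the end address is passed, and then touch one extra line in case the region ends part-way through a line. Below is a rough C model of _blackfin_dcache_flush_range for readers less used to Blackfin assembly; it is illustrative only, not what the kernel builds. flush_line() is a hypothetical stand-in for the FLUSH instruction, and the line size is assumed to be 32 bytes (the value of L1_CACHE_BYTES on these parts).

    #define L1_CACHE_BYTES 32UL	/* Blackfin L1 cache line size (assumed) */

    /* Hypothetical stand-in for the FLUSH [Px] instruction. */
    static void flush_line(unsigned long addr)
    {
    	(void)addr;
    }

    /* Illustrative model of _blackfin_dcache_flush_range(start, end). */
    static void dcache_flush_range_model(unsigned long start, unsigned long end)
    {
    	/* R2 = -L1_CACHE_BYTES; R2 = R0 & R2: align start down to a line */
    	unsigned long p = start & ~(L1_CACHE_BYTES - 1);

    	while (p < end) {	/* CC = P0 < P1 (iu) */
    		flush_line(p);
    		p += L1_CACHE_BYTES;
    	}
    	flush_line(p);		/* one extra line: the region may end mid-line */
    }
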
diff --git a/arch/blackfin/mach-common/cacheinit.S b/arch/blackfin/mach-common/cacheinit.S
new file mode 100644
index 000000000000..8c17f099e5eb
--- /dev/null
+++ b/arch/blackfin/mach-common/cacheinit.S
@@ -0,0 +1,137 @@
1/*
2 * File: arch/blackfin/mach-common/cacheinit.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created: ?
7 * Description: cache initialization
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
 30/* These functions set up the data and instruction caches. The
 31 * tables (icplb table, dcplb table and Page Descriptor table)
 32 * are defined in cplbtab.h. You can configure those tables to
 33 * suit your requirements.
 34 */
35
36#include <linux/linkage.h>
37#include <asm/blackfin.h>
38
39.text
40
41#if defined(CONFIG_BLKFIN_CACHE)
42ENTRY(_bfin_icache_init)
43
44 /* Initialize Instruction CPLBS */
45
46 I0.L = (ICPLB_ADDR0 & 0xFFFF);
47 I0.H = (ICPLB_ADDR0 >> 16);
48
49 I1.L = (ICPLB_DATA0 & 0xFFFF);
50 I1.H = (ICPLB_DATA0 >> 16);
51
52 I2.L = _icplb_table;
53 I2.H = _icplb_table;
54
55 r1 = -1; /* end point comparison */
56 r3 = 15; /* max counter */
57
58/* read entries from table */
59
60.Lread_iaddr:
61 R0 = [I2++];
62 CC = R0 == R1;
63 IF CC JUMP .Lidone;
64 [I0++] = R0;
65
66.Lread_idata:
67 R2 = [I2++];
68 [I1++] = R2;
69 R3 = R3 + R1;
70 CC = R3 == R1;
71 IF !CC JUMP .Lread_iaddr;
72
73.Lidone:
74 /* Enable Instruction Cache */
75 P0.l = (IMEM_CONTROL & 0xFFFF);
76 P0.h = (IMEM_CONTROL >> 16);
77 R1 = [P0];
78 R0 = (IMC | ENICPLB);
79 R0 = R0 | R1;
80
81 /* Anomaly 05000125 */
82 CLI R2;
83 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
84 .align 8;
85 [P0] = R0;
86 SSYNC;
87 STI R2;
88 RTS;
89#endif
90
91#if defined(CONFIG_BLKFIN_DCACHE)
92ENTRY(_bfin_dcache_init)
93
94 /* Initialize Data CPLBS */
95
96 I0.L = (DCPLB_ADDR0 & 0xFFFF);
97 I0.H = (DCPLB_ADDR0 >> 16);
98
99 I1.L = (DCPLB_DATA0 & 0xFFFF);
100 I1.H = (DCPLB_DATA0 >> 16);
101
102 I2.L = _dcplb_table;
103 I2.H = _dcplb_table;
104
105 R1 = -1; /* end point comparison */
106 R3 = 15; /* max counter */
107
108 /* read entries from table */
109.Lread_daddr:
110 R0 = [I2++];
111 cc = R0 == R1;
112 IF CC JUMP .Lddone;
113 [I0++] = R0;
114
115.Lread_ddata:
116 R2 = [I2++];
117 [I1++] = R2;
118 R3 = R3 + R1;
119 CC = R3 == R1;
120 IF !CC JUMP .Lread_daddr;
121.Lddone:
122 P0.L = (DMEM_CONTROL & 0xFFFF);
123 P0.H = (DMEM_CONTROL >> 16);
124 R1 = [P0];
125
126 R0 = DMEM_CNTR;
127
128 R0 = R0 | R1;
129 /* Anomaly 05000125 */
130 CLI R2;
131 SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
132 .align 8;
133 [P0] = R0;
134 SSYNC;
135 STI R2;
136 RTS;
137#endif
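
Both _bfin_icache_init and _bfin_dcache_init perform the same table walk: read {address, data} pairs from the configuration table (terminated by an address of -1), write up to 16 of them into the CPLB MMRs, and then set the enable bits in IMEM_CONTROL or DMEM_CONTROL with interrupts disabled (the CLI/SSYNC/.align 8 sequence works around anomaly 05000125). A rough C model of the table walk only follows; it is a sketch, where cplb_addr_mmr and cplb_data_mmr stand for the ICPLB_ADDR0../ICPLB_DATA0.. (or DCPLB) register banks and table is the pair list that the header comment says lives in cplbtab.h.

    /* Illustrative C model of the CPLB table walk done above in assembly. */
    static void load_cplbs_model(volatile unsigned long *cplb_addr_mmr,
    			     volatile unsigned long *cplb_data_mmr,
    			     const unsigned long *table)
    {
    	int i;

    	for (i = 0; i < 16; i++) {		/* at most 16 hardware CPLBs */
    		unsigned long addr = *table++;
    		if (addr == (unsigned long)-1)	/* -1 terminates the table */
    			break;
    		cplb_addr_mmr[i] = addr;
    		cplb_data_mmr[i] = *table++;	/* matching data word */
    	}
    }
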
diff --git a/arch/blackfin/mach-common/cplbhdlr.S b/arch/blackfin/mach-common/cplbhdlr.S
new file mode 100644
index 000000000000..b979067c49ef
--- /dev/null
+++ b/arch/blackfin/mach-common/cplbhdlr.S
@@ -0,0 +1,130 @@
1/*
2 * File: arch/blackfin/mach-common/cplbhdlr.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created: ?
7 * Description: CPLB exception handler
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/linkage.h>
31#include <asm/cplb.h>
32#include <asm/entry.h>
33
34#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
35.section .l1.text
36#else
37.text
38#endif
39
40.type _cplb_mgr, STT_FUNC;
41.type _panic_cplb_error, STT_FUNC;
42
43.align 2
44
45.global __cplb_hdr;
46.type __cplb_hdr, STT_FUNC;
47ENTRY(__cplb_hdr)
48 R2 = SEQSTAT;
49
50 /* Mask the contents of SEQSTAT and leave only EXCAUSE in R2 */
51 R2 <<= 26;
52 R2 >>= 26;
53
54 R1 = 0x23; /* Data access CPLB protection violation */
55 CC = R2 == R1;
56 IF !CC JUMP .Lnot_data_write;
57 R0 = 2; /* is a write to data space*/
58 JUMP .Lis_icplb_miss;
59
60.Lnot_data_write:
61 R1 = 0x2C; /* CPLB miss on an instruction fetch */
62 CC = R2 == R1;
63 R0 = 0; /* is_data_miss == False*/
64 IF CC JUMP .Lis_icplb_miss;
65
66 R1 = 0x26;
67 CC = R2 == R1;
68 IF !CC JUMP .Lunknown;
69
70 R0 = 1; /* is_data_miss == True*/
71
72.Lis_icplb_miss:
73
74#if defined(CONFIG_BLKFIN_CACHE) || defined(CONFIG_BLKFIN_DCACHE)
75# if defined(CONFIG_BLKFIN_CACHE) && !defined(CONFIG_BLKFIN_DCACHE)
76 R1 = CPLB_ENABLE_ICACHE;
77# endif
78# if !defined(CONFIG_BLKFIN_CACHE) && defined(CONFIG_BLKFIN_DCACHE)
79 R1 = CPLB_ENABLE_DCACHE;
80# endif
81# if defined(CONFIG_BLKFIN_CACHE) && defined(CONFIG_BLKFIN_DCACHE)
82 R1 = CPLB_ENABLE_DCACHE | CPLB_ENABLE_ICACHE;
83# endif
84#else
85 R1 = 0;
86#endif
87
88 [--SP] = RETS;
89 CALL _cplb_mgr;
90 RETS = [SP++];
91 CC = R0 == 0;
92 IF !CC JUMP .Lnot_replaced;
93 RTS;
94
95/*
96 * Diagnostic exception handlers
97 */
98.Lunknown:
99 R0 = CPLB_UNKNOWN_ERR;
100 JUMP .Lcplb_error;
101
102.Lnot_replaced:
103 CC = R0 == CPLB_NO_UNLOCKED;
104 IF !CC JUMP .Lnext_check;
105 R0 = CPLB_NO_UNLOCKED;
106 JUMP .Lcplb_error;
107
108.Lnext_check:
109 CC = R0 == CPLB_NO_ADDR_MATCH;
110 IF !CC JUMP .Lnext_check2;
111 R0 = CPLB_NO_ADDR_MATCH;
112 JUMP .Lcplb_error;
113
114.Lnext_check2:
115 CC = R0 == CPLB_PROT_VIOL;
116 IF !CC JUMP .Lstrange_return_from_cplb_mgr;
117 R0 = CPLB_PROT_VIOL;
118 JUMP .Lcplb_error;
119
120.Lstrange_return_from_cplb_mgr:
121 IDLE;
122 CSYNC;
123 JUMP .Lstrange_return_from_cplb_mgr;
124
125.Lcplb_error:
126 R1 = sp;
127 SP += -12;
128 call _panic_cplb_error;
129 SP += 12;
130 JUMP _handle_bad_cplb;
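
In other words, __cplb_hdr decodes the EXCAUSE field of SEQSTAT and maps the three CPLB-related causes onto _cplb_mgr's first argument: 0x23 (a data-access CPLB protection violation, possibly just a first write to a clean page) becomes 2, 0x2C (an instruction-fetch CPLB miss) becomes 0, and 0x26 (a data CPLB miss) becomes 1; anything else is treated as an unknown error. A C rendering of that dispatch follows as a sketch only: cplb_mgr() is a hypothetical prototype standing in for the assembly routine _cplb_mgr, and -1 stands in for the CPLB_UNKNOWN_ERR path, which the real handler routes to _panic_cplb_error.

    /* Hypothetical prototype for the assembly routine _cplb_mgr. */
    extern int cplb_mgr(int is_data_miss, int enable_cache);

    static int cplb_hdr_model(unsigned long seqstat, int enable_cache)
    {
    	switch (seqstat & 0x3f) {	/* EXCAUSE is SEQSTAT[5:0] */
    	case 0x23:			/* data access CPLB protection violation */
    		return cplb_mgr(2, enable_cache);	/* maybe a write to a clean page */
    	case 0x2C:			/* CPLB miss on an instruction fetch */
    		return cplb_mgr(0, enable_cache);
    	case 0x26:			/* CPLB miss on a data access */
    		return cplb_mgr(1, enable_cache);
    	default:
    		return -1;		/* CPLB_UNKNOWN_ERR in the real code */
    	}
    }
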
diff --git a/arch/blackfin/mach-common/cplbinfo.c b/arch/blackfin/mach-common/cplbinfo.c
new file mode 100644
index 000000000000..d65fac39d1bf
--- /dev/null
+++ b/arch/blackfin/mach-common/cplbinfo.c
@@ -0,0 +1,211 @@
1/*
2 * File: arch/blackfin/mach-common/cplbinfo.c
3 * Based on:
4 * Author: Sonic Zhang <sonic.zhang@analog.com>
5 *
6 * Created: Jan. 2005
7 * Description: Display CPLB status
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/proc_fs.h>
34
35#include <asm/current.h>
36#include <asm/uaccess.h>
37#include <asm/system.h>
38
39#include <asm/cplb.h>
40#include <asm/blackfin.h>
41
42#define CPLB_I 1
43#define CPLB_D 2
44
45#define SYNC_SYS SSYNC()
46#define SYNC_CORE CSYNC()
47
48#define CPLB_BIT_PAGESIZE 0x30000
49
50static int page_size_table[4] = {
51 0x00000400, /* 1K */
52 0x00001000, /* 4K */
53 0x00100000, /* 1M */
54 0x00400000 /* 4M */
55};
56
57static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" };
58
59static int cplb_find_entry(unsigned long *cplb_addr,
60 unsigned long *cplb_data, unsigned long addr,
61 unsigned long data)
62{
63 int ii;
64
65 for (ii = 0; ii < 16; ii++)
66 if (addr >= cplb_addr[ii] && addr < cplb_addr[ii] +
67 page_size_table[(cplb_data[ii] & CPLB_BIT_PAGESIZE) >> 16]
68 && (cplb_data[ii] == data))
69 return ii;
70
71 return -1;
72}
73
74static char *cplb_print_entry(char *buf, int type)
75{
76 unsigned long *p_addr = dpdt_table;
77 unsigned long *p_data = dpdt_table + 1;
78 unsigned long *p_icount = dpdt_swapcount_table;
79 unsigned long *p_ocount = dpdt_swapcount_table + 1;
80 unsigned long *cplb_addr = (unsigned long *)DCPLB_ADDR0;
81 unsigned long *cplb_data = (unsigned long *)DCPLB_DATA0;
82 int entry = 0, used_cplb = 0;
83
84 if (type == CPLB_I) {
 85 buf += sprintf(buf, "Instruction CPLB entry:\n");
86 p_addr = ipdt_table;
87 p_data = ipdt_table + 1;
88 p_icount = ipdt_swapcount_table;
89 p_ocount = ipdt_swapcount_table + 1;
90 cplb_addr = (unsigned long *)ICPLB_ADDR0;
91 cplb_data = (unsigned long *)ICPLB_DATA0;
92 } else
93 buf += sprintf(buf, "Data CPLB entry:\n");
94
95 buf += sprintf(buf, "Address\t\tData\tSize\tValid\tLocked\tSwapin\
96\tiCount\toCount\n");
97
98 while (*p_addr != 0xffffffff) {
99 entry = cplb_find_entry(cplb_addr, cplb_data, *p_addr, *p_data);
100 if (entry >= 0)
101 used_cplb |= 1 << entry;
102
103 buf +=
104 sprintf(buf,
105 "0x%08lx\t0x%05lx\t%s\t%c\t%c\t%2d\t%ld\t%ld\n",
106 *p_addr, *p_data,
107 page_size_string_table[(*p_data & 0x30000) >> 16],
108 (*p_data & CPLB_VALID) ? 'Y' : 'N',
109 (*p_data & CPLB_LOCK) ? 'Y' : 'N', entry, *p_icount,
110 *p_ocount);
111
112 p_addr += 2;
113 p_data += 2;
114 p_icount += 2;
115 p_ocount += 2;
116 }
117
118 if (used_cplb != 0xffff) {
119 buf += sprintf(buf, "Unused/mismatched CPLBs:\n");
120
121 for (entry = 0; entry < 16; entry++)
122 if (0 == ((1 << entry) & used_cplb)) {
123 int flags = cplb_data[entry];
124 buf +=
125 sprintf(buf,
126 "%2d: 0x%08lx\t0x%05x\t%s\t%c\t%c\n",
127 entry, cplb_addr[entry], flags,
128 page_size_string_table[(flags &
129 0x30000) >>
130 16],
131 (flags & CPLB_VALID) ? 'Y' : 'N',
132 (flags & CPLB_LOCK) ? 'Y' : 'N');
133 }
134 }
135
136 buf += sprintf(buf, "\n");
137
138 return buf;
139}
140
141static int cplbinfo_proc_output(char *buf)
142{
143 char *p;
144
145 p = buf;
146
147 p += sprintf(p,
148 "------------------ CPLB Information ------------------\n\n");
149
150 if (bfin_read_IMEM_CONTROL() & ENICPLB)
151 p = cplb_print_entry(p, CPLB_I);
152 else
153 p += sprintf(p, "Instruction CPLB is disabled.\n\n");
154
155 if (bfin_read_DMEM_CONTROL() & ENDCPLB)
156 p = cplb_print_entry(p, CPLB_D);
157 else
158 p += sprintf(p, "Data CPLB is disabled.\n");
159
160 return p - buf;
161}
162
163static int cplbinfo_read_proc(char *page, char **start, off_t off,
164 int count, int *eof, void *data)
165{
166 int len;
167
168 len = cplbinfo_proc_output(page);
169 if (len <= off + count)
170 *eof = 1;
171 *start = page + off;
172 len -= off;
173 if (len > count)
174 len = count;
175 if (len < 0)
176 len = 0;
177 return len;
178}
179
180static int cplbinfo_write_proc(struct file *file, const char __user *buffer,
181 unsigned long count, void *data)
182{
183 printk(KERN_INFO "Reset the CPLB swap in/out counts.\n");
184 memset(ipdt_swapcount_table, 0, MAX_SWITCH_I_CPLBS * sizeof(unsigned long));
185 memset(dpdt_swapcount_table, 0, MAX_SWITCH_D_CPLBS * sizeof(unsigned long));
186
187 return count;
188}
189
190static int __init cplbinfo_init(void)
191{
192 struct proc_dir_entry *entry;
193
194 if ((entry = create_proc_entry("cplbinfo", 0, NULL)) == NULL) {
195 return -ENOMEM;
196 }
197
198 entry->read_proc = cplbinfo_read_proc;
199 entry->write_proc = cplbinfo_write_proc;
200 entry->data = NULL;
201
202 return 0;
203}
204
205static void __exit cplbinfo_exit(void)
206{
207 remove_proc_entry("cplbinfo", NULL);
208}
209
210module_init(cplbinfo_init);
211module_exit(cplbinfo_exit);
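
Usage note: with CONFIG_CPLB_INFO enabled, this module exposes /proc/cplbinfo; reading it dumps the instruction and data CPLB tables. A small userspace example (assumes a Blackfin target running a kernel built from this tree):

    #include <stdio.h>

    int main(void)
    {
    	char line[256];
    	FILE *f = fopen("/proc/cplbinfo", "r");

    	if (!f) {
    		perror("/proc/cplbinfo");
    		return 1;
    	}
    	while (fgets(line, sizeof(line), f))	/* dump the CPLB tables */
    		fputs(line, stdout);
    	fclose(f);
    	return 0;
    }

Writing anything to the file (e.g. echo 1 > /proc/cplbinfo as root) goes through cplbinfo_write_proc and clears the swap-in/out counters.
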
diff --git a/arch/blackfin/mach-common/cplbmgr.S b/arch/blackfin/mach-common/cplbmgr.S
new file mode 100644
index 000000000000..f5efc4bc65e6
--- /dev/null
+++ b/arch/blackfin/mach-common/cplbmgr.S
@@ -0,0 +1,607 @@
1/*
  2 * File:         arch/blackfin/mach-common/cplbmgr.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created: ?
7 * Description: CPLB replacement routine for CPLB mismatch
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
 30/* Usage: int _cplb_mgr(int is_data_miss, int enable_cache)
31 * is_data_miss==2 => Mark as Dirty, write to the clean data page
32 * is_data_miss==1 => Replace a data CPLB.
33 * is_data_miss==0 => Replace an instruction CPLB.
34 *
35 * Returns:
36 * CPLB_RELOADED => Successfully updated CPLB table.
37 * CPLB_NO_UNLOCKED => All CPLBs are locked, so cannot be evicted.
38 * This indicates that the CPLBs in the configuration
 39 * table are badly configured, as this should never
40 * occur.
41 * CPLB_NO_ADDR_MATCH => The address being accessed, that triggered the
42 * exception, is not covered by any of the CPLBs in
43 * the configuration table. The application is
44 * presumably misbehaving.
45 * CPLB_PROT_VIOL => The address being accessed, that triggered the
46 * exception, was not a first-write to a clean Write
47 * Back Data page, and so presumably is a genuine
48 * violation of the page's protection attributes.
49 * The application is misbehaving.
50 */
51
52#include <linux/linkage.h>
53#include <asm/blackfin.h>
54#include <asm/cplb.h>
55
56#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
57.section .l1.text
58#else
59.text
60#endif
61
62.align 2;
63ENTRY(_cplb_mgr)
64
65 [--SP]=( R7:4,P5:3 );
66
67 CC = R0 == 2;
68 IF CC JUMP .Ldcplb_write;
69
70 CC = R0 == 0;
71 IF !CC JUMP .Ldcplb_miss_compare;
72
73 /* ICPLB Miss Exception. We need to choose one of the
74 * currently-installed CPLBs, and replace it with one
75 * from the configuration table.
76 */
77
78 P4.L = (ICPLB_FAULT_ADDR & 0xFFFF);
79 P4.H = (ICPLB_FAULT_ADDR >> 16);
80
81 P1 = 16;
82 P5.L = _page_size_table;
83 P5.H = _page_size_table;
84
85 P0.L = (ICPLB_DATA0 & 0xFFFF);
86 P0.H = (ICPLB_DATA0 >> 16);
87 R4 = [P4]; /* Get faulting address*/
88 R6 = 64; /* Advance past the fault address, which*/
89 R6 = R6 + R4; /* we'll use if we find a match*/
90 R3 = ((16 << 8) | 2); /* Extract mask, bits 16 and 17.*/
91
92 R5 = 0;
93.Lisearch:
94
95 R1 = [P0-0x100]; /* Address for this CPLB */
96
97 R0 = [P0++]; /* Info for this CPLB*/
98 CC = BITTST(R0,0); /* Is the CPLB valid?*/
99 IF !CC JUMP .Lnomatch; /* Skip it, if not.*/
100 CC = R4 < R1(IU); /* If fault address less than page start*/
101 IF CC JUMP .Lnomatch; /* then skip this one.*/
102 R2 = EXTRACT(R0,R3.L) (Z); /* Get page size*/
103 P1 = R2;
104 P1 = P5 + (P1<<2); /* index into page-size table*/
105 R2 = [P1]; /* Get the page size*/
106 R1 = R1 + R2; /* and add to page start, to get page end*/
107 CC = R4 < R1(IU); /* and see whether fault addr is in page.*/
108 IF !CC R4 = R6; /* If so, advance the address and finish loop.*/
109 IF !CC JUMP .Lisearch_done;
110.Lnomatch:
111 /* Go around again*/
112 R5 += 1;
113 CC = BITTST(R5, 4); /* i.e CC = R5 >= 16*/
114 IF !CC JUMP .Lisearch;
115
116.Lisearch_done:
117 I0 = R4; /* Fault address we'll search for*/
118
119 /* set up pointers */
120 P0.L = (ICPLB_DATA0 & 0xFFFF);
121 P0.H = (ICPLB_DATA0 >> 16);
122
123 /* The replacement procedure for ICPLBs */
124
125 P4.L = (IMEM_CONTROL & 0xFFFF);
126 P4.H = (IMEM_CONTROL >> 16);
127
128 /* disable cplbs */
129 R5 = [P4]; /* Control Register*/
130 BITCLR(R5,ENICPLB_P);
131 CLI R1;
132 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
133 .align 8;
134 [P4] = R5;
135 SSYNC;
136 STI R1;
137
138 R1 = -1; /* end point comparison */
139 R3 = 16; /* counter */
140
141 /* Search through CPLBs for first non-locked entry */
142 /* Overwrite it by moving everyone else up by 1 */
143.Licheck_lock:
144 R0 = [P0++];
145 R3 = R3 + R1;
146 CC = R3 == R1;
147 IF CC JUMP .Lall_locked;
148 CC = BITTST(R0, 0); /* an invalid entry is good */
149 IF !CC JUMP .Lifound_victim;
150 CC = BITTST(R0,1); /* but a locked entry isn't */
151 IF CC JUMP .Licheck_lock;
152
153.Lifound_victim:
154#ifdef CONFIG_CPLB_INFO
155 R7 = [P0 - 0x104];
156 P2.L = _ipdt_table;
157 P2.H = _ipdt_table;
158 P3.L = _ipdt_swapcount_table;
159 P3.H = _ipdt_swapcount_table;
160 P3 += -4;
161.Licount:
162 R2 = [P2]; /* address from config table */
163 P2 += 8;
164 P3 += 8;
165 CC = R2==-1;
166 IF CC JUMP .Licount_done;
167 CC = R7==R2;
168 IF !CC JUMP .Licount;
169 R7 = [P3];
170 R7 += 1;
171 [P3] = R7;
172 CSYNC;
173.Licount_done:
174#endif
175 LC0=R3;
176 LSETUP(.Lis_move,.Lie_move) LC0;
177.Lis_move:
178 R0 = [P0];
179 [P0 - 4] = R0;
180 R0 = [P0 - 0x100];
181 [P0-0x104] = R0;
182.Lie_move:P0+=4;
183
184 /* We've made space in the ICPLB table, so that ICPLB15
185 * is now free to be overwritten. Next, we have to determine
186 * which CPLB we need to install, from the configuration
187 * table. This is a matter of getting the start-of-page
188 * addresses and page-lengths from the config table, and
189 * determining whether the fault address falls within that
190 * range.
191 */
192
193 P2.L = _ipdt_table;
194 P2.H = _ipdt_table;
195#ifdef CONFIG_CPLB_INFO
196 P3.L = _ipdt_swapcount_table;
197 P3.H = _ipdt_swapcount_table;
198 P3 += -8;
199#endif
200 P0.L = _page_size_table;
201 P0.H = _page_size_table;
202
203 /* Retrieve our fault address (which may have been advanced
204 * because the faulting instruction crossed a page boundary).
205 */
206
207 R0 = I0;
208
209 /* An extraction pattern, to get the page-size bits from
210 * the CPLB data entry. Bits 16-17, so two bits at posn 16.
211 */
212
213 R1 = ((16<<8)|2);
214.Linext: R4 = [P2++]; /* address from config table */
215 R2 = [P2++]; /* data from config table */
216#ifdef CONFIG_CPLB_INFO
217 P3 += 8;
218#endif
219
220 CC = R4 == -1; /* End of config table*/
221 IF CC JUMP .Lno_page_in_table;
222
223 /* See if failed address > start address */
224 CC = R4 <= R0(IU);
225 IF !CC JUMP .Linext;
226
227 /* extract page size (17:16)*/
228 R3 = EXTRACT(R2, R1.L) (Z);
229
230 /* add page size to addr to get range */
231
232 P5 = R3;
233 P5 = P0 + (P5 << 2); /* scaled, for int access*/
234 R3 = [P5];
235 R3 = R3 + R4;
236
237 /* See if failed address < (start address + page size) */
238 CC = R0 < R3(IU);
239 IF !CC JUMP .Linext;
240
241 /* We've found a CPLB in the config table that covers
242 * the faulting address, so install this CPLB into the
243 * last entry of the table.
244 */
245
246 P1.L = (ICPLB_DATA15 & 0xFFFF); /* ICPLB_DATA15 */
247 P1.H = (ICPLB_DATA15 >> 16);
248 [P1] = R2;
249 [P1-0x100] = R4;
250#ifdef CONFIG_CPLB_INFO
251 R3 = [P3];
252 R3 += 1;
253 [P3] = R3;
254#endif
255
256 /* P4 points to IMEM_CONTROL, and R5 contains its old
257 * value, after we disabled ICPLBS. Re-enable them.
258 */
259
260 BITSET(R5,ENICPLB_P);
261 CLI R2;
262 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
263 .align 8;
264 [P4] = R5;
265 SSYNC;
266 STI R2;
267
268 ( R7:4,P5:3 ) = [SP++];
269 R0 = CPLB_RELOADED;
270 RTS;
271
272/* FAILED CASES*/
273.Lno_page_in_table:
274 ( R7:4,P5:3 ) = [SP++];
275 R0 = CPLB_NO_ADDR_MATCH;
276 RTS;
277.Lall_locked:
278 ( R7:4,P5:3 ) = [SP++];
279 R0 = CPLB_NO_UNLOCKED;
280 RTS;
281.Lprot_violation:
282 ( R7:4,P5:3 ) = [SP++];
283 R0 = CPLB_PROT_VIOL;
284 RTS;
285
286.Ldcplb_write:
287
288 /* if a DCPLB is marked as write-back (CPLB_WT==0), and
289 * it is clean (CPLB_DIRTY==0), then a write to the
290 * CPLB's page triggers a protection violation. We have to
291 * mark the CPLB as dirty, to indicate that there are
292 * pending writes associated with the CPLB.
293 */
294
295 P4.L = (DCPLB_STATUS & 0xFFFF);
296 P4.H = (DCPLB_STATUS >> 16);
297 P3.L = (DCPLB_DATA0 & 0xFFFF);
298 P3.H = (DCPLB_DATA0 >> 16);
299 R5 = [P4];
300
301 /* A protection violation can be caused by more than just writes
302 * to a clean WB page, so we have to ensure that:
303 * - It's a write
304 * - to a clean WB page
305 * - and is allowed in the mode the access occurred.
306 */
307
308 CC = BITTST(R5, 16); /* ensure it was a write*/
309 IF !CC JUMP .Lprot_violation;
310
311 /* to check the rest, we have to retrieve the DCPLB.*/
312
313 /* The low half of DCPLB_STATUS is a bit mask*/
314
315 R2 = R5.L (Z); /* indicating which CPLB triggered the event.*/
316 R3 = 30; /* so we can use this to determine the offset*/
317 R2.L = SIGNBITS R2;
318 R2 = R2.L (Z); /* into the DCPLB table.*/
319 R3 = R3 - R2;
320 P4 = R3;
321 P3 = P3 + (P4<<2);
322 R3 = [P3]; /* Retrieve the CPLB*/
323
324 /* Now we can check whether it's a clean WB page*/
325
326 CC = BITTST(R3, 14); /* 0==WB, 1==WT*/
327 IF CC JUMP .Lprot_violation;
328 CC = BITTST(R3, 7); /* 0 == clean, 1 == dirty*/
329 IF CC JUMP .Lprot_violation;
330
331 /* Check whether the write is allowed in the mode that was active.*/
332
333 R2 = 1<<3; /* checking write in user mode*/
334 CC = BITTST(R5, 17); /* 0==was user, 1==was super*/
335 R5 = CC;
336 R2 <<= R5; /* if was super, check write in super mode*/
337 R2 = R3 & R2;
338 CC = R2 == 0;
339 IF CC JUMP .Lprot_violation;
340
341 /* It's a genuine write-to-clean-page.*/
342
343 BITSET(R3, 7); /* mark as dirty*/
344 [P3] = R3; /* and write back.*/
345 NOP;
346 CSYNC;
347 ( R7:4,P5:3 ) = [SP++];
348 R0 = CPLB_RELOADED;
349 RTS;
350
351.Ldcplb_miss_compare:
352
353 /* Data CPLB Miss event. We need to choose a CPLB to
354 * evict, and then locate a new CPLB to install from the
355 * config table, that covers the faulting address.
356 */
357
358 P1.L = (DCPLB_DATA15 & 0xFFFF);
359 P1.H = (DCPLB_DATA15 >> 16);
360
361 P4.L = (DCPLB_FAULT_ADDR & 0xFFFF);
362 P4.H = (DCPLB_FAULT_ADDR >> 16);
363 R4 = [P4];
364 I0 = R4;
365
366 /* The replacement procedure for DCPLBs*/
367
368 R6 = R1; /* Save for later*/
369
370 /* Turn off CPLBs while we work.*/
371 P4.L = (DMEM_CONTROL & 0xFFFF);
372 P4.H = (DMEM_CONTROL >> 16);
373 R5 = [P4];
374 BITCLR(R5,ENDCPLB_P);
375 CLI R0;
376 SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
377 .align 8;
378 [P4] = R5;
379 SSYNC;
380 STI R0;
381
382 /* Start looking for a CPLB to evict. Our order of preference
383 * is: invalid CPLBs, clean CPLBs, dirty CPLBs. Locked CPLBs
384 * are no good.
385 */
386
387 I1.L = (DCPLB_DATA0 & 0xFFFF);
388 I1.H = (DCPLB_DATA0 >> 16);
389 P1 = 2;
390 P2 = 16;
391 I2.L = _dcplb_preference;
392 I2.H = _dcplb_preference;
393 LSETUP(.Lsdsearch1, .Ledsearch1) LC0 = P1;
394.Lsdsearch1:
395 R0 = [I2++]; /* Get the bits we're interested in*/
396 P0 = I1; /* Go back to start of table*/
397 LSETUP (.Lsdsearch2, .Ledsearch2) LC1 = P2;
398.Lsdsearch2:
399 R1 = [P0++]; /* Fetch each installed CPLB in turn*/
400 R2 = R1 & R0; /* and test for interesting bits.*/
401 CC = R2 == 0; /* If none are set, it'll do.*/
402 IF !CC JUMP .Lskip_stack_check;
403
404 R2 = [P0 - 0x104]; /* R2 - PageStart */
405 P3.L = _page_size_table; /* retrieve end address */
406 P3.H = _page_size_table; /* retrieve end address */
407 R3 = 0x1002; /* 16th - position, 2 bits -length */
408#ifdef ANOMALY_05000209
409 nop; /* Anomaly 05000209 */
410#endif
411 R7 = EXTRACT(R1,R3.l);
412 R7 = R7 << 2; /* Page size index offset */
413 P5 = R7;
414 P3 = P3 + P5;
415 R7 = [P3]; /* page size in bytes */
416
417 R7 = R2 + R7; /* R7 - PageEnd */
418 R4 = SP; /* Test SP is in range */
419
420 CC = R7 < R4; /* if PageEnd < SP */
421 IF CC JUMP .Ldfound_victim;
422 R3 = 0x284; /* stack length from the start of the trap
423 * to this point, plus 20 stack
424 * locations for future modifications
425 */
426 R4 = R4 + R3;
427 CC = R4 < R2; /* if SP + stacklen < PageStart */
428 IF CC JUMP .Ldfound_victim;
429.Lskip_stack_check:
430
431.Ledsearch2: NOP;
432.Ledsearch1: NOP;
433
434 /* If we got here, we didn't find a DCPLB we considered
435 * replaceable, which means all of them were locked.
436 */
437
438 JUMP .Lall_locked;
439.Ldfound_victim:
440
441#ifdef CONFIG_CPLB_INFO
442 R7 = [P0 - 0x104];
443 P2.L = _dpdt_table;
444 P2.H = _dpdt_table;
445 P3.L = _dpdt_swapcount_table;
446 P3.H = _dpdt_swapcount_table;
447 P3 += -4;
448.Ldicount:
449 R2 = [P2];
450 P2 += 8;
451 P3 += 8;
452 CC = R2==-1;
453 IF CC JUMP .Ldicount_done;
454 CC = R7==R2;
455 IF !CC JUMP .Ldicount;
456 R7 = [P3];
457 R7 += 1;
458 [P3] = R7;
459.Ldicount_done:
460#endif
461
462 /* Clean down the hardware loops*/
463 R2 = 0;
464 LC1 = R2;
465 LC0 = R2;
466
467 /* There's a suitable victim in [P0-4] (because we've
468 * advanced already).
469 */
470
471.LDdoverwrite:
472
473 /* [P0-4] is a suitable victim CPLB, so we want to
474 * overwrite it by moving all the following CPLBs
475 * one space closer to the start.
476 */
477
478 R1.L = (DCPLB_DATA16 & 0xFFFF); /* DCPLB_DATA15 + 4 */
479 R1.H = (DCPLB_DATA16 >> 16);
480 R0 = P0;
481
482 /* If the victim happens to be in DCPLB15,
483 * we don't need to move anything.
484 */
485
486 CC = R1 == R0;
487 IF CC JUMP .Lde_moved;
488 R1 = R1 - R0;
489 R1 >>= 2;
490 P1 = R1;
491 LSETUP(.Lds_move, .Lde_move) LC0=P1;
492.Lds_move:
493 R0 = [P0++]; /* move data */
494 [P0 - 8] = R0;
495 R0 = [P0-0x104] /* move address */
496.Lde_move: [P0-0x108] = R0;
497
498 /* We've now made space in DCPLB15 for the new CPLB to be
499 * installed. The next stage is to locate a CPLB in the
500 * config table that covers the faulting address.
501 */
502
503.Lde_moved:NOP;
504 R0 = I0; /* Our faulting address */
505
506 P2.L = _dpdt_table;
507 P2.H = _dpdt_table;
508#ifdef CONFIG_CPLB_INFO
509 P3.L = _dpdt_swapcount_table;
510 P3.H = _dpdt_swapcount_table;
511 P3 += -8;
512#endif
513
514 P1.L = _page_size_table;
515 P1.H = _page_size_table;
516
517 /* An extraction pattern, to retrieve bits 17:16.*/
518
519 R1 = (16<<8)|2;
520.Ldnext: R4 = [P2++]; /* address */
521 R2 = [P2++]; /* data */
522#ifdef CONFIG_CPLB_INFO
523 P3 += 8;
524#endif
525
526 CC = R4 == -1;
527 IF CC JUMP .Lno_page_in_table;
528
529 /* See if failed address > start address */
530 CC = R4 <= R0(IU);
531 IF !CC JUMP .Ldnext;
532
533 /* extract page size (17:16)*/
534 R3 = EXTRACT(R2, R1.L) (Z);
535
536 /* add page size to addr to get range */
537
538 P5 = R3;
539 P5 = P1 + (P5 << 2);
540 R3 = [P5];
541 R3 = R3 + R4;
542
543 /* See if failed address < (start address + page size) */
544 CC = R0 < R3(IU);
545 IF !CC JUMP .Ldnext;
546
547 /* We've found the CPLB that should be installed, so
548 * write it into CPLB15, masking off any caching bits
549 * if necessary.
550 */
551
552 P1.L = (DCPLB_DATA15 & 0xFFFF);
553 P1.H = (DCPLB_DATA15 >> 16);
554
555 /* If the DCPLB has cache bits set, but caching hasn't
556 * been enabled, then we want to mask off the cache-in-L1
557 * bit before installing. Moreover, if caching is off, we
558 * also want to ensure that the DCPLB has WT mode set, rather
559 * than WB, since WB pages still trigger first-write exceptions
560 * even when caching is off and the page isn't marked as
561 * cacheable. Finally, we could mark the page as clean, not dirty,
562 * but we choose to leave that decision to the user; if the user
563 * chooses to have a CPLB pre-defined as dirty, then they always
564 * pay the cost of flushing during eviction, but don't pay the
565 * cost of first-write exceptions to mark the page as dirty.
566 */
567
568#ifdef CONFIG_BLKFIN_WT
569 BITSET(R6, 14); /* Set WT*/
570#endif
571
572 [P1] = R2;
573 [P1-0x100] = R4;
574#ifdef CONFIG_CPLB_INFO
575 R3 = [P3];
576 R3 += 1;
577 [P3] = R3;
578#endif
579
580 /* We've installed the CPLB, so re-enable CPLBs. P4
581 * points to DMEM_CONTROL, and R5 is the value we
582 * last wrote to it, when we were disabling CPLBs.
583 */
584
585 BITSET(R5,ENDCPLB_P);
586 CLI R2;
587 .align 8;
588 [P4] = R5;
589 SSYNC;
590 STI R2;
591
592 ( R7:4,P5:3 ) = [SP++];
593 R0 = CPLB_RELOADED;
594 RTS;
595
596.data
597.align 4;
598_page_size_table:
599.byte4 0x00000400; /* 1K */
600.byte4 0x00001000; /* 4K */
601.byte4 0x00100000; /* 1M */
602.byte4 0x00400000; /* 4M */
603
604.align 4;
605_dcplb_preference:
606.byte4 0x00000001; /* valid bit */
607.byte4 0x00000002; /* lock bit */
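
The step both the ICPLB and DCPLB replacement paths share is deciding whether a configuration-table entry covers the faulting address: bits 17:16 of the data word index _page_size_table, and an entry matches when the fault falls inside [address, address + page size). A C model of that lookup follows as a sketch only; find_covering_cplb() and its parameters are hypothetical names, and the matching pair is what the assembly writes into ICPLB_ADDR15/ICPLB_DATA15 or DCPLB_ADDR15/DCPLB_DATA15.

    /* Mirrors _page_size_table: 1K, 4K, 1M, 4M. */
    static const unsigned long page_size_bytes[4] = {
    	0x00000400, 0x00001000, 0x00100000, 0x00400000
    };

    /* "table" is the {address, data} pair list terminated by address == -1,
     * as walked by .Linext (ICPLB) and .Ldnext (DCPLB) above.
     */
    static int find_covering_cplb(const unsigned long *table,
    			      unsigned long fault_addr,
    			      unsigned long *addr_out, unsigned long *data_out)
    {
    	while (*table != (unsigned long)-1) {
    		unsigned long addr = table[0];
    		unsigned long data = table[1];
    		unsigned long size = page_size_bytes[(data >> 16) & 3];

    		if (fault_addr >= addr && fault_addr < addr + size) {
    			*addr_out = addr;	/* install this pair in entry 15 */
    			*data_out = data;
    			return 1;
    		}
    		table += 2;
    	}
    	return 0;	/* no match: the caller returns CPLB_NO_ADDR_MATCH */
    }
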
diff --git a/arch/blackfin/mach-common/dpmc.S b/arch/blackfin/mach-common/dpmc.S
new file mode 100644
index 000000000000..97cdcd6a00d4
--- /dev/null
+++ b/arch/blackfin/mach-common/dpmc.S
@@ -0,0 +1,418 @@
1/*
2 * File: arch/blackfin/mach-common/dpmc.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created: ?
  7 * Description:  Watchdog timer and power management helpers
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/linkage.h>
31#include <asm/blackfin.h>
32#include <asm/mach/irq.h>
33
34.text
35
36ENTRY(_unmask_wdog_wakeup_evt)
37 [--SP] = ( R7:0, P5:0 );
38#if defined(CONFIG_BF561)
39 P0.H = hi(SICA_IWR1);
40 P0.L = lo(SICA_IWR1);
41#else
42 P0.h = (SIC_IWR >> 16);
43 P0.l = (SIC_IWR & 0xFFFF);
44#endif
45 R7 = [P0];
46#if defined(CONFIG_BF561)
47 BITSET(R7, 27);
48#else
49 BITSET(R7,(IRQ_WATCH - IVG7));
50#endif
51 [P0] = R7;
52 SSYNC;
53
54 ( R7:0, P5:0 ) = [SP++];
55 RTS;
56
57.LWRITE_TO_STAT:
 58 /* When the watchdog timer is enabled, a write to STAT will load the
59 * contents of CNT to STAT
60 */
61 R7 = 0x0000(z);
62#if defined(CONFIG_BF561)
63 P0.h = (WDOGA_STAT >> 16);
64 P0.l = (WDOGA_STAT & 0xFFFF);
65#else
66 P0.h = (WDOG_STAT >> 16);
67 P0.l = (WDOG_STAT & 0xFFFF);
68#endif
69 [P0] = R7;
70 SSYNC;
71 JUMP .LSKIP_WRITE_TO_STAT;
72
73ENTRY(_program_wdog_timer)
74 [--SP] = ( R7:0, P5:0 );
75#if defined(CONFIG_BF561)
76 P0.h = (WDOGA_CNT >> 16);
77 P0.l = (WDOGA_CNT & 0xFFFF);
78#else
79 P0.h = (WDOG_CNT >> 16);
80 P0.l = (WDOG_CNT & 0xFFFF);
81#endif
82 [P0] = R0;
83 SSYNC;
84
85#if defined(CONFIG_BF561)
86 P0.h = (WDOGA_CTL >> 16);
87 P0.l = (WDOGA_CTL & 0xFFFF);
88#else
89 P0.h = (WDOG_CTL >> 16);
90 P0.l = (WDOG_CTL & 0xFFFF);
91#endif
92 R7 = W[P0](Z);
93 CC = BITTST(R7,1);
94 if !CC JUMP .LWRITE_TO_STAT;
95 CC = BITTST(R7,2);
96 if !CC JUMP .LWRITE_TO_STAT;
97
98.LSKIP_WRITE_TO_STAT:
99#if defined(CONFIG_BF561)
100 P0.h = (WDOGA_CTL >> 16);
101 P0.l = (WDOGA_CTL & 0xFFFF);
102#else
103 P0.h = (WDOG_CTL >> 16);
104 P0.l = (WDOG_CTL & 0xFFFF);
105#endif
106 R7 = W[P0](Z);
107 BITCLR(R7,1); /* Enable GP event */
108 BITSET(R7,2);
109 W[P0] = R7.L;
110 SSYNC;
111 NOP;
112
113 R7 = W[P0](Z);
114 BITCLR(R7,4); /* Enable the wdog counter */
115 W[P0] = R7.L;
116 SSYNC;
117
118 ( R7:0, P5:0 ) = [SP++];
119 RTS;
120
121ENTRY(_clear_wdog_wakeup_evt)
122 [--SP] = ( R7:0, P5:0 );
123
124#if defined(CONFIG_BF561)
125 P0.h = (WDOGA_CTL >> 16);
126 P0.l = (WDOGA_CTL & 0xFFFF);
127#else
128 P0.h = (WDOG_CTL >> 16);
129 P0.l = (WDOG_CTL & 0xFFFF);
130#endif
131 R7 = 0x0AD6(Z);
132 W[P0] = R7.L;
133 SSYNC;
134
135 R7 = W[P0](Z);
136 BITSET(R7,15);
137 W[P0] = R7.L;
138 SSYNC;
139
140 R7 = W[P0](Z);
141 BITSET(R7,1);
142 BITSET(R7,2);
143 W[P0] = R7.L;
144 SSYNC;
145
146 ( R7:0, P5:0 ) = [SP++];
147 RTS;
148
149ENTRY(_disable_wdog_timer)
150 [--SP] = ( R7:0, P5:0 );
151#if defined(CONFIG_BF561)
152 P0.h = (WDOGA_CTL >> 16);
153 P0.l = (WDOGA_CTL & 0xFFFF);
154#else
155 P0.h = (WDOG_CTL >> 16);
156 P0.l = (WDOG_CTL & 0xFFFF);
157#endif
158 R7 = 0xAD6(Z);
159 W[P0] = R7.L;
160 SSYNC;
161 ( R7:0, P5:0 ) = [SP++];
162 RTS;
163
164#if !defined(CONFIG_BF561)
165
166.section .l1.text
167
168ENTRY(_sleep_mode)
169 [--SP] = ( R7:0, P5:0 );
170 [--SP] = RETS;
171
172 call _set_sic_iwr;
173
174 R0 = 0xFFFF (Z);
175 call _set_rtc_istat
176
177 P0.H = hi(PLL_CTL);
178 P0.L = lo(PLL_CTL);
179 R1 = W[P0](z);
180 BITSET (R1, 3);
181 W[P0] = R1.L;
182
183 CLI R2;
184 SSYNC;
185 IDLE;
186 STI R2;
187
188 call _test_pll_locked;
189
190 R0 = IWR_ENABLE(0);
191 call _set_sic_iwr;
192
193 P0.H = hi(PLL_CTL);
194 P0.L = lo(PLL_CTL);
195 R7 = w[p0](z);
196 BITCLR (R7, 3);
197 BITCLR (R7, 5);
198 w[p0] = R7.L;
199 IDLE;
200 call _test_pll_locked;
201
202 RETS = [SP++];
203 ( R7:0, P5:0 ) = [SP++];
204 RTS;
205
206ENTRY(_hibernate_mode)
207 [--SP] = ( R7:0, P5:0 );
208 [--SP] = RETS;
209
210 call _set_sic_iwr;
211
212 R0 = 0xFFFF (Z);
213 call _set_rtc_istat
214
215 P0.H = hi(VR_CTL);
216 P0.L = lo(VR_CTL);
217 R1 = W[P0](z);
218 BITSET (R1, 8);
219 BITCLR (R1, 0);
220 BITCLR (R1, 1);
221 W[P0] = R1.L;
222 SSYNC;
223
224 CLI R2;
225 IDLE;
226
227 /* Actually, adding anything may not be necessary...SDRAM contents
228 * are lost
229 */
230
231ENTRY(_deep_sleep)
232 [--SP] = ( R7:0, P5:0 );
233 [--SP] = RETS;
234
235 CLI R4;
236
237 call _set_sic_iwr;
238
239 call _set_sdram_srfs;
240
241 /* Clear all the interrupts; the bits are sticky */
242 R0 = 0xFFFF (Z);
243 call _set_rtc_istat
244
245 P0.H = hi(PLL_CTL);
246 P0.L = lo(PLL_CTL);
247 R0 = W[P0](z);
248 BITSET (R0, 5);
249 W[P0] = R0.L;
250
251 call _test_pll_locked;
252
253 SSYNC;
254 IDLE;
255
256 call _unset_sdram_srfs;
257
258 call _test_pll_locked;
259
260 R0 = IWR_ENABLE(0);
261 call _set_sic_iwr;
262
263 P0.H = hi(PLL_CTL);
264 P0.L = lo(PLL_CTL);
265 R0 = w[p0](z);
266 BITCLR (R0, 3);
267 BITCLR (R0, 5);
268 BITCLR (R0, 8);
269 w[p0] = R0;
270 IDLE;
271 call _test_pll_locked;
272
273 STI R4;
274
275 RETS = [SP++];
276 ( R7:0, P5:0 ) = [SP++];
277 RTS;
278
279ENTRY(_sleep_deeper)
280 [--SP] = ( R7:0, P5:0 );
281 [--SP] = RETS;
282
283 CLI R4;
284
285 P3 = R0;
286 R0 = IWR_ENABLE(0);
287 call _set_sic_iwr;
288 call _set_sdram_srfs;
289
290 /* Clear all the interrupts; the bits are sticky */
291 R0 = 0xFFFF (Z);
292 call _set_rtc_istat
293
294 P0.H = hi(PLL_DIV);
295 P0.L = lo(PLL_DIV);
296 R6 = W[P0](z);
297 R0.L = 0xF;
298 W[P0] = R0.l;
299
300 P0.H = hi(PLL_CTL);
301 P0.L = lo(PLL_CTL);
302 R5 = W[P0](z);
303 R0.L = (MIN_VC/CONFIG_CLKIN_HZ) << 9;
304 W[P0] = R0.l;
305
306 SSYNC;
307 IDLE;
308
309 call _test_pll_locked;
310
311 P0.H = hi(VR_CTL);
312 P0.L = lo(VR_CTL);
313 R7 = W[P0](z);
314 R1 = 0x6;
315 R1 <<= 16;
316 R2 = 0x0404(Z);
317 R1 = R1|R2;
318
319 R2 = DEPOSIT(R7, R1);
320 W[P0] = R2;
321
322 SSYNC;
323 IDLE;
324
325 call _test_pll_locked;
326
327 P0.H = hi(PLL_CTL);
328 P0.L = lo(PLL_CTL);
329 R0 = W[P0](z);
330 BITSET (R0, 3);
331 W[P0] = R0.L;
332
333 R0 = P3;
334 call _set_sic_iwr;
335
336 SSYNC;
337 IDLE;
338
339 call _test_pll_locked;
340
341 R0 = IWR_ENABLE(0);
342 call _set_sic_iwr;
343
344 P0.H = hi(VR_CTL);
345 P0.L = lo(VR_CTL);
346 W[P0]= R7;
347
348 SSYNC;
349 IDLE;
350
351 call _test_pll_locked;
352
353 P0.H = hi(PLL_DIV);
354 P0.L = lo(PLL_DIV);
355 W[P0]= R6;
356
357 P0.H = hi(PLL_CTL);
358 P0.L = lo(PLL_CTL);
359 w[p0] = R5;
360 IDLE;
361 call _test_pll_locked;
362
363 call _unset_sdram_srfs;
364
365 STI R4;
366
367 RETS = [SP++];
368 ( R7:0, P5:0 ) = [SP++];
369 RTS;
370
371ENTRY(_set_sdram_srfs)
372 /* set the sdram to self refresh mode */
373 P0.H = hi(EBIU_SDGCTL);
374 P0.L = lo(EBIU_SDGCTL);
375 R2 = [P0];
376 R3.H = hi(SRFS);
377 R3.L = lo(SRFS);
378 R2 = R2|R3;
379 [P0] = R2;
380 ssync;
381 RTS;
382
383ENTRY(_unset_sdram_srfs)
384 /* set the sdram out of self refresh mode */
385 P0.H = hi(EBIU_SDGCTL);
386 P0.L = lo(EBIU_SDGCTL);
387 R2 = [P0];
388 R3.H = hi(SRFS);
389 R3.L = lo(SRFS);
390 R3 = ~R3;
391 R2 = R2&R3;
392 [P0] = R2;
393 ssync;
394 RTS;
395
396ENTRY(_set_sic_iwr)
397 P0.H = hi(SIC_IWR);
398 P0.L = lo(SIC_IWR);
399 [P0] = R0;
400 SSYNC;
401 RTS;
402
403ENTRY(_set_rtc_istat)
404 P0.H = hi(RTC_ISTAT);
405 P0.L = lo(RTC_ISTAT);
406 w[P0] = R0.L;
407 SSYNC;
408 RTS;
409
410ENTRY(_test_pll_locked)
411 P0.H = hi(PLL_STAT);
412 P0.L = lo(PLL_STAT);
4131:
414 R0 = W[P0] (Z);
415 CC = BITTST(R0,5);
416 IF !CC JUMP 1b;
417 RTS;
418#endif
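
For reference, _program_wdog_timer above boils down to: load the new count into WDOG_CNT, write WDOG_STAT (which, per the comment above, reloads it from CNT when the watchdog is already enabled), select the general-purpose interrupt event by clearing bit 1 and setting bit 2 of WDOG_CTL, and finally clear bit 4 so the counter runs. A rough C model follows, illustrative only; the MMR pointers are hypothetical parameters standing for WDOG_CNT, WDOG_CTL and WDOG_STAT (or their WDOGA_* equivalents on the BF561).

    static void program_wdog_timer_model(unsigned long count,
    				     volatile unsigned long *wdog_cnt,
    				     volatile unsigned short *wdog_ctl,
    				     volatile unsigned long *wdog_stat)
    {
    	unsigned short ctl;

    	*wdog_cnt = count;			/* load the new count */

    	/* As in the assembly: write STAT unless bits 1 and 2 are both set. */
    	if (!(*wdog_ctl & (1 << 1)) || !(*wdog_ctl & (1 << 2)))
    		*wdog_stat = 0;

    	ctl = *wdog_ctl;
    	ctl &= ~(1 << 1);			/* bits 2:1 = 10: general-purpose */
    	ctl |= (1 << 2);			/* interrupt event                */
    	*wdog_ctl = ctl;

    	ctl = *wdog_ctl;
    	ctl &= ~(1 << 4);			/* clear bit 4: let the counter run */
    	*wdog_ctl = ctl;
    }
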
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
new file mode 100644
index 000000000000..8eb0a9023482
--- /dev/null
+++ b/arch/blackfin/mach-common/entry.S
@@ -0,0 +1,1207 @@
1/*
2 * File: arch/blackfin/mach-common/entry.S
3 * Based on:
4 * Author: Linus Torvalds
5 *
6 * Created: ?
7 * Description: contains the system-call and fault low-level handling routines.
8 * This also contains the timer-interrupt handler, as well as all
9 * interrupts and faults that can result in a task-switch.
10 *
11 * Modified:
12 * Copyright 2004-2006 Analog Devices Inc.
13 *
14 * Bugs: Enter bugs at http://blackfin.uclinux.org/
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, see the file COPYING, or write
28 * to the Free Software Foundation, Inc.,
29 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
30 */
31
32/*
33 * 25-Dec-2004 - LG Soft India
 34 * 1. Fix in return_from_int to make sure any pending
 35 * system call in ILAT for this process gets
 36 * executed; otherwise, if a context switch happens, the
 37 * system call of the first process (i.e. in ILAT) will be
 38 * carried forward to the switched process.
39 * 2. Removed Constant references for the following
40 * a. IPEND
41 * b. EXCAUSE mask
42 * c. PAGE Mask
43 */
44
45/*
46 * NOTE: This code handles signal-recognition, which happens every time
47 * after a timer-interrupt and after each system call.
48 */
49
50
51#include <linux/linkage.h>
52#include <asm/blackfin.h>
53#include <asm/unistd.h>
54#include <asm/errno.h>
55#include <asm/thread_info.h> /* TIF_NEED_RESCHED */
56#include <asm/asm-offsets.h>
57
58#include <asm/mach-common/context.S>
59
60#ifdef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE
61 /*
62 * TODO: this should be proper save/restore, but for now
63 * we'll just cheat and use 0x1/0x13
64 */
65# define DEBUG_START_HWTRACE \
66 P5.l = LO(TBUFCTL); \
67 P5.h = HI(TBUFCTL); \
68 R7 = 0x13; \
69 [P5] = R7;
70# define DEBUG_STOP_HWTRACE \
71 P5.l = LO(TBUFCTL); \
72 P5.h = HI(TBUFCTL); \
73 R7 = 0x01; \
74 [P5] = R7;
75#else
76# define DEBUG_START_HWTRACE
77# define DEBUG_STOP_HWTRACE
78#endif
79
80#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
81.section .l1.text
82#else
83.text
84#endif
85
86/* Slightly simplified and streamlined entry point for CPLB misses.
87 * This one does not lower the level to IRQ5, and thus can be used to
88 * patch up CPLB misses on the kernel stack.
89 */
90ENTRY(_ex_dcplb)
91#if defined(ANOMALY_05000261)
92 /*
93 * Work around an anomaly: if we see a new DCPLB fault, return
94 * without doing anything. Then, if we get the same fault again,
95 * handle it.
96 */
97 p5.l = _last_cplb_fault_retx;
98 p5.h = _last_cplb_fault_retx;
99 r7 = [p5];
100 r6 = retx;
101 [p5] = r6;
102 cc = r6 == r7;
103 if !cc jump _return_from_exception;
104 /* fall through */
105#endif
106
107ENTRY(_ex_icplb)
108 (R7:6,P5:4) = [sp++];
109 ASTAT = [sp++];
110 SAVE_ALL_SYS
111 call __cplb_hdr;
112 DEBUG_START_HWTRACE
113 RESTORE_ALL_SYS
114 SP = RETN;
115 rtx;
116
117ENTRY(_ex_spinlock)
118 /* Transform this into a syscall - twiddle the syscall vector. */
119 p5.l = lo(EVT15);
120 p5.h = hi(EVT15);
121 r7.l = _spinlock_bh;
122 r7.h = _spinlock_bh;
123 [p5] = r7;
124 csync;
125 /* Fall through. */
126
127ENTRY(_ex_syscall)
128 DEBUG_START_HWTRACE
129 (R7:6,P5:4) = [sp++];
130 ASTAT = [sp++];
131 raise 15; /* invoked by TRAP #0, for sys call */
132 sp = retn;
133 rtx
134
135ENTRY(_spinlock_bh)
136 SAVE_ALL_SYS
137 /* To end up here, vector 15 was changed - so we have to change it
138 * back.
139 */
140 p0.l = lo(EVT15);
141 p0.h = hi(EVT15);
142 p1.l = _evt_system_call;
143 p1.h = _evt_system_call;
144 [p0] = p1;
145 csync;
146 r0 = [sp + PT_R0];
147 sp += -12;
148 call _sys_bfin_spinlock;
149 sp += 12;
150 [SP + PT_R0] = R0;
151 RESTORE_ALL_SYS
152 rti;
153
154ENTRY(_ex_soft_bp)
155 r7 = retx;
156 r7 += -2;
157 retx = r7;
158 jump.s _ex_trap_c;
159
160ENTRY(_ex_single_step)
161 r7 = retx;
162 r6 = reti;
163 cc = r7 == r6;
164 if cc jump _return_from_exception
165 r7 = syscfg;
166 bitclr (r7, 0);
167 syscfg = R7;
168
169 p5.l = lo(IPEND);
170 p5.h = hi(IPEND);
171 r6 = [p5];
172 cc = bittst(r6, 5);
173 if !cc jump _ex_trap_c;
174 p4.l = lo(EVT5);
175 p4.h = hi(EVT5);
176 r6.h = _exception_to_level5;
177 r6.l = _exception_to_level5;
178 r7 = [p4];
179 cc = r6 == r7;
180 if !cc jump _ex_trap_c;
181
182_return_from_exception:
183 DEBUG_START_HWTRACE
184 (R7:6,P5:4) = [sp++];
185 ASTAT = [sp++];
186 sp = retn;
187 rtx;
188
189ENTRY(_handle_bad_cplb)
190 /* To get here, we just tried and failed to change a CPLB,
191 * so handle things in trap_c (C code) by lowering to
192 * IRQ5, just like we normally do. Since this is not a
193 * "normal" return path, we have to do a lot of stuff to
194 * the stack to get ready so we can fall through - we
195 * need to make a CPLB exception look like a normal exception.
196 */
197
198 DEBUG_START_HWTRACE
199 RESTORE_ALL_SYS
200 [--sp] = ASTAT;
201 [--sp] = (R7:6, P5:4);
202
203ENTRY(_ex_trap_c)
204 /* Call C code (trap_c) to handle the exception, which most
205 * likely involves sending a signal to the current process.
206 * To avoid double faults, lower our priority to IRQ5 first.
207 */
208 P5.h = _exception_to_level5;
209 P5.l = _exception_to_level5;
210 p4.l = lo(EVT5);
211 p4.h = hi(EVT5);
212 [p4] = p5;
213 csync;
214
215 /* Disable all interrupts, but make sure level 5 is enabled so
216 * we can switch to that level. Save the old mask. */
217 cli r6;
218 p4.l = _excpt_saved_imask;
219 p4.h = _excpt_saved_imask;
220 [p4] = r6;
221 r6 = 0x3f;
222 sti r6;
223
224 /* Save the excause into a circular buffer, in case the instruction
225 * which caused this exception causes others.
226 */
227 P5.l = _in_ptr_excause;
228 P5.h = _in_ptr_excause;
229 R7 = [P5];
230 R7 += 4;
231 R6 = 0xF;
232 R7 = R7 & R6;
233 [P5] = R7;
234 R6.l = _excause_circ_buf;
235 R6.h = _excause_circ_buf;
236 R7 = R7 + R6;
237 p5 = R7;
238 R6 = SEQSTAT;
239 [P5] = R6;
240
241 DEBUG_START_HWTRACE
242 (R7:6,P5:4) = [sp++];
243 ASTAT = [sp++];
244 SP = RETN;
245 raise 5;
246 rtx;
247
248ENTRY(_exception_to_level5)
249 SAVE_ALL_SYS
250
251 /* Restore interrupt mask. We haven't pushed RETI, so this
252 * doesn't enable interrupts until we return from this handler. */
253 p4.l = _excpt_saved_imask;
254 p4.h = _excpt_saved_imask;
255 r6 = [p4];
256 sti r6;
257
258 /* Restore the hardware error vector. */
259 P5.h = _evt_ivhw;
260 P5.l = _evt_ivhw;
261 p4.l = lo(EVT5);
262 p4.h = hi(EVT5);
263 [p4] = p5;
264 csync;
265
266 p2.l = lo(IPEND);
267 p2.h = hi(IPEND);
268 csync;
269 r0 = [p2]; /* Read current IPEND */
270 [sp + PT_IPEND] = r0; /* Store IPEND */
271
272 /* Pop the excause from the circular buffer and push it on the stack
273 * (in the right place - if you change the location of SEQSTAT, you
274 * must change this offset).
275 */
276.L_excep_to_5_again:
277 P5.l = _out_ptr_excause;
278 P5.h = _out_ptr_excause;
279 R7 = [P5];
280 R7 += 4;
281 R6 = 0xF;
282 R7 = R7 & R6;
283 [P5] = R7;
284 R6.l = _excause_circ_buf;
285 R6.h = _excause_circ_buf;
286 R7 = R7 + R6;
287 P5 = R7;
288 R1 = [P5];
289 [SP + 8] = r1;
290
291 r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
292 SP += -12;
293 call _trap_c;
294 SP += 12;
295
296 /* See if anything else is in the exception buffer
297 * if there is, process it
298 */
299 P5.l = _out_ptr_excause;
300 P5.h = _out_ptr_excause;
301 P4.l = _in_ptr_excause;
302 P4.h = _in_ptr_excause;
303 R6 = [P5];
304 R7 = [P4];
305 CC = R6 == R7;
306 if ! CC JUMP .L_excep_to_5_again
307
308 call _ret_from_exception;
309 RESTORE_ALL_SYS
310 rti;
311
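
The two routines above cooperate through a small ring of saved SEQSTAT values: _ex_trap_c pushes the EXCAUSE of each exception, and _exception_to_level5 keeps handing entries to trap_c until the ring is drained. Both pointers are byte offsets that advance by 4 and wrap with "& 0xF", so the ring holds four words. A C model of just that bookkeeping follows, illustrative only; the names mirror _in_ptr_excause, _out_ptr_excause and _excause_circ_buf, and the handler callback stands in for the call to _trap_c.

    static unsigned long excause_circ_buf[4];
    static unsigned long in_ptr_excause, out_ptr_excause;	/* byte offsets */

    /* _ex_trap_c side: record the SEQSTAT of the latest exception. */
    static void push_excause(unsigned long seqstat)
    {
    	in_ptr_excause = (in_ptr_excause + 4) & 0xF;
    	excause_circ_buf[in_ptr_excause / 4] = seqstat;
    }

    /* _exception_to_level5 side: process entries until out catches up with in. */
    static void drain_excauses(void (*handler)(unsigned long seqstat))
    {
    	do {
    		out_ptr_excause = (out_ptr_excause + 4) & 0xF;
    		handler(excause_circ_buf[out_ptr_excause / 4]);
    	} while (out_ptr_excause != in_ptr_excause);
    }
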
312ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
313 /* Since the kernel stack can be anywhere, it's not guaranteed to be
314 * covered by a CPLB. Switch to an exception stack; use RETN as a
315 * scratch register (for want of a better option).
316 */
317 retn = sp;
318 sp.l = _exception_stack_top;
319 sp.h = _exception_stack_top;
320 /* Try to deal with syscalls quickly. */
321 [--sp] = ASTAT;
322 [--sp] = (R7:6, P5:4);
323 DEBUG_STOP_HWTRACE
324 r7 = SEQSTAT; /* reason code is in bit 5:0 */
325 r6.l = lo(SEQSTAT_EXCAUSE);
326 r6.h = hi(SEQSTAT_EXCAUSE);
327 r7 = r7 & r6;
328 p5.h = _extable;
329 p5.l = _extable;
330 p4 = r7;
331 p5 = p5 + (p4 << 2);
332 p4 = [p5];
333 jump (p4);
334
335.Lbadsys:
336 r7 = -ENOSYS; /* sign-extending enough */
337 [sp + PT_R0] = r7; /* return value from system call */
338 jump .Lsyscall_really_exit;
339
340ENTRY(_kernel_execve)
341 link SIZEOF_PTREGS;
342 p0 = sp;
343 r3 = SIZEOF_PTREGS / 4;
344 r4 = 0(x);
3450:
346 [p0++] = r4;
347 r3 += -1;
348 cc = r3 == 0;
349 if !cc jump 0b (bp);
350
351 p0 = sp;
352 sp += -16;
353 [sp + 12] = p0;
354 call _do_execve;
355 SP += 16;
356 cc = r0 == 0;
357 if ! cc jump 1f;
358 /* Success. Copy our temporary pt_regs to the top of the kernel
359 * stack and do a normal exception return.
360 */
361 r1 = sp;
362 r0 = (-KERNEL_STACK_SIZE) (x);
363 r1 = r1 & r0;
364 p2 = r1;
365 p3 = [p2];
366 r0 = KERNEL_STACK_SIZE - 4 (z);
367 p1 = r0;
368 p1 = p1 + p2;
369
370 p0 = fp;
371 r4 = [p0--];
372 r3 = SIZEOF_PTREGS / 4;
3730:
374 r4 = [p0--];
375 [p1--] = r4;
376 r3 += -1;
377 cc = r3 == 0;
378 if ! cc jump 0b (bp);
379
380 r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
381 p1 = r0;
382 p1 = p1 + p2;
383 sp = p1;
384 r0 = syscfg;
385 [SP + PT_SYSCFG] = r0;
386 [p3 + (TASK_THREAD + THREAD_KSP)] = sp;
387
388 RESTORE_CONTEXT;
389 rti;
3901:
391 unlink;
392 rts;
393
394ENTRY(_system_call)
395 /* Store IPEND */
396 p2.l = lo(IPEND);
397 p2.h = hi(IPEND);
398 csync;
399 r0 = [p2];
400 [sp + PT_IPEND] = r0;
401
402 /* Store RETS for now */
403 r0 = rets;
404 [sp + PT_RESERVED] = r0;
405 /* Set the stack for the current process */
406 r7 = sp;
407 r6.l = lo(ALIGN_PAGE_MASK);
408 r6.h = hi(ALIGN_PAGE_MASK);
409 r7 = r7 & r6; /* thread_info */
410 p2 = r7;
411 p2 = [p2];
412
413 [p2+(TASK_THREAD+THREAD_KSP)] = sp;
414
415 /* Check the System Call */
416 r7 = __NR_syscall;
417 /* System call number is passed in P0 */
418 r6 = p0;
419 cc = r6 < r7;
420 if ! cc jump .Lbadsys;
421
422 /* are we tracing syscalls?*/
423 r7 = sp;
424 r6.l = lo(ALIGN_PAGE_MASK);
425 r6.h = hi(ALIGN_PAGE_MASK);
426 r7 = r7 & r6;
427 p2 = r7;
428 r7 = [p2+TI_FLAGS];
429 CC = BITTST(r7,TIF_SYSCALL_TRACE);
430 if CC JUMP _sys_trace;
431
432 /* Execute the appropriate system call */
433
434 p4 = p0;
435 p5.l = _sys_call_table;
436 p5.h = _sys_call_table;
437 p5 = p5 + (p4 << 2);
438 r0 = [sp + PT_R0];
439 r1 = [sp + PT_R1];
440 r2 = [sp + PT_R2];
441 p5 = [p5];
442
443 [--sp] = r5;
444 [--sp] = r4;
445 [--sp] = r3;
446 SP += -12;
447 call (p5);
448 SP += 24;
449 [sp + PT_R0] = r0;
450
451.Lresume_userspace:
452 r7 = sp;
453 r4.l = lo(ALIGN_PAGE_MASK);
454 r4.h = hi(ALIGN_PAGE_MASK);
455 r7 = r7 & r4; /* thread_info->flags */
456 p5 = r7;
457.Lresume_userspace_1:
458 /* Disable interrupts. */
459 [--sp] = reti;
460 reti = [sp++];
461
462 r7 = [p5 + TI_FLAGS];
463 r4.l = lo(_TIF_WORK_MASK);
464 r4.h = hi(_TIF_WORK_MASK);
465 r7 = r7 & r4;
466
467.Lsyscall_resched:
468 cc = BITTST(r7, TIF_NEED_RESCHED);
469 if !cc jump .Lsyscall_sigpending;
470
471 /* Reenable interrupts. */
472 [--sp] = reti;
473 r0 = [sp++];
474
475 SP += -12;
476 call _schedule;
477 SP += 12;
478
479 jump .Lresume_userspace_1;
480
481.Lsyscall_sigpending:
482 cc = BITTST(r7, TIF_RESTORE_SIGMASK);
483 if cc jump .Lsyscall_do_signals;
484 cc = BITTST(r7, TIF_SIGPENDING);
485 if !cc jump .Lsyscall_really_exit;
486.Lsyscall_do_signals:
487 /* Reenable interrupts. */
488 [--sp] = reti;
489 r0 = [sp++];
490
491 r0 = sp;
492 SP += -12;
493 call _do_signal;
494 SP += 12;
495
496.Lsyscall_really_exit:
497 r5 = [sp + PT_RESERVED];
498 rets = r5;
499 rts;
500
501_sys_trace:
502 call _syscall_trace;
503
504 /* Execute the appropriate system call */
505
506 p4 = [SP + PT_P0];
507 p5.l = _sys_call_table;
508 p5.h = _sys_call_table;
509 p5 = p5 + (p4 << 2);
510 r0 = [sp + PT_R0];
511 r1 = [sp + PT_R1];
512 r2 = [sp + PT_R2];
513 r3 = [sp + PT_R3];
514 r4 = [sp + PT_R4];
515 r5 = [sp + PT_R5];
516 p5 = [p5];
517
518 [--sp] = r5;
519 [--sp] = r4;
520 [--sp] = r3;
521 SP += -12;
522 call (p5);
523 SP += 24;
524 [sp + PT_R0] = r0;
525
526 call _syscall_trace;
527 jump .Lresume_userspace;
528
529ENTRY(_resume)
530 /*
531 * Beware - when entering resume, prev (the current task) is
532 * in r0, next (the new task) is in r1.
533 */
534 p0 = r0;
535 p1 = r1;
536 [--sp] = rets;
537 [--sp] = fp;
538 [--sp] = (r7:4, p5:3);
539
540 /* save usp */
541 p2 = usp;
542 [p0+(TASK_THREAD+THREAD_USP)] = p2;
543
544 /* save current kernel stack pointer */
545 [p0+(TASK_THREAD+THREAD_KSP)] = sp;
546
547 /* save program counter */
548 r1.l = _new_old_task;
549 r1.h = _new_old_task;
550 [p0+(TASK_THREAD+THREAD_PC)] = r1;
551
552 /* restore the kernel stack pointer */
553 sp = [p1+(TASK_THREAD+THREAD_KSP)];
554
555 /* restore user stack pointer */
556 p0 = [p1+(TASK_THREAD+THREAD_USP)];
557 usp = p0;
558
559 /* restore pc */
560 p0 = [p1+(TASK_THREAD+THREAD_PC)];
561 jump (p0);
562
563 /*
564 * The following code actually ends up running in the new (old) task.
565 */
566
567_new_old_task:
568 (r7:4, p5:3) = [sp++];
569 fp = [sp++];
570 rets = [sp++];
571
572 /*
573 * When we come out of resume, r0 carries the "old" task, because we
574 * are now running in the "new" task.
575 */
576 rts;
577
578ENTRY(_ret_from_exception)
579 p2.l = lo(IPEND);
580 p2.h = hi(IPEND);
581
582 csync;
583 r0 = [p2];
584 [sp + PT_IPEND] = r0;
585
5861:
587 r1 = 0x37(Z);
588 r2 = ~r1;
589 r2.h = 0;
590 r0 = r2 & r0;
591 cc = r0 == 0;
592 if !cc jump 4f; /* if not return to user mode, get out */
593
594 /* Make sure any pending system call or deferred exception
595 * return in ILAT for this process gets executed; otherwise,
596 * if a context switch happens, the system call of the
597 * first process (i.e. the one in ILAT) would be carried
598 * forward to the switched-in process.
599 */
600
601 p2.l = lo(ILAT);
602 p2.h = hi(ILAT);
603 r0 = [p2];
604 r1 = (EVT_IVG14 | EVT_IVG15) (z);
605 r0 = r0 & r1;
606 cc = r0 == 0;
607 if !cc jump 5f;
608
609 /* Get the current thread_info and check for pending work */
610 r7 = sp;
611 r4.l = lo(ALIGN_PAGE_MASK);
612 r4.h = hi(ALIGN_PAGE_MASK);
613 r7 = r7 & r4; /* thread_info */
614 p5 = r7;
615 r7 = [p5 + TI_FLAGS];
616 r4.l = lo(_TIF_WORK_MASK);
617 r4.h = hi(_TIF_WORK_MASK);
618 r7 = r7 & r4;
619 cc = r7 == 0;
620 if cc jump 4f;
621
622 p0.l = lo(EVT15);
623 p0.h = hi(EVT15);
624 p1.l = _schedule_and_signal;
625 p1.h = _schedule_and_signal;
626 [p0] = p1;
627 csync;
628 raise 15; /* raise evt15 to do signal or reschedule */
6294:
630 r0 = syscfg;
631 bitclr(r0, 0);
632 syscfg = r0;
6335:
634 rts;
635
636ENTRY(_return_from_int)
637 /* If someone else already raised IRQ 15, do nothing. */
638 csync;
639 p2.l = lo(ILAT);
640 p2.h = hi(ILAT);
641 r0 = [p2];
642 cc = bittst (r0, EVT_IVG15_P);
643 if cc jump 2f;
644
645 /* if not return to user mode, get out */
646 p2.l = lo(IPEND);
647 p2.h = hi(IPEND);
648 r0 = [p2];
649 r1 = 0x17(Z);
650 r2 = ~r1;
651 r2.h = 0;
652 r0 = r2 & r0;
653 r1 = 1;
654 r1 = r0 - r1;
655 r2 = r0 & r1;
656 cc = r2 == 0;
657 if !cc jump 2f;
658
659 /* Lower the interrupt level to 15. */
660 p0.l = lo(EVT15);
661 p0.h = hi(EVT15);
662 p1.l = _schedule_and_signal_from_int;
663 p1.h = _schedule_and_signal_from_int;
664 [p0] = p1;
665 csync;
666#if defined(ANOMALY_05000281)
667 r0.l = lo(CONFIG_BOOT_LOAD);
668 r0.h = hi(CONFIG_BOOT_LOAD);
669 reti = r0;
670#endif
671 r0 = 0x801f (z);
672 STI r0;
673 raise 15; /* raise evt15 to do signal or reschedule */
674 rti;
6752:
676 rts;
677
678ENTRY(_lower_to_irq14)
679#if defined(ANOMALY_05000281)
680 r0.l = lo(CONFIG_BOOT_LOAD);
681 r0.h = hi(CONFIG_BOOT_LOAD);
682 reti = r0;
683#endif
684 r0 = 0x401f;
685 sti r0;
686 raise 14;
687 rti;
688ENTRY(_evt14_softirq)
689#ifdef CONFIG_DEBUG_HWERR
690 r0 = 0x3f;
691 sti r0;
692#else
693 cli r0;
694#endif
695 [--sp] = RETI;
696 SP += 4;
697 rts;
698
699_schedule_and_signal_from_int:
700 /* To end up here, vector 15 was changed - so we have to change it
701 * back.
702 */
703 p0.l = lo(EVT15);
704 p0.h = hi(EVT15);
705 p1.l = _evt_system_call;
706 p1.h = _evt_system_call;
707 [p0] = p1;
708 csync;
709 p1 = rets;
710 [sp + PT_RESERVED] = p1;
711
712 p0.l = _irq_flags;
713 p0.h = _irq_flags;
714 r0 = [p0];
715 sti r0;
716
717 jump.s .Lresume_userspace;
718
719_schedule_and_signal:
720 SAVE_CONTEXT_SYSCALL
721 /* To end up here, vector 15 was changed - so we have to change it
722 * back.
723 */
724 p0.l = lo(EVT15);
725 p0.h = hi(EVT15);
726 p1.l = _evt_system_call;
727 p1.h = _evt_system_call;
728 [p0] = p1;
729 csync;
730 p0.l = 1f;
731 p0.h = 1f;
732 [sp + PT_RESERVED] = P0;
733 call .Lresume_userspace;
7341:
735 RESTORE_CONTEXT
736 rti;
737
738/* Make sure that, when we start, the circular buffer is initialized properly.
739 * R0 and P0 are call clobbered, so we can use them here.
740 */
741ENTRY(_init_exception_buff)
742 r0 = 0;
743 p0.h = _in_ptr_excause;
744 p0.l = _in_ptr_excause;
745 [p0] = r0;
746 p0.h = _out_ptr_excause;
747 p0.l = _out_ptr_excause;
748 [p0] = r0;
749 rts;
750
751/*
752 * Put these in the kernel data section - that should always be covered by
753 * a CPLB. This is needed to ensure we don't get double fault conditions
754 */
755
756#ifdef CONFIG_SYSCALL_TAB_L1
757.section .l1.data
758#else
759.data
760#endif
761ALIGN
762_extable:
763 /* entry for each EXCAUSE[5:0]
764 * This table must be in sync with the table in ./kernel/traps.c
765 * EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined
766 */
767 .long _ex_syscall /* 0x00 - User Defined - Linux Syscall */
768 .long _ex_soft_bp /* 0x01 - User Defined - Software breakpoint */
769 .long _ex_trap_c /* 0x02 - User Defined */
770 .long _ex_trap_c /* 0x03 - User Defined - Atomic test and set service */
771 .long _ex_spinlock /* 0x04 - User Defined */
772 .long _ex_trap_c /* 0x05 - User Defined */
773 .long _ex_trap_c /* 0x06 - User Defined */
774 .long _ex_trap_c /* 0x07 - User Defined */
775 .long _ex_trap_c /* 0x08 - User Defined */
776 .long _ex_trap_c /* 0x09 - User Defined */
777 .long _ex_trap_c /* 0x0A - User Defined */
778 .long _ex_trap_c /* 0x0B - User Defined */
779 .long _ex_trap_c /* 0x0C - User Defined */
780 .long _ex_trap_c /* 0x0D - User Defined */
781 .long _ex_trap_c /* 0x0E - User Defined */
782 .long _ex_trap_c /* 0x0F - User Defined */
783 .long _ex_single_step /* 0x10 - HW Single step */
784 .long _ex_trap_c /* 0x11 - Trace Buffer Full */
785 .long _ex_trap_c /* 0x12 - Reserved */
786 .long _ex_trap_c /* 0x13 - Reserved */
787 .long _ex_trap_c /* 0x14 - Reserved */
788 .long _ex_trap_c /* 0x15 - Reserved */
789 .long _ex_trap_c /* 0x16 - Reserved */
790 .long _ex_trap_c /* 0x17 - Reserved */
791 .long _ex_trap_c /* 0x18 - Reserved */
792 .long _ex_trap_c /* 0x19 - Reserved */
793 .long _ex_trap_c /* 0x1A - Reserved */
794 .long _ex_trap_c /* 0x1B - Reserved */
795 .long _ex_trap_c /* 0x1C - Reserved */
796 .long _ex_trap_c /* 0x1D - Reserved */
797 .long _ex_trap_c /* 0x1E - Reserved */
798 .long _ex_trap_c /* 0x1F - Reserved */
799 .long _ex_trap_c /* 0x20 - Reserved */
800 .long _ex_trap_c /* 0x21 - Undefined Instruction */
801 .long _ex_trap_c /* 0x22 - Illegal Instruction Combination */
802 .long _ex_dcplb /* 0x23 - Data CPLB Protection Violation */
803 .long _ex_trap_c /* 0x24 - Data access misaligned */
804 .long _ex_trap_c /* 0x25 - Unrecoverable Event */
805 .long _ex_dcplb /* 0x26 - Data CPLB Miss */
806 .long _ex_trap_c /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */
807 .long _ex_trap_c /* 0x28 - Emulation Watchpoint */
808 .long _ex_trap_c /* 0x29 - Instruction fetch access error (535 only) */
809 .long _ex_trap_c /* 0x2A - Instruction fetch misaligned */
810 .long _ex_icplb /* 0x2B - Instruction CPLB protection Violation */
811 .long _ex_icplb /* 0x2C - Instruction CPLB miss */
812 .long _ex_trap_c /* 0x2D - Instruction CPLB Multiple Hits */
813 .long _ex_trap_c /* 0x2E - Illegal use of Supervisor Resource */
815 .long _ex_trap_c /* 0x2F - Reserved */
816 .long _ex_trap_c /* 0x30 - Reserved */
817 .long _ex_trap_c /* 0x31 - Reserved */
818 .long _ex_trap_c /* 0x32 - Reserved */
819 .long _ex_trap_c /* 0x33 - Reserved */
820 .long _ex_trap_c /* 0x34 - Reserved */
821 .long _ex_trap_c /* 0x35 - Reserved */
822 .long _ex_trap_c /* 0x36 - Reserved */
823 .long _ex_trap_c /* 0x37 - Reserved */
824 .long _ex_trap_c /* 0x38 - Reserved */
825 .long _ex_trap_c /* 0x39 - Reserved */
826 .long _ex_trap_c /* 0x3A - Reserved */
827 .long _ex_trap_c /* 0x3B - Reserved */
828 .long _ex_trap_c /* 0x3C - Reserved */
829 .long _ex_trap_c /* 0x3D - Reserved */
830 .long _ex_trap_c /* 0x3E - Reserved */
831 .long _ex_trap_c /* 0x3F - Reserved */
832
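The dispatch through this table happens in _trap above: SEQSTAT is masked down to EXCAUSE[5:0] and used as a scaled index into the 64 handler addresses. A hedged C rendering of that mechanism (the handler type and names are illustrative, not the kernel's):

/* Sketch of the _extable dispatch performed by _trap. */
typedef void (*ex_handler_sketch_t)(void);

static ex_handler_sketch_t extable_sketch[64];	/* one slot per EXCAUSE[5:0] value */

static void dispatch_exception_sketch(unsigned long seqstat)
{
	/* p5 + (p4 << 2) in the assembly is a scaled index into a table of
	 * 32-bit handler addresses; jump (p4) then tail-calls the handler. */
	extable_sketch[seqstat & 0x3f]();
}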
833ALIGN
834ENTRY(_sys_call_table)
835 .long _sys_ni_syscall /* 0 - old "setup()" system call*/
836 .long _sys_exit
837 .long _sys_fork
838 .long _sys_read
839 .long _sys_write
840 .long _sys_open /* 5 */
841 .long _sys_close
842 .long _sys_ni_syscall /* old waitpid */
843 .long _sys_creat
844 .long _sys_link
845 .long _sys_unlink /* 10 */
846 .long _sys_execve
847 .long _sys_chdir
848 .long _sys_time
849 .long _sys_mknod
850 .long _sys_chmod /* 15 */
851 .long _sys_chown /* chown16 */
852 .long _sys_ni_syscall /* old break syscall holder */
853 .long _sys_ni_syscall /* old stat */
854 .long _sys_lseek
855 .long _sys_getpid /* 20 */
856 .long _sys_mount
857 .long _sys_ni_syscall /* old umount */
858 .long _sys_setuid
859 .long _sys_getuid
860 .long _sys_stime /* 25 */
861 .long _sys_ptrace
862 .long _sys_alarm
863 .long _sys_ni_syscall /* old fstat */
864 .long _sys_pause
865 .long _sys_ni_syscall /* old utime */ /* 30 */
866 .long _sys_ni_syscall /* old stty syscall holder */
867 .long _sys_ni_syscall /* old gtty syscall holder */
868 .long _sys_access
869 .long _sys_nice
870 .long _sys_ni_syscall /* 35 */ /* old ftime syscall holder */
871 .long _sys_sync
872 .long _sys_kill
873 .long _sys_rename
874 .long _sys_mkdir
875 .long _sys_rmdir /* 40 */
876 .long _sys_dup
877 .long _sys_pipe
878 .long _sys_times
879 .long _sys_ni_syscall /* old prof syscall holder */
880 .long _sys_brk /* 45 */
881 .long _sys_setgid
882 .long _sys_getgid
883 .long _sys_ni_syscall /* old sys_signal */
884 .long _sys_geteuid /* geteuid16 */
885 .long _sys_getegid /* getegid16 */ /* 50 */
886 .long _sys_acct
887 .long _sys_umount /* recycled never used phys() */
888 .long _sys_ni_syscall /* old lock syscall holder */
889 .long _sys_ioctl
890 .long _sys_fcntl /* 55 */
891 .long _sys_ni_syscall /* old mpx syscall holder */
892 .long _sys_setpgid
893 .long _sys_ni_syscall /* old ulimit syscall holder */
894 .long _sys_ni_syscall /* old old uname */
895 .long _sys_umask /* 60 */
896 .long _sys_chroot
897 .long _sys_ustat
898 .long _sys_dup2
899 .long _sys_getppid
900 .long _sys_getpgrp /* 65 */
901 .long _sys_setsid
902 .long _sys_ni_syscall /* old sys_sigaction */
903 .long _sys_sgetmask
904 .long _sys_ssetmask
905 .long _sys_setreuid /* setreuid16 */ /* 70 */
906 .long _sys_setregid /* setregid16 */
907 .long _sys_ni_syscall /* old sys_sigsuspend */
908 .long _sys_ni_syscall /* old sys_sigpending */
909 .long _sys_sethostname
910 .long _sys_setrlimit /* 75 */
911 .long _sys_ni_syscall /* old getrlimit */
912 .long _sys_getrusage
913 .long _sys_gettimeofday
914 .long _sys_settimeofday
915 .long _sys_getgroups /* getgroups16 */ /* 80 */
916 .long _sys_setgroups /* setgroups16 */
917 .long _sys_ni_syscall /* old_select */
918 .long _sys_symlink
919 .long _sys_ni_syscall /* old lstat */
920 .long _sys_readlink /* 85 */
921 .long _sys_uselib
922 .long _sys_ni_syscall /* sys_swapon */
923 .long _sys_reboot
924 .long _sys_ni_syscall /* old_readdir */
925 .long _sys_ni_syscall /* sys_mmap */ /* 90 */
926 .long _sys_munmap
927 .long _sys_truncate
928 .long _sys_ftruncate
929 .long _sys_fchmod
930 .long _sys_fchown /* fchown16 */ /* 95 */
931 .long _sys_getpriority
932 .long _sys_setpriority
933 .long _sys_ni_syscall /* old profil syscall holder */
934 .long _sys_statfs
935 .long _sys_fstatfs /* 100 */
936 .long _sys_ni_syscall
937 .long _sys_ni_syscall /* old sys_socketcall */
938 .long _sys_syslog
939 .long _sys_setitimer
940 .long _sys_getitimer /* 105 */
941 .long _sys_newstat
942 .long _sys_newlstat
943 .long _sys_newfstat
944 .long _sys_ni_syscall /* old uname */
945 .long _sys_ni_syscall /* iopl for i386 */ /* 110 */
946 .long _sys_vhangup
947 .long _sys_ni_syscall /* obsolete idle() syscall */
948 .long _sys_ni_syscall /* vm86old for i386 */
949 .long _sys_wait4
950 .long _sys_ni_syscall /* 115 */ /* sys_swapoff */
951 .long _sys_sysinfo
952 .long _sys_ni_syscall /* old sys_ipc */
953 .long _sys_fsync
954 .long _sys_ni_syscall /* old sys_sigreturn */
955 .long _sys_clone /* 120 */
956 .long _sys_setdomainname
957 .long _sys_newuname
958 .long _sys_ni_syscall /* old sys_modify_ldt */
959 .long _sys_adjtimex
960 .long _sys_ni_syscall /* 125 */ /* sys_mprotect */
961 .long _sys_ni_syscall /* old sys_sigprocmask */
962 .long _sys_ni_syscall /* old "creat_module" */
963 .long _sys_init_module
964 .long _sys_delete_module
965 .long _sys_ni_syscall /* 130: old "get_kernel_syms" */
966 .long _sys_quotactl
967 .long _sys_getpgid
968 .long _sys_fchdir
969 .long _sys_bdflush
970 .long _sys_ni_syscall /* 135 */ /* sys_sysfs */
971 .long _sys_personality
972 .long _sys_ni_syscall /* for afs_syscall */
973 .long _sys_setfsuid /* setfsuid16 */
974 .long _sys_setfsgid /* setfsgid16 */
975 .long _sys_llseek /* 140 */
976 .long _sys_getdents
977 .long _sys_ni_syscall /* sys_select */
978 .long _sys_flock
979 .long _sys_ni_syscall /* sys_msync */
980 .long _sys_readv /* 145 */
981 .long _sys_writev
982 .long _sys_getsid
983 .long _sys_fdatasync
984 .long _sys_sysctl
985 .long _sys_ni_syscall /* 150 */ /* sys_mlock */
986 .long _sys_ni_syscall /* sys_munlock */
987 .long _sys_ni_syscall /* sys_mlockall */
988 .long _sys_ni_syscall /* sys_munlockall */
989 .long _sys_sched_setparam
990 .long _sys_sched_getparam /* 155 */
991 .long _sys_sched_setscheduler
992 .long _sys_sched_getscheduler
993 .long _sys_sched_yield
994 .long _sys_sched_get_priority_max
995 .long _sys_sched_get_priority_min /* 160 */
996 .long _sys_sched_rr_get_interval
997 .long _sys_nanosleep
998 .long _sys_ni_syscall /* sys_mremap */
999 .long _sys_setresuid /* setresuid16 */
1000 .long _sys_getresuid /* getresuid16 */ /* 165 */
1001 .long _sys_ni_syscall /* for vm86 */
1002 .long _sys_ni_syscall /* old "query_module" */
1003 .long _sys_ni_syscall /* sys_poll */
1004 .long _sys_ni_syscall /* sys_nfsservctl */
1005 .long _sys_setresgid /* setresgid16 */ /* 170 */
1006 .long _sys_getresgid /* getresgid16 */
1007 .long _sys_prctl
1008 .long _sys_rt_sigreturn
1009 .long _sys_rt_sigaction
1010 .long _sys_rt_sigprocmask /* 175 */
1011 .long _sys_rt_sigpending
1012 .long _sys_rt_sigtimedwait
1013 .long _sys_rt_sigqueueinfo
1014 .long _sys_rt_sigsuspend
1015 .long _sys_pread64 /* 180 */
1016 .long _sys_pwrite64
1017 .long _sys_lchown /* lchown16 */
1018 .long _sys_getcwd
1019 .long _sys_capget
1020 .long _sys_capset /* 185 */
1021 .long _sys_sigaltstack
1022 .long _sys_sendfile
1023 .long _sys_ni_syscall /* streams1 */
1024 .long _sys_ni_syscall /* streams2 */
1025 .long _sys_vfork /* 190 */
1026 .long _sys_getrlimit
1027 .long _sys_mmap2
1028 .long _sys_truncate64
1029 .long _sys_ftruncate64
1030 .long _sys_stat64 /* 195 */
1031 .long _sys_lstat64
1032 .long _sys_fstat64
1033 .long _sys_chown
1034 .long _sys_getuid
1035 .long _sys_getgid /* 200 */
1036 .long _sys_geteuid
1037 .long _sys_getegid
1038 .long _sys_setreuid
1039 .long _sys_setregid
1040 .long _sys_getgroups /* 205 */
1041 .long _sys_setgroups
1042 .long _sys_fchown
1043 .long _sys_setresuid
1044 .long _sys_getresuid
1045 .long _sys_setresgid /* 210 */
1046 .long _sys_getresgid
1047 .long _sys_lchown
1048 .long _sys_setuid
1049 .long _sys_setgid
1050 .long _sys_setfsuid /* 215 */
1051 .long _sys_setfsgid
1052 .long _sys_pivot_root
1053 .long _sys_ni_syscall /* sys_mincore */
1054 .long _sys_ni_syscall /* sys_madvise */
1055 .long _sys_getdents64 /* 220 */
1056 .long _sys_fcntl64
1057 .long _sys_ni_syscall /* reserved for TUX */
1058 .long _sys_ni_syscall
1059 .long _sys_gettid
1060 .long _sys_ni_syscall /* 225 */ /* sys_readahead */
1061 .long _sys_setxattr
1062 .long _sys_lsetxattr
1063 .long _sys_fsetxattr
1064 .long _sys_getxattr
1065 .long _sys_lgetxattr /* 230 */
1066 .long _sys_fgetxattr
1067 .long _sys_listxattr
1068 .long _sys_llistxattr
1069 .long _sys_flistxattr
1070 .long _sys_removexattr /* 235 */
1071 .long _sys_lremovexattr
1072 .long _sys_fremovexattr
1073 .long _sys_tkill
1074 .long _sys_sendfile64
1075 .long _sys_futex /* 240 */
1076 .long _sys_sched_setaffinity
1077 .long _sys_sched_getaffinity
1078 .long _sys_ni_syscall /* sys_set_thread_area */
1079 .long _sys_ni_syscall /* sys_get_thread_area */
1080 .long _sys_io_setup /* 245 */
1081 .long _sys_io_destroy
1082 .long _sys_io_getevents
1083 .long _sys_io_submit
1084 .long _sys_io_cancel
1085 .long _sys_ni_syscall /* 250 */ /* sys_alloc_hugepages */
1086 .long _sys_ni_syscall /* sys_free_hugepages */
1087 .long _sys_exit_group
1088 .long _sys_lookup_dcookie
1089 .long _sys_bfin_spinlock
1090 .long _sys_epoll_create /* 255 */
1091 .long _sys_epoll_ctl
1092 .long _sys_epoll_wait
1093 .long _sys_ni_syscall /* remap_file_pages */
1094 .long _sys_set_tid_address
1095 .long _sys_timer_create /* 260 */
1096 .long _sys_timer_settime
1097 .long _sys_timer_gettime
1098 .long _sys_timer_getoverrun
1099 .long _sys_timer_delete
1100 .long _sys_clock_settime /* 265 */
1101 .long _sys_clock_gettime
1102 .long _sys_clock_getres
1103 .long _sys_clock_nanosleep
1104 .long _sys_statfs64
1105 .long _sys_fstatfs64 /* 270 */
1106 .long _sys_tgkill
1107 .long _sys_utimes
1108 .long _sys_fadvise64_64
1109 .long _sys_ni_syscall /* vserver */
1110 .long _sys_ni_syscall /* 275, mbind */
1111 .long _sys_ni_syscall /* get_mempolicy */
1112 .long _sys_ni_syscall /* set_mempolicy */
1113 .long _sys_mq_open
1114 .long _sys_mq_unlink
1115 .long _sys_mq_timedsend /* 280 */
1116 .long _sys_mq_timedreceive
1117 .long _sys_mq_notify
1118 .long _sys_mq_getsetattr
1119 .long _sys_ni_syscall /* kexec_load */
1120 .long _sys_waitid /* 285 */
1121 .long _sys_add_key
1122 .long _sys_request_key
1123 .long _sys_keyctl
1124 .long _sys_ioprio_set
1125 .long _sys_ioprio_get /* 290 */
1126 .long _sys_inotify_init
1127 .long _sys_inotify_add_watch
1128 .long _sys_inotify_rm_watch
1129 .long _sys_ni_syscall /* migrate_pages */
1130 .long _sys_openat /* 295 */
1131 .long _sys_mkdirat
1132 .long _sys_mknodat
1133 .long _sys_fchownat
1134 .long _sys_futimesat
1135 .long _sys_fstatat64 /* 300 */
1136 .long _sys_unlinkat
1137 .long _sys_renameat
1138 .long _sys_linkat
1139 .long _sys_symlinkat
1140 .long _sys_readlinkat /* 305 */
1141 .long _sys_fchmodat
1142 .long _sys_faccessat
1143 .long _sys_pselect6
1144 .long _sys_ppoll
1145 .long _sys_unshare /* 310 */
1146 .long _sys_sram_alloc
1147 .long _sys_sram_free
1148 .long _sys_dma_memcpy
1149 .long _sys_accept
1150 .long _sys_bind /* 315 */
1151 .long _sys_connect
1152 .long _sys_getpeername
1153 .long _sys_getsockname
1154 .long _sys_getsockopt
1155 .long _sys_listen /* 320 */
1156 .long _sys_recv
1157 .long _sys_recvfrom
1158 .long _sys_recvmsg
1159 .long _sys_send
1160 .long _sys_sendmsg /* 325 */
1161 .long _sys_sendto
1162 .long _sys_setsockopt
1163 .long _sys_shutdown
1164 .long _sys_socket
1165 .long _sys_socketpair /* 330 */
1166 .long _sys_semctl
1167 .long _sys_semget
1168 .long _sys_semop
1169 .long _sys_msgctl
1170 .long _sys_msgget /* 335 */
1171 .long _sys_msgrcv
1172 .long _sys_msgsnd
1173 .long _sys_shmat
1174 .long _sys_shmctl
1175 .long _sys_shmdt /* 340 */
1176 .long _sys_shmget
1177 .rept NR_syscalls-(.-_sys_call_table)/4
1178 .long _sys_ni_syscall
1179 .endr
1180_excpt_saved_imask:
1181 .long 0;
1182
1183_exception_stack:
1184 .rept 1024
1185 .long 0;
1186 .endr
1187_exception_stack_top:
1188
1189#if defined(ANOMALY_05000261)
1190/* Used by the assembly entry point to work around an anomaly. */
1191_last_cplb_fault_retx:
1192 .long 0;
1193#endif
1194/*
1195 * Single instructions can have multiple faults, which need to be
1196 * handled by traps.c, in irq5. We store the exception cause to ensure
1197 * we don't miss a double fault condition
1198 */
1199ENTRY(_in_ptr_excause)
1200 .long 0;
1201ENTRY(_out_ptr_excause)
1202 .long 0;
1203ALIGN
1204ENTRY(_excause_circ_buf)
1205 .rept 4
1206 .long 0
1207 .endr
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
new file mode 100644
index 000000000000..dd45664f0d02
--- /dev/null
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -0,0 +1,253 @@
1/*
2 * File: arch/blackfin/mach-common/interrupt.S
3 * Based on:
4 * Author: D. Jeff Dionne <jeff@ryeham.ee.ryerson.ca>
5 * Kenneth Albanowski <kjahds@kjahds.com>
6 *
7 * Created: ?
8 * Description: Interrupt Entries
9 *
10 * Modified:
11 * Copyright 2004-2006 Analog Devices Inc.
12 *
13 * Bugs: Enter bugs at http://blackfin.uclinux.org/
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, see the file COPYING, or write
27 * to the Free Software Foundation, Inc.,
28 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
29 */
30
31#include <asm/blackfin.h>
32#include <asm/mach/irq.h>
33#include <linux/autoconf.h>
34#include <linux/linkage.h>
35#include <asm/entry.h>
36#include <asm/asm-offsets.h>
37
38#include <asm/mach-common/context.S>
39
40#ifdef CONFIG_I_ENTRY_L1
41.section .l1.text
42#else
43.text
44#endif
45
46.align 4 /* just in case */
47
48/*
49 * initial interrupt handlers
50 */
51
52#ifndef CONFIG_KGDB
53 /* interrupt routine for emulation - 0 */
54 /* Used only if the GDB stub is not built in - otherwise invalid, */
55 /* since the gdb-stub sets the EVT entry itself */
56 /* save registers for post-mortem only */
57ENTRY(_evt_emulation)
58 SAVE_ALL_SYS
59#ifdef CONFIG_FRAME_POINTER
60 fp = 0;
61#endif
62 r0 = IRQ_EMU;
63 r1 = sp;
64 SP += -12;
65 call _irq_panic;
66 SP += 12;
67 /* - GDB stub fills this in by itself (if defined) */
68 rte;
69#endif
70
71/* Common interrupt entry code. First we do CLI, then push
72 * RETI, to keep interrupts disabled, but to allow this state to be changed
73 * by local_bh_enable.
74 * R0 contains the interrupt number, while R1 may contain the value of IPEND,
75 * or garbage if IPEND won't be needed by the ISR. */
76__common_int_entry:
77 [--sp] = fp;
78 [--sp] = usp;
79
80 [--sp] = i0;
81 [--sp] = i1;
82 [--sp] = i2;
83 [--sp] = i3;
84
85 [--sp] = m0;
86 [--sp] = m1;
87 [--sp] = m2;
88 [--sp] = m3;
89
90 [--sp] = l0;
91 [--sp] = l1;
92 [--sp] = l2;
93 [--sp] = l3;
94
95 [--sp] = b0;
96 [--sp] = b1;
97 [--sp] = b2;
98 [--sp] = b3;
99 [--sp] = a0.x;
100 [--sp] = a0.w;
101 [--sp] = a1.x;
102 [--sp] = a1.w;
103
104 [--sp] = LC0;
105 [--sp] = LC1;
106 [--sp] = LT0;
107 [--sp] = LT1;
108 [--sp] = LB0;
109 [--sp] = LB1;
110
111 [--sp] = ASTAT;
112
113 [--sp] = r0; /* Skip reserved */
114 [--sp] = RETS;
115 r2 = RETI;
116 [--sp] = r2;
117 [--sp] = RETX;
118 [--sp] = RETN;
119 [--sp] = RETE;
120 [--sp] = SEQSTAT;
121 [--sp] = r1; /* IPEND - R1 may or may not be set up before jumping here. */
122
123 /* Switch to other method of keeping interrupts disabled. */
124#ifdef CONFIG_DEBUG_HWERR
125 r1 = 0x3f;
126 sti r1;
127#else
128 cli r1;
129#endif
130 [--sp] = RETI; /* orig_pc */
131 /* Clear all L registers. */
132 r1 = 0 (x);
133 l0 = r1;
134 l1 = r1;
135 l2 = r1;
136 l3 = r1;
137#ifdef CONFIG_FRAME_POINTER
138 fp = 0;
139#endif
140
141#ifdef ANOMALY_05000283
142 cc = r7 == r7;
143 p5.h = 0xffc0;
144 p5.l = 0x0014;
145 if cc jump 1f;
146 r7.l = W[p5];
1471:
148#endif
149 r1 = sp;
150 SP += -12;
151 call _do_irq;
152 SP += 12;
153 call _return_from_int;
154.Lcommon_restore_context:
155 RESTORE_CONTEXT
156 rti;
157
158/* interrupt routine for ivhw - 5 */
159ENTRY(_evt_ivhw)
160 SAVE_CONTEXT
161#ifdef CONFIG_FRAME_POINTER
162 fp = 0;
163#endif
164#ifdef ANOMALY_05000283
165 cc = r7 == r7;
166 p5.h = 0xffc0;
167 p5.l = 0x0014;
168 if cc jump 1f;
169 r7.l = W[p5];
1701:
171#endif
172 p0.l = lo(TBUFCTL);
173 p0.h = hi(TBUFCTL);
174 r0 = 1;
175 [p0] = r0;
176 r0 = IRQ_HWERR;
177 r1 = sp;
178
179#ifdef CONFIG_HARDWARE_PM
180 r7 = SEQSTAT;
181 r7 = r7 >>> 0xe;
182 r6 = 0x1F;
183 r7 = r7 & r6;
184 r5 = 0x12;
185 cc = r7 == r5;
186 if cc jump .Lcall_do_ovf; /* deal with performance counter overflow */
187#endif
188
189 SP += -12;
190 call _irq_panic;
191 SP += 12;
192 rti;
193#ifdef CONFIG_HARDWARE_PM
194.Lcall_do_ovf:
195
196 SP += -12;
197 call _pm_overflow;
198 SP += 12;
199
200 jump .Lcommon_restore_context;
201#endif
202
203/* interrupt routine for evt2 - 2. This is NMI. */
204ENTRY(_evt_evt2)
205 SAVE_CONTEXT
206#ifdef CONFIG_FRAME_POINTER
207 fp = 0;
208#endif
209#ifdef ANOMALY_05000283
210 cc = r7 == r7;
211 p5.h = 0xffc0;
212 p5.l = 0x0014;
213 if cc jump 1f;
214 r7.l = W[p5];
2151:
216#endif
217 r0 = IRQ_NMI;
218 r1 = sp;
219 SP += -12;
220 call _asm_do_IRQ;
221 SP += 12;
222 RESTORE_CONTEXT
223 rtn;
224
225/* interrupt routine for core timer - 6 */
226ENTRY(_evt_timer)
227 TIMER_INTERRUPT_ENTRY(EVT_IVTMR_P)
228
229/* interrupt routine for evt7 - 7 */
230ENTRY(_evt_evt7)
231 INTERRUPT_ENTRY(EVT_IVG7_P)
232ENTRY(_evt_evt8)
233 INTERRUPT_ENTRY(EVT_IVG8_P)
234ENTRY(_evt_evt9)
235 INTERRUPT_ENTRY(EVT_IVG9_P)
236ENTRY(_evt_evt10)
237 INTERRUPT_ENTRY(EVT_IVG10_P)
238ENTRY(_evt_evt11)
239 INTERRUPT_ENTRY(EVT_IVG11_P)
240ENTRY(_evt_evt12)
241 INTERRUPT_ENTRY(EVT_IVG12_P)
242ENTRY(_evt_evt13)
243 INTERRUPT_ENTRY(EVT_IVG13_P)
244
245
246 /* interrupt routine for system_call - 15 */
247ENTRY(_evt_system_call)
248 SAVE_CONTEXT_SYSCALL
249#ifdef CONFIG_FRAME_POINTER
250 fp = 0;
251#endif
252 call _system_call;
253 jump .Lcommon_restore_context;
diff --git a/arch/blackfin/mach-common/ints-priority-dc.c b/arch/blackfin/mach-common/ints-priority-dc.c
new file mode 100644
index 000000000000..f3cf07036c2a
--- /dev/null
+++ b/arch/blackfin/mach-common/ints-priority-dc.c
@@ -0,0 +1,476 @@
1/*
2 * File: arch/blackfin/mach-common/ints-priority-dc.c
3 * Based on:
4 * Author:
5 *
6 * Created: ?
7 * Description: Set up the interrupt priorities
8 *
9 * Modified:
10 * 1996 Roman Zippel
11 * 1999 D. Jeff Dionne <jeff@uclinux.org>
12 * 2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
13 * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
14 * 2003 Metrowerks/Motorola
15 * 2003 Bas Vermeulen <bas@buyways.nl>
16 * Copyright 2004-2006 Analog Devices Inc.
17 *
18 * Bugs: Enter bugs at http://blackfin.uclinux.org/
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, see the file COPYING, or write
32 * to the Free Software Foundation, Inc.,
33 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
34 */
35
36#include <linux/module.h>
37#include <linux/kernel_stat.h>
38#include <linux/seq_file.h>
39#include <linux/irq.h>
40#ifdef CONFIG_KGDB
41#include <linux/kgdb.h>
42#endif
43#include <asm/traps.h>
44#include <asm/blackfin.h>
45#include <asm/gpio.h>
46#include <asm/irq_handler.h>
47
48/*
49 * NOTES:
50 * - we have separated the physical hardware interrupts from the
51 * levels that the Linux kernel sees (see the description in irq.h)
52 *
53 */
54
55unsigned long irq_flags = 0;
56
57/* The number of spurious interrupts */
58atomic_t num_spurious;
59
60struct ivgx {
61 /* irq number for request_irq, available in mach-bf561/irq.h */
62 int irqno;
63 /* corresponding bit in the SICA_ISR0 register */
64 int isrflag0;
65 /* corresponding bit in the SICA_ISR1 register */
66 int isrflag1;
67} ivg_table[NR_PERI_INTS];
68
69struct ivg_slice {
70 /* position of first irq in ivg_table for given ivg */
71 struct ivgx *ifirst;
72 struct ivgx *istop;
73} ivg7_13[IVG13 - IVG7 + 1];
74
75static void search_IAR(void);
76
77/*
78 * Search SIC_IAR and fill the tables with the irq values
79 * and their positions in the SIC_ISR register.
80 */
81static void __init search_IAR(void)
82{
83 unsigned ivg, irq_pos = 0;
84 for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
85 int irqn;
86
87 ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];
88
89 for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
90 int iar_shift = (irqn & 7) * 4;
91 if (ivg ==
92 (0xf &
93 bfin_read32((unsigned long *)SICA_IAR0 +
94 (irqn >> 3)) >> iar_shift)) {
95 ivg_table[irq_pos].irqno = IVG7 + irqn;
96 ivg_table[irq_pos].isrflag0 =
97 (irqn < 32 ? (1 << irqn) : 0);
98 ivg_table[irq_pos].isrflag1 =
99 (irqn < 32 ? 0 : (1 << (irqn - 32)));
100 ivg7_13[ivg].istop++;
101 irq_pos++;
102 }
103 }
104 }
105}
106
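The nibble decoding performed by search_IAR() can be isolated as a small stand-alone C helper. This is only a sketch of the arithmetic; iar[] stands in for the SICA_IAR registers, each of which packs eight 4-bit IVG assignments.

#include <stdint.h>

/* Sketch: return the IVG offset (0 = IVG7, 1 = IVG8, ...) assigned to
 * peripheral interrupt irqn, given the IAR registers as an array. */
static unsigned int ivg_of_irq_sketch(const uint32_t *iar, unsigned int irqn)
{
	unsigned int reg   = irqn >> 3;		/* one IAR register per 8 interrupts */
	unsigned int shift = (irqn & 7) * 4;	/* one nibble per interrupt */

	return (iar[reg] >> shift) & 0xf;
}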
107/*
108 * This is for BF561 internal IRQs
109 */
110
111static void ack_noop(unsigned int irq)
112{
113 /* Dummy function. */
114}
115
116static void bf561_core_mask_irq(unsigned int irq)
117{
118 irq_flags &= ~(1 << irq);
119 if (!irqs_disabled())
120 local_irq_enable();
121}
122
123static void bf561_core_unmask_irq(unsigned int irq)
124{
125 irq_flags |= 1 << irq;
126 /*
127 * If interrupts are enabled, IMASK must contain the same value
128 * as irq_flags. Make sure that invariant holds. If interrupts
129 * are currently disabled we need not do anything; one of the
130 * callers will take care of setting IMASK to the proper value
131 * when reenabling interrupts.
132 * local_irq_enable just does "STI irq_flags", so it's exactly
133 * what we need.
134 */
135 if (!irqs_disabled())
136 local_irq_enable();
137 return;
138}
139
140static void bf561_internal_mask_irq(unsigned int irq)
141{
142 unsigned long irq_mask;
143 if ((irq - (IRQ_CORETMR + 1)) < 32) {
144 irq_mask = (1 << (irq - (IRQ_CORETMR + 1)));
145 bfin_write_SICA_IMASK0(bfin_read_SICA_IMASK0() & ~irq_mask);
146 } else {
147 irq_mask = (1 << (irq - (IRQ_CORETMR + 1) - 32));
148 bfin_write_SICA_IMASK1(bfin_read_SICA_IMASK1() & ~irq_mask);
149 }
150}
151
152static void bf561_internal_unmask_irq(unsigned int irq)
153{
154 unsigned long irq_mask;
155
156 if ((irq - (IRQ_CORETMR + 1)) < 32) {
157 irq_mask = (1 << (irq - (IRQ_CORETMR + 1)));
158 bfin_write_SICA_IMASK0(bfin_read_SICA_IMASK0() | irq_mask);
159 } else {
160 irq_mask = (1 << (irq - (IRQ_CORETMR + 1) - 32));
161 bfin_write_SICA_IMASK1(bfin_read_SICA_IMASK1() | irq_mask);
162 }
163 SSYNC();
164}
165
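The two routines above split the system interrupts across SICA_IMASK0 and SICA_IMASK1; the mapping from a Linux irq number to a (register, bit) pair is plain arithmetic. A stand-alone sketch, with IRQ_CORETMR passed in rather than taken from the real headers:

#include <stdint.h>

struct sic_slot_sketch {
	int	 reg;	/* 0 -> SICA_IMASK0, 1 -> SICA_IMASK1 */
	uint32_t bit;	/* bit to clear (mask) or set (unmask) */
};

static struct sic_slot_sketch sic_slot_of_irq(unsigned int irq,
					      unsigned int irq_coretmr)
{
	unsigned int n = irq - (irq_coretmr + 1);	/* peripheral index */
	struct sic_slot_sketch s;

	s.reg = (n < 32) ? 0 : 1;
	s.bit = 1u << (n & 31);		/* n for n < 32, n - 32 otherwise */
	return s;
}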
166static struct irq_chip bf561_core_irqchip = {
167 .ack = ack_noop,
168 .mask = bf561_core_mask_irq,
169 .unmask = bf561_core_unmask_irq,
170};
171
172static struct irq_chip bf561_internal_irqchip = {
173 .ack = ack_noop,
174 .mask = bf561_internal_mask_irq,
175 .unmask = bf561_internal_unmask_irq,
176};
177
178#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
179static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)];
180static unsigned short gpio_edge_triggered[gpio_bank(MAX_BLACKFIN_GPIOS)];
181
182static void bf561_gpio_ack_irq(unsigned int irq)
183{
184 u16 gpionr = irq - IRQ_PF0;
185
186 if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) {
187 set_gpio_data(gpionr, 0);
188 SSYNC();
189 }
190}
191
192static void bf561_gpio_mask_ack_irq(unsigned int irq)
193{
194 u16 gpionr = irq - IRQ_PF0;
195
196 if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) {
197 set_gpio_data(gpionr, 0);
198 SSYNC();
199 }
200
201 set_gpio_maska(gpionr, 0);
202 SSYNC();
203}
204
205static void bf561_gpio_mask_irq(unsigned int irq)
206{
207 set_gpio_maska(irq - IRQ_PF0, 0);
208 SSYNC();
209}
210
211static void bf561_gpio_unmask_irq(unsigned int irq)
212{
213 set_gpio_maska(irq - IRQ_PF0, 1);
214 SSYNC();
215}
216
217static unsigned int bf561_gpio_irq_startup(unsigned int irq)
218{
219 unsigned int ret = 0;
220 u16 gpionr = irq - IRQ_PF0;
221
222 if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) {
223
224 ret = gpio_request(gpionr, NULL);
225 if (ret)
226 return ret;
227
228 }
229
230 gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
231 bf561_gpio_unmask_irq(irq);
232
233 return ret;
234
235}
236
237static void bf561_gpio_irq_shutdown(unsigned int irq)
238{
239 bf561_gpio_mask_irq(irq);
240 gpio_free(irq - IRQ_PF0);
241 gpio_enabled[gpio_bank(irq - IRQ_PF0)] &= ~gpio_bit(irq - IRQ_PF0);
242}
243
244static int bf561_gpio_irq_type(unsigned int irq, unsigned int type)
245{
246
247 unsigned int ret;
248 u16 gpionr = irq - IRQ_PF0;
249
250
251 if (type == IRQ_TYPE_PROBE) {
252 /* only probe unenabled GPIO interrupt lines */
253 if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))
254 return 0;
255 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
256
257 }
258
259 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
260 IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
261
262 if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) {
263
264 ret = gpio_request(gpionr, NULL);
265 if (ret)
266 return ret;
267
268 }
269
270 gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
271 } else {
272 gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
273 return 0;
274 }
275
276
277 set_gpio_dir(gpionr, 0);
278 set_gpio_inen(gpionr, 1);
279
280
281 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
282 gpio_edge_triggered[gpio_bank(gpionr)] |= gpio_bit(gpionr);
283 set_gpio_edge(gpionr, 1);
284 } else {
285 set_gpio_edge(gpionr, 0);
286 gpio_edge_triggered[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
287 }
288
289 if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
290 == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
291 set_gpio_both(gpionr, 1);
292 else
293 set_gpio_both(gpionr, 0);
294
295 if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
296 set_gpio_polar(gpionr, 1); /* low or falling edge denoted by one */
297 else
298 set_gpio_polar(gpionr, 0); /* high or rising edge denoted by zero */
299
300 SSYNC();
301
302 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
303 set_irq_handler(irq, handle_edge_irq);
304 else
305 set_irq_handler(irq, handle_level_irq);
306
307 return 0;
308}
309
310static struct irq_chip bf561_gpio_irqchip = {
311 .ack = bf561_gpio_ack_irq,
312 .mask = bf561_gpio_mask_irq,
313 .mask_ack = bf561_gpio_mask_ack_irq,
314 .unmask = bf561_gpio_unmask_irq,
315 .set_type = bf561_gpio_irq_type,
316 .startup = bf561_gpio_irq_startup,
317 .shutdown = bf561_gpio_irq_shutdown
318};
319
320static void bf561_demux_gpio_irq(unsigned int inta_irq,
321 struct irq_desc *intb_desc)
322{
323 int irq, flag_d, mask;
324 u16 gpio;
325
326 switch (inta_irq) {
327 case IRQ_PROG0_INTA:
328 irq = IRQ_PF0;
329 break;
330 case IRQ_PROG1_INTA:
331 irq = IRQ_PF16;
332 break;
333 case IRQ_PROG2_INTA:
334 irq = IRQ_PF32;
335 break;
336 default:
337 dump_stack();
338 return;
339 }
340
341 gpio = irq - IRQ_PF0;
342
343 flag_d = get_gpiop_data(gpio);
344 mask = flag_d & (gpio_enabled[gpio_bank(gpio)] &
345 get_gpiop_maska(gpio));
346
347 do {
348 if (mask & 1) {
349 struct irq_desc *desc = irq_desc + irq;
350 desc->handle_irq(irq, desc);
351 }
352 irq++;
353 mask >>= 1;
354 } while (mask);
355
356
357}
358
359#endif /* CONFIG_IRQCHIP_DEMUX_GPIO */
360
361/*
362 * This function should be called during kernel startup to initialize
363 * the BFin IRQ handling routines.
364 */
365int __init init_arch_irq(void)
366{
367 int irq;
368 unsigned long ilat = 0;
369 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
370 bfin_write_SICA_IMASK0(SIC_UNMASK_ALL);
371 bfin_write_SICA_IMASK1(SIC_UNMASK_ALL);
372 SSYNC();
373
374 local_irq_disable();
375
376 init_exception_buff();
377
378#ifndef CONFIG_KGDB
379 bfin_write_EVT0(evt_emulation);
380#endif
381 bfin_write_EVT2(evt_evt2);
382 bfin_write_EVT3(trap);
383 bfin_write_EVT5(evt_ivhw);
384 bfin_write_EVT6(evt_timer);
385 bfin_write_EVT7(evt_evt7);
386 bfin_write_EVT8(evt_evt8);
387 bfin_write_EVT9(evt_evt9);
388 bfin_write_EVT10(evt_evt10);
389 bfin_write_EVT11(evt_evt11);
390 bfin_write_EVT12(evt_evt12);
391 bfin_write_EVT13(evt_evt13);
392 bfin_write_EVT14(evt14_softirq);
393 bfin_write_EVT15(evt_system_call);
394 CSYNC();
395
396 for (irq = 0; irq < SYS_IRQS; irq++) {
397 if (irq <= IRQ_CORETMR)
398 set_irq_chip(irq, &bf561_core_irqchip);
399 else
400 set_irq_chip(irq, &bf561_internal_irqchip);
401#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
402 if ((irq != IRQ_PROG0_INTA) &&
403 (irq != IRQ_PROG1_INTA) && (irq != IRQ_PROG2_INTA)) {
404#endif
405 set_irq_handler(irq, handle_simple_irq);
406#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
407 } else {
408 set_irq_chained_handler(irq, bf561_demux_gpio_irq);
409 }
410#endif
411
412 }
413
414#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
415 for (irq = IRQ_PF0; irq <= IRQ_PF47; irq++) {
416 set_irq_chip(irq, &bf561_gpio_irqchip);
417 /* if configured as edge, this will be changed to handle_edge_irq */
418 set_irq_handler(irq, handle_level_irq);
419 }
420#endif
421 bfin_write_IMASK(0);
422 CSYNC();
423 ilat = bfin_read_ILAT();
424 CSYNC();
425 bfin_write_ILAT(ilat);
426 CSYNC();
427
428 printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
429 /* IMASK=xxx is equivalent to STI xx or irq_flags=xx,
430 * local_irq_enable()
431 */
432 program_IAR();
433 /* Therefore it's better to set up the IARs before interrupts are enabled */
434 search_IAR();
435
436 /* Enable interrupts IVG7-15 */
437 irq_flags = irq_flags | IMASK_IVG15 |
438 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
439 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
440
441 return 0;
442}
443
444#ifdef CONFIG_DO_IRQ_L1
445void do_irq(int vec, struct pt_regs *fp)__attribute__((l1_text));
446#endif
447
448void do_irq(int vec, struct pt_regs *fp)
449{
450 if (vec == EVT_IVTMR_P) {
451 vec = IRQ_CORETMR;
452 } else {
453 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
454 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
455 unsigned long sic_status0, sic_status1;
456
457 SSYNC();
458 sic_status0 = bfin_read_SICA_IMASK0() & bfin_read_SICA_ISR0();
459 sic_status1 = bfin_read_SICA_IMASK1() & bfin_read_SICA_ISR1();
460
461 for (;; ivg++) {
462 if (ivg >= ivg_stop) {
463 atomic_inc(&num_spurious);
464 return;
465 } else if ((sic_status0 & ivg->isrflag0) ||
466 (sic_status1 & ivg->isrflag1))
467 break;
468 }
469 vec = ivg->irqno;
470 }
471 asm_do_IRQ(vec, fp);
472
473#ifdef CONFIG_KGDB
474 kgdb_process_breakpoint();
475#endif
476}
diff --git a/arch/blackfin/mach-common/ints-priority-sc.c b/arch/blackfin/mach-common/ints-priority-sc.c
new file mode 100644
index 000000000000..34b62288ec3c
--- /dev/null
+++ b/arch/blackfin/mach-common/ints-priority-sc.c
@@ -0,0 +1,577 @@
1/*
2 * File: arch/blackfin/mach-common/ints-priority-sc.c
3 * Based on:
4 * Author:
5 *
6 * Created: ?
7 * Description: Set up the interrupt priorities
8 *
9 * Modified:
10 * 1996 Roman Zippel
11 * 1999 D. Jeff Dionne <jeff@uclinux.org>
12 * 2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
13 * 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
14 * 2003 Metrowerks/Motorola
15 * 2003 Bas Vermeulen <bas@buyways.nl>
16 * Copyright 2004-2006 Analog Devices Inc.
17 *
18 * Bugs: Enter bugs at http://blackfin.uclinux.org/
19 *
20 * This program is free software; you can redistribute it and/or modify
21 * it under the terms of the GNU General Public License as published by
22 * the Free Software Foundation; either version 2 of the License, or
23 * (at your option) any later version.
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, see the file COPYING, or write
32 * to the Free Software Foundation, Inc.,
33 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
34 */
35
36#include <linux/module.h>
37#include <linux/kernel_stat.h>
38#include <linux/seq_file.h>
39#include <linux/irq.h>
40#ifdef CONFIG_KGDB
41#include <linux/kgdb.h>
42#endif
43#include <asm/traps.h>
44#include <asm/blackfin.h>
45#include <asm/gpio.h>
46#include <asm/irq_handler.h>
47
48#ifdef BF537_FAMILY
49# define BF537_GENERIC_ERROR_INT_DEMUX
50#else
51# undef BF537_GENERIC_ERROR_INT_DEMUX
52#endif
53
54/*
55 * NOTES:
56 * - we have separated the physical hardware interrupts from the
57 * levels that the Linux kernel sees (see the description in irq.h)
58 *
59 */
60
61unsigned long irq_flags = 0;
62
63/* The number of spurious interrupts */
64atomic_t num_spurious;
65
66struct ivgx {
67 /* irq number for request_irq, available in mach-bf533/irq.h */
68 int irqno;
69 /* corresponding bit in the SIC_ISR register */
70 int isrflag;
71} ivg_table[NR_PERI_INTS];
72
73struct ivg_slice {
74 /* position of first irq in ivg_table for given ivg */
75 struct ivgx *ifirst;
76 struct ivgx *istop;
77} ivg7_13[IVG13 - IVG7 + 1];
78
79static void search_IAR(void);
80
81/*
82 * Search SIC_IAR and fill the tables with the irq values
83 * and their positions in the SIC_ISR register.
84 */
85static void __init search_IAR(void)
86{
87 unsigned ivg, irq_pos = 0;
88 for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
89 int irqn;
90
91 ivg7_13[ivg].istop = ivg7_13[ivg].ifirst =
92 &ivg_table[irq_pos];
93
94 for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
95 int iar_shift = (irqn & 7) * 4;
96 if (ivg ==
97 (0xf &
98 bfin_read32((unsigned long *) SIC_IAR0 +
99 (irqn >> 3)) >> iar_shift)) {
100 ivg_table[irq_pos].irqno = IVG7 + irqn;
101 ivg_table[irq_pos].isrflag = 1 << irqn;
102 ivg7_13[ivg].istop++;
103 irq_pos++;
104 }
105 }
106 }
107}
108
109/*
110 * This is for BF533 internal IRQs
111 */
112
113static void ack_noop(unsigned int irq)
114{
115 /* Dummy function. */
116}
117
118static void bfin_core_mask_irq(unsigned int irq)
119{
120 irq_flags &= ~(1 << irq);
121 if (!irqs_disabled())
122 local_irq_enable();
123}
124
125static void bfin_core_unmask_irq(unsigned int irq)
126{
127 irq_flags |= 1 << irq;
128 /*
129 * If interrupts are enabled, IMASK must contain the same value
130 * as irq_flags. Make sure that invariant holds. If interrupts
131 * are currently disabled we need not do anything; one of the
132 * callers will take care of setting IMASK to the proper value
133 * when reenabling interrupts.
134 * local_irq_enable just does "STI irq_flags", so it's exactly
135 * what we need.
136 */
137 if (!irqs_disabled())
138 local_irq_enable();
139 return;
140}
141
142static void bfin_internal_mask_irq(unsigned int irq)
143{
144 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
145 ~(1 << (irq - (IRQ_CORETMR + 1))));
146 SSYNC();
147}
148
149static void bfin_internal_unmask_irq(unsigned int irq)
150{
151 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
152 (1 << (irq - (IRQ_CORETMR + 1))));
153 SSYNC();
154}
155
156static struct irq_chip bfin_core_irqchip = {
157 .ack = ack_noop,
158 .mask = bfin_core_mask_irq,
159 .unmask = bfin_core_unmask_irq,
160};
161
162static struct irq_chip bfin_internal_irqchip = {
163 .ack = ack_noop,
164 .mask = bfin_internal_mask_irq,
165 .unmask = bfin_internal_unmask_irq,
166};
167
168#ifdef BF537_GENERIC_ERROR_INT_DEMUX
169static int error_int_mask;
170
171static void bfin_generic_error_ack_irq(unsigned int irq)
172{
173
174}
175
176static void bfin_generic_error_mask_irq(unsigned int irq)
177{
178 error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
179
180 if (!error_int_mask) {
181 local_irq_disable();
182 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
183 ~(1 <<
184 (IRQ_GENERIC_ERROR -
185 (IRQ_CORETMR + 1))));
186 SSYNC();
187 local_irq_enable();
188 }
189}
190
191static void bfin_generic_error_unmask_irq(unsigned int irq)
192{
193 local_irq_disable();
194 bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() | 1 <<
195 (IRQ_GENERIC_ERROR - (IRQ_CORETMR + 1)));
196 SSYNC();
197 local_irq_enable();
198
199 error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
200}
201
202static struct irq_chip bfin_generic_error_irqchip = {
203 .ack = bfin_generic_error_ack_irq,
204 .mask = bfin_generic_error_mask_irq,
205 .unmask = bfin_generic_error_unmask_irq,
206};
207
208static void bfin_demux_error_irq(unsigned int int_err_irq,
209 struct irq_desc *intb_desc)
210{
211 int irq = 0;
212
213 SSYNC();
214
215#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
216 if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
217 irq = IRQ_MAC_ERROR;
218 else
219#endif
220 if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
221 irq = IRQ_SPORT0_ERROR;
222 else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
223 irq = IRQ_SPORT1_ERROR;
224 else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
225 irq = IRQ_PPI_ERROR;
226 else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
227 irq = IRQ_CAN_ERROR;
228 else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
229 irq = IRQ_SPI_ERROR;
230 else if ((bfin_read_UART0_IIR() & UART_ERR_MASK_STAT1) &&
231 (bfin_read_UART0_IIR() & UART_ERR_MASK_STAT0))
232 irq = IRQ_UART0_ERROR;
233 else if ((bfin_read_UART1_IIR() & UART_ERR_MASK_STAT1) &&
234 (bfin_read_UART1_IIR() & UART_ERR_MASK_STAT0))
235 irq = IRQ_UART1_ERROR;
236
237 if (irq) {
238 if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR))) {
239 struct irq_desc *desc = irq_desc + irq;
240 desc->handle_irq(irq, desc);
241 } else {
242
243 switch (irq) {
244 case IRQ_PPI_ERROR:
245 bfin_write_PPI_STATUS(PPI_ERR_MASK);
246 break;
247#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
248 case IRQ_MAC_ERROR:
249 bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
250 break;
251#endif
252 case IRQ_SPORT0_ERROR:
253 bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
254 break;
255
256 case IRQ_SPORT1_ERROR:
257 bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
258 break;
259
260 case IRQ_CAN_ERROR:
261 bfin_write_CAN_GIS(CAN_ERR_MASK);
262 break;
263
264 case IRQ_SPI_ERROR:
265 bfin_write_SPI_STAT(SPI_ERR_MASK);
266 break;
267
268 default:
269 break;
270 }
271
272 pr_debug("IRQ %d:"
273 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
274 irq);
275 }
276 } else
277 printk(KERN_ERR
278 "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
279 " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
280 __FUNCTION__, __FILE__, __LINE__);
281
282
283}
284#endif /* BF537_GENERIC_ERROR_INT_DEMUX */
285
286#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
287
288static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)];
289static unsigned short gpio_edge_triggered[gpio_bank(MAX_BLACKFIN_GPIOS)];
290
291static void bfin_gpio_ack_irq(unsigned int irq)
292{
293 u16 gpionr = irq - IRQ_PF0;
294
295 if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) {
296 set_gpio_data(gpionr, 0);
297 SSYNC();
298 }
299}
300
301static void bfin_gpio_mask_ack_irq(unsigned int irq)
302{
303 u16 gpionr = irq - IRQ_PF0;
304
305 if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) {
306 set_gpio_data(gpionr, 0);
307 SSYNC();
308 }
309
310 set_gpio_maska(gpionr, 0);
311 SSYNC();
312}
313
314static void bfin_gpio_mask_irq(unsigned int irq)
315{
316 set_gpio_maska(irq - IRQ_PF0, 0);
317 SSYNC();
318}
319
320static void bfin_gpio_unmask_irq(unsigned int irq)
321{
322 set_gpio_maska(irq - IRQ_PF0, 1);
323 SSYNC();
324}
325
326static unsigned int bfin_gpio_irq_startup(unsigned int irq)
327{
328 unsigned int ret = 0;
329 u16 gpionr = irq - IRQ_PF0;
330
331 if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) {
332 ret = gpio_request(gpionr, NULL);
333 if (ret)
334 return ret;
335 }
336
337 gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
338 bfin_gpio_unmask_irq(irq);
339
340 return ret;
341}
342
343static void bfin_gpio_irq_shutdown(unsigned int irq)
344{
345 bfin_gpio_mask_irq(irq);
346 gpio_free(irq - IRQ_PF0);
347 gpio_enabled[gpio_bank(irq - IRQ_PF0)] &= ~gpio_bit(irq - IRQ_PF0);
348}
349
350static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
351{
352
353 unsigned int ret;
354 u16 gpionr = irq - IRQ_PF0;
355
356 if (type == IRQ_TYPE_PROBE) {
357 /* only probe unenabled GPIO interrupt lines */
358 if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))
359 return 0;
360 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
361 }
362
363 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
364 IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
365 {
366 if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) {
367 ret = gpio_request(gpionr, NULL);
368 if (ret)
369 return ret;
370 }
371
372 gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
373 } else {
374 gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
375 return 0;
376 }
377
378 set_gpio_dir(gpionr, 0);
379 set_gpio_inen(gpionr, 1);
380
381 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
382 gpio_edge_triggered[gpio_bank(gpionr)] |= gpio_bit(gpionr);
383 set_gpio_edge(gpionr, 1);
384 } else {
385 set_gpio_edge(gpionr, 0);
386 gpio_edge_triggered[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
387 }
388
389 if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
390 == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
391 set_gpio_both(gpionr, 1);
392 else
393 set_gpio_both(gpionr, 0);
394
395 if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
396 set_gpio_polar(gpionr, 1); /* low or falling edge denoted by one */
397 else
398 set_gpio_polar(gpionr, 0); /* high or rising edge denoted by zero */
399
400 SSYNC();
401
402 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
403 set_irq_handler(irq, handle_edge_irq);
404 else
405 set_irq_handler(irq, handle_level_irq);
406
407 return 0;
408}
409
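bfin_gpio_irq_type() above boils down to deriving three per-pin settings (edge, both, polar) from the requested trigger flags. The following stand-alone sketch reproduces that decision logic; the flag values are illustrative placeholders for the IRQ_TYPE_* constants, not the real linux/irq.h definitions.

#include <stdbool.h>

#define T_EDGE_RISING	0x1	/* placeholder for IRQ_TYPE_EDGE_RISING, etc. */
#define T_EDGE_FALLING	0x2
#define T_LEVEL_HIGH	0x4
#define T_LEVEL_LOW	0x8

struct gpio_trigger_sketch {
	bool edge;	/* set_gpio_edge():  edge- vs. level-sensitive     */
	bool both;	/* set_gpio_both():  trigger on both edges         */
	bool polar;	/* set_gpio_polar(): 1 = falling edge / active low */
};

static struct gpio_trigger_sketch decode_trigger_sketch(unsigned int type)
{
	struct gpio_trigger_sketch t;

	t.edge  = (type & (T_EDGE_RISING | T_EDGE_FALLING)) != 0;
	t.both  = (type & (T_EDGE_RISING | T_EDGE_FALLING))
		  == (T_EDGE_RISING | T_EDGE_FALLING);
	t.polar = (type & (T_EDGE_FALLING | T_LEVEL_LOW)) != 0;
	return t;
}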
410
411static struct irq_chip bfin_gpio_irqchip = {
412 .ack = bfin_gpio_ack_irq,
413 .mask = bfin_gpio_mask_irq,
414 .mask_ack = bfin_gpio_mask_ack_irq,
415 .unmask = bfin_gpio_unmask_irq,
416 .set_type = bfin_gpio_irq_type,
417 .startup = bfin_gpio_irq_startup,
418 .shutdown = bfin_gpio_irq_shutdown
419};
420
421static void bfin_demux_gpio_irq(unsigned int intb_irq,
422 struct irq_desc *intb_desc)
423{
424 u16 i;
425
426 for (i = 0; i < MAX_BLACKFIN_GPIOS; i+=16) {
427 int irq = IRQ_PF0 + i;
428 int flag_d = get_gpiop_data(i);
429 int mask =
430 flag_d & (gpio_enabled[gpio_bank(i)] &
431 get_gpiop_maska(i));
432
433 while (mask) {
434 if (mask & 1) {
435 struct irq_desc *desc = irq_desc + irq;
436 desc->handle_irq(irq, desc);
437 }
438 irq++;
439 mask >>= 1;
440 }
441 }
442}
443
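The while (mask) loop in bfin_demux_gpio_irq() is a standard walk over the pending-and-enabled bits, dispatching the lowest GPIO first. A minimal stand-alone sketch of that loop (the dispatch callback is an assumption made for illustration):

#include <stdint.h>

typedef void (*gpio_dispatch_sketch_t)(unsigned int irq);

static void demux_gpio_bits_sketch(uint16_t pending, uint16_t enabled,
				   uint16_t maska, unsigned int first_irq,
				   gpio_dispatch_sketch_t dispatch)
{
	uint16_t mask = pending & enabled & maska;	/* pending, enabled and unmasked */
	unsigned int irq = first_irq;

	while (mask) {
		if (mask & 1)
			dispatch(irq);
		irq++;
		mask >>= 1;
	}
}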
444#endif /* CONFIG_IRQCHIP_DEMUX_GPIO */
445
446/*
447 * This function should be called during kernel startup to initialize
448 * the BFin IRQ handling routines.
449 */
450int __init init_arch_irq(void)
451{
452 int irq;
453 unsigned long ilat = 0;
454 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
455 bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
456 SSYNC();
457
458 local_irq_disable();
459
460#ifndef CONFIG_KGDB
461 bfin_write_EVT0(evt_emulation);
462#endif
463 bfin_write_EVT2(evt_evt2);
464 bfin_write_EVT3(trap);
465 bfin_write_EVT5(evt_ivhw);
466 bfin_write_EVT6(evt_timer);
467 bfin_write_EVT7(evt_evt7);
468 bfin_write_EVT8(evt_evt8);
469 bfin_write_EVT9(evt_evt9);
470 bfin_write_EVT10(evt_evt10);
471 bfin_write_EVT11(evt_evt11);
472 bfin_write_EVT12(evt_evt12);
473 bfin_write_EVT13(evt_evt13);
474 bfin_write_EVT14(evt14_softirq);
475 bfin_write_EVT15(evt_system_call);
476 CSYNC();
477
478 for (irq = 0; irq < SYS_IRQS; irq++) {
479 if (irq <= IRQ_CORETMR)
480 set_irq_chip(irq, &bfin_core_irqchip);
481 else
482 set_irq_chip(irq, &bfin_internal_irqchip);
483#ifdef BF537_GENERIC_ERROR_INT_DEMUX
484 if (irq != IRQ_GENERIC_ERROR) {
485#endif
486
487#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
488 if ((irq != IRQ_PROG_INTA) /*PORT F & G MASK_A Interrupt*/
489# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
490 && (irq != IRQ_MAC_RX) /*PORT H MASK_A Interrupt*/
491# endif
492 ) {
493#endif
494 set_irq_handler(irq, handle_simple_irq);
495#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
496 } else {
497 set_irq_chained_handler(irq,
498 bfin_demux_gpio_irq);
499 }
500#endif
501
502#ifdef BF537_GENERIC_ERROR_INT_DEMUX
503 } else {
504 set_irq_handler(irq, bfin_demux_error_irq);
505 }
506#endif
507 }
508#ifdef BF537_GENERIC_ERROR_INT_DEMUX
509 for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++) {
510 set_irq_chip(irq, &bfin_generic_error_irqchip);
511 set_irq_handler(irq, handle_level_irq);
512 }
513#endif
514
515#ifdef CONFIG_IRQCHIP_DEMUX_GPIO
516 for (irq = IRQ_PF0; irq < NR_IRQS; irq++) {
517 set_irq_chip(irq, &bfin_gpio_irqchip);
518 /* if configured as edge, this will be changed to handle_edge_irq */
519 set_irq_handler(irq, handle_level_irq);
520 }
521#endif
522 bfin_write_IMASK(0);
523 CSYNC();
524 ilat = bfin_read_ILAT();
525 CSYNC();
526 bfin_write_ILAT(ilat);
527 CSYNC();
528
529 printk(KERN_INFO
530 "Configuring Blackfin Priority Driven Interrupts\n");
531 /* IMASK=xxx is equivalent to STI xx or irq_flags=xx,
532 * local_irq_enable()
533 */
534 program_IAR();
535 /* Therefore it's better to set up the IARs before interrupts are enabled */
536 search_IAR();
537
538 /* Enable interrupts IVG7-15 */
539 irq_flags = irq_flags | IMASK_IVG15 |
540 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
541 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 |
542 IMASK_IVGHW;
543
544 return 0;
545}
546
547#ifdef CONFIG_DO_IRQ_L1
548void do_irq(int vec, struct pt_regs *fp)__attribute__((l1_text));
549#endif
550
551void do_irq(int vec, struct pt_regs *fp)
552{
553 if (vec == EVT_IVTMR_P) {
554 vec = IRQ_CORETMR;
555 } else {
556 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
557 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
558 unsigned long sic_status;
559
560 SSYNC();
561 sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
562
563 for (;; ivg++) {
564 if (ivg >= ivg_stop) {
565 atomic_inc(&num_spurious);
566 return;
567 } else if (sic_status & ivg->isrflag)
568 break;
569 }
570 vec = ivg->irqno;
571 }
572 asm_do_IRQ(vec, fp);
573
574#ifdef CONFIG_KGDB
575 kgdb_process_breakpoint();
576#endif
577}
diff --git a/arch/blackfin/mach-common/irqpanic.c b/arch/blackfin/mach-common/irqpanic.c
new file mode 100644
index 000000000000..f05e3dadaf33
--- /dev/null
+++ b/arch/blackfin/mach-common/irqpanic.c
@@ -0,0 +1,194 @@
1/*
2 * File: arch/blackfin/mach-common/irqpanic.c
3 * Based on:
4 * Author:
5 *
6 * Created: ?
7 * Description: panic kernel with dump information
8 *
9 * Modified: rgetz - added cache checking code 14Feb06
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/module.h>
31#include <linux/kernel_stat.h>
32#include <linux/sched.h>
33#include <asm/traps.h>
34#include <asm/blackfin.h>
35
36#include "../oprofile/op_blackfin.h"
37
38#ifdef CONFIG_DEBUG_ICACHE_CHECK
39#define L1_ICACHE_START 0xffa10000
40#define L1_ICACHE_END 0xffa13fff
41void irq_panic(int reason, struct pt_regs *regs) __attribute__ ((l1_text));
42#endif
43
44/*
45 * irq_panic - calls panic with string setup
46 */
47asmlinkage void irq_panic(int reason, struct pt_regs *regs)
48{
49 int sig = 0;
50 siginfo_t info;
51
52#ifdef CONFIG_DEBUG_ICACHE_CHECK
53 unsigned int cmd, tag, ca, cache_hi, cache_lo, *pa;
54 unsigned short i, j, die;
55 unsigned int bad[10][6];
56
57 /* Check the entire cache for coherency.
58 * Since printk lives in cacheable memory,
59 * don't call it until everything has been checked.
60 */
61
62 die = 0;
63 i = 0;
64
65 /* check icache */
66
67 for (ca = L1_ICACHE_START; ca <= L1_ICACHE_END && i < 10; ca += 32) {
68
69 /* Grab various address bits for the itest_cmd fields */
70 cmd = (((ca & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */
71 ((ca & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */
72 ((ca & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */
73 0); /* Access Tag, Read access */
74
75 SSYNC();
76 bfin_write_ITEST_COMMAND(cmd);
77 SSYNC();
78 tag = bfin_read_ITEST_DATA0();
79 SSYNC();
80
81 /* if tag is marked as valid, check it */
82 if (tag & 1) {
83 /* Each icache line is read as 4 groups of 64 bits */
84 for (j = 0; j < 32; j += 8) {
85 cmd = ((((ca + j) & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */
86 (((ca + j) & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */
87 (((ca + j) & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */
88 4); /* Access Data, Read access */
89
90 SSYNC();
91 bfin_write_ITEST_COMMAND(cmd);
92 SSYNC();
93
94 cache_hi = bfin_read_ITEST_DATA1();
95 cache_lo = bfin_read_ITEST_DATA0();
96
97 pa = ((unsigned int *)((tag & 0xffffcc00) |
98 ((ca + j) & ~(0xffffcc00))));
99
100 /*
101 * When debugging this, enable the following printk:
102 *
103 * printk("addr: %08x %08x%08x | %08x%08x\n",
104 * ((unsigned int *)((tag & 0xffffcc00) | ((ca+j) & ~(0xffffcc00)))),
105 * cache_hi, cache_lo, *(pa+1), *pa);
106 */
107
108 if (cache_hi != *(pa + 1) || cache_lo != *pa) {
109 /* Since the icache is not working, stay out of it by not printing */
110 die = 1;
111 bad[i][0] = (ca + j);
112 bad[i][1] = cache_hi;
113 bad[i][2] = cache_lo;
114 bad[i][3] = ((tag & 0xffffcc00) |
115 ((ca + j) & ~(0xffffcc00)));
116 bad[i][4] = *(pa + 1);
117 bad[i][5] = *(pa);
118 i++;
119 }
120 }
121 }
122 }
123 if (die) {
124 printk(KERN_EMERG "icache coherency error\n");
125 for (j = 0; j < i; j++) {	/* exactly 'i' bad entries were recorded */
126 printk(KERN_EMERG
127 "cache address : %08x cache value : %08x%08x\n",
128 bad[j][0], bad[j][1], bad[j][2]);
129 printk(KERN_EMERG
130 "physical address: %08x SDRAM value : %08x%08x\n",
131 bad[j][3], bad[j][4], bad[j][5]);
132 }
133 panic("icache coherency error");
134 } else {
135 printk(KERN_EMERG "icache checked, and OK\n");
136 }
137#endif
138
139 printk(KERN_EMERG "\n");
140 printk(KERN_EMERG "Exception: IRQ 0x%x entered\n", reason);
141 printk(KERN_EMERG " code=[0x%08lx], stack frame=0x%08lx, "
142 "bad PC=0x%08lx\n",
143 (unsigned long)regs->seqstat,
144 (unsigned long)regs,
145 (unsigned long)regs->pc);
146 if (reason == 0x5) {
147 printk(KERN_EMERG "----------- HARDWARE ERROR -----------\n");
148
149 /* We only need to check for Hardware Errors here, since other
150 * exceptions are handled in traps.c (MH)
151 */
152 switch (regs->seqstat & SEQSTAT_HWERRCAUSE) {
153 case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR): /* System MMR Error */
154 info.si_code = BUS_ADRALN;
155 sig = SIGBUS;
156 printk(KERN_EMERG HWC_x2);
157 break;
158 case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR): /* External Memory Addressing Error */
159 info.si_code = BUS_ADRERR;
160 sig = SIGBUS;
161 printk(KERN_EMERG HWC_x3);
162 break;
163 case (SEQSTAT_HWERRCAUSE_PERF_FLOW): /* Performance Monitor Overflow */
164 printk(KERN_EMERG HWC_x12);
165 break;
166 case (SEQSTAT_HWERRCAUSE_RAISE_5): /* RAISE 5 instruction */
167 printk(KERN_EMERG HWC_x18);
168 break;
169 default: /* Reserved */
170 printk(KERN_EMERG HWC_default);
171 break;
172 }
173 }
174
175 regs->ipend = bfin_read_IPEND();
176 dump_bfin_regs(regs, (void *)regs->pc);
177 if (0 == (info.si_signo = sig) || 0 == user_mode(regs)) /* in kernelspace */
178 panic("Unhandled IRQ or exceptions!\n");
179 else { /* in userspace */
180 info.si_errno = 0;
181 info.si_addr = (void *)regs->pc;
182 force_sig_info(sig, &info, current);
183 }
184}
185
186#ifdef CONFIG_HARDWARE_PM
187/*
188 * call the performance monitor overflow handler
189 */
190asmlinkage void pm_overflow(int irq, struct pt_regs *regs)
191{
192 pm_overflow_handler(irq, regs);
193}
194#endif
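The icache coherency check in irq_panic() builds its ITEST_COMMAND words inline from the cache address, as the per-field comments describe. Purely for illustration, the sketch below lifts that packing into a helper; the field positions are taken from those comments and should be treated as an assumption rather than a datasheet quote.

#include <stdio.h>

/* Pack an L1 icache address into an ITEST_COMMAND word.
 * access_data = 0 reads the tag array, non-zero reads the data array.
 */
static unsigned int itest_cmd(unsigned int ca, int access_data)
{
	return ((ca & 0x3000) << 4)	/* ca[13:12] -> SBNK[1:0]         */
	     | ((ca & 0x0c00) << 16)	/* ca[11:10] -> WAYSEL[1:0]       */
	     |  (ca & 0x03f8)		/* ca[09:03] -> SET[4:0], DW[1:0] */
	     | (access_data ? 4 : 0);	/* bit 2: data vs. tag access     */
}

int main(void)
{
	/* first line of the L1_ICACHE_START region checked above */
	printf("tag  cmd: 0x%08x\n", itest_cmd(0xffa10000, 0));
	printf("data cmd: 0x%08x\n", itest_cmd(0xffa10008, 1));
	return 0;
}

Tag reads (bit 2 clear) return the line's tag in ITEST_DATA0, while data reads (bit 2 set) return 64 bits of the line in ITEST_DATA1/ITEST_DATA0, which is what the loop above compares against SDRAM.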
diff --git a/arch/blackfin/mach-common/lock.S b/arch/blackfin/mach-common/lock.S
new file mode 100644
index 000000000000..2cbb15b33925
--- /dev/null
+++ b/arch/blackfin/mach-common/lock.S
@@ -0,0 +1,204 @@
1/*
2 * File: arch/blackfin/mach-common/lock.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created: ?
7 * Description: kernel locks
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/linkage.h>
31#include <asm/cplb.h>
32#include <asm/blackfin.h>
33
34.text
35
36#ifdef CONFIG_BLKFIN_CACHE_LOCK
37
38/* On entry, it is assumed that:
39 * R0 - Which way to be locked
40 */
41
42ENTRY(_cache_grab_lock)
43
44 [--SP]=( R7:0,P5:0 );
45
46 P1.H = (IMEM_CONTROL >> 16);
47 P1.L = (IMEM_CONTROL & 0xFFFF);
48 P5.H = (ICPLB_ADDR0 >> 16);
49 P5.L = (ICPLB_ADDR0 & 0xFFFF);
50 P4.H = (ICPLB_DATA0 >> 16);
51 P4.L = (ICPLB_DATA0 & 0xFFFF);
52 R7 = R0;
53
54 /* In case the code of interest already resides in the cache,
55 * invalidate the entire icache first
56 * (invalidate_entire_icache).
57 */
58
59 SP += -12;
60 [--SP] = RETS;
61 CALL _invalidate_entire_icache;
62 RETS = [SP++];
63 SP += 12;
64
65 /* Disable interrupts */
66
67 CLI R3;
68
69.LLOCK_WAY:
70
71 /* Way0 - 0xFFA133E0
72 * Way1 - 0xFFA137E0
73 * Way2 - 0xFFA13BE0 Total Way Size = 4K
74 * Way3 - 0xFFA13FE0
75 */
76
77 /* Example procedure: set the locks for the other ways by setting ILOC[3:1],
78 * so that only Way0 of the instruction cache can now be
79 * replaced by new code.
80 */
81
82 R5 = R7;
83 CC = BITTST(R7,0);
84 IF CC JUMP .LCLEAR1;
85 R7 = 0;
86 BITSET(R7,0);
87 JUMP .LDONE1;
88
89.LCLEAR1:
90 R7 = 0;
91 BITCLR(R7,0);
92.LDONE1: R4 = R7 << 3;
93 R7 = [P1];
94 R7 = R7 | R4;
95 SSYNC; /* SSYNC required writing to IMEM_CONTROL. */
96 .align 8;
97 [P1] = R7;
98 SSYNC;
99
100 R7 = R5;
101 CC = BITTST(R7,1);
102 IF CC JUMP .LCLEAR2;
103 R7 = 0;
104 BITSET(R7,1);
105 JUMP .LDONE2;
106
107.LCLEAR2:
108 R7 = 0;
109 BITCLR(R7,1);
110.LDONE2: R4 = R7 << 3;
111 R7 = [P1];
112 R7 = R7 | R4;
113 SSYNC; /* SSYNC required writing to IMEM_CONTROL. */
114 .align 8;
115 [P1] = R7;
116 SSYNC;
117
118 R7 = R5;
119 CC = BITTST(R7,2);
120 IF CC JUMP .LCLEAR3;
121 R7 = 0;
122 BITSET(R7,2);
123 JUMP .LDONE3;
124.LCLEAR3:
125 R7 = 0;
126 BITCLR(R7,2);
127.LDONE3: R4 = R7 << 3;
128 R7 = [P1];
129 R7 = R7 | R4;
130 SSYNC; /* SSYNC required writing to IMEM_CONTROL. */
131 .align 8;
132 [P1] = R7;
133 SSYNC;
134
135
136 R7 = R5;
137 CC = BITTST(R7,3);
138 IF CC JUMP .LCLEAR4;
139 R7 = 0;
140 BITSET(R7,3);
141 JUMP .LDONE4;
142.LCLEAR4:
143 R7 = 0;
144 BITCLR(R7,3);
145.LDONE4: R4 = R7 << 3;
146 R7 = [P1];
147 R7 = R7 | R4;
148 SSYNC; /* SSYNC required writing to IMEM_CONTROL. */
149 .align 8;
150 [P1] = R7;
151 SSYNC;
152
153 STI R3;
154
155 ( R7:0,P5:0 ) = [SP++];
156
157 RTS;
158
159/* After the critical code has executed, it is locked into
160 * the cache way. Now we need to set ILOC.
161 *
162 * R0 - Which way to be locked
163 */
164
165ENTRY(_cache_lock)
166
167 [--SP]=( R7:0,P5:0 );
168
169 P1.H = (IMEM_CONTROL >> 16);
170 P1.L = (IMEM_CONTROL & 0xFFFF);
171
172 /* Disable interrupts */
173 CLI R3;
174
175 R7 = [P1];
176 R2 = 0xFFFFFF87 (X);
177 R7 = R7 & R2;
178 R0 = R0 << 3;
179 R7 = R0 | R7;
180 SSYNC; /* SSYNC required writing to IMEM_CONTROL. */
181 .align 8;
182 [P1] = R7;
183 SSYNC;
184 /* Re-enable interrupts */
185 STI R3;
186
187 ( R7:0,P5:0 ) = [SP++];
188 RTS;
189
190#endif /* CONFIG_BLKFIN_CACHE_LOCK */
191
192/* Return the ILOC bits of IMEM_CONTROL
193 */
194
195ENTRY(_read_iloc)
196
197 P1.H = (IMEM_CONTROL >> 16);
198 P1.L = (IMEM_CONTROL & 0xFFFF);
199 R1 = 0xF;
200 R0 = [P1];
201 R0 = R0 >> 3;
202 R0 = R0 & R1;
203
204 RTS;
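_cache_lock and _read_iloc above both manipulate the ILOC way-lock field of IMEM_CONTROL, which the assembly places at bits [6:3] (the mask 0xFFFFFF87 is ~(0xF << 3), and _read_iloc shifts right by 3 and masks with 0xF). The C model below mirrors that bit arithmetic on a plain variable; it is a sketch of the register math only, and the CLI/SSYNC/.align 8/STI sequence around the real MMR write is essential on hardware and is not reproduced here.

#include <stdio.h>

static unsigned int imem_control = 0x00000001;	/* made-up initial value */

/* Model of _cache_lock: replace ILOC[3:0] (bits [6:3]) with way_mask. */
static void cache_lock_model(unsigned int way_mask)
{
	imem_control = (imem_control & 0xFFFFFF87) | ((way_mask & 0xF) << 3);
}

/* Model of _read_iloc: extract ILOC[3:0] from bits [6:3]. */
static unsigned int read_iloc_model(void)
{
	return (imem_control >> 3) & 0xF;
}

int main(void)
{
	cache_lock_model(0x3);				/* lock ways 0 and 1 */
	printf("ILOC = 0x%x\n", read_iloc_model());	/* prints "ILOC = 0x3" */
	return 0;
}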
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
new file mode 100644
index 000000000000..deb27272c658
--- /dev/null
+++ b/arch/blackfin/mach-common/pm.c
@@ -0,0 +1,181 @@
1/*
2 * File: arch/blackfin/mach-common/pm.c
3 * Based on: arm/mach-omap/pm.c
4 * Author: Cliff Brake <cbrake@accelent.com> Copyright (c) 2001
5 *
6 * Created: 2001
7 * Description: Power management for the bfin
8 *
9 * Modified: Nicolas Pitre - PXA250 support
10 * Copyright (c) 2002 Monta Vista Software, Inc.
11 * David Singleton - OMAP1510
12 * Copyright (c) 2002 Monta Vista Software, Inc.
13 * Dirk Behme <dirk.behme@de.bosch.com> - OMAP1510/1610
14 * Copyright 2004
15 * Copyright 2004-2006 Analog Devices Inc.
16 *
17 * Bugs: Enter bugs at http://blackfin.uclinux.org/
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, see the file COPYING, or write
31 * to the Free Software Foundation, Inc.,
32 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
33 */
34
35#include <linux/pm.h>
36#include <linux/sched.h>
37#include <linux/proc_fs.h>
38
39#include <asm/io.h>
40#include <asm/dpmc.h>
41#include <asm/irq.h>
42
43
44#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_H
45#define WAKEUP_TYPE PM_WAKE_HIGH
46#endif
47
48#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_L
49#define WAKEUP_TYPE PM_WAKE_LOW
50#endif
51
52#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_F
53#define WAKEUP_TYPE PM_WAKE_FALLING
54#endif
55
56#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_R
57#define WAKEUP_TYPE PM_WAKE_RISING
58#endif
59
60#ifdef CONFIG_PM_WAKEUP_GPIO_POLAR_EDGE_B
61#define WAKEUP_TYPE PM_WAKE_BOTH_EDGES
62#endif
63
64void bfin_pm_suspend_standby_enter(void)
65{
66#ifdef CONFIG_PM_WAKEUP_BY_GPIO
67 gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE);
68#endif
69
70#if defined(CONFIG_PM_WAKEUP_BY_GPIO) || defined(CONFIG_PM_WAKEUP_GPIO_API)
71 {
72 u32 flags;
73
74 local_irq_save(flags);
75
76 sleep_deeper(gpio_pm_setup()); /* go to sleep */
77
78 gpio_pm_restore();
79
80 bfin_write_SIC_IWR(IWR_ENABLE_ALL);
81
82 local_irq_restore(flags);
83 }
84#endif
85
86#if defined(CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR)
87 sleep_deeper(CONFIG_PM_WAKEUP_SIC_IWR);
88 bfin_write_SIC_IWR(IWR_ENABLE_ALL);
89#endif /* CONFIG_PM_WAKEUP_GPIO_BY_SIC_IWR */
90}
91
92
93/*
94 * bfin_pm_prepare - Do preliminary suspend work.
95 * @state: suspend state we're entering.
96 *
97 */
98static int bfin_pm_prepare(suspend_state_t state)
99{
100 int error = 0;
101
102 switch (state) {
103 case PM_SUSPEND_STANDBY:
104 break;
105 case PM_SUSPEND_MEM:
106 return -ENOTSUPP;
107
108 case PM_SUSPEND_DISK:
109 return -ENOTSUPP;
110
111 default:
112 return -EINVAL;
113 }
114
115 return error;
116}
117
118/*
119 * bfin_pm_enter - Actually enter a sleep state.
120 * @state: State we're entering.
121 *
122 */
123static int bfin_pm_enter(suspend_state_t state)
124{
125 switch (state) {
126 case PM_SUSPEND_STANDBY:
127 bfin_pm_suspend_standby_enter();
128 break;
129 case PM_SUSPEND_MEM:
130 return -ENOTSUPP;
131
132 case PM_SUSPEND_DISK:
133 return -ENOTSUPP;
134
135 default:
136 return -EINVAL;
137 }
138
139 return 0;
140}
141
142/*
143 * bfin_pm_finish - Finish up suspend sequence.
144 * @state: State we're coming out of.
145 *
146 * This is called after we wake back up (or if entering the sleep state
147 * failed).
148 */
149static int bfin_pm_finish(suspend_state_t state)
150{
151 switch (state) {
152 case PM_SUSPEND_STANDBY:
153 break;
154
155 case PM_SUSPEND_MEM:
156 return -ENOTSUPP;
157
158 case PM_SUSPEND_DISK:
159 return -ENOTSUPP;
160
161 default:
162 return -EINVAL;
163 }
164
165 return 0;
166}
167
168struct pm_ops bfin_pm_ops = {
169 .pm_disk_mode = PM_DISK_PLATFORM,
170 .prepare = bfin_pm_prepare,
171 .enter = bfin_pm_enter,
172 .finish = bfin_pm_finish,
173};
174
175static int __init bfin_pm_init(void)
176{
177 pm_set_ops(&bfin_pm_ops);
178 return 0;
179}
180
181__initcall(bfin_pm_init);
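The pm_ops structure above wires three callbacks into the kernel's suspend path. The toy model below shows the call order implied by that structure and its comments: prepare() first, then enter() to actually sleep, then finish() on the way back out (the bfin_pm_finish comment notes it also runs if entering the sleep state failed). The enum, the helpers and the userspace EOPNOTSUPP standing in for the kernel's ENOTSUPP are illustrative assumptions, not kernel code.

#include <stdio.h>
#include <errno.h>

enum state { STANDBY, MEM, DISK };

static int prepare(enum state s) { return s == STANDBY ? 0 : -EOPNOTSUPP; }
static int enter(enum state s)   { (void)s; puts("sleeping..."); return 0; }
static void finish(enum state s) { (void)s; puts("resumed"); }

/* Rough call order for one suspend request. */
static int suspend(enum state s)
{
	int error = prepare(s);
	if (error)
		return error;		/* unsupported state: bail out early */
	error = enter(s);
	finish(s);			/* runs whether or not enter() failed */
	return error;
}

int main(void)
{
	printf("standby -> %d\n", suspend(STANDBY));	/* sleeps, then resumes */
	printf("mem     -> %d\n", suspend(MEM));	/* rejected before sleeping */
	return 0;
}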