author    Linus Torvalds <torvalds@linux-foundation.org>  2009-01-07 15:00:25 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-01-07 15:00:25 -0500
commit    5bb47b9ff3d16d40f8d45380b373497a545fa280 (patch)
tree      e13dd34395473342dc75eff5cbaf5b1ea753631c /arch/blackfin/kernel
parent    2f2408a88cf8fa43febfd7fb5783e61b2937b0f9 (diff)
parent    06af15e086e39a5a2a2413973a64af8e10122f28 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/blackfin-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/blackfin-2.6: (171 commits)
  Blackfin arch: fix bug - BF527 0.2 silicon has different CPUID (DSPID) value
  Blackfin arch: Enlarge flash partition for kernel for bf533/bf537 boards
  Blackfin arch: fix bug: kernel crash when enable SDIO host driver
  Blackfin arch: Print FP at level KERN_NOTICE
  Blackfin arch: drop ad73311 test code
  Blackfin arch: update board default configs
  Blackfin arch: Set PB4 as the default irq for bf548 board v1.4+.
  Blackfin arch: fix typo in early printk bit size processing
  Blackfin arch: enable reprogram cclk and sclk for bf518f-ezbrd
  Blackfin arch: add SDIO host driver platform data
  Blackfin arch: fix bug - kernel stops at initial console
  Blackfin arch: fix bug - kernel crash after config IP for ethernet port
  Blackfin arch: add sdh support for bf518f-ezbrd
  Blackfin arch: fix bug - kernel detects BF532 incorrectly
  Blackfin arch: add () to avoid warnings from gcc
  Blackfin arch: change HWTRACE Kconfig and set it on default
  Blackfin arch: Clean oprofile build path for blackfin
  Blackfin arch: remove hardware PM code, oprofile not use it
  Blackfin arch: rewrite get_sclk()/get_vco()
  Blackfin arch: cleanup and unify the ins functions
  ...
Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/Makefile               |    7
-rw-r--r--  arch/blackfin/kernel/asm-offsets.c          |   29
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c         |  936
-rw-r--r--  arch/blackfin/kernel/bfin_gpio.c            |  448
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c           |  101
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/Makefile      |    6
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cacheinit.c   |    4
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinfo.c    |  136
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c    |   48
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c     |  119
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/Makefile    |    8
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cacheinit.c |   25
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbhdlr.S  |  130
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinfo.c  |  195
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c  |  521
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbmgr.S   |  646
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbmgr.c   |  283
-rw-r--r--  arch/blackfin/kernel/cplbinfo.c             |  177
-rw-r--r--  arch/blackfin/kernel/early_printk.c         |    4
-rw-r--r--  arch/blackfin/kernel/entry.S                |    5
-rw-r--r--  arch/blackfin/kernel/fixed_code.S           |    6
-rw-r--r--  arch/blackfin/kernel/ipipe.c                |  428
-rw-r--r--  arch/blackfin/kernel/irqchip.c              |   46
-rw-r--r--  arch/blackfin/kernel/kgdb.c                 |  125
-rw-r--r--  arch/blackfin/kernel/kgdb_test.c            |  123
-rw-r--r--  arch/blackfin/kernel/mcount.S               |   70
-rw-r--r--  arch/blackfin/kernel/module.c               |  152
-rw-r--r--  arch/blackfin/kernel/process.c              |   32
-rw-r--r--  arch/blackfin/kernel/ptrace.c               |   17
-rw-r--r--  arch/blackfin/kernel/reboot.c               |   24
-rw-r--r--  arch/blackfin/kernel/setup.c                |  218
-rw-r--r--  arch/blackfin/kernel/time.c                 |  162
-rw-r--r--  arch/blackfin/kernel/traps.c                |   75
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S          |    9
34 files changed, 2297 insertions, 3018 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 606adc78aa85..38a233374f07 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -7,7 +7,7 @@ extra-y := init_task.o vmlinux.lds
 obj-y := \
 	entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \
 	sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \
-	fixed_code.o reboot.o bfin_gpio.o
+	fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o
 
 ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y)
     obj-y += time-ts.o
@@ -15,8 +15,11 @@ else
     obj-y += time.o
 endif
 
+obj-$(CONFIG_IPIPE)                  += ipipe.o
+obj-$(CONFIG_IPIPE_TRACE_MCOUNT)     += mcount.o
 obj-$(CONFIG_BFIN_GPTIMERS)          += gptimers.o
+obj-$(CONFIG_CPLB_INFO)              += cplbinfo.o
 obj-$(CONFIG_MODULES)                += module.o
-obj-$(CONFIG_BFIN_DMA_5XX)           += bfin_dma_5xx.o
 obj-$(CONFIG_KGDB)                   += kgdb.o
+obj-$(CONFIG_KGDB_TESTCASE)          += kgdb_test.o
 obj-$(CONFIG_EARLY_PRINTK)           += early_printk.o
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index 9bb85dd5ccb3..b5df9459d6d5 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -56,6 +56,9 @@ int main(void)
 	/* offsets into the thread struct */
 	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+	DEFINE(THREAD_SR, offsetof(struct thread_struct, seqstat));
+	DEFINE(PT_SR, offsetof(struct thread_struct, seqstat));
+	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
 	DEFINE(THREAD_PC, offsetof(struct thread_struct, pc));
 	DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
 
@@ -128,5 +131,31 @@ int main(void)
 	DEFINE(SIGSEGV, SIGSEGV);
 	DEFINE(SIGTRAP, SIGTRAP);
 
+	/* PDA management (in L1 scratchpad) */
+	DEFINE(PDA_SYSCFG, offsetof(struct blackfin_pda, syscfg));
+#ifdef CONFIG_SMP
+	DEFINE(PDA_IRQFLAGS, offsetof(struct blackfin_pda, imask));
+#endif
+	DEFINE(PDA_IPDT, offsetof(struct blackfin_pda, ipdt));
+	DEFINE(PDA_IPDT_SWAPCOUNT, offsetof(struct blackfin_pda, ipdt_swapcount));
+	DEFINE(PDA_DPDT, offsetof(struct blackfin_pda, dpdt));
+	DEFINE(PDA_DPDT_SWAPCOUNT, offsetof(struct blackfin_pda, dpdt_swapcount));
+	DEFINE(PDA_EXIPTR, offsetof(struct blackfin_pda, ex_iptr));
+	DEFINE(PDA_EXOPTR, offsetof(struct blackfin_pda, ex_optr));
+	DEFINE(PDA_EXBUF, offsetof(struct blackfin_pda, ex_buf));
+	DEFINE(PDA_EXIMASK, offsetof(struct blackfin_pda, ex_imask));
+	DEFINE(PDA_EXSTACK, offsetof(struct blackfin_pda, ex_stack));
+#ifdef ANOMALY_05000261
+	DEFINE(PDA_LFRETX, offsetof(struct blackfin_pda, last_cplb_fault_retx));
+#endif
+	DEFINE(PDA_DCPLB, offsetof(struct blackfin_pda, dcplb_fault_addr));
+	DEFINE(PDA_ICPLB, offsetof(struct blackfin_pda, icplb_fault_addr));
+	DEFINE(PDA_RETX, offsetof(struct blackfin_pda, retx));
+	DEFINE(PDA_SEQSTAT, offsetof(struct blackfin_pda, seqstat));
+#ifdef CONFIG_SMP
+	/* Inter-core lock (in L2 SRAM) */
+	DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
+#endif
+
 	return 0;
 }
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 339293d677cc..07e02c0d1c07 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -1,63 +1,27 @@
 /*
- * File:         arch/blackfin/kernel/bfin_dma_5xx.c
- * Based on:
- * Author:
+ * bfin_dma_5xx.c - Blackfin DMA implementation
  *
- * Created:
- * Description:  This file contains the simple DMA Implementation for Blackfin
- *
- * Modified:
- *               Copyright 2004-2006 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
  */
 
 #include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/param.h>
+#include <linux/proc_fs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
 
 #include <asm/blackfin.h>
-#include <asm/dma.h>
 #include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
 
-/* Remove unused code not exported by symbol or internally called */
-#define REMOVE_DEAD_CODE
-
-/**************************************************************************
- * Global Variables
-***************************************************************************/
-
-static struct dma_channel dma_ch[MAX_BLACKFIN_DMA_CHANNEL];
-
-/*------------------------------------------------------------------------------
- * Set the Buffer Clear bit in the Configuration register of specific DMA
- * channel. This will stop the descriptor based DMA operation.
- *-----------------------------------------------------------------------------*/
-static void clear_dma_buffer(unsigned int channel)
-{
-	dma_ch[channel].regs->cfg |= RESTART;
-	SSYNC();
-	dma_ch[channel].regs->cfg &= ~RESTART;
-	SSYNC();
-}
+struct dma_channel dma_ch[MAX_DMA_CHANNELS];
+EXPORT_SYMBOL(dma_ch);
 
 static int __init blackfin_dma_init(void)
 {
@@ -65,32 +29,67 @@ static int __init blackfin_dma_init(void)
 
 	printk(KERN_INFO "Blackfin DMA Controller\n");
 
-	for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) {
+	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
 		dma_ch[i].chan_status = DMA_CHANNEL_FREE;
 		dma_ch[i].regs = dma_io_base_addr[i];
 		mutex_init(&(dma_ch[i].dmalock));
 	}
 	/* Mark MEMDMA Channel 0 as requested since we're using it internally */
-	dma_ch[CH_MEM_STREAM0_DEST].chan_status = DMA_CHANNEL_REQUESTED;
-	dma_ch[CH_MEM_STREAM0_SRC].chan_status = DMA_CHANNEL_REQUESTED;
+	request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
+	request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy");
 
 #if defined(CONFIG_DEB_DMA_URGENT)
 	bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
 			 | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT);
 #endif
+
 	return 0;
 }
-
 arch_initcall(blackfin_dma_init);
 
-/*------------------------------------------------------------------------------
- * Request the specific DMA channel from the system.
- *-----------------------------------------------------------------------------*/
-int request_dma(unsigned int channel, char *device_id)
+#ifdef CONFIG_PROC_FS
+static int proc_dma_show(struct seq_file *m, void *v)
 {
+	int i;
+
+	for (i = 0; i < MAX_DMA_CHANNELS; ++i)
+		if (dma_ch[i].chan_status != DMA_CHANNEL_FREE)
+			seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);
+
+	return 0;
+}
 
+static int proc_dma_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, proc_dma_show, NULL);
+}
+
+static const struct file_operations proc_dma_operations = {
+	.open		= proc_dma_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int __init proc_dma_init(void)
+{
+	return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL;
+}
+late_initcall(proc_dma_init);
+#endif
+
+/**
+ *	request_dma - request a DMA channel
+ *
+ * Request the specific DMA channel from the system if it's available.
+ */
+int request_dma(unsigned int channel, const char *device_id)
+{
 	pr_debug("request_dma() : BEGIN \n");
 
+	if (device_id == NULL)
+		printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);
+
 #if defined(CONFIG_BF561) && ANOMALY_05000182
 	if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) {
 		if (get_cclk() > 500000000) {
@@ -129,60 +128,63 @@ int request_dma(unsigned int channel, char *device_id)
 #endif
 
 	dma_ch[channel].device_id = device_id;
-	dma_ch[channel].irq_callback = NULL;
+	dma_ch[channel].irq = 0;
 
 	/* This is to be enabled by putting a restriction -
 	 * you have to request DMA, before doing any operations on
 	 * descriptor/channel
 	 */
 	pr_debug("request_dma() : END \n");
-	return channel;
+	return 0;
 }
 EXPORT_SYMBOL(request_dma);
 
-int set_dma_callback(unsigned int channel, dma_interrupt_t callback, void *data)
+int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
 {
-	int ret_irq = 0;
-
 	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
+	       && channel < MAX_DMA_CHANNELS));
 
 	if (callback != NULL) {
-		int ret_val;
-		ret_irq = channel2irq(channel);
+		int ret;
+		unsigned int irq = channel2irq(channel);
 
-		dma_ch[channel].data = data;
+		ret = request_irq(irq, callback, IRQF_DISABLED,
+				  dma_ch[channel].device_id, data);
+		if (ret)
+			return ret;
 
-		ret_val =
-		    request_irq(ret_irq, (void *)callback, IRQF_DISABLED,
-				dma_ch[channel].device_id, data);
-		if (ret_val) {
-			printk(KERN_NOTICE
-			       "Request irq in DMA engine failed.\n");
-			return -EPERM;
-		}
-		dma_ch[channel].irq_callback = callback;
+		dma_ch[channel].irq = irq;
+		dma_ch[channel].data = data;
 	}
 	return 0;
 }
 EXPORT_SYMBOL(set_dma_callback);
 
-void free_dma(unsigned int channel)
+/**
+ *	clear_dma_buffer - clear DMA fifos for specified channel
+ *
+ * Set the Buffer Clear bit in the Configuration register of specific DMA
+ * channel. This will stop the descriptor based DMA operation.
+ */
+static void clear_dma_buffer(unsigned int channel)
 {
-	int ret_irq;
+	dma_ch[channel].regs->cfg |= RESTART;
+	SSYNC();
+	dma_ch[channel].regs->cfg &= ~RESTART;
+}
 
+void free_dma(unsigned int channel)
+{
 	pr_debug("freedma() : BEGIN \n");
 	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
+	       && channel < MAX_DMA_CHANNELS));
 
 	/* Halt the DMA */
 	disable_dma(channel);
 	clear_dma_buffer(channel);
 
-	if (dma_ch[channel].irq_callback != NULL) {
-		ret_irq = channel2irq(channel);
-		free_irq(ret_irq, dma_ch[channel].data);
-	}
+	if (dma_ch[channel].irq)
+		free_irq(dma_ch[channel].irq, dma_ch[channel].data);
 
 	/* Clear the DMA Variable in the Channel */
 	mutex_lock(&(dma_ch[channel].dmalock));
@@ -193,294 +195,15 @@ void free_dma(unsigned int channel)
 }
 EXPORT_SYMBOL(free_dma);
 
-void dma_enable_irq(unsigned int channel)
-{
-	int ret_irq;
-
-	pr_debug("dma_enable_irq() : BEGIN \n");
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	ret_irq = channel2irq(channel);
-	enable_irq(ret_irq);
-}
-EXPORT_SYMBOL(dma_enable_irq);
-
-void dma_disable_irq(unsigned int channel)
-{
-	int ret_irq;
-
-	pr_debug("dma_disable_irq() : BEGIN \n");
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	ret_irq = channel2irq(channel);
-	disable_irq(ret_irq);
-}
-EXPORT_SYMBOL(dma_disable_irq);
-
-int dma_channel_active(unsigned int channel)
-{
-	if (dma_ch[channel].chan_status == DMA_CHANNEL_FREE) {
-		return 0;
-	} else {
-		return 1;
-	}
-}
-EXPORT_SYMBOL(dma_channel_active);
-
-/*------------------------------------------------------------------------------
-*	stop the specific DMA channel.
-*-----------------------------------------------------------------------------*/
-void disable_dma(unsigned int channel)
-{
-	pr_debug("stop_dma() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->cfg &= ~DMAEN;	/* Clean the enable bit */
-	SSYNC();
-	dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
-	/* Needs to be enabled Later */
-	pr_debug("stop_dma() : END \n");
-	return;
-}
-EXPORT_SYMBOL(disable_dma);
-
-void enable_dma(unsigned int channel)
-{
-	pr_debug("enable_dma() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].chan_status = DMA_CHANNEL_ENABLED;
-	dma_ch[channel].regs->curr_x_count = 0;
-	dma_ch[channel].regs->curr_y_count = 0;
-
-	dma_ch[channel].regs->cfg |= DMAEN;	/* Set the enable bit */
-	SSYNC();
-	pr_debug("enable_dma() : END \n");
-	return;
-}
-EXPORT_SYMBOL(enable_dma);
-
-/*------------------------------------------------------------------------------
-*		Set the Start Address register for the specific DMA channel
-*		This function can be used for register based DMA,
-*		to setup the start address
-*		addr:		Starting address of the DMA Data to be transferred.
-*-----------------------------------------------------------------------------*/
-void set_dma_start_addr(unsigned int channel, unsigned long addr)
-{
-	pr_debug("set_dma_start_addr() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->start_addr = addr;
-	SSYNC();
-	pr_debug("set_dma_start_addr() : END\n");
-}
-EXPORT_SYMBOL(set_dma_start_addr);
-
-void set_dma_next_desc_addr(unsigned int channel, unsigned long addr)
-{
-	pr_debug("set_dma_next_desc_addr() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->next_desc_ptr = addr;
-	SSYNC();
-	pr_debug("set_dma_next_desc_addr() : END\n");
-}
-EXPORT_SYMBOL(set_dma_next_desc_addr);
-
-void set_dma_curr_desc_addr(unsigned int channel, unsigned long addr)
-{
-	pr_debug("set_dma_curr_desc_addr() : BEGIN \n");
-
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->curr_desc_ptr = addr;
-	SSYNC();
-	pr_debug("set_dma_curr_desc_addr() : END\n");
-}
-EXPORT_SYMBOL(set_dma_curr_desc_addr);
-
-void set_dma_x_count(unsigned int channel, unsigned short x_count)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->x_count = x_count;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_x_count);
-
-void set_dma_y_count(unsigned int channel, unsigned short y_count)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->y_count = y_count;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_y_count);
-
-void set_dma_x_modify(unsigned int channel, short x_modify)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->x_modify = x_modify;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_x_modify);
-
-void set_dma_y_modify(unsigned int channel, short y_modify)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->y_modify = y_modify;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_y_modify);
-
-void set_dma_config(unsigned int channel, unsigned short config)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->cfg = config;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_config);
-
-unsigned short
-set_bfin_dma_config(char direction, char flow_mode,
-		    char intr_mode, char dma_mode, char width, char syncmode)
-{
-	unsigned short config;
-
-	config =
-	    ((direction << 1) | (width << 2) | (dma_mode << 4) |
-	     (intr_mode << 6) | (flow_mode << 12) | (syncmode << 5));
-	return config;
-}
-EXPORT_SYMBOL(set_bfin_dma_config);
-
-void set_dma_sg(unsigned int channel, struct dmasg *sg, int nr_sg)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->cfg |= ((nr_sg & 0x0F) << 8);
-
-	dma_ch[channel].regs->next_desc_ptr = (unsigned int)sg;
-
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_sg);
-
-void set_dma_curr_addr(unsigned int channel, unsigned long addr)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	dma_ch[channel].regs->curr_addr_ptr = addr;
-	SSYNC();
-}
-EXPORT_SYMBOL(set_dma_curr_addr);
-
-/*------------------------------------------------------------------------------
- * Get the DMA status of a specific DMA channel from the system.
- *-----------------------------------------------------------------------------*/
-unsigned short get_dma_curr_irqstat(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->irq_status;
-}
-EXPORT_SYMBOL(get_dma_curr_irqstat);
-
-/*------------------------------------------------------------------------------
- * Clear the DMA_DONE bit in DMA status. Stop the DMA completion interrupt.
- *-----------------------------------------------------------------------------*/
-void clear_dma_irqstat(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-	dma_ch[channel].regs->irq_status |= 3;
-}
-EXPORT_SYMBOL(clear_dma_irqstat);
-
-/*------------------------------------------------------------------------------
- * Get current DMA xcount of a specific DMA channel from the system.
- *-----------------------------------------------------------------------------*/
-unsigned short get_dma_curr_xcount(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->curr_x_count;
-}
-EXPORT_SYMBOL(get_dma_curr_xcount);
-
-/*------------------------------------------------------------------------------
- * Get current DMA ycount of a specific DMA channel from the system.
- *-----------------------------------------------------------------------------*/
-unsigned short get_dma_curr_ycount(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->curr_y_count;
-}
-EXPORT_SYMBOL(get_dma_curr_ycount);
-
-unsigned long get_dma_next_desc_ptr(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->next_desc_ptr;
-}
-EXPORT_SYMBOL(get_dma_next_desc_ptr);
-
-unsigned long get_dma_curr_desc_ptr(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->curr_desc_ptr;
-}
-EXPORT_SYMBOL(get_dma_curr_desc_ptr);
-
-unsigned long get_dma_curr_addr(unsigned int channel)
-{
-	BUG_ON(!(dma_ch[channel].chan_status != DMA_CHANNEL_FREE
-	       && channel < MAX_BLACKFIN_DMA_CHANNEL));
-
-	return dma_ch[channel].regs->curr_addr_ptr;
-}
-EXPORT_SYMBOL(get_dma_curr_addr);
-
 #ifdef CONFIG_PM
+# ifndef MAX_DMA_SUSPEND_CHANNELS
+# define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS
+# endif
 int blackfin_dma_suspend(void)
 {
 	int i;
 
-#ifdef CONFIG_BF561	/* IMDMA channels doesn't have a PERIPHERAL_MAP */
-	for (i = 0; i <= CH_MEM_STREAM3_SRC; i++) {
-#else
-	for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) {
-#endif
+	for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) {
 		if (dma_ch[i].chan_status == DMA_CHANNEL_ENABLED) {
 			printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
 			return -EBUSY;
@@ -495,388 +218,201 @@ int blackfin_dma_suspend(void)
 void blackfin_dma_resume(void)
 {
 	int i;
-
-#ifdef CONFIG_BF561	/* IMDMA channels doesn't have a PERIPHERAL_MAP */
-	for (i = 0; i <= CH_MEM_STREAM3_SRC; i++)
-#else
-	for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++)
-#endif
+	for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i)
 		dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
 }
 #endif
 
-static void *__dma_memcpy(void *dest, const void *src, size_t size)
+/**
+ *	blackfin_dma_early_init - minimal DMA init
+ *
+ * Setup a few DMA registers so we can safely do DMA transfers early on in
+ * the kernel booting process.  Really this just means using dma_memcpy().
+ */
+void __init blackfin_dma_early_init(void)
 {
-	int direction;	/* 1 - address decrease, 0 - address increase */
-	int flag_align;	/* 1 - address aligned, 0 - address unaligned */
-	int flag_2D;	/* 1 - 2D DMA needed, 0 - 1D DMA needed */
-	unsigned long flags;
-
-	if (size <= 0)
-		return NULL;
-
-	local_irq_save(flags);
-
-	if ((unsigned long)src < memory_end)
-		blackfin_dcache_flush_range((unsigned int)src,
-					    (unsigned int)(src + size));
-
-	if ((unsigned long)dest < memory_end)
-		blackfin_dcache_invalidate_range((unsigned int)dest,
-						 (unsigned int)(dest + size));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	if ((unsigned long)src < (unsigned long)dest)
-		direction = 1;
-	else
-		direction = 0;
-
-	if ((((unsigned long)dest % 2) == 0) && (((unsigned long)src % 2) == 0)
-	    && ((size % 2) == 0))
-		flag_align = 1;
-	else
-		flag_align = 0;
-
-	if (size > 0x10000)	/* size > 64K */
-		flag_2D = 1;
-	else
-		flag_2D = 0;
-
-	/* Setup destination and source start address */
-	if (direction) {
-		if (flag_align) {
-			bfin_write_MDMA_D0_START_ADDR(dest + size - 2);
-			bfin_write_MDMA_S0_START_ADDR(src + size - 2);
-		} else {
-			bfin_write_MDMA_D0_START_ADDR(dest + size - 1);
-			bfin_write_MDMA_S0_START_ADDR(src + size - 1);
-		}
-	} else {
-		bfin_write_MDMA_D0_START_ADDR(dest);
-		bfin_write_MDMA_S0_START_ADDR(src);
-	}
-
-	/* Setup destination and source xcount */
-	if (flag_2D) {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_COUNT(1024 / 2);
-			bfin_write_MDMA_S0_X_COUNT(1024 / 2);
-		} else {
-			bfin_write_MDMA_D0_X_COUNT(1024);
-			bfin_write_MDMA_S0_X_COUNT(1024);
-		}
-		bfin_write_MDMA_D0_Y_COUNT(size >> 10);
-		bfin_write_MDMA_S0_Y_COUNT(size >> 10);
-	} else {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_COUNT(size / 2);
-			bfin_write_MDMA_S0_X_COUNT(size / 2);
-		} else {
-			bfin_write_MDMA_D0_X_COUNT(size);
-			bfin_write_MDMA_S0_X_COUNT(size);
-		}
-	}
-
-	/* Setup destination and source xmodify and ymodify */
-	if (direction) {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_MODIFY(-2);
-			bfin_write_MDMA_S0_X_MODIFY(-2);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(-2);
-				bfin_write_MDMA_S0_Y_MODIFY(-2);
-			}
-		} else {
-			bfin_write_MDMA_D0_X_MODIFY(-1);
-			bfin_write_MDMA_S0_X_MODIFY(-1);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(-1);
-				bfin_write_MDMA_S0_Y_MODIFY(-1);
-			}
-		}
-	} else {
-		if (flag_align) {
-			bfin_write_MDMA_D0_X_MODIFY(2);
-			bfin_write_MDMA_S0_X_MODIFY(2);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(2);
-				bfin_write_MDMA_S0_Y_MODIFY(2);
-			}
-		} else {
-			bfin_write_MDMA_D0_X_MODIFY(1);
-			bfin_write_MDMA_S0_X_MODIFY(1);
-			if (flag_2D) {
-				bfin_write_MDMA_D0_Y_MODIFY(1);
-				bfin_write_MDMA_S0_Y_MODIFY(1);
-			}
-		}
-	}
-
-	/* Enable source DMA */
-	if (flag_2D) {
-		if (flag_align) {
-			bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D | WDSIZE_16);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D | WDSIZE_16);
-		} else {
-			bfin_write_MDMA_S0_CONFIG(DMAEN | DMA2D);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | DMA2D);
-		}
-	} else {
-		if (flag_align) {
-			bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16);
-		} else {
-			bfin_write_MDMA_S0_CONFIG(DMAEN);
-			bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN);
-		}
-	}
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
-		;
-
-	bfin_write_MDMA_D0_IRQ_STATUS(bfin_read_MDMA_D0_IRQ_STATUS() |
-				      (DMA_DONE | DMA_ERR));
-
 	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-
-	local_irq_restore(flags);
-
-	return dest;
 }
 
-void *dma_memcpy(void *dest, const void *src, size_t size)
-{
-	size_t bulk;
-	size_t rest;
-	void * addr;
-
-	bulk = (size >> 16) << 16;
-	rest = size - bulk;
-	if (bulk)
-		__dma_memcpy(dest, src, bulk);
-	addr = __dma_memcpy(dest+bulk, src+bulk, rest);
-	return addr;
-}
-EXPORT_SYMBOL(dma_memcpy);
-
-void *safe_dma_memcpy(void *dest, const void *src, size_t size)
-{
-	void *addr;
-	addr = dma_memcpy(dest, src, size);
-	return addr;
-}
-EXPORT_SYMBOL(safe_dma_memcpy);
-
-void dma_outsb(unsigned long addr, const void *buf, unsigned short len)
+/**
+ *	__dma_memcpy - program the MDMA registers
+ *
+ * Actually program MDMA0 and wait for the transfer to finish.  Disable IRQs
+ * while programming registers so that everything is fully configured.  Wait
+ * for DMA to finish with IRQs enabled.  If interrupted, the initial DMA_DONE
+ * check will make sure we don't clobber any existing transfer.
+ */
+static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
 {
+	static DEFINE_SPINLOCK(mdma_lock);
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&mdma_lock, flags);
 
-	blackfin_dcache_flush_range((unsigned int)buf,
-				    (unsigned int)(buf) + len);
+	if (bfin_read_MDMA_S0_CONFIG())
+		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
+			continue;
+
+	if (conf & DMA2D) {
+		/* For larger bit sizes, we've already divided down cnt so it
+		 * is no longer a multiple of 64k.  So we have to break down
+		 * the limit here so it is a multiple of the incoming size.
+		 * There is no limitation here in terms of total size other
+		 * than the hardware though as the bits lost in the shift are
+		 * made up by MODIFY (== we can hit the whole address space).
+		 * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4
+		 */
+		u32 shift = abs(dmod) >> 1;
+		size_t ycnt = cnt >> (16 - shift);
+		cnt = 1 << (16 - shift);
+		bfin_write_MDMA_D0_Y_COUNT(ycnt);
+		bfin_write_MDMA_S0_Y_COUNT(ycnt);
+		bfin_write_MDMA_D0_Y_MODIFY(dmod);
+		bfin_write_MDMA_S0_Y_MODIFY(smod);
+	}
 
-	bfin_write_MDMA_D0_START_ADDR(addr);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(0);
+	bfin_write_MDMA_D0_START_ADDR(daddr);
+	bfin_write_MDMA_D0_X_COUNT(cnt);
+	bfin_write_MDMA_D0_X_MODIFY(dmod);
 	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
-	bfin_write_MDMA_S0_START_ADDR(buf);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(1);
+	bfin_write_MDMA_S0_START_ADDR(saddr);
+	bfin_write_MDMA_S0_X_COUNT(cnt);
+	bfin_write_MDMA_S0_X_MODIFY(smod);
 	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_8);
+	bfin_write_MDMA_S0_CONFIG(DMAEN | conf);
+	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf);
+
+	spin_unlock_irqrestore(&mdma_lock, flags);
 
 	SSYNC();
 
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
+	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
+		if (bfin_read_MDMA_S0_CONFIG())
+			continue;
+		else
+			return;
 
 	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
 
 	bfin_write_MDMA_S0_CONFIG(0);
 	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
 }
-EXPORT_SYMBOL(dma_outsb);
-
 
-void dma_insb(unsigned long addr, void *buf, unsigned short len)
+/**
+ *	_dma_memcpy - translate C memcpy settings into MDMA settings
+ *
+ * Handle all the high level steps before we touch the MDMA registers.  So
+ * handle direction, tweaking of sizes, and formatting of addresses.
+ */
+static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
 {
-	unsigned long flags;
-
-	blackfin_dcache_invalidate_range((unsigned int)buf,
-					 (unsigned int)(buf) + len);
-
-	local_irq_save(flags);
-	bfin_write_MDMA_D0_START_ADDR(buf);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(1);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(addr);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(0);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	u32 conf, shift;
+	s16 mod;
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
 
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_8);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_8);
+	if (size == 0)
+		return NULL;
 
-	SSYNC();
+	if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
+		conf = WDSIZE_32;
+		shift = 2;
+	} else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
+		conf = WDSIZE_16;
+		shift = 1;
+	} else {
+		conf = WDSIZE_8;
+		shift = 0;
+	}
 
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
+	/* If the two memory regions have a chance of overlapping, make
+	 * sure the memcpy still works as expected.  Do this by having the
+	 * copy run backwards instead.
+	 */
+	mod = 1 << shift;
+	if (src < dst) {
+		mod *= -1;
+		dst += size + mod;
+		src += size + mod;
+	}
+	size >>= shift;
 
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	if (size > 0x10000)
+		conf |= DMA2D;
 
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
+	__dma_memcpy(dst, mod, src, mod, size, conf);
 
+	return pdst;
 }
-EXPORT_SYMBOL(dma_insb);
 
-void dma_outsw(unsigned long addr, const void *buf, unsigned short len)
+/**
+ *	dma_memcpy - DMA memcpy under mutex lock
+ *
+ * Do not check arguments before starting the DMA memcpy.  Break the transfer
+ * up into two pieces.  The first transfer is in multiples of 64k and the
+ * second transfer is the piece smaller than 64k.
+ */
+void *dma_memcpy(void *pdst, const void *psrc, size_t size)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	blackfin_dcache_flush_range((unsigned int)buf,
-				    (unsigned int)(buf) + len * sizeof(short));
-
-	bfin_write_MDMA_D0_START_ADDR(addr);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(0);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(buf);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(2);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
+	size_t bulk, rest;
+
+	if (bfin_addr_dcachable(src))
+		blackfin_dcache_flush_range(src, src + size);
+
+	if (bfin_addr_dcachable(dst))
+		blackfin_dcache_invalidate_range(dst, dst + size);
 
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	bulk = size & ~0xffff;
+	rest = size - bulk;
+	if (bulk)
+		_dma_memcpy(pdst, psrc, bulk);
+	_dma_memcpy(pdst + bulk, psrc + bulk, rest);
+	return pdst;
 }
-EXPORT_SYMBOL(dma_outsw);
+EXPORT_SYMBOL(dma_memcpy);
 
-void dma_insw(unsigned long addr, void *buf, unsigned short len)
+/**
+ *	safe_dma_memcpy - DMA memcpy w/argument checking
+ *
+ * Verify arguments are safe before heading to dma_memcpy().
+ */
+void *safe_dma_memcpy(void *dst, const void *src, size_t size)
 {
-	unsigned long flags;
-
-	blackfin_dcache_invalidate_range((unsigned int)buf,
-					 (unsigned int)(buf) + len * sizeof(short));
-
-	local_irq_save(flags);
-
-	bfin_write_MDMA_D0_START_ADDR(buf);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(2);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(addr);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(0);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_16);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_16);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	if (!access_ok(VERIFY_WRITE, dst, size))
+		return NULL;
+	if (!access_ok(VERIFY_READ, src, size))
+		return NULL;
+	return dma_memcpy(dst, src, size);
 }
-EXPORT_SYMBOL(dma_insw);
+EXPORT_SYMBOL(safe_dma_memcpy);
 
-void dma_outsl(unsigned long addr, const void *buf, unsigned short len)
+static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len,
+		     u16 size, u16 dma_size)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	blackfin_dcache_flush_range((unsigned int)buf,
-				    (unsigned int)(buf) + len * sizeof(long));
-
-	bfin_write_MDMA_D0_START_ADDR(addr);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(0);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(buf);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(4);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_32);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_32);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	blackfin_dcache_flush_range(buf, buf + len * size);
+	__dma_memcpy(addr, 0, buf, size, len, dma_size);
 }
-EXPORT_SYMBOL(dma_outsl);
 
-void dma_insl(unsigned long addr, void *buf, unsigned short len)
+static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len,
+		    u16 size, u16 dma_size)
 {
-	unsigned long flags;
-
-	blackfin_dcache_invalidate_range((unsigned int)buf,
-					 (unsigned int)(buf) + len * sizeof(long));
-
-	local_irq_save(flags);
-
-	bfin_write_MDMA_D0_START_ADDR(buf);
-	bfin_write_MDMA_D0_X_COUNT(len);
-	bfin_write_MDMA_D0_X_MODIFY(4);
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_START_ADDR(addr);
-	bfin_write_MDMA_S0_X_COUNT(len);
-	bfin_write_MDMA_S0_X_MODIFY(0);
-	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(DMAEN | WDSIZE_32);
-	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | WDSIZE_32);
-
-	SSYNC();
-
-	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE));
-
-	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
-
-	bfin_write_MDMA_S0_CONFIG(0);
-	bfin_write_MDMA_D0_CONFIG(0);
-	local_irq_restore(flags);
-
+	blackfin_dcache_invalidate_range(buf, buf + len * size);
+	__dma_memcpy(buf, size, addr, 0, len, dma_size);
 }
-EXPORT_SYMBOL(dma_insl);
+
+#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
+void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \
+{ \
+	_dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
+} \
+EXPORT_SYMBOL(dma_##io##s##bwl)
+MAKE_DMA_IO(out, b, 1,  8, const);
+MAKE_DMA_IO(in,  b, 1,  8, );
+MAKE_DMA_IO(out, w, 2, 16, const);
+MAKE_DMA_IO(in,  w, 2, 16, );
+MAKE_DMA_IO(out, l, 4, 32, const);
+MAKE_DMA_IO(in,  l, 4, 32, );
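Background: the MAKE_DMA_IO() macro above stamps out the six dma_{in,out}s{b,w,l}
entry points that previously existed as six nearly identical hand-written
functions. Expanding the first instantiation by hand shows what the preprocessor
generates (this is just the expansion of the patch's own code, not additional
patch content):

    /* MAKE_DMA_IO(out, b, 1, 8, const) expands to: */
    void dma_outsb(unsigned long addr, const void *buf, unsigned short len)
    {
            _dma_out(addr, (unsigned long)buf, len, 1, WDSIZE_8);
    }
    EXPORT_SYMBOL(dma_outsb);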
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 5c0800adb4dd..4c14331978f6 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -119,28 +119,28 @@ enum {
 #define AWA_DUMMY_READ(...)  do { } while (0)
 #endif
 
-#ifdef BF533_FAMILY
-static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+#if defined(BF533_FAMILY) || defined(BF538_FAMILY)
+static struct gpio_port_t *gpio_bankb[] = {
 	(struct gpio_port_t *) FIO_FLAG_D,
 };
 #endif
 
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
-static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
+static struct gpio_port_t *gpio_bankb[] = {
 	(struct gpio_port_t *) PORTFIO,
 	(struct gpio_port_t *) PORTGIO,
 	(struct gpio_port_t *) PORTHIO,
 };
 
-static unsigned short *port_fer[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+static unsigned short *port_fer[] = {
 	(unsigned short *) PORTF_FER,
 	(unsigned short *) PORTG_FER,
 	(unsigned short *) PORTH_FER,
 };
 #endif
 
-#ifdef BF527_FAMILY
-static unsigned short *port_mux[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+#if defined(BF527_FAMILY) || defined(BF518_FAMILY)
+static unsigned short *port_mux[] = {
 	(unsigned short *) PORTF_MUX,
 	(unsigned short *) PORTG_MUX,
 	(unsigned short *) PORTH_MUX,
@@ -155,7 +155,7 @@ u8 pmux_offset[][16] =
 #endif
 
 #ifdef BF561_FAMILY
-static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+static struct gpio_port_t *gpio_bankb[] = {
 	(struct gpio_port_t *) FIO0_FLAG_D,
 	(struct gpio_port_t *) FIO1_FLAG_D,
 	(struct gpio_port_t *) FIO2_FLAG_D,
@@ -163,7 +163,7 @@ static struct gpio_port_t *gpio_bankb[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
 #endif
 
 #ifdef BF548_FAMILY
-static struct gpio_port_t *gpio_array[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
+static struct gpio_port_t *gpio_array[] = {
 	(struct gpio_port_t *)PORTA_FER,
 	(struct gpio_port_t *)PORTB_FER,
 	(struct gpio_port_t *)PORTC_FER,
@@ -177,8 +177,9 @@ static struct gpio_port_t *gpio_array[gpio_bank(MAX_BLACKFIN_GPIOS)] = {
 };
 #endif
 
-static unsigned short reserved_gpio_map[gpio_bank(MAX_BLACKFIN_GPIOS)];
+static unsigned short reserved_gpio_map[GPIO_BANK_NUM];
 static unsigned short reserved_peri_map[gpio_bank(MAX_RESOURCES)];
+static unsigned short reserved_gpio_irq_map[GPIO_BANK_NUM];
 
 #define RESOURCE_LABEL_SIZE 16
 
@@ -188,48 +189,46 @@ static struct str_ident {
 
 #if defined(CONFIG_PM)
 #if defined(CONFIG_BF54x)
-static struct gpio_port_s gpio_bank_saved[gpio_bank(MAX_BLACKFIN_GPIOS)];
+static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM];
 #else
-static unsigned short wakeup_map[gpio_bank(MAX_BLACKFIN_GPIOS)];
+static unsigned short wakeup_map[GPIO_BANK_NUM];
 static unsigned char wakeup_flags_map[MAX_BLACKFIN_GPIOS];
-static struct gpio_port_s gpio_bank_saved[gpio_bank(MAX_BLACKFIN_GPIOS)];
+static struct gpio_port_s gpio_bank_saved[GPIO_BANK_NUM];
 
 #ifdef BF533_FAMILY
-static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PROG_INTB};
+static unsigned int sic_iwr_irqs[] = {IRQ_PROG_INTB};
 #endif
 
 #ifdef BF537_FAMILY
-static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PROG_INTB, IRQ_PORTG_INTB, IRQ_MAC_TX};
+static unsigned int sic_iwr_irqs[] = {IRQ_PROG_INTB, IRQ_PORTG_INTB, IRQ_MAC_TX};
+#endif
+
+#ifdef BF538_FAMILY
+static unsigned int sic_iwr_irqs[] = {IRQ_PORTF_INTB};
 #endif
 
-#ifdef BF527_FAMILY
-static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PORTF_INTB, IRQ_PORTG_INTB, IRQ_PORTH_INTB};
+#if defined(BF527_FAMILY) || defined(BF518_FAMILY)
+static unsigned int sic_iwr_irqs[] = {IRQ_PORTF_INTB, IRQ_PORTG_INTB, IRQ_PORTH_INTB};
 #endif
 
 #ifdef BF561_FAMILY
-static unsigned int sic_iwr_irqs[gpio_bank(MAX_BLACKFIN_GPIOS)] = {IRQ_PROG0_INTB, IRQ_PROG1_INTB, IRQ_PROG2_INTB};
+static unsigned int sic_iwr_irqs[] = {IRQ_PROG0_INTB, IRQ_PROG1_INTB, IRQ_PROG2_INTB};
 #endif
 #endif
 #endif /* CONFIG_PM */
 
-#if defined(BF548_FAMILY)
 inline int check_gpio(unsigned gpio)
 {
+#if defined(BF548_FAMILY)
 	if (gpio == GPIO_PB15 || gpio == GPIO_PC14 || gpio == GPIO_PC15
 	    || gpio == GPIO_PH14 || gpio == GPIO_PH15
-	    || gpio == GPIO_PJ14 || gpio == GPIO_PJ15
-	    || gpio >= MAX_BLACKFIN_GPIOS)
+	    || gpio == GPIO_PJ14 || gpio == GPIO_PJ15)
 		return -EINVAL;
-	return 0;
-}
-#else
-inline int check_gpio(unsigned gpio)
-{
+#endif
 	if (gpio >= MAX_BLACKFIN_GPIOS)
 		return -EINVAL;
 	return 0;
 }
-#endif
 
 static void gpio_error(unsigned gpio)
 {
@@ -258,35 +257,30 @@ static int cmp_label(unsigned short ident, const char *label)
 	}
 
 	if (label)
-		return strncmp(str_ident[ident].name,
-				 label, strlen(label));
+		return strcmp(str_ident[ident].name, label);
 	else
 		return -EINVAL;
 }
 
-#if defined(BF527_FAMILY) || defined(BF537_FAMILY)
 static void port_setup(unsigned gpio, unsigned short usage)
 {
-	if (!check_gpio(gpio)) {
-		if (usage == GPIO_USAGE)
-			*port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
-		else
-			*port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
-		SSYNC();
-	}
-}
+	if (check_gpio(gpio))
+		return;
+
+#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
+	if (usage == GPIO_USAGE)
+		*port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+	else
+		*port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
+	SSYNC();
 #elif defined(BF548_FAMILY)
-static void port_setup(unsigned gpio, unsigned short usage)
-{
 	if (usage == GPIO_USAGE)
 		gpio_array[gpio_bank(gpio)]->port_fer &= ~gpio_bit(gpio);
 	else
 		gpio_array[gpio_bank(gpio)]->port_fer |= gpio_bit(gpio);
 	SSYNC();
-}
-#else
-# define port_setup(...)  do { } while (0)
 #endif
+}
 
 #ifdef BF537_FAMILY
 static struct {
@@ -379,7 +373,7 @@ inline u16 get_portmux(unsigned short portno)
 
 	return (pmux >> (2 * gpio_sub_n(portno)) & 0x3);
 }
-#elif defined(BF527_FAMILY)
+#elif defined(BF527_FAMILY) || defined(BF518_FAMILY)
 inline void portmux_setup(unsigned short portno, unsigned short function)
 {
 	u16 pmux, ident = P_IDENT(portno);
@@ -428,13 +422,13 @@ arch_initcall(bfin_gpio_init);
 void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
 { \
 	unsigned long flags; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	if (arg) \
 		gpio_bankb[gpio_bank(gpio)]->name |= gpio_bit(gpio); \
 	else \
 		gpio_bankb[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 } \
 EXPORT_SYMBOL(set_gpio_ ## name);
 
@@ -450,13 +444,13 @@ SET_GPIO(both)
 void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
 { \
 	unsigned long flags; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	if (arg) \
 		gpio_bankb[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \
 	else \
 		gpio_bankb[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 } \
 EXPORT_SYMBOL(set_gpio_ ## name);
 #else
@@ -479,10 +473,10 @@ SET_GPIO_SC(data)
 void set_gpio_toggle(unsigned gpio)
 {
 	unsigned long flags;
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
 	AWA_DUMMY_READ(toggle);
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 }
 #else
 void set_gpio_toggle(unsigned gpio)
@@ -500,10 +494,10 @@ EXPORT_SYMBOL(set_gpio_toggle);
 void set_gpiop_ ## name(unsigned gpio, unsigned short arg) \
 { \
 	unsigned long flags; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	gpio_bankb[gpio_bank(gpio)]->name = arg; \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 } \
 EXPORT_SYMBOL(set_gpiop_ ## name);
 #else
@@ -531,10 +525,10 @@ unsigned short get_gpio_ ## name(unsigned gpio) \
 { \
 	unsigned long flags; \
 	unsigned short ret; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	ret = 0x01 & (gpio_bankb[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 	return ret; \
 } \
 EXPORT_SYMBOL(get_gpio_ ## name);
@@ -564,10 +558,10 @@ unsigned short get_gpiop_ ## name(unsigned gpio) \
 { \
 	unsigned long flags; \
 	unsigned short ret; \
-	local_irq_save(flags); \
+	local_irq_save_hw(flags); \
 	ret = (gpio_bankb[gpio_bank(gpio)]->name); \
 	AWA_DUMMY_READ(name); \
-	local_irq_restore(flags); \
+	local_irq_restore_hw(flags); \
 	return ret; \
 } \
 EXPORT_SYMBOL(get_gpiop_ ## name);
@@ -617,10 +611,10 @@ int gpio_pm_wakeup_request(unsigned gpio, unsigned char type)
 	if ((check_gpio(gpio) < 0) || !type)
 		return -EINVAL;
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 	wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio);
 	wakeup_flags_map[gpio] = type;
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return 0;
 }
@@ -633,11 +627,11 @@ void gpio_pm_wakeup_free(unsigned gpio)
633 if (check_gpio(gpio) < 0) 627 if (check_gpio(gpio) < 0)
634 return; 628 return;
635 629
636 local_irq_save(flags); 630 local_irq_save_hw(flags);
637 631
638 wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); 632 wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
639 633
640 local_irq_restore(flags); 634 local_irq_restore_hw(flags);
641} 635}
642EXPORT_SYMBOL(gpio_pm_wakeup_free); 636EXPORT_SYMBOL(gpio_pm_wakeup_free);
643 637
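A minimal usage sketch of the wakeup helpers above, assuming a hypothetical button pin: GPIO_PF8 and PM_WAKE_RISING stand in for whatever pin and trigger-type constants a given part's <asm/gpio.h> provides, so treat the names as illustrative.

/* Hedged example: arm GPIO_PF8 as a rising-edge wakeup source before
 * standby, release it on resume.  GPIO_PF8 / PM_WAKE_RISING are
 * illustrative names, not guaranteed for every Blackfin variant. */
#include <linux/errno.h>
#include <asm/gpio.h>

static int example_arm_button_wakeup(void)
{
	/* -EINVAL on a bad GPIO number or an empty trigger-type mask */
	return gpio_pm_wakeup_request(GPIO_PF8, PM_WAKE_RISING);
}

static void example_disarm_button_wakeup(void)
{
	gpio_pm_wakeup_free(GPIO_PF8);
}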
@@ -679,7 +673,7 @@ u32 bfin_pm_standby_setup(void)
679 gpio_bankb[bank]->maskb = 0; 673 gpio_bankb[bank]->maskb = 0;
680 674
681 if (mask) { 675 if (mask) {
682#if defined(BF527_FAMILY) || defined(BF537_FAMILY) 676#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
683 gpio_bank_saved[bank].fer = *port_fer[bank]; 677 gpio_bank_saved[bank].fer = *port_fer[bank];
684#endif 678#endif
685 gpio_bank_saved[bank].inen = gpio_bankb[bank]->inen; 679 gpio_bank_saved[bank].inen = gpio_bankb[bank]->inen;
@@ -724,7 +718,7 @@ void bfin_pm_standby_restore(void)
724 bank = gpio_bank(i); 718 bank = gpio_bank(i);
725 719
726 if (mask) { 720 if (mask) {
727#if defined(BF527_FAMILY) || defined(BF537_FAMILY) 721#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
728 *port_fer[bank] = gpio_bank_saved[bank].fer; 722 *port_fer[bank] = gpio_bank_saved[bank].fer;
729#endif 723#endif
730 gpio_bankb[bank]->inen = gpio_bank_saved[bank].inen; 724 gpio_bankb[bank]->inen = gpio_bank_saved[bank].inen;
@@ -750,9 +744,9 @@ void bfin_gpio_pm_hibernate_suspend(void)
750 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) { 744 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
751 bank = gpio_bank(i); 745 bank = gpio_bank(i);
752 746
753#if defined(BF527_FAMILY) || defined(BF537_FAMILY) 747#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
754 gpio_bank_saved[bank].fer = *port_fer[bank]; 748 gpio_bank_saved[bank].fer = *port_fer[bank];
755#ifdef BF527_FAMILY 749#if defined(BF527_FAMILY) || defined(BF518_FAMILY)
756 gpio_bank_saved[bank].mux = *port_mux[bank]; 750 gpio_bank_saved[bank].mux = *port_mux[bank];
757#else 751#else
758 if (bank == 0) 752 if (bank == 0)
@@ -778,8 +772,8 @@ void bfin_gpio_pm_hibernate_restore(void)
778 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) { 772 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
779 bank = gpio_bank(i); 773 bank = gpio_bank(i);
780 774
781#if defined(BF527_FAMILY) || defined(BF537_FAMILY) 775#if defined(BF527_FAMILY) || defined(BF537_FAMILY) || defined(BF518_FAMILY)
782#ifdef BF527_FAMILY 776#if defined(BF527_FAMILY) || defined(BF518_FAMILY)
783 *port_mux[bank] = gpio_bank_saved[bank].mux; 777 *port_mux[bank] = gpio_bank_saved[bank].mux;
784#else 778#else
785 if (bank == 0) 779 if (bank == 0)
@@ -873,7 +867,6 @@ EXPORT_SYMBOL(get_gpio_dir);
873* MODIFICATION HISTORY : 867* MODIFICATION HISTORY :
874**************************************************************/ 868**************************************************************/
875 869
876#ifdef BF548_FAMILY
877int peripheral_request(unsigned short per, const char *label) 870int peripheral_request(unsigned short per, const char *label)
878{ 871{
879 unsigned long flags; 872 unsigned long flags;
@@ -889,31 +882,35 @@ int peripheral_request(unsigned short per, const char *label)
889 if (!(per & P_DEFINED)) 882 if (!(per & P_DEFINED))
890 return -ENODEV; 883 return -ENODEV;
891 884
892 if (check_gpio(ident) < 0) 885 local_irq_save_hw(flags);
893 return -EINVAL;
894 886
895 local_irq_save(flags); 887 /* If a pin can be muxed as either GPIO or peripheral, make
896 888 * sure it is not already a GPIO pin when we request it.
897 if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) { 889 */
890 if (unlikely(!check_gpio(ident) &&
891 reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) {
898 dump_stack(); 892 dump_stack();
899 printk(KERN_ERR 893 printk(KERN_ERR
900 "%s: Peripheral %d is already reserved as GPIO by %s !\n", 894 "%s: Peripheral %d is already reserved as GPIO by %s !\n",
901 __func__, ident, get_label(ident)); 895 __func__, ident, get_label(ident));
902 local_irq_restore(flags); 896 local_irq_restore_hw(flags);
903 return -EBUSY; 897 return -EBUSY;
904 } 898 }
905 899
906 if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) { 900 if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) {
907 901
908 u16 funct = get_portmux(ident);
909
910 /* 902 /*
911 * Pin functions like AMC address strobes may 903 * Pin functions like AMC address strobes may
912 * be requested and used by several drivers 904 * be requested and used by several drivers
913 */ 905 */
914 906
915 if (!((per & P_MAYSHARE) && (funct == P_FUNCT2MUX(per)))) { 907#ifdef BF548_FAMILY
908 u16 funct = get_portmux(ident);
916 909
910 if (!((per & P_MAYSHARE) && (funct == P_FUNCT2MUX(per)))) {
911#else
912 if (!(per & P_MAYSHARE)) {
913#endif
917 /* 914 /*
918 * Allow that the identical pin function can 915 * Allow that the identical pin function can
919 * be requested from the same driver twice 916 * be requested from the same driver twice
@@ -926,7 +923,7 @@ int peripheral_request(unsigned short per, const char *label)
926 printk(KERN_ERR 923 printk(KERN_ERR
927 "%s: Peripheral %d function %d is already reserved by %s !\n", 924 "%s: Peripheral %d function %d is already reserved by %s !\n",
928 __func__, ident, P_FUNCT2MUX(per), get_label(ident)); 925 __func__, ident, P_FUNCT2MUX(per), get_label(ident));
929 local_irq_restore(flags); 926 local_irq_restore_hw(flags);
930 return -EBUSY; 927 return -EBUSY;
931 } 928 }
932 } 929 }
@@ -934,89 +931,19 @@ int peripheral_request(unsigned short per, const char *label)
934 anyway: 931 anyway:
935 reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident); 932 reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident);
936 933
934#ifdef BF548_FAMILY
937 portmux_setup(ident, P_FUNCT2MUX(per)); 935 portmux_setup(ident, P_FUNCT2MUX(per));
938 port_setup(ident, PERIPHERAL_USAGE);
939
940 local_irq_restore(flags);
941 set_label(ident, label);
942
943 return 0;
944}
945EXPORT_SYMBOL(peripheral_request);
946#else 936#else
947
948int peripheral_request(unsigned short per, const char *label)
949{
950 unsigned long flags;
951 unsigned short ident = P_IDENT(per);
952
953 /*
954 * Don't cares are pins with only one dedicated function
955 */
956
957 if (per & P_DONTCARE)
958 return 0;
959
960 if (!(per & P_DEFINED))
961 return -ENODEV;
962
963 local_irq_save(flags);
964
965 if (!check_gpio(ident)) {
966
967 if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) {
968 dump_stack();
969 printk(KERN_ERR
970 "%s: Peripheral %d is already reserved as GPIO by %s !\n",
971 __func__, ident, get_label(ident));
972 local_irq_restore(flags);
973 return -EBUSY;
974 }
975
976 }
977
978 if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) {
979
980 /*
981 * Pin functions like AMC address strobes may
982 * be requested and used by several drivers
983 */
984
985 if (!(per & P_MAYSHARE)) {
986
987 /*
988 * Allow that the identical pin function can
989 * be requested from the same driver twice
990 */
991
992 if (cmp_label(ident, label) == 0)
993 goto anyway;
994
995 dump_stack();
996 printk(KERN_ERR
997 "%s: Peripheral %d function %d is already"
998 " reserved by %s !\n",
999 __func__, ident, P_FUNCT2MUX(per),
1000 get_label(ident));
1001 local_irq_restore(flags);
1002 return -EBUSY;
1003 }
1004
1005 }
1006
1007 anyway:
1008 portmux_setup(per, P_FUNCT2MUX(per)); 937 portmux_setup(per, P_FUNCT2MUX(per));
1009 938#endif
1010 port_setup(ident, PERIPHERAL_USAGE); 939 port_setup(ident, PERIPHERAL_USAGE);
1011 940
1012 reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident); 941 local_irq_restore_hw(flags);
1013 local_irq_restore(flags);
1014 set_label(ident, label); 942 set_label(ident, label);
1015 943
1016 return 0; 944 return 0;
1017} 945}
1018EXPORT_SYMBOL(peripheral_request); 946EXPORT_SYMBOL(peripheral_request);
1019#endif
1020 947
1021int peripheral_request_list(const unsigned short per[], const char *label) 948int peripheral_request_list(const unsigned short per[], const char *label)
1022{ 949{
@@ -1053,10 +980,10 @@ void peripheral_free(unsigned short per)
1053 if (check_gpio(ident) < 0) 980 if (check_gpio(ident) < 0)
1054 return; 981 return;
1055 982
1056 local_irq_save(flags); 983 local_irq_save_hw(flags);
1057 984
1058 if (unlikely(!(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident)))) { 985 if (unlikely(!(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident)))) {
1059 local_irq_restore(flags); 986 local_irq_restore_hw(flags);
1060 return; 987 return;
1061 } 988 }
1062 989
@@ -1067,7 +994,7 @@ void peripheral_free(unsigned short per)
1067 994
1068 set_label(ident, "free"); 995 set_label(ident, "free");
1069 996
1070 local_irq_restore(flags); 997 local_irq_restore_hw(flags);
1071} 998}
1072EXPORT_SYMBOL(peripheral_free); 999EXPORT_SYMBOL(peripheral_free);
1073 1000
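With peripheral_request() unified above, the usual driver pattern is to claim a zero-terminated pin list at probe time and release it on teardown. A hedged sketch using the SPI0 portmux identifiers (real Blackfin P_* names, though which ones exist differs per family):

/* Claim all SPI0 pins, release them later.  peripheral_request_list()
 * unwinds any pins it already took and propagates the error (-EBUSY
 * and friends) if one of them is unavailable. */
#include <asm/portmux.h>

static const unsigned short spi_pins[] = {
	P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0,	/* 0-terminated */
};

static int example_claim_spi_pins(void)
{
	return peripheral_request_list(spi_pins, "example-spi");
}

static void example_release_spi_pins(void)
{
	peripheral_free_list(spi_pins);
}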
@@ -1094,14 +1021,14 @@ EXPORT_SYMBOL(peripheral_free_list);
1094* MODIFICATION HISTORY : 1021* MODIFICATION HISTORY :
1095**************************************************************/ 1022**************************************************************/
1096 1023
1097int gpio_request(unsigned gpio, const char *label) 1024int bfin_gpio_request(unsigned gpio, const char *label)
1098{ 1025{
1099 unsigned long flags; 1026 unsigned long flags;
1100 1027
1101 if (check_gpio(gpio) < 0) 1028 if (check_gpio(gpio) < 0)
1102 return -EINVAL; 1029 return -EINVAL;
1103 1030
1104 local_irq_save(flags); 1031 local_irq_save_hw(flags);
1105 1032
1106 /* 1033 /*
1107 * Allow that the identical GPIO can 1034 * Allow that the identical GPIO can
@@ -1110,15 +1037,15 @@ int gpio_request(unsigned gpio, const char *label)
1110 */ 1037 */
1111 1038
1112 if (cmp_label(gpio, label) == 0) { 1039 if (cmp_label(gpio, label) == 0) {
1113 local_irq_restore(flags); 1040 local_irq_restore_hw(flags);
1114 return 0; 1041 return 0;
1115 } 1042 }
1116 1043
1117 if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) { 1044 if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
1118 dump_stack(); 1045 dump_stack();
1119 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n", 1046 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
1120 gpio, get_label(gpio)); 1047 gpio, get_label(gpio));
1121 local_irq_restore(flags); 1048 local_irq_restore_hw(flags);
1122 return -EBUSY; 1049 return -EBUSY;
1123 } 1050 }
1124 if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) { 1051 if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
@@ -1126,34 +1053,37 @@ int gpio_request(unsigned gpio, const char *label)
1126 printk(KERN_ERR 1053 printk(KERN_ERR
1127 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 1054 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
1128 gpio, get_label(gpio)); 1055 gpio, get_label(gpio));
1129 local_irq_restore(flags); 1056 local_irq_restore_hw(flags);
1130 return -EBUSY; 1057 return -EBUSY;
1131 } 1058 }
1059 if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio)))
1060 printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved as gpio-irq!"
1061 " (Documentation/blackfin/bfin-gpio-notes.txt)\n", gpio);
1132 1062
1133 reserved_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio); 1063 reserved_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio);
1064 set_label(gpio, label);
1134 1065
1135 local_irq_restore(flags); 1066 local_irq_restore_hw(flags);
1136 1067
1137 port_setup(gpio, GPIO_USAGE); 1068 port_setup(gpio, GPIO_USAGE);
1138 set_label(gpio, label);
1139 1069
1140 return 0; 1070 return 0;
1141} 1071}
1142EXPORT_SYMBOL(gpio_request); 1072EXPORT_SYMBOL(bfin_gpio_request);
1143 1073
1144void gpio_free(unsigned gpio) 1074void bfin_gpio_free(unsigned gpio)
1145{ 1075{
1146 unsigned long flags; 1076 unsigned long flags;
1147 1077
1148 if (check_gpio(gpio) < 0) 1078 if (check_gpio(gpio) < 0)
1149 return; 1079 return;
1150 1080
1151 local_irq_save(flags); 1081 local_irq_save_hw(flags);
1152 1082
1153 if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) { 1083 if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
1154 dump_stack(); 1084 dump_stack();
1155 gpio_error(gpio); 1085 gpio_error(gpio);
1156 local_irq_restore(flags); 1086 local_irq_restore_hw(flags);
1157 return; 1087 return;
1158 } 1088 }
1159 1089
@@ -1161,13 +1091,76 @@ void gpio_free(unsigned gpio)
1161 1091
1162 set_label(gpio, "free"); 1092 set_label(gpio, "free");
1163 1093
1164 local_irq_restore(flags); 1094 local_irq_restore_hw(flags);
1095}
1096EXPORT_SYMBOL(bfin_gpio_free);
1097
1098int bfin_gpio_irq_request(unsigned gpio, const char *label)
1099{
1100 unsigned long flags;
1101
1102 if (check_gpio(gpio) < 0)
1103 return -EINVAL;
1104
1105 local_irq_save_hw(flags);
1106
1107 if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
1108 dump_stack();
1109 printk(KERN_ERR
1110 "bfin-gpio: GPIO %d is already reserved as gpio-irq !\n",
1111 gpio);
1112 local_irq_restore_hw(flags);
1113 return -EBUSY;
1114 }
1115 if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
1116 dump_stack();
1117 printk(KERN_ERR
1118 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
1119 gpio, get_label(gpio));
1120 local_irq_restore_hw(flags);
1121 return -EBUSY;
1122 }
1123 if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))
1124 printk(KERN_NOTICE "bfin-gpio: GPIO %d is already reserved by %s! "
1125 "(Documentation/blackfin/bfin-gpio-notes.txt)\n",
1126 gpio, get_label(gpio));
1127
1128 reserved_gpio_irq_map[gpio_bank(gpio)] |= gpio_bit(gpio);
1129 set_label(gpio, label);
1130
1131 local_irq_restore_hw(flags);
1132
1133 port_setup(gpio, GPIO_USAGE);
1134
1135 return 0;
1136}
1137
1138void bfin_gpio_irq_free(unsigned gpio)
1139{
1140 unsigned long flags;
1141
1142 if (check_gpio(gpio) < 0)
1143 return;
1144
1145 local_irq_save_hw(flags);
1146
1147 if (unlikely(!(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
1148 dump_stack();
1149 gpio_error(gpio);
1150 local_irq_restore_hw(flags);
1151 return;
1152 }
1153
1154 reserved_gpio_irq_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
1155
1156 set_label(gpio, "free");
1157
1158 local_irq_restore_hw(flags);
1165} 1159}
1166EXPORT_SYMBOL(gpio_free);
1167 1160
1168 1161
1169#ifdef BF548_FAMILY 1162#ifdef BF548_FAMILY
1170int gpio_direction_input(unsigned gpio) 1163int bfin_gpio_direction_input(unsigned gpio)
1171{ 1164{
1172 unsigned long flags; 1165 unsigned long flags;
1173 1166
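The request/free paths above all cross-check parallel reservation bitmaps indexed by 16-pin bank. A standalone C model of that bookkeeping (bank count and return codes are illustrative, not the kernel's):

#include <stdio.h>

#define NUM_BANKS    4	/* illustrative */

#define gpio_bank(x) ((x) >> 4)	/* 16 pins per bank */
#define gpio_bit(x)  (1 << ((x) & 0xF))

static unsigned short reserved_gpio_map[NUM_BANKS];
static unsigned short reserved_gpio_irq_map[NUM_BANKS];

static int reserve_gpio(unsigned gpio)
{
	if (reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))
		return -1;	/* stands in for the -EBUSY path above */
	/* an existing gpio-irq reservation is only worth a notice */
	if (reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio))
		printf("note: GPIO %u is also reserved as gpio-irq\n", gpio);
	reserved_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio);
	return 0;
}

int main(void)
{
	printf("first request:  %d\n", reserve_gpio(20));
	printf("second request: %d\n", reserve_gpio(20));
	return 0;
}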
@@ -1176,16 +1169,16 @@ int gpio_direction_input(unsigned gpio)
1176 return -EINVAL; 1169 return -EINVAL;
1177 } 1170 }
1178 1171
1179 local_irq_save(flags); 1172 local_irq_save_hw(flags);
1180 gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio); 1173 gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio);
1181 gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio); 1174 gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio);
1182 local_irq_restore(flags); 1175 local_irq_restore_hw(flags);
1183 1176
1184 return 0; 1177 return 0;
1185} 1178}
1186EXPORT_SYMBOL(gpio_direction_input); 1179EXPORT_SYMBOL(bfin_gpio_direction_input);
1187 1180
1188int gpio_direction_output(unsigned gpio, int value) 1181int bfin_gpio_direction_output(unsigned gpio, int value)
1189{ 1182{
1190 unsigned long flags; 1183 unsigned long flags;
1191 1184
@@ -1194,30 +1187,30 @@ int gpio_direction_output(unsigned gpio, int value)
1194 return -EINVAL; 1187 return -EINVAL;
1195 } 1188 }
1196 1189
1197 local_irq_save(flags); 1190 local_irq_save_hw(flags);
1198 gpio_array[gpio_bank(gpio)]->port_inen &= ~gpio_bit(gpio); 1191 gpio_array[gpio_bank(gpio)]->port_inen &= ~gpio_bit(gpio);
1199 gpio_set_value(gpio, value); 1192 gpio_set_value(gpio, value);
1200 gpio_array[gpio_bank(gpio)]->port_dir_set = gpio_bit(gpio); 1193 gpio_array[gpio_bank(gpio)]->port_dir_set = gpio_bit(gpio);
1201 local_irq_restore(flags); 1194 local_irq_restore_hw(flags);
1202 1195
1203 return 0; 1196 return 0;
1204} 1197}
1205EXPORT_SYMBOL(gpio_direction_output); 1198EXPORT_SYMBOL(bfin_gpio_direction_output);
1206 1199
1207void gpio_set_value(unsigned gpio, int arg) 1200void bfin_gpio_set_value(unsigned gpio, int arg)
1208{ 1201{
1209 if (arg) 1202 if (arg)
1210 gpio_array[gpio_bank(gpio)]->port_set = gpio_bit(gpio); 1203 gpio_array[gpio_bank(gpio)]->port_set = gpio_bit(gpio);
1211 else 1204 else
1212 gpio_array[gpio_bank(gpio)]->port_clear = gpio_bit(gpio); 1205 gpio_array[gpio_bank(gpio)]->port_clear = gpio_bit(gpio);
1213} 1206}
1214EXPORT_SYMBOL(gpio_set_value); 1207EXPORT_SYMBOL(bfin_gpio_set_value);
1215 1208
1216int gpio_get_value(unsigned gpio) 1209int bfin_gpio_get_value(unsigned gpio)
1217{ 1210{
1218 return (1 & (gpio_array[gpio_bank(gpio)]->port_data >> gpio_sub_n(gpio))); 1211 return (1 & (gpio_array[gpio_bank(gpio)]->port_data >> gpio_sub_n(gpio)));
1219} 1212}
1220EXPORT_SYMBOL(gpio_get_value); 1213EXPORT_SYMBOL(bfin_gpio_get_value);
1221 1214
1222void bfin_gpio_irq_prepare(unsigned gpio) 1215void bfin_gpio_irq_prepare(unsigned gpio)
1223{ 1216{
@@ -1225,34 +1218,34 @@ void bfin_gpio_irq_prepare(unsigned gpio)
1225 1218
1226 port_setup(gpio, GPIO_USAGE); 1219 port_setup(gpio, GPIO_USAGE);
1227 1220
1228 local_irq_save(flags); 1221 local_irq_save_hw(flags);
1229 gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio); 1222 gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio);
1230 gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio); 1223 gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio);
1231 local_irq_restore(flags); 1224 local_irq_restore_hw(flags);
1232} 1225}
1233 1226
1234#else 1227#else
1235 1228
1236int gpio_get_value(unsigned gpio) 1229int bfin_gpio_get_value(unsigned gpio)
1237{ 1230{
1238 unsigned long flags; 1231 unsigned long flags;
1239 int ret; 1232 int ret;
1240 1233
1241 if (unlikely(get_gpio_edge(gpio))) { 1234 if (unlikely(get_gpio_edge(gpio))) {
1242 local_irq_save(flags); 1235 local_irq_save_hw(flags);
1243 set_gpio_edge(gpio, 0); 1236 set_gpio_edge(gpio, 0);
1244 ret = get_gpio_data(gpio); 1237 ret = get_gpio_data(gpio);
1245 set_gpio_edge(gpio, 1); 1238 set_gpio_edge(gpio, 1);
1246 local_irq_restore(flags); 1239 local_irq_restore_hw(flags);
1247 1240
1248 return ret; 1241 return ret;
1249 } else 1242 } else
1250 return get_gpio_data(gpio); 1243 return get_gpio_data(gpio);
1251} 1244}
1252EXPORT_SYMBOL(gpio_get_value); 1245EXPORT_SYMBOL(bfin_gpio_get_value);
1253 1246
1254 1247
1255int gpio_direction_input(unsigned gpio) 1248int bfin_gpio_direction_input(unsigned gpio)
1256{ 1249{
1257 unsigned long flags; 1250 unsigned long flags;
1258 1251
@@ -1261,17 +1254,17 @@ int gpio_direction_input(unsigned gpio)
1261 return -EINVAL; 1254 return -EINVAL;
1262 } 1255 }
1263 1256
1264 local_irq_save(flags); 1257 local_irq_save_hw(flags);
1265 gpio_bankb[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio); 1258 gpio_bankb[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio);
1266 gpio_bankb[gpio_bank(gpio)]->inen |= gpio_bit(gpio); 1259 gpio_bankb[gpio_bank(gpio)]->inen |= gpio_bit(gpio);
1267 AWA_DUMMY_READ(inen); 1260 AWA_DUMMY_READ(inen);
1268 local_irq_restore(flags); 1261 local_irq_restore_hw(flags);
1269 1262
1270 return 0; 1263 return 0;
1271} 1264}
1272EXPORT_SYMBOL(gpio_direction_input); 1265EXPORT_SYMBOL(bfin_gpio_direction_input);
1273 1266
1274int gpio_direction_output(unsigned gpio, int value) 1267int bfin_gpio_direction_output(unsigned gpio, int value)
1275{ 1268{
1276 unsigned long flags; 1269 unsigned long flags;
1277 1270
@@ -1280,7 +1273,7 @@ int gpio_direction_output(unsigned gpio, int value)
1280 return -EINVAL; 1273 return -EINVAL;
1281 } 1274 }
1282 1275
1283 local_irq_save(flags); 1276 local_irq_save_hw(flags);
1284 gpio_bankb[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio); 1277 gpio_bankb[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
1285 1278
1286 if (value) 1279 if (value)
@@ -1290,11 +1283,11 @@ int gpio_direction_output(unsigned gpio, int value)
1290 1283
1291 gpio_bankb[gpio_bank(gpio)]->dir |= gpio_bit(gpio); 1284 gpio_bankb[gpio_bank(gpio)]->dir |= gpio_bit(gpio);
1292 AWA_DUMMY_READ(dir); 1285 AWA_DUMMY_READ(dir);
1293 local_irq_restore(flags); 1286 local_irq_restore_hw(flags);
1294 1287
1295 return 0; 1288 return 0;
1296} 1289}
1297EXPORT_SYMBOL(gpio_direction_output); 1290EXPORT_SYMBOL(bfin_gpio_direction_output);
1298 1291
1299/* If we are booting from SPI and our board lacks a strong enough pull up, 1292/* If we are booting from SPI and our board lacks a strong enough pull up,
1300 * the core can reset and execute the bootrom faster than the resistor can 1293 * the core can reset and execute the bootrom faster than the resistor can
@@ -1327,14 +1320,17 @@ void bfin_gpio_irq_prepare(unsigned gpio)
1327static int gpio_proc_read(char *buf, char **start, off_t offset, 1320static int gpio_proc_read(char *buf, char **start, off_t offset,
1328 int len, int *unused_i, void *unused_v) 1321 int len, int *unused_i, void *unused_v)
1329{ 1322{
1330 int c, outlen = 0; 1323 int c, irq, gpio, outlen = 0;
1331 1324
1332 for (c = 0; c < MAX_RESOURCES; c++) { 1325 for (c = 0; c < MAX_RESOURCES; c++) {
1333 if (!check_gpio(c) && (reserved_gpio_map[gpio_bank(c)] & gpio_bit(c))) 1326 irq = reserved_gpio_irq_map[gpio_bank(c)] & gpio_bit(c);
1334 len = sprintf(buf, "GPIO_%d: %s \t\tGPIO %s\n", c, 1327 gpio = reserved_gpio_map[gpio_bank(c)] & gpio_bit(c);
1335 get_label(c), get_gpio_dir(c) ? "OUTPUT" : "INPUT"); 1328 if (!check_gpio(c) && (gpio || irq))
1329 len = sprintf(buf, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c,
1330 get_label(c), (gpio && irq) ? " *" : "",
1331 get_gpio_dir(c) ? "OUTPUT" : "INPUT");
1336 else if (reserved_peri_map[gpio_bank(c)] & gpio_bit(c)) 1332 else if (reserved_peri_map[gpio_bank(c)] & gpio_bit(c))
1337 len = sprintf(buf, "GPIO_%d: %s \t\tPeripheral\n", c, get_label(c)); 1333 len = sprintf(buf, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c));
1338 else 1334 else
1339 continue; 1335 continue;
1340 buf += len; 1336 buf += len;
@@ -1354,3 +1350,57 @@ static __init int gpio_register_proc(void)
1354} 1350}
1355__initcall(gpio_register_proc); 1351__initcall(gpio_register_proc);
1356#endif 1352#endif
1353
1354#ifdef CONFIG_GPIOLIB
1355int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio)
1356{
1357 return bfin_gpio_direction_input(gpio);
1358}
1359
1360int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
1361{
1362 return bfin_gpio_direction_output(gpio, level);
1363}
1364
1365int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio)
1366{
1367 return bfin_gpio_get_value(gpio);
1368}
1369
1370void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value)
1371{
1372#ifdef BF548_FAMILY
1373 return bfin_gpio_set_value(gpio, value);
1374#else
1375 return set_gpio_data(gpio, value);
1376#endif
1377}
1378
1379int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio)
1380{
1381 return bfin_gpio_request(gpio, chip->label);
1382}
1383
1384void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio)
1385{
1386 return bfin_gpio_free(gpio);
1387}
1388
1389static struct gpio_chip bfin_chip = {
1390 .label = "Blackfin-GPIOlib",
1391 .direction_input = bfin_gpiolib_direction_input,
1392 .get = bfin_gpiolib_get_value,
1393 .direction_output = bfin_gpiolib_direction_output,
1394 .set = bfin_gpiolib_set_value,
1395 .request = bfin_gpiolib_gpio_request,
1396 .free = bfin_gpiolib_gpio_free,
1397 .base = 0,
1398 .ngpio = MAX_BLACKFIN_GPIOS,
1399};
1400
1401static int __init bfin_gpiolib_setup(void)
1402{
1403 return gpiochip_add(&bfin_chip);
1404}
1405arch_initcall(bfin_gpiolib_setup);
1406#endif
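Once bfin_gpiolib_setup() has registered bfin_chip, generic gpiolib consumers reach the bfin_gpio_* implementations through the wrappers above. A hedged consumer sketch (the GPIO number and label are illustrative):

#include <linux/gpio.h>

static int example_led_init(void)
{
	int err = gpio_request(4, "example-led");	/* -> bfin_gpio_request() */
	if (err)
		return err;

	err = gpio_direction_output(4, 1);	/* -> bfin_gpio_direction_output() */
	if (err)
		gpio_free(4);			/* -> bfin_gpio_free() */
	return err;
}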
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 4367330909b2..01f917d58b59 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -1,52 +1,25 @@
1/* 1/*
2 * File: arch/blackfin/kernel/bfin_ksyms.c 2 * arch/blackfin/kernel/bfin_ksyms.c - exports for random symbols
3 * Based on: none - original work
4 * Author:
5 * 3 *
6 * Created: 4 * Copyright 2004-2008 Analog Devices Inc.
7 * Description:
8 * 5 *
9 * Modified: 6 * Licensed under the GPL-2 or later.
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */ 7 */
29 8
30#include <linux/module.h> 9#include <linux/module.h>
31#include <linux/irq.h>
32#include <linux/uaccess.h> 10#include <linux/uaccess.h>
33 11
34#include <asm/checksum.h>
35#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
36 13
37/* platform dependent support */ 14/* Allow people to have their own Blackfin exception handler in a module */
38
39EXPORT_SYMBOL(__ioremap);
40
41EXPORT_SYMBOL(ip_fast_csum);
42
43EXPORT_SYMBOL(kernel_thread);
44
45EXPORT_SYMBOL(is_in_rom);
46EXPORT_SYMBOL(bfin_return_from_exception); 15EXPORT_SYMBOL(bfin_return_from_exception);
47 16
48/* Networking helper routines. */ 17/* All the Blackfin cache functions: mach-common/cache.S */
49EXPORT_SYMBOL(csum_partial_copy); 18EXPORT_SYMBOL(blackfin_dcache_invalidate_range);
19EXPORT_SYMBOL(blackfin_icache_dcache_flush_range);
20EXPORT_SYMBOL(blackfin_icache_flush_range);
21EXPORT_SYMBOL(blackfin_dcache_flush_range);
22EXPORT_SYMBOL(blackfin_dflush_page);
50 23
51/* The following are special because they're not called 24/* The following are special because they're not called
52 * explicitly (the C compiler generates them). Fortunately, 25 * explicitly (the C compiler generates them). Fortunately,
@@ -74,8 +47,6 @@ extern void __modsi3(void);
74extern void __muldi3(void); 47extern void __muldi3(void);
75extern void __udivsi3(void); 48extern void __udivsi3(void);
76extern void __umodsi3(void); 49extern void __umodsi3(void);
77
78/* gcc lib functions */
79EXPORT_SYMBOL(__ashldi3); 50EXPORT_SYMBOL(__ashldi3);
80EXPORT_SYMBOL(__ashrdi3); 51EXPORT_SYMBOL(__ashrdi3);
81EXPORT_SYMBOL(__umulsi3_highpart); 52EXPORT_SYMBOL(__umulsi3_highpart);
@@ -87,6 +58,7 @@ EXPORT_SYMBOL(__muldi3);
87EXPORT_SYMBOL(__udivsi3); 58EXPORT_SYMBOL(__udivsi3);
88EXPORT_SYMBOL(__umodsi3); 59EXPORT_SYMBOL(__umodsi3);
89 60
61/* Input/output symbols: lib/{in,out}s.S */
90EXPORT_SYMBOL(outsb); 62EXPORT_SYMBOL(outsb);
91EXPORT_SYMBOL(insb); 63EXPORT_SYMBOL(insb);
92EXPORT_SYMBOL(outsw); 64EXPORT_SYMBOL(outsw);
@@ -96,20 +68,39 @@ EXPORT_SYMBOL(insw_8);
96EXPORT_SYMBOL(outsl); 68EXPORT_SYMBOL(outsl);
97EXPORT_SYMBOL(insl); 69EXPORT_SYMBOL(insl);
98EXPORT_SYMBOL(insl_16); 70EXPORT_SYMBOL(insl_16);
99EXPORT_SYMBOL(irq_flags);
100EXPORT_SYMBOL(iounmap);
101EXPORT_SYMBOL(blackfin_dcache_invalidate_range);
102EXPORT_SYMBOL(blackfin_icache_dcache_flush_range);
103EXPORT_SYMBOL(blackfin_icache_flush_range);
104EXPORT_SYMBOL(blackfin_dcache_flush_range);
105EXPORT_SYMBOL(blackfin_dflush_page);
106 71
107EXPORT_SYMBOL(csum_partial); 72#ifdef CONFIG_SMP
108EXPORT_SYMBOL(__init_begin); 73EXPORT_SYMBOL(__raw_atomic_update_asm);
109EXPORT_SYMBOL(__init_end); 74EXPORT_SYMBOL(__raw_atomic_clear_asm);
110EXPORT_SYMBOL(_ebss_l1); 75EXPORT_SYMBOL(__raw_atomic_set_asm);
111EXPORT_SYMBOL(_stext_l1); 76EXPORT_SYMBOL(__raw_atomic_xor_asm);
112EXPORT_SYMBOL(_etext_l1); 77EXPORT_SYMBOL(__raw_atomic_test_asm);
113EXPORT_SYMBOL(_sdata_l1); 78EXPORT_SYMBOL(__raw_xchg_1_asm);
114EXPORT_SYMBOL(_ebss_b_l1); 79EXPORT_SYMBOL(__raw_xchg_2_asm);
115EXPORT_SYMBOL(_sdata_b_l1); 80EXPORT_SYMBOL(__raw_xchg_4_asm);
81EXPORT_SYMBOL(__raw_cmpxchg_1_asm);
82EXPORT_SYMBOL(__raw_cmpxchg_2_asm);
83EXPORT_SYMBOL(__raw_cmpxchg_4_asm);
84EXPORT_SYMBOL(__raw_spin_is_locked_asm);
85EXPORT_SYMBOL(__raw_spin_lock_asm);
86EXPORT_SYMBOL(__raw_spin_trylock_asm);
87EXPORT_SYMBOL(__raw_spin_unlock_asm);
88EXPORT_SYMBOL(__raw_read_lock_asm);
89EXPORT_SYMBOL(__raw_read_trylock_asm);
90EXPORT_SYMBOL(__raw_read_unlock_asm);
91EXPORT_SYMBOL(__raw_write_lock_asm);
92EXPORT_SYMBOL(__raw_write_trylock_asm);
93EXPORT_SYMBOL(__raw_write_unlock_asm);
94EXPORT_SYMBOL(__raw_bit_set_asm);
95EXPORT_SYMBOL(__raw_bit_clear_asm);
96EXPORT_SYMBOL(__raw_bit_toggle_asm);
97EXPORT_SYMBOL(__raw_bit_test_asm);
98EXPORT_SYMBOL(__raw_bit_test_set_asm);
99EXPORT_SYMBOL(__raw_bit_test_clear_asm);
100EXPORT_SYMBOL(__raw_bit_test_toggle_asm);
101EXPORT_SYMBOL(__raw_uncached_fetch_asm);
102#ifdef __ARCH_SYNC_CORE_DCACHE
103EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
104EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
105#endif
106#endif
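The EXPORT_SYMBOL lines are what let modules link against these helpers. A hedged sketch of a module flushing a buffer through one of the exported cache routines (the buffer and init hook are illustrative):

#include <linux/module.h>
#include <asm/cacheflush.h>

static char dma_buf[256];

static int __init example_init(void)
{
	unsigned long start = (unsigned long)dma_buf;

	/* write back dirty cache lines before a DMA engine reads the buffer */
	blackfin_dcache_flush_range(start, start + sizeof(dma_buf));
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");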
diff --git a/arch/blackfin/kernel/cplb-mpu/Makefile b/arch/blackfin/kernel/cplb-mpu/Makefile
index 286b69357f97..7d70d3bf3212 100644
--- a/arch/blackfin/kernel/cplb-mpu/Makefile
+++ b/arch/blackfin/kernel/cplb-mpu/Makefile
@@ -4,5 +4,7 @@
4 4
5obj-y := cplbinit.o cacheinit.o cplbmgr.o 5obj-y := cplbinit.o cacheinit.o cplbmgr.o
6 6
7obj-$(CONFIG_CPLB_INFO) += cplbinfo.o 7CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
8 8 -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
9 -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
10 -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
diff --git a/arch/blackfin/kernel/cplb-mpu/cacheinit.c b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
index a8b712a24c59..c6ff947f9d37 100644
--- a/arch/blackfin/kernel/cplb-mpu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
@@ -25,7 +25,7 @@
25#include <asm/cplbinit.h> 25#include <asm/cplbinit.h>
26 26
27#if defined(CONFIG_BFIN_ICACHE) 27#if defined(CONFIG_BFIN_ICACHE)
28void __init bfin_icache_init(void) 28void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
29{ 29{
30 unsigned long ctrl; 30 unsigned long ctrl;
31 int i; 31 int i;
@@ -43,7 +43,7 @@ void __init bfin_icache_init(void)
43#endif 43#endif
44 44
45#if defined(CONFIG_BFIN_DCACHE) 45#if defined(CONFIG_BFIN_DCACHE)
46void __init bfin_dcache_init(void) 46void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
47{ 47{
48 unsigned long ctrl; 48 unsigned long ctrl;
49 int i; 49 int i;
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinfo.c b/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
deleted file mode 100644
index 822beefa3a4b..000000000000
--- a/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * File: arch/blackfin/mach-common/cplbinfo.c
3 * Based on:
4 * Author: Sonic Zhang <sonic.zhang@analog.com>
5 *
6 * Created: Jan. 2005
7 * Description: Display CPLB status
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/proc_fs.h>
34#include <linux/uaccess.h>
35
36#include <asm/current.h>
37#include <asm/system.h>
38#include <asm/cplb.h>
39#include <asm/cplbinit.h>
40#include <asm/blackfin.h>
41
42static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" };
43
44static char *cplb_print_entry(char *buf, struct cplb_entry *tbl, int switched)
45{
46 int i;
47 buf += sprintf(buf, "Index\tAddress\t\tData\tSize\tU/RD\tU/WR\tS/WR\tSwitch\n");
48 for (i = 0; i < MAX_CPLBS; i++) {
49 unsigned long data = tbl[i].data;
50 unsigned long addr = tbl[i].addr;
51 if (!(data & CPLB_VALID))
52 continue;
53
54 buf +=
55 sprintf(buf,
56 "%d\t0x%08lx\t%06lx\t%s\t%c\t%c\t%c\t%c\n",
57 i, addr, data,
58 page_size_string_table[(data & 0x30000) >> 16],
59 (data & CPLB_USER_RD) ? 'Y' : 'N',
60 (data & CPLB_USER_WR) ? 'Y' : 'N',
61 (data & CPLB_SUPV_WR) ? 'Y' : 'N',
62 i < switched ? 'N' : 'Y');
63 }
64 buf += sprintf(buf, "\n");
65
66 return buf;
67}
68
69int cplbinfo_proc_output(char *buf)
70{
71 char *p;
72
73 p = buf;
74
75 p += sprintf(p, "------------------ CPLB Information ------------------\n\n");
76
77 if (bfin_read_IMEM_CONTROL() & ENICPLB) {
78 p += sprintf(p, "Instruction CPLB entry:\n");
79 p = cplb_print_entry(p, icplb_tbl, first_switched_icplb);
80 } else
81 p += sprintf(p, "Instruction CPLB is disabled.\n\n");
82
83 if (1 || bfin_read_DMEM_CONTROL() & ENDCPLB) {
84 p += sprintf(p, "Data CPLB entry:\n");
85 p = cplb_print_entry(p, dcplb_tbl, first_switched_dcplb);
86 } else
87 p += sprintf(p, "Data CPLB is disabled.\n");
88
89 p += sprintf(p, "ICPLB miss: %d\nICPLB supervisor miss: %d\n",
90 nr_icplb_miss, nr_icplb_supv_miss);
91 p += sprintf(p, "DCPLB miss: %d\nDCPLB protection fault:%d\n",
92 nr_dcplb_miss, nr_dcplb_prot);
93 p += sprintf(p, "CPLB flushes: %d\n",
94 nr_cplb_flush);
95
96 return p - buf;
97}
98
99static int cplbinfo_read_proc(char *page, char **start, off_t off,
100 int count, int *eof, void *data)
101{
102 int len;
103
104 len = cplbinfo_proc_output(page);
105 if (len <= off + count)
106 *eof = 1;
107 *start = page + off;
108 len -= off;
109 if (len > count)
110 len = count;
111 if (len < 0)
112 len = 0;
113 return len;
114}
115
116static int __init cplbinfo_init(void)
117{
118 struct proc_dir_entry *entry;
119
120 entry = create_proc_entry("cplbinfo", 0, NULL);
121 if (!entry)
122 return -ENOMEM;
123
124 entry->read_proc = cplbinfo_read_proc;
125 entry->data = NULL;
126
127 return 0;
128}
129
130static void __exit cplbinfo_exit(void)
131{
132 remove_proc_entry("cplbinfo", NULL);
133}
134
135module_init(cplbinfo_init);
136module_exit(cplbinfo_exit);
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index 55af729f8495..bdb958486e76 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -25,18 +25,19 @@
25#include <asm/blackfin.h> 25#include <asm/blackfin.h>
26#include <asm/cplb.h> 26#include <asm/cplb.h>
27#include <asm/cplbinit.h> 27#include <asm/cplbinit.h>
28#include <asm/mem_map.h>
28 29
29#if ANOMALY_05000263 30#if ANOMALY_05000263
30# error the MPU will not function safely while Anomaly 05000263 applies 31# error the MPU will not function safely while Anomaly 05000263 applies
31#endif 32#endif
32 33
33struct cplb_entry icplb_tbl[MAX_CPLBS]; 34struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS];
34struct cplb_entry dcplb_tbl[MAX_CPLBS]; 35struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];
35 36
36int first_switched_icplb, first_switched_dcplb; 37int first_switched_icplb, first_switched_dcplb;
37int first_mask_dcplb; 38int first_mask_dcplb;
38 39
39void __init generate_cplb_tables(void) 40void __init generate_cplb_tables_cpu(unsigned int cpu)
40{ 41{
41 int i_d, i_i; 42 int i_d, i_i;
42 unsigned long addr; 43 unsigned long addr;
@@ -55,15 +56,16 @@ void __init generate_cplb_tables(void)
55 d_cache |= CPLB_L1_AOW | CPLB_WT; 56 d_cache |= CPLB_L1_AOW | CPLB_WT;
56#endif 57#endif
57#endif 58#endif
59
58 i_d = i_i = 0; 60 i_d = i_i = 0;
59 61
60 /* Set up the zero page. */ 62 /* Set up the zero page. */
61 dcplb_tbl[i_d].addr = 0; 63 dcplb_tbl[cpu][i_d].addr = 0;
62 dcplb_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; 64 dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
63 65
64#if 0 66#if 0
65 icplb_tbl[i_i].addr = 0; 67 icplb_tbl[cpu][i_i].addr = 0;
66 icplb_tbl[i_i++].data = i_cache | CPLB_USER_RD | PAGE_SIZE_4KB; 68 icplb_tbl[cpu][i_i++].data = i_cache | CPLB_USER_RD | PAGE_SIZE_4KB;
67#endif 69#endif
68 70
69 /* Cover kernel memory with 4M pages. */ 71 /* Cover kernel memory with 4M pages. */
@@ -72,28 +74,28 @@ void __init generate_cplb_tables(void)
72 i_data = i_cache | CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4MB; 74 i_data = i_cache | CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4MB;
73 75
74 for (; addr < memory_start; addr += 4 * 1024 * 1024) { 76 for (; addr < memory_start; addr += 4 * 1024 * 1024) {
75 dcplb_tbl[i_d].addr = addr; 77 dcplb_tbl[cpu][i_d].addr = addr;
76 dcplb_tbl[i_d++].data = d_data; 78 dcplb_tbl[cpu][i_d++].data = d_data;
77 icplb_tbl[i_i].addr = addr; 79 icplb_tbl[cpu][i_i].addr = addr;
78 icplb_tbl[i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); 80 icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0);
79 } 81 }
80 82
81 /* Cover L1 memory. One 4M area for code and data each is enough. */ 83 /* Cover L1 memory. One 4M area for code and data each is enough. */
82#if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0 84#if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0
83 dcplb_tbl[i_d].addr = L1_DATA_A_START; 85 dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu);
84 dcplb_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB; 86 dcplb_tbl[cpu][i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
85#endif 87#endif
86#if L1_CODE_LENGTH > 0 88#if L1_CODE_LENGTH > 0
87 icplb_tbl[i_i].addr = L1_CODE_START; 89 icplb_tbl[cpu][i_i].addr = get_l1_code_start_cpu(cpu);
88 icplb_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB; 90 icplb_tbl[cpu][i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
89#endif 91#endif
90 92
91 /* Cover L2 memory */ 93 /* Cover L2 memory */
92#if L2_LENGTH > 0 94#if L2_LENGTH > 0
93 dcplb_tbl[i_d].addr = L2_START; 95 dcplb_tbl[cpu][i_d].addr = L2_START;
94 dcplb_tbl[i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB; 96 dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
95 icplb_tbl[i_i].addr = L2_START; 97 icplb_tbl[cpu][i_i].addr = L2_START;
96 icplb_tbl[i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB; 98 icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
97#endif 99#endif
98 100
99 first_mask_dcplb = i_d; 101 first_mask_dcplb = i_d;
@@ -101,7 +103,11 @@ void __init generate_cplb_tables(void)
101 first_switched_icplb = i_i; 103 first_switched_icplb = i_i;
102 104
103 while (i_d < MAX_CPLBS) 105 while (i_d < MAX_CPLBS)
104 dcplb_tbl[i_d++].data = 0; 106 dcplb_tbl[cpu][i_d++].data = 0;
105 while (i_i < MAX_CPLBS) 107 while (i_i < MAX_CPLBS)
106 icplb_tbl[i_i++].data = 0; 108 icplb_tbl[cpu][i_i++].data = 0;
109}
110
111void generate_cplb_tables_all(void)
112{
107} 113}
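The tables gain an NR_CPUS dimension above so each core owns a private slice. A standalone C model of the indexing change (sizes and data values are illustrative):

#include <stdio.h>

#define NR_CPUS   2
#define MAX_CPLBS 16

struct cplb_entry { unsigned long addr, data; };

static struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS];

static void fill_zero_page(unsigned int cpu)
{
	/* tbl[cpu][idx] replaces the old tbl[idx] everywhere */
	dcplb_tbl[cpu][0].addr = 0;
	dcplb_tbl[cpu][0].data = 0x1;	/* stand-in for SDRAM_OOPS | PAGE_SIZE_1KB */
}

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		fill_zero_page(cpu);
	printf("cpu0 zero-page data: %#lx\n", dcplb_tbl[0][0].data);
	return 0;
}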
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index baa52e261f0d..87463ce87f5a 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -25,15 +25,21 @@
25#include <asm/cplbinit.h> 25#include <asm/cplbinit.h>
26#include <asm/mmu_context.h> 26#include <asm/mmu_context.h>
27 27
28#define FAULT_RW (1 << 16) 28/*
29#define FAULT_USERSUPV (1 << 17) 29 * WARNING
30 *
31 * This file is compiled with certain -ffixed-reg options. We have to
32 * make sure not to call any functions here that could clobber these
33 * registers.
34 */
30 35
31int page_mask_nelts; 36int page_mask_nelts;
32int page_mask_order; 37int page_mask_order;
33unsigned long *current_rwx_mask; 38unsigned long *current_rwx_mask[NR_CPUS];
34 39
35int nr_dcplb_miss, nr_icplb_miss, nr_icplb_supv_miss, nr_dcplb_prot; 40int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
36int nr_cplb_flush; 41int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
42int nr_cplb_flush[NR_CPUS];
37 43
38static inline void disable_dcplb(void) 44static inline void disable_dcplb(void)
39{ 45{
@@ -98,42 +104,42 @@ static inline int write_permitted(int status, unsigned long data)
98} 104}
99 105
100/* Counters to implement round-robin replacement. */ 106/* Counters to implement round-robin replacement. */
101static int icplb_rr_index, dcplb_rr_index; 107static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
102 108
103/* 109/*
104 * Find an ICPLB entry to be evicted and return its index. 110 * Find an ICPLB entry to be evicted and return its index.
105 */ 111 */
106static int evict_one_icplb(void) 112static int evict_one_icplb(unsigned int cpu)
107{ 113{
108 int i; 114 int i;
109 for (i = first_switched_icplb; i < MAX_CPLBS; i++) 115 for (i = first_switched_icplb; i < MAX_CPLBS; i++)
110 if ((icplb_tbl[i].data & CPLB_VALID) == 0) 116 if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
111 return i; 117 return i;
112 i = first_switched_icplb + icplb_rr_index; 118 i = first_switched_icplb + icplb_rr_index[cpu];
113 if (i >= MAX_CPLBS) { 119 if (i >= MAX_CPLBS) {
114 i -= MAX_CPLBS - first_switched_icplb; 120 i -= MAX_CPLBS - first_switched_icplb;
115 icplb_rr_index -= MAX_CPLBS - first_switched_icplb; 121 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
116 } 122 }
117 icplb_rr_index++; 123 icplb_rr_index[cpu]++;
118 return i; 124 return i;
119} 125}
120 126
121static int evict_one_dcplb(void) 127static int evict_one_dcplb(unsigned int cpu)
122{ 128{
123 int i; 129 int i;
124 for (i = first_switched_dcplb; i < MAX_CPLBS; i++) 130 for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
125 if ((dcplb_tbl[i].data & CPLB_VALID) == 0) 131 if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
126 return i; 132 return i;
127 i = first_switched_dcplb + dcplb_rr_index; 133 i = first_switched_dcplb + dcplb_rr_index[cpu];
128 if (i >= MAX_CPLBS) { 134 if (i >= MAX_CPLBS) {
129 i -= MAX_CPLBS - first_switched_dcplb; 135 i -= MAX_CPLBS - first_switched_dcplb;
130 dcplb_rr_index -= MAX_CPLBS - first_switched_dcplb; 136 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
131 } 137 }
132 dcplb_rr_index++; 138 dcplb_rr_index[cpu]++;
133 return i; 139 return i;
134} 140}
135 141
136static noinline int dcplb_miss(void) 142static noinline int dcplb_miss(unsigned int cpu)
137{ 143{
138 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); 144 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
139 int status = bfin_read_DCPLB_STATUS(); 145 int status = bfin_read_DCPLB_STATUS();
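The two evict_one_*() helpers above keep a per-CPU round-robin cursor over the switched entries. A standalone C model of the victim selection (table size and range start are illustrative):

#include <stdio.h>

#define MAX_CPLBS      16
#define CPLB_VALID     0x1
#define FIRST_SWITCHED 8	/* stand-in for first_switched_icplb */

static unsigned long data_tbl[MAX_CPLBS];
static int rr_index;

static int evict_one(void)
{
	int i;

	for (i = FIRST_SWITCHED; i < MAX_CPLBS; i++)
		if (!(data_tbl[i] & CPLB_VALID))
			return i;	/* free slot: no eviction needed */

	i = FIRST_SWITCHED + rr_index;
	if (i >= MAX_CPLBS) {	/* wrap the cursor back into range */
		i -= MAX_CPLBS - FIRST_SWITCHED;
		rr_index -= MAX_CPLBS - FIRST_SWITCHED;
	}
	rr_index++;
	return i;
}

int main(void)
{
	int k;

	for (k = 0; k < MAX_CPLBS; k++)
		data_tbl[k] = CPLB_VALID;	/* force the round-robin path */
	for (k = 0; k < 10; k++)
		printf("victim: %d\n", evict_one());
	return 0;
}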
@@ -141,7 +147,7 @@ static noinline int dcplb_miss(void)
141 int idx; 147 int idx;
142 unsigned long d_data; 148 unsigned long d_data;
143 149
144 nr_dcplb_miss++; 150 nr_dcplb_miss[cpu]++;
145 151
146 d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; 152 d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
147#ifdef CONFIG_BFIN_DCACHE 153#ifdef CONFIG_BFIN_DCACHE
@@ -168,25 +174,25 @@ static noinline int dcplb_miss(void)
168 } else if (addr >= _ramend) { 174 } else if (addr >= _ramend) {
169 d_data |= CPLB_USER_RD | CPLB_USER_WR; 175 d_data |= CPLB_USER_RD | CPLB_USER_WR;
170 } else { 176 } else {
171 mask = current_rwx_mask; 177 mask = current_rwx_mask[cpu];
172 if (mask) { 178 if (mask) {
173 int page = addr >> PAGE_SHIFT; 179 int page = addr >> PAGE_SHIFT;
174 int offs = page >> 5; 180 int idx = page >> 5;
175 int bit = 1 << (page & 31); 181 int bit = 1 << (page & 31);
176 182
177 if (mask[offs] & bit) 183 if (mask[idx] & bit)
178 d_data |= CPLB_USER_RD; 184 d_data |= CPLB_USER_RD;
179 185
180 mask += page_mask_nelts; 186 mask += page_mask_nelts;
181 if (mask[offs] & bit) 187 if (mask[idx] & bit)
182 d_data |= CPLB_USER_WR; 188 d_data |= CPLB_USER_WR;
183 } 189 }
184 } 190 }
185 idx = evict_one_dcplb(); 191 idx = evict_one_dcplb(cpu);
186 192
187 addr &= PAGE_MASK; 193 addr &= PAGE_MASK;
188 dcplb_tbl[idx].addr = addr; 194 dcplb_tbl[cpu][idx].addr = addr;
189 dcplb_tbl[idx].data = d_data; 195 dcplb_tbl[cpu][idx].data = d_data;
190 196
191 disable_dcplb(); 197 disable_dcplb();
192 bfin_write32(DCPLB_DATA0 + idx * 4, d_data); 198 bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
@@ -196,21 +202,21 @@ static noinline int dcplb_miss(void)
196 return 0; 202 return 0;
197} 203}
198 204
199static noinline int icplb_miss(void) 205static noinline int icplb_miss(unsigned int cpu)
200{ 206{
201 unsigned long addr = bfin_read_ICPLB_FAULT_ADDR(); 207 unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
202 int status = bfin_read_ICPLB_STATUS(); 208 int status = bfin_read_ICPLB_STATUS();
203 int idx; 209 int idx;
204 unsigned long i_data; 210 unsigned long i_data;
205 211
206 nr_icplb_miss++; 212 nr_icplb_miss[cpu]++;
207 213
208 /* If inside the uncached DMA region, fault. */ 214 /* If inside the uncached DMA region, fault. */
209 if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend) 215 if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
210 return CPLB_PROT_VIOL; 216 return CPLB_PROT_VIOL;
211 217
212 if (status & FAULT_USERSUPV) 218 if (status & FAULT_USERSUPV)
213 nr_icplb_supv_miss++; 219 nr_icplb_supv_miss[cpu]++;
214 220
215 /* 221 /*
216 * First, try to find a CPLB that matches this address. If we 222 * First, try to find a CPLB that matches this address. If we
@@ -218,8 +224,8 @@ static noinline int icplb_miss(void)
218 * that the instruction crosses a page boundary. 224 * that the instruction crosses a page boundary.
219 */ 225 */
220 for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) { 226 for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
221 if (icplb_tbl[idx].data & CPLB_VALID) { 227 if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
222 unsigned long this_addr = icplb_tbl[idx].addr; 228 unsigned long this_addr = icplb_tbl[cpu][idx].addr;
223 if (this_addr <= addr && this_addr + PAGE_SIZE > addr) { 229 if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
224 addr += PAGE_SIZE; 230 addr += PAGE_SIZE;
225 break; 231 break;
@@ -257,23 +263,23 @@ static noinline int icplb_miss(void)
257 * Otherwise, check the x bitmap of the current process. 263 * Otherwise, check the x bitmap of the current process.
258 */ 264 */
259 if (!(status & FAULT_USERSUPV)) { 265 if (!(status & FAULT_USERSUPV)) {
260 unsigned long *mask = current_rwx_mask; 266 unsigned long *mask = current_rwx_mask[cpu];
261 267
262 if (mask) { 268 if (mask) {
263 int page = addr >> PAGE_SHIFT; 269 int page = addr >> PAGE_SHIFT;
264 int offs = page >> 5; 270 int idx = page >> 5;
265 int bit = 1 << (page & 31); 271 int bit = 1 << (page & 31);
266 272
267 mask += 2 * page_mask_nelts; 273 mask += 2 * page_mask_nelts;
268 if (mask[offs] & bit) 274 if (mask[idx] & bit)
269 i_data |= CPLB_USER_RD; 275 i_data |= CPLB_USER_RD;
270 } 276 }
271 } 277 }
272 } 278 }
273 idx = evict_one_icplb(); 279 idx = evict_one_icplb(cpu);
274 addr &= PAGE_MASK; 280 addr &= PAGE_MASK;
275 icplb_tbl[idx].addr = addr; 281 icplb_tbl[cpu][idx].addr = addr;
276 icplb_tbl[idx].data = i_data; 282 icplb_tbl[cpu][idx].data = i_data;
277 283
278 disable_icplb(); 284 disable_icplb();
279 bfin_write32(ICPLB_DATA0 + idx * 4, i_data); 285 bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
@@ -283,19 +289,19 @@ static noinline int icplb_miss(void)
283 return 0; 289 return 0;
284} 290}
285 291
286static noinline int dcplb_protection_fault(void) 292static noinline int dcplb_protection_fault(unsigned int cpu)
287{ 293{
288 int status = bfin_read_DCPLB_STATUS(); 294 int status = bfin_read_DCPLB_STATUS();
289 295
290 nr_dcplb_prot++; 296 nr_dcplb_prot[cpu]++;
291 297
292 if (status & FAULT_RW) { 298 if (status & FAULT_RW) {
293 int idx = faulting_cplb_index(status); 299 int idx = faulting_cplb_index(status);
294 unsigned long data = dcplb_tbl[idx].data; 300 unsigned long data = dcplb_tbl[cpu][idx].data;
295 if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) && 301 if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
296 write_permitted(status, data)) { 302 write_permitted(status, data)) {
297 data |= CPLB_DIRTY; 303 data |= CPLB_DIRTY;
298 dcplb_tbl[idx].data = data; 304 dcplb_tbl[cpu][idx].data = data;
299 bfin_write32(DCPLB_DATA0 + idx * 4, data); 305 bfin_write32(DCPLB_DATA0 + idx * 4, data);
300 return 0; 306 return 0;
301 } 307 }
@@ -306,44 +312,45 @@ static noinline int dcplb_protection_fault(void)
306int cplb_hdr(int seqstat, struct pt_regs *regs) 312int cplb_hdr(int seqstat, struct pt_regs *regs)
307{ 313{
308 int cause = seqstat & 0x3f; 314 int cause = seqstat & 0x3f;
315 unsigned int cpu = smp_processor_id();
309 switch (cause) { 316 switch (cause) {
310 case 0x23: 317 case 0x23:
311 return dcplb_protection_fault(); 318 return dcplb_protection_fault(cpu);
312 case 0x2C: 319 case 0x2C:
313 return icplb_miss(); 320 return icplb_miss(cpu);
314 case 0x26: 321 case 0x26:
315 return dcplb_miss(); 322 return dcplb_miss(cpu);
316 default: 323 default:
317 return 1; 324 return 1;
318 } 325 }
319} 326}
320 327
321void flush_switched_cplbs(void) 328void flush_switched_cplbs(unsigned int cpu)
322{ 329{
323 int i; 330 int i;
324 unsigned long flags; 331 unsigned long flags;
325 332
326 nr_cplb_flush++; 333 nr_cplb_flush[cpu]++;
327 334
328 local_irq_save(flags); 335 local_irq_save_hw(flags);
329 disable_icplb(); 336 disable_icplb();
330 for (i = first_switched_icplb; i < MAX_CPLBS; i++) { 337 for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
331 icplb_tbl[i].data = 0; 338 icplb_tbl[cpu][i].data = 0;
332 bfin_write32(ICPLB_DATA0 + i * 4, 0); 339 bfin_write32(ICPLB_DATA0 + i * 4, 0);
333 } 340 }
334 enable_icplb(); 341 enable_icplb();
335 342
336 disable_dcplb(); 343 disable_dcplb();
337 for (i = first_switched_dcplb; i < MAX_CPLBS; i++) { 344 for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
338 dcplb_tbl[i].data = 0; 345 dcplb_tbl[cpu][i].data = 0;
339 bfin_write32(DCPLB_DATA0 + i * 4, 0); 346 bfin_write32(DCPLB_DATA0 + i * 4, 0);
340 } 347 }
341 enable_dcplb(); 348 enable_dcplb();
342 local_irq_restore(flags); 349 local_irq_restore_hw(flags);
343 350
344} 351}
345 352
346void set_mask_dcplbs(unsigned long *masks) 353void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
347{ 354{
348 int i; 355 int i;
349 unsigned long addr = (unsigned long)masks; 356 unsigned long addr = (unsigned long)masks;
@@ -351,12 +358,12 @@ void set_mask_dcplbs(unsigned long *masks)
351 unsigned long flags; 358 unsigned long flags;
352 359
353 if (!masks) { 360 if (!masks) {
354 current_rwx_mask = masks; 361 current_rwx_mask[cpu] = masks;
355 return; 362 return;
356 } 363 }
357 364
358 local_irq_save(flags); 365 local_irq_save_hw(flags);
359 current_rwx_mask = masks; 366 current_rwx_mask[cpu] = masks;
360 367
361 d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; 368 d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
362#ifdef CONFIG_BFIN_DCACHE 369#ifdef CONFIG_BFIN_DCACHE
@@ -368,12 +375,12 @@ void set_mask_dcplbs(unsigned long *masks)
368 375
369 disable_dcplb(); 376 disable_dcplb();
370 for (i = first_mask_dcplb; i < first_switched_dcplb; i++) { 377 for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
371 dcplb_tbl[i].addr = addr; 378 dcplb_tbl[cpu][i].addr = addr;
372 dcplb_tbl[i].data = d_data; 379 dcplb_tbl[cpu][i].data = d_data;
373 bfin_write32(DCPLB_DATA0 + i * 4, d_data); 380 bfin_write32(DCPLB_DATA0 + i * 4, d_data);
374 bfin_write32(DCPLB_ADDR0 + i * 4, addr); 381 bfin_write32(DCPLB_ADDR0 + i * 4, addr);
375 addr += PAGE_SIZE; 382 addr += PAGE_SIZE;
376 } 383 }
377 enable_dcplb(); 384 enable_dcplb();
378 local_irq_restore(flags); 385 local_irq_restore_hw(flags);
379} 386}
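dcplb_miss() and set_mask_dcplbs() above consult per-process rwx bitmaps: one bit per page, with the read, write, and execute planes laid out back to back, each page_mask_nelts 32-bit words long. A standalone C model of the lookup:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_MASK_NELTS 4	/* stand-in for page_mask_nelts */

static uint32_t rwx_mask[3 * PAGE_MASK_NELTS];

/* which: 0 = read, 1 = write, 2 = execute */
static int page_allowed(const uint32_t *mask, unsigned long addr, int which)
{
	unsigned long page = addr >> PAGE_SHIFT;
	int idx = page >> 5;			/* word holding this page's bit */
	uint32_t bit = (uint32_t)1 << (page & 31);

	return (mask[which * PAGE_MASK_NELTS + idx] & bit) != 0;
}

int main(void)
{
	rwx_mask[0] |= (uint32_t)1 << 3;	/* mark page 3 readable */
	printf("page 3 readable: %d\n", page_allowed(rwx_mask, 3UL << PAGE_SHIFT, 0));
	printf("page 3 writable: %d\n", page_allowed(rwx_mask, 3UL << PAGE_SHIFT, 1));
	return 0;
}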
diff --git a/arch/blackfin/kernel/cplb-nompu/Makefile b/arch/blackfin/kernel/cplb-nompu/Makefile
index d36ea9b5382e..7d70d3bf3212 100644
--- a/arch/blackfin/kernel/cplb-nompu/Makefile
+++ b/arch/blackfin/kernel/cplb-nompu/Makefile
@@ -2,7 +2,9 @@
2# arch/blackfin/kernel/cplb-nompu/Makefile 2# arch/blackfin/kernel/cplb-nompu/Makefile
3# 3#
4 4
5obj-y := cplbinit.o cacheinit.o cplbhdlr.o cplbmgr.o 5obj-y := cplbinit.o cacheinit.o cplbmgr.o
6
7obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
8 6
7CFLAGS_cplbmgr.o := -ffixed-I0 -ffixed-I1 -ffixed-I2 -ffixed-I3 \
8 -ffixed-L0 -ffixed-L1 -ffixed-L2 -ffixed-L3 \
9 -ffixed-M0 -ffixed-M1 -ffixed-M2 -ffixed-M3 \
10 -ffixed-B0 -ffixed-B1 -ffixed-B2 -ffixed-B3
diff --git a/arch/blackfin/kernel/cplb-nompu/cacheinit.c b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
index bd0831592c2c..c6ff947f9d37 100644
--- a/arch/blackfin/kernel/cplb-nompu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
@@ -25,19 +25,15 @@
25#include <asm/cplbinit.h> 25#include <asm/cplbinit.h>
26 26
27#if defined(CONFIG_BFIN_ICACHE) 27#if defined(CONFIG_BFIN_ICACHE)
28void __init bfin_icache_init(void) 28void __cpuinit bfin_icache_init(struct cplb_entry *icplb_tbl)
29{ 29{
30 unsigned long *table = icplb_table;
31 unsigned long ctrl; 30 unsigned long ctrl;
32 int i; 31 int i;
33 32
33 SSYNC();
34 for (i = 0; i < MAX_CPLBS; i++) { 34 for (i = 0; i < MAX_CPLBS; i++) {
35 unsigned long addr = *table++; 35 bfin_write32(ICPLB_ADDR0 + i * 4, icplb_tbl[i].addr);
36 unsigned long data = *table++; 36 bfin_write32(ICPLB_DATA0 + i * 4, icplb_tbl[i].data);
37 if (addr == (unsigned long)-1)
38 break;
39 bfin_write32(ICPLB_ADDR0 + i * 4, addr);
40 bfin_write32(ICPLB_DATA0 + i * 4, data);
41 } 37 }
42 ctrl = bfin_read_IMEM_CONTROL(); 38 ctrl = bfin_read_IMEM_CONTROL();
43 ctrl |= IMC | ENICPLB; 39 ctrl |= IMC | ENICPLB;
@@ -47,20 +43,17 @@ void __init bfin_icache_init(void)
47#endif 43#endif
48 44
49#if defined(CONFIG_BFIN_DCACHE) 45#if defined(CONFIG_BFIN_DCACHE)
50void __init bfin_dcache_init(void) 46void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
51{ 47{
52 unsigned long *table = dcplb_table;
53 unsigned long ctrl; 48 unsigned long ctrl;
54 int i; 49 int i;
55 50
51 SSYNC();
56 for (i = 0; i < MAX_CPLBS; i++) { 52 for (i = 0; i < MAX_CPLBS; i++) {
57 unsigned long addr = *table++; 53 bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
58 unsigned long data = *table++; 54 bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
59 if (addr == (unsigned long)-1)
60 break;
61 bfin_write32(DCPLB_ADDR0 + i * 4, addr);
62 bfin_write32(DCPLB_DATA0 + i * 4, data);
63 } 55 }
56
64 ctrl = bfin_read_DMEM_CONTROL(); 57 ctrl = bfin_read_DMEM_CONTROL();
65 ctrl |= DMEM_CNTR; 58 ctrl |= DMEM_CNTR;
66 bfin_write_DMEM_CONTROL(ctrl); 59 bfin_write_DMEM_CONTROL(ctrl);
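The rewritten init loops above program entry i at a fixed 4-byte stride from the first CPLB register. A standalone model of that base + i * 4 addressing; the MMR base values here are placeholders, not authoritative:

#include <stdint.h>
#include <stdio.h>

#define MAX_CPLBS   16
#define ICPLB_ADDR0 0xFFE01100u	/* assumption: illustrative base only */
#define ICPLB_DATA0 0xFFE01200u	/* assumption: illustrative base only */

struct cplb_entry { uint32_t addr, data; };

static void show_programming(const struct cplb_entry *tbl)
{
	int i;

	for (i = 0; i < MAX_CPLBS; i++)
		/* the kernel does bfin_write32() to these addresses instead */
		printf("MMR %#x <- %#x, MMR %#x <- %#x\n",
		       ICPLB_ADDR0 + i * 4, tbl[i].addr,
		       ICPLB_DATA0 + i * 4, tbl[i].data);
}

int main(void)
{
	struct cplb_entry tbl[MAX_CPLBS] = { { 0x0, 0x1 } };

	show_programming(tbl);
	return 0;
}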
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S b/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S
deleted file mode 100644
index ecbabc0a1fed..000000000000
--- a/arch/blackfin/kernel/cplb-nompu/cplbhdlr.S
+++ /dev/null
@@ -1,130 +0,0 @@
1/*
2 * File: arch/blackfin/mach-common/cplbhdlr.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created: ?
7 * Description: CPLB exception handler
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/linkage.h>
31#include <asm/cplb.h>
32#include <asm/entry.h>
33
34#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
35.section .l1.text
36#else
37.text
38#endif
39
40.type _cplb_mgr, STT_FUNC;
41.type _panic_cplb_error, STT_FUNC;
42
43.align 2
44
45ENTRY(__cplb_hdr)
46 R2 = SEQSTAT;
47
48 /* Mask the contents of SEQSTAT and leave only EXCAUSE in R2 */
49 R2 <<= 26;
50 R2 >>= 26;
51
52 R1 = 0x23; /* Data access CPLB protection violation */
53 CC = R2 == R1;
54 IF !CC JUMP .Lnot_data_write;
55 R0 = 2; /* is a write to data space*/
56 JUMP .Lis_icplb_miss;
57
58.Lnot_data_write:
59 R1 = 0x2C; /* CPLB miss on an instruction fetch */
60 CC = R2 == R1;
61 R0 = 0; /* is_data_miss == False*/
62 IF CC JUMP .Lis_icplb_miss;
63
64 R1 = 0x26;
65 CC = R2 == R1;
66 IF !CC JUMP .Lunknown;
67
68 R0 = 1; /* is_data_miss == True*/
69
70.Lis_icplb_miss:
71
72#if defined(CONFIG_BFIN_ICACHE) || defined(CONFIG_BFIN_DCACHE)
73# if defined(CONFIG_BFIN_ICACHE) && !defined(CONFIG_BFIN_DCACHE)
74 R1 = CPLB_ENABLE_ICACHE;
75# endif
76# if !defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE)
77 R1 = CPLB_ENABLE_DCACHE;
78# endif
79# if defined(CONFIG_BFIN_ICACHE) && defined(CONFIG_BFIN_DCACHE)
80 R1 = CPLB_ENABLE_DCACHE | CPLB_ENABLE_ICACHE;
81# endif
82#else
83 R1 = 0;
84#endif
85
86 [--SP] = RETS;
87 CALL _cplb_mgr;
88 RETS = [SP++];
89 CC = R0 == 0;
90 IF !CC JUMP .Lnot_replaced;
91 RTS;
92
93/*
94 * Diagnostic exception handlers
95 */
96.Lunknown:
97 R0 = CPLB_UNKNOWN_ERR;
98 JUMP .Lcplb_error;
99
100.Lnot_replaced:
101 CC = R0 == CPLB_NO_UNLOCKED;
102 IF !CC JUMP .Lnext_check;
103 R0 = CPLB_NO_UNLOCKED;
104 JUMP .Lcplb_error;
105
106.Lnext_check:
107 CC = R0 == CPLB_NO_ADDR_MATCH;
108 IF !CC JUMP .Lnext_check2;
109 R0 = CPLB_NO_ADDR_MATCH;
110 JUMP .Lcplb_error;
111
112.Lnext_check2:
113 CC = R0 == CPLB_PROT_VIOL;
114 IF !CC JUMP .Lstrange_return_from_cplb_mgr;
115 R0 = CPLB_PROT_VIOL;
116 JUMP .Lcplb_error;
117
118.Lstrange_return_from_cplb_mgr:
119 IDLE;
120 CSYNC;
121 JUMP .Lstrange_return_from_cplb_mgr;
122
123.Lcplb_error:
124 R1 = sp;
125 SP += -12;
126 call _panic_cplb_error;
127 SP += 12;
128 JUMP.L _handle_bad_cplb;
129
130ENDPROC(__cplb_hdr)
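The shift pair at the top (R2 <<= 26; R2 >>= 26) simply isolates the low six EXCAUSE bits of SEQSTAT; the whole dispatch this file performed in assembly maps onto a few lines of C, which is essentially what the replacement cplb_hdr() further down does. A sketch (return codes illustrative, matching the 0/1/2 convention documented for _cplb_mgr):

	static int cplb_excause_dispatch(unsigned long seqstat)
	{
		unsigned long excause = seqstat & 0x3f;	/* == (seqstat << 26) >> 26 */

		switch (excause) {
		case 0x23:	/* data access CPLB protection violation */
			return 2;	/* write to a clean data page */
		case 0x26:	/* DCPLB miss */
			return 1;
		case 0x2C:	/* ICPLB miss */
			return 0;
		default:
			return -1;	/* unknown cause */
		}
	}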
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinfo.c b/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
deleted file mode 100644
index 1e74f0b97996..000000000000
--- a/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
+++ /dev/null
@@ -1,195 +0,0 @@
1/*
2 * File: arch/blackfin/mach-common/cplbinfo.c
3 * Based on:
4 * Author: Sonic Zhang <sonic.zhang@analog.com>
5 *
6 * Created: Jan. 2005
7 * Description: Display CPLB status
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/init.h>
33#include <linux/proc_fs.h>
34#include <linux/uaccess.h>
35
36#include <asm/cplbinit.h>
37#include <asm/blackfin.h>
38
39#define CPLB_I 1
40#define CPLB_D 2
41
42#define SYNC_SYS SSYNC()
43#define SYNC_CORE CSYNC()
44
45#define CPLB_BIT_PAGESIZE 0x30000
46
47static int page_size_table[4] = {
48 0x00000400, /* 1K */
49 0x00001000, /* 4K */
50 0x00100000, /* 1M */
51 0x00400000 /* 4M */
52};
53
54static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" };
55
56static int cplb_find_entry(unsigned long *cplb_addr,
57 unsigned long *cplb_data, unsigned long addr,
58 unsigned long data)
59{
60 int ii;
61
62 for (ii = 0; ii < 16; ii++)
63 if (addr >= cplb_addr[ii] && addr < cplb_addr[ii] +
64 page_size_table[(cplb_data[ii] & CPLB_BIT_PAGESIZE) >> 16]
65 && (cplb_data[ii] == data))
66 return ii;
67
68 return -1;
69}
70
71static char *cplb_print_entry(char *buf, int type)
72{
73 unsigned long *p_addr = dpdt_table;
74 unsigned long *p_data = dpdt_table + 1;
75 unsigned long *p_icount = dpdt_swapcount_table;
76 unsigned long *p_ocount = dpdt_swapcount_table + 1;
77 unsigned long *cplb_addr = (unsigned long *)DCPLB_ADDR0;
78 unsigned long *cplb_data = (unsigned long *)DCPLB_DATA0;
79 int entry = 0, used_cplb = 0;
80
81 if (type == CPLB_I) {
82 buf += sprintf(buf, "Instruction CPLB entry:\n");
83 p_addr = ipdt_table;
84 p_data = ipdt_table + 1;
85 p_icount = ipdt_swapcount_table;
86 p_ocount = ipdt_swapcount_table + 1;
87 cplb_addr = (unsigned long *)ICPLB_ADDR0;
88 cplb_data = (unsigned long *)ICPLB_DATA0;
89 } else
90 buf += sprintf(buf, "Data CPLB entry:\n");
91
92 buf += sprintf(buf, "Address\t\tData\tSize\tValid\tLocked\tSwapin\tiCount\toCount\n");
93
94 while (*p_addr != 0xffffffff) {
95 entry = cplb_find_entry(cplb_addr, cplb_data, *p_addr, *p_data);
96 if (entry >= 0)
97 used_cplb |= 1 << entry;
98
99 buf +=
100 sprintf(buf,
101 "0x%08lx\t0x%05lx\t%s\t%c\t%c\t%2d\t%ld\t%ld\n",
102 *p_addr, *p_data,
103 page_size_string_table[(*p_data & 0x30000) >> 16],
104 (*p_data & CPLB_VALID) ? 'Y' : 'N',
105 (*p_data & CPLB_LOCK) ? 'Y' : 'N', entry, *p_icount,
106 *p_ocount);
107
108 p_addr += 2;
109 p_data += 2;
110 p_icount += 2;
111 p_ocount += 2;
112 }
113
114 if (used_cplb != 0xffff) {
115 buf += sprintf(buf, "Unused/mismatched CPLBs:\n");
116
117 for (entry = 0; entry < 16; entry++)
118 if (0 == ((1 << entry) & used_cplb)) {
119 int flags = cplb_data[entry];
120 buf +=
121 sprintf(buf,
122 "%2d: 0x%08lx\t0x%05x\t%s\t%c\t%c\n",
123 entry, cplb_addr[entry], flags,
124 page_size_string_table[(flags &
125 0x30000) >>
126 16],
127 (flags & CPLB_VALID) ? 'Y' : 'N',
128 (flags & CPLB_LOCK) ? 'Y' : 'N');
129 }
130 }
131
132 buf += sprintf(buf, "\n");
133
134 return buf;
135}
136
137static int cplbinfo_proc_output(char *buf)
138{
139 char *p;
140
141 p = buf;
142
143 p += sprintf(p, "------------------ CPLB Information ------------------\n\n");
144
145 if (bfin_read_IMEM_CONTROL() & ENICPLB)
146 p = cplb_print_entry(p, CPLB_I);
147 else
148 p += sprintf(p, "Instruction CPLB is disabled.\n\n");
149
150 if (bfin_read_DMEM_CONTROL() & ENDCPLB)
151 p = cplb_print_entry(p, CPLB_D);
152 else
153 p += sprintf(p, "Data CPLB is disabled.\n");
154
155 return p - buf;
156}
157
158static int cplbinfo_read_proc(char *page, char **start, off_t off,
159 int count, int *eof, void *data)
160{
161 int len;
162
163 len = cplbinfo_proc_output(page);
164 if (len <= off + count)
165 *eof = 1;
166 *start = page + off;
167 len -= off;
168 if (len > count)
169 len = count;
170 if (len < 0)
171 len = 0;
172 return len;
173}
174
175static int __init cplbinfo_init(void)
176{
177 struct proc_dir_entry *entry;
178
179 entry = create_proc_entry("cplbinfo", 0, NULL);
180 if (!entry)
181 return -ENOMEM;
182
183 entry->read_proc = cplbinfo_read_proc;
184 entry->data = NULL;
185
186 return 0;
187}
188
189static void __exit cplbinfo_exit(void)
190{
191 remove_proc_entry("cplbinfo", NULL);
192}
193
194module_init(cplbinfo_init);
195module_exit(cplbinfo_exit);
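This file is built on the legacy read_proc interface, whose offset/count clamping is a classic source of off-by-one bugs; the clamping from cplbinfo_read_proc() above, isolated as a standalone sketch:

	/* Clamp a fully rendered buffer of `len` bytes to the window the
	 * read_proc caller asked for; *eof is set once the tail is reached. */
	static int clamp_read_proc(int len, off_t off, int count, int *eof)
	{
		if (len <= off + count)
			*eof = 1;
		len -= off;
		if (len > count)
			len = count;
		if (len < 0)
			len = 0;
		return len;
	}

The replacement cplbinfo.c added later in this series drops this interface entirely in favor of seq_file, which handles the windowing itself.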
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 2debc900e246..0e28f7595733 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -20,445 +20,152 @@
20 * to the Free Software Foundation, Inc., 20 * to the Free Software Foundation, Inc.,
21 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 21 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */ 22 */
23
23#include <linux/module.h> 24#include <linux/module.h>
24 25
25#include <asm/blackfin.h> 26#include <asm/blackfin.h>
26#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
27#include <asm/cplb.h> 28#include <asm/cplb.h>
28#include <asm/cplbinit.h> 29#include <asm/cplbinit.h>
30#include <asm/mem_map.h>
29 31
30#define CPLB_MEM CONFIG_MAX_MEM_SIZE 32struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
31 33struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
32/*
33* Number of required data CPLB switchtable entries
34* MEMSIZE / 4 (we mostly install 4M page size CPLBs)
35* approx 16 for smaller 1MB page size CPLBs for alignment purposes
36* 1 for L1 Data Memory
37* possibly 1 for L2 Data Memory
38* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
39* 1 for ASYNC Memory
40*/
41#define MAX_SWITCH_D_CPLBS (((CPLB_MEM / 4) + 16 + 1 + 1 + 1 \
42 + ASYNC_MEMORY_CPLB_COVERAGE) * 2)
43
44/*
45* Number of required instruction CPLB switchtable entries
46* MEMSIZE / 4 (we mostly install 4M page size CPLBs)
47* approx 12 for smaller 1MB page size CPLBs for alignment purposes
48* 1 for L1 Instruction Memory
49* possibly 1 for L2 Instruction Memory
50* 1 for CONFIG_DEBUG_HUNT_FOR_ZERO
51*/
52#define MAX_SWITCH_I_CPLBS (((CPLB_MEM / 4) + 12 + 1 + 1 + 1) * 2)
53
54
55u_long icplb_table[MAX_CPLBS + 1];
56u_long dcplb_table[MAX_CPLBS + 1];
57
58#ifdef CONFIG_CPLB_SWITCH_TAB_L1
59# define PDT_ATTR __attribute__((l1_data))
60#else
61# define PDT_ATTR
62#endif
63
64u_long ipdt_table[MAX_SWITCH_I_CPLBS + 1] PDT_ATTR;
65u_long dpdt_table[MAX_SWITCH_D_CPLBS + 1] PDT_ATTR;
66 34
67#ifdef CONFIG_CPLB_INFO 35int first_switched_icplb PDT_ATTR;
68u_long ipdt_swapcount_table[MAX_SWITCH_I_CPLBS] PDT_ATTR; 36int first_switched_dcplb PDT_ATTR;
69u_long dpdt_swapcount_table[MAX_SWITCH_D_CPLBS] PDT_ATTR;
70#endif
71 37
72struct s_cplb { 38struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
73 struct cplb_tab init_i; 39struct cplb_boundary icplb_bounds[7] PDT_ATTR;
74 struct cplb_tab init_d;
75 struct cplb_tab switch_i;
76 struct cplb_tab switch_d;
77};
78 40
79#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE) 41int icplb_nr_bounds PDT_ATTR;
80static struct cplb_desc cplb_data[] = { 42int dcplb_nr_bounds PDT_ATTR;
81 {
82 .start = 0,
83 .end = SIZE_1K,
84 .psize = SIZE_1K,
85 .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
86 .i_conf = SDRAM_OOPS,
87 .d_conf = SDRAM_OOPS,
88#if defined(CONFIG_DEBUG_HUNT_FOR_ZERO)
89 .valid = 1,
90#else
91 .valid = 0,
92#endif
93 .name = "Zero Pointer Guard Page",
94 },
95 {
96 .start = L1_CODE_START,
97 .end = L1_CODE_START + L1_CODE_LENGTH,
98 .psize = SIZE_4M,
99 .attr = INITIAL_T | SWITCH_T | I_CPLB,
100 .i_conf = L1_IMEMORY,
101 .d_conf = 0,
102 .valid = 1,
103 .name = "L1 I-Memory",
104 },
105 {
106 .start = L1_DATA_A_START,
107 .end = L1_DATA_B_START + L1_DATA_B_LENGTH,
108 .psize = SIZE_4M,
109 .attr = INITIAL_T | SWITCH_T | D_CPLB,
110 .i_conf = 0,
111 .d_conf = L1_DMEMORY,
112#if ((L1_DATA_A_LENGTH > 0) || (L1_DATA_B_LENGTH > 0))
113 .valid = 1,
114#else
115 .valid = 0,
116#endif
117 .name = "L1 D-Memory",
118 },
119 {
120 .start = 0,
121 .end = 0, /* dynamic */
122 .psize = 0,
123 .attr = INITIAL_T | SWITCH_T | I_CPLB | D_CPLB,
124 .i_conf = SDRAM_IGENERIC,
125 .d_conf = SDRAM_DGENERIC,
126 .valid = 1,
127 .name = "Kernel Memory",
128 },
129 {
130 .start = 0, /* dynamic */
131 .end = 0, /* dynamic */
132 .psize = 0,
133 .attr = INITIAL_T | SWITCH_T | D_CPLB,
134 .i_conf = SDRAM_IGENERIC,
135 .d_conf = SDRAM_DNON_CHBL,
136 .valid = 1,
137 .name = "uClinux MTD Memory",
138 },
139 {
140 .start = 0, /* dynamic */
141 .end = 0, /* dynamic */
142 .psize = SIZE_1M,
143 .attr = INITIAL_T | SWITCH_T | D_CPLB,
144 .d_conf = SDRAM_DNON_CHBL,
145 .valid = 1,
146 .name = "Uncached DMA Zone",
147 },
148 {
149 .start = 0, /* dynamic */
150 .end = 0, /* dynamic */
151 .psize = 0,
152 .attr = SWITCH_T | D_CPLB,
153 .i_conf = 0, /* dynamic */
154 .d_conf = 0, /* dynamic */
155 .valid = 1,
156 .name = "Reserved Memory",
157 },
158 {
159 .start = ASYNC_BANK0_BASE,
160 .end = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE,
161 .psize = 0,
162 .attr = SWITCH_T | D_CPLB,
163 .d_conf = SDRAM_EBIU,
164 .valid = 1,
165 .name = "Asynchronous Memory Banks",
166 },
167 {
168 .start = L2_START,
169 .end = L2_START + L2_LENGTH,
170 .psize = SIZE_1M,
171 .attr = SWITCH_T | I_CPLB | D_CPLB,
172 .i_conf = L2_IMEMORY,
173 .d_conf = L2_DMEMORY,
174 .valid = (L2_LENGTH > 0),
175 .name = "L2 Memory",
176 },
177 {
178 .start = BOOT_ROM_START,
179 .end = BOOT_ROM_START + BOOT_ROM_LENGTH,
180 .psize = SIZE_1M,
181 .attr = SWITCH_T | I_CPLB | D_CPLB,
182 .i_conf = SDRAM_IGENERIC,
183 .d_conf = SDRAM_DGENERIC,
184 .valid = 1,
185 .name = "On-Chip BootROM",
186 },
187};
188 43
189static u16 __init lock_kernel_check(u32 start, u32 end) 44void __init generate_cplb_tables_cpu(unsigned int cpu)
190{ 45{
191 if (start >= (u32)_end || end <= (u32)_stext) 46 int i_d, i_i;
192 return 0; 47 unsigned long addr;
193 48
194 /* This cplb block overlapped with kernel area. */ 49 struct cplb_entry *d_tbl = dcplb_tbl[cpu];
195 return IN_KERNEL; 50 struct cplb_entry *i_tbl = icplb_tbl[cpu];
196}
197 51
198static unsigned short __init 52 printk(KERN_INFO "NOMPU: setting up cplb tables\n");
199fill_cplbtab(struct cplb_tab *table,
200 unsigned long start, unsigned long end,
201 unsigned long block_size, unsigned long cplb_data)
202{
203 int i;
204 53
205 switch (block_size) { 54 i_d = i_i = 0;
206 case SIZE_4M:
207 i = 3;
208 break;
209 case SIZE_1M:
210 i = 2;
211 break;
212 case SIZE_4K:
213 i = 1;
214 break;
215 case SIZE_1K:
216 default:
217 i = 0;
218 break;
219 }
220
221 cplb_data = (cplb_data & ~(3 << 16)) | (i << 16);
222
223 while ((start < end) && (table->pos < table->size)) {
224 55
225 table->tab[table->pos++] = start; 56 /* Set up the zero page. */
57 d_tbl[i_d].addr = 0;
58 d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
226 59
227 if (lock_kernel_check(start, start + block_size) == IN_KERNEL) 60 /* Cover kernel memory with 4M pages. */
228 table->tab[table->pos++] = 61 addr = 0;
229 cplb_data | CPLB_LOCK | CPLB_DIRTY;
230 else
231 table->tab[table->pos++] = cplb_data;
232 62
233 start += block_size; 63 for (; addr < memory_start; addr += 4 * 1024 * 1024) {
64 d_tbl[i_d].addr = addr;
65 d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
66 i_tbl[i_i].addr = addr;
67 i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
234 } 68 }
235 return 0;
236}
237 69
238static unsigned short __init 70 /* Cover L1 memory. One 4M area for code and data each is enough. */
239close_cplbtab(struct cplb_tab *table) 71 if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
240{ 72 d_tbl[i_d].addr = L1_DATA_A_START;
241 73 d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
242 while (table->pos < table->size) {
243
244 table->tab[table->pos++] = 0;
245 table->tab[table->pos++] = 0; /* !CPLB_VALID */
246 } 74 }
247 return 0; 75 i_tbl[i_i].addr = L1_CODE_START;
248} 76 i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
249 77
250/* helper function */ 78 first_switched_dcplb = i_d;
251static void __init 79 first_switched_icplb = i_i;
252__fill_code_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end)
253{
254 if (cplb_data[i].psize) {
255 fill_cplbtab(t,
256 cplb_data[i].start,
257 cplb_data[i].end,
258 cplb_data[i].psize,
259 cplb_data[i].i_conf);
260 } else {
261#if defined(CONFIG_BFIN_ICACHE)
262 if (ANOMALY_05000263 && i == SDRAM_KERN) {
263 fill_cplbtab(t,
264 cplb_data[i].start,
265 cplb_data[i].end,
266 SIZE_4M,
267 cplb_data[i].i_conf);
268 } else
269#endif
270 {
271 fill_cplbtab(t,
272 cplb_data[i].start,
273 a_start,
274 SIZE_1M,
275 cplb_data[i].i_conf);
276 fill_cplbtab(t,
277 a_start,
278 a_end,
279 SIZE_4M,
280 cplb_data[i].i_conf);
281 fill_cplbtab(t, a_end,
282 cplb_data[i].end,
283 SIZE_1M,
284 cplb_data[i].i_conf);
285 }
286 }
287}
288 80
289static void __init 81 BUG_ON(first_switched_dcplb > MAX_CPLBS);
290__fill_data_cplbtab(struct cplb_tab *t, int i, u32 a_start, u32 a_end) 82 BUG_ON(first_switched_icplb > MAX_CPLBS);
291{ 83
292 if (cplb_data[i].psize) { 84 while (i_d < MAX_CPLBS)
293 fill_cplbtab(t, 85 d_tbl[i_d++].data = 0;
294 cplb_data[i].start, 86 while (i_i < MAX_CPLBS)
295 cplb_data[i].end, 87 i_tbl[i_i++].data = 0;
296 cplb_data[i].psize,
297 cplb_data[i].d_conf);
298 } else {
299 fill_cplbtab(t,
300 cplb_data[i].start,
301 a_start, SIZE_1M,
302 cplb_data[i].d_conf);
303 fill_cplbtab(t, a_start,
304 a_end, SIZE_4M,
305 cplb_data[i].d_conf);
306 fill_cplbtab(t, a_end,
307 cplb_data[i].end,
308 SIZE_1M,
309 cplb_data[i].d_conf);
310 }
311} 88}
312 89
313void __init generate_cplb_tables(void) 90void __init generate_cplb_tables_all(void)
314{ 91{
92 int i_d, i_i;
315 93
316 u16 i, j, process; 94 i_d = 0;
317 u32 a_start, a_end, as, ae, as_1m; 95 /* Normal RAM, including MTD FS. */
318
319 struct cplb_tab *t_i = NULL;
320 struct cplb_tab *t_d = NULL;
321 struct s_cplb cplb;
322
323 printk(KERN_INFO "NOMPU: setting up cplb tables for global access\n");
324
325 cplb.init_i.size = MAX_CPLBS;
326 cplb.init_d.size = MAX_CPLBS;
327 cplb.switch_i.size = MAX_SWITCH_I_CPLBS;
328 cplb.switch_d.size = MAX_SWITCH_D_CPLBS;
329
330 cplb.init_i.pos = 0;
331 cplb.init_d.pos = 0;
332 cplb.switch_i.pos = 0;
333 cplb.switch_d.pos = 0;
334
335 cplb.init_i.tab = icplb_table;
336 cplb.init_d.tab = dcplb_table;
337 cplb.switch_i.tab = ipdt_table;
338 cplb.switch_d.tab = dpdt_table;
339
340 cplb_data[SDRAM_KERN].end = memory_end;
341
342#ifdef CONFIG_MTD_UCLINUX 96#ifdef CONFIG_MTD_UCLINUX
343 cplb_data[SDRAM_RAM_MTD].start = memory_mtd_start; 97 dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size;
344 cplb_data[SDRAM_RAM_MTD].end = memory_mtd_start + mtd_size;
345 cplb_data[SDRAM_RAM_MTD].valid = mtd_size > 0;
346# if defined(CONFIG_ROMFS_FS)
347 cplb_data[SDRAM_RAM_MTD].attr |= I_CPLB;
348
349 /*
 350 * The ROMFS_FS size is often not a multiple of 1MB.
351 * This can cause multiple CPLB sets covering the same memory area.
352 * This will then cause multiple CPLB hit exceptions.
353 * Workaround: We ensure a contiguous memory area by extending the kernel
354 * memory section over the mtd section.
 355 * For ROMFS_FS, memory must be covered with ICPLBs anyway.
356 * So there is no difference between kernel and mtd memory setup.
357 */
358
 359 cplb_data[SDRAM_KERN].end = memory_mtd_start + mtd_size;
360 cplb_data[SDRAM_RAM_MTD].valid = 0;
361
362# endif
363#else 98#else
364 cplb_data[SDRAM_RAM_MTD].valid = 0; 99 dcplb_bounds[i_d].eaddr = memory_end;
365#endif 100#endif
101 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
102 /* DMA uncached region. */
103 if (DMA_UNCACHED_REGION) {
104 dcplb_bounds[i_d].eaddr = _ramend;
105 dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
106 }
107 if (_ramend != physical_mem_end) {
108 /* Reserved memory. */
109 dcplb_bounds[i_d].eaddr = physical_mem_end;
110 dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
111 SDRAM_DGENERIC : SDRAM_DNON_CHBL);
112 }
113 /* Addressing hole up to the async bank. */
114 dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
115 dcplb_bounds[i_d++].data = 0;
116 /* ASYNC banks. */
117 dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
118 dcplb_bounds[i_d++].data = SDRAM_EBIU;
119 /* Addressing hole up to BootROM. */
120 dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
121 dcplb_bounds[i_d++].data = 0;
122 /* BootROM -- largest one should be less than 1 meg. */
123 dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
124 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
125 if (L2_LENGTH) {
126 /* Addressing hole up to L2 SRAM. */
127 dcplb_bounds[i_d].eaddr = L2_START;
128 dcplb_bounds[i_d++].data = 0;
129 /* L2 SRAM. */
130 dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
131 dcplb_bounds[i_d++].data = L2_DMEMORY;
132 }
133 dcplb_nr_bounds = i_d;
134 BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));
366 135
367 cplb_data[SDRAM_DMAZ].start = _ramend - DMA_UNCACHED_REGION; 136 i_i = 0;
368 cplb_data[SDRAM_DMAZ].end = _ramend; 137 /* Normal RAM, including MTD FS. */
369
370 cplb_data[RES_MEM].start = _ramend;
371 cplb_data[RES_MEM].end = physical_mem_end;
372
373 if (reserved_mem_dcache_on)
374 cplb_data[RES_MEM].d_conf = SDRAM_DGENERIC;
375 else
376 cplb_data[RES_MEM].d_conf = SDRAM_DNON_CHBL;
377
378 if (reserved_mem_icache_on)
379 cplb_data[RES_MEM].i_conf = SDRAM_IGENERIC;
380 else
381 cplb_data[RES_MEM].i_conf = SDRAM_INON_CHBL;
382
383 for (i = ZERO_P; i < ARRAY_SIZE(cplb_data); ++i) {
384 if (!cplb_data[i].valid)
385 continue;
386
387 as_1m = cplb_data[i].start % SIZE_1M;
388
 389 /* We need to make sure all sections are properly 1M aligned.
 390 * However, between Kernel Memory and the Kernel mtd section, depending on the
391 * rootfs size, there can be overlapping memory areas.
392 */
393
394 if (as_1m && i != L1I_MEM && i != L1D_MEM) {
395#ifdef CONFIG_MTD_UCLINUX 138#ifdef CONFIG_MTD_UCLINUX
396 if (i == SDRAM_RAM_MTD) { 139 icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size;
397 if ((cplb_data[SDRAM_KERN].end + 1) > cplb_data[SDRAM_RAM_MTD].start) 140#else
398 cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M)) + SIZE_1M; 141 icplb_bounds[i_i].eaddr = memory_end;
399 else
400 cplb_data[SDRAM_RAM_MTD].start = (cplb_data[i].start & (-2*SIZE_1M));
401 } else
402#endif 142#endif
403 printk(KERN_WARNING "Unaligned Start of %s at 0x%X\n", 143 icplb_bounds[i_i++].data = SDRAM_IGENERIC;
404 cplb_data[i].name, cplb_data[i].start); 144 /* DMA uncached region. */
405 } 145 if (DMA_UNCACHED_REGION) {
406 146 icplb_bounds[i_i].eaddr = _ramend;
407 as = cplb_data[i].start % SIZE_4M; 147 icplb_bounds[i_i++].data = 0;
408 ae = cplb_data[i].end % SIZE_4M;
409
410 if (as)
411 a_start = cplb_data[i].start + (SIZE_4M - (as));
412 else
413 a_start = cplb_data[i].start;
414
415 a_end = cplb_data[i].end - ae;
416
417 for (j = INITIAL_T; j <= SWITCH_T; j++) {
418
419 switch (j) {
420 case INITIAL_T:
421 if (cplb_data[i].attr & INITIAL_T) {
422 t_i = &cplb.init_i;
423 t_d = &cplb.init_d;
424 process = 1;
425 } else
426 process = 0;
427 break;
428 case SWITCH_T:
429 if (cplb_data[i].attr & SWITCH_T) {
430 t_i = &cplb.switch_i;
431 t_d = &cplb.switch_d;
432 process = 1;
433 } else
434 process = 0;
435 break;
436 default:
437 process = 0;
438 break;
439 }
440
441 if (!process)
442 continue;
443 if (cplb_data[i].attr & I_CPLB)
444 __fill_code_cplbtab(t_i, i, a_start, a_end);
445
446 if (cplb_data[i].attr & D_CPLB)
447 __fill_data_cplbtab(t_d, i, a_start, a_end);
448 }
449 } 148 }
450 149 if (_ramend != physical_mem_end) {
451/* close tables */ 150 /* Reserved memory. */
452 151 icplb_bounds[i_i].eaddr = physical_mem_end;
453 close_cplbtab(&cplb.init_i); 152 icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
454 close_cplbtab(&cplb.init_d); 153 SDRAM_IGENERIC : SDRAM_INON_CHBL);
455 154 }
456 cplb.init_i.tab[cplb.init_i.pos] = -1; 155 /* Addressing hole up to BootROM. */
457 cplb.init_d.tab[cplb.init_d.pos] = -1; 156 icplb_bounds[i_i].eaddr = BOOT_ROM_START;
458 cplb.switch_i.tab[cplb.switch_i.pos] = -1; 157 icplb_bounds[i_i++].data = 0;
459 cplb.switch_d.tab[cplb.switch_d.pos] = -1; 158 /* BootROM -- largest one should be less than 1 meg. */
460 159 icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
160 icplb_bounds[i_i++].data = SDRAM_IGENERIC;
161 if (L2_LENGTH) {
162 /* Addressing hole up to L2 SRAM, including the async bank. */
163 icplb_bounds[i_i].eaddr = L2_START;
164 icplb_bounds[i_i++].data = 0;
165 /* L2 SRAM. */
166 icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
167 icplb_bounds[i_i++].data = L2_IMEMORY;
168 }
169 icplb_nr_bounds = i_i;
170 BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
461} 171}
462
463#endif
464
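The boundary tables built here are consumed by the new C miss handler below: entries carry ascending end addresses, so the region covering a faulting address is the first entry whose eaddr lies above it, and a zero data word marks an addressing hole. A sketch of that lookup, using the same eaddr/data fields the code above fills in (function name hypothetical):

	static long cplb_find_bounds(const struct cplb_boundary *bounds, int nr,
				     unsigned long addr, unsigned long *base)
	{
		int i;

		*base = 0;
		for (i = 0; i < nr; i++) {
			if (addr < bounds[i].eaddr)
				return bounds[i].data;	/* 0 means a hole */
			*base = bounds[i].eaddr;
		}
		return -1;	/* no region covers addr */
	}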
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.S b/arch/blackfin/kernel/cplb-nompu/cplbmgr.S
deleted file mode 100644
index f5cf3accef37..000000000000
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.S
+++ /dev/null
@@ -1,646 +0,0 @@
1/*
2 * File: arch/blackfin/mach-common/cplbmgr.S
3 * Based on:
4 * Author: LG Soft India
5 *
6 * Created: ?
7 * Description: CPLB replacement routine for CPLB mismatch
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30/* Usage: int _cplb_mgr(int is_data_miss, int enable_cache)
31 * is_data_miss==2 => Mark as Dirty, write to the clean data page
32 * is_data_miss==1 => Replace a data CPLB.
33 * is_data_miss==0 => Replace an instruction CPLB.
34 *
35 * Returns:
36 * CPLB_RELOADED => Successfully updated CPLB table.
37 * CPLB_NO_UNLOCKED => All CPLBs are locked, so cannot be evicted.
38 * This indicates that the CPLBs in the configuration
 39 * table are badly configured, as this should never
40 * occur.
41 * CPLB_NO_ADDR_MATCH => The address being accessed, that triggered the
42 * exception, is not covered by any of the CPLBs in
43 * the configuration table. The application is
44 * presumably misbehaving.
45 * CPLB_PROT_VIOL => The address being accessed, that triggered the
46 * exception, was not a first-write to a clean Write
47 * Back Data page, and so presumably is a genuine
48 * violation of the page's protection attributes.
49 * The application is misbehaving.
50 */
51
52#include <linux/linkage.h>
53#include <asm/blackfin.h>
54#include <asm/cplb.h>
55
56#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
57.section .l1.text
58#else
59.text
60#endif
61
62.align 2;
63ENTRY(_cplb_mgr)
64
65 [--SP]=( R7:4,P5:3 );
66
67 CC = R0 == 2;
68 IF CC JUMP .Ldcplb_write;
69
70 CC = R0 == 0;
71 IF !CC JUMP .Ldcplb_miss_compare;
72
73 /* ICPLB Miss Exception. We need to choose one of the
74 * currently-installed CPLBs, and replace it with one
75 * from the configuration table.
76 */
77
78 /* A multi-word instruction can cross a page boundary. This means the
79 * first part of the instruction can be in a valid page, but the
80 * second part is not, and hence generates the instruction miss.
81 * However, the fault address is for the start of the instruction,
82 * not the part that's in the bad page. Therefore, we have to check
83 * whether the fault address applies to a page that is already present
84 * in the table.
85 */
86
87 P4.L = LO(ICPLB_FAULT_ADDR);
88 P4.H = HI(ICPLB_FAULT_ADDR);
89
90 P1 = 16;
91 P5.L = _page_size_table;
92 P5.H = _page_size_table;
93
94 P0.L = LO(ICPLB_DATA0);
95 P0.H = HI(ICPLB_DATA0);
96 R4 = [P4]; /* Get faulting address*/
97 R6 = 64; /* Advance past the fault address, which*/
98 R6 = R6 + R4; /* we'll use if we find a match*/
99 R3 = ((16 << 8) | 2); /* Extract mask, two bits at posn 16 */
100
101 R5 = 0;
102.Lisearch:
103
104 R1 = [P0-0x100]; /* Address for this CPLB */
105
106 R0 = [P0++]; /* Info for this CPLB*/
107 CC = BITTST(R0,0); /* Is the CPLB valid?*/
108 IF !CC JUMP .Lnomatch; /* Skip it, if not.*/
109 CC = R4 < R1(IU); /* If fault address less than page start*/
110 IF CC JUMP .Lnomatch; /* then skip this one.*/
111 R2 = EXTRACT(R0,R3.L) (Z); /* Get page size*/
112 P1 = R2;
113 P1 = P5 + (P1<<2); /* index into page-size table*/
114 R2 = [P1]; /* Get the page size*/
115 R1 = R1 + R2; /* and add to page start, to get page end*/
116 CC = R4 < R1(IU); /* and see whether fault addr is in page.*/
117 IF !CC R4 = R6; /* If so, advance the address and finish loop.*/
118 IF !CC JUMP .Lisearch_done;
119.Lnomatch:
120 /* Go around again*/
121 R5 += 1;
 122 CC = BITTST(R5, 4); /* i.e. CC = R5 >= 16 */
123 IF !CC JUMP .Lisearch;
124
125.Lisearch_done:
126 I0 = R4; /* Fault address we'll search for*/
127
128 /* set up pointers */
129 P0.L = LO(ICPLB_DATA0);
130 P0.H = HI(ICPLB_DATA0);
131
132 /* The replacement procedure for ICPLBs */
133
134 P4.L = LO(IMEM_CONTROL);
135 P4.H = HI(IMEM_CONTROL);
136
137 /* Turn off CPLBs while we work, necessary according to HRM before
138 * modifying CPLB descriptors
139 */
140 R5 = [P4]; /* Control Register*/
141 BITCLR(R5,ENICPLB_P);
142 CLI R1;
143 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
144 .align 8;
145 [P4] = R5;
146 SSYNC;
147 STI R1;
148
149 R1 = -1; /* end point comparison */
150 R3 = 16; /* counter */
151
152 /* Search through CPLBs for first non-locked entry */
153 /* Overwrite it by moving everyone else up by 1 */
154.Licheck_lock:
155 R0 = [P0++];
156 R3 = R3 + R1;
157 CC = R3 == R1;
158 IF CC JUMP .Lall_locked;
159 CC = BITTST(R0, 0); /* an invalid entry is good */
160 IF !CC JUMP .Lifound_victim;
161 CC = BITTST(R0,1); /* but a locked entry isn't */
162 IF CC JUMP .Licheck_lock;
163
164.Lifound_victim:
165#ifdef CONFIG_CPLB_INFO
166 R7 = [P0 - 0x104];
167 P2.L = _ipdt_table;
168 P2.H = _ipdt_table;
169 P3.L = _ipdt_swapcount_table;
170 P3.H = _ipdt_swapcount_table;
171 P3 += -4;
172.Licount:
173 R2 = [P2]; /* address from config table */
174 P2 += 8;
175 P3 += 8;
176 CC = R2==-1;
177 IF CC JUMP .Licount_done;
178 CC = R7==R2;
179 IF !CC JUMP .Licount;
180 R7 = [P3];
181 R7 += 1;
182 [P3] = R7;
183 CSYNC;
184.Licount_done:
185#endif
186 LC0=R3;
187 LSETUP(.Lis_move,.Lie_move) LC0;
188.Lis_move:
189 R0 = [P0];
190 [P0 - 4] = R0;
191 R0 = [P0 - 0x100];
192 [P0-0x104] = R0;
193.Lie_move:
194 P0+=4;
195
 196 /* Clear ICPLB_DATA15, in case we don't find a replacement;
 197 * otherwise, we would have a duplicate entry and will crash
198 */
199 R0 = 0;
200 [P0 - 4] = R0;
201
202 /* We've made space in the ICPLB table, so that ICPLB15
203 * is now free to be overwritten. Next, we have to determine
204 * which CPLB we need to install, from the configuration
205 * table. This is a matter of getting the start-of-page
206 * addresses and page-lengths from the config table, and
207 * determining whether the fault address falls within that
208 * range.
209 */
210
211 P2.L = _ipdt_table;
212 P2.H = _ipdt_table;
213#ifdef CONFIG_CPLB_INFO
214 P3.L = _ipdt_swapcount_table;
215 P3.H = _ipdt_swapcount_table;
216 P3 += -8;
217#endif
218 P0.L = _page_size_table;
219 P0.H = _page_size_table;
220
221 /* Retrieve our fault address (which may have been advanced
222 * because the faulting instruction crossed a page boundary).
223 */
224
225 R0 = I0;
226
227 /* An extraction pattern, to get the page-size bits from
228 * the CPLB data entry. Bits 16-17, so two bits at posn 16.
229 */
230
231 R1 = ((16<<8)|2);
232.Linext: R4 = [P2++]; /* address from config table */
233 R2 = [P2++]; /* data from config table */
234#ifdef CONFIG_CPLB_INFO
235 P3 += 8;
236#endif
237
238 CC = R4 == -1; /* End of config table*/
239 IF CC JUMP .Lno_page_in_table;
240
241 /* See if failed address > start address */
242 CC = R4 <= R0(IU);
243 IF !CC JUMP .Linext;
244
245 /* extract page size (17:16)*/
246 R3 = EXTRACT(R2, R1.L) (Z);
247
248 /* add page size to addr to get range */
249
250 P5 = R3;
251 P5 = P0 + (P5 << 2); /* scaled, for int access*/
252 R3 = [P5];
253 R3 = R3 + R4;
254
255 /* See if failed address < (start address + page size) */
256 CC = R0 < R3(IU);
257 IF !CC JUMP .Linext;
258
259 /* We've found a CPLB in the config table that covers
260 * the faulting address, so install this CPLB into the
261 * last entry of the table.
262 */
263
264 P1.L = LO(ICPLB_DATA15); /* ICPLB_DATA15 */
265 P1.H = HI(ICPLB_DATA15);
266 [P1] = R2;
267 [P1-0x100] = R4;
268#ifdef CONFIG_CPLB_INFO
269 R3 = [P3];
270 R3 += 1;
271 [P3] = R3;
272#endif
273
274 /* P4 points to IMEM_CONTROL, and R5 contains its old
275 * value, after we disabled ICPLBS. Re-enable them.
276 */
277
278 BITSET(R5,ENICPLB_P);
279 CLI R2;
280 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
281 .align 8;
282 [P4] = R5;
283 SSYNC;
284 STI R2;
285
286 ( R7:4,P5:3 ) = [SP++];
287 R0 = CPLB_RELOADED;
288 RTS;
289
290/* FAILED CASES*/
291.Lno_page_in_table:
292 R0 = CPLB_NO_ADDR_MATCH;
293 JUMP .Lfail_ret;
294
295.Lall_locked:
296 R0 = CPLB_NO_UNLOCKED;
297 JUMP .Lfail_ret;
298
299.Lprot_violation:
300 R0 = CPLB_PROT_VIOL;
301
302.Lfail_ret:
303 /* Make sure we turn protection/cache back on, even in the failing case */
304 BITSET(R5,ENICPLB_P);
305 CLI R2;
306 SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
307 .align 8;
308 [P4] = R5;
309 SSYNC;
310 STI R2;
311
312 ( R7:4,P5:3 ) = [SP++];
313 RTS;
314
315.Ldcplb_write:
316
317 /* if a DCPLB is marked as write-back (CPLB_WT==0), and
318 * it is clean (CPLB_DIRTY==0), then a write to the
319 * CPLB's page triggers a protection violation. We have to
320 * mark the CPLB as dirty, to indicate that there are
321 * pending writes associated with the CPLB.
322 */
323
324 P4.L = LO(DCPLB_STATUS);
325 P4.H = HI(DCPLB_STATUS);
326 P3.L = LO(DCPLB_DATA0);
327 P3.H = HI(DCPLB_DATA0);
328 R5 = [P4];
329
330 /* A protection violation can be caused by more than just writes
331 * to a clean WB page, so we have to ensure that:
332 * - It's a write
333 * - to a clean WB page
334 * - and is allowed in the mode the access occurred.
335 */
336
337 CC = BITTST(R5, 16); /* ensure it was a write*/
338 IF !CC JUMP .Lprot_violation;
339
340 /* to check the rest, we have to retrieve the DCPLB.*/
341
342 /* The low half of DCPLB_STATUS is a bit mask*/
343
344 R2 = R5.L (Z); /* indicating which CPLB triggered the event.*/
345 R3 = 30; /* so we can use this to determine the offset*/
346 R2.L = SIGNBITS R2;
347 R2 = R2.L (Z); /* into the DCPLB table.*/
348 R3 = R3 - R2;
349 P4 = R3;
350 P3 = P3 + (P4<<2);
351 R3 = [P3]; /* Retrieve the CPLB*/
352
353 /* Now we can check whether it's a clean WB page*/
354
355 CC = BITTST(R3, 14); /* 0==WB, 1==WT*/
356 IF CC JUMP .Lprot_violation;
357 CC = BITTST(R3, 7); /* 0 == clean, 1 == dirty*/
358 IF CC JUMP .Lprot_violation;
359
360 /* Check whether the write is allowed in the mode that was active.*/
361
362 R2 = 1<<3; /* checking write in user mode*/
363 CC = BITTST(R5, 17); /* 0==was user, 1==was super*/
364 R5 = CC;
365 R2 <<= R5; /* if was super, check write in super mode*/
366 R2 = R3 & R2;
367 CC = R2 == 0;
368 IF CC JUMP .Lprot_violation;
369
370 /* It's a genuine write-to-clean-page.*/
371
372 BITSET(R3, 7); /* mark as dirty*/
373 [P3] = R3; /* and write back.*/
374 NOP;
375 CSYNC;
376 ( R7:4,P5:3 ) = [SP++];
377 R0 = CPLB_RELOADED;
378 RTS;
379
380.Ldcplb_miss_compare:
381
382 /* Data CPLB Miss event. We need to choose a CPLB to
383 * evict, and then locate a new CPLB to install from the
384 * config table, that covers the faulting address.
385 */
386
387 P1.L = LO(DCPLB_DATA15);
388 P1.H = HI(DCPLB_DATA15);
389
390 P4.L = LO(DCPLB_FAULT_ADDR);
391 P4.H = HI(DCPLB_FAULT_ADDR);
392 R4 = [P4];
393 I0 = R4;
394
395 /* The replacement procedure for DCPLBs*/
396
397 R6 = R1; /* Save for later*/
398
399 /* Turn off CPLBs while we work.*/
400 P4.L = LO(DMEM_CONTROL);
401 P4.H = HI(DMEM_CONTROL);
402 R5 = [P4];
403 BITCLR(R5,ENDCPLB_P);
404 CLI R0;
405 SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
406 .align 8;
407 [P4] = R5;
408 SSYNC;
409 STI R0;
410
411 /* Start looking for a CPLB to evict. Our order of preference
412 * is: invalid CPLBs, clean CPLBs, dirty CPLBs. Locked CPLBs
413 * are no good.
414 */
415
416 I1.L = LO(DCPLB_DATA0);
417 I1.H = HI(DCPLB_DATA0);
418 P1 = 2;
419 P2 = 16;
420 I2.L = _dcplb_preference;
421 I2.H = _dcplb_preference;
422 LSETUP(.Lsdsearch1, .Ledsearch1) LC0 = P1;
423.Lsdsearch1:
424 R0 = [I2++]; /* Get the bits we're interested in*/
425 P0 = I1; /* Go back to start of table*/
426 LSETUP (.Lsdsearch2, .Ledsearch2) LC1 = P2;
427.Lsdsearch2:
428 R1 = [P0++]; /* Fetch each installed CPLB in turn*/
429 R2 = R1 & R0; /* and test for interesting bits.*/
430 CC = R2 == 0; /* If none are set, it'll do.*/
431 IF !CC JUMP .Lskip_stack_check;
432
433 R2 = [P0 - 0x104]; /* R2 - PageStart */
434 P3.L = _page_size_table; /* retrieve end address */
435 P3.H = _page_size_table; /* retrieve end address */
436 R3 = 0x1002; /* 16th - position, 2 bits -length */
437#if ANOMALY_05000209
438 nop; /* Anomaly 05000209 */
439#endif
440 R7 = EXTRACT(R1,R3.l);
441 R7 = R7 << 2; /* Page size index offset */
442 P5 = R7;
443 P3 = P3 + P5;
444 R7 = [P3]; /* page size in bytes */
445
446 R7 = R2 + R7; /* R7 - PageEnd */
447 R4 = SP; /* Test SP is in range */
448
449 CC = R7 < R4; /* if PageEnd < SP */
450 IF CC JUMP .Ldfound_victim;
 451 R3 = 0x284; /* stack length from the start of the trap
 452 * to this point, plus
 453 * 20 stack locations for future modifications
 454 */
455 R4 = R4 + R3;
456 CC = R4 < R2; /* if SP + stacklen < PageStart */
457 IF CC JUMP .Ldfound_victim;
458.Lskip_stack_check:
459
460.Ledsearch2: NOP;
461.Ledsearch1: NOP;
462
463 /* If we got here, we didn't find a DCPLB we considered
 464 * replaceable, which means all of them were locked.
465 */
466
467 JUMP .Lall_locked;
468.Ldfound_victim:
469
470#ifdef CONFIG_CPLB_INFO
471 R7 = [P0 - 0x104];
472 P2.L = _dpdt_table;
473 P2.H = _dpdt_table;
474 P3.L = _dpdt_swapcount_table;
475 P3.H = _dpdt_swapcount_table;
476 P3 += -4;
477.Ldicount:
478 R2 = [P2];
479 P2 += 8;
480 P3 += 8;
481 CC = R2==-1;
482 IF CC JUMP .Ldicount_done;
483 CC = R7==R2;
484 IF !CC JUMP .Ldicount;
485 R7 = [P3];
486 R7 += 1;
487 [P3] = R7;
488.Ldicount_done:
489#endif
490
491 /* Clean down the hardware loops*/
492 R2 = 0;
493 LC1 = R2;
494 LC0 = R2;
495
496 /* There's a suitable victim in [P0-4] (because we've
497 * advanced already).
498 */
499
500.LDdoverwrite:
501
502 /* [P0-4] is a suitable victim CPLB, so we want to
503 * overwrite it by moving all the following CPLBs
504 * one space closer to the start.
505 */
506
507 R1.L = LO(DCPLB_DATA16); /* DCPLB_DATA15 + 4 */
508 R1.H = HI(DCPLB_DATA16);
509 R0 = P0;
510
511 /* If the victim happens to be in DCPLB15,
512 * we don't need to move anything.
513 */
514
515 CC = R1 == R0;
516 IF CC JUMP .Lde_moved;
517 R1 = R1 - R0;
518 R1 >>= 2;
519 P1 = R1;
520 LSETUP(.Lds_move, .Lde_move) LC0=P1;
521.Lds_move:
522 R0 = [P0++]; /* move data */
523 [P0 - 8] = R0;
 524 R0 = [P0-0x104]; /* move address */
525.Lde_move:
526 [P0-0x108] = R0;
527
528.Lde_moved:
529 NOP;
530
 531 /* Clear DCPLB_DATA15, in case we don't find a replacement;
 532 * otherwise, we would have a duplicate entry and will crash
533 */
534 R0 = 0;
535 [P0 - 0x4] = R0;
536
537 /* We've now made space in DCPLB15 for the new CPLB to be
538 * installed. The next stage is to locate a CPLB in the
539 * config table that covers the faulting address.
540 */
541
542 R0 = I0; /* Our faulting address */
543
544 P2.L = _dpdt_table;
545 P2.H = _dpdt_table;
546#ifdef CONFIG_CPLB_INFO
547 P3.L = _dpdt_swapcount_table;
548 P3.H = _dpdt_swapcount_table;
549 P3 += -8;
550#endif
551
552 P1.L = _page_size_table;
553 P1.H = _page_size_table;
554
555 /* An extraction pattern, to retrieve bits 17:16.*/
556
557 R1 = (16<<8)|2;
558.Ldnext: R4 = [P2++]; /* address */
559 R2 = [P2++]; /* data */
560#ifdef CONFIG_CPLB_INFO
561 P3 += 8;
562#endif
563
564 CC = R4 == -1;
565 IF CC JUMP .Lno_page_in_table;
566
567 /* See if failed address > start address */
568 CC = R4 <= R0(IU);
569 IF !CC JUMP .Ldnext;
570
571 /* extract page size (17:16)*/
572 R3 = EXTRACT(R2, R1.L) (Z);
573
574 /* add page size to addr to get range */
575
576 P5 = R3;
577 P5 = P1 + (P5 << 2);
578 R3 = [P5];
579 R3 = R3 + R4;
580
581 /* See if failed address < (start address + page size) */
582 CC = R0 < R3(IU);
583 IF !CC JUMP .Ldnext;
584
585 /* We've found the CPLB that should be installed, so
586 * write it into CPLB15, masking off any caching bits
587 * if necessary.
588 */
589
590 P1.L = LO(DCPLB_DATA15);
591 P1.H = HI(DCPLB_DATA15);
592
593 /* If the DCPLB has cache bits set, but caching hasn't
594 * been enabled, then we want to mask off the cache-in-L1
595 * bit before installing. Moreover, if caching is off, we
596 * also want to ensure that the DCPLB has WT mode set, rather
597 * than WB, since WB pages still trigger first-write exceptions
 598 * even when caching is off and the page isn't marked as
 599 * cacheable. Finally, we could mark the page as clean, not dirty,
600 * but we choose to leave that decision to the user; if the user
601 * chooses to have a CPLB pre-defined as dirty, then they always
602 * pay the cost of flushing during eviction, but don't pay the
603 * cost of first-write exceptions to mark the page as dirty.
604 */
605
606#ifdef CONFIG_BFIN_WT
607 BITSET(R6, 14); /* Set WT*/
608#endif
609
610 [P1] = R2;
611 [P1-0x100] = R4;
612#ifdef CONFIG_CPLB_INFO
613 R3 = [P3];
614 R3 += 1;
615 [P3] = R3;
616#endif
617
618 /* We've installed the CPLB, so re-enable CPLBs. P4
619 * points to DMEM_CONTROL, and R5 is the value we
620 * last wrote to it, when we were disabling CPLBs.
621 */
622
623 BITSET(R5,ENDCPLB_P);
624 CLI R2;
625 .align 8;
626 [P4] = R5;
627 SSYNC;
628 STI R2;
629
630 ( R7:4,P5:3 ) = [SP++];
631 R0 = CPLB_RELOADED;
632 RTS;
633ENDPROC(_cplb_mgr)
634
635.data
636.align 4;
637_page_size_table:
638.byte4 0x00000400; /* 1K */
639.byte4 0x00001000; /* 4K */
640.byte4 0x00100000; /* 1M */
641.byte4 0x00400000; /* 4M */
642
643.align 4;
644_dcplb_preference:
645.byte4 0x00000001; /* valid bit */
646.byte4 0x00000002; /* lock bit */
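Both deleted assembly files index _page_size_table with bits 17:16 of a CPLB data word (that is what the EXTRACT pattern (16<<8)|2 pulls out). The equivalent C, with the table values taken verbatim from the .data section above:

	static const unsigned long page_size_table[4] = {
		0x00000400,	/* 1K */
		0x00001000,	/* 4K */
		0x00100000,	/* 1M */
		0x00400000,	/* 4M */
	};

	static unsigned long cplb_page_size(unsigned long data)
	{
		return page_size_table[(data >> 16) & 3];
	}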
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
new file mode 100644
index 000000000000..376249ab2694
--- /dev/null
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -0,0 +1,283 @@
1/*
2 * File: arch/blackfin/kernel/cplb-nompu/cplbmgr.c
3 * Based on: arch/blackfin/kernel/cplb-mpu/cplbmgr.c
4 * Author: Michael McTernan <mmcternan@airvana.com>
5 *
6 * Created: 01Nov2008
7 * Description: CPLB miss handler.
8 *
9 * Modified:
10 * Copyright 2008 Airvana Inc.
11 * Copyright 2004-2007 Analog Devices Inc.
12 *
13 * Bugs: Enter bugs at http://blackfin.uclinux.org/
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 */
25
26#include <linux/kernel.h>
27#include <asm/blackfin.h>
28#include <asm/cplbinit.h>
29#include <asm/cplb.h>
30#include <asm/mmu_context.h>
31
32/*
33 * WARNING
34 *
35 * This file is compiled with certain -ffixed-reg options. We have to
36 * make sure not to call any functions here that could clobber these
37 * registers.
38 */
39
40int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
41int nr_dcplb_supv_miss[NR_CPUS], nr_icplb_supv_miss[NR_CPUS];
42int nr_cplb_flush[NR_CPUS], nr_dcplb_prot[NR_CPUS];
43
44#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
45#define MGR_ATTR __attribute__((l1_text))
46#else
47#define MGR_ATTR
48#endif
49
50/*
51 * We're in an exception handler. The normal cli nop nop workaround
52 * isn't going to do very much, as the only thing that can interrupt
53 * us is an NMI, and the cli isn't going to stop that.
54 */
55#define NOWA_SSYNC __asm__ __volatile__ ("ssync;")
56
57/* Anomaly handlers provide SSYNCs, so avoid extra if anomaly is present */
58#if ANOMALY_05000125
59
60#define bfin_write_DMEM_CONTROL_SSYNC(v) bfin_write_DMEM_CONTROL(v)
61#define bfin_write_IMEM_CONTROL_SSYNC(v) bfin_write_IMEM_CONTROL(v)
62
63#else
64
65#define bfin_write_DMEM_CONTROL_SSYNC(v) \
66 do { NOWA_SSYNC; bfin_write_DMEM_CONTROL(v); NOWA_SSYNC; } while (0)
67#define bfin_write_IMEM_CONTROL_SSYNC(v) \
68 do { NOWA_SSYNC; bfin_write_IMEM_CONTROL(v); NOWA_SSYNC; } while (0)
69
70#endif
71
72static inline void write_dcplb_data(int cpu, int idx, unsigned long data,
73 unsigned long addr)
74{
75 unsigned long ctrl = bfin_read_DMEM_CONTROL();
76 bfin_write_DMEM_CONTROL_SSYNC(ctrl & ~ENDCPLB);
77 bfin_write32(DCPLB_DATA0 + idx * 4, data);
78 bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
79 bfin_write_DMEM_CONTROL_SSYNC(ctrl);
80
81#ifdef CONFIG_CPLB_INFO
82 dcplb_tbl[cpu][idx].addr = addr;
83 dcplb_tbl[cpu][idx].data = data;
84#endif
85}
86
87static inline void write_icplb_data(int cpu, int idx, unsigned long data,
88 unsigned long addr)
89{
90 unsigned long ctrl = bfin_read_IMEM_CONTROL();
91
92 bfin_write_IMEM_CONTROL_SSYNC(ctrl & ~ENICPLB);
93 bfin_write32(ICPLB_DATA0 + idx * 4, data);
94 bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
95 bfin_write_IMEM_CONTROL_SSYNC(ctrl);
96
97#ifdef CONFIG_CPLB_INFO
98 icplb_tbl[cpu][idx].addr = addr;
99 icplb_tbl[cpu][idx].data = data;
100#endif
101}
102
103/*
104 * Given the contents of the status register, return the index of the
105 * CPLB that caused the fault.
106 */
107static inline int faulting_cplb_index(int status)
108{
109 int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
110 return 30 - signbits;
111}
112
113/*
114 * Given the contents of the status register and the DCPLB_DATA contents,
115 * return true if a write access should be permitted.
116 */
117static inline int write_permitted(int status, unsigned long data)
118{
119 if (status & FAULT_USERSUPV)
120 return !!(data & CPLB_SUPV_WR);
121 else
122 return !!(data & CPLB_USER_WR);
123}
124
125/* Counters to implement round-robin replacement. */
126static int icplb_rr_index[NR_CPUS] PDT_ATTR;
127static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
128
129/*
130 * Find an ICPLB entry to be evicted and return its index.
131 */
132static int evict_one_icplb(int cpu)
133{
134 int i = first_switched_icplb + icplb_rr_index[cpu];
135 if (i >= MAX_CPLBS) {
136 i -= MAX_CPLBS - first_switched_icplb;
137 icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
138 }
139 icplb_rr_index[cpu]++;
140 return i;
141}
142
143static int evict_one_dcplb(int cpu)
144{
145 int i = first_switched_dcplb + dcplb_rr_index[cpu];
146 if (i >= MAX_CPLBS) {
147 i -= MAX_CPLBS - first_switched_dcplb;
148 dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
149 }
150 dcplb_rr_index[cpu]++;
151 return i;
152}
153
154MGR_ATTR static int icplb_miss(int cpu)
155{
156 unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
157 int status = bfin_read_ICPLB_STATUS();
158 int idx;
159 unsigned long i_data, base, addr1, eaddr;
160
161 nr_icplb_miss[cpu]++;
162 if (unlikely(status & FAULT_USERSUPV))
163 nr_icplb_supv_miss[cpu]++;
164
165 base = 0;
166 for (idx = 0; idx < icplb_nr_bounds; idx++) {
167 eaddr = icplb_bounds[idx].eaddr;
168 if (addr < eaddr)
169 break;
170 base = eaddr;
171 }
172 if (unlikely(idx == icplb_nr_bounds))
173 return CPLB_NO_ADDR_MATCH;
174
175 i_data = icplb_bounds[idx].data;
176 if (unlikely(i_data == 0))
177 return CPLB_NO_ADDR_MATCH;
178
179 addr1 = addr & ~(SIZE_4M - 1);
180 addr &= ~(SIZE_1M - 1);
181 i_data |= PAGE_SIZE_1MB;
182 if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
183 /*
184 * This works because
185 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
186 */
187 i_data |= PAGE_SIZE_4MB;
188 addr = addr1;
189 }
190
191 /* Pick entry to evict */
192 idx = evict_one_icplb(cpu);
193
194 write_icplb_data(cpu, idx, i_data, addr);
195
196 return CPLB_RELOADED;
197}
198
199MGR_ATTR static int dcplb_miss(int cpu)
200{
201 unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
202 int status = bfin_read_DCPLB_STATUS();
203 int idx;
204 unsigned long d_data, base, addr1, eaddr;
205
206 nr_dcplb_miss[cpu]++;
207 if (unlikely(status & FAULT_USERSUPV))
208 nr_dcplb_supv_miss[cpu]++;
209
210 base = 0;
211 for (idx = 0; idx < dcplb_nr_bounds; idx++) {
212 eaddr = dcplb_bounds[idx].eaddr;
213 if (addr < eaddr)
214 break;
215 base = eaddr;
216 }
217 if (unlikely(idx == dcplb_nr_bounds))
218 return CPLB_NO_ADDR_MATCH;
219
220 d_data = dcplb_bounds[idx].data;
221 if (unlikely(d_data == 0))
222 return CPLB_NO_ADDR_MATCH;
223
224 addr1 = addr & ~(SIZE_4M - 1);
225 addr &= ~(SIZE_1M - 1);
226 d_data |= PAGE_SIZE_1MB;
227 if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) {
228 /*
229 * This works because
230 * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB.
231 */
232 d_data |= PAGE_SIZE_4MB;
233 addr = addr1;
234 }
235
236 /* Pick entry to evict */
237 idx = evict_one_dcplb(cpu);
238
239 write_dcplb_data(cpu, idx, d_data, addr);
240
241 return CPLB_RELOADED;
242}
243
244MGR_ATTR static noinline int dcplb_protection_fault(int cpu)
245{
246 int status = bfin_read_DCPLB_STATUS();
247
248 nr_dcplb_prot[cpu]++;
249
250 if (likely(status & FAULT_RW)) {
251 int idx = faulting_cplb_index(status);
252 unsigned long regaddr = DCPLB_DATA0 + idx * 4;
253 unsigned long data = bfin_read32(regaddr);
254
255 /* Check if fault is to dirty a clean page */
256 if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
257 write_permitted(status, data)) {
 258 data |= CPLB_DIRTY; /* mark the page dirty */
259 dcplb_tbl[cpu][idx].data = data;
260 bfin_write32(regaddr, data);
261 return CPLB_RELOADED;
262 }
263 }
264
265 return CPLB_PROT_VIOL;
266}
267
268MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
269{
270 int cause = seqstat & 0x3f;
271 unsigned int cpu = smp_processor_id();
272 switch (cause) {
273 case 0x2C:
274 return icplb_miss(cpu);
275 case 0x26:
276 return dcplb_miss(cpu);
277 default:
278 if (unlikely(cause == 0x23))
279 return dcplb_protection_fault(cpu);
280
281 return CPLB_UNKNOWN_ERR;
282 }
283}
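icplb_miss() and dcplb_miss() share the same page-size decision: install a 4M page when the 4M-aligned page containing the fault fits entirely inside the region, otherwise fall back to a 1M page. That step, isolated as a sketch (SIZE_1M/SIZE_4M and the PAGE_SIZE_* encodings as used above; helper name hypothetical):

	static unsigned long pick_cplb_page(unsigned long addr, unsigned long base,
					    unsigned long eaddr, unsigned long *data)
	{
		unsigned long addr4 = addr & ~(SIZE_4M - 1);

		*data |= PAGE_SIZE_1MB;
		if (addr4 >= base && addr4 + SIZE_4M <= eaddr) {
			/* valid because PAGE_SIZE_4MB & PAGE_SIZE_1MB == PAGE_SIZE_1MB */
			*data |= PAGE_SIZE_4MB;
			return addr4;
		}
		return addr & ~(SIZE_1M - 1);
	}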
diff --git a/arch/blackfin/kernel/cplbinfo.c b/arch/blackfin/kernel/cplbinfo.c
new file mode 100644
index 000000000000..64d78300dd08
--- /dev/null
+++ b/arch/blackfin/kernel/cplbinfo.c
@@ -0,0 +1,177 @@
1/*
2 * arch/blackfin/kernel/cplbinfo.c - display CPLB status
3 *
4 * Copyright 2004-2008 Analog Devices Inc.
5 * Licensed under the GPL-2 or later.
6 */
7
8#include <linux/ctype.h>
9#include <linux/module.h>
10#include <linux/kernel.h>
11#include <linux/init.h>
12#include <linux/proc_fs.h>
13#include <linux/seq_file.h>
14#include <linux/uaccess.h>
15
16#include <asm/cplbinit.h>
17#include <asm/blackfin.h>
18
19static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" };
20#define page(flags) (((flags) & 0x30000) >> 16)
21#define strpage(flags) page_strtbl[page(flags)]
22
23struct cplbinfo_data {
24 loff_t pos;
25 char cplb_type;
26 u32 mem_control;
27 struct cplb_entry *tbl;
28 int switched;
29};
30
31static void cplbinfo_print_header(struct seq_file *m)
32{
33 seq_printf(m, "Index\tAddress\t\tData\tSize\tU/RD\tU/WR\tS/WR\tSwitch\n");
34}
35
36static int cplbinfo_nomore(struct cplbinfo_data *cdata)
37{
38 return cdata->pos >= MAX_CPLBS;
39}
40
41static int cplbinfo_show(struct seq_file *m, void *p)
42{
43 struct cplbinfo_data *cdata;
44 unsigned long data, addr;
45 loff_t pos;
46
47 cdata = p;
48 pos = cdata->pos;
49 addr = cdata->tbl[pos].addr;
50 data = cdata->tbl[pos].data;
51
52 seq_printf(m,
53 "%d\t0x%08lx\t%05lx\t%s\t%c\t%c\t%c\t%c\n",
54 (int)pos, addr, data, strpage(data),
55 (data & CPLB_USER_RD) ? 'Y' : 'N',
56 (data & CPLB_USER_WR) ? 'Y' : 'N',
57 (data & CPLB_SUPV_WR) ? 'Y' : 'N',
58 pos < cdata->switched ? 'N' : 'Y');
59
60 return 0;
61}
62
63static void cplbinfo_seq_init(struct cplbinfo_data *cdata, unsigned int cpu)
64{
65 if (cdata->cplb_type == 'I') {
66 cdata->mem_control = bfin_read_IMEM_CONTROL();
67 cdata->tbl = icplb_tbl[cpu];
68 cdata->switched = first_switched_icplb;
69 } else {
70 cdata->mem_control = bfin_read_DMEM_CONTROL();
71 cdata->tbl = dcplb_tbl[cpu];
72 cdata->switched = first_switched_dcplb;
73 }
74}
75
76static void *cplbinfo_start(struct seq_file *m, loff_t *pos)
77{
78 struct cplbinfo_data *cdata = m->private;
79
80 if (!*pos) {
81 seq_printf(m, "%cCPLBs are %sabled: 0x%x\n", cdata->cplb_type,
82 (cdata->mem_control & ENDCPLB ? "en" : "dis"),
83 cdata->mem_control);
84 cplbinfo_print_header(m);
85 } else if (cplbinfo_nomore(cdata))
86 return NULL;
87
88 get_cpu();
89 return cdata;
90}
91
92static void *cplbinfo_next(struct seq_file *m, void *p, loff_t *pos)
93{
94 struct cplbinfo_data *cdata = p;
95 cdata->pos = ++(*pos);
96 if (cplbinfo_nomore(cdata))
97 return NULL;
98 else
99 return cdata;
100}
101
102static void cplbinfo_stop(struct seq_file *m, void *p)
103{
104 put_cpu();
105}
106
107static const struct seq_operations cplbinfo_sops = {
108 .start = cplbinfo_start,
109 .next = cplbinfo_next,
110 .stop = cplbinfo_stop,
111 .show = cplbinfo_show,
112};
113
114static int cplbinfo_open(struct inode *inode, struct file *file)
115{
116 char buf[256], *path, *p;
117 unsigned int cpu;
118 char *s_cpu, *s_cplb;
119 int ret;
120 struct seq_file *m;
121 struct cplbinfo_data *cdata;
122
123 path = d_path(&file->f_path, buf, sizeof(buf));
124 if (IS_ERR(path))
125 return PTR_ERR(path);
126 s_cpu = strstr(path, "/cpu");
127 s_cplb = strrchr(path, '/');
128 if (!s_cpu || !s_cplb)
129 return -EINVAL;
130
131 cpu = simple_strtoul(s_cpu + 4, &p, 10);
132 if (!cpu_online(cpu))
133 return -ENODEV;
134
135 ret = seq_open_private(file, &cplbinfo_sops, sizeof(*cdata));
136 if (ret)
137 return ret;
138 m = file->private_data;
139 cdata = m->private;
140
141 cdata->pos = 0;
142 cdata->cplb_type = toupper(s_cplb[1]);
143 cplbinfo_seq_init(cdata, cpu);
144
145 return 0;
146}
147
148static const struct file_operations cplbinfo_fops = {
149 .open = cplbinfo_open,
150 .read = seq_read,
151 .llseek = seq_lseek,
152 .release = seq_release_private,
153};
154
155static int __init cplbinfo_init(void)
156{
157 struct proc_dir_entry *cplb_dir, *cpu_dir;
158 char buf[10];
159 unsigned int cpu;
160
161 cplb_dir = proc_mkdir("cplbinfo", NULL);
162 if (!cplb_dir)
163 return -ENOMEM;
164
165 for_each_possible_cpu(cpu) {
166 sprintf(buf, "cpu%i", cpu);
167 cpu_dir = proc_mkdir(buf, cplb_dir);
168 if (!cpu_dir)
169 return -ENOMEM;
170
171 proc_create("icplb", S_IRUGO, cpu_dir, &cplbinfo_fops);
172 proc_create("dcplb", S_IRUGO, cpu_dir, &cplbinfo_fops);
173 }
174
175 return 0;
176}
177late_initcall(cplbinfo_init);
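cplbinfo_open() recovers both the CPU number and the CPLB type from the proc path itself (cpu0/icplb vs. cpu0/dcplb) instead of threading them through per-file private data. That parsing step, isolated as a sketch (function name hypothetical, kernel string helpers as used above):

	static int parse_cplbinfo_path(const char *path, unsigned int *cpu,
				       char *type)
	{
		char *p;
		const char *s_cpu = strstr(path, "/cpu");
		const char *s_cplb = strrchr(path, '/');

		if (!s_cpu || !s_cplb)
			return -EINVAL;
		*cpu = simple_strtoul(s_cpu + 4, &p, 10);
		*type = toupper(s_cplb[1]);	/* 'I' or 'D' */
		return 0;
	}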
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index 1f4e3d2e0901..c8ad051742e2 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -105,10 +105,10 @@ static struct console * __init earlyserial_init(char *buf)
105 cflag |= CS5; 105 cflag |= CS5;
106 break; 106 break;
107 case 6: 107 case 6:
108 cflag |= CS5; 108 cflag |= CS6;
109 break; 109 break;
110 case 7: 110 case 7:
111 cflag |= CS5; 111 cflag |= CS7;
112 break; 112 break;
113 default: 113 default:
114 cflag |= CS8; 114 cflag |= CS8;
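The fix itself is mechanical: each character-size digit now selects its own CSx termios flag where 6 and 7 previously fell through to CS5. The corrected mapping as a standalone helper (name hypothetical):

	static unsigned int char_size_cflag(int bits)
	{
		switch (bits) {
		case 5:
			return CS5;
		case 6:
			return CS6;	/* was wrongly CS5 */
		case 7:
			return CS7;	/* was wrongly CS5 */
		default:
			return CS8;
		}
	}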
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
index faea88ebb2ef..a9cfba9946b5 100644
--- a/arch/blackfin/kernel/entry.S
+++ b/arch/blackfin/kernel/entry.S
@@ -30,6 +30,7 @@
30#include <linux/linkage.h> 30#include <linux/linkage.h>
31#include <asm/thread_info.h> 31#include <asm/thread_info.h>
32#include <asm/errno.h> 32#include <asm/errno.h>
33#include <asm/blackfin.h>
33#include <asm/asm-offsets.h> 34#include <asm/asm-offsets.h>
34 35
35#include <asm/context.S> 36#include <asm/context.S>
@@ -41,6 +42,10 @@
41#endif 42#endif
42 43
43ENTRY(_ret_from_fork) 44ENTRY(_ret_from_fork)
45#ifdef CONFIG_IPIPE
46 [--sp] = reti; /* IRQs on. */
47 SP += 4;
48#endif /* CONFIG_IPIPE */
44 SP += -12; 49 SP += -12;
45 call _schedule_tail; 50 call _schedule_tail;
46 SP += 12; 51 SP += 12;
diff --git a/arch/blackfin/kernel/fixed_code.S b/arch/blackfin/kernel/fixed_code.S
index 4b03ba025488..0d2d9e0968c8 100644
--- a/arch/blackfin/kernel/fixed_code.S
+++ b/arch/blackfin/kernel/fixed_code.S
@@ -8,10 +8,12 @@
8 * BF561 SMP). 8 * BF561 SMP).
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/init.h>
11#include <linux/unistd.h> 12#include <linux/unistd.h>
12#include <asm/entry.h> 13#include <asm/entry.h>
13 14
14.text 15__INIT
16
15ENTRY(_fixed_code_start) 17ENTRY(_fixed_code_start)
16 18
17.align 16 19.align 16
@@ -144,3 +146,5 @@ ENTRY(_safe_user_instruction)
144ENDPROC(_safe_user_instruction) 146ENDPROC(_safe_user_instruction)
145 147
146ENTRY(_fixed_code_end) 148ENTRY(_fixed_code_end)
149
150__FINIT
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
new file mode 100644
index 000000000000..339be5a3ae6a
--- /dev/null
+++ b/arch/blackfin/kernel/ipipe.c
@@ -0,0 +1,428 @@
+/* -*- linux-c -*-
+ * linux/arch/blackfin/kernel/ipipe.c
+ *
+ * Copyright (C) 2005-2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-pipe support for the Blackfin.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <asm/unistd.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+
+static int create_irq_threads;
+
+DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
+
+static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
+
+static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
+
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
+
+static void __ipipe_no_irqtail(void);
+
+unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
+EXPORT_SYMBOL(__ipipe_irq_tail_hook);
+
+unsigned long __ipipe_core_clock;
+EXPORT_SYMBOL(__ipipe_core_clock);
+
+unsigned long __ipipe_freq_scale;
+EXPORT_SYMBOL(__ipipe_freq_scale);
+
+atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
+
+unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+EXPORT_SYMBOL(__ipipe_irq_lvmask);
+
+static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
+{
+	desc->ipipe_ack(irq, desc);
+}
+
+/*
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+ * interrupts are off, and secondary CPUs are still lost in space.
+ */
+void __ipipe_enable_pipeline(void)
+{
+	unsigned irq;
+
+	__ipipe_core_clock = get_cclk(); /* Fetch this once. */
+	__ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;
+
+	for (irq = 0; irq < NR_IRQS; ++irq)
+		ipipe_virtualize_irq(ipipe_root_domain,
+				     irq,
+				     (ipipe_irq_handler_t)&asm_do_IRQ,
+				     NULL,
+				     &__ipipe_ack_irq,
+				     IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+}
+
+/*
+ * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
+ * interrupt protection log is maintained here for each domain. Hw
+ * interrupts are masked on entry.
+ */
+void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
+{
+	struct ipipe_domain *this_domain, *next_domain;
+	struct list_head *head, *pos;
+	int m_ack, s = -1;
+
+	/*
+	 * Software-triggered IRQs do not need any ack.  The contents
+	 * of the register frame should only be used when processing
+	 * the timer interrupt, but not for handling any other
+	 * interrupt.
+	 */
+	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
+
+	this_domain = ipipe_current_domain;
+
+	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
+		head = &this_domain->p_link;
+	else {
+		head = __ipipe_pipeline.next;
+		next_domain = list_entry(head, struct ipipe_domain, p_link);
+		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
+			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
+				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+			if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+				s = __test_and_set_bit(IPIPE_STALL_FLAG,
+						       &ipipe_root_cpudom_var(status));
+			__ipipe_dispatch_wired(next_domain, irq);
+			goto finalize;
+			return;
+		}
+	}
+
+	/* Ack the interrupt. */
+
+	pos = head;
+
+	while (pos != &__ipipe_pipeline) {
+		next_domain = list_entry(pos, struct ipipe_domain, p_link);
+		/*
+		 * For each domain handling the incoming IRQ, mark it
+		 * as pending in its log.
+		 */
+		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
+			/*
+			 * Domains that handle this IRQ are polled for
+			 * acknowledging it by decreasing priority
+			 * order. The interrupt must be made pending
+			 * _first_ in the domain's status flags before
+			 * the PIC is unlocked.
+			 */
+			__ipipe_set_irq_pending(next_domain, irq);
+
+			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
+				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+				m_ack = 1;
+			}
+		}
+
+		/*
+		 * If the domain does not want the IRQ to be passed
+		 * down the interrupt pipe, exit the loop now.
+		 */
+		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+			break;
+
+		pos = next_domain->p_link.next;
+	}
+
+	/*
+	 * Now walk the pipeline, yielding control to the highest
+	 * priority domain that has pending interrupt(s) or
+	 * immediately to the current domain if the interrupt has been
+	 * marked as 'sticky'. This search does not go beyond the
+	 * current domain in the pipeline. We also enforce the
+	 * additional root stage lock (blackfin-specific). */
+
+	if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+		s = __test_and_set_bit(IPIPE_STALL_FLAG,
+				       &ipipe_root_cpudom_var(status));
+finalize:
+
+	__ipipe_walk_pipeline(head);
+
+	if (!s)
+		__clear_bit(IPIPE_STALL_FLAG,
+			    &ipipe_root_cpudom_var(status));
+}
+
+int __ipipe_check_root(void)
+{
+	return ipipe_root_domain_p;
+}
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int prio = desc->ic_prio;
+
+	desc->depth = 0;
+	if (ipd != &ipipe_root &&
+	    atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
+		__set_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_enable_irqdesc);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int prio = desc->ic_prio;
+
+	if (ipd != &ipipe_root &&
+	    atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
+		__clear_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_disable_irqdesc);
+
+void __ipipe_stall_root_raw(void)
+{
+	/*
+	 * This code is called by the ins{bwl} routines (see
+	 * arch/blackfin/lib/ins.S), which are heavily used by the
+	 * network stack. It masks all interrupts but those handled by
+	 * non-root domains, so that we keep decent network transfer
+	 * rates for Linux without inducing pathological jitter for
+	 * the real-time domain.
+	 */
+	__asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));
+
+	__set_bit(IPIPE_STALL_FLAG,
+		  &ipipe_root_cpudom_var(status));
+}
+
+void __ipipe_unstall_root_raw(void)
+{
+	__clear_bit(IPIPE_STALL_FLAG,
+		    &ipipe_root_cpudom_var(status));
+
+	__asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
+}
+
+int __ipipe_syscall_root(struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	/* We need to run the IRQ tail hook whenever we don't
+	 * propagate a syscall to higher domains, because we know that
+	 * important operations might be pending there (e.g. Xenomai
+	 * deferred rescheduling). */
+
+	if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
+		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+		hook();
+		return 0;
+	}
+
+	/*
+	 * This routine either returns:
+	 * 0 -- if the syscall is to be passed to Linux;
+	 * 1 -- if the syscall should not be passed to Linux, and no
+	 * tail work should be performed;
+	 * -1 -- if the syscall should not be passed to Linux but the
+	 * tail work has to be performed (for handling signals etc).
+	 */
+
+	if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
+	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
+		if (ipipe_root_domain_p && !in_atomic()) {
+			/*
+			 * Sync pending VIRQs before _TIF_NEED_RESCHED
+			 * is tested.
+			 */
+			local_irq_save_hw(flags);
+			if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
+				__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+			local_irq_restore_hw(flags);
+			return -1;
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+unsigned long ipipe_critical_enter(void (*syncfn) (void))
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+
+	return flags;
+}
+
+void ipipe_critical_exit(unsigned long flags)
+{
+	local_irq_restore_hw(flags);
+}
+
+static void __ipipe_no_irqtail(void)
+{
+}
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+	info->ncpus = num_online_cpus();
+	info->cpufreq = ipipe_cpu_freq();
+	info->archdep.tmirq = IPIPE_TIMER_IRQ;
+	info->archdep.tmfreq = info->cpufreq;
+
+	return 0;
+}
+
+/*
+ * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
+ * just like if it has been actually received from a hw source. Also
+ * works for virtual interrupts.
+ */
+int ipipe_trigger_irq(unsigned irq)
+{
+	unsigned long flags;
+
+	if (irq >= IPIPE_NR_IRQS ||
+	    (ipipe_virtual_irq_p(irq)
+	     && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+		return -EINVAL;
+
+	local_irq_save_hw(flags);
+
+	__ipipe_handle_irq(irq, NULL);
+
+	local_irq_restore_hw(flags);
+
+	return 1;
+}
+
+/* Move Linux IRQ to threads. */
+
+static int do_irqd(void *__desc)
+{
+	struct irq_desc *desc = __desc;
+	unsigned irq = desc - irq_desc;
+	int thrprio = desc->thr_prio;
+	int thrmask = 1 << thrprio;
+	int cpu = smp_processor_id();
+	cpumask_t cpumask;
+
+	sigfillset(&current->blocked);
+	current->flags |= PF_NOFREEZE;
+	cpumask = cpumask_of_cpu(cpu);
+	set_cpus_allowed(current, cpumask);
+	ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
+
+	while (!kthread_should_stop()) {
+		local_irq_disable();
+		if (!(desc->status & IRQ_SCHEDULED)) {
+			set_current_state(TASK_INTERRUPTIBLE);
+resched:
+			local_irq_enable();
+			schedule();
+			local_irq_disable();
+		}
+		__set_current_state(TASK_RUNNING);
+		/*
+		 * If higher priority interrupt servers are ready to
+		 * run, reschedule immediately. We need this for the
+		 * GPIO demux IRQ handler to unmask the interrupt line
+		 * _last_, after all GPIO IRQs have run.
+		 */
+		if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
+			goto resched;
+		if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
+			per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
+		desc->status &= ~IRQ_SCHEDULED;
+		desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
+		local_irq_enable();
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+static void kick_irqd(unsigned irq, void *cookie)
+{
+	struct irq_desc *desc = irq_desc + irq;
+	int thrprio = desc->thr_prio;
+	int thrmask = 1 << thrprio;
+	int cpu = smp_processor_id();
+
+	if (!(desc->status & IRQ_SCHEDULED)) {
+		desc->status |= IRQ_SCHEDULED;
+		per_cpu(pending_irqthread_mask, cpu) |= thrmask;
+		++per_cpu(pending_irq_count[thrprio], cpu);
+		wake_up_process(desc->thread);
+	}
+}
+
+int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
+{
+	if (desc->thread || !create_irq_threads)
+		return 0;
+
+	desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
+	if (desc->thread == NULL) {
+		printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
+		return -ENOMEM;
+	}
+
+	wake_up_process(desc->thread);
+
+	desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
+	ipipe_root_domain->irqs[irq].handler = &kick_irqd;
+
+	return 0;
+}
+
+void __init ipipe_init_irq_threads(void)
+{
+	unsigned irq;
+	struct irq_desc *desc;
+
+	create_irq_threads = 1;
+
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		desc = irq_desc + irq;
+		if (desc->action != NULL ||
+		    (desc->status & IRQ_NOREQUEST) != 0)
+			ipipe_start_irq_thread(irq, desc);
+	}
+}
+
+EXPORT_SYMBOL(show_stack);
+
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT
+void notrace _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
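
The core idea of `__ipipe_handle_irq()` above is an "optimistic" per-domain interrupt log: every domain that handles the IRQ marks it pending in its own log, and propagation stops at the first domain that does not pass the IRQ further down the pipeline. A standalone sketch of that walk, with all names illustrative rather than kernel APIs:

struct domain {
	unsigned long pending;		/* one bit per IRQ, the domain's log */
	int (*handles)(unsigned irq);	/* does this domain handle the IRQ? */
	int (*passes)(unsigned irq);	/* propagate down the pipeline? */
	struct domain *next;		/* next (lower-priority) domain */
};

/* Walk from the highest-priority domain, logging the IRQ as pending
 * in each handler's log, stopping where propagation is refused. */
void pipeline_mark_pending(struct domain *head, unsigned irq)
{
	struct domain *d;

	for (d = head; d != NULL; d = d->next) {
		if (d->handles(irq))
			d->pending |= 1UL << irq;
		if (!d->passes(irq))
			break;	/* IRQ is not propagated further */
	}
}
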
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 07402f57c9de..ab8209cbbad0 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -36,7 +36,7 @@
 #include <linux/irq.h>
 #include <asm/trace.h>
 
-static unsigned long irq_err_count;
+static atomic_t irq_err_count;
 static spinlock_t irq_controller_lock;
 
 /*
@@ -48,10 +48,9 @@ void dummy_mask_unmask_irq(unsigned int irq)
 
 void ack_bad_irq(unsigned int irq)
 {
-	irq_err_count += 1;
+	atomic_inc(&irq_err_count);
 	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
 }
-EXPORT_SYMBOL(ack_bad_irq);
 
 static struct irq_chip bad_chip = {
 	.ack = dummy_mask_unmask_irq,
@@ -72,7 +71,7 @@ static struct irq_desc bad_irq_desc = {
 
 int show_interrupts(struct seq_file *p, void *v)
 {
-	int i = *(loff_t *) v;
+	int i = *(loff_t *) v, j;
 	struct irqaction *action;
 	unsigned long flags;
 
@@ -80,19 +79,20 @@ int show_interrupts(struct seq_file *p, void *v)
 		spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
-			goto unlock;
-
-		seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
+			goto skip;
+		seq_printf(p, "%3d: ", i);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		seq_printf(p, " %8s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 		for (action = action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
+			seq_printf(p, " %s", action->name);
 
 		seq_putc(p, '\n');
- unlock:
+ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "Err: %10lu\n", irq_err_count);
-	}
+	} else if (i == NR_IRQS)
+		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
 	return 0;
 }
 
@@ -101,7 +101,6 @@ int show_interrupts(struct seq_file *p, void *v)
  * come via this function.  Instead, they should provide their
  * own 'handler'
  */
-
 #ifdef CONFIG_DO_IRQ_L1
 __attribute__((l1_text))
 #endif
@@ -109,8 +108,9 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 {
 	struct pt_regs *old_regs;
 	struct irq_desc *desc = irq_desc + irq;
+#ifndef CONFIG_IPIPE
 	unsigned short pending, other_ints;
-
+#endif
 	old_regs = set_irq_regs(regs);
 
 	/*
@@ -121,9 +121,24 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 		desc = &bad_irq_desc;
 
 	irq_enter();
-
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+	/* Debugging check for stack overflow: is there less than STACK_WARN free? */
+	{
+		long sp;
+
+		sp = __get_SP() & (THREAD_SIZE-1);
+
+		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+			dump_stack();
+			printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
+				" only %ld bytes free\n",
+				__func__, irq, sp - sizeof(struct thread_info));
+		}
+	}
+#endif
 	generic_handle_irq(irq);
 
+#ifndef CONFIG_IPIPE	/* Useless and buggy over the I-pipe: IRQs are threaded. */
 	/* If we're the only interrupt running (ignoring IRQ15 which is for
 	   syscalls), lower our priority to IRQ14 so that softirqs run at
 	   that level.  If there's another, lower-level interrupt, irq_exit
@@ -133,6 +148,7 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	other_ints = pending & (pending - 1);
 	if (other_ints == 0)
 		lower_to_irq14();
+#endif /* !CONFIG_IPIPE */
 	irq_exit();
 
 	set_irq_regs(old_regs);
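
The counter change above ('unsigned long' to 'atomic_t') matters once several cores can take spurious interrupts concurrently: a plain increment is a non-atomic read-modify-write. A hedged sketch of the pattern (identifier names below are illustrative):

#include <asm/atomic.h>

static atomic_t err_count = ATOMIC_INIT(0);

static void note_spurious_irq(void)
{
	atomic_inc(&err_count);		/* safe against concurrent updaters */
}

static int read_spurious_count(void)
{
	return atomic_read(&err_count);	/* matches the %u seq_printf above */
}
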
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 1c5afaeb9504..b163f6d3330d 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -34,9 +34,14 @@ int gdb_bfin_vector = -1;
 #error change the definition of slavecpulocks
 #endif
 
-#ifdef CONFIG_BFIN_WDT
-# error "Please unselect blackfin watchdog driver before build KGDB."
-#endif
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+	unsigned long __addr = (unsigned long)(addr); \
+	(l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+	(ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+	 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
 
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
@@ -219,6 +224,7 @@ int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
 		if (bfin_type == breakinfo[breakno].type
 			&& !breakinfo[breakno].occupied) {
 			breakinfo[breakno].occupied = 1;
+			breakinfo[breakno].skip = 0;
 			breakinfo[breakno].enabled = 1;
 			breakinfo[breakno].addr = addr;
 			breakinfo[breakno].dataacc = dataacc;
@@ -363,12 +369,12 @@ void kgdb_passive_cpu_callback(void *info)
 
 void kgdb_roundup_cpus(unsigned long flags)
 {
-	smp_call_function(kgdb_passive_cpu_callback, NULL, 0, 0);
+	smp_call_function(kgdb_passive_cpu_callback, NULL, 0);
 }
 
 void kgdb_roundup_cpu(int cpu, unsigned long flags)
 {
-	smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0, 0);
+	smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0);
 }
 #endif
 
@@ -385,10 +391,8 @@ int kgdb_arch_handle_exception(int vector, int signo,
 			       struct pt_regs *regs)
 {
 	long addr;
-	long breakno;
 	char *ptr;
 	int newPC;
-	int wp_status;
 	int i;
 
 	switch (remcom_in_buffer[0]) {
@@ -426,17 +430,6 @@ int kgdb_arch_handle_exception(int vector, int signo,
 			kgdb_single_step = i + 1;
 		}
 
-		if (vector == VEC_WATCH) {
-			wp_status = bfin_read_WPSTAT();
-			for (breakno = 0; breakno < HW_WATCHPOINT_NUM; breakno++) {
-				if (wp_status & (1 << breakno)) {
-					breakinfo->skip = 1;
-					break;
-				}
-			}
-			bfin_write_WPSTAT(0);
-		}
-
 		bfin_correct_hw_break();
 
 		return 0;
@@ -478,57 +471,32 @@ static int validate_memory_access_address(unsigned long addr, int size)
 		return 0;
 	if (addr >= SYSMMR_BASE)
 		return 0;
-	if (addr >= ASYNC_BANK0_BASE
-	    && addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
+	if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
 		return 0;
 	if (cpu == 0) {
-		if (addr >= L1_SCRATCH_START
-		    && (addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH))
+		if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
 			return 0;
-#if L1_CODE_LENGTH != 0
-		if (addr >= L1_CODE_START
-		    && (addr + size <= L1_CODE_START + L1_CODE_LENGTH))
+		if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH))
 			return 0;
-#endif
-#if L1_DATA_A_LENGTH != 0
-		if (addr >= L1_DATA_A_START
-		    && (addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH))
+		if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
 			return 0;
-#endif
-#if L1_DATA_B_LENGTH != 0
-		if (addr >= L1_DATA_B_START
-		    && (addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH))
+		if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
 			return 0;
-#endif
 #ifdef CONFIG_SMP
 	} else if (cpu == 1) {
-		if (addr >= COREB_L1_SCRATCH_START
-		    && (addr + size <= COREB_L1_SCRATCH_START
-		    + L1_SCRATCH_LENGTH))
+		if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
 			return 0;
-# if L1_CODE_LENGTH != 0
-		if (addr >= COREB_L1_CODE_START
-		    && (addr + size <= COREB_L1_CODE_START + L1_CODE_LENGTH))
+		if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
 			return 0;
-# endif
-# if L1_DATA_A_LENGTH != 0
-		if (addr >= COREB_L1_DATA_A_START
-		    && (addr + size <= COREB_L1_DATA_A_START + L1_DATA_A_LENGTH))
+		if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
 			return 0;
-# endif
-# if L1_DATA_B_LENGTH != 0
-		if (addr >= COREB_L1_DATA_B_START
-		    && (addr + size <= COREB_L1_DATA_B_START + L1_DATA_B_LENGTH))
+		if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
 			return 0;
-# endif
 #endif
 	}
 
-#if L2_LENGTH != 0
-	if (addr >= L2_START
-	    && addr + size <= L2_START + L2_LENGTH)
+	if (IN_MEM(addr, size, L2_START, L2_LENGTH))
 		return 0;
-#endif
 
 	return EFAULT;
 }
@@ -582,12 +550,9 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 		default:
 			err = EFAULT;
 		}
-	} else if (cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
-		(unsigned int)(mem + count) <= L1_CODE_START + L1_CODE_LENGTH
+	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
 #ifdef CONFIG_SMP
-		|| cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
-		(unsigned int)(mem + count) <=
-		COREB_L1_CODE_START + L1_CODE_LENGTH
+		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
 #endif
 		) {
 		/* access L1 instruction SRAM*/
@@ -658,12 +623,9 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
 		default:
 			return EFAULT;
 		}
-	} else if (cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
-		(unsigned int)(mem + count) < L1_CODE_START + L1_CODE_LENGTH
+	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
 #ifdef CONFIG_SMP
-		|| cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
-		(unsigned int)(mem + count) <=
-		COREB_L1_CODE_START + L1_CODE_LENGTH
+		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
 #endif
 		) {
 		/* access L1 instruction SRAM */
@@ -723,12 +685,9 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
 		default:
 			return EFAULT;
 		}
-	} else if (cpu == 0 && (unsigned int)mem >= L1_CODE_START &&
-		(unsigned int)(mem + count) <= L1_CODE_START + L1_CODE_LENGTH
+	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
 #ifdef CONFIG_SMP
-		|| cpu == 1 && (unsigned int)mem >= COREB_L1_CODE_START &&
-		(unsigned int)(mem + count) <=
-		COREB_L1_CODE_START + L1_CODE_LENGTH
+		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
 #endif
 		) {
 		/* access L1 instruction SRAM */
@@ -745,24 +704,16 @@ int kgdb_validate_break_address(unsigned long addr)
 
 	if (addr >= 0x1000 && (addr + BREAK_INSTR_SIZE) <= physical_mem_end)
 		return 0;
-	if (addr >= ASYNC_BANK0_BASE
-	    && addr + BREAK_INSTR_SIZE <= ASYNC_BANK3_BASE + ASYNC_BANK3_BASE)
+	if (IN_MEM(addr, BREAK_INSTR_SIZE, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
 		return 0;
-#if L1_CODE_LENGTH != 0
-	if (cpu == 0 && addr >= L1_CODE_START
-	    && addr + BREAK_INSTR_SIZE <= L1_CODE_START + L1_CODE_LENGTH)
+	if (cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
 		return 0;
-# ifdef CONFIG_SMP
-	else if (cpu == 1 && addr >= COREB_L1_CODE_START
-	    && addr + BREAK_INSTR_SIZE <= COREB_L1_CODE_START + L1_CODE_LENGTH)
+#ifdef CONFIG_SMP
+	else if (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
 		return 0;
-# endif
 #endif
-#if L2_LENGTH != 0
-	if (addr >= L2_START
-	    && addr + BREAK_INSTR_SIZE <= L2_START + L2_LENGTH)
+	if (IN_MEM(addr, BREAK_INSTR_SIZE, L2_START, L2_LENGTH))
 		return 0;
-#endif
 
 	return EFAULT;
 }
@@ -772,13 +723,9 @@ int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
 	int err;
 	int cpu = raw_smp_processor_id();
 
-	if ((cpu == 0 && (unsigned int)addr >= L1_CODE_START
-	    && (unsigned int)(addr + BREAK_INSTR_SIZE)
-	    < L1_CODE_START + L1_CODE_LENGTH)
+	if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
 #ifdef CONFIG_SMP
-	    || (cpu == 1 && (unsigned int)addr >= COREB_L1_CODE_START
-	    && (unsigned int)(addr + BREAK_INSTR_SIZE)
-	    < COREB_L1_CODE_START + L1_CODE_LENGTH)
+	    || (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
 #endif
 	    ) {
 		/* access L1 instruction SRAM */
@@ -804,9 +751,7 @@ int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
 
 int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
 {
-	if ((unsigned int)addr >= L1_CODE_START &&
-	    (unsigned int)(addr + BREAK_INSTR_SIZE) <
-	    L1_CODE_START + L1_CODE_LENGTH) {
+	if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) {
 		/* access L1 instruction SRAM */
 		if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
 			return -EFAULT;
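
The `IN_MEM()` helper introduced above collapses each open-coded pair of bounds checks, and because the macro tests `l1_size` at run time it also absorbs the old `#if ..._LENGTH != 0` preprocessor guards. A self-contained usage sketch; the base and length values below are made up for illustration:

#define IN_MEM(addr, size, l1_addr, l1_size) \
({ \
	unsigned long __addr = (unsigned long)(addr); \
	(l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
})

/* Is a 2-byte access at `addr` inside a hypothetical 16 KiB region
 * based at 0xFFA00000?  (GCC statement-expression syntax, as in the
 * kernel macro above.) */
int demo_range_check(unsigned long addr)
{
	return IN_MEM(addr, 2, 0xFFA00000UL, 0x4000UL);
}
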
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
new file mode 100644
index 000000000000..3dba9c17304a
--- /dev/null
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -0,0 +1,123 @@
+/*
+ * arch/blackfin/kernel/kgdb_test.c - Blackfin kgdb tests
+ *
+ * Copyright 2005-2008 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+
+#include <asm/current.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <asm/blackfin.h>
+
+static char cmdline[256];
+static unsigned long len;
+
+static int num1 __attribute__((l1_data));
+
+void kgdb_l1_test(void) __attribute__((l1_text));
+
+void kgdb_l1_test(void)
+{
+	printk(KERN_ALERT "L1(before change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
+	printk(KERN_ALERT "L1 : code function addr = 0x%p\n", kgdb_l1_test);
+	num1 = num1 + 10 ;
+	printk(KERN_ALERT "L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
+	return ;
+}
+#if L2_LENGTH
+
+static int num2 __attribute__((l2));
+void kgdb_l2_test(void) __attribute__((l2));
+
+void kgdb_l2_test(void)
+{
+	printk(KERN_ALERT "L2(before change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
+	printk(KERN_ALERT "L2 : code function addr = 0x%p\n", kgdb_l2_test);
+	num2 = num2 + 20 ;
+	printk(KERN_ALERT "L2(after change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
+	return ;
+}
+
+#endif
+
+
+int kgdb_test(char *name, int len, int count, int z)
+{
+	printk(KERN_DEBUG "kgdb name(%d): %s, %d, %d\n", len, name, count, z);
+	count = z;
+	return count;
+}
+
+static int test_proc_output(char *buf)
+{
+	kgdb_test("hello world!", 12, 0x55, 0x10);
+	kgdb_l1_test();
+	#if L2_LENGTH
+	kgdb_l2_test();
+	#endif
+
+	return 0;
+}
+
+static int test_read_proc(char *page, char **start, off_t off,
+			  int count, int *eof, void *data)
+{
+	int len;
+
+	len = test_proc_output(page);
+	if (len <= off+count)
+		*eof = 1;
+	*start = page + off;
+	len -= off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+	return len;
+}
+
+static int test_write_proc(struct file *file, const char *buffer,
+			   unsigned long count, void *data)
+{
+	if (count >= 256)
+		len = 255;
+	else
+		len = count;
+
+	memcpy(cmdline, buffer, count);
+	cmdline[len] = 0;
+
+	return len;
+}
+
+static int __init kgdbtest_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = create_proc_entry("kgdbtest", 0, NULL);
+	if (entry == NULL)
+		return -ENOMEM;
+
+	entry->read_proc = test_read_proc;
+	entry->write_proc = test_write_proc;
+	entry->data = NULL;
+
+	return 0;
+}
+
+static void __exit kgdbtest_exit(void)
+{
+	remove_proc_entry("kgdbtest", NULL);
+}
+
+module_init(kgdbtest_init);
+module_exit(kgdbtest_exit);
+MODULE_LICENSE("GPL");
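
test_read_proc() above follows the legacy procfs read_proc contract: render the full output into `page`, set `*eof` once everything up to `off + count` has been produced, point `*start` at the slice for this call, and clamp the returned length to the window. A minimal standalone handler showing the same boilerplate, with a fixed string in place of the kgdb tests:

#include <linux/kernel.h>
#include <linux/proc_fs.h>

static int demo_read_proc(char *page, char **start, off_t off,
			  int count, int *eof, void *data)
{
	int len = sprintf(page, "hello from procfs\n");

	if (len <= off + count)
		*eof = 1;		/* no more data after this slice */
	*start = page + off;		/* hand back only the requested window */
	len -= off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;
	return len;
}
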
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
new file mode 100644
index 000000000000..edcfb3865f46
--- /dev/null
+++ b/arch/blackfin/kernel/mcount.S
@@ -0,0 +1,70 @@
+/*
+ * linux/arch/blackfin/mcount.S
+ *
+ * Copyright (C) 2006 Analog Devices Inc.
+ *
+ * 2007/04/12 Save index, length, modify and base registers. --rpm
+ */
+
+#include <linux/linkage.h>
+#include <asm/blackfin.h>
+
+.text
+
+.align 4	/* just in case */
+
+ENTRY(__mcount)
+	[--sp] = i0;
+	[--sp] = i1;
+	[--sp] = i2;
+	[--sp] = i3;
+	[--sp] = l0;
+	[--sp] = l1;
+	[--sp] = l2;
+	[--sp] = l3;
+	[--sp] = m0;
+	[--sp] = m1;
+	[--sp] = m2;
+	[--sp] = m3;
+	[--sp] = b0;
+	[--sp] = b1;
+	[--sp] = b2;
+	[--sp] = b3;
+	[--sp] = ( r7:0, p5:0 );
+	[--sp] = ASTAT;
+
+	p1.L = _ipipe_trace_enable;
+	p1.H = _ipipe_trace_enable;
+	r7 = [p1];
+	CC = r7 == 0;
+	if CC jump out;
+	link 0x10;
+	r0 = 0x0;
+	[sp + 0xc] = r0;	/* v */
+	r0 = 0x0;	/* type: IPIPE_TRACE_FN */
+	r1 = rets;
+	p0 = [fp];	/* p0: Prior FP */
+	r2 = [p0 + 4];	/* r2: Prior RETS */
+	call ___ipipe_trace;
+	unlink;
+out:
+	ASTAT = [sp++];
+	( r7:0, p5:0 ) = [sp++];
+	b3 = [sp++];
+	b2 = [sp++];
+	b1 = [sp++];
+	b0 = [sp++];
+	m3 = [sp++];
+	m2 = [sp++];
+	m1 = [sp++];
+	m0 = [sp++];
+	l3 = [sp++];
+	l2 = [sp++];
+	l1 = [sp++];
+	l0 = [sp++];
+	i3 = [sp++];
+	i2 = [sp++];
+	i1 = [sp++];
+	i0 = [sp++];
+	rts;
+ENDPROC(__mcount)
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index e1bebc80a5bf..1bd7f2d018a8 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -37,111 +37,6 @@
 #include <asm/dma.h>
 #include <asm/cacheflush.h>
 
-/*
- * handle arithmetic relocations.
- * See binutils/bfd/elf32-bfin.c for more details
- */
-#define RELOC_STACK_SIZE 100
-static uint32_t reloc_stack[RELOC_STACK_SIZE];
-static unsigned int reloc_stack_tos;
-
-#define is_reloc_stack_empty() ((reloc_stack_tos > 0)?0:1)
-
-static void reloc_stack_push(uint32_t value)
-{
-	reloc_stack[reloc_stack_tos++] = value;
-}
-
-static uint32_t reloc_stack_pop(void)
-{
-	return reloc_stack[--reloc_stack_tos];
-}
-
-static uint32_t reloc_stack_operate(unsigned int oper, struct module *mod)
-{
-	uint32_t value;
-
-	switch (oper) {
-	case R_add:
-		value = reloc_stack[reloc_stack_tos - 2] +
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_sub:
-		value = reloc_stack[reloc_stack_tos - 2] -
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_mult:
-		value = reloc_stack[reloc_stack_tos - 2] *
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_div:
-		value = reloc_stack[reloc_stack_tos - 2] /
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_mod:
-		value = reloc_stack[reloc_stack_tos - 2] %
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_lshift:
-		value = reloc_stack[reloc_stack_tos - 2] <<
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_rshift:
-		value = reloc_stack[reloc_stack_tos - 2] >>
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_and:
-		value = reloc_stack[reloc_stack_tos - 2] &
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_or:
-		value = reloc_stack[reloc_stack_tos - 2] |
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_xor:
-		value = reloc_stack[reloc_stack_tos - 2] ^
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_land:
-		value = reloc_stack[reloc_stack_tos - 2] &&
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_lor:
-		value = reloc_stack[reloc_stack_tos - 2] ||
-			reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 2;
-		break;
-	case R_neg:
-		value = -reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos--;
-		break;
-	case R_comp:
-		value = ~reloc_stack[reloc_stack_tos - 1];
-		reloc_stack_tos -= 1;
-		break;
-	default:
-		printk(KERN_WARNING "module %s: unhandled reloction\n",
-		       mod->name);
-		return 0;
-	}
-
-	/* now push the new value back on stack */
-	reloc_stack_push(value);
-
-	return value;
-}
-
 void *module_alloc(unsigned long size)
 {
 	if (size == 0)
@@ -334,16 +229,18 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
 		   undefined symbols have been resolved. */
 		sym = (Elf32_Sym *) sechdrs[symindex].sh_addr
 		    + ELF32_R_SYM(rel[i].r_info);
-		if (is_reloc_stack_empty()) {
-			value = sym->st_value;
-		} else {
-			value = reloc_stack_pop();
-		}
+		value = sym->st_value;
 		value += rel[i].r_addend;
 		pr_debug("location is %x, value is %x type is %d \n",
 			 (unsigned int) location32, value,
 			 ELF32_R_TYPE(rel[i].r_info));
-
+#ifdef CONFIG_SMP
+		if ((unsigned long)location16 >= COREB_L1_DATA_A_START) {
+			printk(KERN_ERR "module %s: cannot relocate in L1: %u (SMP kernel)",
+			       mod->name, ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+#endif
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 
 		case R_pcrel24:
@@ -355,6 +252,12 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
 			location32 = (uint32_t *) location16;
 			value -= (uint32_t) location32;
 			value >>= 1;
+			if ((value & 0xFF000000) != 0 &&
+			    (value & 0xFF000000) != 0xFF000000) {
+				printk(KERN_ERR "module %s: relocation overflow\n",
+				       mod->name);
+				return -ENOEXEC;
+			}
 			pr_debug("value is %x, before %x-%x after %x-%x\n", value,
 				 *location16, *(location16 + 1),
 				 (*location16 & 0xff00) | (value >> 16 & 0x00ff),
@@ -399,28 +302,6 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
 			pr_debug("before %x after %x\n", *location32, value);
 			*location32 = value;
 			break;
-		case R_push:
-			reloc_stack_push(value);
-			break;
-		case R_const:
-			reloc_stack_push(rel[i].r_addend);
-			break;
-		case R_add:
-		case R_sub:
-		case R_mult:
-		case R_div:
-		case R_mod:
-		case R_lshift:
-		case R_rshift:
-		case R_and:
-		case R_or:
-		case R_xor:
-		case R_land:
-		case R_lor:
-		case R_neg:
-		case R_comp:
-			reloc_stack_operate(ELF32_R_TYPE(rel[i].r_info), mod);
-			break;
 		default:
 			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
 			       mod->name, ELF32_R_TYPE(rel[i].r_info));
@@ -436,6 +317,7 @@ module_finalize(const Elf_Ehdr * hdr,
 {
 	unsigned int i, strindex = 0, symindex = 0;
 	char *secstrings;
+	long err = 0;
 
 	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
@@ -460,8 +342,10 @@ module_finalize(const Elf_Ehdr * hdr,
460 (strcmp(".rela.l1.text", secstrings + sechdrs[i].sh_name) == 0) || 342 (strcmp(".rela.l1.text", secstrings + sechdrs[i].sh_name) == 0) ||
461 ((strcmp(".rela.text", secstrings + sechdrs[i].sh_name) == 0) && 343 ((strcmp(".rela.text", secstrings + sechdrs[i].sh_name) == 0) &&
462 (hdr->e_flags & (EF_BFIN_CODE_IN_L1|EF_BFIN_CODE_IN_L2))))) { 344 (hdr->e_flags & (EF_BFIN_CODE_IN_L1|EF_BFIN_CODE_IN_L2))))) {
463 apply_relocate_add((Elf_Shdr *) sechdrs, strtab, 345 err = apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
464 symindex, i, mod); 346 symindex, i, mod);
347 if (err < 0)
348 return -ENOEXEC;
465 } 349 }
466 } 350 }
467 return 0; 351 return 0;
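
The new check in the R_pcrel24 case rejects branch targets whose halved displacement no longer fits the 24-bit field: after `value >>= 1`, the top 8 bits must be a pure sign extension (all zeros or all ones). A small self-contained restatement of that test:

#include <stdint.h>

/* Returns nonzero when a halved pcrel displacement fits in a signed
 * 24-bit field, i.e. bits 31..24 are a sign extension of bit 23. */
int pcrel24_fits(uint32_t value)
{
	uint32_t top = value & 0xFF000000;

	return top == 0 || top == 0xFF000000;
}
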
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 0c3ea118b657..33e2e8993f7f 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -39,6 +39,7 @@
 
 #include <asm/blackfin.h>
 #include <asm/fixed_code.h>
+#include <asm/mem_map.h>
 
 asmlinkage void ret_from_fork(void);
 
@@ -81,11 +82,14 @@ void cpu_idle(void)__attribute__((l1_text));
  */
 static void default_idle(void)
 {
-	local_irq_disable();
+#ifdef CONFIG_IPIPE
+	ipipe_suspend_domain();
+#endif
+	local_irq_disable_hw();
 	if (!need_resched())
 		idle_with_irq_disabled();
 
-	local_irq_enable();
+	local_irq_enable_hw();
 }
 
 /*
@@ -154,6 +158,7 @@ pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
 	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
 		       NULL);
 }
+EXPORT_SYMBOL(kernel_thread);
 
 void flush_thread(void)
 {
@@ -170,6 +175,13 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
 	unsigned long clone_flags;
 	unsigned long newsp;
 
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	if (current->rt.nr_cpus_allowed == num_possible_cpus()) {
+		current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
+		current->rt.nr_cpus_allowed = 1;
+	}
+#endif
+
 	/* syscall2 puts clone_flags in r0 and usp in r1 */
 	clone_flags = regs->r0;
 	newsp = regs->r1;
@@ -337,22 +349,22 @@ int _access_ok(unsigned long addr, unsigned long size)
 	if (addr >= (unsigned long)__init_begin &&
 	    addr + size <= (unsigned long)__init_end)
 		return 1;
-	if (addr >= L1_SCRATCH_START
-	    && addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH)
+	if (addr >= get_l1_scratch_start()
+	    && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
 		return 1;
 #if L1_CODE_LENGTH != 0
-	if (addr >= L1_CODE_START + (_etext_l1 - _stext_l1)
-	    && addr + size <= L1_CODE_START + L1_CODE_LENGTH)
+	if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
+	    && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
 		return 1;
 #endif
 #if L1_DATA_A_LENGTH != 0
-	if (addr >= L1_DATA_A_START + (_ebss_l1 - _sdata_l1)
-	    && addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH)
+	if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
+	    && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
 		return 1;
 #endif
 #if L1_DATA_B_LENGTH != 0
-	if (addr >= L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1)
-	    && addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH)
+	if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
+	    && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
 		return 1;
 #endif
 #if L2_LENGTH != 0
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 140bf00e9974..d2d388536630 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -45,6 +45,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/dma.h>
 #include <asm/fixed_code.h>
+#include <asm/mem_map.h>
 
 #define TEXT_OFFSET 0
 /*
@@ -80,10 +81,12 @@ static inline struct pt_regs *get_user_regs(struct task_struct *task)
 /*
  * Get all user integer registers.
  */
-static inline int ptrace_getregs(struct task_struct *tsk, void __user * uregs)
+static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
 {
-	struct pt_regs *regs = get_user_regs(tsk);
-	return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
+	struct pt_regs regs;
+	memcpy(&regs, get_user_regs(tsk), sizeof(regs));
+	regs.usp = tsk->thread.usp;
+	return copy_to_user(uregs, &regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
 }
 
 /* Mapping from PT_xxx to the stack offset at which the register is
@@ -220,8 +223,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			break;
 		pr_debug("ptrace: user address is valid\n");
 
-		if (L1_CODE_LENGTH != 0 && addr >= L1_CODE_START
-		    && addr + sizeof(tmp) <= L1_CODE_START + L1_CODE_LENGTH) {
+		if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
+		    && addr + sizeof(tmp) <= get_l1_code_start() + L1_CODE_LENGTH) {
 			safe_dma_memcpy (&tmp, (const void *)(addr), sizeof(tmp));
 			copied = sizeof(tmp);
 
@@ -300,8 +303,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 			break;
 		pr_debug("ptrace: user address is valid\n");
 
-		if (L1_CODE_LENGTH != 0 && addr >= L1_CODE_START
-		    && addr + sizeof(data) <= L1_CODE_START + L1_CODE_LENGTH) {
+		if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
+		    && addr + sizeof(data) <= get_l1_code_start() + L1_CODE_LENGTH) {
 			safe_dma_memcpy ((void *)(addr), &data, sizeof(data));
 			copied = sizeof(data);
 
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index ae97ca407b0d..eeee8cb43360 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -21,7 +21,7 @@
  * the core reset.
  */
 __attribute__((l1_text))
-static void bfin_reset(void)
+static void _bfin_reset(void)
 {
 	/* Wait for completion of "system" events such as cache line
 	 * line fills so that we avoid infinite stalls later on as
@@ -66,6 +66,18 @@ static void bfin_reset(void)
 	}
 }
 
+static void bfin_reset(void)
+{
+	if (ANOMALY_05000353 || ANOMALY_05000386)
+		_bfin_reset();
+	else
+		/* the bootrom checks to see how it was reset and will
+		 * automatically perform a software reset for us when
+		 * it starts executing boot
+		 */
+		asm("raise 1;");
+}
+
 __attribute__((weak))
 void native_machine_restart(char *cmd)
 {
@@ -75,14 +87,10 @@ void machine_restart(char *cmd)
 {
 	native_machine_restart(cmd);
 	local_irq_disable();
-	if (ANOMALY_05000353 || ANOMALY_05000386)
-		bfin_reset();
+	if (smp_processor_id())
+		smp_call_function((void *)bfin_reset, 0, 1);
 	else
-		/* the bootrom checks to see how it was reset and will
-		 * automatically perform a software reset for us when
-		 * it starts executing boot
-		 */
-		asm("raise 1;");
+		bfin_reset();
 }
 
 __attribute__((weak))
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 71a9a8c53cea..b2a811347b65 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -13,6 +13,7 @@
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
 #include <linux/cpu.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/tty.h>
 #include <linux/pfn.h>
@@ -26,11 +27,10 @@
 #include <asm/blackfin.h>
 #include <asm/cplbinit.h>
 #include <asm/div64.h>
+#include <asm/cpu.h>
 #include <asm/fixed_code.h>
 #include <asm/early_printk.h>
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-
 u16 _bfin_swrst;
 EXPORT_SYMBOL(_bfin_swrst);
 
@@ -79,27 +79,68 @@ static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
 static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
 static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
 
-void __init bfin_cache_init(void)
-{
+DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);
+
+static int early_init_clkin_hz(char *buf);
+
 #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
-	generate_cplb_tables();
+void __init generate_cplb_tables(void)
+{
+	unsigned int cpu;
+
+	generate_cplb_tables_all();
+	/* Generate per-CPU I&D CPLB tables */
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
+		generate_cplb_tables_cpu(cpu);
+}
 #endif
 
+void __cpuinit bfin_setup_caches(unsigned int cpu)
+{
 #ifdef CONFIG_BFIN_ICACHE
-	bfin_icache_init();
-	printk(KERN_INFO "Instruction Cache Enabled\n");
+	bfin_icache_init(icplb_tbl[cpu]);
 #endif
 
 #ifdef CONFIG_BFIN_DCACHE
-	bfin_dcache_init();
-	printk(KERN_INFO "Data Cache Enabled"
+	bfin_dcache_init(dcplb_tbl[cpu]);
+#endif
+
+	/*
+	 * In cache coherence emulation mode, we need to have the
+	 * D-cache enabled before running any atomic operation which
+	 * might involve cache invalidation (i.e. spinlock, rwlock).
+	 * So printk's are deferred until then.
+	 */
+#ifdef CONFIG_BFIN_ICACHE
+	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
+#endif
+#ifdef CONFIG_BFIN_DCACHE
+	printk(KERN_INFO "Data Cache Enabled for CPU%u"
 # if defined CONFIG_BFIN_WB
 		" (write-back)"
 # elif defined CONFIG_BFIN_WT
 		" (write-through)"
 # endif
-		"\n");
+		"\n", cpu);
+#endif
+}
+
+void __cpuinit bfin_setup_cpudata(unsigned int cpu)
+{
+	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
+
+	cpudata->idle = current;
+	cpudata->loops_per_jiffy = loops_per_jiffy;
+	cpudata->imemctl = bfin_read_IMEM_CONTROL();
+	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
+}
+
+void __init bfin_cache_init(void)
+{
+#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
+	generate_cplb_tables();
 #endif
+	bfin_setup_caches(0);
 }
 
 void __init bfin_relocate_l1_mem(void)
@@ -109,6 +150,8 @@ void __init bfin_relocate_l1_mem(void)
 	unsigned long l1_data_b_length;
 	unsigned long l2_length;
 
+	blackfin_dma_early_init();
+
 	l1_code_length = _etext_l1 - _stext_l1;
 	if (l1_code_length > L1_CODE_LENGTH)
 		panic("L1 Instruction SRAM Overflow\n");
@@ -230,7 +273,7 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 	/* record all known change-points (starting and ending addresses),
 	   omitting those that are for empty memory regions */
 	chgidx = 0;
 	for (i = 0; i < old_nr; i++) {
 		if (map[i].size != 0) {
 			change_point[chgidx]->addr = map[i].addr;
 			change_point[chgidx++]->pentry = &map[i];
@@ -238,13 +281,13 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 			change_point[chgidx++]->pentry = &map[i];
 		}
 	}
 	chg_nr = chgidx;	/* true number of change-points */
 
 	/* sort change-point list by memory addresses (low -> high) */
 	still_changing = 1;
 	while (still_changing) {
 		still_changing = 0;
 		for (i = 1; i < chg_nr; i++) {
 			/* if <current_addr> > <last_addr>, swap */
 			/* or, if current=<start_addr> & last=<end_addr>, swap */
 			if ((change_point[i]->addr < change_point[i-1]->addr) ||
@@ -261,10 +304,10 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 	}
 
 	/* create a new memmap, removing overlaps */
 	overlap_entries = 0;	/* number of entries in the overlap table */
 	new_entry = 0;		/* index for creating new memmap entries */
 	last_type = 0;		/* start with undefined memory type */
 	last_addr = 0;		/* start with 0 as last starting address */
 	/* loop through change-points, determining affect on the new memmap */
 	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
 		/* keep track of all overlapping memmap entries */
@@ -286,14 +329,14 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 			if (overlap_list[i]->type > current_type)
 				current_type = overlap_list[i]->type;
 		/* continue building up new memmap based on this information */
 		if (current_type != last_type) {
 			if (last_type != 0) {
 				new_map[new_entry].size =
 				    change_point[chgidx]->addr - last_addr;
 				/* move forward only if the new size was non-zero */
 				if (new_map[new_entry].size != 0)
 					if (++new_entry >= BFIN_MEMMAP_MAX)
 						break;	/* no more space left for new entries */
 			}
 			if (current_type != 0) {
 				new_map[new_entry].addr = change_point[chgidx]->addr;
@@ -303,9 +346,9 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 			last_type = current_type;
 		}
 	}
 	new_nr = new_entry;	/* retain count for new entries */
 
 	/* copy new mapping into original location */
 	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
 	*pnr_map = new_nr;
 
@@ -361,7 +404,6 @@ static __init int parse_memmap(char *arg)
361 * - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region 404 * - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
362 * @ from <start> to <start>+<mem>, type RAM 405 * @ from <start> to <start>+<mem>, type RAM
363 * $ from <start> to <start>+<mem>, type RESERVED 406 * $ from <start> to <start>+<mem>, type RESERVED
364 *
365 */ 407 */
366static __init void parse_cmdline_early(char *cmdline_p) 408static __init void parse_cmdline_early(char *cmdline_p)
367{ 409{
@@ -383,14 +425,15 @@ static __init void parse_cmdline_early(char *cmdline_p)
 				if (*to != ' ') {
 					if (*to == '$'
 					    || *(to + 1) == '$')
-						reserved_mem_dcache_on =
-						1;
+						reserved_mem_dcache_on = 1;
 					if (*to == '#'
 					    || *(to + 1) == '#')
-						reserved_mem_icache_on =
-						1;
+						reserved_mem_icache_on = 1;
 				}
 			}
+		} else if (!memcmp(to, "clkin_hz=", 9)) {
+			to += 9;
+			early_init_clkin_hz(to);
 		} else if (!memcmp(to, "earlyprintk=", 12)) {
 			to += 12;
 			setup_early_printk(to);
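
Only the `clkin_hz=` dispatch is visible in this hunk; the declared `early_init_clkin_hz()` presumably converts the argument to a number. A hedged sketch of what such a parser could look like (the function body is an assumption, not quoted from the patch):

#include <linux/kernel.h>
#include <linux/errno.h>

/* Assumed shape of the early clkin_hz= parser; the real body is not
 * shown in this hunk. */
static int early_init_clkin_hz(char *buf)
{
	unsigned long clkin_hz = simple_strtoul(buf, NULL, 0);

	return clkin_hz ? 0 : -EINVAL;
}
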
@@ -417,9 +460,8 @@ static __init void parse_cmdline_early(char *cmdline_p)
  *  [_ramend - DMA_UNCACHED_REGION,
  *	_ramend]:			uncached DMA region
  *  [_ramend, physical_mem_end]:	memory not managed by kernel
- *
  */
 static __init void memory_setup(void)
 {
 #ifdef CONFIG_MTD_UCLINUX
 	unsigned long mtd_phys = 0;
@@ -436,7 +478,7 @@ static __init void memory_setup(void)
 	memory_end = _ramend - DMA_UNCACHED_REGION;
 
 #ifdef CONFIG_MPU
-	/* Round up to multiple of 4MB.  */
+	/* Round up to multiple of 4MB */
 	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
 #else
 	memory_start = PAGE_ALIGN(_ramstart);
@@ -616,7 +658,7 @@ static __init void setup_bootmem_allocator(void)
 	end_pfn = memory_end >> PAGE_SHIFT;
 
 	/*
 	 * give all the memory to the bootmap allocator, tell it to put the
 	 * boot mem_map at the start of memory.
 	 */
 	bootmap_size = init_bootmem_node(NODE_DATA(0),
@@ -791,7 +833,11 @@ void __init setup_arch(char **cmdline_p)
 	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
 #endif
 
+#ifdef CONFIG_SMP
+	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
+#else
 	if (_bfin_swrst & RESET_DOUBLE) {
+#endif
 		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
 #ifdef CONFIG_DEBUG_DOUBLEFAULT
 		/* We assume the crashing kernel, and the current symbol table match */
@@ -823,9 +869,12 @@ void __init setup_arch(char **cmdline_p)
 		if (bfin_compiled_revid() == -1)
 			printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
 			       bfin_revid());
-		else if (bfin_compiled_revid() != 0xffff)
+		else if (bfin_compiled_revid() != 0xffff) {
 			printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
 			       bfin_compiled_revid(), bfin_revid());
+			if (bfin_compiled_revid() > bfin_revid())
+				panic("Error: you are missing anomaly workarounds for this rev\n");
+		}
 	}
 	if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
 		printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
@@ -835,7 +884,7 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
 
 	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
 	       cclk / 1000000, sclk / 1000000);
 
 	if (ANOMALY_05000273 && (cclk >> 1) <= sclk)
 		printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n");
@@ -867,18 +916,21 @@ void __init setup_arch(char **cmdline_p)
 	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
 		!= SAFE_USER_INSTRUCTION - FIXED_CODE_START);
 
+#ifdef CONFIG_SMP
+	platform_init_cpus();
+#endif
 	init_exception_vectors();
-	bfin_cache_init();
+	bfin_cache_init();	/* Initialize caches for the boot CPU */
 }
 
 static int __init topology_init(void)
 {
-	int cpu;
+	unsigned int cpu;
+	/* Record CPU-private information for the boot processor. */
+	bfin_setup_cpudata(0);
 
 	for_each_possible_cpu(cpu) {
-		struct cpu *c = &per_cpu(cpu_devices, cpu);
-
-		register_cpu(c, cpu);
+		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
 	}
 
 	return 0;
@@ -886,36 +938,54 @@ static int __init topology_init(void)
 
 subsys_initcall(topology_init);
 
+/* Get the input clock frequency */
+static u_long cached_clkin_hz = CONFIG_CLKIN_HZ;
+static u_long get_clkin_hz(void)
+{
+	return cached_clkin_hz;
+}
+static int __init early_init_clkin_hz(char *buf)
+{
+	cached_clkin_hz = simple_strtoul(buf, NULL, 0);
+#ifdef BFIN_KERNEL_CLOCK
+	if (cached_clkin_hz != CONFIG_CLKIN_HZ)
+		panic("cannot change clkin_hz when reprogramming clocks");
+#endif
+	return 1;
+}
+early_param("clkin_hz=", early_init_clkin_hz);
+
 /* Get the voltage input multiplier */
-static u_long cached_vco_pll_ctl, cached_vco;
 static u_long get_vco(void)
 {
-	u_long msel;
+	static u_long cached_vco;
+	u_long msel, pll_ctl;
 
-	u_long pll_ctl = bfin_read_PLL_CTL();
-	if (pll_ctl == cached_vco_pll_ctl)
+	/* The assumption here is that VCO never changes at runtime.
+	 * If, someday, we support that, then we'll have to change this.
+	 */
+	if (cached_vco)
 		return cached_vco;
-	else
-		cached_vco_pll_ctl = pll_ctl;
 
+	pll_ctl = bfin_read_PLL_CTL();
 	msel = (pll_ctl >> 9) & 0x3F;
 	if (0 == msel)
 		msel = 64;
 
-	cached_vco = CONFIG_CLKIN_HZ;
+	cached_vco = get_clkin_hz();
 	cached_vco >>= (1 & pll_ctl);	/* DF bit */
 	cached_vco *= msel;
 	return cached_vco;
 }
 
 /* Get the Core clock */
-static u_long cached_cclk_pll_div, cached_cclk;
 u_long get_cclk(void)
 {
+	static u_long cached_cclk_pll_div, cached_cclk;
 	u_long csel, ssel;
 
 	if (bfin_read_PLL_STAT() & 0x1)
-		return CONFIG_CLKIN_HZ;
+		return get_clkin_hz();
 
 	ssel = bfin_read_PLL_DIV();
 	if (ssel == cached_cclk_pll_div)
@@ -934,21 +1004,21 @@ u_long get_cclk(void)
 EXPORT_SYMBOL(get_cclk);
 
 /* Get the System clock */
-static u_long cached_sclk_pll_div, cached_sclk;
 u_long get_sclk(void)
 {
+	static u_long cached_sclk;
 	u_long ssel;
 
-	if (bfin_read_PLL_STAT() & 0x1)
-		return CONFIG_CLKIN_HZ;
-
-	ssel = bfin_read_PLL_DIV();
-	if (ssel == cached_sclk_pll_div)
+	/* The assumption here is that SCLK never changes at runtime.
+	 * If, someday, we support that, then we'll have to change this.
+	 */
+	if (cached_sclk)
 		return cached_sclk;
-	else
-		cached_sclk_pll_div = ssel;
 
-	ssel &= 0xf;
+	if (bfin_read_PLL_STAT() & 0x1)
+		return get_clkin_hz();
+
+	ssel = bfin_read_PLL_DIV() & 0xf;
 	if (0 == ssel) {
 		printk(KERN_WARNING "Invalid System Clock\n");
 		ssel = 1;
@@ -982,17 +1052,18 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	char *cpu, *mmu, *fpu, *vendor, *cache;
 	uint32_t revid;
-
-	u_long cclk = 0, sclk = 0;
+	int cpu_num = *(unsigned int *)v;
+	u_long sclk, cclk;
 	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
+	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu_num);
 
 	cpu = CPU;
 	mmu = "none";
 	fpu = "none";
 	revid = bfin_revid();
 
-	cclk = get_cclk();
 	sclk = get_sclk();
+	cclk = get_cclk();
 
 	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
 	case 0xca:
@@ -1003,10 +1074,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		break;
 	}
 
-	seq_printf(m, "processor\t: %d\n"
-		"vendor_id\t: %s\n",
-		*(unsigned int *)v,
-		vendor);
+	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n", cpu_num, vendor);
 
 	if (CPUID == bfin_cpuid())
 		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
@@ -1029,12 +1097,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		   sclk/1000000, sclk%1000000);
 	seq_printf(m, "bogomips\t: %lu.%02lu\n"
 		   "Calibration\t: %lu loops\n",
-		   (loops_per_jiffy * HZ) / 500000,
-		   ((loops_per_jiffy * HZ) / 5000) % 100,
-		   (loops_per_jiffy * HZ));
+		   (cpudata->loops_per_jiffy * HZ) / 500000,
+		   ((cpudata->loops_per_jiffy * HZ) / 5000) % 100,
+		   (cpudata->loops_per_jiffy * HZ));
 
 	/* Check Cache configutation */
-	switch (bfin_read_DMEM_CONTROL() & (1 << DMC0_P | 1 << DMC1_P)) {
+	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
 	case ACACHE_BSRAM:
 		cache = "dbank-A/B\t: cache/sram";
 		dcache_size = 16;
@@ -1058,10 +1126,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	}
 
 	/* Is it turned on? */
-	if ((bfin_read_DMEM_CONTROL() & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
+	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
 		dcache_size = 0;
 
-	if ((bfin_read_IMEM_CONTROL() & (IMC | ENICPLB)) != (IMC | ENICPLB))
+	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
 		icache_size = 0;
 
 	seq_printf(m, "cache size\t: %d KB(L1 icache) "
@@ -1086,8 +1154,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		   "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
 		   dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
 		   BFIN_DLINES);
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
+#endif
 #ifdef CONFIG_BFIN_ICACHE_LOCK
-	switch ((bfin_read_IMEM_CONTROL() >> 3) & WAYALL_L) {
+	switch ((cpudata->imemctl >> 3) & WAYALL_L) {
 	case WAY0_L:
 		seq_printf(m, "Way0 Locked-Down\n");
 		break;
@@ -1137,6 +1208,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "No Ways are locked\n");
 	}
 #endif
+
+	if (cpu_num != num_possible_cpus() - 1)
+		return 0;
+
+	if (L2_LENGTH)
+		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
 	seq_printf(m, "board name\t: %s\n", bfin_board_name);
 	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
 		   physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
@@ -1144,6 +1221,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		   ((int)memory_end - (int)_stext) >> 10,
 		   _stext,
 		   (void *)memory_end);
+	seq_printf(m, "\n");
 
 	return 0;
 }
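
A note on the setup.c clock hunks above: get_vco()/get_cclk()/get_sclk() move from compare-the-PLL-register caching to compute-once caching, seeded by the new clkin_hz= boot parameter instead of only the compile-time CONFIG_CLKIN_HZ. The sketch below mirrors that pattern outside the kernel; read_pll_ctl() and its return value are made-up stand-ins for bfin_read_PLL_CTL(), and the 25 MHz value stands in for a clkin_hz= setting.

#include <stdio.h>

/* Hypothetical stand-in for bfin_read_PLL_CTL(): MSEL = 8, DF = 0. */
static unsigned long read_pll_ctl(void)
{
	return 8 << 9;
}

static unsigned long clkin_hz = 25000000;	/* as if booted with clkin_hz=25000000 */

/* Compute-once caching, as in the reworked get_vco(): the VCO is
 * assumed never to change at runtime, so the first result is kept. */
static unsigned long get_vco(void)
{
	static unsigned long cached_vco;
	unsigned long msel, pll_ctl;

	if (cached_vco)
		return cached_vco;

	pll_ctl = read_pll_ctl();
	msel = (pll_ctl >> 9) & 0x3F;
	if (msel == 0)
		msel = 64;		/* MSEL of 0 encodes the x64 multiplier */

	cached_vco = clkin_hz;
	cached_vco >>= (pll_ctl & 1);	/* DF bit halves the input clock */
	cached_vco *= msel;
	return cached_vco;
}

int main(void)
{
	printf("VCO = %lu Hz\n", get_vco());	/* 25 MHz x 8 = 200000000 */
	return 0;
}
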
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index eb2352320454..172b4c588467 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -1,32 +1,11 @@
 /*
- * File:         arch/blackfin/kernel/time.c
- * Based on:     none - original work
- * Author:
+ * arch/blackfin/kernel/time.c
  *
- * Created:
- * Description:  This file contains the bfin-specific time handling details.
- *               Most of the stuff is located in the machine specific files.
- *               FIXME: (This file is subject for removal)
+ * This file contains the Blackfin-specific time handling details.
+ * Most of the stuff is located in the machine specific files.
  *
- * Modified:
- *               Copyright 2004-2008 Analog Devices Inc.
- *
- * Bugs:         Enter bugs at http://blackfin.uclinux.org/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Copyright 2004-2008 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
  */
 
 #include <linux/module.h>
@@ -34,23 +13,43 @@
 #include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/irq.h>
+#include <linux/delay.h>
 
 #include <asm/blackfin.h>
 #include <asm/time.h>
+#include <asm/gptimers.h>
 
 /* This is an NTP setting */
 #define TICK_SIZE (tick_nsec / 1000)
 
-static void time_sched_init(irq_handler_t timer_routine);
-static unsigned long gettimeoffset(void);
-
 static struct irqaction bfin_timer_irq = {
-	.name = "BFIN Timer Tick",
+	.name = "Blackfin Timer Tick",
+#ifdef CONFIG_IRQ_PER_CPU
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
+#else
 	.flags = IRQF_DISABLED
+#endif
 };
 
-static void
-time_sched_init(irq_handler_t timer_routine)
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+void __init setup_system_timer0(void)
+{
+	/* Power down the core timer, just to play safe. */
+	bfin_write_TCNTL(0);
+
+	disable_gptimers(TIMER0bit);
+	set_gptimer_status(0, TIMER_STATUS_TRUN0);
+	while (get_gptimer_status(0) & TIMER_STATUS_TRUN0)
+		udelay(10);
+
+	set_gptimer_config(0, 0x59); /* IRQ enable, periodic, PWM_OUT, SCLKed, OUT PAD disabled */
+	set_gptimer_period(TIMER0_id, get_sclk() / HZ);
+	set_gptimer_pwidth(TIMER0_id, 1);
+	SSYNC();
+	enable_gptimers(TIMER0bit);
+}
+#else
+void __init setup_core_timer(void)
 {
 	u32 tcount;
 
@@ -58,10 +57,8 @@ time_sched_init(irq_handler_t timer_routine)
 	bfin_write_TCNTL(1);
 	CSYNC();
 
-	/*
-	 * the TSCALE prescaler counter.
-	 */
-	bfin_write_TSCALE((TIME_SCALE - 1));
+	/* the TSCALE prescaler counter */
+	bfin_write_TSCALE(TIME_SCALE - 1);
 
 	tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
 	bfin_write_TPERIOD(tcount);
@@ -71,35 +68,52 @@ time_sched_init(irq_handler_t timer_routine)
 	CSYNC();
 
 	bfin_write_TCNTL(7);
+}
+#endif
 
-	bfin_timer_irq.handler = (irq_handler_t)timer_routine;
-	/* call setup_irq instead of request_irq because request_irq calls
-	 * kmalloc which has not been initialized yet
-	 */
+static void __init
+time_sched_init(irqreturn_t(*timer_routine) (int, void *))
+{
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+	setup_system_timer0();
+	bfin_timer_irq.handler = timer_routine;
+	setup_irq(IRQ_TIMER0, &bfin_timer_irq);
+#else
+	setup_core_timer();
+	bfin_timer_irq.handler = timer_routine;
 	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
+#endif
 }
 
 /*
  * Should return useconds since last timer tick
  */
+#ifndef CONFIG_GENERIC_TIME
 static unsigned long gettimeoffset(void)
 {
 	unsigned long offset;
 	unsigned long clocks_per_jiffy;
 
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
+	clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
+	offset = bfin_read_TIMER0_COUNTER() / \
+		(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
+
+	if ((get_gptimer_status(0) & TIMER_STATUS_TIMIL0) && offset < (100000 / HZ / 2))
+		offset += (USEC_PER_SEC / HZ);
+#else
 	clocks_per_jiffy = bfin_read_TPERIOD();
-	offset =
-	    (clocks_per_jiffy -
-	     bfin_read_TCOUNT()) / (((clocks_per_jiffy + 1) * HZ) /
-				    USEC_PER_SEC);
+	offset = (clocks_per_jiffy - bfin_read_TCOUNT()) / \
+		(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
 
 	/* Check if we just wrapped the counters and maybe missed a tick */
 	if ((bfin_read_ILAT() & (1 << IRQ_CORETMR))
 		&& (offset < (100000 / HZ / 2)))
 		offset += (USEC_PER_SEC / HZ);
-
+#endif
 	return offset;
 }
+#endif
 
 static inline int set_rtc_mmss(unsigned long nowtime)
 {
@@ -111,43 +125,49 @@ static inline int set_rtc_mmss(unsigned long nowtime)
  * as well as call the "do_timer()" routine every clocktick
  */
 #ifdef CONFIG_CORE_TIMER_IRQ_L1
-irqreturn_t timer_interrupt(int irq, void *dummy)__attribute__((l1_text));
+__attribute__((l1_text))
 #endif
-
 irqreturn_t timer_interrupt(int irq, void *dummy)
 {
 	/* last time the cmos clock got updated */
 	static long last_rtc_update;
 
 	write_seqlock(&xtime_lock);
-
-	do_timer(1);
-
-	profile_tick(CPU_PROFILING);
-
-	/*
-	 * If we have an externally synchronized Linux clock, then update
-	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-	 * called as close as possible to 500 ms before the new second starts.
-	 */
-
-	if (ntp_synced() &&
-	    xtime.tv_sec > last_rtc_update + 660 &&
-	    (xtime.tv_nsec / NSEC_PER_USEC) >=
-	    500000 - ((unsigned)TICK_SIZE) / 2
-	    && (xtime.tv_nsec / NSEC_PER_USEC) <=
-	    500000 + ((unsigned)TICK_SIZE) / 2) {
-		if (set_rtc_mmss(xtime.tv_sec) == 0)
-			last_rtc_update = xtime.tv_sec;
-		else
-			/* Do it again in 60s. */
-			last_rtc_update = xtime.tv_sec - 600;
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
+/* FIXME: Here TIMIL0 is not set when IPIPE enabled, why? */
+	if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
+#endif
+		do_timer(1);
+
+		/*
+		 * If we have an externally synchronized Linux clock, then update
+		 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+		 * called as close as possible to 500 ms before the new second starts.
+		 */
+		if (ntp_synced() &&
+		    xtime.tv_sec > last_rtc_update + 660 &&
+		    (xtime.tv_nsec / NSEC_PER_USEC) >=
+		    500000 - ((unsigned)TICK_SIZE) / 2
+		    && (xtime.tv_nsec / NSEC_PER_USEC) <=
+		    500000 + ((unsigned)TICK_SIZE) / 2) {
+			if (set_rtc_mmss(xtime.tv_sec) == 0)
+				last_rtc_update = xtime.tv_sec;
+			else
+				/* Do it again in 60s. */
+				last_rtc_update = xtime.tv_sec - 600;
+		}
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
+	set_gptimer_status(0, TIMER_STATUS_TIMIL0);
 	}
+#endif
 	write_sequnlock(&xtime_lock);
 
-#ifndef CONFIG_SMP
+#ifdef CONFIG_IPIPE
+	update_root_process_times(get_irq_regs());
+#else
 	update_process_times(user_mode(get_irq_regs()));
 #endif
+	profile_tick(CPU_PROFILING);
 
 	return IRQ_HANDLED;
 }
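
The gettimeoffset() rework above uses the same arithmetic for both tick sources: elapsed timer clocks divided by clocks-per-microsecond, where clocks-per-jiffy is the programmed period. A self-contained sketch of that computation, with made-up counter snapshots standing in for bfin_read_TPERIOD()/bfin_read_TCOUNT():

#include <stdio.h>

#define HZ		100
#define USEC_PER_SEC	1000000UL

/* Made-up snapshot of the core timer: TPERIOD is the reload value and
 * TCOUNT counts down from it once per scaled core clock. */
static unsigned long tperiod = 999999;
static unsigned long tcount = 250000;

/* Same formula as the core-timer branch of the new gettimeoffset(). */
static unsigned long gettimeoffset(void)
{
	unsigned long clocks_per_jiffy = tperiod;

	return (clocks_per_jiffy - tcount) /
		(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
}

int main(void)
{
	/* (999999 - 250000) clocks at 100 clocks/us -> 7499 us */
	printf("%lu us since the last tick\n", gettimeoffset());
	return 0;
}
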
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index bef025b07443..17d8e4172896 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -75,16 +75,6 @@ void __init trap_init(void)
 	CSYNC();
 }
 
-/*
- * Used to save the RETX, SEQSTAT, I/D CPLB FAULT ADDR
- * values across the transition from exception to IRQ5.
- * We put these in L1, so they are going to be in a valid
- * location during exception context
- */
-__attribute__((l1_data))
-unsigned long saved_retx, saved_seqstat,
-	saved_icplb_fault_addr, saved_dcplb_fault_addr;
-
 static void decode_address(char *buf, unsigned long address)
 {
 #ifdef CONFIG_DEBUG_VERBOSE
@@ -211,18 +201,18 @@ asmlinkage void double_fault_c(struct pt_regs *fp)
 	printk(KERN_EMERG "\n" KERN_EMERG "Double Fault\n");
 #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
 	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
+		unsigned int cpu = smp_processor_id();
 		char buf[150];
-		decode_address(buf, saved_retx);
+		decode_address(buf, cpu_pda[cpu].retx);
 		printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
-			(int)saved_seqstat & SEQSTAT_EXCAUSE, buf);
-		decode_address(buf, saved_dcplb_fault_addr);
+			(unsigned int)cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE, buf);
+		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
 		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
-		decode_address(buf, saved_icplb_fault_addr);
+		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
 		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
 
 		decode_address(buf, fp->retx);
-		printk(KERN_NOTICE "The instruction at %s caused a double exception\n",
-			buf);
+		printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
 	} else
 #endif
 	{
@@ -240,6 +230,9 @@ asmlinkage void trap_c(struct pt_regs *fp)
 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
 	int j;
 #endif
+#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
+	unsigned int cpu = smp_processor_id();
+#endif
 	int sig = 0;
 	siginfo_t info;
 	unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
@@ -417,7 +410,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
 		info.si_code = ILL_CPLB_MULHIT;
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
-		if (saved_dcplb_fault_addr < FIXED_CODE_START)
+		if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
 			verbose_printk(KERN_NOTICE "NULL pointer access\n");
 		else
 #endif
@@ -471,7 +464,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
 		info.si_code = ILL_CPLB_MULHIT;
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
-		if (saved_icplb_fault_addr < FIXED_CODE_START)
+		if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
 			verbose_printk(KERN_NOTICE "Jump to NULL address\n");
 		else
 #endif
@@ -584,10 +577,15 @@ asmlinkage void trap_c(struct pt_regs *fp)
 		}
 	}
 
-	info.si_signo = sig;
-	info.si_errno = 0;
-	info.si_addr = (void __user *)fp->pc;
-	force_sig_info(sig, &info, current);
+#ifdef CONFIG_IPIPE
+	if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
+#endif
+	{
+		info.si_signo = sig;
+		info.si_errno = 0;
+		info.si_addr = (void __user *)fp->pc;
+		force_sig_info(sig, &info, current);
+	}
 
 	trace_buffer_restore(j);
 	return;
@@ -656,13 +654,13 @@ static bool get_instruction(unsigned short *val, unsigned short *address)
 	return false;
 }
 
 /*
  * decode the instruction if we are printing out the trace, as it
  * makes things easier to follow, without running it through objdump
  * These are the normal instructions which cause change of flow, which
  * would be at the source of the trace buffer
  */
-#ifdef CONFIG_DEBUG_VERBOSE
+#if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
 static void decode_instruction(unsigned short *address)
 {
 	unsigned short opcode;
@@ -846,7 +844,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
 	}
 	if (fp) {
 		frame = fp;
-		printk(" FP: (0x%p)\n", fp);
+		printk(KERN_NOTICE " FP: (0x%p)\n", fp);
 	} else
 		frame = 0;
 
@@ -960,6 +958,7 @@ void dump_bfin_process(struct pt_regs *fp)
 	else
 		verbose_printk(KERN_NOTICE "COMM= invalid\n");
 
+	printk(KERN_NOTICE "CPU = %d\n", current_thread_info()->cpu);
 	if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
 		verbose_printk(KERN_NOTICE "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
 			KERN_NOTICE " BSS = 0x%p-0x%p USER-STACK = 0x%p\n"
@@ -1053,6 +1052,7 @@ void show_regs(struct pt_regs *fp)
 	struct irqaction *action;
 	unsigned int i;
 	unsigned long flags;
+	unsigned int cpu = smp_processor_id();
 
 	verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "SEQUENCER STATUS:\t\t%s\n", print_tainted());
 	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx SYSCFG: %04lx\n",
@@ -1112,9 +1112,9 @@ unlock:
 
 	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
 	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
-		decode_address(buf, saved_dcplb_fault_addr);
+		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
 		verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
-		decode_address(buf, saved_icplb_fault_addr);
+		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
 		verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
 	}
 
@@ -1153,20 +1153,21 @@ unlock:
 asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text));
 #endif
 
-asmlinkage int sys_bfin_spinlock(int *spinlock)
+static DEFINE_SPINLOCK(bfin_spinlock_lock);
+
+asmlinkage int sys_bfin_spinlock(int *p)
 {
-	int ret = 0;
-	int tmp = 0;
+	int ret, tmp = 0;
 
-	local_irq_disable();
-	ret = get_user(tmp, spinlock);
-	if (ret == 0) {
-		if (tmp)
+	spin_lock(&bfin_spinlock_lock);	/* This would also hold kernel preemption. */
+	ret = get_user(tmp, p);
+	if (likely(ret == 0)) {
+		if (unlikely(tmp))
 			ret = 1;
-		tmp = 1;
-		put_user(tmp, spinlock);
+		else
+			put_user(1, p);
 	}
-	local_irq_enable();
+	spin_unlock(&bfin_spinlock_lock);
 	return ret;
 }
 
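
The sys_bfin_spinlock() rewrite above swaps local_irq_disable() for a real spinlock (making it SMP-safe), but the contract seen from userspace is unchanged: atomically set the word to 1 and report whether it was already held. A host-side sketch of that contract using C11 atomics; this illustrates the semantics only, not the kernel code path:

#include <stdatomic.h>
#include <stdio.h>

/* Test-and-set: returns 1 if the lock was already held, otherwise takes
 * it and returns 0 -- the observable behaviour of sys_bfin_spinlock(). */
static int bfin_spinlock_like(atomic_int *p)
{
	return atomic_exchange(p, 1) != 0;
}

int main(void)
{
	atomic_int lock = 0;

	printf("%d\n", bfin_spinlock_like(&lock));	/* 0: lock acquired */
	printf("%d\n", bfin_spinlock_like(&lock));	/* 1: already held */
	return 0;
}
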
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 7d12c6692a65..4b4341da0585 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -68,6 +68,8 @@ SECTIONS
 		__etext = .;
 	}
 
+	NOTES
+
 	/* Just in case the first read only is a 32-bit access */
 	RO_DATA(4)
 
@@ -109,7 +111,6 @@ SECTIONS
 #endif
 
 		DATA_DATA
-		*(.data.*)
 		CONSTRUCTORS
 
 		/* make sure the init_task is aligned to the
@@ -161,12 +162,14 @@ SECTIONS
 		*(.con_initcall.init)
 		___con_initcall_end = .;
 	}
+	PERCPU(4)
 	SECURITY_INIT
 	.init.ramfs :
 	{
 		. = ALIGN(4);
 		___initramfs_start = .;
 		*(.init.ramfs)
+		. = ALIGN(4);
 		___initramfs_end = .;
 	}
 
@@ -212,7 +215,7 @@ SECTIONS
 		__ebss_b_l1 = .;
 	}
 
-	__l2_lma_start = .;
+	__l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
 
 	.text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1))
 	{
@@ -240,7 +243,7 @@ SECTIONS
 	/* Force trailing alignment of our init section so that when we
 	 * free our init memory, we don't leave behind a partial page.
 	 */
-	. = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1);
+	. = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2);
 	. = ALIGN(PAGE_SIZE);
 	___init_end = .;
 
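
The two vmlinux.lds.S fixes above correct the load-address bookkeeping: __l2_lma_start and ___init_end are now derived from LOADADDR()+SIZEOF() of the section that actually precedes them, rather than from the current location counter or from the wrong section. A toy C model of that arithmetic, with invented addresses and sizes:

#include <stdio.h>

/* Toy model: a section's load memory address (LMA) is the previous
 * section's LMA plus its size, so "end of image" must be computed from
 * the LAST loaded section. Addresses and sizes here are invented. */
struct section {
	const char *name;
	unsigned long lma, size;
};

int main(void)
{
	struct section data_b_l1 = { ".data_b_l1", 0x100000, 0x400 };
	/* __l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1) */
	struct section text_data_l2 = {
		".text_data_l2", data_b_l1.lma + data_b_l1.size, 0x2000
	};
	/* ___init_end steps past the last loaded section:
	 * . = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2) */
	unsigned long init_end = text_data_l2.lma + text_data_l2.size;

	printf("__l2_lma_start = %#lx\n", text_data_l2.lma);	/* 0x100400 */
	printf("___init_end    = %#lx\n", init_end);		/* 0x102400 */
	return 0;
}
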