aboutsummaryrefslogtreecommitdiffstats
path: root/arch/blackfin/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--arch/blackfin/kernel/Makefile3
-rw-r--r--arch/blackfin/kernel/bfin_dma_5xx.c37
-rw-r--r--arch/blackfin/kernel/bfin_gpio.c321
-rw-r--r--arch/blackfin/kernel/bfin_ksyms.c1
-rw-r--r--arch/blackfin/kernel/cplb-mpu/cplbmgr.c8
-rw-r--r--arch/blackfin/kernel/cplb-nompu/cplbinit.c2
-rw-r--r--arch/blackfin/kernel/debug-mmrs.c1860
-rw-r--r--arch/blackfin/kernel/gptimers.c2
-rw-r--r--arch/blackfin/kernel/ipipe.c113
-rw-r--r--arch/blackfin/kernel/irqchip.c11
-rw-r--r--arch/blackfin/kernel/kgdb.c35
-rw-r--r--arch/blackfin/kernel/kgdb_test.c5
-rw-r--r--arch/blackfin/kernel/module.c45
-rw-r--r--arch/blackfin/kernel/nmi.c38
-rw-r--r--arch/blackfin/kernel/perf_event.c498
-rw-r--r--arch/blackfin/kernel/process.c16
-rw-r--r--arch/blackfin/kernel/ptrace.c28
-rw-r--r--arch/blackfin/kernel/reboot.c65
-rw-r--r--arch/blackfin/kernel/setup.c91
-rw-r--r--arch/blackfin/kernel/sys_bfin.c15
-rw-r--r--arch/blackfin/kernel/time-ts.c43
-rw-r--r--arch/blackfin/kernel/time.c6
-rw-r--r--arch/blackfin/kernel/trace.c8
-rw-r--r--arch/blackfin/kernel/traps.c2
-rw-r--r--arch/blackfin/kernel/vmlinux.lds.S11
25 files changed, 2866 insertions, 398 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index ca5ccc777772..d550b24d9e9b 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -33,7 +33,10 @@ obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o
33obj-$(CONFIG_STACKTRACE) += stacktrace.o 33obj-$(CONFIG_STACKTRACE) += stacktrace.o
34obj-$(CONFIG_DEBUG_VERBOSE) += trace.o 34obj-$(CONFIG_DEBUG_VERBOSE) += trace.o
35obj-$(CONFIG_BFIN_PSEUDODBG_INSNS) += pseudodbg.o 35obj-$(CONFIG_BFIN_PSEUDODBG_INSNS) += pseudodbg.o
36obj-$(CONFIG_PERF_EVENTS) += perf_event.o
36 37
37# the kgdb test puts code into L2 and without linker 38# the kgdb test puts code into L2 and without linker
38# relaxation, we need to force long calls to/from it 39# relaxation, we need to force long calls to/from it
39CFLAGS_kgdb_test.o := -mlong-calls -O0 40CFLAGS_kgdb_test.o := -mlong-calls -O0
41
42obj-$(CONFIG_DEBUG_MMRS) += debug-mmrs.o
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 1e485dfdc9f2..71dbaa4a48af 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -36,6 +36,11 @@ static int __init blackfin_dma_init(void)
36 36
37 printk(KERN_INFO "Blackfin DMA Controller\n"); 37 printk(KERN_INFO "Blackfin DMA Controller\n");
38 38
39
40#if ANOMALY_05000480
41 bfin_write_DMAC_TC_PER(0x0111);
42#endif
43
39 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 44 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
40 atomic_set(&dma_ch[i].chan_status, 0); 45 atomic_set(&dma_ch[i].chan_status, 0);
41 dma_ch[i].regs = dma_io_base_addr[i]; 46 dma_ch[i].regs = dma_io_base_addr[i];
@@ -84,6 +89,24 @@ static int __init proc_dma_init(void)
84late_initcall(proc_dma_init); 89late_initcall(proc_dma_init);
85#endif 90#endif
86 91
92static void set_dma_peripheral_map(unsigned int channel, const char *device_id)
93{
94#ifdef CONFIG_BF54x
95 unsigned int per_map;
96
97 switch (channel) {
98 case CH_UART2_RX: per_map = 0xC << 12; break;
99 case CH_UART2_TX: per_map = 0xD << 12; break;
100 case CH_UART3_RX: per_map = 0xE << 12; break;
101 case CH_UART3_TX: per_map = 0xF << 12; break;
102 default: return;
103 }
104
105 if (strncmp(device_id, "BFIN_UART", 9) == 0)
106 dma_ch[channel].regs->peripheral_map = per_map;
107#endif
108}
109
87/** 110/**
88 * request_dma - request a DMA channel 111 * request_dma - request a DMA channel
89 * 112 *
@@ -111,19 +134,7 @@ int request_dma(unsigned int channel, const char *device_id)
111 return -EBUSY; 134 return -EBUSY;
112 } 135 }
113 136
114#ifdef CONFIG_BF54x 137 set_dma_peripheral_map(channel, device_id);
115 if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
116 unsigned int per_map;
117 per_map = dma_ch[channel].regs->peripheral_map & 0xFFF;
118 if (strncmp(device_id, "BFIN_UART", 9) == 0)
119 dma_ch[channel].regs->peripheral_map = per_map |
120 ((channel - CH_UART2_RX + 0xC)<<12);
121 else
122 dma_ch[channel].regs->peripheral_map = per_map |
123 ((channel - CH_UART2_RX + 0x6)<<12);
124 }
125#endif
126
127 dma_ch[channel].device_id = device_id; 138 dma_ch[channel].device_id = device_id;
128 dma_ch[channel].irq = 0; 139 dma_ch[channel].irq = 0;
129 140
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index dc07ed08b37f..bcf8cf6fe412 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * GPIO Abstraction Layer 2 * GPIO Abstraction Layer
3 * 3 *
4 * Copyright 2006-2009 Analog Devices Inc. 4 * Copyright 2006-2010 Analog Devices Inc.
5 * 5 *
6 * Licensed under the GPL-2 or later 6 * Licensed under the GPL-2 or later
7 */ 7 */
@@ -10,10 +10,12 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/err.h> 11#include <linux/err.h>
12#include <linux/proc_fs.h> 12#include <linux/proc_fs.h>
13#include <linux/seq_file.h>
13#include <asm/blackfin.h> 14#include <asm/blackfin.h>
14#include <asm/gpio.h> 15#include <asm/gpio.h>
15#include <asm/portmux.h> 16#include <asm/portmux.h>
16#include <linux/irq.h> 17#include <linux/irq.h>
18#include <asm/irq_handler.h>
17 19
18#if ANOMALY_05000311 || ANOMALY_05000323 20#if ANOMALY_05000311 || ANOMALY_05000323
19enum { 21enum {
@@ -215,82 +217,91 @@ static void port_setup(unsigned gpio, unsigned short usage)
215} 217}
216 218
217#ifdef BF537_FAMILY 219#ifdef BF537_FAMILY
218static struct { 220static const s8 port_mux[] = {
219 unsigned short res; 221 [GPIO_PF0] = 3,
220 unsigned short offset; 222 [GPIO_PF1] = 3,
221} port_mux_lut[] = { 223 [GPIO_PF2] = 4,
222 {.res = P_PPI0_D13, .offset = 11}, 224 [GPIO_PF3] = 4,
223 {.res = P_PPI0_D14, .offset = 11}, 225 [GPIO_PF4] = 5,
224 {.res = P_PPI0_D15, .offset = 11}, 226 [GPIO_PF5] = 6,
225 {.res = P_SPORT1_TFS, .offset = 11}, 227 [GPIO_PF6] = 7,
226 {.res = P_SPORT1_TSCLK, .offset = 11}, 228 [GPIO_PF7] = 8,
227 {.res = P_SPORT1_DTPRI, .offset = 11}, 229 [GPIO_PF8 ... GPIO_PF15] = -1,
228 {.res = P_PPI0_D10, .offset = 10}, 230 [GPIO_PG0 ... GPIO_PG7] = -1,
229 {.res = P_PPI0_D11, .offset = 10}, 231 [GPIO_PG8] = 9,
230 {.res = P_PPI0_D12, .offset = 10}, 232 [GPIO_PG9] = 9,
231 {.res = P_SPORT1_RSCLK, .offset = 10}, 233 [GPIO_PG10] = 10,
232 {.res = P_SPORT1_RFS, .offset = 10}, 234 [GPIO_PG11] = 10,
233 {.res = P_SPORT1_DRPRI, .offset = 10}, 235 [GPIO_PG12] = 10,
234 {.res = P_PPI0_D8, .offset = 9}, 236 [GPIO_PG13] = 11,
235 {.res = P_PPI0_D9, .offset = 9}, 237 [GPIO_PG14] = 11,
236 {.res = P_SPORT1_DRSEC, .offset = 9}, 238 [GPIO_PG15] = 11,
237 {.res = P_SPORT1_DTSEC, .offset = 9}, 239 [GPIO_PH0 ... GPIO_PH15] = -1,
238 {.res = P_TMR2, .offset = 8}, 240 [PORT_PJ0 ... PORT_PJ3] = -1,
239 {.res = P_PPI0_FS3, .offset = 8}, 241 [PORT_PJ4] = 1,
240 {.res = P_TMR3, .offset = 7}, 242 [PORT_PJ5] = 1,
241 {.res = P_SPI0_SSEL4, .offset = 7}, 243 [PORT_PJ6 ... PORT_PJ9] = -1,
242 {.res = P_TMR4, .offset = 6}, 244 [PORT_PJ10] = 0,
243 {.res = P_SPI0_SSEL5, .offset = 6}, 245 [PORT_PJ11] = 0,
244 {.res = P_TMR5, .offset = 5},
245 {.res = P_SPI0_SSEL6, .offset = 5},
246 {.res = P_UART1_RX, .offset = 4},
247 {.res = P_UART1_TX, .offset = 4},
248 {.res = P_TMR6, .offset = 4},
249 {.res = P_TMR7, .offset = 4},
250 {.res = P_UART0_RX, .offset = 3},
251 {.res = P_UART0_TX, .offset = 3},
252 {.res = P_DMAR0, .offset = 3},
253 {.res = P_DMAR1, .offset = 3},
254 {.res = P_SPORT0_DTSEC, .offset = 1},
255 {.res = P_SPORT0_DRSEC, .offset = 1},
256 {.res = P_CAN0_RX, .offset = 1},
257 {.res = P_CAN0_TX, .offset = 1},
258 {.res = P_SPI0_SSEL7, .offset = 1},
259 {.res = P_SPORT0_TFS, .offset = 0},
260 {.res = P_SPORT0_DTPRI, .offset = 0},
261 {.res = P_SPI0_SSEL2, .offset = 0},
262 {.res = P_SPI0_SSEL3, .offset = 0},
263}; 246};
264 247
265static void portmux_setup(unsigned short per) 248static int portmux_group_check(unsigned short per)
266{ 249{
267 u16 y, offset, muxreg; 250 u16 ident = P_IDENT(per);
268 u16 function = P_FUNCT2MUX(per); 251 u16 function = P_FUNCT2MUX(per);
252 s8 offset = port_mux[ident];
253 u16 m, pmux, pfunc;
269 254
270 for (y = 0; y < ARRAY_SIZE(port_mux_lut); y++) { 255 if (offset < 0)
271 if (port_mux_lut[y].res == per) { 256 return 0;
272
273 /* SET PORTMUX REG */
274
275 offset = port_mux_lut[y].offset;
276 muxreg = bfin_read_PORT_MUX();
277 257
278 if (offset != 1) 258 pmux = bfin_read_PORT_MUX();
279 muxreg &= ~(1 << offset); 259 for (m = 0; m < ARRAY_SIZE(port_mux); ++m) {
280 else 260 if (m == ident)
281 muxreg &= ~(3 << 1); 261 continue;
262 if (port_mux[m] != offset)
263 continue;
264 if (!is_reserved(peri, m, 1))
265 continue;
282 266
283 muxreg |= (function << offset); 267 if (offset == 1)
284 bfin_write_PORT_MUX(muxreg); 268 pfunc = (pmux >> offset) & 3;
269 else
270 pfunc = (pmux >> offset) & 1;
271 if (pfunc != function) {
272 pr_err("pin group conflict! request pin %d func %d conflict with pin %d func %d\n",
273 ident, function, m, pfunc);
274 return -EINVAL;
285 } 275 }
286 } 276 }
277
278 return 0;
279}
280
281static void portmux_setup(unsigned short per)
282{
283 u16 ident = P_IDENT(per);
284 u16 function = P_FUNCT2MUX(per);
285 s8 offset = port_mux[ident];
286 u16 pmux;
287
288 if (offset == -1)
289 return;
290
291 pmux = bfin_read_PORT_MUX();
292 if (offset != 1)
293 pmux &= ~(1 << offset);
294 else
295 pmux &= ~(3 << 1);
296 pmux |= (function << offset);
297 bfin_write_PORT_MUX(pmux);
287} 298}
288#elif defined(CONFIG_BF54x) 299#elif defined(CONFIG_BF54x)
289inline void portmux_setup(unsigned short per) 300inline void portmux_setup(unsigned short per)
290{ 301{
291 u32 pmux;
292 u16 ident = P_IDENT(per); 302 u16 ident = P_IDENT(per);
293 u16 function = P_FUNCT2MUX(per); 303 u16 function = P_FUNCT2MUX(per);
304 u32 pmux;
294 305
295 pmux = gpio_array[gpio_bank(ident)]->port_mux; 306 pmux = gpio_array[gpio_bank(ident)]->port_mux;
296 307
@@ -302,20 +313,54 @@ inline void portmux_setup(unsigned short per)
302 313
303inline u16 get_portmux(unsigned short per) 314inline u16 get_portmux(unsigned short per)
304{ 315{
305 u32 pmux;
306 u16 ident = P_IDENT(per); 316 u16 ident = P_IDENT(per);
307 317 u32 pmux = gpio_array[gpio_bank(ident)]->port_mux;
308 pmux = gpio_array[gpio_bank(ident)]->port_mux;
309
310 return (pmux >> (2 * gpio_sub_n(ident)) & 0x3); 318 return (pmux >> (2 * gpio_sub_n(ident)) & 0x3);
311} 319}
320static int portmux_group_check(unsigned short per)
321{
322 return 0;
323}
312#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x) 324#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
325static int portmux_group_check(unsigned short per)
326{
327 u16 ident = P_IDENT(per);
328 u16 function = P_FUNCT2MUX(per);
329 u8 offset = pmux_offset[gpio_bank(ident)][gpio_sub_n(ident)];
330 u16 pin, gpiopin, pfunc;
331
332 for (pin = 0; pin < GPIO_BANKSIZE; ++pin) {
333 if (offset != pmux_offset[gpio_bank(ident)][pin])
334 continue;
335
336 gpiopin = gpio_bank(ident) * GPIO_BANKSIZE + pin;
337 if (gpiopin == ident)
338 continue;
339 if (!is_reserved(peri, gpiopin, 1))
340 continue;
341
342 pfunc = *port_mux[gpio_bank(ident)];
343 pfunc = (pfunc >> offset) & 3;
344 if (pfunc != function) {
345 pr_err("pin group conflict! request pin %d func %d conflict with pin %d func %d\n",
346 ident, function, gpiopin, pfunc);
347 return -EINVAL;
348 }
349 }
350
351 return 0;
352}
353
313inline void portmux_setup(unsigned short per) 354inline void portmux_setup(unsigned short per)
314{ 355{
315 u16 pmux, ident = P_IDENT(per), function = P_FUNCT2MUX(per); 356 u16 ident = P_IDENT(per);
357 u16 function = P_FUNCT2MUX(per);
316 u8 offset = pmux_offset[gpio_bank(ident)][gpio_sub_n(ident)]; 358 u8 offset = pmux_offset[gpio_bank(ident)][gpio_sub_n(ident)];
359 u16 pmux;
317 360
318 pmux = *port_mux[gpio_bank(ident)]; 361 pmux = *port_mux[gpio_bank(ident)];
362 if (((pmux >> offset) & 3) == function)
363 return;
319 pmux &= ~(3 << offset); 364 pmux &= ~(3 << offset);
320 pmux |= (function & 3) << offset; 365 pmux |= (function & 3) << offset;
321 *port_mux[gpio_bank(ident)] = pmux; 366 *port_mux[gpio_bank(ident)] = pmux;
@@ -323,6 +368,10 @@ inline void portmux_setup(unsigned short per)
323} 368}
324#else 369#else
325# define portmux_setup(...) do { } while (0) 370# define portmux_setup(...) do { } while (0)
371static int portmux_group_check(unsigned short per)
372{
373 return 0;
374}
326#endif 375#endif
327 376
328#ifndef CONFIG_BF54x 377#ifndef CONFIG_BF54x
@@ -349,13 +398,13 @@ inline void portmux_setup(unsigned short per)
349void set_gpio_ ## name(unsigned gpio, unsigned short arg) \ 398void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
350{ \ 399{ \
351 unsigned long flags; \ 400 unsigned long flags; \
352 local_irq_save_hw(flags); \ 401 flags = hard_local_irq_save(); \
353 if (arg) \ 402 if (arg) \
354 gpio_array[gpio_bank(gpio)]->name |= gpio_bit(gpio); \ 403 gpio_array[gpio_bank(gpio)]->name |= gpio_bit(gpio); \
355 else \ 404 else \
356 gpio_array[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \ 405 gpio_array[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \
357 AWA_DUMMY_READ(name); \ 406 AWA_DUMMY_READ(name); \
358 local_irq_restore_hw(flags); \ 407 hard_local_irq_restore(flags); \
359} \ 408} \
360EXPORT_SYMBOL(set_gpio_ ## name); 409EXPORT_SYMBOL(set_gpio_ ## name);
361 410
@@ -371,14 +420,14 @@ void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
371{ \ 420{ \
372 unsigned long flags; \ 421 unsigned long flags; \
373 if (ANOMALY_05000311 || ANOMALY_05000323) \ 422 if (ANOMALY_05000311 || ANOMALY_05000323) \
374 local_irq_save_hw(flags); \ 423 flags = hard_local_irq_save(); \
375 if (arg) \ 424 if (arg) \
376 gpio_array[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \ 425 gpio_array[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \
377 else \ 426 else \
378 gpio_array[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \ 427 gpio_array[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \
379 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 428 if (ANOMALY_05000311 || ANOMALY_05000323) { \
380 AWA_DUMMY_READ(name); \ 429 AWA_DUMMY_READ(name); \
381 local_irq_restore_hw(flags); \ 430 hard_local_irq_restore(flags); \
382 } \ 431 } \
383} \ 432} \
384EXPORT_SYMBOL(set_gpio_ ## name); 433EXPORT_SYMBOL(set_gpio_ ## name);
@@ -391,11 +440,11 @@ void set_gpio_toggle(unsigned gpio)
391{ 440{
392 unsigned long flags; 441 unsigned long flags;
393 if (ANOMALY_05000311 || ANOMALY_05000323) 442 if (ANOMALY_05000311 || ANOMALY_05000323)
394 local_irq_save_hw(flags); 443 flags = hard_local_irq_save();
395 gpio_array[gpio_bank(gpio)]->toggle = gpio_bit(gpio); 444 gpio_array[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
396 if (ANOMALY_05000311 || ANOMALY_05000323) { 445 if (ANOMALY_05000311 || ANOMALY_05000323) {
397 AWA_DUMMY_READ(toggle); 446 AWA_DUMMY_READ(toggle);
398 local_irq_restore_hw(flags); 447 hard_local_irq_restore(flags);
399 } 448 }
400} 449}
401EXPORT_SYMBOL(set_gpio_toggle); 450EXPORT_SYMBOL(set_gpio_toggle);
@@ -408,11 +457,11 @@ void set_gpiop_ ## name(unsigned gpio, unsigned short arg) \
408{ \ 457{ \
409 unsigned long flags; \ 458 unsigned long flags; \
410 if (ANOMALY_05000311 || ANOMALY_05000323) \ 459 if (ANOMALY_05000311 || ANOMALY_05000323) \
411 local_irq_save_hw(flags); \ 460 flags = hard_local_irq_save(); \
412 gpio_array[gpio_bank(gpio)]->name = arg; \ 461 gpio_array[gpio_bank(gpio)]->name = arg; \
413 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 462 if (ANOMALY_05000311 || ANOMALY_05000323) { \
414 AWA_DUMMY_READ(name); \ 463 AWA_DUMMY_READ(name); \
415 local_irq_restore_hw(flags); \ 464 hard_local_irq_restore(flags); \
416 } \ 465 } \
417} \ 466} \
418EXPORT_SYMBOL(set_gpiop_ ## name); 467EXPORT_SYMBOL(set_gpiop_ ## name);
@@ -433,11 +482,11 @@ unsigned short get_gpio_ ## name(unsigned gpio) \
433 unsigned long flags; \ 482 unsigned long flags; \
434 unsigned short ret; \ 483 unsigned short ret; \
435 if (ANOMALY_05000311 || ANOMALY_05000323) \ 484 if (ANOMALY_05000311 || ANOMALY_05000323) \
436 local_irq_save_hw(flags); \ 485 flags = hard_local_irq_save(); \
437 ret = 0x01 & (gpio_array[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \ 486 ret = 0x01 & (gpio_array[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \
438 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 487 if (ANOMALY_05000311 || ANOMALY_05000323) { \
439 AWA_DUMMY_READ(name); \ 488 AWA_DUMMY_READ(name); \
440 local_irq_restore_hw(flags); \ 489 hard_local_irq_restore(flags); \
441 } \ 490 } \
442 return ret; \ 491 return ret; \
443} \ 492} \
@@ -460,11 +509,11 @@ unsigned short get_gpiop_ ## name(unsigned gpio) \
460 unsigned long flags; \ 509 unsigned long flags; \
461 unsigned short ret; \ 510 unsigned short ret; \
462 if (ANOMALY_05000311 || ANOMALY_05000323) \ 511 if (ANOMALY_05000311 || ANOMALY_05000323) \
463 local_irq_save_hw(flags); \ 512 flags = hard_local_irq_save(); \
464 ret = (gpio_array[gpio_bank(gpio)]->name); \ 513 ret = (gpio_array[gpio_bank(gpio)]->name); \
465 if (ANOMALY_05000311 || ANOMALY_05000323) { \ 514 if (ANOMALY_05000311 || ANOMALY_05000323) { \
466 AWA_DUMMY_READ(name); \ 515 AWA_DUMMY_READ(name); \
467 local_irq_restore_hw(flags); \ 516 hard_local_irq_restore(flags); \
468 } \ 517 } \
469 return ret; \ 518 return ret; \
470} \ 519} \
@@ -487,7 +536,7 @@ static const unsigned int sic_iwr_irqs[] = {
487#if defined(BF533_FAMILY) 536#if defined(BF533_FAMILY)
488 IRQ_PROG_INTB 537 IRQ_PROG_INTB
489#elif defined(BF537_FAMILY) 538#elif defined(BF537_FAMILY)
490 IRQ_PROG_INTB, IRQ_PORTG_INTB, IRQ_MAC_TX 539 IRQ_PF_INTB_WATCH, IRQ_PORTG_INTB, IRQ_PH_INTB_MAC_TX
491#elif defined(BF538_FAMILY) 540#elif defined(BF538_FAMILY)
492 IRQ_PORTF_INTB 541 IRQ_PORTF_INTB
493#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x) 542#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
@@ -525,14 +574,14 @@ int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl)
525 if (check_gpio(gpio) < 0) 574 if (check_gpio(gpio) < 0)
526 return -EINVAL; 575 return -EINVAL;
527 576
528 local_irq_save_hw(flags); 577 flags = hard_local_irq_save();
529 if (ctrl) 578 if (ctrl)
530 reserve(wakeup, gpio); 579 reserve(wakeup, gpio);
531 else 580 else
532 unreserve(wakeup, gpio); 581 unreserve(wakeup, gpio);
533 582
534 set_gpio_maskb(gpio, ctrl); 583 set_gpio_maskb(gpio, ctrl);
535 local_irq_restore_hw(flags); 584 hard_local_irq_restore(flags);
536 585
537 return 0; 586 return 0;
538} 587}
@@ -690,7 +739,7 @@ int peripheral_request(unsigned short per, const char *label)
690 739
691 BUG_ON(ident >= MAX_RESOURCES); 740 BUG_ON(ident >= MAX_RESOURCES);
692 741
693 local_irq_save_hw(flags); 742 flags = hard_local_irq_save();
694 743
695 /* If a pin can be muxed as either GPIO or peripheral, make 744 /* If a pin can be muxed as either GPIO or peripheral, make
696 * sure it is not already a GPIO pin when we request it. 745 * sure it is not already a GPIO pin when we request it.
@@ -701,7 +750,7 @@ int peripheral_request(unsigned short per, const char *label)
701 printk(KERN_ERR 750 printk(KERN_ERR
702 "%s: Peripheral %d is already reserved as GPIO by %s !\n", 751 "%s: Peripheral %d is already reserved as GPIO by %s !\n",
703 __func__, ident, get_label(ident)); 752 __func__, ident, get_label(ident));
704 local_irq_restore_hw(flags); 753 hard_local_irq_restore(flags);
705 return -EBUSY; 754 return -EBUSY;
706 } 755 }
707 756
@@ -730,18 +779,22 @@ int peripheral_request(unsigned short per, const char *label)
730 printk(KERN_ERR 779 printk(KERN_ERR
731 "%s: Peripheral %d function %d is already reserved by %s !\n", 780 "%s: Peripheral %d function %d is already reserved by %s !\n",
732 __func__, ident, P_FUNCT2MUX(per), get_label(ident)); 781 __func__, ident, P_FUNCT2MUX(per), get_label(ident));
733 local_irq_restore_hw(flags); 782 hard_local_irq_restore(flags);
734 return -EBUSY; 783 return -EBUSY;
735 } 784 }
736 } 785 }
737 786
787 if (unlikely(portmux_group_check(per))) {
788 hard_local_irq_restore(flags);
789 return -EBUSY;
790 }
738 anyway: 791 anyway:
739 reserve(peri, ident); 792 reserve(peri, ident);
740 793
741 portmux_setup(per); 794 portmux_setup(per);
742 port_setup(ident, PERIPHERAL_USAGE); 795 port_setup(ident, PERIPHERAL_USAGE);
743 796
744 local_irq_restore_hw(flags); 797 hard_local_irq_restore(flags);
745 set_label(ident, label); 798 set_label(ident, label);
746 799
747 return 0; 800 return 0;
@@ -780,10 +833,10 @@ void peripheral_free(unsigned short per)
780 if (!(per & P_DEFINED)) 833 if (!(per & P_DEFINED))
781 return; 834 return;
782 835
783 local_irq_save_hw(flags); 836 flags = hard_local_irq_save();
784 837
785 if (unlikely(!is_reserved(peri, ident, 0))) { 838 if (unlikely(!is_reserved(peri, ident, 0))) {
786 local_irq_restore_hw(flags); 839 hard_local_irq_restore(flags);
787 return; 840 return;
788 } 841 }
789 842
@@ -794,7 +847,7 @@ void peripheral_free(unsigned short per)
794 847
795 set_label(ident, "free"); 848 set_label(ident, "free");
796 849
797 local_irq_restore_hw(flags); 850 hard_local_irq_restore(flags);
798} 851}
799EXPORT_SYMBOL(peripheral_free); 852EXPORT_SYMBOL(peripheral_free);
800 853
@@ -828,7 +881,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
828 if (check_gpio(gpio) < 0) 881 if (check_gpio(gpio) < 0)
829 return -EINVAL; 882 return -EINVAL;
830 883
831 local_irq_save_hw(flags); 884 flags = hard_local_irq_save();
832 885
833 /* 886 /*
834 * Allow that the identical GPIO can 887 * Allow that the identical GPIO can
@@ -837,7 +890,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
837 */ 890 */
838 891
839 if (cmp_label(gpio, label) == 0) { 892 if (cmp_label(gpio, label) == 0) {
840 local_irq_restore_hw(flags); 893 hard_local_irq_restore(flags);
841 return 0; 894 return 0;
842 } 895 }
843 896
@@ -846,7 +899,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
846 dump_stack(); 899 dump_stack();
847 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n", 900 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
848 gpio, get_label(gpio)); 901 gpio, get_label(gpio));
849 local_irq_restore_hw(flags); 902 hard_local_irq_restore(flags);
850 return -EBUSY; 903 return -EBUSY;
851 } 904 }
852 if (unlikely(is_reserved(peri, gpio, 1))) { 905 if (unlikely(is_reserved(peri, gpio, 1))) {
@@ -855,7 +908,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
855 printk(KERN_ERR 908 printk(KERN_ERR
856 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 909 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
857 gpio, get_label(gpio)); 910 gpio, get_label(gpio));
858 local_irq_restore_hw(flags); 911 hard_local_irq_restore(flags);
859 return -EBUSY; 912 return -EBUSY;
860 } 913 }
861 if (unlikely(is_reserved(gpio_irq, gpio, 1))) { 914 if (unlikely(is_reserved(gpio_irq, gpio, 1))) {
@@ -871,7 +924,7 @@ int bfin_gpio_request(unsigned gpio, const char *label)
871 reserve(gpio, gpio); 924 reserve(gpio, gpio);
872 set_label(gpio, label); 925 set_label(gpio, label);
873 926
874 local_irq_restore_hw(flags); 927 hard_local_irq_restore(flags);
875 928
876 port_setup(gpio, GPIO_USAGE); 929 port_setup(gpio, GPIO_USAGE);
877 930
@@ -888,13 +941,13 @@ void bfin_gpio_free(unsigned gpio)
888 941
889 might_sleep(); 942 might_sleep();
890 943
891 local_irq_save_hw(flags); 944 flags = hard_local_irq_save();
892 945
893 if (unlikely(!is_reserved(gpio, gpio, 0))) { 946 if (unlikely(!is_reserved(gpio, gpio, 0))) {
894 if (system_state == SYSTEM_BOOTING) 947 if (system_state == SYSTEM_BOOTING)
895 dump_stack(); 948 dump_stack();
896 gpio_error(gpio); 949 gpio_error(gpio);
897 local_irq_restore_hw(flags); 950 hard_local_irq_restore(flags);
898 return; 951 return;
899 } 952 }
900 953
@@ -902,7 +955,7 @@ void bfin_gpio_free(unsigned gpio)
902 955
903 set_label(gpio, "free"); 956 set_label(gpio, "free");
904 957
905 local_irq_restore_hw(flags); 958 hard_local_irq_restore(flags);
906} 959}
907EXPORT_SYMBOL(bfin_gpio_free); 960EXPORT_SYMBOL(bfin_gpio_free);
908 961
@@ -913,7 +966,7 @@ int bfin_special_gpio_request(unsigned gpio, const char *label)
913{ 966{
914 unsigned long flags; 967 unsigned long flags;
915 968
916 local_irq_save_hw(flags); 969 flags = hard_local_irq_save();
917 970
918 /* 971 /*
919 * Allow that the identical GPIO can 972 * Allow that the identical GPIO can
@@ -922,19 +975,19 @@ int bfin_special_gpio_request(unsigned gpio, const char *label)
922 */ 975 */
923 976
924 if (cmp_label(gpio, label) == 0) { 977 if (cmp_label(gpio, label) == 0) {
925 local_irq_restore_hw(flags); 978 hard_local_irq_restore(flags);
926 return 0; 979 return 0;
927 } 980 }
928 981
929 if (unlikely(is_reserved(special_gpio, gpio, 1))) { 982 if (unlikely(is_reserved(special_gpio, gpio, 1))) {
930 local_irq_restore_hw(flags); 983 hard_local_irq_restore(flags);
931 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n", 984 printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
932 gpio, get_label(gpio)); 985 gpio, get_label(gpio));
933 986
934 return -EBUSY; 987 return -EBUSY;
935 } 988 }
936 if (unlikely(is_reserved(peri, gpio, 1))) { 989 if (unlikely(is_reserved(peri, gpio, 1))) {
937 local_irq_restore_hw(flags); 990 hard_local_irq_restore(flags);
938 printk(KERN_ERR 991 printk(KERN_ERR
939 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 992 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
940 gpio, get_label(gpio)); 993 gpio, get_label(gpio));
@@ -946,7 +999,7 @@ int bfin_special_gpio_request(unsigned gpio, const char *label)
946 reserve(peri, gpio); 999 reserve(peri, gpio);
947 1000
948 set_label(gpio, label); 1001 set_label(gpio, label);
949 local_irq_restore_hw(flags); 1002 hard_local_irq_restore(flags);
950 port_setup(gpio, GPIO_USAGE); 1003 port_setup(gpio, GPIO_USAGE);
951 1004
952 return 0; 1005 return 0;
@@ -959,18 +1012,18 @@ void bfin_special_gpio_free(unsigned gpio)
959 1012
960 might_sleep(); 1013 might_sleep();
961 1014
962 local_irq_save_hw(flags); 1015 flags = hard_local_irq_save();
963 1016
964 if (unlikely(!is_reserved(special_gpio, gpio, 0))) { 1017 if (unlikely(!is_reserved(special_gpio, gpio, 0))) {
965 gpio_error(gpio); 1018 gpio_error(gpio);
966 local_irq_restore_hw(flags); 1019 hard_local_irq_restore(flags);
967 return; 1020 return;
968 } 1021 }
969 1022
970 unreserve(special_gpio, gpio); 1023 unreserve(special_gpio, gpio);
971 unreserve(peri, gpio); 1024 unreserve(peri, gpio);
972 set_label(gpio, "free"); 1025 set_label(gpio, "free");
973 local_irq_restore_hw(flags); 1026 hard_local_irq_restore(flags);
974} 1027}
975EXPORT_SYMBOL(bfin_special_gpio_free); 1028EXPORT_SYMBOL(bfin_special_gpio_free);
976#endif 1029#endif
@@ -983,7 +1036,7 @@ int bfin_gpio_irq_request(unsigned gpio, const char *label)
983 if (check_gpio(gpio) < 0) 1036 if (check_gpio(gpio) < 0)
984 return -EINVAL; 1037 return -EINVAL;
985 1038
986 local_irq_save_hw(flags); 1039 flags = hard_local_irq_save();
987 1040
988 if (unlikely(is_reserved(peri, gpio, 1))) { 1041 if (unlikely(is_reserved(peri, gpio, 1))) {
989 if (system_state == SYSTEM_BOOTING) 1042 if (system_state == SYSTEM_BOOTING)
@@ -991,7 +1044,7 @@ int bfin_gpio_irq_request(unsigned gpio, const char *label)
991 printk(KERN_ERR 1044 printk(KERN_ERR
992 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", 1045 "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
993 gpio, get_label(gpio)); 1046 gpio, get_label(gpio));
994 local_irq_restore_hw(flags); 1047 hard_local_irq_restore(flags);
995 return -EBUSY; 1048 return -EBUSY;
996 } 1049 }
997 if (unlikely(is_reserved(gpio, gpio, 1))) 1050 if (unlikely(is_reserved(gpio, gpio, 1)))
@@ -1002,7 +1055,7 @@ int bfin_gpio_irq_request(unsigned gpio, const char *label)
1002 reserve(gpio_irq, gpio); 1055 reserve(gpio_irq, gpio);
1003 set_label(gpio, label); 1056 set_label(gpio, label);
1004 1057
1005 local_irq_restore_hw(flags); 1058 hard_local_irq_restore(flags);
1006 1059
1007 port_setup(gpio, GPIO_USAGE); 1060 port_setup(gpio, GPIO_USAGE);
1008 1061
@@ -1016,13 +1069,13 @@ void bfin_gpio_irq_free(unsigned gpio)
1016 if (check_gpio(gpio) < 0) 1069 if (check_gpio(gpio) < 0)
1017 return; 1070 return;
1018 1071
1019 local_irq_save_hw(flags); 1072 flags = hard_local_irq_save();
1020 1073
1021 if (unlikely(!is_reserved(gpio_irq, gpio, 0))) { 1074 if (unlikely(!is_reserved(gpio_irq, gpio, 0))) {
1022 if (system_state == SYSTEM_BOOTING) 1075 if (system_state == SYSTEM_BOOTING)
1023 dump_stack(); 1076 dump_stack();
1024 gpio_error(gpio); 1077 gpio_error(gpio);
1025 local_irq_restore_hw(flags); 1078 hard_local_irq_restore(flags);
1026 return; 1079 return;
1027 } 1080 }
1028 1081
@@ -1030,7 +1083,7 @@ void bfin_gpio_irq_free(unsigned gpio)
1030 1083
1031 set_label(gpio, "free"); 1084 set_label(gpio, "free");
1032 1085
1033 local_irq_restore_hw(flags); 1086 hard_local_irq_restore(flags);
1034} 1087}
1035 1088
1036static inline void __bfin_gpio_direction_input(unsigned gpio) 1089static inline void __bfin_gpio_direction_input(unsigned gpio)
@@ -1052,10 +1105,10 @@ int bfin_gpio_direction_input(unsigned gpio)
1052 return -EINVAL; 1105 return -EINVAL;
1053 } 1106 }
1054 1107
1055 local_irq_save_hw(flags); 1108 flags = hard_local_irq_save();
1056 __bfin_gpio_direction_input(gpio); 1109 __bfin_gpio_direction_input(gpio);
1057 AWA_DUMMY_READ(inen); 1110 AWA_DUMMY_READ(inen);
1058 local_irq_restore_hw(flags); 1111 hard_local_irq_restore(flags);
1059 1112
1060 return 0; 1113 return 0;
1061} 1114}
@@ -1070,9 +1123,9 @@ void bfin_gpio_irq_prepare(unsigned gpio)
1070 port_setup(gpio, GPIO_USAGE); 1123 port_setup(gpio, GPIO_USAGE);
1071 1124
1072#ifdef CONFIG_BF54x 1125#ifdef CONFIG_BF54x
1073 local_irq_save_hw(flags); 1126 flags = hard_local_irq_save();
1074 __bfin_gpio_direction_input(gpio); 1127 __bfin_gpio_direction_input(gpio);
1075 local_irq_restore_hw(flags); 1128 hard_local_irq_restore(flags);
1076#endif 1129#endif
1077} 1130}
1078 1131
@@ -1094,7 +1147,7 @@ int bfin_gpio_direction_output(unsigned gpio, int value)
1094 return -EINVAL; 1147 return -EINVAL;
1095 } 1148 }
1096 1149
1097 local_irq_save_hw(flags); 1150 flags = hard_local_irq_save();
1098 1151
1099 gpio_array[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio); 1152 gpio_array[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
1100 gpio_set_value(gpio, value); 1153 gpio_set_value(gpio, value);
@@ -1105,7 +1158,7 @@ int bfin_gpio_direction_output(unsigned gpio, int value)
1105#endif 1158#endif
1106 1159
1107 AWA_DUMMY_READ(dir); 1160 AWA_DUMMY_READ(dir);
1108 local_irq_restore_hw(flags); 1161 hard_local_irq_restore(flags);
1109 1162
1110 return 0; 1163 return 0;
1111} 1164}
@@ -1120,11 +1173,11 @@ int bfin_gpio_get_value(unsigned gpio)
1120 1173
1121 if (unlikely(get_gpio_edge(gpio))) { 1174 if (unlikely(get_gpio_edge(gpio))) {
1122 int ret; 1175 int ret;
1123 local_irq_save_hw(flags); 1176 flags = hard_local_irq_save();
1124 set_gpio_edge(gpio, 0); 1177 set_gpio_edge(gpio, 0);
1125 ret = get_gpio_data(gpio); 1178 ret = get_gpio_data(gpio);
1126 set_gpio_edge(gpio, 1); 1179 set_gpio_edge(gpio, 1);
1127 local_irq_restore_hw(flags); 1180 hard_local_irq_restore(flags);
1128 return ret; 1181 return ret;
1129 } else 1182 } else
1130 return get_gpio_data(gpio); 1183 return get_gpio_data(gpio);
@@ -1152,35 +1205,43 @@ void bfin_reset_boot_spi_cs(unsigned short pin)
1152} 1205}
1153 1206
1154#if defined(CONFIG_PROC_FS) 1207#if defined(CONFIG_PROC_FS)
1155static int gpio_proc_read(char *buf, char **start, off_t offset, 1208static int gpio_proc_show(struct seq_file *m, void *v)
1156 int len, int *unused_i, void *unused_v)
1157{ 1209{
1158 int c, irq, gpio, outlen = 0; 1210 int c, irq, gpio;
1159 1211
1160 for (c = 0; c < MAX_RESOURCES; c++) { 1212 for (c = 0; c < MAX_RESOURCES; c++) {
1161 irq = is_reserved(gpio_irq, c, 1); 1213 irq = is_reserved(gpio_irq, c, 1);
1162 gpio = is_reserved(gpio, c, 1); 1214 gpio = is_reserved(gpio, c, 1);
1163 if (!check_gpio(c) && (gpio || irq)) 1215 if (!check_gpio(c) && (gpio || irq))
1164 len = sprintf(buf, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c, 1216 seq_printf(m, "GPIO_%d: \t%s%s \t\tGPIO %s\n", c,
1165 get_label(c), (gpio && irq) ? " *" : "", 1217 get_label(c), (gpio && irq) ? " *" : "",
1166 get_gpio_dir(c) ? "OUTPUT" : "INPUT"); 1218 get_gpio_dir(c) ? "OUTPUT" : "INPUT");
1167 else if (is_reserved(peri, c, 1)) 1219 else if (is_reserved(peri, c, 1))
1168 len = sprintf(buf, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c)); 1220 seq_printf(m, "GPIO_%d: \t%s \t\tPeripheral\n", c, get_label(c));
1169 else 1221 else
1170 continue; 1222 continue;
1171 buf += len;
1172 outlen += len;
1173 } 1223 }
1174 return outlen; 1224
1225 return 0;
1175} 1226}
1176 1227
1228static int gpio_proc_open(struct inode *inode, struct file *file)
1229{
1230 return single_open(file, gpio_proc_show, NULL);
1231}
1232
1233static const struct file_operations gpio_proc_ops = {
1234 .open = gpio_proc_open,
1235 .read = seq_read,
1236 .llseek = seq_lseek,
1237 .release = single_release,
1238};
1239
1177static __init int gpio_register_proc(void) 1240static __init int gpio_register_proc(void)
1178{ 1241{
1179 struct proc_dir_entry *proc_gpio; 1242 struct proc_dir_entry *proc_gpio;
1180 1243
1181 proc_gpio = create_proc_entry("gpio", S_IRUGO, NULL); 1244 proc_gpio = proc_create("gpio", S_IRUGO, NULL, &gpio_proc_ops);
1182 if (proc_gpio)
1183 proc_gpio->read_proc = gpio_proc_read;
1184 return proc_gpio != NULL; 1245 return proc_gpio != NULL;
1185} 1246}
1186__initcall(gpio_register_proc); 1247__initcall(gpio_register_proc);
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 2c264b51566a..c446591b961d 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -11,6 +11,7 @@
11 11
12#include <asm/cacheflush.h> 12#include <asm/cacheflush.h>
13#include <asm/io.h> 13#include <asm/io.h>
14#include <asm/irq_handler.h>
14 15
15/* Allow people to have their own Blackfin exception handler in a module */ 16/* Allow people to have their own Blackfin exception handler in a module */
16EXPORT_SYMBOL(bfin_return_from_exception); 17EXPORT_SYMBOL(bfin_return_from_exception);
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 87b25b1b30ed..8de92299b3ee 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -318,7 +318,7 @@ void flush_switched_cplbs(unsigned int cpu)
318 318
319 nr_cplb_flush[cpu]++; 319 nr_cplb_flush[cpu]++;
320 320
321 local_irq_save_hw(flags); 321 flags = hard_local_irq_save();
322 _disable_icplb(); 322 _disable_icplb();
323 for (i = first_switched_icplb; i < MAX_CPLBS; i++) { 323 for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
324 icplb_tbl[cpu][i].data = 0; 324 icplb_tbl[cpu][i].data = 0;
@@ -332,7 +332,7 @@ void flush_switched_cplbs(unsigned int cpu)
332 bfin_write32(DCPLB_DATA0 + i * 4, 0); 332 bfin_write32(DCPLB_DATA0 + i * 4, 0);
333 } 333 }
334 _enable_dcplb(); 334 _enable_dcplb();
335 local_irq_restore_hw(flags); 335 hard_local_irq_restore(flags);
336 336
337} 337}
338 338
@@ -348,7 +348,7 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
348 return; 348 return;
349 } 349 }
350 350
351 local_irq_save_hw(flags); 351 flags = hard_local_irq_save();
352 current_rwx_mask[cpu] = masks; 352 current_rwx_mask[cpu] = masks;
353 353
354 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) { 354 if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
@@ -373,5 +373,5 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
373 addr += PAGE_SIZE; 373 addr += PAGE_SIZE;
374 } 374 }
375 _enable_dcplb(); 375 _enable_dcplb();
376 local_irq_restore_hw(flags); 376 hard_local_irq_restore(flags);
377} 377}
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index bfe75af4e8bd..886e00014d75 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -116,7 +116,7 @@ void __init generate_cplb_tables_all(void)
116 ((_ramend - uncached_end) >= 1 * 1024 * 1024)) 116 ((_ramend - uncached_end) >= 1 * 1024 * 1024))
117 dcplb_bounds[i_d].eaddr = uncached_end; 117 dcplb_bounds[i_d].eaddr = uncached_end;
118 else 118 else
119 dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024); 119 dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1);
120 dcplb_bounds[i_d++].data = SDRAM_DGENERIC; 120 dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
121 /* DMA uncached region. */ 121 /* DMA uncached region. */
122 if (DMA_UNCACHED_REGION) { 122 if (DMA_UNCACHED_REGION) {
diff --git a/arch/blackfin/kernel/debug-mmrs.c b/arch/blackfin/kernel/debug-mmrs.c
new file mode 100644
index 000000000000..fce4807ceef9
--- /dev/null
+++ b/arch/blackfin/kernel/debug-mmrs.c
@@ -0,0 +1,1860 @@
1/*
2 * debugfs interface to core/system MMRs
3 *
4 * Copyright 2007-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later
7 */
8
9#include <linux/debugfs.h>
10#include <linux/fs.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13
14#include <asm/blackfin.h>
15#include <asm/gpio.h>
16#include <asm/gptimers.h>
17#include <asm/bfin_can.h>
18#include <asm/bfin_dma.h>
19#include <asm/bfin_ppi.h>
20#include <asm/bfin_serial.h>
21#include <asm/bfin5xx_spi.h>
22#include <asm/bfin_twi.h>
23
24/* Common code defines PORT_MUX on us, so redirect the MMR back locally */
25#ifdef BFIN_PORT_MUX
26#undef PORT_MUX
27#define PORT_MUX BFIN_PORT_MUX
28#endif
29
30#define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)addr)
31#define d(name, bits, addr) _d(name, bits, addr, S_IRUSR|S_IWUSR)
32#define d_RO(name, bits, addr) _d(name, bits, addr, S_IRUSR)
33#define d_WO(name, bits, addr) _d(name, bits, addr, S_IWUSR)
34
35#define D_RO(name, bits) d_RO(#name, bits, name)
36#define D_WO(name, bits) d_WO(#name, bits, name)
37#define D32(name) d(#name, 32, name)
38#define D16(name) d(#name, 16, name)
39
40#define REGS_OFF(peri, mmr) offsetof(struct bfin_##peri##_regs, mmr)
41#define __REGS(peri, sname, rname) \
42 do { \
43 struct bfin_##peri##_regs r; \
44 void *addr = (void *)(base + REGS_OFF(peri, rname)); \
45 strcpy(_buf, sname); \
46 if (sizeof(r.rname) == 2) \
47 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, parent, addr); \
48 else \
49 debugfs_create_x32(buf, S_IRUSR|S_IWUSR, parent, addr); \
50 } while (0)
51#define REGS_STR_PFX(buf, pfx, num) \
52 ({ \
53 buf + (num >= 0 ? \
54 sprintf(buf, #pfx "%i_", num) : \
55 sprintf(buf, #pfx "_")); \
56 })
57#define REGS_STR_PFX_C(buf, pfx, num) \
58 ({ \
59 buf + (num >= 0 ? \
60 sprintf(buf, #pfx "%c_", 'A' + num) : \
61 sprintf(buf, #pfx "_")); \
62 })
63
64/*
65 * Core registers (not memory mapped)
66 */
67extern u32 last_seqstat;
68
69static int debug_cclk_get(void *data, u64 *val)
70{
71 *val = get_cclk();
72 return 0;
73}
74DEFINE_SIMPLE_ATTRIBUTE(fops_debug_cclk, debug_cclk_get, NULL, "0x%08llx\n");
75
76static int debug_sclk_get(void *data, u64 *val)
77{
78 *val = get_sclk();
79 return 0;
80}
81DEFINE_SIMPLE_ATTRIBUTE(fops_debug_sclk, debug_sclk_get, NULL, "0x%08llx\n");
82
83#define DEFINE_SYSREG(sr, pre, post) \
84static int sysreg_##sr##_get(void *data, u64 *val) \
85{ \
86 unsigned long tmp; \
87 pre; \
88 __asm__ __volatile__("%0 = " #sr ";" : "=d"(tmp)); \
89 *val = tmp; \
90 return 0; \
91} \
92static int sysreg_##sr##_set(void *data, u64 val) \
93{ \
94 unsigned long tmp = val; \
95 __asm__ __volatile__(#sr " = %0;" : : "d"(tmp)); \
96 post; \
97 return 0; \
98} \
99DEFINE_SIMPLE_ATTRIBUTE(fops_sysreg_##sr, sysreg_##sr##_get, sysreg_##sr##_set, "0x%08llx\n")
100
101DEFINE_SYSREG(cycles, , );
102DEFINE_SYSREG(cycles2, __asm__ __volatile__("%0 = cycles;" : "=d"(tmp)), );
103DEFINE_SYSREG(emudat, , );
104DEFINE_SYSREG(seqstat, , );
105DEFINE_SYSREG(syscfg, , CSYNC());
106#define D_SYSREG(sr) debugfs_create_file(#sr, S_IRUSR|S_IWUSR, parent, NULL, &fops_sysreg_##sr)
107
108/*
109 * CAN
110 */
111#define CAN_OFF(mmr) REGS_OFF(can, mmr)
112#define __CAN(uname, lname) __REGS(can, #uname, lname)
113static void __init __maybe_unused
114bfin_debug_mmrs_can(struct dentry *parent, unsigned long base, int num)
115{
116 static struct dentry *am, *mb;
117 int i, j;
118 char buf[32], *_buf = REGS_STR_PFX(buf, CAN, num);
119
120 if (!am) {
121 am = debugfs_create_dir("am", parent);
122 mb = debugfs_create_dir("mb", parent);
123 }
124
125 __CAN(MC1, mc1);
126 __CAN(MD1, md1);
127 __CAN(TRS1, trs1);
128 __CAN(TRR1, trr1);
129 __CAN(TA1, ta1);
130 __CAN(AA1, aa1);
131 __CAN(RMP1, rmp1);
132 __CAN(RML1, rml1);
133 __CAN(MBTIF1, mbtif1);
134 __CAN(MBRIF1, mbrif1);
135 __CAN(MBIM1, mbim1);
136 __CAN(RFH1, rfh1);
137 __CAN(OPSS1, opss1);
138
139 __CAN(MC2, mc2);
140 __CAN(MD2, md2);
141 __CAN(TRS2, trs2);
142 __CAN(TRR2, trr2);
143 __CAN(TA2, ta2);
144 __CAN(AA2, aa2);
145 __CAN(RMP2, rmp2);
146 __CAN(RML2, rml2);
147 __CAN(MBTIF2, mbtif2);
148 __CAN(MBRIF2, mbrif2);
149 __CAN(MBIM2, mbim2);
150 __CAN(RFH2, rfh2);
151 __CAN(OPSS2, opss2);
152
153 __CAN(CLOCK, clock);
154 __CAN(TIMING, timing);
155 __CAN(DEBUG, debug);
156 __CAN(STATUS, status);
157 __CAN(CEC, cec);
158 __CAN(GIS, gis);
159 __CAN(GIM, gim);
160 __CAN(GIF, gif);
161 __CAN(CONTROL, control);
162 __CAN(INTR, intr);
163 __CAN(VERSION, version);
164 __CAN(MBTD, mbtd);
165 __CAN(EWR, ewr);
166 __CAN(ESR, esr);
167 /*__CAN(UCREG, ucreg); no longer exists */
168 __CAN(UCCNT, uccnt);
169 __CAN(UCRC, ucrc);
170 __CAN(UCCNF, uccnf);
171 __CAN(VERSION2, version2);
172
173 for (i = 0; i < 32; ++i) {
174 sprintf(_buf, "AM%02iL", i);
175 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am,
176 (u16 *)(base + CAN_OFF(msk[i].aml)));
177 sprintf(_buf, "AM%02iH", i);
178 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am,
179 (u16 *)(base + CAN_OFF(msk[i].amh)));
180
181 for (j = 0; j < 3; ++j) {
182 sprintf(_buf, "MB%02i_DATA%i", i, j);
183 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
184 (u16 *)(base + CAN_OFF(chl[i].data[j*2])));
185 }
186 sprintf(_buf, "MB%02i_LENGTH", i);
187 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
188 (u16 *)(base + CAN_OFF(chl[i].dlc)));
189 sprintf(_buf, "MB%02i_TIMESTAMP", i);
190 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
191 (u16 *)(base + CAN_OFF(chl[i].tsv)));
192 sprintf(_buf, "MB%02i_ID0", i);
193 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
194 (u16 *)(base + CAN_OFF(chl[i].id0)));
195 sprintf(_buf, "MB%02i_ID1", i);
196 debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb,
197 (u16 *)(base + CAN_OFF(chl[i].id1)));
198 }
199}
200#define CAN(num) bfin_debug_mmrs_can(parent, CAN##num##_MC1, num)
201
202/*
203 * DMA
204 */
205#define __DMA(uname, lname) __REGS(dma, #uname, lname)
206static void __init __maybe_unused
207bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdma, const char *pfx)
208{
209 char buf[32], *_buf;
210
211 if (mdma)
212 _buf = buf + sprintf(buf, "%s_%c%i_", pfx, mdma, num);
213 else
214 _buf = buf + sprintf(buf, "%s%i_", pfx, num);
215
216 __DMA(NEXT_DESC_PTR, next_desc_ptr);
217 __DMA(START_ADDR, start_addr);
218 __DMA(CONFIG, config);
219 __DMA(X_COUNT, x_count);
220 __DMA(X_MODIFY, x_modify);
221 __DMA(Y_COUNT, y_count);
222 __DMA(Y_MODIFY, y_modify);
223 __DMA(CURR_DESC_PTR, curr_desc_ptr);
224 __DMA(CURR_ADDR, curr_addr);
225 __DMA(IRQ_STATUS, irq_status);
226 __DMA(PERIPHERAL_MAP, peripheral_map);
227 __DMA(CURR_X_COUNT, curr_x_count);
228 __DMA(CURR_Y_COUNT, curr_y_count);
229}
230#define _DMA(num, base, mdma, pfx) bfin_debug_mmrs_dma(parent, base, num, mdma, pfx "DMA")
231#define DMA(num) _DMA(num, DMA##num##_NEXT_DESC_PTR, 0, "")
232#define _MDMA(num, x) \
233 do { \
234 _DMA(num, x##DMA_D##num##_NEXT_DESC_PTR, 'D', #x); \
235 _DMA(num, x##DMA_S##num##_NEXT_DESC_PTR, 'S', #x); \
236 } while (0)
237#define MDMA(num) _MDMA(num, M)
238#define IMDMA(num) _MDMA(num, IM)
239
240/*
241 * EPPI
242 */
243#define __EPPI(uname, lname) __REGS(eppi, #uname, lname)
244static void __init __maybe_unused
245bfin_debug_mmrs_eppi(struct dentry *parent, unsigned long base, int num)
246{
247 char buf[32], *_buf = REGS_STR_PFX(buf, EPPI, num);
248 __EPPI(STATUS, status);
249 __EPPI(HCOUNT, hcount);
250 __EPPI(HDELAY, hdelay);
251 __EPPI(VCOUNT, vcount);
252 __EPPI(VDELAY, vdelay);
253 __EPPI(FRAME, frame);
254 __EPPI(LINE, line);
255 __EPPI(CLKDIV, clkdiv);
256 __EPPI(CONTROL, control);
257 __EPPI(FS1W_HBL, fs1w_hbl);
258 __EPPI(FS1P_AVPL, fs1p_avpl);
259 __EPPI(FS2W_LVB, fs2w_lvb);
260 __EPPI(FS2P_LAVF, fs2p_lavf);
261 __EPPI(CLIP, clip);
262}
263#define EPPI(num) bfin_debug_mmrs_eppi(parent, EPPI##num##_STATUS, num)
264
265/*
266 * General Purpose Timers
267 */
268#define __GPTIMER(uname, lname) __REGS(gptimer, #uname, lname)
269static void __init __maybe_unused
270bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num)
271{
272 char buf[32], *_buf = REGS_STR_PFX(buf, TIMER, num);
273 __GPTIMER(CONFIG, config);
274 __GPTIMER(COUNTER, counter);
275 __GPTIMER(PERIOD, period);
276 __GPTIMER(WIDTH, width);
277}
278#define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num)
279
280/*
281 * Handshake MDMA
282 */
283#define __HMDMA(uname, lname) __REGS(hmdma, #uname, lname)
284static void __init __maybe_unused
285bfin_debug_mmrs_hmdma(struct dentry *parent, unsigned long base, int num)
286{
287 char buf[32], *_buf = REGS_STR_PFX(buf, HMDMA, num);
288 __HMDMA(CONTROL, control);
289 __HMDMA(ECINIT, ecinit);
290 __HMDMA(BCINIT, bcinit);
291 __HMDMA(ECURGENT, ecurgent);
292 __HMDMA(ECOVERFLOW, ecoverflow);
293 __HMDMA(ECOUNT, ecount);
294 __HMDMA(BCOUNT, bcount);
295}
296#define HMDMA(num) bfin_debug_mmrs_hmdma(parent, HMDMA##num##_CONTROL, num)
297
298/*
299 * Port/GPIO
300 */
301#define bfin_gpio_regs gpio_port_t
302#define __PORT(uname, lname) __REGS(gpio, #uname, lname)
303static void __init __maybe_unused
304bfin_debug_mmrs_port(struct dentry *parent, unsigned long base, int num)
305{
306 char buf[32], *_buf;
307#ifdef __ADSPBF54x__
308 _buf = REGS_STR_PFX_C(buf, PORT, num);
309 __PORT(FER, port_fer);
310 __PORT(SET, data_set);
311 __PORT(CLEAR, data_clear);
312 __PORT(DIR_SET, dir_set);
313 __PORT(DIR_CLEAR, dir_clear);
314 __PORT(INEN, inen);
315 __PORT(MUX, port_mux);
316#else
317 _buf = buf + sprintf(buf, "PORT%cIO_", num);
318 __PORT(CLEAR, data_clear);
319 __PORT(SET, data_set);
320 __PORT(TOGGLE, toggle);
321 __PORT(MASKA, maska);
322 __PORT(MASKA_CLEAR, maska_clear);
323 __PORT(MASKA_SET, maska_set);
324 __PORT(MASKA_TOGGLE, maska_toggle);
325 __PORT(MASKB, maskb);
326 __PORT(MASKB_CLEAR, maskb_clear);
327 __PORT(MASKB_SET, maskb_set);
328 __PORT(MASKB_TOGGLE, maskb_toggle);
329 __PORT(DIR, dir);
330 __PORT(POLAR, polar);
331 __PORT(EDGE, edge);
332 __PORT(BOTH, both);
333 __PORT(INEN, inen);
334#endif
335 _buf[-1] = '\0';
336 d(buf, 16, base + REGS_OFF(gpio, data));
337}
338#define PORT(base, num) bfin_debug_mmrs_port(parent, base, num)
339
340/*
341 * PPI
342 */
343#define __PPI(uname, lname) __REGS(ppi, #uname, lname)
344static void __init __maybe_unused
345bfin_debug_mmrs_ppi(struct dentry *parent, unsigned long base, int num)
346{
347 char buf[32], *_buf = REGS_STR_PFX(buf, PPI, num);
348 __PPI(CONTROL, control);
349 __PPI(STATUS, status);
350 __PPI(COUNT, count);
351 __PPI(DELAY, delay);
352 __PPI(FRAME, frame);
353}
354#define PPI(num) bfin_debug_mmrs_ppi(parent, PPI##num##_CONTROL, num)
355
356/*
357 * SPI
358 */
359#define __SPI(uname, lname) __REGS(spi, #uname, lname)
360static void __init __maybe_unused
361bfin_debug_mmrs_spi(struct dentry *parent, unsigned long base, int num)
362{
363 char buf[32], *_buf = REGS_STR_PFX(buf, SPI, num);
364 __SPI(CTL, ctl);
365 __SPI(FLG, flg);
366 __SPI(STAT, stat);
367 __SPI(TDBR, tdbr);
368 __SPI(RDBR, rdbr);
369 __SPI(BAUD, baud);
370 __SPI(SHADOW, shadow);
371}
372#define SPI(num) bfin_debug_mmrs_spi(parent, SPI##num##_REGBASE, num)
373
374/*
375 * SPORT
376 */
377static inline int sport_width(void *mmr)
378{
379 unsigned long lmmr = (unsigned long)mmr;
380 if ((lmmr & 0xff) == 0x10)
381 /* SPORT#_TX has 0x10 offset -> SPORT#_TCR2 has 0x04 offset */
382 lmmr -= 0xc;
383 else
384 /* SPORT#_RX has 0x18 offset -> SPORT#_RCR2 has 0x24 offset */
385 lmmr += 0xc;
386 /* extract SLEN field from control register 2 and add 1 */
387 return (bfin_read16(lmmr) & 0x1f) + 1;
388}
389static int sport_set(void *mmr, u64 val)
390{
391 unsigned long flags;
392 local_irq_save(flags);
393 if (sport_width(mmr) <= 16)
394 bfin_write16(mmr, val);
395 else
396 bfin_write32(mmr, val);
397 local_irq_restore(flags);
398 return 0;
399}
400static int sport_get(void *mmr, u64 *val)
401{
402 unsigned long flags;
403 local_irq_save(flags);
404 if (sport_width(mmr) <= 16)
405 *val = bfin_read16(mmr);
406 else
407 *val = bfin_read32(mmr);
408 local_irq_restore(flags);
409 return 0;
410}
411DEFINE_SIMPLE_ATTRIBUTE(fops_sport, sport_get, sport_set, "0x%08llx\n");
412/*DEFINE_SIMPLE_ATTRIBUTE(fops_sport_ro, sport_get, NULL, "0x%08llx\n");*/
413DEFINE_SIMPLE_ATTRIBUTE(fops_sport_wo, NULL, sport_set, "0x%08llx\n");
414#define SPORT_OFF(mmr) (SPORT0_##mmr - SPORT0_TCR1)
415#define _D_SPORT(name, perms, fops) \
416 do { \
417 strcpy(_buf, #name); \
418 debugfs_create_file(buf, perms, parent, (void *)(base + SPORT_OFF(name)), fops); \
419 } while (0)
420#define __SPORT_RW(name) _D_SPORT(name, S_IRUSR|S_IWUSR, &fops_sport)
421#define __SPORT_RO(name) _D_SPORT(name, S_IRUSR, &fops_sport_ro)
422#define __SPORT_WO(name) _D_SPORT(name, S_IWUSR, &fops_sport_wo)
423#define __SPORT(name, bits) \
424 do { \
425 strcpy(_buf, #name); \
426 debugfs_create_x##bits(buf, S_IRUSR|S_IWUSR, parent, (u##bits *)(base + SPORT_OFF(name))); \
427 } while (0)
428static void __init __maybe_unused
429bfin_debug_mmrs_sport(struct dentry *parent, unsigned long base, int num)
430{
431 char buf[32], *_buf = REGS_STR_PFX(buf, SPORT, num);
432 __SPORT(CHNL, 16);
433 __SPORT(MCMC1, 16);
434 __SPORT(MCMC2, 16);
435 __SPORT(MRCS0, 32);
436 __SPORT(MRCS1, 32);
437 __SPORT(MRCS2, 32);
438 __SPORT(MRCS3, 32);
439 __SPORT(MTCS0, 32);
440 __SPORT(MTCS1, 32);
441 __SPORT(MTCS2, 32);
442 __SPORT(MTCS3, 32);
443 __SPORT(RCLKDIV, 16);
444 __SPORT(RCR1, 16);
445 __SPORT(RCR2, 16);
446 __SPORT(RFSDIV, 16);
447 __SPORT_RW(RX);
448 __SPORT(STAT, 16);
449 __SPORT(TCLKDIV, 16);
450 __SPORT(TCR1, 16);
451 __SPORT(TCR2, 16);
452 __SPORT(TFSDIV, 16);
453 __SPORT_WO(TX);
454}
455#define SPORT(num) bfin_debug_mmrs_sport(parent, SPORT##num##_TCR1, num)
456
457/*
458 * TWI
459 */
460#define __TWI(uname, lname) __REGS(twi, #uname, lname)
461static void __init __maybe_unused
462bfin_debug_mmrs_twi(struct dentry *parent, unsigned long base, int num)
463{
464 char buf[32], *_buf = REGS_STR_PFX(buf, TWI, num);
465 __TWI(CLKDIV, clkdiv);
466 __TWI(CONTROL, control);
467 __TWI(SLAVE_CTL, slave_ctl);
468 __TWI(SLAVE_STAT, slave_stat);
469 __TWI(SLAVE_ADDR, slave_addr);
470 __TWI(MASTER_CTL, master_ctl);
471 __TWI(MASTER_STAT, master_stat);
472 __TWI(MASTER_ADDR, master_addr);
473 __TWI(INT_STAT, int_stat);
474 __TWI(INT_MASK, int_mask);
475 __TWI(FIFO_CTL, fifo_ctl);
476 __TWI(FIFO_STAT, fifo_stat);
477 __TWI(XMT_DATA8, xmt_data8);
478 __TWI(XMT_DATA16, xmt_data16);
479 __TWI(RCV_DATA8, rcv_data8);
480 __TWI(RCV_DATA16, rcv_data16);
481}
482#define TWI(num) bfin_debug_mmrs_twi(parent, TWI##num##_CLKDIV, num)
483
484/*
485 * UART
486 */
487#define __UART(uname, lname) __REGS(uart, #uname, lname)
488static void __init __maybe_unused
489bfin_debug_mmrs_uart(struct dentry *parent, unsigned long base, int num)
490{
491 char buf[32], *_buf = REGS_STR_PFX(buf, UART, num);
492#ifdef BFIN_UART_BF54X_STYLE
493 __UART(DLL, dll);
494 __UART(DLH, dlh);
495 __UART(GCTL, gctl);
496 __UART(LCR, lcr);
497 __UART(MCR, mcr);
498 __UART(LSR, lsr);
499 __UART(MSR, msr);
500 __UART(SCR, scr);
501 __UART(IER_SET, ier_set);
502 __UART(IER_CLEAR, ier_clear);
503 __UART(THR, thr);
504 __UART(RBR, rbr);
505#else
506 __UART(DLL, dll);
507 __UART(THR, thr);
508 __UART(RBR, rbr);
509 __UART(DLH, dlh);
510 __UART(IER, ier);
511 __UART(IIR, iir);
512 __UART(LCR, lcr);
513 __UART(MCR, mcr);
514 __UART(LSR, lsr);
515 __UART(MSR, msr);
516 __UART(SCR, scr);
517 __UART(GCTL, gctl);
518#endif
519}
520#define UART(num) bfin_debug_mmrs_uart(parent, UART##num##_DLL, num)
521
522/*
523 * The actual debugfs generation
524 */
525static struct dentry *debug_mmrs_dentry;
526
527static int __init bfin_debug_mmrs_init(void)
528{
529 struct dentry *top, *parent;
530
531 pr_info("debug-mmrs: setting up Blackfin MMR debugfs\n");
532
533 top = debugfs_create_dir("blackfin", NULL);
534 if (top == NULL)
535 return -1;
536
537 parent = debugfs_create_dir("core_regs", top);
538 debugfs_create_file("cclk", S_IRUSR, parent, NULL, &fops_debug_cclk);
539 debugfs_create_file("sclk", S_IRUSR, parent, NULL, &fops_debug_sclk);
540 debugfs_create_x32("last_seqstat", S_IRUSR, parent, &last_seqstat);
541 D_SYSREG(cycles);
542 D_SYSREG(cycles2);
543 D_SYSREG(emudat);
544 D_SYSREG(seqstat);
545 D_SYSREG(syscfg);
546
547 /* Core MMRs */
548 parent = debugfs_create_dir("ctimer", top);
549 D32(TCNTL);
550 D32(TCOUNT);
551 D32(TPERIOD);
552 D32(TSCALE);
553
554 parent = debugfs_create_dir("cec", top);
555 D32(EVT0);
556 D32(EVT1);
557 D32(EVT2);
558 D32(EVT3);
559 D32(EVT4);
560 D32(EVT5);
561 D32(EVT6);
562 D32(EVT7);
563 D32(EVT8);
564 D32(EVT9);
565 D32(EVT10);
566 D32(EVT11);
567 D32(EVT12);
568 D32(EVT13);
569 D32(EVT14);
570 D32(EVT15);
571 D32(EVT_OVERRIDE);
572 D32(IMASK);
573 D32(IPEND);
574 D32(ILAT);
575 D32(IPRIO);
576
577 parent = debugfs_create_dir("debug", top);
578 D32(DBGSTAT);
579 D32(DSPID);
580
581 parent = debugfs_create_dir("mmu", top);
582 D32(SRAM_BASE_ADDRESS);
583 D32(DCPLB_ADDR0);
584 D32(DCPLB_ADDR10);
585 D32(DCPLB_ADDR11);
586 D32(DCPLB_ADDR12);
587 D32(DCPLB_ADDR13);
588 D32(DCPLB_ADDR14);
589 D32(DCPLB_ADDR15);
590 D32(DCPLB_ADDR1);
591 D32(DCPLB_ADDR2);
592 D32(DCPLB_ADDR3);
593 D32(DCPLB_ADDR4);
594 D32(DCPLB_ADDR5);
595 D32(DCPLB_ADDR6);
596 D32(DCPLB_ADDR7);
597 D32(DCPLB_ADDR8);
598 D32(DCPLB_ADDR9);
599 D32(DCPLB_DATA0);
600 D32(DCPLB_DATA10);
601 D32(DCPLB_DATA11);
602 D32(DCPLB_DATA12);
603 D32(DCPLB_DATA13);
604 D32(DCPLB_DATA14);
605 D32(DCPLB_DATA15);
606 D32(DCPLB_DATA1);
607 D32(DCPLB_DATA2);
608 D32(DCPLB_DATA3);
609 D32(DCPLB_DATA4);
610 D32(DCPLB_DATA5);
611 D32(DCPLB_DATA6);
612 D32(DCPLB_DATA7);
613 D32(DCPLB_DATA8);
614 D32(DCPLB_DATA9);
615 D32(DCPLB_FAULT_ADDR);
616 D32(DCPLB_STATUS);
617 D32(DMEM_CONTROL);
618 D32(DTEST_COMMAND);
619 D32(DTEST_DATA0);
620 D32(DTEST_DATA1);
621
622 D32(ICPLB_ADDR0);
623 D32(ICPLB_ADDR1);
624 D32(ICPLB_ADDR2);
625 D32(ICPLB_ADDR3);
626 D32(ICPLB_ADDR4);
627 D32(ICPLB_ADDR5);
628 D32(ICPLB_ADDR6);
629 D32(ICPLB_ADDR7);
630 D32(ICPLB_ADDR8);
631 D32(ICPLB_ADDR9);
632 D32(ICPLB_ADDR10);
633 D32(ICPLB_ADDR11);
634 D32(ICPLB_ADDR12);
635 D32(ICPLB_ADDR13);
636 D32(ICPLB_ADDR14);
637 D32(ICPLB_ADDR15);
638 D32(ICPLB_DATA0);
639 D32(ICPLB_DATA1);
640 D32(ICPLB_DATA2);
641 D32(ICPLB_DATA3);
642 D32(ICPLB_DATA4);
643 D32(ICPLB_DATA5);
644 D32(ICPLB_DATA6);
645 D32(ICPLB_DATA7);
646 D32(ICPLB_DATA8);
647 D32(ICPLB_DATA9);
648 D32(ICPLB_DATA10);
649 D32(ICPLB_DATA11);
650 D32(ICPLB_DATA12);
651 D32(ICPLB_DATA13);
652 D32(ICPLB_DATA14);
653 D32(ICPLB_DATA15);
654 D32(ICPLB_FAULT_ADDR);
655 D32(ICPLB_STATUS);
656 D32(IMEM_CONTROL);
657 if (!ANOMALY_05000481) {
658 D32(ITEST_COMMAND);
659 D32(ITEST_DATA0);
660 D32(ITEST_DATA1);
661 }
662
663 parent = debugfs_create_dir("perf", top);
664 D32(PFCNTR0);
665 D32(PFCNTR1);
666 D32(PFCTL);
667
668 parent = debugfs_create_dir("trace", top);
669 D32(TBUF);
670 D32(TBUFCTL);
671 D32(TBUFSTAT);
672
673 parent = debugfs_create_dir("watchpoint", top);
674 D32(WPIACTL);
675 D32(WPIA0);
676 D32(WPIA1);
677 D32(WPIA2);
678 D32(WPIA3);
679 D32(WPIA4);
680 D32(WPIA5);
681 D32(WPIACNT0);
682 D32(WPIACNT1);
683 D32(WPIACNT2);
684 D32(WPIACNT3);
685 D32(WPIACNT4);
686 D32(WPIACNT5);
687 D32(WPDACTL);
688 D32(WPDA0);
689 D32(WPDA1);
690 D32(WPDACNT0);
691 D32(WPDACNT1);
692 D32(WPSTAT);
693
694 /* System MMRs */
695#ifdef ATAPI_CONTROL
696 parent = debugfs_create_dir("atapi", top);
697 D16(ATAPI_CONTROL);
698 D16(ATAPI_DEV_ADDR);
699 D16(ATAPI_DEV_RXBUF);
700 D16(ATAPI_DEV_TXBUF);
701 D16(ATAPI_DMA_TFRCNT);
702 D16(ATAPI_INT_MASK);
703 D16(ATAPI_INT_STATUS);
704 D16(ATAPI_LINE_STATUS);
705 D16(ATAPI_MULTI_TIM_0);
706 D16(ATAPI_MULTI_TIM_1);
707 D16(ATAPI_MULTI_TIM_2);
708 D16(ATAPI_PIO_TFRCNT);
709 D16(ATAPI_PIO_TIM_0);
710 D16(ATAPI_PIO_TIM_1);
711 D16(ATAPI_REG_TIM_0);
712 D16(ATAPI_SM_STATE);
713 D16(ATAPI_STATUS);
714 D16(ATAPI_TERMINATE);
715 D16(ATAPI_UDMAOUT_TFRCNT);
716 D16(ATAPI_ULTRA_TIM_0);
717 D16(ATAPI_ULTRA_TIM_1);
718 D16(ATAPI_ULTRA_TIM_2);
719 D16(ATAPI_ULTRA_TIM_3);
720 D16(ATAPI_UMAIN_TFRCNT);
721 D16(ATAPI_XFER_LEN);
722#endif
723
724#if defined(CAN_MC1) || defined(CAN0_MC1) || defined(CAN1_MC1)
725 parent = debugfs_create_dir("can", top);
726# ifdef CAN_MC1
727 bfin_debug_mmrs_can(parent, CAN_MC1, -1);
728# endif
729# ifdef CAN0_MC1
730 CAN(0);
731# endif
732# ifdef CAN1_MC1
733 CAN(1);
734# endif
735#endif
736
737#ifdef CNT_COMMAND
738 parent = debugfs_create_dir("counter", top);
739 D16(CNT_COMMAND);
740 D16(CNT_CONFIG);
741 D32(CNT_COUNTER);
742 D16(CNT_DEBOUNCE);
743 D16(CNT_IMASK);
744 D32(CNT_MAX);
745 D32(CNT_MIN);
746 D16(CNT_STATUS);
747#endif
748
749 parent = debugfs_create_dir("dmac", top);
750#ifdef DMA_TC_CNT
751 D16(DMAC_TC_CNT);
752 D16(DMAC_TC_PER);
753#endif
754#ifdef DMAC0_TC_CNT
755 D16(DMAC0_TC_CNT);
756 D16(DMAC0_TC_PER);
757#endif
758#ifdef DMAC1_TC_CNT
759 D16(DMAC1_TC_CNT);
760 D16(DMAC1_TC_PER);
761#endif
762#ifdef DMAC1_PERIMUX
763 D16(DMAC1_PERIMUX);
764#endif
765
766#ifdef __ADSPBF561__
767 /* XXX: should rewrite the MMR map */
768# define DMA0_NEXT_DESC_PTR DMA2_0_NEXT_DESC_PTR
769# define DMA1_NEXT_DESC_PTR DMA2_1_NEXT_DESC_PTR
770# define DMA2_NEXT_DESC_PTR DMA2_2_NEXT_DESC_PTR
771# define DMA3_NEXT_DESC_PTR DMA2_3_NEXT_DESC_PTR
772# define DMA4_NEXT_DESC_PTR DMA2_4_NEXT_DESC_PTR
773# define DMA5_NEXT_DESC_PTR DMA2_5_NEXT_DESC_PTR
774# define DMA6_NEXT_DESC_PTR DMA2_6_NEXT_DESC_PTR
775# define DMA7_NEXT_DESC_PTR DMA2_7_NEXT_DESC_PTR
776# define DMA8_NEXT_DESC_PTR DMA2_8_NEXT_DESC_PTR
777# define DMA9_NEXT_DESC_PTR DMA2_9_NEXT_DESC_PTR
778# define DMA10_NEXT_DESC_PTR DMA2_10_NEXT_DESC_PTR
779# define DMA11_NEXT_DESC_PTR DMA2_11_NEXT_DESC_PTR
780# define DMA12_NEXT_DESC_PTR DMA1_0_NEXT_DESC_PTR
781# define DMA13_NEXT_DESC_PTR DMA1_1_NEXT_DESC_PTR
782# define DMA14_NEXT_DESC_PTR DMA1_2_NEXT_DESC_PTR
783# define DMA15_NEXT_DESC_PTR DMA1_3_NEXT_DESC_PTR
784# define DMA16_NEXT_DESC_PTR DMA1_4_NEXT_DESC_PTR
785# define DMA17_NEXT_DESC_PTR DMA1_5_NEXT_DESC_PTR
786# define DMA18_NEXT_DESC_PTR DMA1_6_NEXT_DESC_PTR
787# define DMA19_NEXT_DESC_PTR DMA1_7_NEXT_DESC_PTR
788# define DMA20_NEXT_DESC_PTR DMA1_8_NEXT_DESC_PTR
789# define DMA21_NEXT_DESC_PTR DMA1_9_NEXT_DESC_PTR
790# define DMA22_NEXT_DESC_PTR DMA1_10_NEXT_DESC_PTR
791# define DMA23_NEXT_DESC_PTR DMA1_11_NEXT_DESC_PTR
792#endif
793 parent = debugfs_create_dir("dma", top);
794 DMA(0);
795 DMA(1);
796 DMA(1);
797 DMA(2);
798 DMA(3);
799 DMA(4);
800 DMA(5);
801 DMA(6);
802 DMA(7);
803#ifdef DMA8_NEXT_DESC_PTR
804 DMA(8);
805 DMA(9);
806 DMA(10);
807 DMA(11);
808#endif
809#ifdef DMA12_NEXT_DESC_PTR
810 DMA(12);
811 DMA(13);
812 DMA(14);
813 DMA(15);
814 DMA(16);
815 DMA(17);
816 DMA(18);
817 DMA(19);
818#endif
819#ifdef DMA20_NEXT_DESC_PTR
820 DMA(20);
821 DMA(21);
822 DMA(22);
823 DMA(23);
824#endif
825
826 parent = debugfs_create_dir("ebiu_amc", top);
827 D32(EBIU_AMBCTL0);
828 D32(EBIU_AMBCTL1);
829 D16(EBIU_AMGCTL);
830#ifdef EBIU_MBSCTL
831 D16(EBIU_MBSCTL);
832 D32(EBIU_ARBSTAT);
833 D32(EBIU_MODE);
834 D16(EBIU_FCTL);
835#endif
836
837#ifdef EBIU_SDGCTL
838 parent = debugfs_create_dir("ebiu_sdram", top);
839# ifdef __ADSPBF561__
840 D32(EBIU_SDBCTL);
841# else
842 D16(EBIU_SDBCTL);
843# endif
844 D32(EBIU_SDGCTL);
845 D16(EBIU_SDRRC);
846 D16(EBIU_SDSTAT);
847#endif
848
849#ifdef EBIU_DDRACCT
850 parent = debugfs_create_dir("ebiu_ddr", top);
851 D32(EBIU_DDRACCT);
852 D32(EBIU_DDRARCT);
853 D32(EBIU_DDRBRC0);
854 D32(EBIU_DDRBRC1);
855 D32(EBIU_DDRBRC2);
856 D32(EBIU_DDRBRC3);
857 D32(EBIU_DDRBRC4);
858 D32(EBIU_DDRBRC5);
859 D32(EBIU_DDRBRC6);
860 D32(EBIU_DDRBRC7);
861 D32(EBIU_DDRBWC0);
862 D32(EBIU_DDRBWC1);
863 D32(EBIU_DDRBWC2);
864 D32(EBIU_DDRBWC3);
865 D32(EBIU_DDRBWC4);
866 D32(EBIU_DDRBWC5);
867 D32(EBIU_DDRBWC6);
868 D32(EBIU_DDRBWC7);
869 D32(EBIU_DDRCTL0);
870 D32(EBIU_DDRCTL1);
871 D32(EBIU_DDRCTL2);
872 D32(EBIU_DDRCTL3);
873 D32(EBIU_DDRGC0);
874 D32(EBIU_DDRGC1);
875 D32(EBIU_DDRGC2);
876 D32(EBIU_DDRGC3);
877 D32(EBIU_DDRMCCL);
878 D32(EBIU_DDRMCEN);
879 D32(EBIU_DDRQUE);
880 D32(EBIU_DDRTACT);
881 D32(EBIU_ERRADD);
882 D16(EBIU_ERRMST);
883 D16(EBIU_RSTCTL);
884#endif
885
886#ifdef EMAC_ADDRHI
887 parent = debugfs_create_dir("emac", top);
888 D32(EMAC_ADDRHI);
889 D32(EMAC_ADDRLO);
890 D32(EMAC_FLC);
891 D32(EMAC_HASHHI);
892 D32(EMAC_HASHLO);
893 D32(EMAC_MMC_CTL);
894 D32(EMAC_MMC_RIRQE);
895 D32(EMAC_MMC_RIRQS);
896 D32(EMAC_MMC_TIRQE);
897 D32(EMAC_MMC_TIRQS);
898 D32(EMAC_OPMODE);
899 D32(EMAC_RXC_ALIGN);
900 D32(EMAC_RXC_ALLFRM);
901 D32(EMAC_RXC_ALLOCT);
902 D32(EMAC_RXC_BROAD);
903 D32(EMAC_RXC_DMAOVF);
904 D32(EMAC_RXC_EQ64);
905 D32(EMAC_RXC_FCS);
906 D32(EMAC_RXC_GE1024);
907 D32(EMAC_RXC_LNERRI);
908 D32(EMAC_RXC_LNERRO);
909 D32(EMAC_RXC_LONG);
910 D32(EMAC_RXC_LT1024);
911 D32(EMAC_RXC_LT128);
912 D32(EMAC_RXC_LT256);
913 D32(EMAC_RXC_LT512);
914 D32(EMAC_RXC_MACCTL);
915 D32(EMAC_RXC_MULTI);
916 D32(EMAC_RXC_OCTET);
917 D32(EMAC_RXC_OK);
918 D32(EMAC_RXC_OPCODE);
919 D32(EMAC_RXC_PAUSE);
920 D32(EMAC_RXC_SHORT);
921 D32(EMAC_RXC_TYPED);
922 D32(EMAC_RXC_UNICST);
923 D32(EMAC_RX_IRQE);
924 D32(EMAC_RX_STAT);
925 D32(EMAC_RX_STKY);
926 D32(EMAC_STAADD);
927 D32(EMAC_STADAT);
928 D32(EMAC_SYSCTL);
929 D32(EMAC_SYSTAT);
930 D32(EMAC_TXC_1COL);
931 D32(EMAC_TXC_ABORT);
932 D32(EMAC_TXC_ALLFRM);
933 D32(EMAC_TXC_ALLOCT);
934 D32(EMAC_TXC_BROAD);
935 D32(EMAC_TXC_CRSERR);
936 D32(EMAC_TXC_DEFER);
937 D32(EMAC_TXC_DMAUND);
938 D32(EMAC_TXC_EQ64);
939 D32(EMAC_TXC_GE1024);
940 D32(EMAC_TXC_GT1COL);
941 D32(EMAC_TXC_LATECL);
942 D32(EMAC_TXC_LT1024);
943 D32(EMAC_TXC_LT128);
944 D32(EMAC_TXC_LT256);
945 D32(EMAC_TXC_LT512);
946 D32(EMAC_TXC_MACCTL);
947 D32(EMAC_TXC_MULTI);
948 D32(EMAC_TXC_OCTET);
949 D32(EMAC_TXC_OK);
950 D32(EMAC_TXC_UNICST);
951 D32(EMAC_TXC_XS_COL);
952 D32(EMAC_TXC_XS_DFR);
953 D32(EMAC_TX_IRQE);
954 D32(EMAC_TX_STAT);
955 D32(EMAC_TX_STKY);
956 D32(EMAC_VLAN1);
957 D32(EMAC_VLAN2);
958 D32(EMAC_WKUP_CTL);
959 D32(EMAC_WKUP_FFCMD);
960 D32(EMAC_WKUP_FFCRC0);
961 D32(EMAC_WKUP_FFCRC1);
962 D32(EMAC_WKUP_FFMSK0);
963 D32(EMAC_WKUP_FFMSK1);
964 D32(EMAC_WKUP_FFMSK2);
965 D32(EMAC_WKUP_FFMSK3);
966 D32(EMAC_WKUP_FFOFF);
967# ifdef EMAC_PTP_ACCR
968 D32(EMAC_PTP_ACCR);
969 D32(EMAC_PTP_ADDEND);
970 D32(EMAC_PTP_ALARMHI);
971 D32(EMAC_PTP_ALARMLO);
972 D16(EMAC_PTP_CTL);
973 D32(EMAC_PTP_FOFF);
974 D32(EMAC_PTP_FV1);
975 D32(EMAC_PTP_FV2);
976 D32(EMAC_PTP_FV3);
977 D16(EMAC_PTP_ID_OFF);
978 D32(EMAC_PTP_ID_SNAP);
979 D16(EMAC_PTP_IE);
980 D16(EMAC_PTP_ISTAT);
981 D32(EMAC_PTP_OFFSET);
982 D32(EMAC_PTP_PPS_PERIOD);
983 D32(EMAC_PTP_PPS_STARTHI);
984 D32(EMAC_PTP_PPS_STARTLO);
985 D32(EMAC_PTP_RXSNAPHI);
986 D32(EMAC_PTP_RXSNAPLO);
987 D32(EMAC_PTP_TIMEHI);
988 D32(EMAC_PTP_TIMELO);
989 D32(EMAC_PTP_TXSNAPHI);
990 D32(EMAC_PTP_TXSNAPLO);
991# endif
992#endif
993
994#if defined(EPPI0_STATUS) || defined(EPPI1_STATUS) || defined(EPPI2_STATUS)
995 parent = debugfs_create_dir("eppi", top);
996# ifdef EPPI0_STATUS
997 EPPI(0);
998# endif
999# ifdef EPPI1_STATUS
1000 EPPI(1);
1001# endif
1002# ifdef EPPI2_STATUS
1003 EPPI(2);
1004# endif
1005#endif
1006
1007 parent = debugfs_create_dir("gptimer", top);
1008#ifdef TIMER_DISABLE
1009 D16(TIMER_DISABLE);
1010 D16(TIMER_ENABLE);
1011 D32(TIMER_STATUS);
1012#endif
1013#ifdef TIMER_DISABLE0
1014 D16(TIMER_DISABLE0);
1015 D16(TIMER_ENABLE0);
1016 D32(TIMER_STATUS0);
1017#endif
1018#ifdef TIMER_DISABLE1
1019 D16(TIMER_DISABLE1);
1020 D16(TIMER_ENABLE1);
1021 D32(TIMER_STATUS1);
1022#endif
1023 /* XXX: Should convert BF561 MMR names */
1024#ifdef TMRS4_DISABLE
1025 D16(TMRS4_DISABLE);
1026 D16(TMRS4_ENABLE);
1027 D32(TMRS4_STATUS);
1028 D16(TMRS8_DISABLE);
1029 D16(TMRS8_ENABLE);
1030 D32(TMRS8_STATUS);
1031#endif
1032 GPTIMER(0);
1033 GPTIMER(1);
1034 GPTIMER(2);
1035#ifdef TIMER3_CONFIG
1036 GPTIMER(3);
1037 GPTIMER(4);
1038 GPTIMER(5);
1039 GPTIMER(6);
1040 GPTIMER(7);
1041#endif
1042#ifdef TIMER8_CONFIG
1043 GPTIMER(8);
1044 GPTIMER(9);
1045 GPTIMER(10);
1046#endif
1047#ifdef TIMER11_CONFIG
1048 GPTIMER(11);
1049#endif
1050
1051#ifdef HMDMA0_CONTROL
1052 parent = debugfs_create_dir("hmdma", top);
1053 HMDMA(0);
1054 HMDMA(1);
1055#endif
1056
1057#ifdef HOST_CONTROL
1058 parent = debugfs_create_dir("hostdp", top);
1059 D16(HOST_CONTROL);
1060 D16(HOST_STATUS);
1061 D16(HOST_TIMEOUT);
1062#endif
1063
1064#ifdef IMDMA_S0_CONFIG
1065 parent = debugfs_create_dir("imdma", top);
1066 IMDMA(0);
1067 IMDMA(1);
1068#endif
1069
1070#ifdef KPAD_CTL
1071 parent = debugfs_create_dir("keypad", top);
1072 D16(KPAD_CTL);
1073 D16(KPAD_PRESCALE);
1074 D16(KPAD_MSEL);
1075 D16(KPAD_ROWCOL);
1076 D16(KPAD_STAT);
1077 D16(KPAD_SOFTEVAL);
1078#endif
1079
1080 parent = debugfs_create_dir("mdma", top);
1081 MDMA(0);
1082 MDMA(1);
1083#ifdef MDMA_D2_CONFIG
1084 MDMA(2);
1085 MDMA(3);
1086#endif
1087
1088#ifdef MXVR_CONFIG
1089 parent = debugfs_create_dir("mxvr", top);
1090 D16(MXVR_CONFIG);
1091# ifdef MXVR_PLL_CTL_0
1092 D32(MXVR_PLL_CTL_0);
1093# endif
1094 D32(MXVR_STATE_0);
1095 D32(MXVR_STATE_1);
1096 D32(MXVR_INT_STAT_0);
1097 D32(MXVR_INT_STAT_1);
1098 D32(MXVR_INT_EN_0);
1099 D32(MXVR_INT_EN_1);
1100 D16(MXVR_POSITION);
1101 D16(MXVR_MAX_POSITION);
1102 D16(MXVR_DELAY);
1103 D16(MXVR_MAX_DELAY);
1104 D32(MXVR_LADDR);
1105 D16(MXVR_GADDR);
1106 D32(MXVR_AADDR);
1107 D32(MXVR_ALLOC_0);
1108 D32(MXVR_ALLOC_1);
1109 D32(MXVR_ALLOC_2);
1110 D32(MXVR_ALLOC_3);
1111 D32(MXVR_ALLOC_4);
1112 D32(MXVR_ALLOC_5);
1113 D32(MXVR_ALLOC_6);
1114 D32(MXVR_ALLOC_7);
1115 D32(MXVR_ALLOC_8);
1116 D32(MXVR_ALLOC_9);
1117 D32(MXVR_ALLOC_10);
1118 D32(MXVR_ALLOC_11);
1119 D32(MXVR_ALLOC_12);
1120 D32(MXVR_ALLOC_13);
1121 D32(MXVR_ALLOC_14);
1122 D32(MXVR_SYNC_LCHAN_0);
1123 D32(MXVR_SYNC_LCHAN_1);
1124 D32(MXVR_SYNC_LCHAN_2);
1125 D32(MXVR_SYNC_LCHAN_3);
1126 D32(MXVR_SYNC_LCHAN_4);
1127 D32(MXVR_SYNC_LCHAN_5);
1128 D32(MXVR_SYNC_LCHAN_6);
1129 D32(MXVR_SYNC_LCHAN_7);
1130 D32(MXVR_DMA0_CONFIG);
1131 D32(MXVR_DMA0_START_ADDR);
1132 D16(MXVR_DMA0_COUNT);
1133 D32(MXVR_DMA0_CURR_ADDR);
1134 D16(MXVR_DMA0_CURR_COUNT);
1135 D32(MXVR_DMA1_CONFIG);
1136 D32(MXVR_DMA1_START_ADDR);
1137 D16(MXVR_DMA1_COUNT);
1138 D32(MXVR_DMA1_CURR_ADDR);
1139 D16(MXVR_DMA1_CURR_COUNT);
1140 D32(MXVR_DMA2_CONFIG);
1141 D32(MXVR_DMA2_START_ADDR);
1142 D16(MXVR_DMA2_COUNT);
1143 D32(MXVR_DMA2_CURR_ADDR);
1144 D16(MXVR_DMA2_CURR_COUNT);
1145 D32(MXVR_DMA3_CONFIG);
1146 D32(MXVR_DMA3_START_ADDR);
1147 D16(MXVR_DMA3_COUNT);
1148 D32(MXVR_DMA3_CURR_ADDR);
1149 D16(MXVR_DMA3_CURR_COUNT);
1150 D32(MXVR_DMA4_CONFIG);
1151 D32(MXVR_DMA4_START_ADDR);
1152 D16(MXVR_DMA4_COUNT);
1153 D32(MXVR_DMA4_CURR_ADDR);
1154 D16(MXVR_DMA4_CURR_COUNT);
1155 D32(MXVR_DMA5_CONFIG);
1156 D32(MXVR_DMA5_START_ADDR);
1157 D16(MXVR_DMA5_COUNT);
1158 D32(MXVR_DMA5_CURR_ADDR);
1159 D16(MXVR_DMA5_CURR_COUNT);
1160 D32(MXVR_DMA6_CONFIG);
1161 D32(MXVR_DMA6_START_ADDR);
1162 D16(MXVR_DMA6_COUNT);
1163 D32(MXVR_DMA6_CURR_ADDR);
1164 D16(MXVR_DMA6_CURR_COUNT);
1165 D32(MXVR_DMA7_CONFIG);
1166 D32(MXVR_DMA7_START_ADDR);
1167 D16(MXVR_DMA7_COUNT);
1168 D32(MXVR_DMA7_CURR_ADDR);
1169 D16(MXVR_DMA7_CURR_COUNT);
1170 D16(MXVR_AP_CTL);
1171 D32(MXVR_APRB_START_ADDR);
1172 D32(MXVR_APRB_CURR_ADDR);
1173 D32(MXVR_APTB_START_ADDR);
1174 D32(MXVR_APTB_CURR_ADDR);
1175 D32(MXVR_CM_CTL);
1176 D32(MXVR_CMRB_START_ADDR);
1177 D32(MXVR_CMRB_CURR_ADDR);
1178 D32(MXVR_CMTB_START_ADDR);
1179 D32(MXVR_CMTB_CURR_ADDR);
1180 D32(MXVR_RRDB_START_ADDR);
1181 D32(MXVR_RRDB_CURR_ADDR);
1182 D32(MXVR_PAT_DATA_0);
1183 D32(MXVR_PAT_EN_0);
1184 D32(MXVR_PAT_DATA_1);
1185 D32(MXVR_PAT_EN_1);
1186 D16(MXVR_FRAME_CNT_0);
1187 D16(MXVR_FRAME_CNT_1);
1188 D32(MXVR_ROUTING_0);
1189 D32(MXVR_ROUTING_1);
1190 D32(MXVR_ROUTING_2);
1191 D32(MXVR_ROUTING_3);
1192 D32(MXVR_ROUTING_4);
1193 D32(MXVR_ROUTING_5);
1194 D32(MXVR_ROUTING_6);
1195 D32(MXVR_ROUTING_7);
1196 D32(MXVR_ROUTING_8);
1197 D32(MXVR_ROUTING_9);
1198 D32(MXVR_ROUTING_10);
1199 D32(MXVR_ROUTING_11);
1200 D32(MXVR_ROUTING_12);
1201 D32(MXVR_ROUTING_13);
1202 D32(MXVR_ROUTING_14);
1203# ifdef MXVR_PLL_CTL_1
1204 D32(MXVR_PLL_CTL_1);
1205# endif
1206 D16(MXVR_BLOCK_CNT);
1207# ifdef MXVR_CLK_CTL
1208 D32(MXVR_CLK_CTL);
1209# endif
1210# ifdef MXVR_CDRPLL_CTL
1211 D32(MXVR_CDRPLL_CTL);
1212# endif
1213# ifdef MXVR_FMPLL_CTL
1214 D32(MXVR_FMPLL_CTL);
1215# endif
1216# ifdef MXVR_PIN_CTL
1217 D16(MXVR_PIN_CTL);
1218# endif
1219# ifdef MXVR_SCLK_CNT
1220 D16(MXVR_SCLK_CNT);
1221# endif
1222#endif
1223
1224#ifdef NFC_ADDR
1225 parent = debugfs_create_dir("nfc", top);
1226 D_WO(NFC_ADDR, 16);
1227 D_WO(NFC_CMD, 16);
1228 D_RO(NFC_COUNT, 16);
1229 D16(NFC_CTL);
1230 D_WO(NFC_DATA_RD, 16);
1231 D_WO(NFC_DATA_WR, 16);
1232 D_RO(NFC_ECC0, 16);
1233 D_RO(NFC_ECC1, 16);
1234 D_RO(NFC_ECC2, 16);
1235 D_RO(NFC_ECC3, 16);
1236 D16(NFC_IRQMASK);
1237 D16(NFC_IRQSTAT);
1238 D_WO(NFC_PGCTL, 16);
1239 D_RO(NFC_READ, 16);
1240 D16(NFC_RST);
1241 D_RO(NFC_STAT, 16);
1242#endif
1243
1244#ifdef OTP_CONTROL
1245 parent = debugfs_create_dir("otp", top);
1246 D16(OTP_CONTROL);
1247 D16(OTP_BEN);
1248 D16(OTP_STATUS);
1249 D32(OTP_TIMING);
1250 D32(OTP_DATA0);
1251 D32(OTP_DATA1);
1252 D32(OTP_DATA2);
1253 D32(OTP_DATA3);
1254#endif
1255
1256#ifdef PIXC_CTL
1257 parent = debugfs_create_dir("pixc", top);
1258 D16(PIXC_CTL);
1259 D16(PIXC_PPL);
1260 D16(PIXC_LPF);
1261 D16(PIXC_AHSTART);
1262 D16(PIXC_AHEND);
1263 D16(PIXC_AVSTART);
1264 D16(PIXC_AVEND);
1265 D16(PIXC_ATRANSP);
1266 D16(PIXC_BHSTART);
1267 D16(PIXC_BHEND);
1268 D16(PIXC_BVSTART);
1269 D16(PIXC_BVEND);
1270 D16(PIXC_BTRANSP);
1271 D16(PIXC_INTRSTAT);
1272 D32(PIXC_RYCON);
1273 D32(PIXC_GUCON);
1274 D32(PIXC_BVCON);
1275 D32(PIXC_CCBIAS);
1276 D32(PIXC_TC);
1277#endif
1278
1279 parent = debugfs_create_dir("pll", top);
1280 D16(PLL_CTL);
1281 D16(PLL_DIV);
1282 D16(PLL_LOCKCNT);
1283 D16(PLL_STAT);
1284 D16(VR_CTL);
1285 D32(CHIPID); /* it's part of this hardware block */
1286
1287#if defined(PPI_CONTROL) || defined(PPI0_CONTROL) || defined(PPI1_CONTROL)
1288 parent = debugfs_create_dir("ppi", top);
1289# ifdef PPI_CONTROL
1290 bfin_debug_mmrs_ppi(parent, PPI_CONTROL, -1);
1291# endif
1292# ifdef PPI0_CONTROL
1293 PPI(0);
1294# endif
1295# ifdef PPI1_CONTROL
1296 PPI(1);
1297# endif
1298#endif
1299
1300#ifdef PWM_CTRL
1301 parent = debugfs_create_dir("pwm", top);
1302 D16(PWM_CTRL);
1303 D16(PWM_STAT);
1304 D16(PWM_TM);
1305 D16(PWM_DT);
1306 D16(PWM_GATE);
1307 D16(PWM_CHA);
1308 D16(PWM_CHB);
1309 D16(PWM_CHC);
1310 D16(PWM_SEG);
1311 D16(PWM_SYNCWT);
1312 D16(PWM_CHAL);
1313 D16(PWM_CHBL);
1314 D16(PWM_CHCL);
1315 D16(PWM_LSI);
1316 D16(PWM_STAT2);
1317#endif
1318
1319#ifdef RSI_CONFIG
1320 parent = debugfs_create_dir("rsi", top);
1321 D32(RSI_ARGUMENT);
1322 D16(RSI_CEATA_CONTROL);
1323 D16(RSI_CLK_CONTROL);
1324 D16(RSI_COMMAND);
1325 D16(RSI_CONFIG);
1326 D16(RSI_DATA_CNT);
1327 D16(RSI_DATA_CONTROL);
1328 D16(RSI_DATA_LGTH);
1329 D32(RSI_DATA_TIMER);
1330 D16(RSI_EMASK);
1331 D16(RSI_ESTAT);
1332 D32(RSI_FIFO);
1333 D16(RSI_FIFO_CNT);
1334 D32(RSI_MASK0);
1335 D32(RSI_MASK1);
1336 D16(RSI_PID0);
1337 D16(RSI_PID1);
1338 D16(RSI_PID2);
1339 D16(RSI_PID3);
1340 D16(RSI_PID4);
1341 D16(RSI_PID5);
1342 D16(RSI_PID6);
1343 D16(RSI_PID7);
1344 D16(RSI_PWR_CONTROL);
1345 D16(RSI_RD_WAIT_EN);
1346 D32(RSI_RESPONSE0);
1347 D32(RSI_RESPONSE1);
1348 D32(RSI_RESPONSE2);
1349 D32(RSI_RESPONSE3);
1350 D16(RSI_RESP_CMD);
1351 D32(RSI_STATUS);
1352 D_WO(RSI_STATUSCL, 16);
1353#endif
1354
1355#ifdef RTC_ALARM
1356 parent = debugfs_create_dir("rtc", top);
1357 D32(RTC_ALARM);
1358 D16(RTC_ICTL);
1359 D16(RTC_ISTAT);
1360 D16(RTC_PREN);
1361 D32(RTC_STAT);
1362 D16(RTC_SWCNT);
1363#endif
1364
1365#ifdef SDH_CFG
1366 parent = debugfs_create_dir("sdh", top);
1367 D32(SDH_ARGUMENT);
1368 D16(SDH_CFG);
1369 D16(SDH_CLK_CTL);
1370 D16(SDH_COMMAND);
1371 D_RO(SDH_DATA_CNT, 16);
1372 D16(SDH_DATA_CTL);
1373 D16(SDH_DATA_LGTH);
1374 D32(SDH_DATA_TIMER);
1375 D16(SDH_E_MASK);
1376 D16(SDH_E_STATUS);
1377 D32(SDH_FIFO);
1378 D_RO(SDH_FIFO_CNT, 16);
1379 D32(SDH_MASK0);
1380 D32(SDH_MASK1);
1381 D_RO(SDH_PID0, 16);
1382 D_RO(SDH_PID1, 16);
1383 D_RO(SDH_PID2, 16);
1384 D_RO(SDH_PID3, 16);
1385 D_RO(SDH_PID4, 16);
1386 D_RO(SDH_PID5, 16);
1387 D_RO(SDH_PID6, 16);
1388 D_RO(SDH_PID7, 16);
1389 D16(SDH_PWR_CTL);
1390 D16(SDH_RD_WAIT_EN);
1391 D_RO(SDH_RESPONSE0, 32);
1392 D_RO(SDH_RESPONSE1, 32);
1393 D_RO(SDH_RESPONSE2, 32);
1394 D_RO(SDH_RESPONSE3, 32);
1395 D_RO(SDH_RESP_CMD, 16);
1396 D_RO(SDH_STATUS, 32);
1397 D_WO(SDH_STATUS_CLR, 16);
1398#endif
1399
1400#ifdef SECURE_CONTROL
1401 parent = debugfs_create_dir("security", top);
1402 D16(SECURE_CONTROL);
1403 D16(SECURE_STATUS);
1404 D32(SECURE_SYSSWT);
1405#endif
1406
1407 parent = debugfs_create_dir("sic", top);
1408 D16(SWRST);
1409 D16(SYSCR);
1410 D16(SIC_RVECT);
1411 D32(SIC_IAR0);
1412 D32(SIC_IAR1);
1413 D32(SIC_IAR2);
1414#ifdef SIC_IAR3
1415 D32(SIC_IAR3);
1416#endif
1417#ifdef SIC_IAR4
1418 D32(SIC_IAR4);
1419 D32(SIC_IAR5);
1420 D32(SIC_IAR6);
1421#endif
1422#ifdef SIC_IAR7
1423 D32(SIC_IAR7);
1424#endif
1425#ifdef SIC_IAR8
1426 D32(SIC_IAR8);
1427 D32(SIC_IAR9);
1428 D32(SIC_IAR10);
1429 D32(SIC_IAR11);
1430#endif
1431#ifdef SIC_IMASK
1432 D32(SIC_IMASK);
1433 D32(SIC_ISR);
1434 D32(SIC_IWR);
1435#endif
1436#ifdef SIC_IMASK0
1437 D32(SIC_IMASK0);
1438 D32(SIC_IMASK1);
1439 D32(SIC_ISR0);
1440 D32(SIC_ISR1);
1441 D32(SIC_IWR0);
1442 D32(SIC_IWR1);
1443#endif
1444#ifdef SIC_IMASK2
1445 D32(SIC_IMASK2);
1446 D32(SIC_ISR2);
1447 D32(SIC_IWR2);
1448#endif
1449#ifdef SICB_RVECT
1450 D16(SICB_SWRST);
1451 D16(SICB_SYSCR);
1452 D16(SICB_RVECT);
1453 D32(SICB_IAR0);
1454 D32(SICB_IAR1);
1455 D32(SICB_IAR2);
1456 D32(SICB_IAR3);
1457 D32(SICB_IAR4);
1458 D32(SICB_IAR5);
1459 D32(SICB_IAR6);
1460 D32(SICB_IAR7);
1461 D32(SICB_IMASK0);
1462 D32(SICB_IMASK1);
1463 D32(SICB_ISR0);
1464 D32(SICB_ISR1);
1465 D32(SICB_IWR0);
1466 D32(SICB_IWR1);
1467#endif
1468
1469 parent = debugfs_create_dir("spi", top);
1470#ifdef SPI0_REGBASE
1471 SPI(0);
1472#endif
1473#ifdef SPI1_REGBASE
1474 SPI(1);
1475#endif
1476#ifdef SPI2_REGBASE
1477 SPI(2);
1478#endif
1479
1480 parent = debugfs_create_dir("sport", top);
1481#ifdef SPORT0_STAT
1482 SPORT(0);
1483#endif
1484#ifdef SPORT1_STAT
1485 SPORT(1);
1486#endif
1487#ifdef SPORT2_STAT
1488 SPORT(2);
1489#endif
1490#ifdef SPORT3_STAT
1491 SPORT(3);
1492#endif
1493
1494#if defined(TWI_CLKDIV) || defined(TWI0_CLKDIV) || defined(TWI1_CLKDIV)
1495 parent = debugfs_create_dir("twi", top);
1496# ifdef TWI_CLKDIV
1497 bfin_debug_mmrs_twi(parent, TWI_CLKDIV, -1);
1498# endif
1499# ifdef TWI0_CLKDIV
1500 TWI(0);
1501# endif
1502# ifdef TWI1_CLKDIV
1503 TWI(1);
1504# endif
1505#endif
1506
1507 parent = debugfs_create_dir("uart", top);
1508#ifdef BFIN_UART_DLL
1509 bfin_debug_mmrs_uart(parent, BFIN_UART_DLL, -1);
1510#endif
1511#ifdef UART0_DLL
1512 UART(0);
1513#endif
1514#ifdef UART1_DLL
1515 UART(1);
1516#endif
1517#ifdef UART2_DLL
1518 UART(2);
1519#endif
1520#ifdef UART3_DLL
1521 UART(3);
1522#endif
1523
1524#ifdef USB_FADDR
1525 parent = debugfs_create_dir("usb", top);
1526 D16(USB_FADDR);
1527 D16(USB_POWER);
1528 D16(USB_INTRTX);
1529 D16(USB_INTRRX);
1530 D16(USB_INTRTXE);
1531 D16(USB_INTRRXE);
1532 D16(USB_INTRUSB);
1533 D16(USB_INTRUSBE);
1534 D16(USB_FRAME);
1535 D16(USB_INDEX);
1536 D16(USB_TESTMODE);
1537 D16(USB_GLOBINTR);
1538 D16(USB_GLOBAL_CTL);
1539 D16(USB_TX_MAX_PACKET);
1540 D16(USB_CSR0);
1541 D16(USB_TXCSR);
1542 D16(USB_RX_MAX_PACKET);
1543 D16(USB_RXCSR);
1544 D16(USB_COUNT0);
1545 D16(USB_RXCOUNT);
1546 D16(USB_TXTYPE);
1547 D16(USB_NAKLIMIT0);
1548 D16(USB_TXINTERVAL);
1549 D16(USB_RXTYPE);
1550 D16(USB_RXINTERVAL);
1551 D16(USB_TXCOUNT);
1552 D16(USB_EP0_FIFO);
1553 D16(USB_EP1_FIFO);
1554 D16(USB_EP2_FIFO);
1555 D16(USB_EP3_FIFO);
1556 D16(USB_EP4_FIFO);
1557 D16(USB_EP5_FIFO);
1558 D16(USB_EP6_FIFO);
1559 D16(USB_EP7_FIFO);
1560 D16(USB_OTG_DEV_CTL);
1561 D16(USB_OTG_VBUS_IRQ);
1562 D16(USB_OTG_VBUS_MASK);
1563 D16(USB_LINKINFO);
1564 D16(USB_VPLEN);
1565 D16(USB_HS_EOF1);
1566 D16(USB_FS_EOF1);
1567 D16(USB_LS_EOF1);
1568 D16(USB_APHY_CNTRL);
1569 D16(USB_APHY_CALIB);
1570 D16(USB_APHY_CNTRL2);
1571 D16(USB_PHY_TEST);
1572 D16(USB_PLLOSC_CTRL);
1573 D16(USB_SRP_CLKDIV);
1574 D16(USB_EP_NI0_TXMAXP);
1575 D16(USB_EP_NI0_TXCSR);
1576 D16(USB_EP_NI0_RXMAXP);
1577 D16(USB_EP_NI0_RXCSR);
1578 D16(USB_EP_NI0_RXCOUNT);
1579 D16(USB_EP_NI0_TXTYPE);
1580 D16(USB_EP_NI0_TXINTERVAL);
1581 D16(USB_EP_NI0_RXTYPE);
1582 D16(USB_EP_NI0_RXINTERVAL);
1583 D16(USB_EP_NI0_TXCOUNT);
1584 D16(USB_EP_NI1_TXMAXP);
1585 D16(USB_EP_NI1_TXCSR);
1586 D16(USB_EP_NI1_RXMAXP);
1587 D16(USB_EP_NI1_RXCSR);
1588 D16(USB_EP_NI1_RXCOUNT);
1589 D16(USB_EP_NI1_TXTYPE);
1590 D16(USB_EP_NI1_TXINTERVAL);
1591 D16(USB_EP_NI1_RXTYPE);
1592 D16(USB_EP_NI1_RXINTERVAL);
1593 D16(USB_EP_NI1_TXCOUNT);
1594 D16(USB_EP_NI2_TXMAXP);
1595 D16(USB_EP_NI2_TXCSR);
1596 D16(USB_EP_NI2_RXMAXP);
1597 D16(USB_EP_NI2_RXCSR);
1598 D16(USB_EP_NI2_RXCOUNT);
1599 D16(USB_EP_NI2_TXTYPE);
1600 D16(USB_EP_NI2_TXINTERVAL);
1601 D16(USB_EP_NI2_RXTYPE);
1602 D16(USB_EP_NI2_RXINTERVAL);
1603 D16(USB_EP_NI2_TXCOUNT);
1604 D16(USB_EP_NI3_TXMAXP);
1605 D16(USB_EP_NI3_TXCSR);
1606 D16(USB_EP_NI3_RXMAXP);
1607 D16(USB_EP_NI3_RXCSR);
1608 D16(USB_EP_NI3_RXCOUNT);
1609 D16(USB_EP_NI3_TXTYPE);
1610 D16(USB_EP_NI3_TXINTERVAL);
1611 D16(USB_EP_NI3_RXTYPE);
1612 D16(USB_EP_NI3_RXINTERVAL);
1613 D16(USB_EP_NI3_TXCOUNT);
1614 D16(USB_EP_NI4_TXMAXP);
1615 D16(USB_EP_NI4_TXCSR);
1616 D16(USB_EP_NI4_RXMAXP);
1617 D16(USB_EP_NI4_RXCSR);
1618 D16(USB_EP_NI4_RXCOUNT);
1619 D16(USB_EP_NI4_TXTYPE);
1620 D16(USB_EP_NI4_TXINTERVAL);
1621 D16(USB_EP_NI4_RXTYPE);
1622 D16(USB_EP_NI4_RXINTERVAL);
1623 D16(USB_EP_NI4_TXCOUNT);
1624 D16(USB_EP_NI5_TXMAXP);
1625 D16(USB_EP_NI5_TXCSR);
1626 D16(USB_EP_NI5_RXMAXP);
1627 D16(USB_EP_NI5_RXCSR);
1628 D16(USB_EP_NI5_RXCOUNT);
1629 D16(USB_EP_NI5_TXTYPE);
1630 D16(USB_EP_NI5_TXINTERVAL);
1631 D16(USB_EP_NI5_RXTYPE);
1632 D16(USB_EP_NI5_RXINTERVAL);
1633 D16(USB_EP_NI5_TXCOUNT);
1634 D16(USB_EP_NI6_TXMAXP);
1635 D16(USB_EP_NI6_TXCSR);
1636 D16(USB_EP_NI6_RXMAXP);
1637 D16(USB_EP_NI6_RXCSR);
1638 D16(USB_EP_NI6_RXCOUNT);
1639 D16(USB_EP_NI6_TXTYPE);
1640 D16(USB_EP_NI6_TXINTERVAL);
1641 D16(USB_EP_NI6_RXTYPE);
1642 D16(USB_EP_NI6_RXINTERVAL);
1643 D16(USB_EP_NI6_TXCOUNT);
1644 D16(USB_EP_NI7_TXMAXP);
1645 D16(USB_EP_NI7_TXCSR);
1646 D16(USB_EP_NI7_RXMAXP);
1647 D16(USB_EP_NI7_RXCSR);
1648 D16(USB_EP_NI7_RXCOUNT);
1649 D16(USB_EP_NI7_TXTYPE);
1650 D16(USB_EP_NI7_TXINTERVAL);
1651 D16(USB_EP_NI7_RXTYPE);
1652 D16(USB_EP_NI7_RXINTERVAL);
1653 D16(USB_EP_NI7_TXCOUNT);
1654 D16(USB_DMA_INTERRUPT);
1655 D16(USB_DMA0CONTROL);
1656 D16(USB_DMA0ADDRLOW);
1657 D16(USB_DMA0ADDRHIGH);
1658 D16(USB_DMA0COUNTLOW);
1659 D16(USB_DMA0COUNTHIGH);
1660 D16(USB_DMA1CONTROL);
1661 D16(USB_DMA1ADDRLOW);
1662 D16(USB_DMA1ADDRHIGH);
1663 D16(USB_DMA1COUNTLOW);
1664 D16(USB_DMA1COUNTHIGH);
1665 D16(USB_DMA2CONTROL);
1666 D16(USB_DMA2ADDRLOW);
1667 D16(USB_DMA2ADDRHIGH);
1668 D16(USB_DMA2COUNTLOW);
1669 D16(USB_DMA2COUNTHIGH);
1670 D16(USB_DMA3CONTROL);
1671 D16(USB_DMA3ADDRLOW);
1672 D16(USB_DMA3ADDRHIGH);
1673 D16(USB_DMA3COUNTLOW);
1674 D16(USB_DMA3COUNTHIGH);
1675 D16(USB_DMA4CONTROL);
1676 D16(USB_DMA4ADDRLOW);
1677 D16(USB_DMA4ADDRHIGH);
1678 D16(USB_DMA4COUNTLOW);
1679 D16(USB_DMA4COUNTHIGH);
1680 D16(USB_DMA5CONTROL);
1681 D16(USB_DMA5ADDRLOW);
1682 D16(USB_DMA5ADDRHIGH);
1683 D16(USB_DMA5COUNTLOW);
1684 D16(USB_DMA5COUNTHIGH);
1685 D16(USB_DMA6CONTROL);
1686 D16(USB_DMA6ADDRLOW);
1687 D16(USB_DMA6ADDRHIGH);
1688 D16(USB_DMA6COUNTLOW);
1689 D16(USB_DMA6COUNTHIGH);
1690 D16(USB_DMA7CONTROL);
1691 D16(USB_DMA7ADDRLOW);
1692 D16(USB_DMA7ADDRHIGH);
1693 D16(USB_DMA7COUNTLOW);
1694 D16(USB_DMA7COUNTHIGH);
1695#endif
1696
1697#ifdef WDOG_CNT
1698 parent = debugfs_create_dir("watchdog", top);
1699 D32(WDOG_CNT);
1700 D16(WDOG_CTL);
1701 D32(WDOG_STAT);
1702#endif
1703#ifdef WDOGA_CNT
1704 parent = debugfs_create_dir("watchdog", top);
1705 D32(WDOGA_CNT);
1706 D16(WDOGA_CTL);
1707 D32(WDOGA_STAT);
1708 D32(WDOGB_CNT);
1709 D16(WDOGB_CTL);
1710 D32(WDOGB_STAT);
1711#endif
1712
1713 /* BF533 glue */
1714#ifdef FIO_FLAG_D
1715#define PORTFIO FIO_FLAG_D
1716#endif
1717 /* BF561 glue */
1718#ifdef FIO0_FLAG_D
1719#define PORTFIO FIO0_FLAG_D
1720#endif
1721#ifdef FIO1_FLAG_D
1722#define PORTGIO FIO1_FLAG_D
1723#endif
1724#ifdef FIO2_FLAG_D
1725#define PORTHIO FIO2_FLAG_D
1726#endif
1727 parent = debugfs_create_dir("port", top);
1728#ifdef PORTFIO
1729 PORT(PORTFIO, 'F');
1730#endif
1731#ifdef PORTGIO
1732 PORT(PORTGIO, 'G');
1733#endif
1734#ifdef PORTHIO
1735 PORT(PORTHIO, 'H');
1736#endif
1737
1738#ifdef __ADSPBF51x__
1739 D16(PORTF_FER);
1740 D16(PORTF_DRIVE);
1741 D16(PORTF_HYSTERESIS);
1742 D16(PORTF_MUX);
1743
1744 D16(PORTG_FER);
1745 D16(PORTG_DRIVE);
1746 D16(PORTG_HYSTERESIS);
1747 D16(PORTG_MUX);
1748
1749 D16(PORTH_FER);
1750 D16(PORTH_DRIVE);
1751 D16(PORTH_HYSTERESIS);
1752 D16(PORTH_MUX);
1753
1754 D16(MISCPORT_DRIVE);
1755 D16(MISCPORT_HYSTERESIS);
1756#endif /* BF51x */
1757
1758#ifdef __ADSPBF52x__
1759 D16(PORTF_FER);
1760 D16(PORTF_DRIVE);
1761 D16(PORTF_HYSTERESIS);
1762 D16(PORTF_MUX);
1763 D16(PORTF_SLEW);
1764
1765 D16(PORTG_FER);
1766 D16(PORTG_DRIVE);
1767 D16(PORTG_HYSTERESIS);
1768 D16(PORTG_MUX);
1769 D16(PORTG_SLEW);
1770
1771 D16(PORTH_FER);
1772 D16(PORTH_DRIVE);
1773 D16(PORTH_HYSTERESIS);
1774 D16(PORTH_MUX);
1775 D16(PORTH_SLEW);
1776
1777 D16(MISCPORT_DRIVE);
1778 D16(MISCPORT_HYSTERESIS);
1779 D16(MISCPORT_SLEW);
1780#endif /* BF52x */
1781
1782#ifdef BF537_FAMILY
1783 D16(PORTF_FER);
1784 D16(PORTG_FER);
1785 D16(PORTH_FER);
1786 D16(PORT_MUX);
1787#endif /* BF534 BF536 BF537 */
1788
1789#ifdef BF538_FAMILY
1790 D16(PORTCIO_FER);
1791 D16(PORTCIO);
1792 D16(PORTCIO_CLEAR);
1793 D16(PORTCIO_SET);
1794 D16(PORTCIO_TOGGLE);
1795 D16(PORTCIO_DIR);
1796 D16(PORTCIO_INEN);
1797
1798 D16(PORTDIO);
1799 D16(PORTDIO_CLEAR);
1800 D16(PORTDIO_DIR);
1801 D16(PORTDIO_FER);
1802 D16(PORTDIO_INEN);
1803 D16(PORTDIO_SET);
1804 D16(PORTDIO_TOGGLE);
1805
1806 D16(PORTEIO);
1807 D16(PORTEIO_CLEAR);
1808 D16(PORTEIO_DIR);
1809 D16(PORTEIO_FER);
1810 D16(PORTEIO_INEN);
1811 D16(PORTEIO_SET);
1812 D16(PORTEIO_TOGGLE);
1813#endif /* BF538 BF539 */
1814
1815#ifdef __ADSPBF54x__
1816 {
1817 int num;
1818 unsigned long base;
1819 char *_buf, buf[32];
1820
1821 base = PORTA_FER;
1822 for (num = 0; num < 10; ++num) {
1823 PORT(base, num);
1824 base += sizeof(struct bfin_gpio_regs);
1825 }
1826
1827#define __PINT(uname, lname) __REGS(pint, #uname, lname)
1828 parent = debugfs_create_dir("pint", top);
1829 base = PINT0_MASK_SET;
1830 for (num = 0; num < 4; ++num) {
1831 _buf = REGS_STR_PFX(buf, PINT, num);
1832 __PINT(MASK_SET, mask_set);
1833 __PINT(MASK_CLEAR, mask_clear);
1834 __PINT(IRQ, irq);
1835 __PINT(ASSIGN, assign);
1836 __PINT(EDGE_SET, edge_set);
1837 __PINT(EDGE_CLEAR, edge_clear);
1838 __PINT(INVERT_SET, invert_set);
1839 __PINT(INVERT_CLEAR, invert_clear);
1840 __PINT(PINSTATE, pinstate);
1841 __PINT(LATCH, latch);
1842 base += sizeof(struct bfin_pint_regs);
1843 }
1844
1845 }
1846#endif /* BF54x */
1847
1848 debug_mmrs_dentry = top;
1849
1850 return 0;
1851}
1852module_init(bfin_debug_mmrs_init);
1853
/* Module teardown: remove the entire MMR debugfs hierarchy created by bfin_debug_mmrs_init(). */
1854static void __exit bfin_debug_mmrs_exit(void)
1855{
1856 debugfs_remove_recursive(debug_mmrs_dentry); /* recursively drops every dir/file registered under the saved root dentry */
1857}
1858module_exit(bfin_debug_mmrs_exit);
1859
1860MODULE_LICENSE("GPL");
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index cdbe075de1dc..8b81dc04488a 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
268 _disable_gptimers(mask); 268 _disable_gptimers(mask);
269 for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i) 269 for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
270 if (mask & (1 << i)) 270 if (mask & (1 << i))
271 group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i]; 271 group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
272 SSYNC(); 272 SSYNC();
273} 273}
274EXPORT_SYMBOL(disable_gptimers); 274EXPORT_SYMBOL(disable_gptimers);
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 1a496cd71ba2..486426f8a0d7 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -33,6 +33,7 @@
33#include <linux/io.h> 33#include <linux/io.h>
34#include <asm/system.h> 34#include <asm/system.h>
35#include <asm/atomic.h> 35#include <asm/atomic.h>
36#include <asm/irq_handler.h>
36 37
37DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs); 38DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
38 39
@@ -154,7 +155,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
154 * pending for it. 155 * pending for it.
155 */ 156 */
156 if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) && 157 if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
157 ipipe_head_cpudom_var(irqpend_himask) == 0) 158 !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
158 goto out; 159 goto out;
159 160
160 __ipipe_walk_pipeline(head); 161 __ipipe_walk_pipeline(head);
@@ -185,25 +186,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
185} 186}
186EXPORT_SYMBOL(__ipipe_disable_irqdesc); 187EXPORT_SYMBOL(__ipipe_disable_irqdesc);
187 188
188int __ipipe_syscall_root(struct pt_regs *regs) 189asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
189{ 190{
190 struct ipipe_percpu_domain_data *p; 191 struct ipipe_percpu_domain_data *p;
191 unsigned long flags; 192 void (*hook)(void);
192 int ret; 193 int ret;
193 194
195 WARN_ON_ONCE(irqs_disabled_hw());
196
194 /* 197 /*
195 * We need to run the IRQ tail hook whenever we don't 198 * We need to run the IRQ tail hook each time we intercept a
196 * propagate a syscall to higher domains, because we know that 199 * syscall, because we know that important operations might be
197 * important operations might be pending there (e.g. Xenomai 200 * pending there (e.g. Xenomai deferred rescheduling).
198 * deferred rescheduling).
199 */ 201 */
200 202 hook = (__typeof__(hook))__ipipe_irq_tail_hook;
201 if (regs->orig_p0 < NR_syscalls) { 203 hook();
202 void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
203 hook();
204 if ((current->flags & PF_EVNOTIFY) == 0)
205 return 0;
206 }
207 204
208 /* 205 /*
209 * This routine either returns: 206 * This routine either returns:
@@ -214,51 +211,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
214 * tail work has to be performed (for handling signals etc). 211 * tail work has to be performed (for handling signals etc).
215 */ 212 */
216 213
217 if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL)) 214 if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
215 !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
218 return 0; 216 return 0;
219 217
220 ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs); 218 ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
221 219
222 local_irq_save_hw(flags); 220 hard_local_irq_disable();
223 221
224 if (!__ipipe_root_domain_p) { 222 /*
225 local_irq_restore_hw(flags); 223 * This is the end of the syscall path, so we may
226 return 1; 224 * safely assume a valid Linux task stack here.
225 */
226 if (current->ipipe_flags & PF_EVTRET) {
227 current->ipipe_flags &= ~PF_EVTRET;
228 __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
227 } 229 }
228 230
229 p = ipipe_root_cpudom_ptr(); 231 if (!__ipipe_root_domain_p)
230 if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0) 232 ret = -1;
231 __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT); 233 else {
234 p = ipipe_root_cpudom_ptr();
235 if (__ipipe_ipending_p(p))
236 __ipipe_sync_pipeline();
237 }
232 238
233 local_irq_restore_hw(flags); 239 hard_local_irq_enable();
234 240
235 return -ret; 241 return -ret;
236} 242}
237 243
238unsigned long ipipe_critical_enter(void (*syncfn) (void))
239{
240 unsigned long flags;
241
242 local_irq_save_hw(flags);
243
244 return flags;
245}
246
247void ipipe_critical_exit(unsigned long flags)
248{
249 local_irq_restore_hw(flags);
250}
251
252static void __ipipe_no_irqtail(void) 244static void __ipipe_no_irqtail(void)
253{ 245{
254} 246}
255 247
256int ipipe_get_sysinfo(struct ipipe_sysinfo *info) 248int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
257{ 249{
258 info->ncpus = num_online_cpus(); 250 info->sys_nr_cpus = num_online_cpus();
259 info->cpufreq = ipipe_cpu_freq(); 251 info->sys_cpu_freq = ipipe_cpu_freq();
260 info->archdep.tmirq = IPIPE_TIMER_IRQ; 252 info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
261 info->archdep.tmfreq = info->cpufreq; 253 info->sys_hrtimer_freq = __ipipe_core_clock;
254 info->sys_hrclock_freq = __ipipe_core_clock;
262 255
263 return 0; 256 return 0;
264} 257}
@@ -279,9 +272,9 @@ int ipipe_trigger_irq(unsigned irq)
279 return -EINVAL; 272 return -EINVAL;
280#endif 273#endif
281 274
282 local_irq_save_hw(flags); 275 flags = hard_local_irq_save();
283 __ipipe_handle_irq(irq, NULL); 276 __ipipe_handle_irq(irq, NULL);
284 local_irq_restore_hw(flags); 277 hard_local_irq_restore(flags);
285 278
286 return 1; 279 return 1;
287} 280}
@@ -289,30 +282,32 @@ int ipipe_trigger_irq(unsigned irq)
289asmlinkage void __ipipe_sync_root(void) 282asmlinkage void __ipipe_sync_root(void)
290{ 283{
291 void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook; 284 void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
285 struct ipipe_percpu_domain_data *p;
292 unsigned long flags; 286 unsigned long flags;
293 287
294 BUG_ON(irqs_disabled()); 288 BUG_ON(irqs_disabled());
295 289
296 local_irq_save_hw(flags); 290 flags = hard_local_irq_save();
297 291
298 if (irq_tail_hook) 292 if (irq_tail_hook)
299 irq_tail_hook(); 293 irq_tail_hook();
300 294
301 clear_thread_flag(TIF_IRQ_SYNC); 295 clear_thread_flag(TIF_IRQ_SYNC);
302 296
303 if (ipipe_root_cpudom_var(irqpend_himask) != 0) 297 p = ipipe_root_cpudom_ptr();
304 __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY); 298 if (__ipipe_ipending_p(p))
299 __ipipe_sync_pipeline();
305 300
306 local_irq_restore_hw(flags); 301 hard_local_irq_restore(flags);
307} 302}
308 303
309void ___ipipe_sync_pipeline(unsigned long syncmask) 304void ___ipipe_sync_pipeline(void)
310{ 305{
311 if (__ipipe_root_domain_p && 306 if (__ipipe_root_domain_p &&
312 test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status))) 307 test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
313 return; 308 return;
314 309
315 __ipipe_sync_stage(syncmask); 310 __ipipe_sync_stage();
316} 311}
317 312
318void __ipipe_disable_root_irqs_hw(void) 313void __ipipe_disable_root_irqs_hw(void)
@@ -344,10 +339,10 @@ void __ipipe_stall_root(void)
344{ 339{
345 unsigned long *p, flags; 340 unsigned long *p, flags;
346 341
347 local_irq_save_hw(flags); 342 flags = hard_local_irq_save();
348 p = &__ipipe_root_status; 343 p = &__ipipe_root_status;
349 __set_bit(IPIPE_STALL_FLAG, p); 344 __set_bit(IPIPE_STALL_FLAG, p);
350 local_irq_restore_hw(flags); 345 hard_local_irq_restore(flags);
351} 346}
352EXPORT_SYMBOL(__ipipe_stall_root); 347EXPORT_SYMBOL(__ipipe_stall_root);
353 348
@@ -356,10 +351,10 @@ unsigned long __ipipe_test_and_stall_root(void)
356 unsigned long *p, flags; 351 unsigned long *p, flags;
357 int x; 352 int x;
358 353
359 local_irq_save_hw(flags); 354 flags = hard_local_irq_save();
360 p = &__ipipe_root_status; 355 p = &__ipipe_root_status;
361 x = __test_and_set_bit(IPIPE_STALL_FLAG, p); 356 x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
362 local_irq_restore_hw(flags); 357 hard_local_irq_restore(flags);
363 358
364 return x; 359 return x;
365} 360}
@@ -371,10 +366,10 @@ unsigned long __ipipe_test_root(void)
371 unsigned long flags; 366 unsigned long flags;
372 int x; 367 int x;
373 368
374 local_irq_save_hw_smp(flags); 369 flags = hard_local_irq_save_smp();
375 p = &__ipipe_root_status; 370 p = &__ipipe_root_status;
376 x = test_bit(IPIPE_STALL_FLAG, p); 371 x = test_bit(IPIPE_STALL_FLAG, p);
377 local_irq_restore_hw_smp(flags); 372 hard_local_irq_restore_smp(flags);
378 373
379 return x; 374 return x;
380} 375}
@@ -384,10 +379,10 @@ void __ipipe_lock_root(void)
384{ 379{
385 unsigned long *p, flags; 380 unsigned long *p, flags;
386 381
387 local_irq_save_hw(flags); 382 flags = hard_local_irq_save();
388 p = &__ipipe_root_status; 383 p = &__ipipe_root_status;
389 __set_bit(IPIPE_SYNCDEFER_FLAG, p); 384 __set_bit(IPIPE_SYNCDEFER_FLAG, p);
390 local_irq_restore_hw(flags); 385 hard_local_irq_restore(flags);
391} 386}
392EXPORT_SYMBOL(__ipipe_lock_root); 387EXPORT_SYMBOL(__ipipe_lock_root);
393 388
@@ -395,9 +390,9 @@ void __ipipe_unlock_root(void)
395{ 390{
396 unsigned long *p, flags; 391 unsigned long *p, flags;
397 392
398 local_irq_save_hw(flags); 393 flags = hard_local_irq_save();
399 p = &__ipipe_root_status; 394 p = &__ipipe_root_status;
400 __clear_bit(IPIPE_SYNCDEFER_FLAG, p); 395 __clear_bit(IPIPE_SYNCDEFER_FLAG, p);
401 local_irq_restore_hw(flags); 396 hard_local_irq_restore(flags);
402} 397}
403EXPORT_SYMBOL(__ipipe_unlock_root); 398EXPORT_SYMBOL(__ipipe_unlock_root);
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 64cff54a8a58..ff3d747154ac 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -11,6 +11,7 @@
11#include <linux/kallsyms.h> 11#include <linux/kallsyms.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/irq.h> 13#include <linux/irq.h>
14#include <asm/irq_handler.h>
14#include <asm/trace.h> 15#include <asm/trace.h>
15#include <asm/pda.h> 16#include <asm/pda.h>
16 17
@@ -39,21 +40,23 @@ int show_interrupts(struct seq_file *p, void *v)
39 unsigned long flags; 40 unsigned long flags;
40 41
41 if (i < NR_IRQS) { 42 if (i < NR_IRQS) {
42 raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 43 struct irq_desc *desc = irq_to_desc(i);
43 action = irq_desc[i].action; 44
45 raw_spin_lock_irqsave(&desc->lock, flags);
46 action = desc->action;
44 if (!action) 47 if (!action)
45 goto skip; 48 goto skip;
46 seq_printf(p, "%3d: ", i); 49 seq_printf(p, "%3d: ", i);
47 for_each_online_cpu(j) 50 for_each_online_cpu(j)
48 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); 51 seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
49 seq_printf(p, " %8s", irq_desc[i].chip->name); 52 seq_printf(p, " %8s", irq_desc_get_chip(desc)->name);
50 seq_printf(p, " %s", action->name); 53 seq_printf(p, " %s", action->name);
51 for (action = action->next; action; action = action->next) 54 for (action = action->next; action; action = action->next)
52 seq_printf(p, " %s", action->name); 55 seq_printf(p, " %s", action->name);
53 56
54 seq_putc(p, '\n'); 57 seq_putc(p, '\n');
55 skip: 58 skip:
56 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 59 raw_spin_unlock_irqrestore(&desc->lock, flags);
57 } else if (i == NR_IRQS) { 60 } else if (i == NR_IRQS) {
58 seq_printf(p, "NMI: "); 61 seq_printf(p, "NMI: ");
59 for_each_online_cpu(j) 62 for_each_online_cpu(j)
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 08bc44ea6883..9b80b152435e 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -181,7 +181,7 @@ static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
181 return -ENOSPC; 181 return -ENOSPC;
182 } 182 }
183 183
184 /* Becasue hardware data watchpoint impelemented in current 184 /* Because hardware data watchpoint impelemented in current
185 * Blackfin can not trigger an exception event as the hardware 185 * Blackfin can not trigger an exception event as the hardware
186 * instrction watchpoint does, we ignaore all data watch point here. 186 * instrction watchpoint does, we ignaore all data watch point here.
187 * They can be turned on easily after future blackfin design 187 * They can be turned on easily after future blackfin design
@@ -320,7 +320,7 @@ static void bfin_correct_hw_break(void)
320 } 320 }
321} 321}
322 322
323void kgdb_disable_hw_debug(struct pt_regs *regs) 323static void bfin_disable_hw_debug(struct pt_regs *regs)
324{ 324{
325 /* Disable hardware debugging while we are in kgdb */ 325 /* Disable hardware debugging while we are in kgdb */
326 bfin_write_WPIACTL(0); 326 bfin_write_WPIACTL(0);
@@ -345,6 +345,23 @@ void kgdb_roundup_cpu(int cpu, unsigned long flags)
345} 345}
346#endif 346#endif
347 347
348#ifdef CONFIG_IPIPE
349static unsigned long kgdb_arch_imask;
350#endif
351
352void kgdb_post_primary_code(struct pt_regs *regs, int e_vector, int err_code)
353{
354 if (kgdb_single_step)
355 preempt_enable();
356
357#ifdef CONFIG_IPIPE
358 if (kgdb_arch_imask) {
359 cpu_pda[raw_smp_processor_id()].ex_imask = kgdb_arch_imask;
360 kgdb_arch_imask = 0;
361 }
362#endif
363}
364
348int kgdb_arch_handle_exception(int vector, int signo, 365int kgdb_arch_handle_exception(int vector, int signo,
349 int err_code, char *remcom_in_buffer, 366 int err_code, char *remcom_in_buffer,
350 char *remcom_out_buffer, 367 char *remcom_out_buffer,
@@ -388,6 +405,12 @@ int kgdb_arch_handle_exception(int vector, int signo,
388 * kgdb_single_step > 0 means in single step mode 405 * kgdb_single_step > 0 means in single step mode
389 */ 406 */
390 kgdb_single_step = i + 1; 407 kgdb_single_step = i + 1;
408
409 preempt_disable();
410#ifdef CONFIG_IPIPE
411 kgdb_arch_imask = cpu_pda[raw_smp_processor_id()].ex_imask;
412 cpu_pda[raw_smp_processor_id()].ex_imask = 0;
413#endif
391 } 414 }
392 415
393 bfin_correct_hw_break(); 416 bfin_correct_hw_break();
@@ -399,13 +422,10 @@ int kgdb_arch_handle_exception(int vector, int signo,
399 422
400struct kgdb_arch arch_kgdb_ops = { 423struct kgdb_arch arch_kgdb_ops = {
401 .gdb_bpt_instr = {0xa1}, 424 .gdb_bpt_instr = {0xa1},
402#ifdef CONFIG_SMP
403 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
404#else
405 .flags = KGDB_HW_BREAKPOINT, 425 .flags = KGDB_HW_BREAKPOINT,
406#endif
407 .set_hw_breakpoint = bfin_set_hw_break, 426 .set_hw_breakpoint = bfin_set_hw_break,
408 .remove_hw_breakpoint = bfin_remove_hw_break, 427 .remove_hw_breakpoint = bfin_remove_hw_break,
428 .disable_hw_break = bfin_disable_hw_debug,
409 .remove_all_hw_break = bfin_remove_all_hw_break, 429 .remove_all_hw_break = bfin_remove_all_hw_break,
410 .correct_hw_break = bfin_correct_hw_break, 430 .correct_hw_break = bfin_correct_hw_break,
411}; 431};
@@ -447,6 +467,9 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
447int kgdb_arch_init(void) 467int kgdb_arch_init(void)
448{ 468{
449 kgdb_single_step = 0; 469 kgdb_single_step = 0;
470#ifdef CONFIG_IPIPE
471 kgdb_arch_imask = 0;
472#endif
450 473
451 bfin_remove_all_hw_break(); 474 bfin_remove_all_hw_break();
452 return 0; 475 return 0;
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index 9a4b07594389..2a6e9dbb62a5 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -88,12 +88,17 @@ static const struct file_operations kgdb_test_proc_fops = {
88 .owner = THIS_MODULE, 88 .owner = THIS_MODULE,
89 .read = kgdb_test_proc_read, 89 .read = kgdb_test_proc_read,
90 .write = kgdb_test_proc_write, 90 .write = kgdb_test_proc_write,
91 .llseek = noop_llseek,
91}; 92};
92 93
93static int __init kgdbtest_init(void) 94static int __init kgdbtest_init(void)
94{ 95{
95 struct proc_dir_entry *entry; 96 struct proc_dir_entry *entry;
96 97
98#if L2_LENGTH
99 num2 = 0;
100#endif
101
97 entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops); 102 entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops);
98 if (entry == NULL) 103 if (entry == NULL)
99 return -ENOMEM; 104 return -ENOMEM;
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index a6dfa6b71e63..35e350cad9d9 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -4,7 +4,7 @@
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later
5 */ 5 */
6 6
7#define pr_fmt(fmt) "module %s: " fmt 7#define pr_fmt(fmt) "module %s: " fmt, mod->name
8 8
9#include <linux/moduleloader.h> 9#include <linux/moduleloader.h>
10#include <linux/elf.h> 10#include <linux/elf.h>
@@ -57,8 +57,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
57 dest = l1_inst_sram_alloc(s->sh_size); 57 dest = l1_inst_sram_alloc(s->sh_size);
58 mod->arch.text_l1 = dest; 58 mod->arch.text_l1 = dest;
59 if (dest == NULL) { 59 if (dest == NULL) {
60 pr_err("L1 inst memory allocation failed\n", 60 pr_err("L1 inst memory allocation failed\n");
61 mod->name);
62 return -1; 61 return -1;
63 } 62 }
64 dma_memcpy(dest, (void *)s->sh_addr, s->sh_size); 63 dma_memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -70,8 +69,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
70 dest = l1_data_sram_alloc(s->sh_size); 69 dest = l1_data_sram_alloc(s->sh_size);
71 mod->arch.data_a_l1 = dest; 70 mod->arch.data_a_l1 = dest;
72 if (dest == NULL) { 71 if (dest == NULL) {
73 pr_err("L1 data memory allocation failed\n", 72 pr_err("L1 data memory allocation failed\n");
74 mod->name);
75 return -1; 73 return -1;
76 } 74 }
77 memcpy(dest, (void *)s->sh_addr, s->sh_size); 75 memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -83,8 +81,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
83 dest = l1_data_sram_zalloc(s->sh_size); 81 dest = l1_data_sram_zalloc(s->sh_size);
84 mod->arch.bss_a_l1 = dest; 82 mod->arch.bss_a_l1 = dest;
85 if (dest == NULL) { 83 if (dest == NULL) {
86 pr_err("L1 data memory allocation failed\n", 84 pr_err("L1 data memory allocation failed\n");
87 mod->name);
88 return -1; 85 return -1;
89 } 86 }
90 87
@@ -93,8 +90,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
93 dest = l1_data_B_sram_alloc(s->sh_size); 90 dest = l1_data_B_sram_alloc(s->sh_size);
94 mod->arch.data_b_l1 = dest; 91 mod->arch.data_b_l1 = dest;
95 if (dest == NULL) { 92 if (dest == NULL) {
96 pr_err("L1 data memory allocation failed\n", 93 pr_err("L1 data memory allocation failed\n");
97 mod->name);
98 return -1; 94 return -1;
99 } 95 }
100 memcpy(dest, (void *)s->sh_addr, s->sh_size); 96 memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -104,8 +100,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
104 dest = l1_data_B_sram_alloc(s->sh_size); 100 dest = l1_data_B_sram_alloc(s->sh_size);
105 mod->arch.bss_b_l1 = dest; 101 mod->arch.bss_b_l1 = dest;
106 if (dest == NULL) { 102 if (dest == NULL) {
107 pr_err("L1 data memory allocation failed\n", 103 pr_err("L1 data memory allocation failed\n");
108 mod->name);
109 return -1; 104 return -1;
110 } 105 }
111 memset(dest, 0, s->sh_size); 106 memset(dest, 0, s->sh_size);
@@ -117,8 +112,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
117 dest = l2_sram_alloc(s->sh_size); 112 dest = l2_sram_alloc(s->sh_size);
118 mod->arch.text_l2 = dest; 113 mod->arch.text_l2 = dest;
119 if (dest == NULL) { 114 if (dest == NULL) {
120 pr_err("L2 SRAM allocation failed\n", 115 pr_err("L2 SRAM allocation failed\n");
121 mod->name);
122 return -1; 116 return -1;
123 } 117 }
124 memcpy(dest, (void *)s->sh_addr, s->sh_size); 118 memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -130,8 +124,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
130 dest = l2_sram_alloc(s->sh_size); 124 dest = l2_sram_alloc(s->sh_size);
131 mod->arch.data_l2 = dest; 125 mod->arch.data_l2 = dest;
132 if (dest == NULL) { 126 if (dest == NULL) {
133 pr_err("L2 SRAM allocation failed\n", 127 pr_err("L2 SRAM allocation failed\n");
134 mod->name);
135 return -1; 128 return -1;
136 } 129 }
137 memcpy(dest, (void *)s->sh_addr, s->sh_size); 130 memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -143,8 +136,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
143 dest = l2_sram_zalloc(s->sh_size); 136 dest = l2_sram_zalloc(s->sh_size);
144 mod->arch.bss_l2 = dest; 137 mod->arch.bss_l2 = dest;
145 if (dest == NULL) { 138 if (dest == NULL) {
146 pr_err("L2 SRAM allocation failed\n", 139 pr_err("L2 SRAM allocation failed\n");
147 mod->name);
148 return -1; 140 return -1;
149 } 141 }
150 142
@@ -160,9 +152,9 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
160 152
161int 153int
162apply_relocate(Elf_Shdr * sechdrs, const char *strtab, 154apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
163 unsigned int symindex, unsigned int relsec, struct module *me) 155 unsigned int symindex, unsigned int relsec, struct module *mod)
164{ 156{
165 pr_err(".rel unsupported\n", me->name); 157 pr_err(".rel unsupported\n");
166 return -ENOEXEC; 158 return -ENOEXEC;
167} 159}
168 160
@@ -186,7 +178,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
186 Elf32_Sym *sym; 178 Elf32_Sym *sym;
187 unsigned long location, value, size; 179 unsigned long location, value, size;
188 180
189 pr_debug("applying relocate section %u to %u\n", mod->name, 181 pr_debug("applying relocate section %u to %u\n",
190 relsec, sechdrs[relsec].sh_info); 182 relsec, sechdrs[relsec].sh_info);
191 183
192 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { 184 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
@@ -203,14 +195,14 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
203 195
204#ifdef CONFIG_SMP 196#ifdef CONFIG_SMP
205 if (location >= COREB_L1_DATA_A_START) { 197 if (location >= COREB_L1_DATA_A_START) {
206 pr_err("cannot relocate in L1: %u (SMP kernel)", 198 pr_err("cannot relocate in L1: %u (SMP kernel)\n",
207 mod->name, ELF32_R_TYPE(rel[i].r_info)); 199 ELF32_R_TYPE(rel[i].r_info));
208 return -ENOEXEC; 200 return -ENOEXEC;
209 } 201 }
210#endif 202#endif
211 203
212 pr_debug("location is %lx, value is %lx type is %d\n", 204 pr_debug("location is %lx, value is %lx type is %d\n",
213 mod->name, location, value, ELF32_R_TYPE(rel[i].r_info)); 205 location, value, ELF32_R_TYPE(rel[i].r_info));
214 206
215 switch (ELF32_R_TYPE(rel[i].r_info)) { 207 switch (ELF32_R_TYPE(rel[i].r_info)) {
216 208
@@ -230,11 +222,11 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
230 case R_BFIN_PCREL12_JUMP_S: 222 case R_BFIN_PCREL12_JUMP_S:
231 case R_BFIN_PCREL10: 223 case R_BFIN_PCREL10:
232 pr_err("unsupported relocation: %u (no -mlong-calls?)\n", 224 pr_err("unsupported relocation: %u (no -mlong-calls?)\n",
233 mod->name, ELF32_R_TYPE(rel[i].r_info)); 225 ELF32_R_TYPE(rel[i].r_info));
234 return -ENOEXEC; 226 return -ENOEXEC;
235 227
236 default: 228 default:
237 pr_err("unknown relocation: %u\n", mod->name, 229 pr_err("unknown relocation: %u\n",
238 ELF32_R_TYPE(rel[i].r_info)); 230 ELF32_R_TYPE(rel[i].r_info));
239 return -ENOEXEC; 231 return -ENOEXEC;
240 } 232 }
@@ -251,8 +243,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
251 isram_memcpy((void *)location, &value, size); 243 isram_memcpy((void *)location, &value, size);
252 break; 244 break;
253 default: 245 default:
254 pr_err("invalid relocation for %#lx\n", 246 pr_err("invalid relocation for %#lx\n", location);
255 mod->name, location);
256 return -ENOEXEC; 247 return -ENOEXEC;
257 } 248 }
258 } 249 }
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
index 0b5f72f17fd0..679d0db35256 100644
--- a/arch/blackfin/kernel/nmi.c
+++ b/arch/blackfin/kernel/nmi.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/bitops.h> 13#include <linux/bitops.h>
14#include <linux/hardirq.h> 14#include <linux/hardirq.h>
15#include <linux/sysdev.h> 15#include <linux/syscore_ops.h>
16#include <linux/pm.h> 16#include <linux/pm.h>
17#include <linux/nmi.h> 17#include <linux/nmi.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
@@ -145,16 +145,16 @@ int check_nmi_wdt_touched(void)
145{ 145{
146 unsigned int this_cpu = smp_processor_id(); 146 unsigned int this_cpu = smp_processor_id();
147 unsigned int cpu; 147 unsigned int cpu;
148 cpumask_t mask;
148 149
149 cpumask_t mask = cpu_online_map; 150 cpumask_copy(&mask, cpu_online_mask);
150
151 if (!atomic_read(&nmi_touched[this_cpu])) 151 if (!atomic_read(&nmi_touched[this_cpu]))
152 return 0; 152 return 0;
153 153
154 atomic_set(&nmi_touched[this_cpu], 0); 154 atomic_set(&nmi_touched[this_cpu], 0);
155 155
156 cpu_clear(this_cpu, mask); 156 cpumask_clear_cpu(this_cpu, &mask);
157 for_each_cpu_mask(cpu, mask) { 157 for_each_cpu(cpu, &mask) {
158 invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]), 158 invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
159 (unsigned long)(&nmi_touched[cpu])); 159 (unsigned long)(&nmi_touched[cpu]));
160 if (!atomic_read(&nmi_touched[cpu])) 160 if (!atomic_read(&nmi_touched[cpu]))
@@ -196,43 +196,31 @@ void touch_nmi_watchdog(void)
196 196
197/* Suspend/resume support */ 197/* Suspend/resume support */
198#ifdef CONFIG_PM 198#ifdef CONFIG_PM
199static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state) 199static int nmi_wdt_suspend(void)
200{ 200{
201 nmi_wdt_stop(); 201 nmi_wdt_stop();
202 return 0; 202 return 0;
203} 203}
204 204
205static int nmi_wdt_resume(struct sys_device *dev) 205static void nmi_wdt_resume(void)
206{ 206{
207 if (nmi_active) 207 if (nmi_active)
208 nmi_wdt_start(); 208 nmi_wdt_start();
209 return 0;
210} 209}
211 210
212static struct sysdev_class nmi_sysclass = { 211static struct syscore_ops nmi_syscore_ops = {
213 .name = DRV_NAME,
214 .resume = nmi_wdt_resume, 212 .resume = nmi_wdt_resume,
215 .suspend = nmi_wdt_suspend, 213 .suspend = nmi_wdt_suspend,
216}; 214};
217 215
218static struct sys_device device_nmi_wdt = { 216static int __init init_nmi_wdt_syscore(void)
219 .id = 0,
220 .cls = &nmi_sysclass,
221};
222
223static int __init init_nmi_wdt_sysfs(void)
224{ 217{
225 int error; 218 if (nmi_active)
226 219 register_syscore_ops(&nmi_syscore_ops);
227 if (!nmi_active)
228 return 0;
229 220
230 error = sysdev_class_register(&nmi_sysclass); 221 return 0;
231 if (!error)
232 error = sysdev_register(&device_nmi_wdt);
233 return error;
234} 222}
235late_initcall(init_nmi_wdt_sysfs); 223late_initcall(init_nmi_wdt_syscore);
236 224
237#endif /* CONFIG_PM */ 225#endif /* CONFIG_PM */
238 226
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
new file mode 100644
index 000000000000..04300f29c0e7
--- /dev/null
+++ b/arch/blackfin/kernel/perf_event.c
@@ -0,0 +1,498 @@
1/*
2 * Blackfin performance counters
3 *
4 * Copyright 2011 Analog Devices Inc.
5 *
6 * Ripped from SuperH version:
7 *
8 * Copyright (C) 2009 Paul Mundt
9 *
10 * Heavily based on the x86 and PowerPC implementations.
11 *
12 * x86:
13 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
14 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
15 * Copyright (C) 2009 Jaswinder Singh Rajput
16 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
17 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
18 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
19 *
20 * ppc:
21 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
22 *
23 * Licensed under the GPL-2 or later.
24 */
25
26#include <linux/kernel.h>
27#include <linux/init.h>
28#include <linux/perf_event.h>
29#include <asm/bfin_pfmon.h>
30
31/*
32 * We have two counters, and each counter can support an event type.
33 * The 'o' is PFCNTx=1 and 's' is PFCNTx=0
34 *
35 * 0x04 o pc invariant branches
36 * 0x06 o mispredicted branches
37 * 0x09 o predicted branches taken
38 * 0x0B o EXCPT insn
39 * 0x0C o CSYNC/SSYNC insn
40 * 0x0D o Insns committed
41 * 0x0E o Interrupts taken
42 * 0x0F o Misaligned address exceptions
43 * 0x80 o Code memory fetches stalled due to DMA
44 * 0x83 o 64bit insn fetches delivered
45 * 0x9A o data cache fills (bank a)
46 * 0x9B o data cache fills (bank b)
47 * 0x9C o data cache lines evicted (bank a)
48 * 0x9D o data cache lines evicted (bank b)
49 * 0x9E o data cache high priority fills
50 * 0x9F o data cache low priority fills
51 * 0x00 s loop 0 iterations
52 * 0x01 s loop 1 iterations
53 * 0x0A s CSYNC/SSYNC stalls
54 * 0x10 s DAG read/after write hazards
55 * 0x13 s RAW data hazards
56 * 0x81 s code TAG stalls
57 * 0x82 s code fill stalls
58 * 0x90 s processor to memory stalls
59 * 0x91 s data memory stalls not hidden by 0x90
60 * 0x92 s data store buffer full stalls
61 * 0x93 s data memory write buffer full stalls due to high->low priority
62 * 0x95 s data memory fill buffer stalls
63 * 0x96 s data TAG collision stalls
64 * 0x97 s data collision stalls
65 * 0x98 s data stalls
66 * 0x99 s data stalls sent to processor
67 */
68
69static const int event_map[] = {
70 /* use CYCLES cpu register */
71 [PERF_COUNT_HW_CPU_CYCLES] = -1,
72 [PERF_COUNT_HW_INSTRUCTIONS] = 0x0D,
73 [PERF_COUNT_HW_CACHE_REFERENCES] = -1,
74 [PERF_COUNT_HW_CACHE_MISSES] = 0x83,
75 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x09,
76 [PERF_COUNT_HW_BRANCH_MISSES] = 0x06,
77 [PERF_COUNT_HW_BUS_CYCLES] = -1,
78};
79
80#define C(x) PERF_COUNT_HW_CACHE_##x
81
82static const int cache_events[PERF_COUNT_HW_CACHE_MAX]
83 [PERF_COUNT_HW_CACHE_OP_MAX]
84 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
85{
86 [C(L1D)] = { /* Data bank A */
87 [C(OP_READ)] = {
88 [C(RESULT_ACCESS)] = 0,
89 [C(RESULT_MISS) ] = 0x9A,
90 },
91 [C(OP_WRITE)] = {
92 [C(RESULT_ACCESS)] = 0,
93 [C(RESULT_MISS) ] = 0,
94 },
95 [C(OP_PREFETCH)] = {
96 [C(RESULT_ACCESS)] = 0,
97 [C(RESULT_MISS) ] = 0,
98 },
99 },
100
101 [C(L1I)] = {
102 [C(OP_READ)] = {
103 [C(RESULT_ACCESS)] = 0,
104 [C(RESULT_MISS) ] = 0x83,
105 },
106 [C(OP_WRITE)] = {
107 [C(RESULT_ACCESS)] = -1,
108 [C(RESULT_MISS) ] = -1,
109 },
110 [C(OP_PREFETCH)] = {
111 [C(RESULT_ACCESS)] = 0,
112 [C(RESULT_MISS) ] = 0,
113 },
114 },
115
116 [C(LL)] = {
117 [C(OP_READ)] = {
118 [C(RESULT_ACCESS)] = -1,
119 [C(RESULT_MISS) ] = -1,
120 },
121 [C(OP_WRITE)] = {
122 [C(RESULT_ACCESS)] = -1,
123 [C(RESULT_MISS) ] = -1,
124 },
125 [C(OP_PREFETCH)] = {
126 [C(RESULT_ACCESS)] = -1,
127 [C(RESULT_MISS) ] = -1,
128 },
129 },
130
131 [C(DTLB)] = {
132 [C(OP_READ)] = {
133 [C(RESULT_ACCESS)] = -1,
134 [C(RESULT_MISS) ] = -1,
135 },
136 [C(OP_WRITE)] = {
137 [C(RESULT_ACCESS)] = -1,
138 [C(RESULT_MISS) ] = -1,
139 },
140 [C(OP_PREFETCH)] = {
141 [C(RESULT_ACCESS)] = -1,
142 [C(RESULT_MISS) ] = -1,
143 },
144 },
145
146 [C(ITLB)] = {
147 [C(OP_READ)] = {
148 [C(RESULT_ACCESS)] = -1,
149 [C(RESULT_MISS) ] = -1,
150 },
151 [C(OP_WRITE)] = {
152 [C(RESULT_ACCESS)] = -1,
153 [C(RESULT_MISS) ] = -1,
154 },
155 [C(OP_PREFETCH)] = {
156 [C(RESULT_ACCESS)] = -1,
157 [C(RESULT_MISS) ] = -1,
158 },
159 },
160
161 [C(BPU)] = {
162 [C(OP_READ)] = {
163 [C(RESULT_ACCESS)] = -1,
164 [C(RESULT_MISS) ] = -1,
165 },
166 [C(OP_WRITE)] = {
167 [C(RESULT_ACCESS)] = -1,
168 [C(RESULT_MISS) ] = -1,
169 },
170 [C(OP_PREFETCH)] = {
171 [C(RESULT_ACCESS)] = -1,
172 [C(RESULT_MISS) ] = -1,
173 },
174 },
175};
176
177const char *perf_pmu_name(void)
178{
179 return "bfin";
180}
181EXPORT_SYMBOL(perf_pmu_name);
182
183int perf_num_counters(void)
184{
185 return ARRAY_SIZE(event_map);
186}
187EXPORT_SYMBOL(perf_num_counters);
188
189static u64 bfin_pfmon_read(int idx)
190{
191 return bfin_read32(PFCNTR0 + (idx * 4));
192}
193
194static void bfin_pfmon_disable(struct hw_perf_event *hwc, int idx)
195{
196 bfin_write_PFCTL(bfin_read_PFCTL() & ~PFCEN(idx, PFCEN_MASK));
197}
198
199static void bfin_pfmon_enable(struct hw_perf_event *hwc, int idx)
200{
201 u32 val, mask;
202
203 val = PFPWR;
204 if (idx) {
205 mask = ~(PFCNT1 | PFMON1 | PFCEN1 | PEMUSW1);
206 /* The packed config is for event0, so shift it to event1 slots */
207 val |= (hwc->config << (PFMON1_P - PFMON0_P));
208 val |= (hwc->config & PFCNT0) << (PFCNT1_P - PFCNT0_P);
209 bfin_write_PFCNTR1(0);
210 } else {
211 mask = ~(PFCNT0 | PFMON0 | PFCEN0 | PEMUSW0);
212 val |= hwc->config;
213 bfin_write_PFCNTR0(0);
214 }
215
216 bfin_write_PFCTL((bfin_read_PFCTL() & mask) | val);
217}
218
219static void bfin_pfmon_disable_all(void)
220{
221 bfin_write_PFCTL(bfin_read_PFCTL() & ~PFPWR);
222}
223
224static void bfin_pfmon_enable_all(void)
225{
226 bfin_write_PFCTL(bfin_read_PFCTL() | PFPWR);
227}
228
229struct cpu_hw_events {
230 struct perf_event *events[MAX_HWEVENTS];
231 unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
232};
233DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
234
235static int hw_perf_cache_event(int config, int *evp)
236{
237 unsigned long type, op, result;
238 int ev;
239
240 /* unpack config */
241 type = config & 0xff;
242 op = (config >> 8) & 0xff;
243 result = (config >> 16) & 0xff;
244
245 if (type >= PERF_COUNT_HW_CACHE_MAX ||
246 op >= PERF_COUNT_HW_CACHE_OP_MAX ||
247 result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
248 return -EINVAL;
249
250 ev = cache_events[type][op][result];
251 if (ev == 0)
252 return -EOPNOTSUPP;
253 if (ev == -1)
254 return -EINVAL;
255 *evp = ev;
256 return 0;
257}
258
259static void bfin_perf_event_update(struct perf_event *event,
260 struct hw_perf_event *hwc, int idx)
261{
262 u64 prev_raw_count, new_raw_count;
263 s64 delta;
264 int shift = 0;
265
266 /*
267 * Depending on the counter configuration, they may or may not
268 * be chained, in which case the previous counter value can be
269 * updated underneath us if the lower-half overflows.
270 *
271 * Our tactic to handle this is to first atomically read and
272 * exchange a new raw count - then add that new-prev delta
273 * count to the generic counter atomically.
274 *
275 * As there is no interrupt associated with the overflow events,
276 * this is the simplest approach for maintaining consistency.
277 */
278again:
279 prev_raw_count = local64_read(&hwc->prev_count);
280 new_raw_count = bfin_pfmon_read(idx);
281
282 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
283 new_raw_count) != prev_raw_count)
284 goto again;
285
286 /*
287 * Now we have the new raw value and have updated the prev
288 * timestamp already. We can now calculate the elapsed delta
289 * (counter-)time and add that to the generic counter.
290 *
291 * Careful, not all hw sign-extends above the physical width
292 * of the count.
293 */
294 delta = (new_raw_count << shift) - (prev_raw_count << shift);
295 delta >>= shift;
296
297 local64_add(delta, &event->count);
298}
299
300static void bfin_pmu_stop(struct perf_event *event, int flags)
301{
302 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
303 struct hw_perf_event *hwc = &event->hw;
304 int idx = hwc->idx;
305
306 if (!(event->hw.state & PERF_HES_STOPPED)) {
307 bfin_pfmon_disable(hwc, idx);
308 cpuc->events[idx] = NULL;
309 event->hw.state |= PERF_HES_STOPPED;
310 }
311
312 if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
313 bfin_perf_event_update(event, &event->hw, idx);
314 event->hw.state |= PERF_HES_UPTODATE;
315 }
316}
317
318static void bfin_pmu_start(struct perf_event *event, int flags)
319{
320 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
321 struct hw_perf_event *hwc = &event->hw;
322 int idx = hwc->idx;
323
324 if (WARN_ON_ONCE(idx == -1))
325 return;
326
327 if (flags & PERF_EF_RELOAD)
328 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
329
330 cpuc->events[idx] = event;
331 event->hw.state = 0;
332 bfin_pfmon_enable(hwc, idx);
333}
334
335static void bfin_pmu_del(struct perf_event *event, int flags)
336{
337 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
338
339 bfin_pmu_stop(event, PERF_EF_UPDATE);
340 __clear_bit(event->hw.idx, cpuc->used_mask);
341
342 perf_event_update_userpage(event);
343}
344
345static int bfin_pmu_add(struct perf_event *event, int flags)
346{
347 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
348 struct hw_perf_event *hwc = &event->hw;
349 int idx = hwc->idx;
350 int ret = -EAGAIN;
351
352 perf_pmu_disable(event->pmu);
353
354 if (__test_and_set_bit(idx, cpuc->used_mask)) {
355 idx = find_first_zero_bit(cpuc->used_mask, MAX_HWEVENTS);
356 if (idx == MAX_HWEVENTS)
357 goto out;
358
359 __set_bit(idx, cpuc->used_mask);
360 hwc->idx = idx;
361 }
362
363 bfin_pfmon_disable(hwc, idx);
364
365 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
366 if (flags & PERF_EF_START)
367 bfin_pmu_start(event, PERF_EF_RELOAD);
368
369 perf_event_update_userpage(event);
370 ret = 0;
371out:
372 perf_pmu_enable(event->pmu);
373 return ret;
374}
375
376static void bfin_pmu_read(struct perf_event *event)
377{
378 bfin_perf_event_update(event, &event->hw, event->hw.idx);
379}
380
381static int bfin_pmu_event_init(struct perf_event *event)
382{
383 struct perf_event_attr *attr = &event->attr;
384 struct hw_perf_event *hwc = &event->hw;
385 int config = -1;
386 int ret;
387
388 if (attr->exclude_hv || attr->exclude_idle)
389 return -EPERM;
390
391 /*
392 * All of the on-chip counters are "limited", in that they have
393 * no interrupts, and are therefore unable to do sampling without
394 * further work and timer assistance.
395 */
396 if (hwc->sample_period)
397 return -EINVAL;
398
399 ret = 0;
400 switch (attr->type) {
401 case PERF_TYPE_RAW:
402 config = PFMON(0, attr->config & PFMON_MASK) |
403 PFCNT(0, !(attr->config & 0x100));
404 break;
405 case PERF_TYPE_HW_CACHE:
406 ret = hw_perf_cache_event(attr->config, &config);
407 break;
408 case PERF_TYPE_HARDWARE:
409 if (attr->config >= ARRAY_SIZE(event_map))
410 return -EINVAL;
411
412 config = event_map[attr->config];
413 break;
414 }
415
416 if (config == -1)
417 return -EINVAL;
418
419 if (!attr->exclude_kernel)
420 config |= PFCEN(0, PFCEN_ENABLE_SUPV);
421 if (!attr->exclude_user)
422 config |= PFCEN(0, PFCEN_ENABLE_USER);
423
424 hwc->config |= config;
425
426 return ret;
427}
428
429static void bfin_pmu_enable(struct pmu *pmu)
430{
431 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
432 struct perf_event *event;
433 struct hw_perf_event *hwc;
434 int i;
435
436 for (i = 0; i < MAX_HWEVENTS; ++i) {
437 event = cpuc->events[i];
438 if (!event)
439 continue;
440 hwc = &event->hw;
441 bfin_pfmon_enable(hwc, hwc->idx);
442 }
443
444 bfin_pfmon_enable_all();
445}
446
447static void bfin_pmu_disable(struct pmu *pmu)
448{
449 bfin_pfmon_disable_all();
450}
451
452static struct pmu pmu = {
453 .pmu_enable = bfin_pmu_enable,
454 .pmu_disable = bfin_pmu_disable,
455 .event_init = bfin_pmu_event_init,
456 .add = bfin_pmu_add,
457 .del = bfin_pmu_del,
458 .start = bfin_pmu_start,
459 .stop = bfin_pmu_stop,
460 .read = bfin_pmu_read,
461};
462
463static void bfin_pmu_setup(int cpu)
464{
465 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
466
467 memset(cpuhw, 0, sizeof(struct cpu_hw_events));
468}
469
470static int __cpuinit
471bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
472{
473 unsigned int cpu = (long)hcpu;
474
475 switch (action & ~CPU_TASKS_FROZEN) {
476 case CPU_UP_PREPARE:
477 bfin_write_PFCTL(0);
478 bfin_pmu_setup(cpu);
479 break;
480
481 default:
482 break;
483 }
484
485 return NOTIFY_OK;
486}
487
488static int __init bfin_pmu_init(void)
489{
490 int ret;
491
492 ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
493 if (!ret)
494 perf_cpu_notifier(bfin_pmu_notifier);
495
496 return ret;
497}
498early_initcall(bfin_pmu_init);
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 01f98cb964d2..6a660fa921b5 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -7,7 +7,6 @@
7 */ 7 */
8 8
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/smp_lock.h>
11#include <linux/unistd.h> 10#include <linux/unistd.h>
12#include <linux/user.h> 11#include <linux/user.h>
13#include <linux/uaccess.h> 12#include <linux/uaccess.h>
@@ -65,11 +64,11 @@ static void default_idle(void)
65#ifdef CONFIG_IPIPE 64#ifdef CONFIG_IPIPE
66 ipipe_suspend_domain(); 65 ipipe_suspend_domain();
67#endif 66#endif
68 local_irq_disable_hw(); 67 hard_local_irq_disable();
69 if (!need_resched()) 68 if (!need_resched())
70 idle_with_irq_disabled(); 69 idle_with_irq_disabled();
71 70
72 local_irq_enable_hw(); 71 hard_local_irq_enable();
73} 72}
74 73
75/* 74/*
@@ -172,10 +171,8 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
172 unsigned long newsp; 171 unsigned long newsp;
173 172
174#ifdef __ARCH_SYNC_CORE_DCACHE 173#ifdef __ARCH_SYNC_CORE_DCACHE
175 if (current->rt.nr_cpus_allowed == num_possible_cpus()) { 174 if (current->rt.nr_cpus_allowed == num_possible_cpus())
176 current->cpus_allowed = cpumask_of_cpu(smp_processor_id()); 175 set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
177 current->rt.nr_cpus_allowed = 1;
178 }
179#endif 176#endif
180 177
181 /* syscall2 puts clone_flags in r0 and usp in r1 */ 178 /* syscall2 puts clone_flags in r0 and usp in r1 */
@@ -493,6 +490,11 @@ int _access_ok(unsigned long addr, unsigned long size)
493 return 1; 490 return 1;
494#endif 491#endif
495 492
493#ifndef CONFIG_EXCEPTION_L1_SCRATCH
494 if (in_mem_const(addr, size, (unsigned long)l1_stack_base, l1_stack_len))
495 return 1;
496#endif
497
496 aret = in_async(addr, size); 498 aret = in_async(addr, size);
497 if (aret < 2) 499 if (aret < 2)
498 return aret; 500 return aret;
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 6ec77685df52..75089f80855d 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -27,6 +27,7 @@
27#include <asm/fixed_code.h> 27#include <asm/fixed_code.h>
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include <asm/mem_map.h> 29#include <asm/mem_map.h>
30#include <asm/mmu_context.h>
30 31
31/* 32/*
32 * does not yet catch signals sent when the child dies. 33 * does not yet catch signals sent when the child dies.
@@ -37,12 +38,13 @@
37 * Get contents of register REGNO in task TASK. 38 * Get contents of register REGNO in task TASK.
38 */ 39 */
39static inline long 40static inline long
40get_reg(struct task_struct *task, long regno, unsigned long __user *datap) 41get_reg(struct task_struct *task, unsigned long regno,
42 unsigned long __user *datap)
41{ 43{
42 long tmp; 44 long tmp;
43 struct pt_regs *regs = task_pt_regs(task); 45 struct pt_regs *regs = task_pt_regs(task);
44 46
45 if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0) 47 if (regno & 3 || regno > PT_LAST_PSEUDO)
46 return -EIO; 48 return -EIO;
47 49
48 switch (regno) { 50 switch (regno) {
@@ -73,11 +75,11 @@ get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
73 * Write contents of register REGNO in task TASK. 75 * Write contents of register REGNO in task TASK.
74 */ 76 */
75static inline int 77static inline int
76put_reg(struct task_struct *task, long regno, unsigned long data) 78put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
77{ 79{
78 struct pt_regs *regs = task_pt_regs(task); 80 struct pt_regs *regs = task_pt_regs(task);
79 81
80 if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0) 82 if (regno & 3 || regno > PT_LAST_PSEUDO)
81 return -EIO; 83 return -EIO;
82 84
83 switch (regno) { 85 switch (regno) {
@@ -113,8 +115,8 @@ put_reg(struct task_struct *task, long regno, unsigned long data)
113/* 115/*
114 * check that an address falls within the bounds of the target process's memory mappings 116 * check that an address falls within the bounds of the target process's memory mappings
115 */ 117 */
116static inline int is_user_addr_valid(struct task_struct *child, 118int
117 unsigned long start, unsigned long len) 119is_user_addr_valid(struct task_struct *child, unsigned long start, unsigned long len)
118{ 120{
119 struct vm_area_struct *vma; 121 struct vm_area_struct *vma;
120 struct sram_list_struct *sraml; 122 struct sram_list_struct *sraml;
@@ -135,6 +137,13 @@ static inline int is_user_addr_valid(struct task_struct *child,
135 if (start >= FIXED_CODE_START && start + len < FIXED_CODE_END) 137 if (start >= FIXED_CODE_START && start + len < FIXED_CODE_END)
136 return 0; 138 return 0;
137 139
140#ifdef CONFIG_APP_STACK_L1
141 if (child->mm->context.l1_stack_save)
142 if (start >= (unsigned long)l1_stack_base &&
143 start + len < (unsigned long)l1_stack_base + l1_stack_len)
144 return 0;
145#endif
146
138 return -EIO; 147 return -EIO;
139} 148}
140 149
@@ -232,7 +241,8 @@ void user_disable_single_step(struct task_struct *child)
232 clear_tsk_thread_flag(child, TIF_SINGLESTEP); 241 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
233} 242}
234 243
235long arch_ptrace(struct task_struct *child, long request, long addr, long data) 244long arch_ptrace(struct task_struct *child, long request,
245 unsigned long addr, unsigned long data)
236{ 246{
237 int ret; 247 int ret;
238 unsigned long __user *datap = (unsigned long __user *)data; 248 unsigned long __user *datap = (unsigned long __user *)data;
@@ -360,14 +370,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
360 return copy_regset_to_user(child, &user_bfin_native_view, 370 return copy_regset_to_user(child, &user_bfin_native_view,
361 REGSET_GENERAL, 371 REGSET_GENERAL,
362 0, sizeof(struct pt_regs), 372 0, sizeof(struct pt_regs),
363 (void __user *)data); 373 datap);
364 374
365 case PTRACE_SETREGS: 375 case PTRACE_SETREGS:
366 pr_debug("ptrace: PTRACE_SETREGS\n"); 376 pr_debug("ptrace: PTRACE_SETREGS\n");
367 return copy_regset_from_user(child, &user_bfin_native_view, 377 return copy_regset_from_user(child, &user_bfin_native_view,
368 REGSET_GENERAL, 378 REGSET_GENERAL,
369 0, sizeof(struct pt_regs), 379 0, sizeof(struct pt_regs),
370 (const void __user *)data); 380 datap);
371 381
372 case_default: 382 case_default:
373 default: 383 default:
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index 53d08dee8531..488bdc51aaa5 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -23,6 +23,9 @@
23__attribute__ ((__l1_text__, __noreturn__)) 23__attribute__ ((__l1_text__, __noreturn__))
24static void bfin_reset(void) 24static void bfin_reset(void)
25{ 25{
26 if (!ANOMALY_05000353 && !ANOMALY_05000386)
27 bfrom_SoftReset((void *)(L1_SCRATCH_START + L1_SCRATCH_LENGTH - 20));
28
26 /* Wait for completion of "system" events such as cache line 29 /* Wait for completion of "system" events such as cache line
27 * line fills so that we avoid infinite stalls later on as 30 * line fills so that we avoid infinite stalls later on as
28 * much as possible. This code is in L1, so it won't trigger 31 * much as possible. This code is in L1, so it won't trigger
@@ -30,46 +33,40 @@ static void bfin_reset(void)
30 */ 33 */
31 __builtin_bfin_ssync(); 34 __builtin_bfin_ssync();
32 35
33 /* The bootrom checks to see how it was reset and will 36 /* Initiate System software reset. */
34 * automatically perform a software reset for us when 37 bfin_write_SWRST(0x7);
35 * it starts executing after the core reset.
36 */
37 if (ANOMALY_05000353 || ANOMALY_05000386) {
38 /* Initiate System software reset. */
39 bfin_write_SWRST(0x7);
40 38
41 /* Due to the way reset is handled in the hardware, we need 39 /* Due to the way reset is handled in the hardware, we need
42 * to delay for 10 SCLKS. The only reliable way to do this is 40 * to delay for 10 SCLKS. The only reliable way to do this is
43 * to calculate the CCLK/SCLK ratio and multiply 10. For now, 41 * to calculate the CCLK/SCLK ratio and multiply 10. For now,
44 * we'll assume worse case which is a 1:15 ratio. 42 * we'll assume worse case which is a 1:15 ratio.
45 */ 43 */
46 asm( 44 asm(
47 "LSETUP (1f, 1f) LC0 = %0\n" 45 "LSETUP (1f, 1f) LC0 = %0\n"
48 "1: nop;" 46 "1: nop;"
49 : 47 :
50 : "a" (15 * 10) 48 : "a" (15 * 10)
51 : "LC0", "LB0", "LT0" 49 : "LC0", "LB0", "LT0"
52 ); 50 );
53 51
54 /* Clear System software reset */ 52 /* Clear System software reset */
55 bfin_write_SWRST(0); 53 bfin_write_SWRST(0);
56 54
57 /* The BF526 ROM will crash during reset */ 55 /* The BF526 ROM will crash during reset */
58#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__) 56#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
59 bfin_read_SWRST(); 57 bfin_read_SWRST();
60#endif 58#endif
61 59
62 /* Wait for the SWRST write to complete. Cannot rely on SSYNC 60 /* Wait for the SWRST write to complete. Cannot rely on SSYNC
63 * though as the System state is all reset now. 61 * though as the System state is all reset now.
64 */ 62 */
65 asm( 63 asm(
66 "LSETUP (1f, 1f) LC1 = %0\n" 64 "LSETUP (1f, 1f) LC1 = %0\n"
67 "1: nop;" 65 "1: nop;"
68 : 66 :
69 : "a" (15 * 1) 67 : "a" (15 * 1)
70 : "LC1", "LB1", "LT1" 68 : "LC1", "LB1", "LT1"
71 ); 69 );
72 }
73 70
74 while (1) 71 while (1)
75 /* Issue core reset */ 72 /* Issue core reset */
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index ac71dc15cbdb..536bd9d7e0cf 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -29,6 +29,7 @@
29#include <asm/cpu.h> 29#include <asm/cpu.h>
30#include <asm/fixed_code.h> 30#include <asm/fixed_code.h>
31#include <asm/early_printk.h> 31#include <asm/early_printk.h>
32#include <asm/irq_handler.h>
32 33
33u16 _bfin_swrst; 34u16 _bfin_swrst;
34EXPORT_SYMBOL(_bfin_swrst); 35EXPORT_SYMBOL(_bfin_swrst);
@@ -105,6 +106,8 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
105 bfin_dcache_init(dcplb_tbl[cpu]); 106 bfin_dcache_init(dcplb_tbl[cpu]);
106#endif 107#endif
107 108
109 bfin_setup_cpudata(cpu);
110
108 /* 111 /*
109 * In cache coherence emulation mode, we need to have the 112 * In cache coherence emulation mode, we need to have the
110 * D-cache enabled before running any atomic operation which 113 * D-cache enabled before running any atomic operation which
@@ -163,7 +166,6 @@ void __cpuinit bfin_setup_cpudata(unsigned int cpu)
163{ 166{
164 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu); 167 struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
165 168
166 cpudata->idle = current;
167 cpudata->imemctl = bfin_read_IMEM_CONTROL(); 169 cpudata->imemctl = bfin_read_IMEM_CONTROL();
168 cpudata->dmemctl = bfin_read_DMEM_CONTROL(); 170 cpudata->dmemctl = bfin_read_DMEM_CONTROL();
169} 171}
@@ -215,11 +217,48 @@ void __init bfin_relocate_l1_mem(void)
215 217
216 early_dma_memcpy_done(); 218 early_dma_memcpy_done();
217 219
220#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
221 blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
222#endif
223
218 /* if necessary, copy L2 text/data to L2 SRAM */ 224 /* if necessary, copy L2 text/data to L2 SRAM */
219 if (L2_LENGTH && l2_len) 225 if (L2_LENGTH && l2_len)
220 memcpy(_stext_l2, _l2_lma, l2_len); 226 memcpy(_stext_l2, _l2_lma, l2_len);
221} 227}
222 228
229#ifdef CONFIG_SMP
230void __init bfin_relocate_coreb_l1_mem(void)
231{
232 unsigned long text_l1_len = (unsigned long)_text_l1_len;
233 unsigned long data_l1_len = (unsigned long)_data_l1_len;
234 unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
235
236 blackfin_dma_early_init();
237
238 /* if necessary, copy L1 text to L1 instruction SRAM */
239 if (L1_CODE_LENGTH && text_l1_len)
240 early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
241 text_l1_len);
242
243 /* if necessary, copy L1 data to L1 data bank A SRAM */
244 if (L1_DATA_A_LENGTH && data_l1_len)
245 early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
246 data_l1_len);
247
248 /* if necessary, copy L1 data B to L1 data bank B SRAM */
249 if (L1_DATA_B_LENGTH && data_b_l1_len)
250 early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
251 data_b_l1_len);
252
253 early_dma_memcpy_done();
254
255#ifdef CONFIG_ICACHE_FLUSH_L1
256 blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
257 (unsigned long)_stext_l1 + COREB_L1_CODE_START;
258#endif
259}
260#endif
261
223#ifdef CONFIG_ROMKERNEL 262#ifdef CONFIG_ROMKERNEL
224void __init bfin_relocate_xip_data(void) 263void __init bfin_relocate_xip_data(void)
225{ 264{
@@ -814,6 +853,7 @@ void __init native_machine_early_platform_add_devices(void)
814 853
815void __init setup_arch(char **cmdline_p) 854void __init setup_arch(char **cmdline_p)
816{ 855{
856 u32 mmr;
817 unsigned long sclk, cclk; 857 unsigned long sclk, cclk;
818 858
819 native_machine_early_platform_add_devices(); 859 native_machine_early_platform_add_devices();
@@ -865,10 +905,10 @@ void __init setup_arch(char **cmdline_p)
865 bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL); 905 bfin_write_EBIU_FCTL(CONFIG_EBIU_FCTLVAL);
866#endif 906#endif
867#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL 907#ifdef CONFIG_BFIN_HYSTERESIS_CONTROL
868 bfin_write_PORTF_HYSTERISIS(HYST_PORTF_0_15); 908 bfin_write_PORTF_HYSTERESIS(HYST_PORTF_0_15);
869 bfin_write_PORTG_HYSTERISIS(HYST_PORTG_0_15); 909 bfin_write_PORTG_HYSTERESIS(HYST_PORTG_0_15);
870 bfin_write_PORTH_HYSTERISIS(HYST_PORTH_0_15); 910 bfin_write_PORTH_HYSTERESIS(HYST_PORTH_0_15);
871 bfin_write_MISCPORT_HYSTERISIS((bfin_read_MISCPORT_HYSTERISIS() & 911 bfin_write_MISCPORT_HYSTERESIS((bfin_read_MISCPORT_HYSTERESIS() &
872 ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO); 912 ~HYST_NONEGPIO_MASK) | HYST_NONEGPIO);
873#endif 913#endif
874 914
@@ -884,17 +924,14 @@ void __init setup_arch(char **cmdline_p)
884 bfin_read_IMDMA_D1_IRQ_STATUS(); 924 bfin_read_IMDMA_D1_IRQ_STATUS();
885 } 925 }
886#endif 926#endif
887 printk(KERN_INFO "Hardware Trace ");
888 if (bfin_read_TBUFCTL() & 0x1)
889 printk(KERN_CONT "Active ");
890 else
891 printk(KERN_CONT "Off ");
892 if (bfin_read_TBUFCTL() & 0x2)
893 printk(KERN_CONT "and Enabled\n");
894 else
895 printk(KERN_CONT "and Disabled\n");
896 927
897 printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF); 928 mmr = bfin_read_TBUFCTL();
929 printk(KERN_INFO "Hardware Trace %s and %sabled\n",
930 (mmr & 0x1) ? "active" : "off",
931 (mmr & 0x2) ? "en" : "dis");
932
933 mmr = bfin_read_SYSCR();
934 printk(KERN_INFO "Boot Mode: %i\n", mmr & 0xF);
898 935
899 /* Newer parts mirror SWRST bits in SYSCR */ 936 /* Newer parts mirror SWRST bits in SYSCR */
900#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \ 937#if defined(CONFIG_BF53x) || defined(CONFIG_BF561) || \
@@ -902,7 +939,7 @@ void __init setup_arch(char **cmdline_p)
902 _bfin_swrst = bfin_read_SWRST(); 939 _bfin_swrst = bfin_read_SWRST();
903#else 940#else
904 /* Clear boot mode field */ 941 /* Clear boot mode field */
905 _bfin_swrst = bfin_read_SYSCR() & ~0xf; 942 _bfin_swrst = mmr & ~0xf;
906#endif 943#endif
907 944
908#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT 945#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -999,8 +1036,6 @@ void __init setup_arch(char **cmdline_p)
999static int __init topology_init(void) 1036static int __init topology_init(void)
1000{ 1037{
1001 unsigned int cpu; 1038 unsigned int cpu;
1002 /* Record CPU-private information for the boot processor. */
1003 bfin_setup_cpudata(0);
1004 1039
1005 for_each_possible_cpu(cpu) { 1040 for_each_possible_cpu(cpu) {
1006 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu); 1041 register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
@@ -1246,12 +1281,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1246 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, 1281 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
1247 BFIN_DLINES); 1282 BFIN_DLINES);
1248#ifdef __ARCH_SYNC_CORE_DCACHE 1283#ifdef __ARCH_SYNC_CORE_DCACHE
1249 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]); 1284 seq_printf(m, "dcache flushes\t: %lu\n", dcache_invld_count[cpu_num]);
1250#endif 1285#endif
1251#ifdef __ARCH_SYNC_CORE_ICACHE 1286#ifdef __ARCH_SYNC_CORE_ICACHE
1252 seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]); 1287 seq_printf(m, "icache flushes\t: %lu\n", icache_invld_count[cpu_num]);
1253#endif 1288#endif
1254 1289
1290 seq_printf(m, "\n");
1291
1255 if (cpu_num != num_possible_cpus() - 1) 1292 if (cpu_num != num_possible_cpus() - 1)
1256 return 0; 1293 return 0;
1257 1294
@@ -1275,13 +1312,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1275 " in data cache\n"); 1312 " in data cache\n");
1276 } 1313 }
1277 seq_printf(m, "board name\t: %s\n", bfin_board_name); 1314 seq_printf(m, "board name\t: %s\n", bfin_board_name);
1278 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", 1315 seq_printf(m, "board memory\t: %ld kB (0x%08lx -> 0x%08lx)\n",
1279 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); 1316 physical_mem_end >> 10, 0ul, physical_mem_end);
1280 seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", 1317 seq_printf(m, "kernel memory\t: %d kB (0x%08lx -> 0x%08lx)\n",
1281 ((int)memory_end - (int)_rambase) >> 10, 1318 ((int)memory_end - (int)_rambase) >> 10,
1282 (void *)_rambase, 1319 _rambase, memory_end);
1283 (void *)memory_end);
1284 seq_printf(m, "\n");
1285 1320
1286 return 0; 1321 return 0;
1287} 1322}
@@ -1289,7 +1324,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1289static void *c_start(struct seq_file *m, loff_t *pos) 1324static void *c_start(struct seq_file *m, loff_t *pos)
1290{ 1325{
1291 if (*pos == 0) 1326 if (*pos == 0)
1292 *pos = first_cpu(cpu_online_map); 1327 *pos = cpumask_first(cpu_online_mask);
1293 if (*pos >= num_online_cpus()) 1328 if (*pos >= num_online_cpus())
1294 return NULL; 1329 return NULL;
1295 1330
@@ -1298,7 +1333,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
1298 1333
1299static void *c_next(struct seq_file *m, void *v, loff_t *pos) 1334static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1300{ 1335{
1301 *pos = next_cpu(*pos, cpu_online_map); 1336 *pos = cpumask_next(*pos, cpu_online_mask);
1302 1337
1303 return c_start(m, pos); 1338 return c_start(m, pos);
1304} 1339}
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index bdc1e2f0da32..89448ed7065d 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -21,6 +21,8 @@
21 21
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
23#include <asm/dma.h> 23#include <asm/dma.h>
24#include <asm/cachectl.h>
25#include <asm/ptrace.h>
24 26
25asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags) 27asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
26{ 28{
@@ -70,3 +72,16 @@ asmlinkage int sys_bfin_spinlock(int *p)
70 72
71 return ret; 73 return ret;
72} 74}
75
76SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len, int, op)
77{
78 if (is_user_addr_valid(current, addr, len) != 0)
79 return -EINVAL;
80
81 if (op & DCACHE)
82 blackfin_dcache_flush_range(addr, addr + len);
83 if (op & ICACHE)
84 blackfin_icache_flush_range(addr, addr + len);
85
86 return 0;
87}
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 8c9a43daf80f..9e9b60d969dc 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -23,29 +23,6 @@
23#include <asm/gptimers.h> 23#include <asm/gptimers.h>
24#include <asm/nmi.h> 24#include <asm/nmi.h>
25 25
26/* Accelerators for sched_clock()
27 * convert from cycles(64bits) => nanoseconds (64bits)
28 * basic equation:
29 * ns = cycles / (freq / ns_per_sec)
30 * ns = cycles * (ns_per_sec / freq)
31 * ns = cycles * (10^9 / (cpu_khz * 10^3))
32 * ns = cycles * (10^6 / cpu_khz)
33 *
34 * Then we use scaling math (suggested by george@mvista.com) to get:
35 * ns = cycles * (10^6 * SC / cpu_khz) / SC
36 * ns = cycles * cyc2ns_scale / SC
37 *
38 * And since SC is a constant power of two, we can convert the div
39 * into a shift.
40 *
41 * We can use khz divisor instead of mhz to keep a better precision, since
42 * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
43 * (mathieu.desnoyers@polymtl.ca)
44 *
45 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
46 */
47
48#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
49 26
50#if defined(CONFIG_CYCLES_CLOCKSOURCE) 27#if defined(CONFIG_CYCLES_CLOCKSOURCE)
51 28
@@ -63,7 +40,6 @@ static struct clocksource bfin_cs_cycles = {
63 .rating = 400, 40 .rating = 400,
64 .read = bfin_read_cycles, 41 .read = bfin_read_cycles,
65 .mask = CLOCKSOURCE_MASK(64), 42 .mask = CLOCKSOURCE_MASK(64),
66 .shift = CYC2NS_SCALE_FACTOR,
67 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 43 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
68}; 44};
69 45
@@ -75,10 +51,7 @@ static inline unsigned long long bfin_cs_cycles_sched_clock(void)
75 51
76static int __init bfin_cs_cycles_init(void) 52static int __init bfin_cs_cycles_init(void)
77{ 53{
78 bfin_cs_cycles.mult = \ 54 if (clocksource_register_hz(&bfin_cs_cycles, get_cclk()))
79 clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);
80
81 if (clocksource_register(&bfin_cs_cycles))
82 panic("failed to register clocksource"); 55 panic("failed to register clocksource");
83 56
84 return 0; 57 return 0;
@@ -111,7 +84,6 @@ static struct clocksource bfin_cs_gptimer0 = {
111 .rating = 350, 84 .rating = 350,
112 .read = bfin_read_gptimer0, 85 .read = bfin_read_gptimer0,
113 .mask = CLOCKSOURCE_MASK(32), 86 .mask = CLOCKSOURCE_MASK(32),
114 .shift = CYC2NS_SCALE_FACTOR,
115 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 87 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
116}; 88};
117 89
@@ -125,10 +97,7 @@ static int __init bfin_cs_gptimer0_init(void)
125{ 97{
126 setup_gptimer0(); 98 setup_gptimer0();
127 99
128 bfin_cs_gptimer0.mult = \ 100 if (clocksource_register_hz(&bfin_cs_gptimer0, get_sclk()))
129 clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);
130
131 if (clocksource_register(&bfin_cs_gptimer0))
132 panic("failed to register clocksource"); 101 panic("failed to register clocksource");
133 102
134 return 0; 103 return 0;
@@ -206,8 +175,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
206{ 175{
207 struct clock_event_device *evt = dev_id; 176 struct clock_event_device *evt = dev_id;
208 smp_mb(); 177 smp_mb();
209 evt->event_handler(evt); 178 /*
179 * We want to ACK before we handle so that we can handle smaller timer
180 * intervals. This way if the timer expires again while we're handling
181 * things, we're more likely to see that 2nd int rather than swallowing
182 * it by ACKing the int at the end of this handler.
183 */
210 bfin_gptmr0_ack(); 184 bfin_gptmr0_ack();
185 evt->event_handler(evt);
211 return IRQ_HANDLED; 186 return IRQ_HANDLED;
212} 187}
213 188
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index c9113619029f..8d73724c0092 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -114,16 +114,14 @@ u32 arch_gettimeoffset(void)
114 114
115/* 115/*
116 * timer_interrupt() needs to keep up the real-time clock, 116 * timer_interrupt() needs to keep up the real-time clock,
117 * as well as call the "do_timer()" routine every clocktick 117 * as well as call the "xtime_update()" routine every clocktick
118 */ 118 */
119#ifdef CONFIG_CORE_TIMER_IRQ_L1 119#ifdef CONFIG_CORE_TIMER_IRQ_L1
120__attribute__((l1_text)) 120__attribute__((l1_text))
121#endif 121#endif
122irqreturn_t timer_interrupt(int irq, void *dummy) 122irqreturn_t timer_interrupt(int irq, void *dummy)
123{ 123{
124 write_seqlock(&xtime_lock); 124 xtime_update(1);
125 do_timer(1);
126 write_sequnlock(&xtime_lock);
127 125
128#ifdef CONFIG_IPIPE 126#ifdef CONFIG_IPIPE
129 update_root_process_times(get_irq_regs()); 127 update_root_process_times(get_irq_regs());
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index 59fcdf6b0138..050db44fe919 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -15,6 +15,7 @@
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/irq.h>
18#include <asm/dma.h> 19#include <asm/dma.h>
19#include <asm/trace.h> 20#include <asm/trace.h>
20#include <asm/fixed_code.h> 21#include <asm/fixed_code.h>
@@ -911,10 +912,11 @@ void show_regs(struct pt_regs *fp)
911 /* if no interrupts are going off, don't print this out */ 912 /* if no interrupts are going off, don't print this out */
912 if (fp->ipend & ~0x3F) { 913 if (fp->ipend & ~0x3F) {
913 for (i = 0; i < (NR_IRQS - 1); i++) { 914 for (i = 0; i < (NR_IRQS - 1); i++) {
915 struct irq_desc *desc = irq_to_desc(i);
914 if (!in_atomic) 916 if (!in_atomic)
915 raw_spin_lock_irqsave(&irq_desc[i].lock, flags); 917 raw_spin_lock_irqsave(&desc->lock, flags);
916 918
917 action = irq_desc[i].action; 919 action = desc->action;
918 if (!action) 920 if (!action)
919 goto unlock; 921 goto unlock;
920 922
@@ -927,7 +929,7 @@ void show_regs(struct pt_regs *fp)
927 pr_cont("\n"); 929 pr_cont("\n");
928unlock: 930unlock:
929 if (!in_atomic) 931 if (!in_atomic)
930 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); 932 raw_spin_unlock_irqrestore(&desc->lock, flags);
931 } 933 }
932 } 934 }
933 935
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 59c1df75e4de..655f25d139a7 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -98,7 +98,7 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
98 /* send the appropriate signal to the user program */ 98 /* send the appropriate signal to the user program */
99 switch (trapnr) { 99 switch (trapnr) {
100 100
101 /* This table works in conjuction with the one in ./mach-common/entry.S 101 /* This table works in conjunction with the one in ./mach-common/entry.S
102 * Some exceptions are handled there (in assembly, in exception space) 102 * Some exceptions are handled there (in assembly, in exception space)
103 * Some are handled here, (in C, in interrupt space) 103 * Some are handled here, (in C, in interrupt space)
104 * Some, like CPLB, are handled in both, where the normal path is 104 * Some, like CPLB, are handled in both, where the normal path is
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 4122678529c0..3ac5b66d14aa 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
136 136
137 . = ALIGN(16); 137 . = ALIGN(16);
138 INIT_DATA_SECTION(16) 138 INIT_DATA_SECTION(16)
139 PERCPU(4) 139 PERCPU_SECTION(32)
140 140
141 .exit.data : 141 .exit.data :
142 { 142 {
@@ -155,14 +155,8 @@ SECTIONS
155 SECURITY_INITCALL 155 SECURITY_INITCALL
156 INIT_RAM_FS 156 INIT_RAM_FS
157 157
158 . = ALIGN(4);
159 ___per_cpu_load = .; 158 ___per_cpu_load = .;
160 ___per_cpu_start = .; 159 PERCPU_INPUT(32)
161 *(.data.percpu.first)
162 *(.data.percpu.page_aligned)
163 *(.data.percpu)
164 *(.data.percpu.shared_aligned)
165 ___per_cpu_end = .;
166 160
167 EXIT_DATA 161 EXIT_DATA
168 __einitdata = .; 162 __einitdata = .;
@@ -176,6 +170,7 @@ SECTIONS
176 { 170 {
177 . = ALIGN(4); 171 . = ALIGN(4);
178 __stext_l1 = .; 172 __stext_l1 = .;
173 *(.l1.text.head)
179 *(.l1.text) 174 *(.l1.text)
180#ifdef CONFIG_SCHEDULE_L1 175#ifdef CONFIG_SCHEDULE_L1
181 SCHED_TEXT 176 SCHED_TEXT