diff options
| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
|---|---|---|
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-22 10:38:37 -0500 |
| commit | fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch) | |
| tree | a57612d1888735a2ec7972891b68c1ac5ec8faea /arch/arm/common | |
| parent | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff) | |
Diffstat (limited to 'arch/arm/common')
| -rw-r--r-- | arch/arm/common/fiq_debugger.c | 1196 | ||||
| -rw-r--r-- | arch/arm/common/fiq_debugger_ringbuf.h | 94 | ||||
| -rw-r--r-- | arch/arm/common/fiq_glue.S | 111 | ||||
| -rw-r--r-- | arch/arm/common/fiq_glue_setup.c | 155 | ||||
| -rw-r--r-- | arch/arm/common/pl330.c | 1965 | ||||
| -rw-r--r-- | arch/arm/common/time-acorn.c | 95 | ||||
| -rw-r--r-- | arch/arm/common/uengine.c | 507 |
7 files changed, 4123 insertions, 0 deletions
diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c new file mode 100644 index 00000000000..3ed18ae2ed8 --- /dev/null +++ b/arch/arm/common/fiq_debugger.c | |||
| @@ -0,0 +1,1196 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm/common/fiq_debugger.c | ||
| 3 | * | ||
| 4 | * Serial Debugger Interface accessed through an FIQ interrupt. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2008 Google, Inc. | ||
| 7 | * | ||
| 8 | * This software is licensed under the terms of the GNU General Public | ||
| 9 | * License version 2, as published by the Free Software Foundation, and | ||
| 10 | * may be copied, distributed, and modified under those terms. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <stdarg.h> | ||
| 19 | #include <linux/module.h> | ||
| 20 | #include <linux/io.h> | ||
| 21 | #include <linux/console.h> | ||
| 22 | #include <linux/interrupt.h> | ||
| 23 | #include <linux/clk.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | #include <linux/kernel_stat.h> | ||
| 26 | #include <linux/irq.h> | ||
| 27 | #include <linux/delay.h> | ||
| 28 | #include <linux/sched.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | #include <linux/smp.h> | ||
| 31 | #include <linux/timer.h> | ||
| 32 | #include <linux/tty.h> | ||
| 33 | #include <linux/tty_flip.h> | ||
| 34 | #include <linux/wakelock.h> | ||
| 35 | |||
| 36 | #include <asm/fiq_debugger.h> | ||
| 37 | #include <asm/fiq_glue.h> | ||
| 38 | #include <asm/stacktrace.h> | ||
| 39 | |||
| 40 | #include <mach/system.h> | ||
| 41 | |||
| 42 | #include <linux/uaccess.h> | ||
| 43 | |||
| 44 | #include "fiq_debugger_ringbuf.h" | ||
| 45 | |||
| 46 | #define DEBUG_MAX 64 | ||
| 47 | #define MAX_UNHANDLED_FIQ_COUNT 1000000 | ||
| 48 | |||
| 49 | #define THREAD_INFO(sp) ((struct thread_info *) \ | ||
| 50 | ((unsigned long)(sp) & ~(THREAD_SIZE - 1))) | ||
| 51 | |||
/*
 * Per-device state of the FIQ serial debugger.  One instance per
 * platform device: bundles the FIQ glue handler, interrupt numbers,
 * platform callbacks and the command/console bookkeeping.
 */
struct fiq_debugger_state {
	struct fiq_glue_handler handler;

	int fiq;			/* FIQ number, or < 0 when only the UART IRQ path is used */
	int uart_irq;
	int signal_irq;			/* IRQ used to finish FIQ work in IRQ context */
	int wakeup_irq;			/* optional wakeup source, or < 0 if none */
	bool wakeup_irq_no_set_wake;	/* skip enable/disable_irq_wake() on wakeup_irq */
	struct clk *clk;		/* optional UART clock, may be NULL */
	struct fiq_debugger_pdata *pdata;
	struct platform_device *pdev;

	char debug_cmd[DEBUG_MAX];	/* command handed off to IRQ context */
	int debug_busy;			/* non-zero while debug_cmd awaits execution */
	int debug_abort;		/* set to -1 to ask a running command to stop */

	char debug_buf[DEBUG_MAX];	/* command line currently being typed */
	int debug_count;		/* number of valid chars in debug_buf */

	bool no_sleep;			/* when set, never power down the UART */
	bool debug_enable;		/* debugger armed (first CR/LF seen) */
	bool ignore_next_wakeup_irq;
	struct timer_list sleep_timer;	/* delays UART power-down after activity */
	spinlock_t sleep_timer_lock;	/* guards the sleep/wakeup state above */
	bool uart_enabled;
	struct wake_lock debugger_wake_lock;
	bool console_enable;		/* terminal switched to console mode */
	int current_cpu;		/* CPU currently owning the debugger */
	atomic_t unhandled_fiq_count;
	bool in_fiq;

#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
	struct console console;
	struct tty_driver *tty_driver;
	struct tty_struct *tty;
	int tty_open_count;
	struct fiq_debugger_ringbuf *tty_rbuf;	/* FIQ -> IRQ character hand-off */
	bool syslog_dumping;
#endif

	unsigned int last_irqs[NR_IRQS];	/* snapshots for "irqs" since-last deltas */
	unsigned int last_local_timer_irqs[NR_CPUS];
};
| 95 | |||
| 96 | #ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP | ||
| 97 | static bool initial_no_sleep = true; | ||
| 98 | #else | ||
| 99 | static bool initial_no_sleep; | ||
| 100 | #endif | ||
| 101 | |||
| 102 | #ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE | ||
| 103 | static bool initial_debug_enable = true; | ||
| 104 | static bool initial_console_enable = true; | ||
| 105 | #else | ||
| 106 | static bool initial_debug_enable; | ||
| 107 | static bool initial_console_enable; | ||
| 108 | #endif | ||
| 109 | |||
| 110 | module_param_named(no_sleep, initial_no_sleep, bool, 0644); | ||
| 111 | module_param_named(debug_enable, initial_debug_enable, bool, 0644); | ||
| 112 | module_param_named(console_enable, initial_console_enable, bool, 0644); | ||
| 113 | |||
#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
/* Wakeup IRQ stays armed permanently on this config; nothing to toggle. */
static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {}
static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {}
#else
/* Arm the wakeup interrupt (and wake source) before the UART sleeps. */
static inline void enable_wakeup_irq(struct fiq_debugger_state *state)
{
	if (state->wakeup_irq < 0)	/* no wakeup source configured */
		return;
	enable_irq(state->wakeup_irq);
	if (!state->wakeup_irq_no_set_wake)
		enable_irq_wake(state->wakeup_irq);
}
/* Disarm the wakeup interrupt once the UART is powered back up. */
static inline void disable_wakeup_irq(struct fiq_debugger_state *state)
{
	if (state->wakeup_irq < 0)
		return;
	disable_irq_nosync(state->wakeup_irq);
	if (!state->wakeup_irq_no_set_wake)
		disable_irq_wake(state->wakeup_irq);
}
#endif
| 135 | |||
| 136 | static bool inline debug_have_fiq(struct fiq_debugger_state *state) | ||
| 137 | { | ||
| 138 | return (state->fiq >= 0); | ||
| 139 | } | ||
| 140 | |||
/*
 * Trigger the signal IRQ so that work queued by the FIQ handler gets
 * finished in IRQ context.  Only meaningful when a FIQ is in use.
 * Prefers the platform's force_irq hook; falls back to retriggering
 * the interrupt through its irq_chip.
 */
static void debug_force_irq(struct fiq_debugger_state *state)
{
	unsigned int irq = state->signal_irq;

	if (WARN_ON(!debug_have_fiq(state)))
		return;
	if (state->pdata->force_irq) {
		state->pdata->force_irq(state->pdev, irq);
	} else {
		struct irq_chip *chip = irq_get_chip(irq);
		if (chip && chip->irq_retrigger)
			chip->irq_retrigger(irq_get_irq_data(irq));
	}
}
| 155 | |||
/* Power up the debugger UART: clock first, then the platform hook. */
static void debug_uart_enable(struct fiq_debugger_state *state)
{
	if (state->clk)
		clk_enable(state->clk);
	if (state->pdata->uart_enable)
		state->pdata->uart_enable(state->pdev);
}
| 163 | |||
/* Power down the UART: platform hook first, then gate the clock
 * (reverse order of debug_uart_enable). */
static void debug_uart_disable(struct fiq_debugger_state *state)
{
	if (state->pdata->uart_disable)
		state->pdata->uart_disable(state->pdev);
	if (state->clk)
		clk_disable(state->clk);
}
| 171 | |||
/* Drain the UART TX FIFO via the platform hook, if one is provided. */
static void debug_uart_flush(struct fiq_debugger_state *state)
{
	if (state->pdata->uart_flush)
		state->pdata->uart_flush(state->pdev);
}
| 177 | |||
/* Emit a NUL-terminated string, expanding '\n' to "\r\n" for raw
 * terminals.  Polled output through the platform uart_putc hook. */
static void debug_puts(struct fiq_debugger_state *state, char *s)
{
	unsigned c;
	while ((c = *s++)) {
		if (c == '\n')
			state->pdata->uart_putc(state->pdev, '\r');
		state->pdata->uart_putc(state->pdev, c);
	}
}
| 187 | |||
/* Print the interactive command prompt. */
static void debug_prompt(struct fiq_debugger_state *state)
{
	debug_puts(state, "debug> ");
}
| 192 | |||
/* Provided by the printk code; copies len bytes of the log buffer
 * starting at idx.  Returns bytes copied, <= 0 on end/error. */
int log_buf_copy(char *dest, int idx, int len);

/* Dump the entire kernel log buffer to the debug UART ("kmsg"). */
static void dump_kernel_log(struct fiq_debugger_state *state)
{
	char buf[1024];
	int idx = 0;
	int ret;
	int saved_oip;

	/* setting oops_in_progress prevents log_buf_copy()
	 * from trying to take a spinlock which will make it
	 * very unhappy in some cases...
	 */
	saved_oip = oops_in_progress;
	oops_in_progress = 1;
	for (;;) {
		/* 1023 leaves room for the NUL terminator below */
		ret = log_buf_copy(buf, idx, 1023);
		if (ret <= 0)
			break;
		buf[ret] = 0;
		debug_puts(state, buf);
		idx += ret;
	}
	oops_in_progress = saved_oip;
}
| 217 | |||
| 218 | static char *mode_name(unsigned cpsr) | ||
| 219 | { | ||
| 220 | switch (cpsr & MODE_MASK) { | ||
| 221 | case USR_MODE: return "USR"; | ||
| 222 | case FIQ_MODE: return "FIQ"; | ||
| 223 | case IRQ_MODE: return "IRQ"; | ||
| 224 | case SVC_MODE: return "SVC"; | ||
| 225 | case ABT_MODE: return "ABT"; | ||
| 226 | case UND_MODE: return "UND"; | ||
| 227 | case SYSTEM_MODE: return "SYS"; | ||
| 228 | default: return "???"; | ||
| 229 | } | ||
| 230 | } | ||
| 231 | |||
/*
 * printf to the debug UART.  Output is truncated to 256 bytes.
 * Returns the current abort flag so long-running dump loops can
 * bail out when the user requests an abort.  FIQ-context safe.
 */
static int debug_printf(void *cookie, const char *fmt, ...)
{
	struct fiq_debugger_state *state = cookie;
	char buf[256];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	debug_puts(state, buf);
	return state->debug_abort;
}
| 245 | |||
| 246 | /* Safe outside fiq context */ | ||
| 247 | static int debug_printf_nfiq(void *cookie, const char *fmt, ...) | ||
| 248 | { | ||
| 249 | struct fiq_debugger_state *state = cookie; | ||
| 250 | char buf[256]; | ||
| 251 | va_list ap; | ||
| 252 | unsigned long irq_flags; | ||
| 253 | |||
| 254 | va_start(ap, fmt); | ||
| 255 | vsnprintf(buf, 128, fmt, ap); | ||
| 256 | va_end(ap); | ||
| 257 | |||
| 258 | local_irq_save(irq_flags); | ||
| 259 | debug_puts(state, buf); | ||
| 260 | debug_uart_flush(state); | ||
| 261 | local_irq_restore(irq_flags); | ||
| 262 | return state->debug_abort; | ||
| 263 | } | ||
| 264 | |||
/*
 * Print the saved register file.  Layout of regs[]: r0-r15 at
 * indices 0-15, cpsr at 16, spsr at 17 (spsr only meaningful for
 * exception modes, so it is omitted for USR mode).
 */
static void dump_regs(struct fiq_debugger_state *state, unsigned *regs)
{
	debug_printf(state, " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
			regs[0], regs[1], regs[2], regs[3]);
	debug_printf(state, " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
			regs[4], regs[5], regs[6], regs[7]);
	debug_printf(state, " r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
			regs[8], regs[9], regs[10], regs[11],
			mode_name(regs[16]));
	if ((regs[16] & MODE_MASK) == USR_MODE)
		debug_printf(state, " ip %08x  sp %08x  lr %08x  pc %08x  "
				"cpsr %08x\n", regs[12], regs[13], regs[14],
				regs[15], regs[16]);
	else
		debug_printf(state, " ip %08x  sp %08x  lr %08x  pc %08x  "
				"cpsr %08x  spsr %08x\n", regs[12], regs[13],
				regs[14], regs[15], regs[16], regs[17]);
}
| 283 | |||
/*
 * Banked registers of each privileged ARM mode.  Field order is
 * load-bearing: get_mode_regs() fills this struct with sequential
 * stmia stores, so it must match the store sequence exactly.
 */
struct mode_regs {
	unsigned long sp_svc;
	unsigned long lr_svc;
	unsigned long spsr_svc;

	unsigned long sp_abt;
	unsigned long lr_abt;
	unsigned long spsr_abt;

	unsigned long sp_und;
	unsigned long lr_und;
	unsigned long spsr_und;

	unsigned long sp_irq;
	unsigned long lr_irq;
	unsigned long spsr_irq;

	unsigned long r8_fiq;
	unsigned long r9_fiq;
	unsigned long r10_fiq;
	unsigned long r11_fiq;
	unsigned long r12_fiq;
	unsigned long sp_fiq;
	unsigned long lr_fiq;
	unsigned long spsr_fiq;
};
| 310 | |||
/*
 * Capture the banked sp/lr/spsr of SVC, ABT, UND, IRQ and FIQ modes
 * (plus r8-r12 of FIQ mode) by briefly switching into each mode with
 * IRQs and FIQs masked, storing into *regs in struct mode_regs order.
 * __naked: no compiler prologue/epilogue.  The argument arrives in r0;
 * r1 preserves the original cpsr, r2 holds each mode's spsr.  The
 * original mode is restored before returning via bx lr.
 * NOTE(review): spsr stores trail by one mode (each mode's spsr is
 * written at the start of the next stmia) — matches the field layout.
 */
void __naked get_mode_regs(struct mode_regs *regs)
{
	asm volatile (
	"mrs	r1, cpsr\n"
	"msr	cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
	"stmia	r0!, {r13 - r14}\n"
	"mrs	r2, spsr\n"
	"msr	cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
	"stmia	r0!, {r2, r13 - r14}\n"
	"mrs	r2, spsr\n"
	"msr	cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
	"stmia	r0!, {r2, r13 - r14}\n"
	"mrs	r2, spsr\n"
	"msr	cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
	"stmia	r0!, {r2, r13 - r14}\n"
	"mrs	r2, spsr\n"
	"msr	cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
	"stmia	r0!, {r2, r8 - r14}\n"
	"mrs	r2, spsr\n"
	"stmia	r0!, {r2}\n"
	"msr	cpsr_c, r1\n"
	"bx	lr\n");
}
| 334 | |||
| 335 | |||
| 336 | static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs) | ||
| 337 | { | ||
| 338 | struct mode_regs mode_regs; | ||
| 339 | dump_regs(state, regs); | ||
| 340 | get_mode_regs(&mode_regs); | ||
| 341 | debug_printf(state, " svc: sp %08x lr %08x spsr %08x\n", | ||
| 342 | mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc); | ||
| 343 | debug_printf(state, " abt: sp %08x lr %08x spsr %08x\n", | ||
| 344 | mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt); | ||
| 345 | debug_printf(state, " und: sp %08x lr %08x spsr %08x\n", | ||
| 346 | mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und); | ||
| 347 | debug_printf(state, " irq: sp %08x lr %08x spsr %08x\n", | ||
| 348 | mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq); | ||
| 349 | debug_printf(state, " fiq: r8 %08x r9 %08x r10 %08x r11 %08x " | ||
| 350 | "r12 %08x\n", | ||
| 351 | mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq, | ||
| 352 | mode_regs.r11_fiq, mode_regs.r12_fiq); | ||
| 353 | debug_printf(state, " fiq: sp %08x lr %08x spsr %08x\n", | ||
| 354 | mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq); | ||
| 355 | } | ||
| 356 | |||
/*
 * "irqs" command: list every active IRQ with its total count, the
 * delta since the previous dump (via state->last_irqs) and its action
 * name, followed by per-CPU local timer interrupt counts.
 */
static void dump_irqs(struct fiq_debugger_state *state)
{
	int n;
	unsigned int cpu;

	debug_printf(state, "irqnr       total  since-last   status  name\n");
	for (n = 0; n < NR_IRQS; n++) {
		struct irqaction *act = irq_desc[n].action;
		/* skip IRQs that have no handler and never fired */
		if (!act && !kstat_irqs(n))
			continue;
		debug_printf(state, "%5d: %10u %11u %8x  %s\n", n,
			kstat_irqs(n),
			kstat_irqs(n) - state->last_irqs[n],
			irq_desc[n].status_use_accessors,
			(act && act->name) ? act->name : "???");
		state->last_irqs[n] = kstat_irqs(n);
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++) {

		debug_printf(state, "LOC %d: %10u %11u\n", cpu,
			     __IRQ_STAT(cpu, local_timer_irqs),
			     __IRQ_STAT(cpu, local_timer_irqs) -
			     state->last_local_timer_irqs[cpu]);
		state->last_local_timer_irqs[cpu] =
			__IRQ_STAT(cpu, local_timer_irqs);
	}
}
| 385 | |||
/* Cookie passed to walk_stackframe(): output target plus the number
 * of frames still allowed to print. */
struct stacktrace_state {
	struct fiq_debugger_state *state;
	unsigned int depth;
};
| 390 | |||
/*
 * walk_stackframe() callback: print one kernel frame per call until
 * the depth budget runs out, then print "..." and return non-zero to
 * stop the walk.
 */
static int report_trace(struct stackframe *frame, void *d)
{
	struct stacktrace_state *sts = d;

	if (sts->depth) {
		debug_printf(sts->state,
			"  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
			frame->pc, frame->pc, frame->lr, frame->lr,
			frame->sp, frame->fp);
		sts->depth--;
		return 0;
	}
	debug_printf(sts->state, "  ...\n");

	/* depth is 0 here, so this returns non-zero (stop walking) */
	return sts->depth == 0;
}
| 407 | |||
/* Layout of an APCS userspace frame record as read off the user stack
 * (packed: must match the in-memory format exactly). */
struct frame_tail {
	struct frame_tail *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
| 413 | |||
/*
 * Print one userspace backtrace entry and return the next frame
 * pointer to follow, or NULL to terminate the walk (bad pointer,
 * faulting copy, or non-monotonic frame chain).
 */
static struct frame_tail *user_backtrace(struct fiq_debugger_state *state,
					struct frame_tail *tail)
{
	struct frame_tail buftail[2];

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
		debug_printf(state, "  invalid frame pointer %p\n", tail);
		return NULL;
	}
	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
		debug_printf(state,
			"  failed to copy frame pointer %p\n", tail);
		return NULL;
	}

	debug_printf(state, "  %p\n", buftail[0].lr);

	/* frame pointers should strictly progress back up the stack
	 * (towards higher addresses) */
	if (tail >= buftail[0].fp)
		return NULL;

	/* -1: fp points at the lr field, one frame_tail past the record */
	return buftail[0].fp-1;
}
| 439 | |||
/*
 * "bt" command: dump registers and a stack trace of the interrupted
 * context, kernel or user, up to `depth` frames.  `ssp` is the SVC
 * stack pointer at the time of the exception; its thread_info
 * identifies the interrupted task.
 */
void dump_stacktrace(struct fiq_debugger_state *state,
		struct pt_regs * const regs, unsigned int depth, void *ssp)
{
	struct frame_tail *tail;
	struct thread_info *real_thread_info = THREAD_INFO(ssp);
	struct stacktrace_state sts;

	sts.depth = depth;
	sts.state = state;
	/* Copy the interrupted task's thread_info over ours so that
	 * current/current_thread_info() resolve to the right task while
	 * we print.  NOTE(review): deliberate raw overwrite; only safe
	 * in this debugger context. */
	*current_thread_info() = *real_thread_info;

	if (!current)
		debug_printf(state, "current NULL\n");
	else
		debug_printf(state, "pid: %d  comm: %s\n",
			current->pid, current->comm);
	dump_regs(state, (unsigned *)regs);

	if (!user_mode(regs)) {
		struct stackframe frame;
		frame.fp = regs->ARM_fp;
		frame.sp = regs->ARM_sp;
		frame.lr = regs->ARM_lr;
		frame.pc = regs->ARM_pc;
		debug_printf(state,
			"  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
			regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
			regs->ARM_sp, regs->ARM_fp);
		walk_stackframe(&frame, report_trace, &sts);
		return;
	}

	/* user mode: follow APCS frame records; stop on a misaligned fp */
	tail = ((struct frame_tail *) regs->ARM_fp) - 1;
	while (depth-- && tail && !((unsigned long) tail & 3))
		tail = user_backtrace(state, tail);
}
| 476 | |||
/*
 * "ps" command: print pid, ppid, priority, comm, scheduler state
 * letter and (for sleeping tasks) the saved pc of every thread.
 * Must not run in FIQ context (takes tasklist_lock).
 */
static void do_ps(struct fiq_debugger_state *state)
{
	struct task_struct *g;
	struct task_struct *p;
	unsigned task_state;
	static const char stat_nam[] = "RSDTtZX";

	debug_printf(state, "pid   ppid  prio task            pc\n");
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		/* map the state bitmask to an index into stat_nam */
		task_state = p->state ? __ffs(p->state) + 1 : 0;
		debug_printf(state,
			     "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
		debug_printf(state, "%-13.13s %c", p->comm,
			     task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
		if (task_state == TASK_RUNNING)
			debug_printf(state, " running\n");
		else
			debug_printf(state, " %08lx\n", thread_saved_pc(p));
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}
| 499 | |||
#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
/* With the console enabled, sysrq output reaches the UART through the
 * console path; just flag that a dump is in progress. */
static void begin_syslog_dump(struct fiq_debugger_state *state)
{
	state->syslog_dumping = true;
}

static void end_syslog_dump(struct fiq_debugger_state *state)
{
	state->syslog_dumping = false;
}
#else
extern int do_syslog(int type, char __user *bug, int count);
/* Without the console, clear the log before sysrq runs ... */
static void begin_syslog_dump(struct fiq_debugger_state *state)
{
	do_syslog(5 /* clear */, NULL, 0);
}

/* ... then replay everything sysrq logged to the debug UART. */
static void end_syslog_dump(struct fiq_debugger_state *state)
{
	char buf[128];
	int ret;
	int idx = 0;

	while (1) {
		ret = log_buf_copy(buf, idx, sizeof(buf) - 1);
		if (ret <= 0)
			break;
		buf[ret] = 0;
		debug_printf(state, "%s", buf);
		idx += ret;
	}
}
#endif
| 533 | |||
/* Run a sysrq command and route its log output to the debug UART. */
static void do_sysrq(struct fiq_debugger_state *state, char rq)
{
	begin_syslog_dump(state);
	handle_sysrq(rq);
	end_syslog_dump(state);
}
| 540 | |||
| 541 | /* This function CANNOT be called in FIQ context */ | ||
| 542 | static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd) | ||
| 543 | { | ||
| 544 | if (!strcmp(cmd, "ps")) | ||
| 545 | do_ps(state); | ||
| 546 | if (!strcmp(cmd, "sysrq")) | ||
| 547 | do_sysrq(state, 'h'); | ||
| 548 | if (!strncmp(cmd, "sysrq ", 6)) | ||
| 549 | do_sysrq(state, cmd[6]); | ||
| 550 | } | ||
| 551 | |||
/* Print the command summary ("help" / "?").  Split across several
 * debug_printf() calls to stay inside its 256-byte buffer. */
static void debug_help(struct fiq_debugger_state *state)
{
	debug_printf(state,	"FIQ Debugger commands:\n"
				" pc            PC status\n"
				" regs          Register dump\n"
				" allregs       Extended Register dump\n"
				" bt            Stack trace\n"
				" reboot        Reboot\n"
				/* typo fix: was "Interupt" */
				" irqs          Interrupt status\n"
				" kmsg          Kernel log\n"
				" version       Kernel version\n");
	debug_printf(state,	" sleep         Allow sleep while in FIQ\n"
				" nosleep       Disable sleep while in FIQ\n"
				" console       Switch terminal to console\n"
				" cpu           Current CPU\n"
				" cpu <number>  Switch to CPU<number>\n");
	debug_printf(state,	" ps            Process list\n"
				" sysrq         sysrq options\n"
				" sysrq <param> Execute sysrq with <param>\n");
}
| 572 | |||
| 573 | static void take_affinity(void *info) | ||
| 574 | { | ||
| 575 | struct fiq_debugger_state *state = info; | ||
| 576 | struct cpumask cpumask; | ||
| 577 | |||
| 578 | cpumask_clear(&cpumask); | ||
| 579 | cpumask_set_cpu(get_cpu(), &cpumask); | ||
| 580 | |||
| 581 | irq_set_affinity(state->uart_irq, &cpumask); | ||
| 582 | } | ||
| 583 | |||
/* Hand the debugger to another CPU.  Without a FIQ this means moving
 * the UART IRQ's affinity onto the target CPU. */
static void switch_cpu(struct fiq_debugger_state *state, int cpu)
{
	if (!debug_have_fiq(state))
		smp_call_function_single(cpu, take_affinity, state, false);
	state->current_cpu = cpu;
}
| 590 | |||
/*
 * Execute a command directly in FIQ context.  Unrecognized commands
 * are handed off to IRQ context via debug_cmd/debug_busy.  Returns
 * true when the signal IRQ should be raised to finish work outside
 * the FIQ.
 */
static bool debug_fiq_exec(struct fiq_debugger_state *state,
			const char *cmd, unsigned *regs, void *svc_sp)
{
	bool signal_helper = false;

	if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
		debug_help(state);
	} else if (!strcmp(cmd, "pc")) {
		debug_printf(state, " pc %08x cpsr %08x mode %s\n",
			regs[15], regs[16], mode_name(regs[16]));
	} else if (!strcmp(cmd, "regs")) {
		dump_regs(state, regs);
	} else if (!strcmp(cmd, "allregs")) {
		dump_allregs(state, regs);
	} else if (!strcmp(cmd, "bt")) {
		dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp);
	} else if (!strcmp(cmd, "reboot")) {
		arch_reset(0, 0);
	} else if (!strcmp(cmd, "irqs")) {
		dump_irqs(state);
	} else if (!strcmp(cmd, "kmsg")) {
		dump_kernel_log(state);
	} else if (!strcmp(cmd, "version")) {
		debug_printf(state, "%s\n", linux_banner);
	} else if (!strcmp(cmd, "sleep")) {
		state->no_sleep = false;
		debug_printf(state, "enabling sleep\n");
	} else if (!strcmp(cmd, "nosleep")) {
		state->no_sleep = true;
		debug_printf(state, "disabling sleep\n");
	} else if (!strcmp(cmd, "console")) {
		state->console_enable = true;
		debug_printf(state, "console mode\n");
	} else if (!strcmp(cmd, "cpu")) {
		debug_printf(state, "cpu %d\n", state->current_cpu);
	} else if (!strncmp(cmd, "cpu ", 4)) {
		unsigned long cpu = 0;
		if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
			switch_cpu(state, cpu);
		else
			debug_printf(state, "invalid cpu\n");
		debug_printf(state, "cpu %d\n", state->current_cpu);
	} else {
		/* not a FIQ-safe command: queue it for IRQ context */
		if (state->debug_busy) {
			debug_printf(state,
				"command processor busy. trying to abort.\n");
			state->debug_abort = -1;
		} else {
			strcpy(state->debug_cmd, cmd);
			state->debug_busy = 1;
		}

		return true;
	}
	if (!state->console_enable)
		debug_prompt(state);

	return signal_helper;
}
| 650 | |||
/*
 * Inactivity timer: power down the UART, arm the wakeup IRQ and drop
 * the wakelock once the debugger has been idle long enough.
 */
static void sleep_timer_expired(unsigned long data)
{
	struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
	unsigned long flags;

	spin_lock_irqsave(&state->sleep_timer_lock, flags);
	if (state->uart_enabled && !state->no_sleep) {
		if (state->debug_enable && !state->console_enable) {
			state->debug_enable = false;
			debug_printf_nfiq(state, "suspending fiq debugger\n");
		}
		/* the freshly armed wakeup IRQ may fire spuriously once */
		state->ignore_next_wakeup_irq = true;
		debug_uart_disable(state);
		state->uart_enabled = false;
		enable_wakeup_irq(state);
	}
	wake_unlock(&state->debugger_wake_lock);
	spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
}
| 670 | |||
/* Power the UART back up on wakeup activity (unless this is the
 * spurious interrupt expected right after arming the wakeup IRQ),
 * take the wakelock and restart the inactivity timer. */
static void handle_wakeup(struct fiq_debugger_state *state)
{
	unsigned long flags;

	spin_lock_irqsave(&state->sleep_timer_lock, flags);
	if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
		state->ignore_next_wakeup_irq = false;
	} else if (!state->uart_enabled) {
		wake_lock(&state->debugger_wake_lock);
		debug_uart_enable(state);
		state->uart_enabled = true;
		disable_wakeup_irq(state);
		mod_timer(&state->sleep_timer, jiffies + HZ / 2);
	}
	spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
}
| 687 | |||
/* IRQ handler for the dedicated wakeup interrupt. */
static irqreturn_t wakeup_irq_handler(int irq, void *dev)
{
	struct fiq_debugger_state *state = dev;

	if (!state->no_sleep)
		debug_puts(state, "WAKEUP\n");
	handle_wakeup(state);

	return IRQ_HANDLED;
}
| 698 | |||

/*
 * Finish debugger work that must run in IRQ (not FIQ) context:
 * refresh the sleep timer/wakelock, drain FIQ-buffered characters
 * into the tty, and execute any command queued by debug_fiq_exec().
 */
static void debug_handle_irq_context(struct fiq_debugger_state *state)
{
	if (!state->no_sleep) {
		unsigned long flags;

		spin_lock_irqsave(&state->sleep_timer_lock, flags);
		wake_lock(&state->debugger_wake_lock);
		mod_timer(&state->sleep_timer, jiffies + HZ * 5);
		spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
	}
#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
	if (state->tty) {
		int i;
		int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
		for (i = 0; i < count; i++) {
			int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
			tty_insert_flip_char(state->tty, c, TTY_NORMAL);
			if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
				pr_warn("fiq tty failed to consume byte\n");
		}
		tty_flip_buffer_push(state->tty);
	}
#endif
	if (state->debug_busy) {
		debug_irq_exec(state, state->debug_cmd);
		debug_prompt(state);
		state->debug_busy = 0;
	}
}
| 729 | |||
/* Read one character from the UART via the platform hook
 * (FIQ_DEBUGGER_NO_CHAR when the FIFO is empty). */
static int debug_getc(struct fiq_debugger_state *state)
{
	return state->pdata->uart_getc(state->pdev);
}
| 734 | |||
/*
 * Core receive path, called from FIQ or IRQ context.  Implements the
 * line editor / command dispatch state machine over incoming UART
 * characters.  Returns true when further processing is needed in IRQ
 * context (debug_handle_irq_context).  If the owning CPU stops
 * responding (MAX_UNHANDLED_FIQ_COUNT FIQs seen elsewhere), ownership
 * reverts to the current CPU.
 */
static bool debug_handle_uart_interrupt(struct fiq_debugger_state *state,
			int this_cpu, void *regs, void *svc_sp)
{
	int c;
	static int last_c;
	int count = 0;
	bool signal_helper = false;

	if (this_cpu != state->current_cpu) {
		if (state->in_fiq)
			return false;

		if (atomic_inc_return(&state->unhandled_fiq_count) !=
					MAX_UNHANDLED_FIQ_COUNT)
			return false;

		debug_printf(state, "fiq_debugger: cpu %d not responding, "
			"reverting to cpu %d\n", state->current_cpu,
			this_cpu);

		atomic_set(&state->unhandled_fiq_count, 0);
		switch_cpu(state, this_cpu);
		return false;
	}

	state->in_fiq = true;

	while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
		count++;
		if (!state->debug_enable) {
			/* dormant until the first CR/LF arms the debugger */
			if ((c == 13) || (c == 10)) {
				state->debug_enable = true;
				state->debug_count = 0;
				debug_prompt(state);
			}
		} else if (c == FIQ_DEBUGGER_BREAK) {
			/* break drops out of console mode back to the prompt */
			state->console_enable = false;
			debug_puts(state, "fiq debugger mode\n");
			state->debug_count = 0;
			debug_prompt(state);
#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
		} else if (state->console_enable && state->tty_rbuf) {
			/* console mode: buffer for the tty, drained in IRQ context */
			fiq_debugger_ringbuf_push(state->tty_rbuf, c);
			signal_helper = true;
#endif
		} else if ((c >= ' ') && (c < 127)) {
			/* printable: append to the line and echo */
			if (state->debug_count < (DEBUG_MAX - 1)) {
				state->debug_buf[state->debug_count++] = c;
				state->pdata->uart_putc(state->pdev, c);
			}
		} else if ((c == 8) || (c == 127)) {
			/* backspace/delete: erase one echoed character */
			if (state->debug_count > 0) {
				state->debug_count--;
				state->pdata->uart_putc(state->pdev, 8);
				state->pdata->uart_putc(state->pdev, ' ');
				state->pdata->uart_putc(state->pdev, 8);
			}
		} else if ((c == 13) || (c == 10)) {
			/* end of line; swallow the LF of a CRLF pair */
			if (c == '\r' || (c == '\n' && last_c != '\r')) {
				state->pdata->uart_putc(state->pdev, '\r');
				state->pdata->uart_putc(state->pdev, '\n');
			}
			if (state->debug_count) {
				state->debug_buf[state->debug_count] = 0;
				state->debug_count = 0;
				signal_helper |=
					debug_fiq_exec(state, state->debug_buf,
						       regs, svc_sp);
			} else {
				debug_prompt(state);
			}
		}
		last_c = c;
	}
	debug_uart_flush(state);
	if (state->pdata->fiq_ack)
		state->pdata->fiq_ack(state->pdev, state->fiq);

	/* poke sleep timer if necessary */
	if (state->debug_enable && !state->no_sleep)
		signal_helper = true;

	atomic_set(&state->unhandled_fiq_count, 0);
	state->in_fiq = false;

	return signal_helper;
}
| 822 | |||
| 823 | static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp) | ||
| 824 | { | ||
| 825 | struct fiq_debugger_state *state = | ||
| 826 | container_of(h, struct fiq_debugger_state, handler); | ||
| 827 | unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu; | ||
| 828 | bool need_irq; | ||
| 829 | |||
| 830 | need_irq = debug_handle_uart_interrupt(state, this_cpu, regs, svc_sp); | ||
| 831 | if (need_irq) | ||
| 832 | debug_force_irq(state); | ||
| 833 | } | ||
| 834 | |||
| 835 | /* | ||
| 836 | * When not using FIQs, we only use this single interrupt as an entry point. | ||
| 837 | * This just effectively takes over the UART interrupt and does all the work | ||
| 838 | * in this context. | ||
| 839 | */ | ||
| 840 | static irqreturn_t debug_uart_irq(int irq, void *dev) | ||
| 841 | { | ||
| 842 | struct fiq_debugger_state *state = dev; | ||
| 843 | bool not_done; | ||
| 844 | |||
| 845 | handle_wakeup(state); | ||
| 846 | |||
| 847 | /* handle the debugger irq in regular context */ | ||
| 848 | not_done = debug_handle_uart_interrupt(state, smp_processor_id(), | ||
| 849 | get_irq_regs(), | ||
| 850 | current_thread_info()); | ||
| 851 | if (not_done) | ||
| 852 | debug_handle_irq_context(state); | ||
| 853 | |||
| 854 | return IRQ_HANDLED; | ||
| 855 | } | ||
| 856 | |||
| 857 | /* | ||
| 858 | * If FIQs are used, not everything can happen in fiq context. | ||
| 859 | * FIQ handler does what it can and then signals this interrupt to finish the | ||
| 860 | * job in irq context. | ||
| 861 | */ | ||
| 862 | static irqreturn_t debug_signal_irq(int irq, void *dev) | ||
| 863 | { | ||
| 864 | struct fiq_debugger_state *state = dev; | ||
| 865 | |||
| 866 | if (state->pdata->force_irq_ack) | ||
| 867 | state->pdata->force_irq_ack(state->pdev, state->signal_irq); | ||
| 868 | |||
| 869 | debug_handle_irq_context(state); | ||
| 870 | |||
| 871 | return IRQ_HANDLED; | ||
| 872 | } | ||
| 873 | |||
| 874 | static void debug_resume(struct fiq_glue_handler *h) | ||
| 875 | { | ||
| 876 | struct fiq_debugger_state *state = | ||
| 877 | container_of(h, struct fiq_debugger_state, handler); | ||
| 878 | if (state->pdata->uart_resume) | ||
| 879 | state->pdata->uart_resume(state->pdev); | ||
| 880 | } | ||
| 881 | |||
| 882 | #if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) | ||
| 883 | struct tty_driver *debug_console_device(struct console *co, int *index) | ||
| 884 | { | ||
| 885 | struct fiq_debugger_state *state; | ||
| 886 | state = container_of(co, struct fiq_debugger_state, console); | ||
| 887 | *index = 0; | ||
| 888 | return state->tty_driver; | ||
| 889 | } | ||
| 890 | |||
| 891 | static void debug_console_write(struct console *co, | ||
| 892 | const char *s, unsigned int count) | ||
| 893 | { | ||
| 894 | struct fiq_debugger_state *state; | ||
| 895 | |||
| 896 | state = container_of(co, struct fiq_debugger_state, console); | ||
| 897 | |||
| 898 | if (!state->console_enable && !state->syslog_dumping) | ||
| 899 | return; | ||
| 900 | |||
| 901 | debug_uart_enable(state); | ||
| 902 | while (count--) { | ||
| 903 | if (*s == '\n') | ||
| 904 | state->pdata->uart_putc(state->pdev, '\r'); | ||
| 905 | state->pdata->uart_putc(state->pdev, *s++); | ||
| 906 | } | ||
| 907 | debug_uart_flush(state); | ||
| 908 | debug_uart_disable(state); | ||
| 909 | } | ||
| 910 | |||
/*
 * Console template copied into each state->console at probe time.
 * Output goes through debug_console_write() on the debug UART.
 */
static struct console fiq_debugger_console = {
	.name = "ttyFIQ",
	.device = debug_console_device,
	.write = debug_console_write,
	.flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
};
| 917 | |||
| 918 | int fiq_tty_open(struct tty_struct *tty, struct file *filp) | ||
| 919 | { | ||
| 920 | struct fiq_debugger_state *state = tty->driver->driver_state; | ||
| 921 | if (state->tty_open_count++) | ||
| 922 | return 0; | ||
| 923 | |||
| 924 | tty->driver_data = state; | ||
| 925 | state->tty = tty; | ||
| 926 | return 0; | ||
| 927 | } | ||
| 928 | |||
| 929 | void fiq_tty_close(struct tty_struct *tty, struct file *filp) | ||
| 930 | { | ||
| 931 | struct fiq_debugger_state *state = tty->driver_data; | ||
| 932 | if (--state->tty_open_count) | ||
| 933 | return; | ||
| 934 | state->tty = NULL; | ||
| 935 | } | ||
| 936 | |||
| 937 | int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) | ||
| 938 | { | ||
| 939 | int i; | ||
| 940 | struct fiq_debugger_state *state = tty->driver_data; | ||
| 941 | |||
| 942 | if (!state->console_enable) | ||
| 943 | return count; | ||
| 944 | |||
| 945 | debug_uart_enable(state); | ||
| 946 | for (i = 0; i < count; i++) | ||
| 947 | state->pdata->uart_putc(state->pdev, *buf++); | ||
| 948 | debug_uart_disable(state); | ||
| 949 | |||
| 950 | return count; | ||
| 951 | } | ||
| 952 | |||
/* tty write_room: report a fixed chunk size; writes never block here. */
int fiq_tty_write_room(struct tty_struct *tty)
{
	return 1024;
}
| 957 | |||
/* Minimal tty operations: just open/close/write bookkeeping, no termios. */
static const struct tty_operations fiq_tty_driver_ops = {
	.write = fiq_tty_write,
	.write_room = fiq_tty_write_room,
	.open = fiq_tty_open,
	.close = fiq_tty_close,
};
| 964 | |||
| 965 | static int fiq_debugger_tty_init(struct fiq_debugger_state *state) | ||
| 966 | { | ||
| 967 | int ret = -EINVAL; | ||
| 968 | |||
| 969 | state->tty_driver = alloc_tty_driver(1); | ||
| 970 | if (!state->tty_driver) { | ||
| 971 | pr_err("Failed to allocate fiq debugger tty\n"); | ||
| 972 | return -ENOMEM; | ||
| 973 | } | ||
| 974 | |||
| 975 | state->tty_driver->owner = THIS_MODULE; | ||
| 976 | state->tty_driver->driver_name = "fiq-debugger"; | ||
| 977 | state->tty_driver->name = "ttyFIQ"; | ||
| 978 | state->tty_driver->type = TTY_DRIVER_TYPE_SERIAL; | ||
| 979 | state->tty_driver->subtype = SERIAL_TYPE_NORMAL; | ||
| 980 | state->tty_driver->init_termios = tty_std_termios; | ||
| 981 | state->tty_driver->init_termios.c_cflag = | ||
| 982 | B115200 | CS8 | CREAD | HUPCL | CLOCAL; | ||
| 983 | state->tty_driver->init_termios.c_ispeed = | ||
| 984 | state->tty_driver->init_termios.c_ospeed = 115200; | ||
| 985 | state->tty_driver->flags = TTY_DRIVER_REAL_RAW; | ||
| 986 | tty_set_operations(state->tty_driver, &fiq_tty_driver_ops); | ||
| 987 | state->tty_driver->driver_state = state; | ||
| 988 | |||
| 989 | ret = tty_register_driver(state->tty_driver); | ||
| 990 | if (ret) { | ||
| 991 | pr_err("Failed to register fiq tty: %d\n", ret); | ||
| 992 | goto err; | ||
| 993 | } | ||
| 994 | |||
| 995 | state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024); | ||
| 996 | if (!state->tty_rbuf) { | ||
| 997 | pr_err("Failed to allocate fiq debugger ringbuf\n"); | ||
| 998 | ret = -ENOMEM; | ||
| 999 | goto err; | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | pr_info("Registered FIQ tty driver %p\n", state->tty_driver); | ||
| 1003 | return 0; | ||
| 1004 | |||
| 1005 | err: | ||
| 1006 | fiq_debugger_ringbuf_free(state->tty_rbuf); | ||
| 1007 | state->tty_rbuf = NULL; | ||
| 1008 | put_tty_driver(state->tty_driver); | ||
| 1009 | return ret; | ||
| 1010 | } | ||
| 1011 | #endif | ||
| 1012 | |||
| 1013 | static int fiq_debugger_dev_suspend(struct device *dev) | ||
| 1014 | { | ||
| 1015 | struct platform_device *pdev = to_platform_device(dev); | ||
| 1016 | struct fiq_debugger_state *state = platform_get_drvdata(pdev); | ||
| 1017 | |||
| 1018 | if (state->pdata->uart_dev_suspend) | ||
| 1019 | return state->pdata->uart_dev_suspend(pdev); | ||
| 1020 | return 0; | ||
| 1021 | } | ||
| 1022 | |||
| 1023 | static int fiq_debugger_dev_resume(struct device *dev) | ||
| 1024 | { | ||
| 1025 | struct platform_device *pdev = to_platform_device(dev); | ||
| 1026 | struct fiq_debugger_state *state = platform_get_drvdata(pdev); | ||
| 1027 | |||
| 1028 | if (state->pdata->uart_dev_resume) | ||
| 1029 | return state->pdata->uart_dev_resume(pdev); | ||
| 1030 | return 0; | ||
| 1031 | } | ||
| 1032 | |||
| 1033 | static int fiq_debugger_probe(struct platform_device *pdev) | ||
| 1034 | { | ||
| 1035 | int ret; | ||
| 1036 | struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev); | ||
| 1037 | struct fiq_debugger_state *state; | ||
| 1038 | int fiq; | ||
| 1039 | int uart_irq; | ||
| 1040 | |||
| 1041 | if (!pdata->uart_getc || !pdata->uart_putc) | ||
| 1042 | return -EINVAL; | ||
| 1043 | if ((pdata->uart_enable && !pdata->uart_disable) || | ||
| 1044 | (!pdata->uart_enable && pdata->uart_disable)) | ||
| 1045 | return -EINVAL; | ||
| 1046 | |||
| 1047 | fiq = platform_get_irq_byname(pdev, "fiq"); | ||
| 1048 | uart_irq = platform_get_irq_byname(pdev, "uart_irq"); | ||
| 1049 | |||
| 1050 | /* uart_irq mode and fiq mode are mutually exclusive, but one of them | ||
| 1051 | * is required */ | ||
| 1052 | if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0)) | ||
| 1053 | return -EINVAL; | ||
| 1054 | if (fiq >= 0 && !pdata->fiq_enable) | ||
| 1055 | return -EINVAL; | ||
| 1056 | |||
| 1057 | state = kzalloc(sizeof(*state), GFP_KERNEL); | ||
| 1058 | setup_timer(&state->sleep_timer, sleep_timer_expired, | ||
| 1059 | (unsigned long)state); | ||
| 1060 | state->pdata = pdata; | ||
| 1061 | state->pdev = pdev; | ||
| 1062 | state->no_sleep = initial_no_sleep; | ||
| 1063 | state->debug_enable = initial_debug_enable; | ||
| 1064 | state->console_enable = initial_console_enable; | ||
| 1065 | |||
| 1066 | state->fiq = fiq; | ||
| 1067 | state->uart_irq = uart_irq; | ||
| 1068 | state->signal_irq = platform_get_irq_byname(pdev, "signal"); | ||
| 1069 | state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup"); | ||
| 1070 | |||
| 1071 | platform_set_drvdata(pdev, state); | ||
| 1072 | |||
| 1073 | spin_lock_init(&state->sleep_timer_lock); | ||
| 1074 | |||
| 1075 | if (state->wakeup_irq < 0 && debug_have_fiq(state)) | ||
| 1076 | state->no_sleep = true; | ||
| 1077 | state->ignore_next_wakeup_irq = !state->no_sleep; | ||
| 1078 | |||
| 1079 | wake_lock_init(&state->debugger_wake_lock, | ||
| 1080 | WAKE_LOCK_SUSPEND, "serial-debug"); | ||
| 1081 | |||
| 1082 | state->clk = clk_get(&pdev->dev, NULL); | ||
| 1083 | if (IS_ERR(state->clk)) | ||
| 1084 | state->clk = NULL; | ||
| 1085 | |||
| 1086 | /* do not call pdata->uart_enable here since uart_init may still | ||
| 1087 | * need to do some initialization before uart_enable can work. | ||
| 1088 | * So, only try to manage the clock during init. | ||
| 1089 | */ | ||
| 1090 | if (state->clk) | ||
| 1091 | clk_enable(state->clk); | ||
| 1092 | |||
| 1093 | if (pdata->uart_init) { | ||
| 1094 | ret = pdata->uart_init(pdev); | ||
| 1095 | if (ret) | ||
| 1096 | goto err_uart_init; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n", | ||
| 1100 | state->no_sleep ? "" : "twice "); | ||
| 1101 | |||
| 1102 | if (debug_have_fiq(state)) { | ||
| 1103 | state->handler.fiq = debug_fiq; | ||
| 1104 | state->handler.resume = debug_resume; | ||
| 1105 | ret = fiq_glue_register_handler(&state->handler); | ||
| 1106 | if (ret) { | ||
| 1107 | pr_err("%s: could not install fiq handler\n", __func__); | ||
| 1108 | goto err_register_fiq; | ||
| 1109 | } | ||
| 1110 | |||
| 1111 | pdata->fiq_enable(pdev, state->fiq, 1); | ||
| 1112 | } else { | ||
| 1113 | ret = request_irq(state->uart_irq, debug_uart_irq, | ||
| 1114 | IRQF_NO_SUSPEND, "debug", state); | ||
| 1115 | if (ret) { | ||
| 1116 | pr_err("%s: could not install irq handler\n", __func__); | ||
| 1117 | goto err_register_irq; | ||
| 1118 | } | ||
| 1119 | |||
| 1120 | /* for irq-only mode, we want this irq to wake us up, if it | ||
| 1121 | * can. | ||
| 1122 | */ | ||
| 1123 | enable_irq_wake(state->uart_irq); | ||
| 1124 | } | ||
| 1125 | |||
| 1126 | if (state->clk) | ||
| 1127 | clk_disable(state->clk); | ||
| 1128 | |||
| 1129 | if (state->signal_irq >= 0) { | ||
| 1130 | ret = request_irq(state->signal_irq, debug_signal_irq, | ||
| 1131 | IRQF_TRIGGER_RISING, "debug-signal", state); | ||
| 1132 | if (ret) | ||
| 1133 | pr_err("serial_debugger: could not install signal_irq"); | ||
| 1134 | } | ||
| 1135 | |||
| 1136 | if (state->wakeup_irq >= 0) { | ||
| 1137 | ret = request_irq(state->wakeup_irq, wakeup_irq_handler, | ||
| 1138 | IRQF_TRIGGER_FALLING | IRQF_DISABLED, | ||
| 1139 | "debug-wakeup", state); | ||
| 1140 | if (ret) { | ||
| 1141 | pr_err("serial_debugger: " | ||
| 1142 | "could not install wakeup irq\n"); | ||
| 1143 | state->wakeup_irq = -1; | ||
| 1144 | } else { | ||
| 1145 | ret = enable_irq_wake(state->wakeup_irq); | ||
| 1146 | if (ret) { | ||
| 1147 | pr_err("serial_debugger: " | ||
| 1148 | "could not enable wakeup\n"); | ||
| 1149 | state->wakeup_irq_no_set_wake = true; | ||
| 1150 | } | ||
| 1151 | } | ||
| 1152 | } | ||
| 1153 | if (state->no_sleep) | ||
| 1154 | handle_wakeup(state); | ||
| 1155 | |||
| 1156 | #if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) | ||
| 1157 | state->console = fiq_debugger_console; | ||
| 1158 | register_console(&state->console); | ||
| 1159 | fiq_debugger_tty_init(state); | ||
| 1160 | #endif | ||
| 1161 | return 0; | ||
| 1162 | |||
| 1163 | err_register_irq: | ||
| 1164 | err_register_fiq: | ||
| 1165 | if (pdata->uart_free) | ||
| 1166 | pdata->uart_free(pdev); | ||
| 1167 | err_uart_init: | ||
| 1168 | if (state->clk) | ||
| 1169 | clk_disable(state->clk); | ||
| 1170 | if (state->clk) | ||
| 1171 | clk_put(state->clk); | ||
| 1172 | wake_lock_destroy(&state->debugger_wake_lock); | ||
| 1173 | platform_set_drvdata(pdev, NULL); | ||
| 1174 | kfree(state); | ||
| 1175 | return ret; | ||
| 1176 | } | ||
| 1177 | |||
/* System suspend/resume callbacks forwarded to the platform UART hooks. */
static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
	.suspend = fiq_debugger_dev_suspend,
	.resume = fiq_debugger_dev_resume,
};
| 1182 | |||
/* Platform driver; no remove hook — the debugger lives for the system's
 * lifetime once probed. */
static struct platform_driver fiq_debugger_driver = {
	.probe = fiq_debugger_probe,
	.driver = {
		.name = "fiq_debugger",
		.pm = &fiq_debugger_dev_pm_ops,
	},
};
| 1190 | |||
/* Register the driver early (postcore) so the debug console comes up
 * before most other drivers probe. */
static int __init fiq_debugger_init(void)
{
	return platform_driver_register(&fiq_debugger_driver);
}

postcore_initcall(fiq_debugger_init);
diff --git a/arch/arm/common/fiq_debugger_ringbuf.h b/arch/arm/common/fiq_debugger_ringbuf.h new file mode 100644 index 00000000000..2649b558108 --- /dev/null +++ b/arch/arm/common/fiq_debugger_ringbuf.h | |||
| @@ -0,0 +1,94 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm/common/fiq_debugger_ringbuf.c | ||
| 3 | * | ||
| 4 | * simple lockless ringbuffer | ||
| 5 | * | ||
| 6 | * Copyright (C) 2010 Google, Inc. | ||
| 7 | * | ||
| 8 | * This software is licensed under the terms of the GNU General Public | ||
| 9 | * License version 2, as published by the Free Software Foundation, and | ||
| 10 | * may be copied, distributed, and modified under those terms. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/kernel.h> | ||
| 19 | #include <linux/slab.h> | ||
| 20 | |||
/*
 * Single-producer/single-consumer byte ring.  One slot is always kept
 * empty so head == tail unambiguously means "empty"; usable capacity
 * is therefore len - 1.
 */
struct fiq_debugger_ringbuf {
	int len;	/* allocated size of buf[] */
	int head;	/* next write index (producer-owned) */
	int tail;	/* next read index (consumer-owned) */
	u8 buf[];	/* flexible array; storage follows the struct */
};
| 27 | |||
| 28 | |||
| 29 | static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len) | ||
| 30 | { | ||
| 31 | struct fiq_debugger_ringbuf *rbuf; | ||
| 32 | |||
| 33 | rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL); | ||
| 34 | if (rbuf == NULL) | ||
| 35 | return NULL; | ||
| 36 | |||
| 37 | rbuf->len = len; | ||
| 38 | rbuf->head = 0; | ||
| 39 | rbuf->tail = 0; | ||
| 40 | smp_mb(); | ||
| 41 | |||
| 42 | return rbuf; | ||
| 43 | } | ||
| 44 | |||
/* Release a ring allocated by fiq_debugger_ringbuf_alloc() (NULL ok). */
static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
{
	kfree(rbuf);
}
| 49 | |||
| 50 | static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf) | ||
| 51 | { | ||
| 52 | int level = rbuf->head - rbuf->tail; | ||
| 53 | |||
| 54 | if (level < 0) | ||
| 55 | level = rbuf->len + level; | ||
| 56 | |||
| 57 | return level; | ||
| 58 | } | ||
| 59 | |||
/* Free space in the ring; one slot is reserved to distinguish full
 * from empty, hence the "- 1". */
static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
{
	return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
}
| 64 | |||
/* Read the byte @i positions past the tail without consuming it.
 * Caller must ensure i < fiq_debugger_ringbuf_level(). */
static inline u8
fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
{
	return rbuf->buf[(rbuf->tail + i) % rbuf->len];
}
| 70 | |||
| 71 | static inline int | ||
| 72 | fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count) | ||
| 73 | { | ||
| 74 | count = min(count, fiq_debugger_ringbuf_level(rbuf)); | ||
| 75 | |||
| 76 | rbuf->tail = (rbuf->tail + count) % rbuf->len; | ||
| 77 | smp_mb(); | ||
| 78 | |||
| 79 | return count; | ||
| 80 | } | ||
| 81 | |||
| 82 | static inline int | ||
| 83 | fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum) | ||
| 84 | { | ||
| 85 | if (fiq_debugger_ringbuf_room(rbuf) == 0) | ||
| 86 | return 0; | ||
| 87 | |||
| 88 | rbuf->buf[rbuf->head] = datum; | ||
| 89 | smp_mb(); | ||
| 90 | rbuf->head = (rbuf->head + 1) % rbuf->len; | ||
| 91 | smp_mb(); | ||
| 92 | |||
| 93 | return 1; | ||
| 94 | } | ||
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S new file mode 100644 index 00000000000..9e3455a09f8 --- /dev/null +++ b/arch/arm/common/fiq_glue.S | |||
| @@ -0,0 +1,111 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2008 Google, Inc. | ||
| 3 | * | ||
| 4 | * This software is licensed under the terms of the GNU General Public | ||
| 5 | * License version 2, as published by the Free Software Foundation, and | ||
| 6 | * may be copied, distributed, and modified under those terms. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | * | ||
| 13 | */ | ||
| 14 | |||
| 15 | #include <linux/linkage.h> | ||
| 16 | #include <asm/assembler.h> | ||
| 17 | |||
| 18 | .text | ||
| 19 | |||
| 20 | .global fiq_glue_end | ||
| 21 | |||
| 22 | /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */ | ||
| 23 | |||
/*
 * FIQ entry glue.  Saves the interrupted context (r0-r15, cpsr, spsr of
 * the previous mode) on the FIQ stack, switches to SVC mode to call the
 * registered C handler, then restores the context and returns.
 *
 * Banked FIQ registers are preloaded by fiq_glue_setup:
 *   r8  = handler function, r9 = handler data,
 *   r10 = nesting counter (re-entry guard), sp = top of FIQ stack.
 */
ENTRY(fiq_glue)
	/* store pc, cpsr from previous mode */
	mrs	r12, spsr
	sub	r11, lr, #4		@ r11 = pc of the interrupted instruction
	subs	r10, #1			@ decrement nesting guard
	bne	nested_fiq		@ already inside the handler: bail out

	stmfd	sp!, {r11-r12, lr}

	/* store r8-r14 from previous mode */
	sub	sp, sp, #(7 * 4)
	stmia	sp, {r8-r14}^		@ '^' = user-mode banked registers
	nop				@ required after a user-bank transfer

	/* store r0-r7 from previous mode */
	stmfd	sp!, {r0-r7}

	/* setup func(data,regs) arguments */
	mov	r0, r9			@ r0 = handler data
	mov	r1, sp			@ r1 = saved register frame
	mov	r3, r8			@ r3 = handler function pointer

	mov	r7, sp			@ r7 = frame base, survives mode switches

	/* Get sp and lr from non-user modes */
	and	r4, r12, #MODE_MASK
	cmp	r4, #USR_MODE
	beq	fiq_from_usr_mode

	/* interrupted a privileged mode: switch into it (FIQs/IRQs masked)
	 * to capture its banked sp/lr/spsr into the frame */
	mov	r7, sp
	orr	r4, r4, #(PSR_I_BIT | PSR_F_BIT)
	msr	cpsr_c, r4
	str	sp, [r7, #(4 * 13)]
	str	lr, [r7, #(4 * 14)]
	mrs	r5, spsr
	str	r5, [r7, #(4 * 17)]

	cmp	r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
	/* use fiq stack if we reenter this mode */
	subne	sp, r7, #(4 * 3)

fiq_from_usr_mode:
	/* run the handler in SVC mode with interrupts masked */
	msr	cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
	mov	r2, sp			@ r2 = svc_sp argument
	sub	sp, r7, #12
	stmfd	sp!, {r2, ip, lr}	@ preserve svc state across the call
	/* call func(data,regs) */
	blx	r3
	ldmfd	sp, {r2, ip, lr}
	mov	sp, r2

	/* restore/discard saved state */
	cmp	r4, #USR_MODE
	beq	fiq_from_usr_mode_exit

	/* put back the interrupted privileged mode's sp/lr/spsr */
	msr	cpsr_c, r4
	ldr	sp, [r7, #(4 * 13)]
	ldr	lr, [r7, #(4 * 14)]
	msr	spsr_cxsf, r5

fiq_from_usr_mode_exit:
	msr	cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)

	ldmfd	sp!, {r0-r7}
	add	sp, sp, #(7 * 4)	@ discard the r8-r14 slots (restored above)
	ldmfd	sp!, {r11-r12, lr}
exit_fiq:
	msr	spsr_cxsf, r12
	add	r10, #1			@ release the nesting guard
	movs	pc, r11			@ return to interrupted code, restore cpsr

nested_fiq:
	/* re-entered while handling: mask FIQ in the return psr and leave */
	orr	r12, r12, #(PSR_F_BIT)
	b	exit_fiq

fiq_glue_end:
| 100 | |||
/*
 * fiq_glue_setup(func, data, sp): briefly switch to FIQ mode and load
 * its banked registers for fiq_glue: r8 = func, r9 = data, sp = stack
 * top, r10 = nesting guard (0 to disable when func is NULL, else 1).
 */
ENTRY(fiq_glue_setup) /* func, data, sp */
	mrs	r3, cpsr		@ remember the caller's mode/flags
	msr	cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
	movs	r8, r0			@ sets Z when func == NULL
	mov	r9, r1
	mov	sp, r2
	moveq	r10, #0			@ func NULL: guard stays tripped
	movne	r10, #1			@ func set: allow one entry
	msr	cpsr_c, r3		@ back to the caller's mode
	bx	lr
| 111 | |||
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c new file mode 100644 index 00000000000..59586861a63 --- /dev/null +++ b/arch/arm/common/fiq_glue_setup.c | |||
| @@ -0,0 +1,155 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2010 Google, Inc. | ||
| 3 | * | ||
| 4 | * This software is licensed under the terms of the GNU General Public | ||
| 5 | * License version 2, as published by the Free Software Foundation, and | ||
| 6 | * may be copied, distributed, and modified under those terms. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful, | ||
| 9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/percpu.h> | ||
| 16 | #include <linux/slab.h> | ||
| 17 | #include <linux/syscore_ops.h> | ||
| 18 | #include <asm/cpu_pm.h> | ||
| 19 | #include <asm/fiq.h> | ||
| 20 | #include <asm/fiq_glue.h> | ||
| 21 | |||
| 22 | extern unsigned char fiq_glue, fiq_glue_end; | ||
| 23 | extern void fiq_glue_setup(void *func, void *data, void *sp); | ||
| 24 | |||
/* Handler descriptor passed to claim_fiq(); note the historical
 * "debbuger" typo — internal identifier only, kept as-is. */
static struct fiq_handler fiq_debbuger_fiq_handler = {
	.name = "fiq_glue",
};
/* Per-CPU FIQ stack base, allocated in fiq_glue_register_handler(). */
DEFINE_PER_CPU(void *, fiq_stack);
/* The single registered glue handler; writes guarded by fiq_glue_lock. */
static struct fiq_glue_handler *current_handler;
static DEFINE_MUTEX(fiq_glue_lock);
| 31 | |||
/* Per-CPU callback (run via on_each_cpu): point this CPU's FIQ banked
 * registers at the handler and the top of its FIQ stack. */
static void fiq_glue_setup_helper(void *info)
{
	struct fiq_glue_handler *handler = info;
	fiq_glue_setup(handler->fiq, handler,
		__get_cpu_var(fiq_stack) + THREAD_START_SP);
}
| 38 | |||
| 39 | int fiq_glue_register_handler(struct fiq_glue_handler *handler) | ||
| 40 | { | ||
| 41 | int ret; | ||
| 42 | int cpu; | ||
| 43 | |||
| 44 | if (!handler || !handler->fiq) | ||
| 45 | return -EINVAL; | ||
| 46 | |||
| 47 | mutex_lock(&fiq_glue_lock); | ||
| 48 | if (fiq_stack) { | ||
| 49 | ret = -EBUSY; | ||
| 50 | goto err_busy; | ||
| 51 | } | ||
| 52 | |||
| 53 | for_each_possible_cpu(cpu) { | ||
| 54 | void *stack; | ||
| 55 | stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); | ||
| 56 | if (WARN_ON(!stack)) { | ||
| 57 | ret = -ENOMEM; | ||
| 58 | goto err_alloc_fiq_stack; | ||
| 59 | } | ||
| 60 | per_cpu(fiq_stack, cpu) = stack; | ||
| 61 | } | ||
| 62 | |||
| 63 | ret = claim_fiq(&fiq_debbuger_fiq_handler); | ||
| 64 | if (WARN_ON(ret)) | ||
| 65 | goto err_claim_fiq; | ||
| 66 | |||
| 67 | current_handler = handler; | ||
| 68 | on_each_cpu(fiq_glue_setup_helper, handler, true); | ||
| 69 | set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue); | ||
| 70 | |||
| 71 | mutex_unlock(&fiq_glue_lock); | ||
| 72 | return 0; | ||
| 73 | |||
| 74 | err_claim_fiq: | ||
| 75 | err_alloc_fiq_stack: | ||
| 76 | for_each_possible_cpu(cpu) { | ||
| 77 | __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER); | ||
| 78 | per_cpu(fiq_stack, cpu) = NULL; | ||
| 79 | } | ||
| 80 | err_busy: | ||
| 81 | mutex_unlock(&fiq_glue_lock); | ||
| 82 | return ret; | ||
| 83 | } | ||
| 84 | |||
| 85 | /** | ||
| 86 | * fiq_glue_resume - Restore fiqs after suspend or low power idle states | ||
| 87 | * | ||
| 88 | * This must be called before calling local_fiq_enable after returning from a | ||
| 89 | * power state where the fiq mode registers were lost. If a driver provided | ||
| 90 | * a resume hook when it registered the handler it will be called. | ||
| 91 | */ | ||
| 92 | |||
| 93 | void fiq_glue_resume(void) | ||
| 94 | { | ||
| 95 | if (!current_handler) | ||
| 96 | return; | ||
| 97 | fiq_glue_setup(current_handler->fiq, current_handler, | ||
| 98 | __get_cpu_var(fiq_stack) + THREAD_START_SP); | ||
| 99 | if (current_handler->resume) | ||
| 100 | current_handler->resume(current_handler); | ||
| 101 | } | ||
| 102 | |||
| 103 | static int fiq_glue_cpu_pm_notify(struct notifier_block *self, unsigned long cmd, | ||
| 104 | void *v) | ||
| 105 | { | ||
| 106 | switch (cmd) { | ||
| 107 | case CPU_PM_ENTER: | ||
| 108 | //pr_info("cpu pm enter %d\n", smp_processor_id()); | ||
| 109 | local_fiq_disable(); | ||
| 110 | break; | ||
| 111 | case CPU_PM_ENTER_FAILED: | ||
| 112 | case CPU_PM_EXIT: | ||
| 113 | fiq_glue_resume(); | ||
| 114 | local_fiq_enable(); | ||
| 115 | //pr_info("cpu pm exit %d\n", smp_processor_id()); | ||
| 116 | break; | ||
| 117 | } | ||
| 118 | return NOTIFY_OK; | ||
| 119 | } | ||
| 120 | |||
/* Registered with cpu_pm in fiq_glue_cpu_pm_init(). */
static struct notifier_block fiq_glue_cpu_pm_notifier = {
	.notifier_call = fiq_glue_cpu_pm_notify,
};
| 124 | |||
/* Hook the CPU PM notifier early so FIQ state survives idle states. */
static int __init fiq_glue_cpu_pm_init(void)
{
	return cpu_pm_register_notifier(&fiq_glue_cpu_pm_notifier);
}
core_initcall(fiq_glue_cpu_pm_init);
| 130 | |||
| 131 | #ifdef CONFIG_PM | ||
/* syscore suspend: mask FIQs before the FIQ mode registers are lost. */
static int fiq_glue_syscore_suspend(void)
{
	local_fiq_disable();
	return 0;
}
| 137 | |||
/* syscore resume: reprogram the FIQ registers, then unmask FIQs. */
static void fiq_glue_syscore_resume(void)
{
	fiq_glue_resume();
	local_fiq_enable();
}
| 143 | |||
/* Suspend/resume hooks for full system sleep (complements the CPU PM
 * notifier used for idle states). */
static struct syscore_ops fiq_glue_syscore_ops = {
	.suspend = fiq_glue_syscore_suspend,
	.resume = fiq_glue_syscore_resume,
};
| 148 | |||
/* Register the syscore ops; late_initcall so core PM infra is ready. */
static int __init fiq_glue_syscore_init(void)
{
	register_syscore_ops(&fiq_glue_syscore_ops);
	return 0;
}
late_initcall(fiq_glue_syscore_init);
| 155 | #endif | ||
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c new file mode 100644 index 00000000000..97912fa4878 --- /dev/null +++ b/arch/arm/common/pl330.c | |||
| @@ -0,0 +1,1965 @@ | |||
| 1 | /* linux/arch/arm/common/pl330.c | ||
| 2 | * | ||
| 3 | * Copyright (C) 2010 Samsung Electronics Co Ltd. | ||
| 4 | * Jaswinder Singh <jassi.brar@samsung.com> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #include <linux/kernel.h> | ||
| 22 | #include <linux/init.h> | ||
| 23 | #include <linux/slab.h> | ||
| 24 | #include <linux/module.h> | ||
| 25 | #include <linux/string.h> | ||
| 26 | #include <linux/io.h> | ||
| 27 | #include <linux/delay.h> | ||
| 28 | #include <linux/interrupt.h> | ||
| 29 | #include <linux/dma-mapping.h> | ||
| 30 | |||
| 31 | #include <asm/hardware/pl330.h> | ||
| 32 | |||
| 33 | /* Register and Bit field Definitions */ | ||
| 34 | #define DS 0x0 | ||
| 35 | #define DS_ST_STOP 0x0 | ||
| 36 | #define DS_ST_EXEC 0x1 | ||
| 37 | #define DS_ST_CMISS 0x2 | ||
| 38 | #define DS_ST_UPDTPC 0x3 | ||
| 39 | #define DS_ST_WFE 0x4 | ||
| 40 | #define DS_ST_ATBRR 0x5 | ||
| 41 | #define DS_ST_QBUSY 0x6 | ||
| 42 | #define DS_ST_WFP 0x7 | ||
| 43 | #define DS_ST_KILL 0x8 | ||
| 44 | #define DS_ST_CMPLT 0x9 | ||
| 45 | #define DS_ST_FLTCMP 0xe | ||
| 46 | #define DS_ST_FAULT 0xf | ||
| 47 | |||
| 48 | #define DPC 0x4 | ||
| 49 | #define INTEN 0x20 | ||
| 50 | #define ES 0x24 | ||
| 51 | #define INTSTATUS 0x28 | ||
| 52 | #define INTCLR 0x2c | ||
| 53 | #define FSM 0x30 | ||
| 54 | #define FSC 0x34 | ||
| 55 | #define FTM 0x38 | ||
| 56 | |||
| 57 | #define _FTC 0x40 | ||
| 58 | #define FTC(n) (_FTC + (n)*0x4) | ||
| 59 | |||
| 60 | #define _CS 0x100 | ||
| 61 | #define CS(n) (_CS + (n)*0x8) | ||
| 62 | #define CS_CNS (1 << 21) | ||
| 63 | |||
| 64 | #define _CPC 0x104 | ||
| 65 | #define CPC(n) (_CPC + (n)*0x8) | ||
| 66 | |||
| 67 | #define _SA 0x400 | ||
| 68 | #define SA(n) (_SA + (n)*0x20) | ||
| 69 | |||
| 70 | #define _DA 0x404 | ||
| 71 | #define DA(n) (_DA + (n)*0x20) | ||
| 72 | |||
| 73 | #define _CC 0x408 | ||
| 74 | #define CC(n) (_CC + (n)*0x20) | ||
| 75 | |||
| 76 | #define CC_SRCINC (1 << 0) | ||
| 77 | #define CC_DSTINC (1 << 14) | ||
| 78 | #define CC_SRCPRI (1 << 8) | ||
| 79 | #define CC_DSTPRI (1 << 22) | ||
| 80 | #define CC_SRCNS (1 << 9) | ||
| 81 | #define CC_DSTNS (1 << 23) | ||
| 82 | #define CC_SRCIA (1 << 10) | ||
| 83 | #define CC_DSTIA (1 << 24) | ||
| 84 | #define CC_SRCBRSTLEN_SHFT 4 | ||
| 85 | #define CC_DSTBRSTLEN_SHFT 18 | ||
| 86 | #define CC_SRCBRSTSIZE_SHFT 1 | ||
| 87 | #define CC_DSTBRSTSIZE_SHFT 15 | ||
| 88 | #define CC_SRCCCTRL_SHFT 11 | ||
| 89 | #define CC_SRCCCTRL_MASK 0x7 | ||
| 90 | #define CC_DSTCCTRL_SHFT 25 | ||
| 91 | #define CC_DRCCCTRL_MASK 0x7 | ||
| 92 | #define CC_SWAP_SHFT 28 | ||
| 93 | |||
| 94 | #define _LC0 0x40c | ||
| 95 | #define LC0(n) (_LC0 + (n)*0x20) | ||
| 96 | |||
| 97 | #define _LC1 0x410 | ||
| 98 | #define LC1(n) (_LC1 + (n)*0x20) | ||
| 99 | |||
| 100 | #define DBGSTATUS 0xd00 | ||
| 101 | #define DBG_BUSY (1 << 0) | ||
| 102 | |||
| 103 | #define DBGCMD 0xd04 | ||
| 104 | #define DBGINST0 0xd08 | ||
| 105 | #define DBGINST1 0xd0c | ||
| 106 | |||
| 107 | #define CR0 0xe00 | ||
| 108 | #define CR1 0xe04 | ||
| 109 | #define CR2 0xe08 | ||
| 110 | #define CR3 0xe0c | ||
| 111 | #define CR4 0xe10 | ||
| 112 | #define CRD 0xe14 | ||
| 113 | |||
| 114 | #define PERIPH_ID 0xfe0 | ||
| 115 | #define PCELL_ID 0xff0 | ||
| 116 | |||
| 117 | #define CR0_PERIPH_REQ_SET (1 << 0) | ||
| 118 | #define CR0_BOOT_EN_SET (1 << 1) | ||
| 119 | #define CR0_BOOT_MAN_NS (1 << 2) | ||
| 120 | #define CR0_NUM_CHANS_SHIFT 4 | ||
| 121 | #define CR0_NUM_CHANS_MASK 0x7 | ||
| 122 | #define CR0_NUM_PERIPH_SHIFT 12 | ||
| 123 | #define CR0_NUM_PERIPH_MASK 0x1f | ||
| 124 | #define CR0_NUM_EVENTS_SHIFT 17 | ||
| 125 | #define CR0_NUM_EVENTS_MASK 0x1f | ||
| 126 | |||
| 127 | #define CR1_ICACHE_LEN_SHIFT 0 | ||
| 128 | #define CR1_ICACHE_LEN_MASK 0x7 | ||
| 129 | #define CR1_NUM_ICACHELINES_SHIFT 4 | ||
| 130 | #define CR1_NUM_ICACHELINES_MASK 0xf | ||
| 131 | |||
| 132 | #define CRD_DATA_WIDTH_SHIFT 0 | ||
| 133 | #define CRD_DATA_WIDTH_MASK 0x7 | ||
| 134 | #define CRD_WR_CAP_SHIFT 4 | ||
| 135 | #define CRD_WR_CAP_MASK 0x7 | ||
| 136 | #define CRD_WR_Q_DEP_SHIFT 8 | ||
| 137 | #define CRD_WR_Q_DEP_MASK 0xf | ||
| 138 | #define CRD_RD_CAP_SHIFT 12 | ||
| 139 | #define CRD_RD_CAP_MASK 0x7 | ||
| 140 | #define CRD_RD_Q_DEP_SHIFT 16 | ||
| 141 | #define CRD_RD_Q_DEP_MASK 0xf | ||
| 142 | #define CRD_DATA_BUFF_SHIFT 20 | ||
| 143 | #define CRD_DATA_BUFF_MASK 0x3ff | ||
| 144 | |||
| 145 | #define PART 0x330 | ||
| 146 | #define DESIGNER 0x41 | ||
| 147 | #define REVISION 0x0 | ||
| 148 | #define INTEG_CFG 0x0 | ||
| 149 | #define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12)) | ||
| 150 | |||
| 151 | #define PCELL_ID_VAL 0xb105f00d | ||
| 152 | |||
| 153 | #define PL330_STATE_STOPPED (1 << 0) | ||
| 154 | #define PL330_STATE_EXECUTING (1 << 1) | ||
| 155 | #define PL330_STATE_WFE (1 << 2) | ||
| 156 | #define PL330_STATE_FAULTING (1 << 3) | ||
| 157 | #define PL330_STATE_COMPLETING (1 << 4) | ||
| 158 | #define PL330_STATE_WFP (1 << 5) | ||
| 159 | #define PL330_STATE_KILLING (1 << 6) | ||
| 160 | #define PL330_STATE_FAULT_COMPLETING (1 << 7) | ||
| 161 | #define PL330_STATE_CACHEMISS (1 << 8) | ||
| 162 | #define PL330_STATE_UPDTPC (1 << 9) | ||
| 163 | #define PL330_STATE_ATBARRIER (1 << 10) | ||
| 164 | #define PL330_STATE_QUEUEBUSY (1 << 11) | ||
| 165 | #define PL330_STATE_INVALID (1 << 15) | ||
| 166 | |||
| 167 | #define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \ | ||
| 168 | | PL330_STATE_WFE | PL330_STATE_FAULTING) | ||
| 169 | |||
| 170 | #define CMD_DMAADDH 0x54 | ||
| 171 | #define CMD_DMAEND 0x00 | ||
| 172 | #define CMD_DMAFLUSHP 0x35 | ||
| 173 | #define CMD_DMAGO 0xa0 | ||
| 174 | #define CMD_DMALD 0x04 | ||
| 175 | #define CMD_DMALDP 0x25 | ||
| 176 | #define CMD_DMALP 0x20 | ||
| 177 | #define CMD_DMALPEND 0x28 | ||
| 178 | #define CMD_DMAKILL 0x01 | ||
| 179 | #define CMD_DMAMOV 0xbc | ||
| 180 | #define CMD_DMANOP 0x18 | ||
| 181 | #define CMD_DMARMB 0x12 | ||
| 182 | #define CMD_DMASEV 0x34 | ||
| 183 | #define CMD_DMAST 0x08 | ||
| 184 | #define CMD_DMASTP 0x29 | ||
| 185 | #define CMD_DMASTZ 0x0c | ||
| 186 | #define CMD_DMAWFE 0x36 | ||
| 187 | #define CMD_DMAWFP 0x30 | ||
| 188 | #define CMD_DMAWMB 0x13 | ||
| 189 | |||
| 190 | #define SZ_DMAADDH 3 | ||
| 191 | #define SZ_DMAEND 1 | ||
| 192 | #define SZ_DMAFLUSHP 2 | ||
| 193 | #define SZ_DMALD 1 | ||
| 194 | #define SZ_DMALDP 2 | ||
| 195 | #define SZ_DMALP 2 | ||
| 196 | #define SZ_DMALPEND 2 | ||
| 197 | #define SZ_DMAKILL 1 | ||
| 198 | #define SZ_DMAMOV 6 | ||
| 199 | #define SZ_DMANOP 1 | ||
| 200 | #define SZ_DMARMB 1 | ||
| 201 | #define SZ_DMASEV 2 | ||
| 202 | #define SZ_DMAST 1 | ||
| 203 | #define SZ_DMASTP 2 | ||
| 204 | #define SZ_DMASTZ 1 | ||
| 205 | #define SZ_DMAWFE 2 | ||
| 206 | #define SZ_DMAWFP 2 | ||
| 207 | #define SZ_DMAWMB 1 | ||
| 208 | #define SZ_DMAGO 6 | ||
| 209 | |||
| 210 | #define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1) | ||
| 211 | #define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7)) | ||
| 212 | |||
| 213 | #define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr)) | ||
| 214 | #define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr)) | ||
| 215 | |||
| 216 | /* | ||
| 217 | * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req | ||
| 218 | * at 1byte/burst for P<->M and M<->M respectively. | ||
| 219 | * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req | ||
| 220 | * should be enough for P<->M and M<->M respectively. | ||
| 221 | */ | ||
| 222 | #define MCODE_BUFF_PER_REQ 256 | ||
| 223 | |||
/*
 * Mark a _pl330_req as free.
 * We do it by writing DMAEND as the first instruction
 * because no valid request is going to have DMAEND as
 * its first instruction to execute.
 */
#define MARK_FREE(req)	do { \
				_emit_END(0, (req)->mc_cpu); \
				(req)->mc_len = 0; \
			} while (0)

/* If the _pl330_req is available to the client */
#define IS_FREE(req)	(*((u8 *)((req)->mc_cpu)) == CMD_DMAEND)

/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();

#ifdef PL330_DEBUG_MCGEN
/* Running byte offset of the microcode currently being dumped */
static unsigned cmd_line;
/* Print one emitted instruction and advance the dump offset by 'off' */
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
/* Reset the dump offset to the bus address of a fresh microcode buffer */
#define PL330_DBGMC_START(addr)	(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)	do {} while (0)
#endif
| 253 | |||
/* Everything needed while generating microcode for one transfer */
struct _xfer_spec {
	/* Channel Control Register value for this transfer */
	u32 ccr;
	/* The client request being serviced */
	struct pl330_req *r;
	/* The xfer unit currently being processed */
	struct pl330_xfer *x;
};
| 259 | |||
/* Destination register selector for the DMAMOV instruction */
enum dmamov_dst {
	SAR = 0,	/* Source Address Register */
	CCR,		/* Channel Control Register */
	DAR,		/* Destination Address Register */
};

/* Address register selector for DMAADDH (source or destination) */
enum pl330_dst {
	SRC = 0,
	DST,
};

/* Condition encoded into load/store/wait instructions */
enum pl330_cond {
	SINGLE,
	BURST,
	ALWAYS,
};
| 276 | |||
/* DMAC-internal bookkeeping for one queued client request */
struct _pl330_req {
	/* BUS address of this request's microcode buffer */
	u32 mc_bus;
	/* CPU address of the same microcode buffer */
	void *mc_cpu;
	/* Number of bytes taken to setup MC for the req */
	u32 mc_len;
	/* The client request being serviced; NULL when slot unused */
	struct pl330_req *r;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

/* ToBeDone for tasklet */
struct _pl330_tbd {
	bool reset_dmac;
	bool reset_mngr;
	/* presumably a bitmask of channels needing reset — confirm
	 * against the tasklet that consumes it (outside this view) */
	u8 reset_chan;
};
| 293 | |||
/* A DMAC Thread */
struct pl330_thread {
	/* Thread number; equals pcfg.num_chan for the MANAGER thread */
	u8 id;
	/* Event line this thread signals completion on (SEV/IRQ) */
	int ev;
	/* If the channel is not yet acquired by any client */
	bool free;
	/* Parent DMAC */
	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last submitted request */
	unsigned lstenq;
};

/* Lifecycle state of a DMAC instance */
enum pl330_dmac_state {
	UNINIT,
	INIT,
	DYING,
};
| 313 | |||
/* A DMAC */
struct pl330_dmac {
	/* Serializes access to the DMAC's internal state */
	spinlock_t lock;
	/* Holds list of reqs with due callbacks */
	struct list_head req_done;
	/* Pointer to platform specific stuff */
	struct pl330_info *pinfo;
	/* Maximum possible events/irqs */
	int events[32];
	/* BUS address of MicroCode buffer */
	u32 mcode_bus;
	/* CPU address of MicroCode buffer */
	void *mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread *channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread *manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct tasks;
	/* Pending reset work for the tasklet */
	struct _pl330_tbd dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state state;
};
| 337 | |||
| 338 | static inline void _callback(struct pl330_req *r, enum pl330_op_err err) | ||
| 339 | { | ||
| 340 | if (r && r->xfer_cb) | ||
| 341 | r->xfer_cb(r->token, err); | ||
| 342 | } | ||
| 343 | |||
| 344 | static inline bool _queue_empty(struct pl330_thread *thrd) | ||
| 345 | { | ||
| 346 | return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) | ||
| 347 | ? true : false; | ||
| 348 | } | ||
| 349 | |||
| 350 | static inline bool _queue_full(struct pl330_thread *thrd) | ||
| 351 | { | ||
| 352 | return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1])) | ||
| 353 | ? false : true; | ||
| 354 | } | ||
| 355 | |||
| 356 | static inline bool is_manager(struct pl330_thread *thrd) | ||
| 357 | { | ||
| 358 | struct pl330_dmac *pl330 = thrd->dmac; | ||
| 359 | |||
| 360 | /* MANAGER is indexed at the end */ | ||
| 361 | if (thrd->id == pl330->pinfo->pcfg.num_chan) | ||
| 362 | return true; | ||
| 363 | else | ||
| 364 | return false; | ||
| 365 | } | ||
| 366 | |||
| 367 | /* If manager of the thread is in Non-Secure mode */ | ||
| 368 | static inline bool _manager_ns(struct pl330_thread *thrd) | ||
| 369 | { | ||
| 370 | struct pl330_dmac *pl330 = thrd->dmac; | ||
| 371 | |||
| 372 | return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; | ||
| 373 | } | ||
| 374 | |||
| 375 | static inline u32 get_id(struct pl330_info *pi, u32 off) | ||
| 376 | { | ||
| 377 | void __iomem *regs = pi->base; | ||
| 378 | u32 id = 0; | ||
| 379 | |||
| 380 | id |= (readb(regs + off + 0x0) << 0); | ||
| 381 | id |= (readb(regs + off + 0x4) << 8); | ||
| 382 | id |= (readb(regs + off + 0x8) << 16); | ||
| 383 | id |= (readb(regs + off + 0xc) << 24); | ||
| 384 | |||
| 385 | return id; | ||
| 386 | } | ||
| 387 | |||
/*
 * Emit a DMAADDH (add 16-bit immediate to SAR or DAR) instruction
 * into buf. Returns the instruction size in bytes; with dry_run set,
 * only the size is reported and buf is left untouched.
 */
static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[],
		enum pl330_dst da, u16 val)
{
	if (dry_run)
		return SZ_DMAADDH;

	buf[0] = CMD_DMAADDH;
	buf[0] |= (da << 1);
	/* NOTE(review): unaligned host-endian store into the byte
	 * stream — assumes the CPU tolerates unaligned access and is
	 * little-endian like the DMAC's instruction format; confirm. */
	*((u16 *)&buf[1]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n",
		da == 1 ? "DA" : "SA", val);

	return SZ_DMAADDH;
}
| 403 | |||
| 404 | static inline u32 _emit_END(unsigned dry_run, u8 buf[]) | ||
| 405 | { | ||
| 406 | if (dry_run) | ||
| 407 | return SZ_DMAEND; | ||
| 408 | |||
| 409 | buf[0] = CMD_DMAEND; | ||
| 410 | |||
| 411 | PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n"); | ||
| 412 | |||
| 413 | return SZ_DMAEND; | ||
| 414 | } | ||
| 415 | |||
| 416 | static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri) | ||
| 417 | { | ||
| 418 | if (dry_run) | ||
| 419 | return SZ_DMAFLUSHP; | ||
| 420 | |||
| 421 | buf[0] = CMD_DMAFLUSHP; | ||
| 422 | |||
| 423 | peri &= 0x1f; | ||
| 424 | peri <<= 3; | ||
| 425 | buf[1] = peri; | ||
| 426 | |||
| 427 | PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3); | ||
| 428 | |||
| 429 | return SZ_DMAFLUSHP; | ||
| 430 | } | ||
| 431 | |||
| 432 | static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
| 433 | { | ||
| 434 | if (dry_run) | ||
| 435 | return SZ_DMALD; | ||
| 436 | |||
| 437 | buf[0] = CMD_DMALD; | ||
| 438 | |||
| 439 | if (cond == SINGLE) | ||
| 440 | buf[0] |= (0 << 1) | (1 << 0); | ||
| 441 | else if (cond == BURST) | ||
| 442 | buf[0] |= (1 << 1) | (1 << 0); | ||
| 443 | |||
| 444 | PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n", | ||
| 445 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
| 446 | |||
| 447 | return SZ_DMALD; | ||
| 448 | } | ||
| 449 | |||
| 450 | static inline u32 _emit_LDP(unsigned dry_run, u8 buf[], | ||
| 451 | enum pl330_cond cond, u8 peri) | ||
| 452 | { | ||
| 453 | if (dry_run) | ||
| 454 | return SZ_DMALDP; | ||
| 455 | |||
| 456 | buf[0] = CMD_DMALDP; | ||
| 457 | |||
| 458 | if (cond == BURST) | ||
| 459 | buf[0] |= (1 << 1); | ||
| 460 | |||
| 461 | peri &= 0x1f; | ||
| 462 | peri <<= 3; | ||
| 463 | buf[1] = peri; | ||
| 464 | |||
| 465 | PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n", | ||
| 466 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
| 467 | |||
| 468 | return SZ_DMALDP; | ||
| 469 | } | ||
| 470 | |||
| 471 | static inline u32 _emit_LP(unsigned dry_run, u8 buf[], | ||
| 472 | unsigned loop, u8 cnt) | ||
| 473 | { | ||
| 474 | if (dry_run) | ||
| 475 | return SZ_DMALP; | ||
| 476 | |||
| 477 | buf[0] = CMD_DMALP; | ||
| 478 | |||
| 479 | if (loop) | ||
| 480 | buf[0] |= (1 << 1); | ||
| 481 | |||
| 482 | cnt--; /* DMAC increments by 1 internally */ | ||
| 483 | buf[1] = cnt; | ||
| 484 | |||
| 485 | PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt); | ||
| 486 | |||
| 487 | return SZ_DMALP; | ||
| 488 | } | ||
| 489 | |||
/* Arguments for emitting a DMALPEND instruction */
struct _arg_LPEND {
	/* SINGLE/BURST/ALWAYS condition on the loop-end jump */
	enum pl330_cond cond;
	/* True for a forever (DMALPFE) loop */
	bool forever;
	/* Which of the two loop counters (0 or 1) this ends */
	unsigned loop;
	/* Backward jump distance in bytes */
	u8 bjump;
};
| 496 | |||
| 497 | static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[], | ||
| 498 | const struct _arg_LPEND *arg) | ||
| 499 | { | ||
| 500 | enum pl330_cond cond = arg->cond; | ||
| 501 | bool forever = arg->forever; | ||
| 502 | unsigned loop = arg->loop; | ||
| 503 | u8 bjump = arg->bjump; | ||
| 504 | |||
| 505 | if (dry_run) | ||
| 506 | return SZ_DMALPEND; | ||
| 507 | |||
| 508 | buf[0] = CMD_DMALPEND; | ||
| 509 | |||
| 510 | if (loop) | ||
| 511 | buf[0] |= (1 << 2); | ||
| 512 | |||
| 513 | if (!forever) | ||
| 514 | buf[0] |= (1 << 4); | ||
| 515 | |||
| 516 | if (cond == SINGLE) | ||
| 517 | buf[0] |= (0 << 1) | (1 << 0); | ||
| 518 | else if (cond == BURST) | ||
| 519 | buf[0] |= (1 << 1) | (1 << 0); | ||
| 520 | |||
| 521 | buf[1] = bjump; | ||
| 522 | |||
| 523 | PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n", | ||
| 524 | forever ? "FE" : "END", | ||
| 525 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'), | ||
| 526 | loop ? '1' : '0', | ||
| 527 | bjump); | ||
| 528 | |||
| 529 | return SZ_DMALPEND; | ||
| 530 | } | ||
| 531 | |||
| 532 | static inline u32 _emit_KILL(unsigned dry_run, u8 buf[]) | ||
| 533 | { | ||
| 534 | if (dry_run) | ||
| 535 | return SZ_DMAKILL; | ||
| 536 | |||
| 537 | buf[0] = CMD_DMAKILL; | ||
| 538 | |||
| 539 | return SZ_DMAKILL; | ||
| 540 | } | ||
| 541 | |||
/*
 * Emit DMAMOV - load a 32-bit value into SAR, DAR or CCR.
 * Returns the instruction size in bytes; with dry_run set, only the
 * size is reported and buf is left untouched.
 */
static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)
{
	if (dry_run)
		return SZ_DMAMOV;

	buf[0] = CMD_DMAMOV;
	buf[1] = dst;
	/* NOTE(review): unaligned host-endian store — assumes the CPU
	 * tolerates it and matches the DMAC's byte order; confirm. */
	*((u32 *)&buf[2]) = val;

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

	return SZ_DMAMOV;
}
| 557 | |||
| 558 | static inline u32 _emit_NOP(unsigned dry_run, u8 buf[]) | ||
| 559 | { | ||
| 560 | if (dry_run) | ||
| 561 | return SZ_DMANOP; | ||
| 562 | |||
| 563 | buf[0] = CMD_DMANOP; | ||
| 564 | |||
| 565 | PL330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n"); | ||
| 566 | |||
| 567 | return SZ_DMANOP; | ||
| 568 | } | ||
| 569 | |||
| 570 | static inline u32 _emit_RMB(unsigned dry_run, u8 buf[]) | ||
| 571 | { | ||
| 572 | if (dry_run) | ||
| 573 | return SZ_DMARMB; | ||
| 574 | |||
| 575 | buf[0] = CMD_DMARMB; | ||
| 576 | |||
| 577 | PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n"); | ||
| 578 | |||
| 579 | return SZ_DMARMB; | ||
| 580 | } | ||
| 581 | |||
| 582 | static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev) | ||
| 583 | { | ||
| 584 | if (dry_run) | ||
| 585 | return SZ_DMASEV; | ||
| 586 | |||
| 587 | buf[0] = CMD_DMASEV; | ||
| 588 | |||
| 589 | ev &= 0x1f; | ||
| 590 | ev <<= 3; | ||
| 591 | buf[1] = ev; | ||
| 592 | |||
| 593 | PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3); | ||
| 594 | |||
| 595 | return SZ_DMASEV; | ||
| 596 | } | ||
| 597 | |||
| 598 | static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond) | ||
| 599 | { | ||
| 600 | if (dry_run) | ||
| 601 | return SZ_DMAST; | ||
| 602 | |||
| 603 | buf[0] = CMD_DMAST; | ||
| 604 | |||
| 605 | if (cond == SINGLE) | ||
| 606 | buf[0] |= (0 << 1) | (1 << 0); | ||
| 607 | else if (cond == BURST) | ||
| 608 | buf[0] |= (1 << 1) | (1 << 0); | ||
| 609 | |||
| 610 | PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n", | ||
| 611 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | ||
| 612 | |||
| 613 | return SZ_DMAST; | ||
| 614 | } | ||
| 615 | |||
| 616 | static inline u32 _emit_STP(unsigned dry_run, u8 buf[], | ||
| 617 | enum pl330_cond cond, u8 peri) | ||
| 618 | { | ||
| 619 | if (dry_run) | ||
| 620 | return SZ_DMASTP; | ||
| 621 | |||
| 622 | buf[0] = CMD_DMASTP; | ||
| 623 | |||
| 624 | if (cond == BURST) | ||
| 625 | buf[0] |= (1 << 1); | ||
| 626 | |||
| 627 | peri &= 0x1f; | ||
| 628 | peri <<= 3; | ||
| 629 | buf[1] = peri; | ||
| 630 | |||
| 631 | PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n", | ||
| 632 | cond == SINGLE ? 'S' : 'B', peri >> 3); | ||
| 633 | |||
| 634 | return SZ_DMASTP; | ||
| 635 | } | ||
| 636 | |||
| 637 | static inline u32 _emit_STZ(unsigned dry_run, u8 buf[]) | ||
| 638 | { | ||
| 639 | if (dry_run) | ||
| 640 | return SZ_DMASTZ; | ||
| 641 | |||
| 642 | buf[0] = CMD_DMASTZ; | ||
| 643 | |||
| 644 | PL330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n"); | ||
| 645 | |||
| 646 | return SZ_DMASTZ; | ||
| 647 | } | ||
| 648 | |||
| 649 | static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev, | ||
| 650 | unsigned invalidate) | ||
| 651 | { | ||
| 652 | if (dry_run) | ||
| 653 | return SZ_DMAWFE; | ||
| 654 | |||
| 655 | buf[0] = CMD_DMAWFE; | ||
| 656 | |||
| 657 | ev &= 0x1f; | ||
| 658 | ev <<= 3; | ||
| 659 | buf[1] = ev; | ||
| 660 | |||
| 661 | if (invalidate) | ||
| 662 | buf[1] |= (1 << 1); | ||
| 663 | |||
| 664 | PL330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n", | ||
| 665 | ev >> 3, invalidate ? ", I" : ""); | ||
| 666 | |||
| 667 | return SZ_DMAWFE; | ||
| 668 | } | ||
| 669 | |||
| 670 | static inline u32 _emit_WFP(unsigned dry_run, u8 buf[], | ||
| 671 | enum pl330_cond cond, u8 peri) | ||
| 672 | { | ||
| 673 | if (dry_run) | ||
| 674 | return SZ_DMAWFP; | ||
| 675 | |||
| 676 | buf[0] = CMD_DMAWFP; | ||
| 677 | |||
| 678 | if (cond == SINGLE) | ||
| 679 | buf[0] |= (0 << 1) | (0 << 0); | ||
| 680 | else if (cond == BURST) | ||
| 681 | buf[0] |= (1 << 1) | (0 << 0); | ||
| 682 | else | ||
| 683 | buf[0] |= (0 << 1) | (1 << 0); | ||
| 684 | |||
| 685 | peri &= 0x1f; | ||
| 686 | peri <<= 3; | ||
| 687 | buf[1] = peri; | ||
| 688 | |||
| 689 | PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n", | ||
| 690 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3); | ||
| 691 | |||
| 692 | return SZ_DMAWFP; | ||
| 693 | } | ||
| 694 | |||
| 695 | static inline u32 _emit_WMB(unsigned dry_run, u8 buf[]) | ||
| 696 | { | ||
| 697 | if (dry_run) | ||
| 698 | return SZ_DMAWMB; | ||
| 699 | |||
| 700 | buf[0] = CMD_DMAWMB; | ||
| 701 | |||
| 702 | PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n"); | ||
| 703 | |||
| 704 | return SZ_DMAWMB; | ||
| 705 | } | ||
| 706 | |||
/* Arguments for emitting a DMAGO instruction */
struct _arg_GO {
	/* Channel number to start */
	u8 chan;
	/* Bus address of the microcode the channel should execute */
	u32 addr;
	/* Non-zero to start the channel in Non-Secure state */
	unsigned ns;
};
| 712 | |||
/*
 * Emit DMAGO - start a channel executing microcode at arg->addr.
 * Only the Manager thread may execute this instruction.
 */
static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u8 chan = arg->chan;
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

	if (dry_run)
		return SZ_DMAGO;

	buf[0] = CMD_DMAGO;
	buf[0] |= (ns << 1);	/* bit1 selects Non-Secure start */

	buf[1] = chan & 0x7;

	/* NOTE(review): unaligned host-endian store — assumes the CPU
	 * tolerates it and matches the DMAC's byte order; confirm. */
	*((u32 *)&buf[2]) = addr;

	return SZ_DMAGO;
}
| 732 | |||
/* Rough conversion of milliseconds to busy-wait loop iterations.
 * Fix: parenthesize the macro argument so an expression argument
 * (e.g. msecs_to_loops(a + b)) expands with correct precedence. */
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * (t))
| 734 | |||
| 735 | /* Returns Time-Out */ | ||
| 736 | static bool _until_dmac_idle(struct pl330_thread *thrd) | ||
| 737 | { | ||
| 738 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
| 739 | unsigned long loops = msecs_to_loops(5); | ||
| 740 | |||
| 741 | do { | ||
| 742 | /* Until Manager is Idle */ | ||
| 743 | if (!(readl(regs + DBGSTATUS) & DBG_BUSY)) | ||
| 744 | break; | ||
| 745 | |||
| 746 | cpu_relax(); | ||
| 747 | } while (--loops); | ||
| 748 | |||
| 749 | if (!loops) | ||
| 750 | return true; | ||
| 751 | |||
| 752 | return false; | ||
| 753 | } | ||
| 754 | |||
/*
 * Feed a 6-byte instruction to the DMAC through its debug registers,
 * on behalf of channel 'thrd' or (if as_manager) the Manager thread.
 * The instruction is silently dropped with an error message if the
 * DMAC never becomes debug-idle.
 */
static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	/* First two instruction bytes go into DBGINST0[31:16] */
	val = (insn[0] << 16) | (insn[1] << 24);
	if (!as_manager) {
		val |= (1 << 0);	/* debug thread = channel */
		val |= (thrd->id << 8); /* Channel Number */
	}
	writel(val, regs + DBGINST0);

	/* Remaining four bytes go into DBGINST1.
	 * NOTE(review): unaligned host-endian load from the insn
	 * buffer — assumes the CPU tolerates it; confirm. */
	val = *((u32 *)&insn[2]);
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n");
		return;
	}

	/* Get going */
	writel(0, regs + DBGCMD);
}
| 780 | |||
/*
 * Read the hardware status of 'thrd' (channel or Manager) and map it
 * to a PL330_STATE_* flag. Several encodings are only defined for
 * channel threads; for the Manager they map to PL330_STATE_INVALID.
 */
static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u32 val;

	/* Status lives in the low nibble of DS (Manager) or CSn */
	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

	switch (val) {
	case DS_ST_STOP:
		return PL330_STATE_STOPPED;
	case DS_ST_EXEC:
		return PL330_STATE_EXECUTING;
	case DS_ST_CMISS:
		return PL330_STATE_CACHEMISS;
	case DS_ST_UPDTPC:
		return PL330_STATE_UPDTPC;
	case DS_ST_WFE:
		return PL330_STATE_WFE;
	case DS_ST_FAULT:
		return PL330_STATE_FAULTING;
	case DS_ST_ATBRR:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_ATBARRIER;
	case DS_ST_QBUSY:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_QUEUEBUSY;
	case DS_ST_WFP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_WFP;
	case DS_ST_KILL:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_KILLING;
	case DS_ST_CMPLT:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_COMPLETING;
	case DS_ST_FLTCMP:
		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		else
			return PL330_STATE_FAULT_COMPLETING;
	default:
		return PL330_STATE_INVALID;
	}
}
| 838 | |||
| 839 | /* If the request 'req' of thread 'thrd' is currently active */ | ||
| 840 | static inline bool _req_active(struct pl330_thread *thrd, | ||
| 841 | struct _pl330_req *req) | ||
| 842 | { | ||
| 843 | void __iomem *regs = thrd->dmac->pinfo->base; | ||
| 844 | u32 buf = req->mc_bus, pc = readl(regs + CPC(thrd->id)); | ||
| 845 | |||
| 846 | if (IS_FREE(req)) | ||
| 847 | return false; | ||
| 848 | |||
| 849 | return (pc >= buf && pc <= buf + req->mc_len) ? true : false; | ||
| 850 | } | ||
| 851 | |||
| 852 | /* Returns 0 if the thread is inactive, ID of active req + 1 otherwise */ | ||
| 853 | static inline unsigned _thrd_active(struct pl330_thread *thrd) | ||
| 854 | { | ||
| 855 | if (_req_active(thrd, &thrd->req[0])) | ||
| 856 | return 1; /* First req active */ | ||
| 857 | |||
| 858 | if (_req_active(thrd, &thrd->req[1])) | ||
| 859 | return 2; /* Second req active */ | ||
| 860 | |||
| 861 | return 0; | ||
| 862 | } | ||
| 863 | |||
/*
 * Force 'thrd' to stop: wait out the transient FAULT_COMPLETING
 * state, then mask the thread's event interrupt and issue DMAKILL
 * via the debug interface. No-op if already stopping or stopped.
 */
static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)
		return;

	_emit_KILL(0, insn);

	/* Stop generating interrupts for SEV */
	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);

	_execute_DBGINSN(thrd, insn, is_manager(thrd));
}
| 885 | |||
/*
 * Pick the oldest pending request of 'thrd' and make the DMAC start
 * executing its microcode (DMAGO issued as the Manager). Returns true
 * unless the thread is in an unexpected state (it never is here —
 * the only 'false' paths are "already active" and "nothing queued",
 * both of which also return true).
 */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->pinfo->base;
	struct _pl330_req *req;
	struct pl330_req *r;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	/* Prefer the request enqueued before the last one */
	if (!IS_FREE(&thrd->req[1 - thrd->lstenq]))
		req = &thrd->req[1 - thrd->lstenq];
	else if (!IS_FREE(&thrd->req[thrd->lstenq]))
		req = &thrd->req[thrd->lstenq];
	else
		req = NULL;

	/* Return if no request */
	if (!req || !req->r)
		return true;

	r = req->r;

	/* Decide the security state the channel starts in */
	if (r->cfg)
		ns = r->cfg->nonsecure ? 1 : 0;
	else if (readl(regs + CS(thrd->id)) & CS_CNS)
		ns = 1;
	else
		ns = 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	return true;
}
| 938 | |||
/*
 * Drive 'thrd' towards executing its queued work from whatever state
 * it is in: fault states are stopped first, transient states waited
 * out, then _trigger() starts the next request. The case fallthroughs
 * are intentional — each state degrades into the next handled one.
 * Returns false only for states with no recovery path (WFE resume is
 * not implemented).
 */
static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

		/* fall through */
	case PL330_STATE_FAULTING:
		_stop(thrd);

		/* fall through */
	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

		/* fall through */
	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}
| 971 | |||
| 972 | static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], | ||
| 973 | const struct _xfer_spec *pxs, int cyc) | ||
| 974 | { | ||
| 975 | int off = 0; | ||
| 976 | |||
| 977 | while (cyc--) { | ||
| 978 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
| 979 | off += _emit_RMB(dry_run, &buf[off]); | ||
| 980 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
| 981 | off += _emit_WMB(dry_run, &buf[off]); | ||
| 982 | } | ||
| 983 | |||
| 984 | return off; | ||
| 985 | } | ||
| 986 | |||
| 987 | static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], | ||
| 988 | const struct _xfer_spec *pxs, int cyc) | ||
| 989 | { | ||
| 990 | int off = 0; | ||
| 991 | |||
| 992 | while (cyc--) { | ||
| 993 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
| 994 | off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
| 995 | off += _emit_ST(dry_run, &buf[off], ALWAYS); | ||
| 996 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
| 997 | } | ||
| 998 | |||
| 999 | return off; | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], | ||
| 1003 | const struct _xfer_spec *pxs, int cyc) | ||
| 1004 | { | ||
| 1005 | int off = 0; | ||
| 1006 | |||
| 1007 | while (cyc--) { | ||
| 1008 | off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
| 1009 | off += _emit_LD(dry_run, &buf[off], ALWAYS); | ||
| 1010 | off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri); | ||
| 1011 | off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | return off; | ||
| 1015 | } | ||
| 1016 | |||
| 1017 | static int _bursts(unsigned dry_run, u8 buf[], | ||
| 1018 | const struct _xfer_spec *pxs, int cyc) | ||
| 1019 | { | ||
| 1020 | int off = 0; | ||
| 1021 | |||
| 1022 | switch (pxs->r->rqtype) { | ||
| 1023 | case MEMTODEV: | ||
| 1024 | off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); | ||
| 1025 | break; | ||
| 1026 | case DEVTOMEM: | ||
| 1027 | off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); | ||
| 1028 | break; | ||
| 1029 | case MEMTOMEM: | ||
| 1030 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); | ||
| 1031 | break; | ||
| 1032 | default: | ||
| 1033 | off += 0x40000000; /* Scare off the Client */ | ||
| 1034 | break; | ||
| 1035 | } | ||
| 1036 | |||
| 1037 | return off; | ||
| 1038 | } | ||
| 1039 | |||
/*
 * Emit one (possibly nested) DMALP/DMALPEND loop construct covering
 * as many of *bursts as fits. Returns bytes consumed and updates
 * *bursts to the number of bursts actually covered; the caller loops
 * until the remainder is zero.
 */
static inline int _loop(unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		/* Two nested 256-iteration loops, body unrolled 'cyc' */
		lcnt1 = 256;
		lcnt0 = 256;
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		/* Inner loop of 256, outer loop soaks up the rest */
		lcnt1 = 256;
		lcnt0 = *bursts / lcnt1;
		cyc = 1;
	} else {
		/* Single loop suffices */
		lcnt1 = *bursts;
		lcnt0 = 0;
		cyc = 1;
	}

	/* Dry-run sizes of the loop scaffolding and one burst body */
	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 0;
	lpend.bjump = 0;
	szlpend = _emit_LPEND(1, buf, &lpend);

	if (lcnt0) {
		/* Nested loop doubles the scaffolding */
		szlp *= 2;
		szlpend *= 2;
	}

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

	off = 0;

	/* Outer loop, if any */
	if (lcnt0) {
		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		ljmp0 = off;
	}

	/* Inner loop around the unrolled burst body */
	off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
	ljmp1 = off;

	off += _bursts(dry_run, &buf[off], pxs, cyc);

	/* Close the inner loop (backward jump to ljmp1) */
	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.loop = 1;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	/* Close the outer loop, if any */
	if (lcnt0) {
		lpend.cond = ALWAYS;
		lpend.forever = false;
		lpend.loop = 0;
		lpend.bjump = off - ljmp0;
		off += _emit_LPEND(dry_run, &buf[off], &lpend);
	}

	/* Report how many bursts this construct actually performs */
	*bursts = lcnt1 * cyc;
	if (lcnt0)
		*bursts *= lcnt0;

	return off;
}
| 1118 | |||
| 1119 | static inline int _setup_loops(unsigned dry_run, u8 buf[], | ||
| 1120 | const struct _xfer_spec *pxs) | ||
| 1121 | { | ||
| 1122 | struct pl330_xfer *x = pxs->x; | ||
| 1123 | u32 ccr = pxs->ccr; | ||
| 1124 | unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); | ||
| 1125 | int off = 0; | ||
| 1126 | |||
| 1127 | while (bursts) { | ||
| 1128 | c = bursts; | ||
| 1129 | off += _loop(dry_run, &buf[off], &c, pxs); | ||
| 1130 | bursts -= c; | ||
| 1131 | } | ||
| 1132 | |||
| 1133 | return off; | ||
| 1134 | } | ||
| 1135 | |||
| 1136 | static inline int _setup_xfer(unsigned dry_run, u8 buf[], | ||
| 1137 | const struct _xfer_spec *pxs) | ||
| 1138 | { | ||
| 1139 | struct pl330_xfer *x = pxs->x; | ||
| 1140 | int off = 0; | ||
| 1141 | |||
| 1142 | /* DMAMOV SAR, x->src_addr */ | ||
| 1143 | off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr); | ||
| 1144 | /* DMAMOV DAR, x->dst_addr */ | ||
| 1145 | off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr); | ||
| 1146 | |||
| 1147 | /* Setup Loop(s) */ | ||
| 1148 | off += _setup_loops(dry_run, &buf[off], pxs); | ||
| 1149 | |||
| 1150 | return off; | ||
| 1151 | } | ||
| 1152 | |||
/*
 * A req is a sequence of one or more xfer units.
 * Generates the complete microcode program for the request into the
 * request slot's buffer: DMAMOV CCR, then per-xfer address moves and
 * loops, then DMASEV (completion event) and DMAEND.
 * With dry_run set, nothing is written - only the size is computed.
 * Returns the number of bytes taken to setup the MC for the req,
 * or -EINVAL if an xfer length is not a whole number of bursts.
 */
static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
		unsigned index, struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	struct pl330_xfer *x;
	u8 *buf = req->mc_cpu;
	int off = 0;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	x = pxs->r->x;
	do {
		/* Error if xfer length is not aligned at burst size */
		if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
			return -EINVAL;

		pxs->x = x;
		off += _setup_xfer(dry_run, &buf[off], pxs);

		x = x->next;
	} while (x);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);
	/* DMAEND */
	off += _emit_END(dry_run, &buf[off]);

	return off;
}
| 1189 | |||
| 1190 | static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) | ||
| 1191 | { | ||
| 1192 | u32 ccr = 0; | ||
| 1193 | |||
| 1194 | if (rqc->src_inc) | ||
| 1195 | ccr |= CC_SRCINC; | ||
| 1196 | |||
| 1197 | if (rqc->dst_inc) | ||
| 1198 | ccr |= CC_DSTINC; | ||
| 1199 | |||
| 1200 | /* We set same protection levels for Src and DST for now */ | ||
| 1201 | if (rqc->privileged) | ||
| 1202 | ccr |= CC_SRCPRI | CC_DSTPRI; | ||
| 1203 | if (rqc->nonsecure) | ||
| 1204 | ccr |= CC_SRCNS | CC_DSTNS; | ||
| 1205 | if (rqc->insnaccess) | ||
| 1206 | ccr |= CC_SRCIA | CC_DSTIA; | ||
| 1207 | |||
| 1208 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT); | ||
| 1209 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT); | ||
| 1210 | |||
| 1211 | ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); | ||
| 1212 | ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); | ||
| 1213 | |||
| 1214 | ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT); | ||
| 1215 | ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT); | ||
| 1216 | |||
| 1217 | ccr |= (rqc->swap << CC_SWAP_SHFT); | ||
| 1218 | |||
| 1219 | return ccr; | ||
| 1220 | } | ||
| 1221 | |||
| 1222 | static inline bool _is_valid(u32 ccr) | ||
| 1223 | { | ||
| 1224 | enum pl330_dstcachectrl dcctl; | ||
| 1225 | enum pl330_srccachectrl scctl; | ||
| 1226 | |||
| 1227 | dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; | ||
| 1228 | scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; | ||
| 1229 | |||
| 1230 | if (dcctl == DINVALID1 || dcctl == DINVALID2 | ||
| 1231 | || scctl == SINVALID1 || scctl == SINVALID2) | ||
| 1232 | return false; | ||
| 1233 | else | ||
| 1234 | return true; | ||
| 1235 | } | ||
| 1236 | |||
| 1237 | /* | ||
| 1238 | * Submit a list of xfers after which the client wants notification. | ||
| 1239 | * Client is not notified after each xfer unit, just once after all | ||
| 1240 | * xfer units are done or some error occurs. | ||
| 1241 | */ | ||
| 1242 | int pl330_submit_req(void *ch_id, struct pl330_req *r) | ||
| 1243 | { | ||
| 1244 | struct pl330_thread *thrd = ch_id; | ||
| 1245 | struct pl330_dmac *pl330; | ||
| 1246 | struct pl330_info *pi; | ||
| 1247 | struct _xfer_spec xs; | ||
| 1248 | unsigned long flags; | ||
| 1249 | void __iomem *regs; | ||
| 1250 | unsigned idx; | ||
| 1251 | u32 ccr; | ||
| 1252 | int ret = 0; | ||
| 1253 | |||
| 1254 | /* No Req or Unacquired Channel or DMAC */ | ||
| 1255 | if (!r || !thrd || thrd->free) | ||
| 1256 | return -EINVAL; | ||
| 1257 | |||
| 1258 | pl330 = thrd->dmac; | ||
| 1259 | pi = pl330->pinfo; | ||
| 1260 | regs = pi->base; | ||
| 1261 | |||
| 1262 | if (pl330->state == DYING | ||
| 1263 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { | ||
| 1264 | dev_info(thrd->dmac->pinfo->dev, "%s:%d\n", | ||
| 1265 | __func__, __LINE__); | ||
| 1266 | return -EAGAIN; | ||
| 1267 | } | ||
| 1268 | |||
| 1269 | /* If request for non-existing peripheral */ | ||
| 1270 | if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) { | ||
| 1271 | dev_info(thrd->dmac->pinfo->dev, | ||
| 1272 | "%s:%d Invalid peripheral(%u)!\n", | ||
| 1273 | __func__, __LINE__, r->peri); | ||
| 1274 | return -EINVAL; | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | spin_lock_irqsave(&pl330->lock, flags); | ||
| 1278 | |||
| 1279 | if (_queue_full(thrd)) { | ||
| 1280 | ret = -EAGAIN; | ||
| 1281 | goto xfer_exit; | ||
| 1282 | } | ||
| 1283 | |||
| 1284 | /* Prefer Secure Channel */ | ||
| 1285 | if (!_manager_ns(thrd)) | ||
| 1286 | r->cfg->nonsecure = 0; | ||
| 1287 | else | ||
| 1288 | r->cfg->nonsecure = 1; | ||
| 1289 | |||
| 1290 | /* Use last settings, if not provided */ | ||
| 1291 | if (r->cfg) | ||
| 1292 | ccr = _prepare_ccr(r->cfg); | ||
| 1293 | else | ||
| 1294 | ccr = readl(regs + CC(thrd->id)); | ||
| 1295 | |||
| 1296 | /* If this req doesn't have valid xfer settings */ | ||
| 1297 | if (!_is_valid(ccr)) { | ||
| 1298 | ret = -EINVAL; | ||
| 1299 | dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n", | ||
| 1300 | __func__, __LINE__, ccr); | ||
| 1301 | goto xfer_exit; | ||
| 1302 | } | ||
| 1303 | |||
| 1304 | idx = IS_FREE(&thrd->req[0]) ? 0 : 1; | ||
| 1305 | |||
| 1306 | xs.ccr = ccr; | ||
| 1307 | xs.r = r; | ||
| 1308 | |||
| 1309 | /* First dry run to check if req is acceptable */ | ||
| 1310 | ret = _setup_req(1, thrd, idx, &xs); | ||
| 1311 | if (ret < 0) | ||
| 1312 | goto xfer_exit; | ||
| 1313 | |||
| 1314 | if (ret > pi->mcbufsz / 2) { | ||
| 1315 | dev_info(thrd->dmac->pinfo->dev, | ||
| 1316 | "%s:%d Trying increasing mcbufsz\n", | ||
| 1317 | __func__, __LINE__); | ||
| 1318 | ret = -ENOMEM; | ||
| 1319 | goto xfer_exit; | ||
| 1320 | } | ||
| 1321 | |||
| 1322 | /* Hook the request */ | ||
| 1323 | thrd->lstenq = idx; | ||
| 1324 | thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs); | ||
| 1325 | thrd->req[idx].r = r; | ||
| 1326 | |||
| 1327 | ret = 0; | ||
| 1328 | |||
| 1329 | xfer_exit: | ||
| 1330 | spin_unlock_irqrestore(&pl330->lock, flags); | ||
| 1331 | |||
| 1332 | return ret; | ||
| 1333 | } | ||
| 1334 | EXPORT_SYMBOL(pl330_submit_req); | ||
| 1335 | |||
/*
 * Tasklet: perform the reset work flagged (in pl330->dmac_tbd) by the
 * IRQ-path pl330_update(). A DMAC-level fault escalates to a manager
 * reset, which in turn escalates to resetting every channel; each
 * reset channel has both of its queued requests aborted with a client
 * callback.
 */
static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	struct pl330_info *pi = pl330->pinfo;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pi->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pi->base;
			enum pl330_op_err err;

			_stop(thrd);

			/* FSC bit set: the channel faulted; else it was aborted */
			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			/* Client callbacks are invoked without the lock held */
			spin_unlock_irqrestore(&pl330->lock, flags);

			_callback(thrd->req[1 - thrd->lstenq].r, err);
			_callback(thrd->req[thrd->lstenq].r, err);

			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].r = NULL;
			thrd->req[1].r = NULL;
			MARK_FREE(&thrd->req[0]);
			MARK_FREE(&thrd->req[1]);

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return;
}
| 1397 | |||
/*
 * Interrupt-path state update: read the DMAC's fault and event status,
 * stop faulting channels (the actual reset is deferred to the
 * pl330_dotask tasklet), acknowledge completion events, restart their
 * channels, and run completion callbacks.
 * Returns 1 if state was updated, 0 otherwise.
 */
int pl330_update(const struct pl330_info *pi)
{
	struct _pl330_req *rqdone;
	struct pl330_dmac *pl330;
	unsigned long flags;
	void __iomem *regs;
	u32 val;
	int id, ev, ret = 0;

	if (!pi || !pi->pl330_data)
		return 0;

	regs = pi->base;
	pl330 = pi->pl330_data;

	spin_lock_irqsave(&pl330->lock, flags);

	/* Manager fault status -> flag a manager reset for the tasklet */
	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	/* Accumulate faulting channels and stop them immediately */
	val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;
	if (val) {
		int i = 0;
		while (i < pi->pcfg.num_chan) {
			if (val & (1 << i)) {
				dev_info(pi->dev,
					"Reset Channel-%d\t CS-%x FTC-%x\n",
						i, readl(regs + CS(i)),
						readl(regs + FTC(i)));
				_stop(&pl330->channels[i]);
			}
			i++;
		}
	}

	/* Check which event happened i.e, thread notified */
	val = readl(regs + ES);
	if (pi->pcfg.num_events < 32
			&& val & ~((1 << pi->pcfg.num_events) - 1)) {
		/* An event beyond the configured range: DMAC is misbehaving */
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__);
		ret = 1;
		goto updt_exit;
	}

	for (ev = 0; ev < pi->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);
			int active;

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			ret = 1;

			/* Map the event back to the channel that owns it */
			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = _thrd_active(thrd);
			if (!active) /* Aborted */
				continue;

			/* _thrd_active() returns a 1-based request-slot index */
			active -= 1;

			rqdone = &thrd->req[active];
			MARK_FREE(rqdone);

			/* Get going again ASAP */
			_start(thrd);

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&rqdone->rqd, &pl330->req_done);
		}
	}

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		rqdone = container_of(pl330->req_done.next,
					struct _pl330_req, rqd);

		list_del_init(&rqdone->rqd);

		/* Client callbacks run without the lock held */
		spin_unlock_irqrestore(&pl330->lock, flags);
		_callback(rqdone->r, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

updt_exit:
	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		ret = 1;
		/* Defer the heavyweight reset work to the tasklet */
		tasklet_schedule(&pl330->tasks);
	}

	return ret;
}
EXPORT_SYMBOL(pl330_update);
| 1506 | |||
/*
 * Perform a control operation on a channel: flush or abort queued
 * requests, or (re)start execution.
 * Returns 0 on success, -EINVAL for bad arguments/op, -EIO if the
 * channel could not be started.
 */
int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int ret = 0, active;

	if (!thrd || thrd->free || thrd->dmac->state == DYING)
		return -EINVAL;

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);

	switch (op) {
	case PL330_OP_FLUSH:
		/* Make sure the channel is stopped */
		_stop(thrd);

		/* Drop both queued requests; no client callbacks here */
		thrd->req[0].r = NULL;
		thrd->req[1].r = NULL;
		MARK_FREE(&thrd->req[0]);
		MARK_FREE(&thrd->req[1]);
		break;

	case PL330_OP_ABORT:
		active = _thrd_active(thrd);

		/* Make sure the channel is stopped */
		_stop(thrd);

		/* ABORT is only for the active req */
		if (!active)
			break;

		/* _thrd_active() returns a 1-based request-slot index */
		active--;

		thrd->req[active].r = NULL;
		MARK_FREE(&thrd->req[active]);

		/* Start the next */
		/* fall through */
	case PL330_OP_START:
		if (!_start(thrd))
			ret = -EIO;
		break;

	default:
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
	return ret;
}
EXPORT_SYMBOL(pl330_chan_ctrl);
| 1561 | |||
| 1562 | int pl330_chan_status(void *ch_id, struct pl330_chanstatus *pstatus) | ||
| 1563 | { | ||
| 1564 | struct pl330_thread *thrd = ch_id; | ||
| 1565 | struct pl330_dmac *pl330; | ||
| 1566 | struct pl330_info *pi; | ||
| 1567 | void __iomem *regs; | ||
| 1568 | int active; | ||
| 1569 | u32 val; | ||
| 1570 | |||
| 1571 | if (!pstatus || !thrd || thrd->free) | ||
| 1572 | return -EINVAL; | ||
| 1573 | |||
| 1574 | pl330 = thrd->dmac; | ||
| 1575 | pi = pl330->pinfo; | ||
| 1576 | regs = pi->base; | ||
| 1577 | |||
| 1578 | /* The client should remove the DMAC and add again */ | ||
| 1579 | if (pl330->state == DYING) | ||
| 1580 | pstatus->dmac_halted = true; | ||
| 1581 | else | ||
| 1582 | pstatus->dmac_halted = false; | ||
| 1583 | |||
| 1584 | val = readl(regs + FSC); | ||
| 1585 | if (val & (1 << thrd->id)) | ||
| 1586 | pstatus->faulting = true; | ||
| 1587 | else | ||
| 1588 | pstatus->faulting = false; | ||
| 1589 | |||
| 1590 | active = _thrd_active(thrd); | ||
| 1591 | |||
| 1592 | if (!active) { | ||
| 1593 | /* Indicate that the thread is not running */ | ||
| 1594 | pstatus->top_req = NULL; | ||
| 1595 | pstatus->wait_req = NULL; | ||
| 1596 | } else { | ||
| 1597 | active--; | ||
| 1598 | pstatus->top_req = thrd->req[active].r; | ||
| 1599 | pstatus->wait_req = !IS_FREE(&thrd->req[1 - active]) | ||
| 1600 | ? thrd->req[1 - active].r : NULL; | ||
| 1601 | } | ||
| 1602 | |||
| 1603 | pstatus->src_addr = readl(regs + SA(thrd->id)); | ||
| 1604 | pstatus->dst_addr = readl(regs + DA(thrd->id)); | ||
| 1605 | |||
| 1606 | return 0; | ||
| 1607 | } | ||
| 1608 | EXPORT_SYMBOL(pl330_chan_status); | ||
| 1609 | |||
| 1610 | /* Reserve an event */ | ||
| 1611 | static inline int _alloc_event(struct pl330_thread *thrd) | ||
| 1612 | { | ||
| 1613 | struct pl330_dmac *pl330 = thrd->dmac; | ||
| 1614 | struct pl330_info *pi = pl330->pinfo; | ||
| 1615 | int ev; | ||
| 1616 | |||
| 1617 | for (ev = 0; ev < pi->pcfg.num_events; ev++) | ||
| 1618 | if (pl330->events[ev] == -1) { | ||
| 1619 | pl330->events[ev] = thrd->id; | ||
| 1620 | return ev; | ||
| 1621 | } | ||
| 1622 | |||
| 1623 | return -1; | ||
| 1624 | } | ||
| 1625 | |||
/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 * Claims the first free channel thread that can also be assigned a
 * completion event, and resets its two request slots.
 */
void *pl330_request_channel(const struct pl330_info *pi)
{
	struct pl330_thread *thrd = NULL;
	struct pl330_dmac *pl330;
	unsigned long flags;
	int chans, i;

	if (!pi || !pi->pl330_data)
		return NULL;

	pl330 = pi->pl330_data;

	/* A dying DMAC hands out no more channels */
	if (pl330->state == DYING)
		return NULL;

	chans = pi->pcfg.num_chan;

	spin_lock_irqsave(&pl330->lock, flags);

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if (thrd->free) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->free = false;
				thrd->lstenq = 1;
				thrd->req[0].r = NULL;
				MARK_FREE(&thrd->req[0]);
				thrd->req[1].r = NULL;
				MARK_FREE(&thrd->req[1]);
				break;
			}
		}
		/* Stays NULL unless the break above was taken */
		thrd = NULL;
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	return thrd;
}
EXPORT_SYMBOL(pl330_request_channel);
| 1670 | |||
| 1671 | /* Release an event */ | ||
| 1672 | static inline void _free_event(struct pl330_thread *thrd, int ev) | ||
| 1673 | { | ||
| 1674 | struct pl330_dmac *pl330 = thrd->dmac; | ||
| 1675 | struct pl330_info *pi = pl330->pinfo; | ||
| 1676 | |||
| 1677 | /* If the event is valid and was held by the thread */ | ||
| 1678 | if (ev >= 0 && ev < pi->pcfg.num_events | ||
| 1679 | && pl330->events[ev] == thrd->id) | ||
| 1680 | pl330->events[ev] = -1; | ||
| 1681 | } | ||
| 1682 | |||
/*
 * Return a channel to the pool: stop it, abort both queued requests
 * (their owners are notified with PL330_ERR_ABORT) and release the
 * channel's event.
 */
void pl330_release_channel(void *ch_id)
{
	struct pl330_thread *thrd = ch_id;
	struct pl330_dmac *pl330;
	unsigned long flags;

	if (!thrd || thrd->free)
		return;

	_stop(thrd);

	/* Abort callbacks run without pl330->lock held */
	_callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT);
	_callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT);

	pl330 = thrd->dmac;

	spin_lock_irqsave(&pl330->lock, flags);
	_free_event(thrd, thrd->ev);
	thrd->free = true;
	spin_unlock_irqrestore(&pl330->lock, flags);
}
EXPORT_SYMBOL(pl330_release_channel);
| 1705 | |||
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver the make best use of the DMAC.
 * Reads the CR0/CR3/CR4/CRD configuration registers and the ID
 * registers into pi->pcfg.
 */
static void read_dmac_config(struct pl330_info *pi)
{
	void __iomem *regs = pi->base;
	u32 val;

	/* Data bus width in bits: 8 * 2^field */
	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pi->pcfg.data_bus_width = 8 * (1 << val);

	/* Data buffer depth; hardware field is depth-1 */
	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pi->pcfg.data_buf_dep = val + 1;

	/* Number of channels; hardware field is count-1 */
	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	val += 1;
	pi->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		/* Peripheral request interface present */
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		val += 1;
		pi->pcfg.num_peri = val;
		pi->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pi->pcfg.num_peri = 0;
	}

	/* Security state the manager thread boots in */
	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pi->pcfg.mode |= DMAC_MODE_NS;
	else
		pi->pcfg.mode &= ~DMAC_MODE_NS;

	/* Number of events/interrupts; hardware field is count-1 */
	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	val += 1;
	pi->pcfg.num_events = val;

	pi->pcfg.irq_ns = readl(regs + CR3);

	pi->pcfg.periph_id = get_id(pi, PERIPH_ID);
	pi->pcfg.pcell_id = get_id(pi, PCELL_ID);
}
| 1753 | |||
| 1754 | static inline void _reset_thread(struct pl330_thread *thrd) | ||
| 1755 | { | ||
| 1756 | struct pl330_dmac *pl330 = thrd->dmac; | ||
| 1757 | struct pl330_info *pi = pl330->pinfo; | ||
| 1758 | |||
| 1759 | thrd->req[0].mc_cpu = pl330->mcode_cpu | ||
| 1760 | + (thrd->id * pi->mcbufsz); | ||
| 1761 | thrd->req[0].mc_bus = pl330->mcode_bus | ||
| 1762 | + (thrd->id * pi->mcbufsz); | ||
| 1763 | thrd->req[0].r = NULL; | ||
| 1764 | MARK_FREE(&thrd->req[0]); | ||
| 1765 | |||
| 1766 | thrd->req[1].mc_cpu = thrd->req[0].mc_cpu | ||
| 1767 | + pi->mcbufsz / 2; | ||
| 1768 | thrd->req[1].mc_bus = thrd->req[0].mc_bus | ||
| 1769 | + pi->mcbufsz / 2; | ||
| 1770 | thrd->req[1].r = NULL; | ||
| 1771 | MARK_FREE(&thrd->req[1]); | ||
| 1772 | } | ||
| 1773 | |||
| 1774 | static int dmac_alloc_threads(struct pl330_dmac *pl330) | ||
| 1775 | { | ||
| 1776 | struct pl330_info *pi = pl330->pinfo; | ||
| 1777 | int chans = pi->pcfg.num_chan; | ||
| 1778 | struct pl330_thread *thrd; | ||
| 1779 | int i; | ||
| 1780 | |||
| 1781 | /* Allocate 1 Manager and 'chans' Channel threads */ | ||
| 1782 | pl330->channels = kzalloc((1 + chans) * sizeof(*thrd), | ||
| 1783 | GFP_KERNEL); | ||
| 1784 | if (!pl330->channels) | ||
| 1785 | return -ENOMEM; | ||
| 1786 | |||
| 1787 | /* Init Channel threads */ | ||
| 1788 | for (i = 0; i < chans; i++) { | ||
| 1789 | thrd = &pl330->channels[i]; | ||
| 1790 | thrd->id = i; | ||
| 1791 | thrd->dmac = pl330; | ||
| 1792 | _reset_thread(thrd); | ||
| 1793 | thrd->free = true; | ||
| 1794 | } | ||
| 1795 | |||
| 1796 | /* MANAGER is indexed at the end */ | ||
| 1797 | thrd = &pl330->channels[chans]; | ||
| 1798 | thrd->id = chans; | ||
| 1799 | thrd->dmac = pl330; | ||
| 1800 | thrd->free = false; | ||
| 1801 | pl330->manager = thrd; | ||
| 1802 | |||
| 1803 | return 0; | ||
| 1804 | } | ||
| 1805 | |||
| 1806 | static int dmac_alloc_resources(struct pl330_dmac *pl330) | ||
| 1807 | { | ||
| 1808 | struct pl330_info *pi = pl330->pinfo; | ||
| 1809 | int chans = pi->pcfg.num_chan; | ||
| 1810 | int ret; | ||
| 1811 | |||
| 1812 | /* | ||
| 1813 | * Alloc MicroCode buffer for 'chans' Channel threads. | ||
| 1814 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) | ||
| 1815 | */ | ||
| 1816 | pl330->mcode_cpu = dma_alloc_coherent(pi->dev, | ||
| 1817 | chans * pi->mcbufsz, | ||
| 1818 | &pl330->mcode_bus, GFP_KERNEL); | ||
| 1819 | if (!pl330->mcode_cpu) { | ||
| 1820 | dev_err(pi->dev, "%s:%d Can't allocate memory!\n", | ||
| 1821 | __func__, __LINE__); | ||
| 1822 | return -ENOMEM; | ||
| 1823 | } | ||
| 1824 | |||
| 1825 | ret = dmac_alloc_threads(pl330); | ||
| 1826 | if (ret) { | ||
| 1827 | dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n", | ||
| 1828 | __func__, __LINE__); | ||
| 1829 | dma_free_coherent(pi->dev, | ||
| 1830 | chans * pi->mcbufsz, | ||
| 1831 | pl330->mcode_cpu, pl330->mcode_bus); | ||
| 1832 | return ret; | ||
| 1833 | } | ||
| 1834 | |||
| 1835 | return 0; | ||
| 1836 | } | ||
| 1837 | |||
/*
 * Register a PL330 instance with this driver: optionally reset it,
 * verify the PrimeCell ID registers, read the hardware configuration
 * and allocate the per-DMAC bookkeeping, microcode buffers and
 * channel threads.
 * Returns 0 on success, negative error otherwise.
 */
int pl330_add(struct pl330_info *pi)
{
	struct pl330_dmac *pl330;
	void __iomem *regs;
	int i, ret;

	if (!pi || !pi->dev)
		return -EINVAL;

	/* If already added */
	if (pi->pl330_data)
		return -EINVAL;

	/*
	 * If the SoC can perform reset on the DMAC, then do it
	 * before reading its configuration.
	 */
	if (pi->dmac_reset)
		pi->dmac_reset(pi);

	regs = pi->base;

	/* Check if we can handle this DMAC */
	if ((get_id(pi, PERIPH_ID) & 0xfffff) != PERIPH_ID_VAL
	   || get_id(pi, PCELL_ID) != PCELL_ID_VAL) {
		dev_err(pi->dev, "PERIPH_ID 0x%x, PCELL_ID 0x%x !\n",
			get_id(pi, PERIPH_ID), get_id(pi, PCELL_ID));
		return -EINVAL;
	}

	/* Read the configuration of the DMAC */
	read_dmac_config(pi);

	/* Completion signalling relies on events */
	if (pi->pcfg.num_events == 0) {
		dev_err(pi->dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);
		return -EINVAL;
	}

	pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL);
	if (!pl330) {
		dev_err(pi->dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);
		return -ENOMEM;
	}

	/* Assign the info structure and private data */
	pl330->pinfo = pi;
	pi->pl330_data = pl330;

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pi->mcbufsz)
		pi->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pi->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
	if (ret) {
		dev_err(pi->dev, "Unable to create channels for DMAC\n");
		kfree(pl330);
		return ret;
	}

	/* Tasklet that performs deferred reset/abort work */
	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

	return 0;
}
EXPORT_SYMBOL(pl330_add);
| 1915 | |||
| 1916 | static int dmac_free_threads(struct pl330_dmac *pl330) | ||
| 1917 | { | ||
| 1918 | struct pl330_info *pi = pl330->pinfo; | ||
| 1919 | int chans = pi->pcfg.num_chan; | ||
| 1920 | struct pl330_thread *thrd; | ||
| 1921 | int i; | ||
| 1922 | |||
| 1923 | /* Release Channel threads */ | ||
| 1924 | for (i = 0; i < chans; i++) { | ||
| 1925 | thrd = &pl330->channels[i]; | ||
| 1926 | pl330_release_channel((void *)thrd); | ||
| 1927 | } | ||
| 1928 | |||
| 1929 | /* Free memory */ | ||
| 1930 | kfree(pl330->channels); | ||
| 1931 | |||
| 1932 | return 0; | ||
| 1933 | } | ||
| 1934 | |||
| 1935 | static void dmac_free_resources(struct pl330_dmac *pl330) | ||
| 1936 | { | ||
| 1937 | struct pl330_info *pi = pl330->pinfo; | ||
| 1938 | int chans = pi->pcfg.num_chan; | ||
| 1939 | |||
| 1940 | dmac_free_threads(pl330); | ||
| 1941 | |||
| 1942 | dma_free_coherent(pi->dev, chans * pi->mcbufsz, | ||
| 1943 | pl330->mcode_cpu, pl330->mcode_bus); | ||
| 1944 | } | ||
| 1945 | |||
| 1946 | void pl330_del(struct pl330_info *pi) | ||
| 1947 | { | ||
| 1948 | struct pl330_dmac *pl330; | ||
| 1949 | |||
| 1950 | if (!pi || !pi->pl330_data) | ||
| 1951 | return; | ||
| 1952 | |||
| 1953 | pl330 = pi->pl330_data; | ||
| 1954 | |||
| 1955 | pl330->state = UNINIT; | ||
| 1956 | |||
| 1957 | tasklet_kill(&pl330->tasks); | ||
| 1958 | |||
| 1959 | /* Free DMAC resources */ | ||
| 1960 | dmac_free_resources(pl330); | ||
| 1961 | |||
| 1962 | kfree(pl330); | ||
| 1963 | pi->pl330_data = NULL; | ||
| 1964 | } | ||
| 1965 | EXPORT_SYMBOL(pl330_del); | ||
diff --git a/arch/arm/common/time-acorn.c b/arch/arm/common/time-acorn.c new file mode 100644 index 00000000000..deeed561b16 --- /dev/null +++ b/arch/arm/common/time-acorn.c | |||
| @@ -0,0 +1,95 @@ | |||
| 1 | /* | ||
| 2 | * linux/arch/arm/common/time-acorn.c | ||
| 3 | * | ||
| 4 | * Copyright (c) 1996-2000 Russell King. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * Changelog: | ||
| 11 | * 24-Sep-1996 RMK Created | ||
| 12 | * 10-Oct-1996 RMK Brought up to date with arch-sa110eval | ||
| 13 | * 04-Dec-1997 RMK Updated for new arch/arm/time.c | ||
| 14 | * 13-Jun-2004 DS Moved to arch/arm/common b/c shared w/CLPS7500 | ||
| 15 | */ | ||
| 16 | #include <linux/timex.h> | ||
| 17 | #include <linux/init.h> | ||
| 18 | #include <linux/interrupt.h> | ||
| 19 | #include <linux/irq.h> | ||
| 20 | #include <linux/io.h> | ||
| 21 | |||
| 22 | #include <mach/hardware.h> | ||
| 23 | #include <asm/hardware/ioc.h> | ||
| 24 | |||
| 25 | #include <asm/mach/time.h> | ||
| 26 | |||
/*
 * Return the time elapsed since the last timer tick, in the units
 * expected by the old ARM gettimeoffset interface.
 *
 * The IOC timer 0 counts down from LATCH. The counter is latched and
 * read twice, with the timer-IRQ status sampled in between, so that a
 * counter reload (i.e. a pending/just-serviced tick) occurring during
 * the reads can be detected and compensated for.
 */
unsigned long ioc_timer_gettimeoffset(void)
{
	unsigned int count1, count2, status;
	long offset;

	/* First latch-and-read of the down-counter */
	ioc_writeb (0, IOC_T0LATCH);
	barrier ();
	count1 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);
	barrier ();
	/* Sample IRQ status between the two counter reads */
	status = ioc_readb(IOC_IRQREQA);
	barrier ();
	/* Second latch-and-read */
	ioc_writeb (0, IOC_T0LATCH);
	barrier ();
	count2 = ioc_readb(IOC_T0CNTL) | (ioc_readb(IOC_T0CNTH) << 8);

	offset = count2;
	if (count2 < count1) {
		/*
		 * We have not had an interrupt between reading count1
		 * and count2.
		 */
		if (status & (1 << 5))	/* timer IRQ pending: a tick is owed */
			offset -= LATCH;
	} else if (count2 > count1) {
		/*
		 * We have just had another interrupt between reading
		 * count1 and count2.
		 */
		offset -= LATCH;
	}

	/* Convert counter remainder to time, rounding to nearest */
	offset = (LATCH - offset) * (tick_nsec / 1000);
	return (offset + LATCH/2) / LATCH;
}
| 61 | |||
/* Program IOC timer 0 with a reload value of LATCH and start it. */
void __init ioctime_init(void)
{
	ioc_writeb(LATCH & 255, IOC_T0LTCHL);	/* latch, low byte */
	ioc_writeb(LATCH >> 8, IOC_T0LTCHH);	/* latch, high byte */
	ioc_writeb(0, IOC_T0GO);		/* start the timer */
}
| 68 | |||
/* Periodic tick ISR: advance the kernel's timekeeping by one jiffy. */
static irqreturn_t
ioc_timer_interrupt(int irq, void *dev_id)
{
	timer_tick();
	return IRQ_HANDLED;
}
| 75 | |||
/* IRQ action for the IOC tick timer; registered by ioc_timer_init(). */
static struct irqaction ioc_timer_irq = {
	.name = "timer",
	.flags = IRQF_DISABLED,
	.handler = ioc_timer_interrupt
};
| 81 | |||
/*
 * Set up timer interrupt: start timer 0 ticking, then install the
 * tick handler on IRQ_TIMER.
 */
static void __init ioc_timer_init(void)
{
	ioctime_init();
	setup_irq(IRQ_TIMER, &ioc_timer_irq);
}
| 90 | |||
/*
 * System timer operations for the IOC: .init starts the tick timer,
 * .offset interpolates microseconds between ticks.
 */
struct sys_timer ioc_timer = {
	.init = ioc_timer_init,
	.offset = ioc_timer_gettimeoffset,
};
| 95 | |||
diff --git a/arch/arm/common/uengine.c b/arch/arm/common/uengine.c new file mode 100644 index 00000000000..bef408f3d76 --- /dev/null +++ b/arch/arm/common/uengine.c | |||
| @@ -0,0 +1,507 @@ | |||
| 1 | /* | ||
| 2 | * Generic library functions for the microengines found on the Intel | ||
| 3 | * IXP2000 series of network processors. | ||
| 4 | * | ||
| 5 | * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org> | ||
| 6 | * Dedicated to Marija Kulikova. | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU Lesser General Public License as | ||
| 10 | * published by the Free Software Foundation; either version 2.1 of the | ||
| 11 | * License, or (at your option) any later version. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/init.h> | ||
| 16 | #include <linux/slab.h> | ||
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/string.h> | ||
| 19 | #include <linux/io.h> | ||
| 20 | #include <mach/hardware.h> | ||
| 21 | #include <asm/hardware/uengine.h> | ||
| 22 | |||
| 23 | #if defined(CONFIG_ARCH_IXP2000) | ||
| 24 | #define IXP_UENGINE_CSR_VIRT_BASE IXP2000_UENGINE_CSR_VIRT_BASE | ||
| 25 | #define IXP_PRODUCT_ID IXP2000_PRODUCT_ID | ||
| 26 | #define IXP_MISC_CONTROL IXP2000_MISC_CONTROL | ||
| 27 | #define IXP_RESET1 IXP2000_RESET1 | ||
| 28 | #else | ||
| 29 | #if defined(CONFIG_ARCH_IXP23XX) | ||
| 30 | #define IXP_UENGINE_CSR_VIRT_BASE IXP23XX_UENGINE_CSR_VIRT_BASE | ||
| 31 | #define IXP_PRODUCT_ID IXP23XX_PRODUCT_ID | ||
| 32 | #define IXP_MISC_CONTROL IXP23XX_MISC_CONTROL | ||
| 33 | #define IXP_RESET1 IXP23XX_RESET1 | ||
| 34 | #else | ||
| 35 | #error unknown platform | ||
| 36 | #endif | ||
| 37 | #endif | ||
| 38 | |||
| 39 | #define USTORE_ADDRESS 0x000 | ||
| 40 | #define USTORE_DATA_LOWER 0x004 | ||
| 41 | #define USTORE_DATA_UPPER 0x008 | ||
| 42 | #define CTX_ENABLES 0x018 | ||
| 43 | #define CC_ENABLE 0x01c | ||
| 44 | #define CSR_CTX_POINTER 0x020 | ||
| 45 | #define INDIRECT_CTX_STS 0x040 | ||
| 46 | #define ACTIVE_CTX_STS 0x044 | ||
| 47 | #define INDIRECT_CTX_SIG_EVENTS 0x048 | ||
| 48 | #define INDIRECT_CTX_WAKEUP_EVENTS 0x050 | ||
| 49 | #define NN_PUT 0x080 | ||
| 50 | #define NN_GET 0x084 | ||
| 51 | #define TIMESTAMP_LOW 0x0c0 | ||
| 52 | #define TIMESTAMP_HIGH 0x0c4 | ||
| 53 | #define T_INDEX_BYTE_INDEX 0x0f4 | ||
| 54 | #define LOCAL_CSR_STATUS 0x180 | ||
| 55 | |||
| 56 | u32 ixp2000_uengine_mask; | ||
| 57 | |||
/*
 * Return the virtual base address of the CSR window for the given
 * microengine; each microengine gets a 1KB window (hence << 10) off
 * the platform's CSR virtual base.
 */
static void *ixp2000_uengine_csr_area(int uengine)
{
	return ((void *)IXP_UENGINE_CSR_VIRT_BASE) + (uengine << 10);
}
| 62 | |||
/*
 * LOCAL_CSR_STATUS=1 after a read or write to a microengine's CSR
 * space means that the microengine we tried to access was also trying
 * to access its own CSR space on the same clock cycle as we did. When
 * this happens, we lose the arbitration process by default, and the
 * read or write we tried to do was not actually performed, so we try
 * again until it succeeds.
 */

/*
 * Read one CSR register of a microengine, retrying until we win the
 * CSR-bus arbitration (see comment above).
 */
u32 ixp2000_uengine_csr_read(int uengine, int offset)
{
	void *uebase;
	u32 *local_csr_status;
	u32 *reg;
	u32 value;

	uebase = ixp2000_uengine_csr_area(uengine);

	local_csr_status = (u32 *)(uebase + LOCAL_CSR_STATUS);
	reg = (u32 *)(uebase + offset);
	do {
		value = ixp2000_reg_read(reg);
	} while (ixp2000_reg_read(local_csr_status) & 1);

	return value;
}
EXPORT_SYMBOL(ixp2000_uengine_csr_read);
| 89 | |||
/*
 * Write one CSR register of a microengine, retrying until we win the
 * CSR-bus arbitration (see the LOCAL_CSR_STATUS comment above
 * ixp2000_uengine_csr_read()).
 */
void ixp2000_uengine_csr_write(int uengine, int offset, u32 value)
{
	void *uebase;
	u32 *local_csr_status;
	u32 *reg;

	uebase = ixp2000_uengine_csr_area(uengine);

	local_csr_status = (u32 *)(uebase + LOCAL_CSR_STATUS);
	reg = (u32 *)(uebase + offset);
	do {
		ixp2000_reg_write(reg, value);
	} while (ixp2000_reg_read(local_csr_status) & 1);
}
EXPORT_SYMBOL(ixp2000_uengine_csr_write);
| 105 | |||
/*
 * Pulse the reset line of the microengines selected by @uengine_mask.
 * Bits for microengines not present on this chip are ignored.
 */
void ixp2000_uengine_reset(u32 uengine_mask)
{
	u32 value;

	/* Current RESET1 contents with every microengine bit cleared. */
	value = ixp2000_reg_read(IXP_RESET1) & ~ixp2000_uengine_mask;

	/* Restrict the request to microengines that actually exist. */
	uengine_mask &= ixp2000_uengine_mask;
	/* Assert reset for the selected engines, then deassert. */
	ixp2000_reg_wrb(IXP_RESET1, value | uengine_mask);
	ixp2000_reg_wrb(IXP_RESET1, value);
}
EXPORT_SYMBOL(ixp2000_uengine_reset);
| 117 | |||
/*
 * Program a microengine's execution mode (@mode is written to
 * CTX_ENABLES, with control-store parity checking forced on) and
 * reset its condition-code, next-neighbour ring and T_INDEX state.
 */
void ixp2000_uengine_set_mode(int uengine, u32 mode)
{
	/*
	 * CTL_STR_PAR_EN: unconditionally enable parity checking on
	 * control store.
	 */
	mode |= 0x10000000;
	ixp2000_uengine_csr_write(uengine, CTX_ENABLES, mode);

	/*
	 * Enable updating of condition codes.
	 */
	ixp2000_uengine_csr_write(uengine, CC_ENABLE, 0x00002000);

	/*
	 * Initialise other per-microengine registers.
	 */
	ixp2000_uengine_csr_write(uengine, NN_PUT, 0x00);
	ixp2000_uengine_csr_write(uengine, NN_GET, 0x00);
	ixp2000_uengine_csr_write(uengine, T_INDEX_BYTE_INDEX, 0);
}
EXPORT_SYMBOL(ixp2000_uengine_set_mode);
| 140 | |||
| 141 | static int make_even_parity(u32 x) | ||
| 142 | { | ||
| 143 | return hweight32(x) & 1; | ||
| 144 | } | ||
| 145 | |||
/*
 * Write one 40-bit instruction into the control store at the current
 * USTORE_ADDRESS (which ixp2000_uengine_load_microcode() has set up
 * with its write-enable bit, 0x80000000).
 */
static void ustore_write(int uengine, u64 insn)
{
	/*
	 * Generate even parity for top and bottom 20 bits.
	 */
	insn |= (u64)make_even_parity((insn >> 20) & 0x000fffff) << 41;
	insn |= (u64)make_even_parity(insn & 0x000fffff) << 40;

	/*
	 * Write to microstore. The second write auto-increments
	 * the USTORE_ADDRESS index register.
	 */
	ixp2000_uengine_csr_write(uengine, USTORE_DATA_LOWER, (u32)insn);
	ixp2000_uengine_csr_write(uengine, USTORE_DATA_UPPER, (u32)(insn >> 32));
}
| 161 | |||
/*
 * Load a microcode image into a microengine's control store.
 * @ucode points to @insns instructions, each packed big-endian into
 * 5 bytes (40 bits).  A few trailing NOPs are appended so the engine
 * does not abort on prefetch past the last real instruction.
 */
void ixp2000_uengine_load_microcode(int uengine, u8 *ucode, int insns)
{
	int i;

	/*
	 * Start writing to microstore at address 0.
	 */
	ixp2000_uengine_csr_write(uengine, USTORE_ADDRESS, 0x80000000);
	for (i = 0; i < insns; i++) {
		u64 insn;

		/* Repack 5 big-endian bytes into one 40-bit word. */
		insn = (((u64)ucode[0]) << 32) |
			(((u64)ucode[1]) << 24) |
			(((u64)ucode[2]) << 16) |
			(((u64)ucode[3]) << 8) |
			((u64)ucode[4]);
		ucode += 5;

		ustore_write(uengine, insn);
	}

	/*
	 * Pad with a few NOPs at the end (to avoid the microengine
	 * aborting as it prefetches beyond the last instruction), unless
	 * we run off the end of the instruction store first, at which
	 * point the address register will wrap back to zero.
	 */
	for (i = 0; i < 4; i++) {
		u32 addr;

		addr = ixp2000_uengine_csr_read(uengine, USTORE_ADDRESS);
		if (addr == 0x80000000)
			break;
		ustore_write(uengine, 0xf0000c0300ULL);
	}

	/*
	 * End programming.
	 */
	ixp2000_uengine_csr_write(uengine, USTORE_ADDRESS, 0x00000000);
}
EXPORT_SYMBOL(ixp2000_uengine_load_microcode);
| 204 | |||
/*
 * Prepare context @context of microengine @uengine to start running
 * at program counter @pc as soon as its enable bit is set (see
 * ixp2000_uengine_start_contexts()).
 */
void ixp2000_uengine_init_context(int uengine, int context, int pc)
{
	/*
	 * Select the right context for indirect access.
	 */
	ixp2000_uengine_csr_write(uengine, CSR_CTX_POINTER, context);

	/*
	 * Initialise signal masks to immediately go to Ready state.
	 */
	ixp2000_uengine_csr_write(uengine, INDIRECT_CTX_SIG_EVENTS, 1);
	ixp2000_uengine_csr_write(uengine, INDIRECT_CTX_WAKEUP_EVENTS, 1);

	/*
	 * Set program counter.
	 */
	ixp2000_uengine_csr_write(uengine, INDIRECT_CTX_STS, pc);
}
EXPORT_SYMBOL(ixp2000_uengine_init_context);
| 224 | |||
| 225 | void ixp2000_uengine_start_contexts(int uengine, u8 ctx_mask) | ||
| 226 | { | ||
| 227 | u32 mask; | ||
| 228 | |||
| 229 | /* | ||
| 230 | * Enable the specified context to go to Executing state. | ||
| 231 | */ | ||
| 232 | mask = ixp2000_uengine_csr_read(uengine, CTX_ENABLES); | ||
| 233 | mask |= ctx_mask << 8; | ||
| 234 | ixp2000_uengine_csr_write(uengine, CTX_ENABLES, mask); | ||
| 235 | } | ||
| 236 | EXPORT_SYMBOL(ixp2000_uengine_start_contexts); | ||
| 237 | |||
| 238 | void ixp2000_uengine_stop_contexts(int uengine, u8 ctx_mask) | ||
| 239 | { | ||
| 240 | u32 mask; | ||
| 241 | |||
| 242 | /* | ||
| 243 | * Disable the Ready->Executing transition. Note that this | ||
| 244 | * does not stop the context until it voluntarily yields. | ||
| 245 | */ | ||
| 246 | mask = ixp2000_uengine_csr_read(uengine, CTX_ENABLES); | ||
| 247 | mask &= ~(ctx_mask << 8); | ||
| 248 | ixp2000_uengine_csr_write(uengine, CTX_ENABLES, mask); | ||
| 249 | } | ||
| 250 | EXPORT_SYMBOL(ixp2000_uengine_stop_contexts); | ||
| 251 | |||
/*
 * Check whether the microcode image @c can run on this CPU: the CPU
 * model (PRODUCT_ID bits 15:8) must be selected in the image's
 * cpu_model_bitmask, and the silicon revision (bits 7:0) must lie in
 * [cpu_min_revision, cpu_max_revision].
 * Returns 1 if the image is usable, 0 otherwise.
 */
static int check_ixp_type(struct ixp2000_uengine_code *c)
{
	u32 product_id;
	u32 rev;

	product_id = ixp2000_reg_read(IXP_PRODUCT_ID);
	/* NOTE(review): bits 20:16 nonzero are treated as "unknown
	 * part" — confirm field meaning against the PRODUCT_ID layout. */
	if (((product_id >> 16) & 0x1f) != 0)
		return 0;

	switch ((product_id >> 8) & 0xff) {
#ifdef CONFIG_ARCH_IXP2000
	case 0:		/* IXP2800 */
		if (!(c->cpu_model_bitmask & 4))
			return 0;
		break;

	case 1:		/* IXP2850 */
		if (!(c->cpu_model_bitmask & 8))
			return 0;
		break;

	case 2:		/* IXP2400 */
		if (!(c->cpu_model_bitmask & 2))
			return 0;
		break;
#endif

#ifdef CONFIG_ARCH_IXP23XX
	case 4:		/* IXP23xx */
		if (!(c->cpu_model_bitmask & 0x3f0))
			return 0;
		break;
#endif

	default:
		return 0;
	}

	rev = product_id & 0xff;
	if (rev < c->cpu_min_revision || rev > c->cpu_max_revision)
		return 0;

	return 1;
}
| 296 | |||
/*
 * Emit a 513-instruction register-loader microprogram into @ucode
 * (5 bytes per instruction): an immed/immed_w1 pair per register
 * loads each of the 128 A-bank and 128 B-bank GPRs with the values
 * in @gpr_a/@gpr_b, and a final ctx_arb[kill] terminates the context.
 * @ucode must therefore hold at least 513 * 5 bytes.
 */
static void generate_ucode(u8 *ucode, u32 *gpr_a, u32 *gpr_b)
{
	int offset;
	int i;

	offset = 0;

	/* A-bank: immediate operand is encoded in the V bit positions,
	 * register number in the I bit positions of each template. */
	for (i = 0; i < 128; i++) {
		u8 b3;
		u8 b2;
		u8 b1;
		u8 b0;

		b3 = (gpr_a[i] >> 24) & 0xff;
		b2 = (gpr_a[i] >> 16) & 0xff;
		b1 = (gpr_a[i] >> 8) & 0xff;
		b0 = gpr_a[i] & 0xff;

		/* immed[@ai, (b1 << 8) | b0] */
		/* 11110000 0000VVVV VVVV11VV VVVVVV00 1IIIIIII */
		ucode[offset++] = 0xf0;
		ucode[offset++] = (b1 >> 4);
		ucode[offset++] = (b1 << 4) | 0x0c | (b0 >> 6);
		ucode[offset++] = (b0 << 2);
		ucode[offset++] = 0x80 | i;

		/* immed_w1[@ai, (b3 << 8) | b2] */
		/* 11110100 0100VVVV VVVV11VV VVVVVV00 1IIIIIII */
		ucode[offset++] = 0xf4;
		ucode[offset++] = 0x40 | (b3 >> 4);
		ucode[offset++] = (b3 << 4) | 0x0c | (b2 >> 6);
		ucode[offset++] = (b2 << 2);
		ucode[offset++] = 0x80 | i;
	}

	/* B-bank: same instructions, but the destination-register field
	 * sits in different bit positions. */
	for (i = 0; i < 128; i++) {
		u8 b3;
		u8 b2;
		u8 b1;
		u8 b0;

		b3 = (gpr_b[i] >> 24) & 0xff;
		b2 = (gpr_b[i] >> 16) & 0xff;
		b1 = (gpr_b[i] >> 8) & 0xff;
		b0 = gpr_b[i] & 0xff;

		/* immed[@bi, (b1 << 8) | b0] */
		/* 11110000 0000VVVV VVVV001I IIIIII11 VVVVVVVV */
		ucode[offset++] = 0xf0;
		ucode[offset++] = (b1 >> 4);
		ucode[offset++] = (b1 << 4) | 0x02 | (i >> 6);
		ucode[offset++] = (i << 2) | 0x03;
		ucode[offset++] = b0;

		/* immed_w1[@bi, (b3 << 8) | b2] */
		/* 11110100 0100VVVV VVVV001I IIIIII11 VVVVVVVV */
		ucode[offset++] = 0xf4;
		ucode[offset++] = 0x40 | (b3 >> 4);
		ucode[offset++] = (b3 << 4) | 0x02 | (i >> 6);
		ucode[offset++] = (i << 2) | 0x03;
		ucode[offset++] = b2;
	}

	/* ctx_arb[kill] */
	ucode[offset++] = 0xe0;
	ucode[offset++] = 0x00;
	ucode[offset++] = 0x01;
	ucode[offset++] = 0x00;
	ucode[offset++] = 0x00;
}
| 367 | |||
/*
 * Load the initial GPR values described by @c into microengine
 * @uengine: expand the value list into full 128-entry A/B bank
 * images, generate a loader microprogram, run it on context 0 and
 * wait for it to finish.
 * Returns 0 on success, 1 on allocation failure or poll timeout.
 */
static int set_initial_registers(int uengine, struct ixp2000_uengine_code *c)
{
	int per_ctx_regs;
	u32 *gpr_a;
	u32 *gpr_b;
	u8 *ucode;
	int i;

	/* 513 instructions of 5 bytes each (see generate_ucode()). */
	gpr_a = kzalloc(128 * sizeof(u32), GFP_KERNEL);
	gpr_b = kzalloc(128 * sizeof(u32), GFP_KERNEL);
	ucode = kmalloc(513 * 5, GFP_KERNEL);
	if (gpr_a == NULL || gpr_b == NULL || ucode == NULL) {
		/* kfree(NULL) is a no-op, so partial failure is fine. */
		kfree(ucode);
		kfree(gpr_b);
		kfree(gpr_a);
		return 1;
	}

	/* 8 contexts x 16 regs per bank, or 4 contexts x 32 regs. */
	per_ctx_regs = 16;
	if (c->uengine_parameters & IXP2000_UENGINE_4_CONTEXTS)
		per_ctx_regs = 32;

	/* Expand the initial-value list (terminated by reg == -1). */
	for (i = 0; i < 256; i++) {
		struct ixp2000_reg_value *r = c->initial_reg_values + i;
		u32 *bank;
		int inc;
		int j;

		if (r->reg == -1)
			break;

		/* Bit 10 selects bank B.  Bit 7 marks an absolute
		 * register (written once); otherwise the value is
		 * replicated into the same slot of every context. */
		bank = (r->reg & 0x400) ? gpr_b : gpr_a;
		inc = (r->reg & 0x80) ? 128 : per_ctx_regs;

		j = r->reg & 0x7f;
		while (j < 128) {
			bank[j] = r->value;
			j += inc;
		}
	}

	generate_ucode(ucode, gpr_a, gpr_b);
	ixp2000_uengine_load_microcode(uengine, ucode, 513);
	ixp2000_uengine_init_context(uengine, 0, 0);
	ixp2000_uengine_start_contexts(uengine, 0x01);
	/* Poll until the loader's ctx_arb[kill] has taken effect
	 * (ACTIVE_CTX_STS bit 31 clear — presumably "context running";
	 * confirm against the CSR reference), up to 100 reads. */
	for (i = 0; i < 100; i++) {
		u32 status;

		status = ixp2000_uengine_csr_read(uengine, ACTIVE_CTX_STS);
		if (!(status & 0x80000000))
			break;
	}
	ixp2000_uengine_stop_contexts(uengine, 0x01);

	kfree(ucode);
	kfree(gpr_b);
	kfree(gpr_a);

	/* 1 (failure) if the context never stopped within the limit. */
	return !!(i == 100);
}
| 428 | |||
/*
 * Validate and load microcode image @c into microengine @uengine:
 * check CPU compatibility, reset the engine, program its mode and
 * initial register values, write the instruction store, and leave
 * all eight contexts initialised at PC 0 (not started).
 * Returns 0 on success, 1 on failure.
 */
int ixp2000_uengine_load(int uengine, struct ixp2000_uengine_code *c)
{
	int ctx;

	/* Image must support this CPU model and revision. */
	if (!check_ixp_type(c))
		return 1;

	/* The requested microengine must exist on this chip. */
	if (!(ixp2000_uengine_mask & (1 << uengine)))
		return 1;

	ixp2000_uengine_reset(1 << uengine);
	ixp2000_uengine_set_mode(uengine, c->uengine_parameters);
	if (set_initial_registers(uengine, c))
		return 1;
	ixp2000_uengine_load_microcode(uengine, c->insns, c->num_insns);

	for (ctx = 0; ctx < 8; ctx++)
		ixp2000_uengine_init_context(uengine, ctx, 0);

	return 0;
}
EXPORT_SYMBOL(ixp2000_uengine_load);
| 451 | |||
| 452 | |||
/*
 * Subsystem init: probe the product ID to work out which microengines
 * are present (recorded in ixp2000_uengine_mask), reset them all, and
 * zero their timestamp counters in lockstep.
 */
static int __init ixp2000_uengine_init(void)
{
	int uengine;
	u32 value;

	/*
	 * Determine number of microengines present.
	 */
	switch ((ixp2000_reg_read(IXP_PRODUCT_ID) >> 8) & 0x1fff) {
#ifdef CONFIG_ARCH_IXP2000
	case 0:		/* IXP2800 */
	case 1:		/* IXP2850 */
		ixp2000_uengine_mask = 0x00ff00ff;
		break;

	case 2:		/* IXP2400 */
		ixp2000_uengine_mask = 0x000f000f;
		break;
#endif

#ifdef CONFIG_ARCH_IXP23XX
	case 4:		/* IXP23xx */
		/* Engine count comes from the expansion config fuses. */
		ixp2000_uengine_mask = (*IXP23XX_EXP_CFG_FUSE >> 8) & 0xf;
		break;
#endif

	default:
		printk(KERN_INFO "Detected unknown IXP2000 model (%.8x)\n",
			(unsigned int)ixp2000_reg_read(IXP_PRODUCT_ID));
		ixp2000_uengine_mask = 0x00000000;
		break;
	}

	/*
	 * Reset microengines.
	 */
	ixp2000_uengine_reset(ixp2000_uengine_mask);

	/*
	 * Synchronise timestamp counters across all microengines.
	 * Bit 7 of MISC_CONTROL presumably gates the counters: stop
	 * them, zero every engine's counter, then restart them all at
	 * once — confirm against the MISC_CONTROL register reference.
	 */
	value = ixp2000_reg_read(IXP_MISC_CONTROL);
	ixp2000_reg_wrb(IXP_MISC_CONTROL, value & ~0x80);
	for (uengine = 0; uengine < 32; uengine++) {
		if (ixp2000_uengine_mask & (1 << uengine)) {
			ixp2000_uengine_csr_write(uengine, TIMESTAMP_LOW, 0);
			ixp2000_uengine_csr_write(uengine, TIMESTAMP_HIGH, 0);
		}
	}
	ixp2000_reg_wrb(IXP_MISC_CONTROL, value | 0x80);

	return 0;
}

subsys_initcall(ixp2000_uengine_init);
