diff options
Diffstat (limited to 'arch/arm/common')
-rw-r--r-- | arch/arm/common/Kconfig | 59 | ||||
-rw-r--r-- | arch/arm/common/Makefile | 2 | ||||
-rw-r--r-- | arch/arm/common/dmabounce.c | 193 | ||||
-rw-r--r-- | arch/arm/common/fiq_debugger.c | 1196 | ||||
-rw-r--r-- | arch/arm/common/fiq_debugger_ringbuf.h | 94 | ||||
-rw-r--r-- | arch/arm/common/fiq_glue.S | 111 | ||||
-rw-r--r-- | arch/arm/common/fiq_glue_setup.c | 155 | ||||
-rw-r--r-- | arch/arm/common/gic.c | 218 | ||||
-rw-r--r-- | arch/arm/common/it8152.c | 18 | ||||
-rw-r--r-- | arch/arm/common/sa1111.c | 60 | ||||
-rw-r--r-- | arch/arm/common/scoop.c | 2 | ||||
-rw-r--r-- | arch/arm/common/vic.c | 1 |
12 files changed, 1947 insertions, 162 deletions
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index 4b71766fb21..23b2a6a98c2 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig | |||
@@ -39,3 +39,62 @@ config SHARP_PARAM | |||
39 | 39 | ||
40 | config SHARP_SCOOP | 40 | config SHARP_SCOOP |
41 | bool | 41 | bool |
42 | |||
43 | config FIQ_GLUE | ||
44 | bool | ||
45 | select FIQ | ||
46 | |||
47 | config FIQ_DEBUGGER | ||
48 | bool "FIQ Mode Serial Debugger" | ||
49 | select FIQ | ||
50 | select FIQ_GLUE | ||
51 | default n | ||
52 | help | ||
53 | The FIQ serial debugger can accept commands even when the | ||
54 | kernel is unresponsive due to being stuck with interrupts | ||
55 | disabled. | ||
56 | |||
57 | |||
58 | config FIQ_DEBUGGER_NO_SLEEP | ||
59 | bool "Keep serial debugger active" | ||
60 | depends on FIQ_DEBUGGER | ||
61 | default n | ||
62 | help | ||
63 | Enables the serial debugger at boot. Passing | ||
64 | fiq_debugger.no_sleep on the kernel commandline will | ||
65 | override this config option. | ||
66 | |||
67 | config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON | ||
68 | bool "Don't disable wakeup IRQ when debugger is active" | ||
69 | depends on FIQ_DEBUGGER | ||
70 | default n | ||
71 | help | ||
72 | Don't disable the wakeup irq when enabling the uart clock. This will | ||
73 | cause extra interrupts, but it makes the serial debugger usable with | ||
74 | on some MSM radio builds that ignore the uart clock request in power | ||
75 | collapse. | ||
76 | |||
77 | config FIQ_DEBUGGER_CONSOLE | ||
78 | bool "Console on FIQ Serial Debugger port" | ||
79 | depends on FIQ_DEBUGGER | ||
80 | default n | ||
81 | help | ||
82 | Enables a console so that printk messages are displayed on | ||
83 | the debugger serial port as the occur. | ||
84 | |||
85 | config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE | ||
86 | bool "Put the FIQ debugger into console mode by default" | ||
87 | depends on FIQ_DEBUGGER_CONSOLE | ||
88 | default n | ||
89 | help | ||
90 | If enabled, this puts the fiq debugger into console mode by default. | ||
91 | Otherwise, the fiq debugger will start out in debug mode. | ||
92 | |||
93 | config GIC_SET_MULTIPLE_CPUS | ||
94 | bool "Use affinity hint to allow multiple CPUs for IRQ" | ||
95 | depends on ARM_GIC && SMP | ||
96 | default n | ||
97 | help | ||
98 | IRQ affinity is always set by gic to the 1st cpu in the requested | ||
99 | mask. If this option is enabled, affinity is also set to all cpus | ||
100 | present in affinity_hint and requested masks. | ||
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index 6ea9b6f3607..3ab5d765fed 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile | |||
@@ -17,3 +17,5 @@ obj-$(CONFIG_ARCH_IXP2000) += uengine.o | |||
17 | obj-$(CONFIG_ARCH_IXP23XX) += uengine.o | 17 | obj-$(CONFIG_ARCH_IXP23XX) += uengine.o |
18 | obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o | 18 | obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o |
19 | obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o | 19 | obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o |
20 | obj-$(CONFIG_FIQ_GLUE) += fiq_glue.o fiq_glue_setup.o | ||
21 | obj-$(CONFIG_FIQ_DEBUGGER) += fiq_debugger.o | ||
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 841df7d21c2..595ecd290eb 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
@@ -79,6 +79,8 @@ struct dmabounce_device_info { | |||
79 | struct dmabounce_pool large; | 79 | struct dmabounce_pool large; |
80 | 80 | ||
81 | rwlock_t lock; | 81 | rwlock_t lock; |
82 | |||
83 | int (*needs_bounce)(struct device *, dma_addr_t, size_t); | ||
82 | }; | 84 | }; |
83 | 85 | ||
84 | #ifdef STATS | 86 | #ifdef STATS |
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev, | |||
210 | if (!dev || !dev->archdata.dmabounce) | 212 | if (!dev || !dev->archdata.dmabounce) |
211 | return NULL; | 213 | return NULL; |
212 | if (dma_mapping_error(dev, dma_addr)) { | 214 | if (dma_mapping_error(dev, dma_addr)) { |
213 | if (dev) | 215 | dev_err(dev, "Trying to %s invalid mapping\n", where); |
214 | dev_err(dev, "Trying to %s invalid mapping\n", where); | ||
215 | else | ||
216 | pr_err("unknown device: Trying to %s invalid mapping\n", where); | ||
217 | return NULL; | 216 | return NULL; |
218 | } | 217 | } |
219 | return find_safe_buffer(dev->archdata.dmabounce, dma_addr); | 218 | return find_safe_buffer(dev->archdata.dmabounce, dma_addr); |
220 | } | 219 | } |
221 | 220 | ||
222 | static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, | 221 | static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) |
223 | enum dma_data_direction dir) | ||
224 | { | 222 | { |
225 | struct dmabounce_device_info *device_info = dev->archdata.dmabounce; | 223 | if (!dev || !dev->archdata.dmabounce) |
226 | dma_addr_t dma_addr; | 224 | return 0; |
227 | int needs_bounce = 0; | ||
228 | |||
229 | if (device_info) | ||
230 | DO_STATS ( device_info->map_op_count++ ); | ||
231 | |||
232 | dma_addr = virt_to_dma(dev, ptr); | ||
233 | 225 | ||
234 | if (dev->dma_mask) { | 226 | if (dev->dma_mask) { |
235 | unsigned long mask = *dev->dma_mask; | 227 | unsigned long limit, mask = *dev->dma_mask; |
236 | unsigned long limit; | ||
237 | 228 | ||
238 | limit = (mask + 1) & ~mask; | 229 | limit = (mask + 1) & ~mask; |
239 | if (limit && size > limit) { | 230 | if (limit && size > limit) { |
240 | dev_err(dev, "DMA mapping too big (requested %#x " | 231 | dev_err(dev, "DMA mapping too big (requested %#x " |
241 | "mask %#Lx)\n", size, *dev->dma_mask); | 232 | "mask %#Lx)\n", size, *dev->dma_mask); |
242 | return ~0; | 233 | return -E2BIG; |
243 | } | 234 | } |
244 | 235 | ||
245 | /* | 236 | /* Figure out if we need to bounce from the DMA mask. */ |
246 | * Figure out if we need to bounce from the DMA mask. | 237 | if ((dma_addr | (dma_addr + size - 1)) & ~mask) |
247 | */ | 238 | return 1; |
248 | needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask; | ||
249 | } | 239 | } |
250 | 240 | ||
251 | if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) { | 241 | return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size); |
252 | struct safe_buffer *buf; | 242 | } |
253 | 243 | ||
254 | buf = alloc_safe_buffer(device_info, ptr, size, dir); | 244 | static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, |
255 | if (buf == 0) { | 245 | enum dma_data_direction dir) |
256 | dev_err(dev, "%s: unable to map unsafe buffer %p!\n", | 246 | { |
257 | __func__, ptr); | 247 | struct dmabounce_device_info *device_info = dev->archdata.dmabounce; |
258 | return ~0; | 248 | struct safe_buffer *buf; |
259 | } | ||
260 | 249 | ||
261 | dev_dbg(dev, | 250 | if (device_info) |
262 | "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 251 | DO_STATS ( device_info->map_op_count++ ); |
263 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | ||
264 | buf->safe, buf->safe_dma_addr); | ||
265 | 252 | ||
266 | if ((dir == DMA_TO_DEVICE) || | 253 | buf = alloc_safe_buffer(device_info, ptr, size, dir); |
267 | (dir == DMA_BIDIRECTIONAL)) { | 254 | if (buf == NULL) { |
268 | dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", | 255 | dev_err(dev, "%s: unable to map unsafe buffer %p!\n", |
269 | __func__, ptr, buf->safe, size); | 256 | __func__, ptr); |
270 | memcpy(buf->safe, ptr, size); | 257 | return ~0; |
271 | } | 258 | } |
272 | ptr = buf->safe; | ||
273 | 259 | ||
274 | dma_addr = buf->safe_dma_addr; | 260 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", |
275 | } else { | 261 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), |
276 | /* | 262 | buf->safe, buf->safe_dma_addr); |
277 | * We don't need to sync the DMA buffer since | 263 | |
278 | * it was allocated via the coherent allocators. | 264 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { |
279 | */ | 265 | dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", |
280 | __dma_single_cpu_to_dev(ptr, size, dir); | 266 | __func__, ptr, buf->safe, size); |
267 | memcpy(buf->safe, ptr, size); | ||
281 | } | 268 | } |
282 | 269 | ||
283 | return dma_addr; | 270 | return buf->safe_dma_addr; |
284 | } | 271 | } |
285 | 272 | ||
286 | static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, | 273 | static inline void unmap_single(struct device *dev, struct safe_buffer *buf, |
287 | size_t size, enum dma_data_direction dir) | 274 | size_t size, enum dma_data_direction dir) |
288 | { | 275 | { |
289 | struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap"); | 276 | BUG_ON(buf->size != size); |
290 | 277 | BUG_ON(buf->direction != dir); | |
291 | if (buf) { | ||
292 | BUG_ON(buf->size != size); | ||
293 | BUG_ON(buf->direction != dir); | ||
294 | 278 | ||
295 | dev_dbg(dev, | 279 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", |
296 | "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 280 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), |
297 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 281 | buf->safe, buf->safe_dma_addr); |
298 | buf->safe, buf->safe_dma_addr); | ||
299 | 282 | ||
300 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 283 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
301 | 284 | ||
302 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { | 285 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { |
303 | void *ptr = buf->ptr; | 286 | void *ptr = buf->ptr; |
304 | 287 | ||
305 | dev_dbg(dev, | 288 | dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", |
306 | "%s: copy back safe %p to unsafe %p size %d\n", | 289 | __func__, buf->safe, ptr, size); |
307 | __func__, buf->safe, ptr, size); | 290 | memcpy(ptr, buf->safe, size); |
308 | memcpy(ptr, buf->safe, size); | ||
309 | 291 | ||
310 | /* | 292 | /* |
311 | * Since we may have written to a page cache page, | 293 | * Since we may have written to a page cache page, |
312 | * we need to ensure that the data will be coherent | 294 | * we need to ensure that the data will be coherent |
313 | * with user mappings. | 295 | * with user mappings. |
314 | */ | 296 | */ |
315 | __cpuc_flush_dcache_area(ptr, size); | 297 | __cpuc_flush_dcache_area(ptr, size); |
316 | } | ||
317 | free_safe_buffer(dev->archdata.dmabounce, buf); | ||
318 | } else { | ||
319 | __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir); | ||
320 | } | 298 | } |
299 | free_safe_buffer(dev->archdata.dmabounce, buf); | ||
321 | } | 300 | } |
322 | 301 | ||
323 | /* ************************************************** */ | 302 | /* ************************************************** */ |
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, | |||
328 | * substitute the safe buffer for the unsafe one. | 307 | * substitute the safe buffer for the unsafe one. |
329 | * (basically move the buffer from an unsafe area to a safe one) | 308 | * (basically move the buffer from an unsafe area to a safe one) |
330 | */ | 309 | */ |
331 | dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size, | ||
332 | enum dma_data_direction dir) | ||
333 | { | ||
334 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | ||
335 | __func__, ptr, size, dir); | ||
336 | |||
337 | BUG_ON(!valid_dma_direction(dir)); | ||
338 | |||
339 | return map_single(dev, ptr, size, dir); | ||
340 | } | ||
341 | EXPORT_SYMBOL(__dma_map_single); | ||
342 | |||
343 | /* | ||
344 | * see if a mapped address was really a "safe" buffer and if so, copy | ||
345 | * the data from the safe buffer back to the unsafe buffer and free up | ||
346 | * the safe buffer. (basically return things back to the way they | ||
347 | * should be) | ||
348 | */ | ||
349 | void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
350 | enum dma_data_direction dir) | ||
351 | { | ||
352 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | ||
353 | __func__, (void *) dma_addr, size, dir); | ||
354 | |||
355 | unmap_single(dev, dma_addr, size, dir); | ||
356 | } | ||
357 | EXPORT_SYMBOL(__dma_unmap_single); | ||
358 | |||
359 | dma_addr_t __dma_map_page(struct device *dev, struct page *page, | 310 | dma_addr_t __dma_map_page(struct device *dev, struct page *page, |
360 | unsigned long offset, size_t size, enum dma_data_direction dir) | 311 | unsigned long offset, size_t size, enum dma_data_direction dir) |
361 | { | 312 | { |
313 | dma_addr_t dma_addr; | ||
314 | int ret; | ||
315 | |||
362 | dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", | 316 | dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", |
363 | __func__, page, offset, size, dir); | 317 | __func__, page, offset, size, dir); |
364 | 318 | ||
365 | BUG_ON(!valid_dma_direction(dir)); | 319 | dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset; |
320 | |||
321 | ret = needs_bounce(dev, dma_addr, size); | ||
322 | if (ret < 0) | ||
323 | return ~0; | ||
324 | |||
325 | if (ret == 0) { | ||
326 | __dma_page_cpu_to_dev(page, offset, size, dir); | ||
327 | return dma_addr; | ||
328 | } | ||
366 | 329 | ||
367 | if (PageHighMem(page)) { | 330 | if (PageHighMem(page)) { |
368 | dev_err(dev, "DMA buffer bouncing of HIGHMEM pages " | 331 | dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); |
369 | "is not supported\n"); | ||
370 | return ~0; | 332 | return ~0; |
371 | } | 333 | } |
372 | 334 | ||
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page); | |||
383 | void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | 345 | void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, |
384 | enum dma_data_direction dir) | 346 | enum dma_data_direction dir) |
385 | { | 347 | { |
386 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | 348 | struct safe_buffer *buf; |
387 | __func__, (void *) dma_addr, size, dir); | 349 | |
350 | dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n", | ||
351 | __func__, dma_addr, size, dir); | ||
352 | |||
353 | buf = find_safe_buffer_dev(dev, dma_addr, __func__); | ||
354 | if (!buf) { | ||
355 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)), | ||
356 | dma_addr & ~PAGE_MASK, size, dir); | ||
357 | return; | ||
358 | } | ||
388 | 359 | ||
389 | unmap_single(dev, dma_addr, size, dir); | 360 | unmap_single(dev, buf, size, dir); |
390 | } | 361 | } |
391 | EXPORT_SYMBOL(__dma_unmap_page); | 362 | EXPORT_SYMBOL(__dma_unmap_page); |
392 | 363 | ||
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, | |||
461 | } | 432 | } |
462 | 433 | ||
463 | int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, | 434 | int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, |
464 | unsigned long large_buffer_size) | 435 | unsigned long large_buffer_size, |
436 | int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t)) | ||
465 | { | 437 | { |
466 | struct dmabounce_device_info *device_info; | 438 | struct dmabounce_device_info *device_info; |
467 | int ret; | 439 | int ret; |
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, | |||
497 | device_info->dev = dev; | 469 | device_info->dev = dev; |
498 | INIT_LIST_HEAD(&device_info->safe_buffers); | 470 | INIT_LIST_HEAD(&device_info->safe_buffers); |
499 | rwlock_init(&device_info->lock); | 471 | rwlock_init(&device_info->lock); |
472 | device_info->needs_bounce = needs_bounce_fn; | ||
500 | 473 | ||
501 | #ifdef STATS | 474 | #ifdef STATS |
502 | device_info->total_allocs = 0; | 475 | device_info->total_allocs = 0; |
diff --git a/arch/arm/common/fiq_debugger.c b/arch/arm/common/fiq_debugger.c new file mode 100644 index 00000000000..3ed18ae2ed8 --- /dev/null +++ b/arch/arm/common/fiq_debugger.c | |||
@@ -0,0 +1,1196 @@ | |||
1 | /* | ||
2 | * arch/arm/common/fiq_debugger.c | ||
3 | * | ||
4 | * Serial Debugger Interface accessed through an FIQ interrupt. | ||
5 | * | ||
6 | * Copyright (C) 2008 Google, Inc. | ||
7 | * | ||
8 | * This software is licensed under the terms of the GNU General Public | ||
9 | * License version 2, as published by the Free Software Foundation, and | ||
10 | * may be copied, distributed, and modified under those terms. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | */ | ||
17 | |||
18 | #include <stdarg.h> | ||
19 | #include <linux/module.h> | ||
20 | #include <linux/io.h> | ||
21 | #include <linux/console.h> | ||
22 | #include <linux/interrupt.h> | ||
23 | #include <linux/clk.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/kernel_stat.h> | ||
26 | #include <linux/irq.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/slab.h> | ||
30 | #include <linux/smp.h> | ||
31 | #include <linux/timer.h> | ||
32 | #include <linux/tty.h> | ||
33 | #include <linux/tty_flip.h> | ||
34 | #include <linux/wakelock.h> | ||
35 | |||
36 | #include <asm/fiq_debugger.h> | ||
37 | #include <asm/fiq_glue.h> | ||
38 | #include <asm/stacktrace.h> | ||
39 | |||
40 | #include <mach/system.h> | ||
41 | |||
42 | #include <linux/uaccess.h> | ||
43 | |||
44 | #include "fiq_debugger_ringbuf.h" | ||
45 | |||
46 | #define DEBUG_MAX 64 | ||
47 | #define MAX_UNHANDLED_FIQ_COUNT 1000000 | ||
48 | |||
49 | #define THREAD_INFO(sp) ((struct thread_info *) \ | ||
50 | ((unsigned long)(sp) & ~(THREAD_SIZE - 1))) | ||
51 | |||
52 | struct fiq_debugger_state { | ||
53 | struct fiq_glue_handler handler; | ||
54 | |||
55 | int fiq; | ||
56 | int uart_irq; | ||
57 | int signal_irq; | ||
58 | int wakeup_irq; | ||
59 | bool wakeup_irq_no_set_wake; | ||
60 | struct clk *clk; | ||
61 | struct fiq_debugger_pdata *pdata; | ||
62 | struct platform_device *pdev; | ||
63 | |||
64 | char debug_cmd[DEBUG_MAX]; | ||
65 | int debug_busy; | ||
66 | int debug_abort; | ||
67 | |||
68 | char debug_buf[DEBUG_MAX]; | ||
69 | int debug_count; | ||
70 | |||
71 | bool no_sleep; | ||
72 | bool debug_enable; | ||
73 | bool ignore_next_wakeup_irq; | ||
74 | struct timer_list sleep_timer; | ||
75 | spinlock_t sleep_timer_lock; | ||
76 | bool uart_enabled; | ||
77 | struct wake_lock debugger_wake_lock; | ||
78 | bool console_enable; | ||
79 | int current_cpu; | ||
80 | atomic_t unhandled_fiq_count; | ||
81 | bool in_fiq; | ||
82 | |||
83 | #ifdef CONFIG_FIQ_DEBUGGER_CONSOLE | ||
84 | struct console console; | ||
85 | struct tty_driver *tty_driver; | ||
86 | struct tty_struct *tty; | ||
87 | int tty_open_count; | ||
88 | struct fiq_debugger_ringbuf *tty_rbuf; | ||
89 | bool syslog_dumping; | ||
90 | #endif | ||
91 | |||
92 | unsigned int last_irqs[NR_IRQS]; | ||
93 | unsigned int last_local_timer_irqs[NR_CPUS]; | ||
94 | }; | ||
95 | |||
96 | #ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP | ||
97 | static bool initial_no_sleep = true; | ||
98 | #else | ||
99 | static bool initial_no_sleep; | ||
100 | #endif | ||
101 | |||
102 | #ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE | ||
103 | static bool initial_debug_enable = true; | ||
104 | static bool initial_console_enable = true; | ||
105 | #else | ||
106 | static bool initial_debug_enable; | ||
107 | static bool initial_console_enable; | ||
108 | #endif | ||
109 | |||
110 | module_param_named(no_sleep, initial_no_sleep, bool, 0644); | ||
111 | module_param_named(debug_enable, initial_debug_enable, bool, 0644); | ||
112 | module_param_named(console_enable, initial_console_enable, bool, 0644); | ||
113 | |||
114 | #ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON | ||
115 | static inline void enable_wakeup_irq(struct fiq_debugger_state *state) {} | ||
116 | static inline void disable_wakeup_irq(struct fiq_debugger_state *state) {} | ||
117 | #else | ||
118 | static inline void enable_wakeup_irq(struct fiq_debugger_state *state) | ||
119 | { | ||
120 | if (state->wakeup_irq < 0) | ||
121 | return; | ||
122 | enable_irq(state->wakeup_irq); | ||
123 | if (!state->wakeup_irq_no_set_wake) | ||
124 | enable_irq_wake(state->wakeup_irq); | ||
125 | } | ||
126 | static inline void disable_wakeup_irq(struct fiq_debugger_state *state) | ||
127 | { | ||
128 | if (state->wakeup_irq < 0) | ||
129 | return; | ||
130 | disable_irq_nosync(state->wakeup_irq); | ||
131 | if (!state->wakeup_irq_no_set_wake) | ||
132 | disable_irq_wake(state->wakeup_irq); | ||
133 | } | ||
134 | #endif | ||
135 | |||
136 | static bool inline debug_have_fiq(struct fiq_debugger_state *state) | ||
137 | { | ||
138 | return (state->fiq >= 0); | ||
139 | } | ||
140 | |||
141 | static void debug_force_irq(struct fiq_debugger_state *state) | ||
142 | { | ||
143 | unsigned int irq = state->signal_irq; | ||
144 | |||
145 | if (WARN_ON(!debug_have_fiq(state))) | ||
146 | return; | ||
147 | if (state->pdata->force_irq) { | ||
148 | state->pdata->force_irq(state->pdev, irq); | ||
149 | } else { | ||
150 | struct irq_chip *chip = irq_get_chip(irq); | ||
151 | if (chip && chip->irq_retrigger) | ||
152 | chip->irq_retrigger(irq_get_irq_data(irq)); | ||
153 | } | ||
154 | } | ||
155 | |||
156 | static void debug_uart_enable(struct fiq_debugger_state *state) | ||
157 | { | ||
158 | if (state->clk) | ||
159 | clk_enable(state->clk); | ||
160 | if (state->pdata->uart_enable) | ||
161 | state->pdata->uart_enable(state->pdev); | ||
162 | } | ||
163 | |||
164 | static void debug_uart_disable(struct fiq_debugger_state *state) | ||
165 | { | ||
166 | if (state->pdata->uart_disable) | ||
167 | state->pdata->uart_disable(state->pdev); | ||
168 | if (state->clk) | ||
169 | clk_disable(state->clk); | ||
170 | } | ||
171 | |||
172 | static void debug_uart_flush(struct fiq_debugger_state *state) | ||
173 | { | ||
174 | if (state->pdata->uart_flush) | ||
175 | state->pdata->uart_flush(state->pdev); | ||
176 | } | ||
177 | |||
178 | static void debug_puts(struct fiq_debugger_state *state, char *s) | ||
179 | { | ||
180 | unsigned c; | ||
181 | while ((c = *s++)) { | ||
182 | if (c == '\n') | ||
183 | state->pdata->uart_putc(state->pdev, '\r'); | ||
184 | state->pdata->uart_putc(state->pdev, c); | ||
185 | } | ||
186 | } | ||
187 | |||
188 | static void debug_prompt(struct fiq_debugger_state *state) | ||
189 | { | ||
190 | debug_puts(state, "debug> "); | ||
191 | } | ||
192 | |||
193 | int log_buf_copy(char *dest, int idx, int len); | ||
194 | static void dump_kernel_log(struct fiq_debugger_state *state) | ||
195 | { | ||
196 | char buf[1024]; | ||
197 | int idx = 0; | ||
198 | int ret; | ||
199 | int saved_oip; | ||
200 | |||
201 | /* setting oops_in_progress prevents log_buf_copy() | ||
202 | * from trying to take a spinlock which will make it | ||
203 | * very unhappy in some cases... | ||
204 | */ | ||
205 | saved_oip = oops_in_progress; | ||
206 | oops_in_progress = 1; | ||
207 | for (;;) { | ||
208 | ret = log_buf_copy(buf, idx, 1023); | ||
209 | if (ret <= 0) | ||
210 | break; | ||
211 | buf[ret] = 0; | ||
212 | debug_puts(state, buf); | ||
213 | idx += ret; | ||
214 | } | ||
215 | oops_in_progress = saved_oip; | ||
216 | } | ||
217 | |||
218 | static char *mode_name(unsigned cpsr) | ||
219 | { | ||
220 | switch (cpsr & MODE_MASK) { | ||
221 | case USR_MODE: return "USR"; | ||
222 | case FIQ_MODE: return "FIQ"; | ||
223 | case IRQ_MODE: return "IRQ"; | ||
224 | case SVC_MODE: return "SVC"; | ||
225 | case ABT_MODE: return "ABT"; | ||
226 | case UND_MODE: return "UND"; | ||
227 | case SYSTEM_MODE: return "SYS"; | ||
228 | default: return "???"; | ||
229 | } | ||
230 | } | ||
231 | |||
232 | static int debug_printf(void *cookie, const char *fmt, ...) | ||
233 | { | ||
234 | struct fiq_debugger_state *state = cookie; | ||
235 | char buf[256]; | ||
236 | va_list ap; | ||
237 | |||
238 | va_start(ap, fmt); | ||
239 | vsnprintf(buf, sizeof(buf), fmt, ap); | ||
240 | va_end(ap); | ||
241 | |||
242 | debug_puts(state, buf); | ||
243 | return state->debug_abort; | ||
244 | } | ||
245 | |||
246 | /* Safe outside fiq context */ | ||
247 | static int debug_printf_nfiq(void *cookie, const char *fmt, ...) | ||
248 | { | ||
249 | struct fiq_debugger_state *state = cookie; | ||
250 | char buf[256]; | ||
251 | va_list ap; | ||
252 | unsigned long irq_flags; | ||
253 | |||
254 | va_start(ap, fmt); | ||
255 | vsnprintf(buf, 128, fmt, ap); | ||
256 | va_end(ap); | ||
257 | |||
258 | local_irq_save(irq_flags); | ||
259 | debug_puts(state, buf); | ||
260 | debug_uart_flush(state); | ||
261 | local_irq_restore(irq_flags); | ||
262 | return state->debug_abort; | ||
263 | } | ||
264 | |||
265 | static void dump_regs(struct fiq_debugger_state *state, unsigned *regs) | ||
266 | { | ||
267 | debug_printf(state, " r0 %08x r1 %08x r2 %08x r3 %08x\n", | ||
268 | regs[0], regs[1], regs[2], regs[3]); | ||
269 | debug_printf(state, " r4 %08x r5 %08x r6 %08x r7 %08x\n", | ||
270 | regs[4], regs[5], regs[6], regs[7]); | ||
271 | debug_printf(state, " r8 %08x r9 %08x r10 %08x r11 %08x mode %s\n", | ||
272 | regs[8], regs[9], regs[10], regs[11], | ||
273 | mode_name(regs[16])); | ||
274 | if ((regs[16] & MODE_MASK) == USR_MODE) | ||
275 | debug_printf(state, " ip %08x sp %08x lr %08x pc %08x " | ||
276 | "cpsr %08x\n", regs[12], regs[13], regs[14], | ||
277 | regs[15], regs[16]); | ||
278 | else | ||
279 | debug_printf(state, " ip %08x sp %08x lr %08x pc %08x " | ||
280 | "cpsr %08x spsr %08x\n", regs[12], regs[13], | ||
281 | regs[14], regs[15], regs[16], regs[17]); | ||
282 | } | ||
283 | |||
284 | struct mode_regs { | ||
285 | unsigned long sp_svc; | ||
286 | unsigned long lr_svc; | ||
287 | unsigned long spsr_svc; | ||
288 | |||
289 | unsigned long sp_abt; | ||
290 | unsigned long lr_abt; | ||
291 | unsigned long spsr_abt; | ||
292 | |||
293 | unsigned long sp_und; | ||
294 | unsigned long lr_und; | ||
295 | unsigned long spsr_und; | ||
296 | |||
297 | unsigned long sp_irq; | ||
298 | unsigned long lr_irq; | ||
299 | unsigned long spsr_irq; | ||
300 | |||
301 | unsigned long r8_fiq; | ||
302 | unsigned long r9_fiq; | ||
303 | unsigned long r10_fiq; | ||
304 | unsigned long r11_fiq; | ||
305 | unsigned long r12_fiq; | ||
306 | unsigned long sp_fiq; | ||
307 | unsigned long lr_fiq; | ||
308 | unsigned long spsr_fiq; | ||
309 | }; | ||
310 | |||
311 | void __naked get_mode_regs(struct mode_regs *regs) | ||
312 | { | ||
313 | asm volatile ( | ||
314 | "mrs r1, cpsr\n" | ||
315 | "msr cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n" | ||
316 | "stmia r0!, {r13 - r14}\n" | ||
317 | "mrs r2, spsr\n" | ||
318 | "msr cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n" | ||
319 | "stmia r0!, {r2, r13 - r14}\n" | ||
320 | "mrs r2, spsr\n" | ||
321 | "msr cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n" | ||
322 | "stmia r0!, {r2, r13 - r14}\n" | ||
323 | "mrs r2, spsr\n" | ||
324 | "msr cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" | ||
325 | "stmia r0!, {r2, r13 - r14}\n" | ||
326 | "mrs r2, spsr\n" | ||
327 | "msr cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n" | ||
328 | "stmia r0!, {r2, r8 - r14}\n" | ||
329 | "mrs r2, spsr\n" | ||
330 | "stmia r0!, {r2}\n" | ||
331 | "msr cpsr_c, r1\n" | ||
332 | "bx lr\n"); | ||
333 | } | ||
334 | |||
335 | |||
336 | static void dump_allregs(struct fiq_debugger_state *state, unsigned *regs) | ||
337 | { | ||
338 | struct mode_regs mode_regs; | ||
339 | dump_regs(state, regs); | ||
340 | get_mode_regs(&mode_regs); | ||
341 | debug_printf(state, " svc: sp %08x lr %08x spsr %08x\n", | ||
342 | mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc); | ||
343 | debug_printf(state, " abt: sp %08x lr %08x spsr %08x\n", | ||
344 | mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt); | ||
345 | debug_printf(state, " und: sp %08x lr %08x spsr %08x\n", | ||
346 | mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und); | ||
347 | debug_printf(state, " irq: sp %08x lr %08x spsr %08x\n", | ||
348 | mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq); | ||
349 | debug_printf(state, " fiq: r8 %08x r9 %08x r10 %08x r11 %08x " | ||
350 | "r12 %08x\n", | ||
351 | mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq, | ||
352 | mode_regs.r11_fiq, mode_regs.r12_fiq); | ||
353 | debug_printf(state, " fiq: sp %08x lr %08x spsr %08x\n", | ||
354 | mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq); | ||
355 | } | ||
356 | |||
357 | static void dump_irqs(struct fiq_debugger_state *state) | ||
358 | { | ||
359 | int n; | ||
360 | unsigned int cpu; | ||
361 | |||
362 | debug_printf(state, "irqnr total since-last status name\n"); | ||
363 | for (n = 0; n < NR_IRQS; n++) { | ||
364 | struct irqaction *act = irq_desc[n].action; | ||
365 | if (!act && !kstat_irqs(n)) | ||
366 | continue; | ||
367 | debug_printf(state, "%5d: %10u %11u %8x %s\n", n, | ||
368 | kstat_irqs(n), | ||
369 | kstat_irqs(n) - state->last_irqs[n], | ||
370 | irq_desc[n].status_use_accessors, | ||
371 | (act && act->name) ? act->name : "???"); | ||
372 | state->last_irqs[n] = kstat_irqs(n); | ||
373 | } | ||
374 | |||
375 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
376 | |||
377 | debug_printf(state, "LOC %d: %10u %11u\n", cpu, | ||
378 | __IRQ_STAT(cpu, local_timer_irqs), | ||
379 | __IRQ_STAT(cpu, local_timer_irqs) - | ||
380 | state->last_local_timer_irqs[cpu]); | ||
381 | state->last_local_timer_irqs[cpu] = | ||
382 | __IRQ_STAT(cpu, local_timer_irqs); | ||
383 | } | ||
384 | } | ||
385 | |||
386 | struct stacktrace_state { | ||
387 | struct fiq_debugger_state *state; | ||
388 | unsigned int depth; | ||
389 | }; | ||
390 | |||
391 | static int report_trace(struct stackframe *frame, void *d) | ||
392 | { | ||
393 | struct stacktrace_state *sts = d; | ||
394 | |||
395 | if (sts->depth) { | ||
396 | debug_printf(sts->state, | ||
397 | " pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n", | ||
398 | frame->pc, frame->pc, frame->lr, frame->lr, | ||
399 | frame->sp, frame->fp); | ||
400 | sts->depth--; | ||
401 | return 0; | ||
402 | } | ||
403 | debug_printf(sts->state, " ...\n"); | ||
404 | |||
405 | return sts->depth == 0; | ||
406 | } | ||
407 | |||
408 | struct frame_tail { | ||
409 | struct frame_tail *fp; | ||
410 | unsigned long sp; | ||
411 | unsigned long lr; | ||
412 | } __attribute__((packed)); | ||
413 | |||
414 | static struct frame_tail *user_backtrace(struct fiq_debugger_state *state, | ||
415 | struct frame_tail *tail) | ||
416 | { | ||
417 | struct frame_tail buftail[2]; | ||
418 | |||
419 | /* Also check accessibility of one struct frame_tail beyond */ | ||
420 | if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) { | ||
421 | debug_printf(state, " invalid frame pointer %p\n", tail); | ||
422 | return NULL; | ||
423 | } | ||
424 | if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) { | ||
425 | debug_printf(state, | ||
426 | " failed to copy frame pointer %p\n", tail); | ||
427 | return NULL; | ||
428 | } | ||
429 | |||
430 | debug_printf(state, " %p\n", buftail[0].lr); | ||
431 | |||
432 | /* frame pointers should strictly progress back up the stack | ||
433 | * (towards higher addresses) */ | ||
434 | if (tail >= buftail[0].fp) | ||
435 | return NULL; | ||
436 | |||
437 | return buftail[0].fp-1; | ||
438 | } | ||
439 | |||
/*
 * Print a backtrace for the interrupted context.  @ssp is the SVC-mode
 * stack pointer of the interrupted cpu; its thread_info is copied over
 * the fiq handler's own so that current/current_thread_info() resolve
 * to the interrupted task while printing.
 *
 * Kernel-mode contexts are unwound via walk_stackframe(); user-mode
 * contexts are unwound frame-by-frame with user_backtrace(), stopping
 * at @depth frames, a NULL fp, or a misaligned fp.
 */
void dump_stacktrace(struct fiq_debugger_state *state,
		struct pt_regs * const regs, unsigned int depth, void *ssp)
{
	struct frame_tail *tail;
	struct thread_info *real_thread_info = THREAD_INFO(ssp);
	struct stacktrace_state sts;

	sts.depth = depth;
	sts.state = state;
	/* impersonate the interrupted task for the duration of the dump */
	*current_thread_info() = *real_thread_info;

	if (!current)
		debug_printf(state, "current NULL\n");
	else
		debug_printf(state, "pid: %d comm: %s\n",
			current->pid, current->comm);
	dump_regs(state, (unsigned *)regs);

	if (!user_mode(regs)) {
		struct stackframe frame;
		frame.fp = regs->ARM_fp;
		frame.sp = regs->ARM_sp;
		frame.lr = regs->ARM_lr;
		frame.pc = regs->ARM_pc;
		debug_printf(state,
			" pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
			regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
			regs->ARM_sp, regs->ARM_fp);
		walk_stackframe(&frame, report_trace, &sts);
		return;
	}

	/* user mode: walk the APCS frame records on the user stack */
	tail = ((struct frame_tail *) regs->ARM_fp) - 1;
	while (depth-- && tail && !((unsigned long) tail & 3))
		tail = user_backtrace(state, tail);
}
476 | |||
/*
 * Print a one-line summary of every thread in the system ("ps" command).
 * Takes tasklist_lock for reading; must not be called from fiq context
 * (see debug_irq_exec).
 */
static void do_ps(struct fiq_debugger_state *state)
{
	struct task_struct *g;
	struct task_struct *p;
	unsigned task_state;
	/* one letter per scheduler state, indexed by __ffs(state)+1 */
	static const char stat_nam[] = "RSDTtZX";

	debug_printf(state, "pid ppid prio task pc\n");
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		task_state = p->state ? __ffs(p->state) + 1 : 0;
		debug_printf(state,
			     "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
		debug_printf(state, "%-13.13s %c", p->comm,
			     task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
		/* TASK_RUNNING == 0, which maps to task_state == 0 above */
		if (task_state == TASK_RUNNING)
			debug_printf(state, " running\n");
		else
			debug_printf(state, " %08lx\n", thread_saved_pc(p));
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
}
499 | |||
#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
/* With the fiq console compiled in, sysrq output reaches the uart via
 * the console path; just open/close the gate in debug_console_write(). */
static void begin_syslog_dump(struct fiq_debugger_state *state)
{
	state->syslog_dumping = true;
}

static void end_syslog_dump(struct fiq_debugger_state *state)
{
	state->syslog_dumping = false;
}
#else
/* Without the console, clear the log before sysrq runs and then replay
 * whatever sysrq appended, straight from the kernel log buffer. */
extern int do_syslog(int type, char __user *bug, int count);
static void begin_syslog_dump(struct fiq_debugger_state *state)
{
	do_syslog(5 /* clear */, NULL, 0);
}

static void end_syslog_dump(struct fiq_debugger_state *state)
{
	char buf[128];
	int ret;
	int idx = 0;

	/* copy the log buffer out in 127-byte chunks until drained */
	while (1) {
		ret = log_buf_copy(buf, idx, sizeof(buf) - 1);
		if (ret <= 0)
			break;
		buf[ret] = 0;
		debug_printf(state, "%s", buf);
		idx += ret;
	}
}
#endif
533 | |||
/* Run a sysrq command and echo its syslog output back to the debugger. */
static void do_sysrq(struct fiq_debugger_state *state, char rq)
{
	begin_syslog_dump(state);
	handle_sysrq(rq);
	end_syslog_dump(state);
}
540 | |||
/*
 * Dispatch a command that must run in normal irq context rather than
 * fiq context (it may take locks / sleepable paths).
 * This function CANNOT be called in FIQ context.
 */
static void debug_irq_exec(struct fiq_debugger_state *state, char *cmd)
{
	if (!strcmp(cmd, "ps"))
		do_ps(state);
	else if (!strcmp(cmd, "sysrq"))
		do_sysrq(state, 'h');
	else if (!strncmp(cmd, "sysrq ", 6))
		do_sysrq(state, cmd[6]);
}
551 | |||
/*
 * Print the command summary for the fiq debugger shell.  Split into
 * several debug_printf() calls to stay within the formatting buffer.
 * Fixes the user-visible typo "Interupt" -> "Interrupt".
 */
static void debug_help(struct fiq_debugger_state *state)
{
	debug_printf(state,	"FIQ Debugger commands:\n"
				" pc            PC status\n"
				" regs          Register dump\n"
				" allregs       Extended Register dump\n"
				" bt            Stack trace\n"
				" reboot        Reboot\n"
				" irqs          Interrupt status\n"
				" kmsg          Kernel log\n"
				" version       Kernel version\n");
	debug_printf(state,	" sleep         Allow sleep while in FIQ\n"
				" nosleep       Disable sleep while in FIQ\n"
				" console       Switch terminal to console\n"
				" cpu           Current CPU\n"
				" cpu <number>  Switch to CPU<number>\n");
	debug_printf(state,	" ps            Process list\n"
				" sysrq         sysrq options\n"
				" sysrq <param> Execute sysrq with <param>\n");
}
572 | |||
/*
 * smp_call_function_single() callback: re-route the uart irq to the cpu
 * this runs on, so the debugger follows the selected cpu in irq mode.
 * NOTE(review): uses get_cpu() without a matching put_cpu(), leaving
 * preemption disabled — appears benign in irq context but worth
 * confirming against the callers.
 */
static void take_affinity(void *info)
{
	struct fiq_debugger_state *state = info;
	struct cpumask cpumask;

	cpumask_clear(&cpumask);
	cpumask_set_cpu(get_cpu(), &cpumask);

	irq_set_affinity(state->uart_irq, &cpumask);
}
583 | |||
/*
 * Move the debugger to @cpu.  In irq mode this re-targets the uart irq
 * affinity; in fiq mode only the bookkeeping changes (the fiq handler
 * itself ignores input on non-current cpus).
 */
static void switch_cpu(struct fiq_debugger_state *state, int cpu)
{
	if (!debug_have_fiq(state))
		smp_call_function_single(cpu, take_affinity, state, false);
	state->current_cpu = cpu;
}
590 | |||
/*
 * Execute a debugger command directly in fiq (or uart-irq) context.
 * Commands that cannot safely run here are copied into
 * state->debug_cmd and flagged via debug_busy so debug_irq_exec() can
 * run them later from irq context.
 *
 * Returns true when the signal irq should be raised to finish work in
 * irq context.
 */
static bool debug_fiq_exec(struct fiq_debugger_state *state,
			const char *cmd, unsigned *regs, void *svc_sp)
{
	bool signal_helper = false;

	if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
		debug_help(state);
	} else if (!strcmp(cmd, "pc")) {
		/* regs[15]/regs[16] are pc/cpsr in the saved register frame */
		debug_printf(state, " pc %08x cpsr %08x mode %s\n",
			regs[15], regs[16], mode_name(regs[16]));
	} else if (!strcmp(cmd, "regs")) {
		dump_regs(state, regs);
	} else if (!strcmp(cmd, "allregs")) {
		dump_allregs(state, regs);
	} else if (!strcmp(cmd, "bt")) {
		dump_stacktrace(state, (struct pt_regs *)regs, 100, svc_sp);
	} else if (!strcmp(cmd, "reboot")) {
		arch_reset(0, 0);
	} else if (!strcmp(cmd, "irqs")) {
		dump_irqs(state);
	} else if (!strcmp(cmd, "kmsg")) {
		dump_kernel_log(state);
	} else if (!strcmp(cmd, "version")) {
		debug_printf(state, "%s\n", linux_banner);
	} else if (!strcmp(cmd, "sleep")) {
		state->no_sleep = false;
		debug_printf(state, "enabling sleep\n");
	} else if (!strcmp(cmd, "nosleep")) {
		state->no_sleep = true;
		debug_printf(state, "disabling sleep\n");
	} else if (!strcmp(cmd, "console")) {
		state->console_enable = true;
		debug_printf(state, "console mode\n");
	} else if (!strcmp(cmd, "cpu")) {
		debug_printf(state, "cpu %d\n", state->current_cpu);
	} else if (!strncmp(cmd, "cpu ", 4)) {
		unsigned long cpu = 0;
		if (strict_strtoul(cmd + 4, 10, &cpu) == 0)
			switch_cpu(state, cpu);
		else
			debug_printf(state, "invalid cpu\n");
		debug_printf(state, "cpu %d\n", state->current_cpu);
	} else {
		/* unknown here: hand the command off to irq context */
		if (state->debug_busy) {
			debug_printf(state,
				"command processor busy. trying to abort.\n");
			state->debug_abort = -1;
		} else {
			strcpy(state->debug_cmd, cmd);
			state->debug_busy = 1;
		}

		return true;
	}
	if (!state->console_enable)
		debug_prompt(state);

	return signal_helper;
}
650 | |||
/*
 * Inactivity timer: after a period with no input, power down the uart,
 * re-arm the wakeup irq and drop the wakelock so the system may sleep.
 * Skipped entirely while no_sleep is set.
 */
static void sleep_timer_expired(unsigned long data)
{
	struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
	unsigned long flags;

	spin_lock_irqsave(&state->sleep_timer_lock, flags);
	if (state->uart_enabled && !state->no_sleep) {
		if (state->debug_enable && !state->console_enable) {
			state->debug_enable = false;
			debug_printf_nfiq(state, "suspending fiq debugger\n");
		}
		/* the wakeup irq may already be pending from our own traffic */
		state->ignore_next_wakeup_irq = true;
		debug_uart_disable(state);
		state->uart_enabled = false;
		enable_wakeup_irq(state);
	}
	wake_unlock(&state->debugger_wake_lock);
	spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
}
670 | |||
/*
 * Bring the uart back up on a wakeup event: take the wakelock, enable
 * the uart, and start the inactivity timer.  A single spurious wakeup
 * irq (flagged by ignore_next_wakeup_irq) is swallowed.
 */
static void handle_wakeup(struct fiq_debugger_state *state)
{
	unsigned long flags;

	spin_lock_irqsave(&state->sleep_timer_lock, flags);
	if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
		state->ignore_next_wakeup_irq = false;
	} else if (!state->uart_enabled) {
		wake_lock(&state->debugger_wake_lock);
		debug_uart_enable(state);
		state->uart_enabled = true;
		disable_wakeup_irq(state);
		mod_timer(&state->sleep_timer, jiffies + HZ / 2);
	}
	spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
}
687 | |||
/* Handler for the optional "wakeup" irq: re-enable the uart on activity. */
static irqreturn_t wakeup_irq_handler(int irq, void *dev)
{
	struct fiq_debugger_state *state = dev;

	if (!state->no_sleep)
		debug_puts(state, "WAKEUP\n");
	handle_wakeup(state);

	return IRQ_HANDLED;
}
698 | |||
699 | |||
/*
 * Finish debugger work that must happen in irq context: refresh the
 * wakelock and inactivity timer, push buffered console bytes into the
 * tty layer, and run any command deferred by debug_fiq_exec().
 */
static void debug_handle_irq_context(struct fiq_debugger_state *state)
{
	if (!state->no_sleep) {
		unsigned long flags;

		spin_lock_irqsave(&state->sleep_timer_lock, flags);
		wake_lock(&state->debugger_wake_lock);
		mod_timer(&state->sleep_timer, jiffies + HZ * 5);
		spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
	}
#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
	if (state->tty) {
		int i;
		int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
		/* drain the fiq-filled ring buffer into the tty flip buffer */
		for (i = 0; i < count; i++) {
			int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
			tty_insert_flip_char(state->tty, c, TTY_NORMAL);
			if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
				pr_warn("fiq tty failed to consume byte\n");
		}
		tty_flip_buffer_push(state->tty);
	}
#endif
	if (state->debug_busy) {
		debug_irq_exec(state, state->debug_cmd);
		debug_prompt(state);
		state->debug_busy = 0;
	}
}
729 | |||
/* Read one character from the platform uart; the platform callback
 * returns FIQ_DEBUGGER_NO_CHAR when the fifo is empty. */
static int debug_getc(struct fiq_debugger_state *state)
{
	return state->pdata->uart_getc(state->pdev);
}
734 | |||
/*
 * Core input loop, shared by the fiq handler and the plain-irq handler.
 * Drains the uart, implements line editing (echo, backspace, CR/LF
 * folding), activates the debugger on Enter, and dispatches completed
 * lines through debug_fiq_exec().  In console mode, bytes are pushed
 * into the tty ring buffer instead.
 *
 * Only the "current" cpu services input; a stuck current cpu is
 * detected after MAX_UNHANDLED_FIQ_COUNT foreign fiqs and the debugger
 * is migrated to the responding cpu.
 *
 * Returns true when debug_handle_irq_context() still needs to run in
 * irq context (deferred command, tty bytes, or sleep-timer upkeep).
 */
static bool debug_handle_uart_interrupt(struct fiq_debugger_state *state,
			int this_cpu, void *regs, void *svc_sp)
{
	int c;
	static int last_c;
	int count = 0;
	bool signal_helper = false;

	if (this_cpu != state->current_cpu) {
		if (state->in_fiq)
			return false;

		if (atomic_inc_return(&state->unhandled_fiq_count) !=
					MAX_UNHANDLED_FIQ_COUNT)
			return false;

		debug_printf(state, "fiq_debugger: cpu %d not responding, "
			"reverting to cpu %d\n", state->current_cpu,
			this_cpu);

		atomic_set(&state->unhandled_fiq_count, 0);
		switch_cpu(state, this_cpu);
		return false;
	}

	state->in_fiq = true;

	while ((c = debug_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
		count++;
		if (!state->debug_enable) {
			/* dormant: only Enter (CR/LF) activates the debugger */
			if ((c == 13) || (c == 10)) {
				state->debug_enable = true;
				state->debug_count = 0;
				debug_prompt(state);
			}
		} else if (c == FIQ_DEBUGGER_BREAK) {
			/* break sequence drops from console back to debugger */
			state->console_enable = false;
			debug_puts(state, "fiq debugger mode\n");
			state->debug_count = 0;
			debug_prompt(state);
#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
		} else if (state->console_enable && state->tty_rbuf) {
			fiq_debugger_ringbuf_push(state->tty_rbuf, c);
			signal_helper = true;
#endif
		} else if ((c >= ' ') && (c < 127)) {
			/* printable: append to line buffer and echo */
			if (state->debug_count < (DEBUG_MAX - 1)) {
				state->debug_buf[state->debug_count++] = c;
				state->pdata->uart_putc(state->pdev, c);
			}
		} else if ((c == 8) || (c == 127)) {
			/* backspace/delete: rub out last char on screen */
			if (state->debug_count > 0) {
				state->debug_count--;
				state->pdata->uart_putc(state->pdev, 8);
				state->pdata->uart_putc(state->pdev, ' ');
				state->pdata->uart_putc(state->pdev, 8);
			}
		} else if ((c == 13) || (c == 10)) {
			/* collapse CRLF pairs to a single newline echo */
			if (c == '\r' || (c == '\n' && last_c != '\r')) {
				state->pdata->uart_putc(state->pdev, '\r');
				state->pdata->uart_putc(state->pdev, '\n');
			}
			if (state->debug_count) {
				state->debug_buf[state->debug_count] = 0;
				state->debug_count = 0;
				signal_helper |=
					debug_fiq_exec(state, state->debug_buf,
						       regs, svc_sp);
			} else {
				debug_prompt(state);
			}
		}
		last_c = c;
	}
	debug_uart_flush(state);
	if (state->pdata->fiq_ack)
		state->pdata->fiq_ack(state->pdev, state->fiq);

	/* poke sleep timer if necessary */
	if (state->debug_enable && !state->no_sleep)
		signal_helper = true;

	atomic_set(&state->unhandled_fiq_count, 0);
	state->in_fiq = false;

	return signal_helper;
}
822 | |||
/*
 * fiq_glue entry point: run the input loop in fiq context and, if
 * follow-up work is needed, trigger the signal irq so it completes in
 * normal irq context.
 */
static void debug_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
{
	struct fiq_debugger_state *state =
		container_of(h, struct fiq_debugger_state, handler);
	/* cpu number of the interrupted context, read off its svc stack */
	unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
	bool need_irq;

	need_irq = debug_handle_uart_interrupt(state, this_cpu, regs, svc_sp);
	if (need_irq)
		debug_force_irq(state);
}
834 | |||
/*
 * When not using FIQs, we only use this single interrupt as an entry point.
 * This just effectively takes over the UART interrupt and does all the work
 * in this context.
 */
static irqreturn_t debug_uart_irq(int irq, void *dev)
{
	struct fiq_debugger_state *state = dev;
	bool not_done;

	handle_wakeup(state);

	/* handle the debugger irq in regular context */
	not_done = debug_handle_uart_interrupt(state, smp_processor_id(),
					      get_irq_regs(),
					      current_thread_info());
	/* already in irq context, so finish the deferred work inline */
	if (not_done)
		debug_handle_irq_context(state);

	return IRQ_HANDLED;
}
856 | |||
/*
 * If FIQs are used, not everything can happen in fiq context.
 * FIQ handler does what it can and then signals this interrupt to finish the
 * job in irq context.
 */
static irqreturn_t debug_signal_irq(int irq, void *dev)
{
	struct fiq_debugger_state *state = dev;

	if (state->pdata->force_irq_ack)
		state->pdata->force_irq_ack(state->pdev, state->signal_irq);

	debug_handle_irq_context(state);

	return IRQ_HANDLED;
}
873 | |||
/* fiq_glue resume hook: let the platform re-initialize the uart after
 * a system suspend. */
static void debug_resume(struct fiq_glue_handler *h)
{
	struct fiq_debugger_state *state =
		container_of(h, struct fiq_debugger_state, handler);
	if (state->pdata->uart_resume)
		state->pdata->uart_resume(state->pdev);
}
881 | |||
882 | #if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) | ||
883 | struct tty_driver *debug_console_device(struct console *co, int *index) | ||
884 | { | ||
885 | struct fiq_debugger_state *state; | ||
886 | state = container_of(co, struct fiq_debugger_state, console); | ||
887 | *index = 0; | ||
888 | return state->tty_driver; | ||
889 | } | ||
890 | |||
/*
 * console->write hook.  Output is dropped unless console mode is active
 * or a syslog dump (sysrq) is in progress.  The uart is powered up for
 * the duration of the write, with LF expanded to CRLF.
 */
static void debug_console_write(struct console *co,
				const char *s, unsigned int count)
{
	struct fiq_debugger_state *state;

	state = container_of(co, struct fiq_debugger_state, console);

	if (!state->console_enable && !state->syslog_dumping)
		return;

	debug_uart_enable(state);
	while (count--) {
		if (*s == '\n')
			state->pdata->uart_putc(state->pdev, '\r');
		state->pdata->uart_putc(state->pdev, *s++);
	}
	debug_uart_flush(state);
	debug_uart_disable(state);
}
910 | |||
/* Console template copied into each state in fiq_debugger_probe();
 * CON_ANYTIME lets it print before the cpu is fully online. */
static struct console fiq_debugger_console = {
	.name = "ttyFIQ",
	.device = debug_console_device,
	.write = debug_console_write,
	.flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
};
917 | |||
/* tty open: wire the tty to the debugger state on first open only;
 * subsequent opens just bump the reference count. */
int fiq_tty_open(struct tty_struct *tty, struct file *filp)
{
	struct fiq_debugger_state *state = tty->driver->driver_state;
	if (state->tty_open_count++)
		return 0;

	tty->driver_data = state;
	state->tty = tty;
	return 0;
}
928 | |||
/* tty close: detach from the debugger state when the last opener leaves. */
void fiq_tty_close(struct tty_struct *tty, struct file *filp)
{
	struct fiq_debugger_state *state = tty->driver_data;
	if (--state->tty_open_count)
		return;
	state->tty = NULL;
}
936 | |||
/*
 * tty write: push bytes straight to the uart.  When console mode is off
 * the data is silently discarded but still reported as written, so the
 * tty layer does not stall.
 */
int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	int i;
	struct fiq_debugger_state *state = tty->driver_data;

	if (!state->console_enable)
		return count;

	debug_uart_enable(state);
	for (i = 0; i < count; i++)
		state->pdata->uart_putc(state->pdev, *buf++);
	debug_uart_disable(state);

	return count;
}
952 | |||
/* tty write_room: writes never block, so report a fixed generous size. */
int fiq_tty_write_room(struct tty_struct *tty)
{
	return 1024;
}
957 | |||
/* Operations for the ttyFIQ console device. */
static const struct tty_operations fiq_tty_driver_ops = {
	.write = fiq_tty_write,
	.write_room = fiq_tty_write_room,
	.open = fiq_tty_open,
	.close = fiq_tty_close,
};
964 | |||
965 | static int fiq_debugger_tty_init(struct fiq_debugger_state *state) | ||
966 | { | ||
967 | int ret = -EINVAL; | ||
968 | |||
969 | state->tty_driver = alloc_tty_driver(1); | ||
970 | if (!state->tty_driver) { | ||
971 | pr_err("Failed to allocate fiq debugger tty\n"); | ||
972 | return -ENOMEM; | ||
973 | } | ||
974 | |||
975 | state->tty_driver->owner = THIS_MODULE; | ||
976 | state->tty_driver->driver_name = "fiq-debugger"; | ||
977 | state->tty_driver->name = "ttyFIQ"; | ||
978 | state->tty_driver->type = TTY_DRIVER_TYPE_SERIAL; | ||
979 | state->tty_driver->subtype = SERIAL_TYPE_NORMAL; | ||
980 | state->tty_driver->init_termios = tty_std_termios; | ||
981 | state->tty_driver->init_termios.c_cflag = | ||
982 | B115200 | CS8 | CREAD | HUPCL | CLOCAL; | ||
983 | state->tty_driver->init_termios.c_ispeed = | ||
984 | state->tty_driver->init_termios.c_ospeed = 115200; | ||
985 | state->tty_driver->flags = TTY_DRIVER_REAL_RAW; | ||
986 | tty_set_operations(state->tty_driver, &fiq_tty_driver_ops); | ||
987 | state->tty_driver->driver_state = state; | ||
988 | |||
989 | ret = tty_register_driver(state->tty_driver); | ||
990 | if (ret) { | ||
991 | pr_err("Failed to register fiq tty: %d\n", ret); | ||
992 | goto err; | ||
993 | } | ||
994 | |||
995 | state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024); | ||
996 | if (!state->tty_rbuf) { | ||
997 | pr_err("Failed to allocate fiq debugger ringbuf\n"); | ||
998 | ret = -ENOMEM; | ||
999 | goto err; | ||
1000 | } | ||
1001 | |||
1002 | pr_info("Registered FIQ tty driver %p\n", state->tty_driver); | ||
1003 | return 0; | ||
1004 | |||
1005 | err: | ||
1006 | fiq_debugger_ringbuf_free(state->tty_rbuf); | ||
1007 | state->tty_rbuf = NULL; | ||
1008 | put_tty_driver(state->tty_driver); | ||
1009 | return ret; | ||
1010 | } | ||
1011 | #endif | ||
1012 | |||
/* dev_pm_ops suspend hook: forward to the platform's uart suspend, if any. */
static int fiq_debugger_dev_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fiq_debugger_state *state = platform_get_drvdata(pdev);

	if (state->pdata->uart_dev_suspend)
		return state->pdata->uart_dev_suspend(pdev);
	return 0;
}
1022 | |||
/* dev_pm_ops resume hook: forward to the platform's uart resume, if any. */
static int fiq_debugger_dev_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct fiq_debugger_state *state = platform_get_drvdata(pdev);

	if (state->pdata->uart_dev_resume)
		return state->pdata->uart_dev_resume(pdev);
	return 0;
}
1032 | |||
1033 | static int fiq_debugger_probe(struct platform_device *pdev) | ||
1034 | { | ||
1035 | int ret; | ||
1036 | struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev); | ||
1037 | struct fiq_debugger_state *state; | ||
1038 | int fiq; | ||
1039 | int uart_irq; | ||
1040 | |||
1041 | if (!pdata->uart_getc || !pdata->uart_putc) | ||
1042 | return -EINVAL; | ||
1043 | if ((pdata->uart_enable && !pdata->uart_disable) || | ||
1044 | (!pdata->uart_enable && pdata->uart_disable)) | ||
1045 | return -EINVAL; | ||
1046 | |||
1047 | fiq = platform_get_irq_byname(pdev, "fiq"); | ||
1048 | uart_irq = platform_get_irq_byname(pdev, "uart_irq"); | ||
1049 | |||
1050 | /* uart_irq mode and fiq mode are mutually exclusive, but one of them | ||
1051 | * is required */ | ||
1052 | if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0)) | ||
1053 | return -EINVAL; | ||
1054 | if (fiq >= 0 && !pdata->fiq_enable) | ||
1055 | return -EINVAL; | ||
1056 | |||
1057 | state = kzalloc(sizeof(*state), GFP_KERNEL); | ||
1058 | setup_timer(&state->sleep_timer, sleep_timer_expired, | ||
1059 | (unsigned long)state); | ||
1060 | state->pdata = pdata; | ||
1061 | state->pdev = pdev; | ||
1062 | state->no_sleep = initial_no_sleep; | ||
1063 | state->debug_enable = initial_debug_enable; | ||
1064 | state->console_enable = initial_console_enable; | ||
1065 | |||
1066 | state->fiq = fiq; | ||
1067 | state->uart_irq = uart_irq; | ||
1068 | state->signal_irq = platform_get_irq_byname(pdev, "signal"); | ||
1069 | state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup"); | ||
1070 | |||
1071 | platform_set_drvdata(pdev, state); | ||
1072 | |||
1073 | spin_lock_init(&state->sleep_timer_lock); | ||
1074 | |||
1075 | if (state->wakeup_irq < 0 && debug_have_fiq(state)) | ||
1076 | state->no_sleep = true; | ||
1077 | state->ignore_next_wakeup_irq = !state->no_sleep; | ||
1078 | |||
1079 | wake_lock_init(&state->debugger_wake_lock, | ||
1080 | WAKE_LOCK_SUSPEND, "serial-debug"); | ||
1081 | |||
1082 | state->clk = clk_get(&pdev->dev, NULL); | ||
1083 | if (IS_ERR(state->clk)) | ||
1084 | state->clk = NULL; | ||
1085 | |||
1086 | /* do not call pdata->uart_enable here since uart_init may still | ||
1087 | * need to do some initialization before uart_enable can work. | ||
1088 | * So, only try to manage the clock during init. | ||
1089 | */ | ||
1090 | if (state->clk) | ||
1091 | clk_enable(state->clk); | ||
1092 | |||
1093 | if (pdata->uart_init) { | ||
1094 | ret = pdata->uart_init(pdev); | ||
1095 | if (ret) | ||
1096 | goto err_uart_init; | ||
1097 | } | ||
1098 | |||
1099 | debug_printf_nfiq(state, "<hit enter %sto activate fiq debugger>\n", | ||
1100 | state->no_sleep ? "" : "twice "); | ||
1101 | |||
1102 | if (debug_have_fiq(state)) { | ||
1103 | state->handler.fiq = debug_fiq; | ||
1104 | state->handler.resume = debug_resume; | ||
1105 | ret = fiq_glue_register_handler(&state->handler); | ||
1106 | if (ret) { | ||
1107 | pr_err("%s: could not install fiq handler\n", __func__); | ||
1108 | goto err_register_fiq; | ||
1109 | } | ||
1110 | |||
1111 | pdata->fiq_enable(pdev, state->fiq, 1); | ||
1112 | } else { | ||
1113 | ret = request_irq(state->uart_irq, debug_uart_irq, | ||
1114 | IRQF_NO_SUSPEND, "debug", state); | ||
1115 | if (ret) { | ||
1116 | pr_err("%s: could not install irq handler\n", __func__); | ||
1117 | goto err_register_irq; | ||
1118 | } | ||
1119 | |||
1120 | /* for irq-only mode, we want this irq to wake us up, if it | ||
1121 | * can. | ||
1122 | */ | ||
1123 | enable_irq_wake(state->uart_irq); | ||
1124 | } | ||
1125 | |||
1126 | if (state->clk) | ||
1127 | clk_disable(state->clk); | ||
1128 | |||
1129 | if (state->signal_irq >= 0) { | ||
1130 | ret = request_irq(state->signal_irq, debug_signal_irq, | ||
1131 | IRQF_TRIGGER_RISING, "debug-signal", state); | ||
1132 | if (ret) | ||
1133 | pr_err("serial_debugger: could not install signal_irq"); | ||
1134 | } | ||
1135 | |||
1136 | if (state->wakeup_irq >= 0) { | ||
1137 | ret = request_irq(state->wakeup_irq, wakeup_irq_handler, | ||
1138 | IRQF_TRIGGER_FALLING | IRQF_DISABLED, | ||
1139 | "debug-wakeup", state); | ||
1140 | if (ret) { | ||
1141 | pr_err("serial_debugger: " | ||
1142 | "could not install wakeup irq\n"); | ||
1143 | state->wakeup_irq = -1; | ||
1144 | } else { | ||
1145 | ret = enable_irq_wake(state->wakeup_irq); | ||
1146 | if (ret) { | ||
1147 | pr_err("serial_debugger: " | ||
1148 | "could not enable wakeup\n"); | ||
1149 | state->wakeup_irq_no_set_wake = true; | ||
1150 | } | ||
1151 | } | ||
1152 | } | ||
1153 | if (state->no_sleep) | ||
1154 | handle_wakeup(state); | ||
1155 | |||
1156 | #if defined(CONFIG_FIQ_DEBUGGER_CONSOLE) | ||
1157 | state->console = fiq_debugger_console; | ||
1158 | register_console(&state->console); | ||
1159 | fiq_debugger_tty_init(state); | ||
1160 | #endif | ||
1161 | return 0; | ||
1162 | |||
1163 | err_register_irq: | ||
1164 | err_register_fiq: | ||
1165 | if (pdata->uart_free) | ||
1166 | pdata->uart_free(pdev); | ||
1167 | err_uart_init: | ||
1168 | if (state->clk) | ||
1169 | clk_disable(state->clk); | ||
1170 | if (state->clk) | ||
1171 | clk_put(state->clk); | ||
1172 | wake_lock_destroy(&state->debugger_wake_lock); | ||
1173 | platform_set_drvdata(pdev, NULL); | ||
1174 | kfree(state); | ||
1175 | return ret; | ||
1176 | } | ||
1177 | |||
/* System sleep hooks for the debugger uart. */
static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
	.suspend	= fiq_debugger_dev_suspend,
	.resume		= fiq_debugger_dev_resume,
};

/* Matched against platform devices named "fiq_debugger". */
static struct platform_driver fiq_debugger_driver = {
	.probe	= fiq_debugger_probe,
	.driver	= {
		.name	= "fiq_debugger",
		.pm	= &fiq_debugger_dev_pm_ops,
	},
};
1190 | |||
/* Register the driver early (postcore) so the debugger is available
 * before most other subsystems come up. */
static int __init fiq_debugger_init(void)
{
	return platform_driver_register(&fiq_debugger_driver);
}

postcore_initcall(fiq_debugger_init);
diff --git a/arch/arm/common/fiq_debugger_ringbuf.h b/arch/arm/common/fiq_debugger_ringbuf.h new file mode 100644 index 00000000000..2649b558108 --- /dev/null +++ b/arch/arm/common/fiq_debugger_ringbuf.h | |||
@@ -0,0 +1,94 @@ | |||
1 | /* | ||
2 | * arch/arm/common/fiq_debugger_ringbuf.c | ||
3 | * | ||
4 | * simple lockless ringbuffer | ||
5 | * | ||
6 | * Copyright (C) 2010 Google, Inc. | ||
7 | * | ||
8 | * This software is licensed under the terms of the GNU General Public | ||
9 | * License version 2, as published by the Free Software Foundation, and | ||
10 | * may be copied, distributed, and modified under those terms. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | */ | ||
17 | |||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/slab.h> | ||
20 | |||
/* Single-producer/single-consumer byte ring.  head is advanced only by
 * the producer (fiq), tail only by the consumer (irq); one slot is kept
 * free to distinguish full from empty. */
struct fiq_debugger_ringbuf {
	int len;	/* capacity in bytes (usable: len - 1) */
	int head;	/* next write position */
	int tail;	/* next read position */
	u8 buf[];	/* flexible array of len bytes */
};
27 | |||
28 | |||
/* Allocate an empty ring buffer of @len bytes; returns NULL on OOM. */
static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
{
	struct fiq_debugger_ringbuf *rbuf;

	rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
	if (rbuf == NULL)
		return NULL;

	rbuf->len = len;
	rbuf->head = 0;
	rbuf->tail = 0;
	/* publish the initialized indices before the buffer is shared */
	smp_mb();

	return rbuf;
}
44 | |||
/* Release a ring buffer allocated by fiq_debugger_ringbuf_alloc();
 * safe on NULL (kfree tolerates it). */
static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
{
	kfree(rbuf);
}
49 | |||
50 | static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf) | ||
51 | { | ||
52 | int level = rbuf->head - rbuf->tail; | ||
53 | |||
54 | if (level < 0) | ||
55 | level = rbuf->len + level; | ||
56 | |||
57 | return level; | ||
58 | } | ||
59 | |||
/* Free space in bytes; one slot is reserved to tell full from empty. */
static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
{
	return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
}
64 | |||
/* Read byte @i positions past the tail without consuming it.
 * Caller must ensure i < level. */
static inline u8
fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
{
	return rbuf->buf[(rbuf->tail + i) % rbuf->len];
}
70 | |||
71 | static inline int | ||
72 | fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count) | ||
73 | { | ||
74 | count = min(count, fiq_debugger_ringbuf_level(rbuf)); | ||
75 | |||
76 | rbuf->tail = (rbuf->tail + count) % rbuf->len; | ||
77 | smp_mb(); | ||
78 | |||
79 | return count; | ||
80 | } | ||
81 | |||
/* Append one byte; returns 1 on success, 0 if the buffer is full.
 * The byte is stored before head is advanced so the consumer never
 * observes an unwritten slot. */
static inline int
fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
{
	if (fiq_debugger_ringbuf_room(rbuf) == 0)
		return 0;

	rbuf->buf[rbuf->head] = datum;
	smp_mb();
	rbuf->head = (rbuf->head + 1) % rbuf->len;
	smp_mb();

	return 1;
}
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S new file mode 100644 index 00000000000..9e3455a09f8 --- /dev/null +++ b/arch/arm/common/fiq_glue.S | |||
@@ -0,0 +1,111 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2008 Google, Inc. | ||
3 | * | ||
4 | * This software is licensed under the terms of the GNU General Public | ||
5 | * License version 2, as published by the Free Software Foundation, and | ||
6 | * may be copied, distributed, and modified under those terms. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/linkage.h> | ||
16 | #include <asm/assembler.h> | ||
17 | |||
18 | .text | ||
19 | |||
20 | .global fiq_glue_end | ||
21 | |||
22 | /* fiq stack: r0-r15,cpsr,spsr of interrupted mode */ | ||
23 | |||
24 | ENTRY(fiq_glue) | ||
25 | /* store pc, cpsr from previous mode */ | ||
26 | mrs r12, spsr | ||
27 | sub r11, lr, #4 | ||
28 | subs r10, #1 | ||
29 | bne nested_fiq | ||
30 | |||
31 | stmfd sp!, {r11-r12, lr} | ||
32 | |||
33 | /* store r8-r14 from previous mode */ | ||
34 | sub sp, sp, #(7 * 4) | ||
35 | stmia sp, {r8-r14}^ | ||
36 | nop | ||
37 | |||
38 | /* store r0-r7 from previous mode */ | ||
39 | stmfd sp!, {r0-r7} | ||
40 | |||
41 | /* setup func(data,regs) arguments */ | ||
42 | mov r0, r9 | ||
43 | mov r1, sp | ||
44 | mov r3, r8 | ||
45 | |||
46 | mov r7, sp | ||
47 | |||
48 | /* Get sp and lr from non-user modes */ | ||
49 | and r4, r12, #MODE_MASK | ||
50 | cmp r4, #USR_MODE | ||
51 | beq fiq_from_usr_mode | ||
52 | |||
53 | mov r7, sp | ||
54 | orr r4, r4, #(PSR_I_BIT | PSR_F_BIT) | ||
55 | msr cpsr_c, r4 | ||
56 | str sp, [r7, #(4 * 13)] | ||
57 | str lr, [r7, #(4 * 14)] | ||
58 | mrs r5, spsr | ||
59 | str r5, [r7, #(4 * 17)] | ||
60 | |||
61 | cmp r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) | ||
62 | /* use fiq stack if we reenter this mode */ | ||
63 | subne sp, r7, #(4 * 3) | ||
64 | |||
65 | fiq_from_usr_mode: | ||
66 | msr cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT) | ||
67 | mov r2, sp | ||
68 | sub sp, r7, #12 | ||
69 | stmfd sp!, {r2, ip, lr} | ||
70 | /* call func(data,regs) */ | ||
71 | blx r3 | ||
72 | ldmfd sp, {r2, ip, lr} | ||
73 | mov sp, r2 | ||
74 | |||
75 | /* restore/discard saved state */ | ||
76 | cmp r4, #USR_MODE | ||
77 | beq fiq_from_usr_mode_exit | ||
78 | |||
79 | msr cpsr_c, r4 | ||
80 | ldr sp, [r7, #(4 * 13)] | ||
81 | ldr lr, [r7, #(4 * 14)] | ||
82 | msr spsr_cxsf, r5 | ||
83 | |||
84 | fiq_from_usr_mode_exit: | ||
85 | msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) | ||
86 | |||
87 | ldmfd sp!, {r0-r7} | ||
88 | add sp, sp, #(7 * 4) | ||
89 | ldmfd sp!, {r11-r12, lr} | ||
90 | exit_fiq: | ||
91 | msr spsr_cxsf, r12 | ||
92 | add r10, #1 | ||
93 | movs pc, r11 | ||
94 | |||
95 | nested_fiq: | ||
96 | orr r12, r12, #(PSR_F_BIT) | ||
97 | b exit_fiq | ||
98 | |||
99 | fiq_glue_end: | ||
100 | |||
101 | ENTRY(fiq_glue_setup) /* func, data, sp */ | ||
102 | mrs r3, cpsr | ||
103 | msr cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT) | ||
104 | movs r8, r0 | ||
105 | mov r9, r1 | ||
106 | mov sp, r2 | ||
107 | moveq r10, #0 | ||
108 | movne r10, #1 | ||
109 | msr cpsr_c, r3 | ||
110 | bx lr | ||
111 | |||
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c new file mode 100644 index 00000000000..59586861a63 --- /dev/null +++ b/arch/arm/common/fiq_glue_setup.c | |||
@@ -0,0 +1,155 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Google, Inc. | ||
3 | * | ||
4 | * This software is licensed under the terms of the GNU General Public | ||
5 | * License version 2, as published by the Free Software Foundation, and | ||
6 | * may be copied, distributed, and modified under those terms. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/percpu.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/syscore_ops.h> | ||
18 | #include <asm/cpu_pm.h> | ||
19 | #include <asm/fiq.h> | ||
20 | #include <asm/fiq_glue.h> | ||
21 | |||
22 | extern unsigned char fiq_glue, fiq_glue_end; | ||
23 | extern void fiq_glue_setup(void *func, void *data, void *sp); | ||
24 | |||
25 | static struct fiq_handler fiq_debbuger_fiq_handler = { | ||
26 | .name = "fiq_glue", | ||
27 | }; | ||
28 | DEFINE_PER_CPU(void *, fiq_stack); | ||
29 | static struct fiq_glue_handler *current_handler; | ||
30 | static DEFINE_MUTEX(fiq_glue_lock); | ||
31 | |||
32 | static void fiq_glue_setup_helper(void *info) | ||
33 | { | ||
34 | struct fiq_glue_handler *handler = info; | ||
35 | fiq_glue_setup(handler->fiq, handler, | ||
36 | __get_cpu_var(fiq_stack) + THREAD_START_SP); | ||
37 | } | ||
38 | |||
39 | int fiq_glue_register_handler(struct fiq_glue_handler *handler) | ||
40 | { | ||
41 | int ret; | ||
42 | int cpu; | ||
43 | |||
44 | if (!handler || !handler->fiq) | ||
45 | return -EINVAL; | ||
46 | |||
47 | mutex_lock(&fiq_glue_lock); | ||
48 | if (fiq_stack) { | ||
49 | ret = -EBUSY; | ||
50 | goto err_busy; | ||
51 | } | ||
52 | |||
53 | for_each_possible_cpu(cpu) { | ||
54 | void *stack; | ||
55 | stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER); | ||
56 | if (WARN_ON(!stack)) { | ||
57 | ret = -ENOMEM; | ||
58 | goto err_alloc_fiq_stack; | ||
59 | } | ||
60 | per_cpu(fiq_stack, cpu) = stack; | ||
61 | } | ||
62 | |||
63 | ret = claim_fiq(&fiq_debbuger_fiq_handler); | ||
64 | if (WARN_ON(ret)) | ||
65 | goto err_claim_fiq; | ||
66 | |||
67 | current_handler = handler; | ||
68 | on_each_cpu(fiq_glue_setup_helper, handler, true); | ||
69 | set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue); | ||
70 | |||
71 | mutex_unlock(&fiq_glue_lock); | ||
72 | return 0; | ||
73 | |||
74 | err_claim_fiq: | ||
75 | err_alloc_fiq_stack: | ||
76 | for_each_possible_cpu(cpu) { | ||
77 | __free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER); | ||
78 | per_cpu(fiq_stack, cpu) = NULL; | ||
79 | } | ||
80 | err_busy: | ||
81 | mutex_unlock(&fiq_glue_lock); | ||
82 | return ret; | ||
83 | } | ||
84 | |||
85 | /** | ||
86 | * fiq_glue_resume - Restore fiqs after suspend or low power idle states | ||
87 | * | ||
88 | * This must be called before calling local_fiq_enable after returning from a | ||
89 | * power state where the fiq mode registers were lost. If a driver provided | ||
90 | * a resume hook when it registered the handler it will be called. | ||
91 | */ | ||
92 | |||
93 | void fiq_glue_resume(void) | ||
94 | { | ||
95 | if (!current_handler) | ||
96 | return; | ||
97 | fiq_glue_setup(current_handler->fiq, current_handler, | ||
98 | __get_cpu_var(fiq_stack) + THREAD_START_SP); | ||
99 | if (current_handler->resume) | ||
100 | current_handler->resume(current_handler); | ||
101 | } | ||
102 | |||
103 | static int fiq_glue_cpu_pm_notify(struct notifier_block *self, unsigned long cmd, | ||
104 | void *v) | ||
105 | { | ||
106 | switch (cmd) { | ||
107 | case CPU_PM_ENTER: | ||
108 | //pr_info("cpu pm enter %d\n", smp_processor_id()); | ||
109 | local_fiq_disable(); | ||
110 | break; | ||
111 | case CPU_PM_ENTER_FAILED: | ||
112 | case CPU_PM_EXIT: | ||
113 | fiq_glue_resume(); | ||
114 | local_fiq_enable(); | ||
115 | //pr_info("cpu pm exit %d\n", smp_processor_id()); | ||
116 | break; | ||
117 | } | ||
118 | return NOTIFY_OK; | ||
119 | } | ||
120 | |||
121 | static struct notifier_block fiq_glue_cpu_pm_notifier = { | ||
122 | .notifier_call = fiq_glue_cpu_pm_notify, | ||
123 | }; | ||
124 | |||
125 | static int __init fiq_glue_cpu_pm_init(void) | ||
126 | { | ||
127 | return cpu_pm_register_notifier(&fiq_glue_cpu_pm_notifier); | ||
128 | } | ||
129 | core_initcall(fiq_glue_cpu_pm_init); | ||
130 | |||
131 | #ifdef CONFIG_PM | ||
132 | static int fiq_glue_syscore_suspend(void) | ||
133 | { | ||
134 | local_fiq_disable(); | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static void fiq_glue_syscore_resume(void) | ||
139 | { | ||
140 | fiq_glue_resume(); | ||
141 | local_fiq_enable(); | ||
142 | } | ||
143 | |||
144 | static struct syscore_ops fiq_glue_syscore_ops = { | ||
145 | .suspend = fiq_glue_syscore_suspend, | ||
146 | .resume = fiq_glue_syscore_resume, | ||
147 | }; | ||
148 | |||
149 | static int __init fiq_glue_syscore_init(void) | ||
150 | { | ||
151 | register_syscore_ops(&fiq_glue_syscore_ops); | ||
152 | return 0; | ||
153 | } | ||
154 | late_initcall(fiq_glue_syscore_init); | ||
155 | #endif | ||
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 4ddd0a6ac7f..05cd423c575 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/cpumask.h> | 29 | #include <linux/cpumask.h> |
30 | #include <linux/io.h> | 30 | #include <linux/io.h> |
31 | 31 | ||
32 | #include <asm/cpu_pm.h> | ||
32 | #include <asm/irq.h> | 33 | #include <asm/irq.h> |
33 | #include <asm/mach/irq.h> | 34 | #include <asm/mach/irq.h> |
34 | #include <asm/hardware/gic.h> | 35 | #include <asm/hardware/gic.h> |
@@ -38,12 +39,6 @@ static DEFINE_SPINLOCK(irq_controller_lock); | |||
38 | /* Address of GIC 0 CPU interface */ | 39 | /* Address of GIC 0 CPU interface */ |
39 | void __iomem *gic_cpu_base_addr __read_mostly; | 40 | void __iomem *gic_cpu_base_addr __read_mostly; |
40 | 41 | ||
41 | struct gic_chip_data { | ||
42 | unsigned int irq_offset; | ||
43 | void __iomem *dist_base; | ||
44 | void __iomem *cpu_base; | ||
45 | }; | ||
46 | |||
47 | /* | 42 | /* |
48 | * Supported arch specific GIC irq extension. | 43 | * Supported arch specific GIC irq extension. |
49 | * Default make them NULL. | 44 | * Default make them NULL. |
@@ -179,22 +174,32 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
179 | { | 174 | { |
180 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | 175 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
181 | unsigned int shift = (d->irq % 4) * 8; | 176 | unsigned int shift = (d->irq % 4) * 8; |
182 | unsigned int cpu = cpumask_first(mask_val); | 177 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); |
183 | u32 val, mask, bit; | 178 | u32 val, mask, bit; |
179 | #ifdef CONFIG_GIC_SET_MULTIPLE_CPUS | ||
180 | struct irq_desc *desc = irq_to_desc(d->irq); | ||
181 | #endif | ||
184 | 182 | ||
185 | if (cpu >= 8) | 183 | if (cpu >= 8 || cpu >= nr_cpu_ids) |
186 | return -EINVAL; | 184 | return -EINVAL; |
187 | 185 | ||
188 | mask = 0xff << shift; | 186 | mask = 0xff << shift; |
189 | bit = 1 << (cpu + shift); | 187 | bit = 1 << (cpu + shift); |
190 | 188 | ||
191 | spin_lock(&irq_controller_lock); | 189 | spin_lock(&irq_controller_lock); |
192 | d->node = cpu; | ||
193 | val = readl_relaxed(reg) & ~mask; | 190 | val = readl_relaxed(reg) & ~mask; |
194 | writel_relaxed(val | bit, reg); | 191 | val |= bit; |
192 | #ifdef CONFIG_GIC_SET_MULTIPLE_CPUS | ||
193 | if (desc && desc->affinity_hint) { | ||
194 | struct cpumask mask_hint; | ||
195 | if (cpumask_and(&mask_hint, desc->affinity_hint, mask_val)) | ||
196 | val |= (*cpumask_bits(&mask_hint) << shift) & mask; | ||
197 | } | ||
198 | #endif | ||
199 | writel_relaxed(val, reg); | ||
195 | spin_unlock(&irq_controller_lock); | 200 | spin_unlock(&irq_controller_lock); |
196 | 201 | ||
197 | return 0; | 202 | return IRQ_SET_MASK_OK; |
198 | } | 203 | } |
199 | #endif | 204 | #endif |
200 | 205 | ||
@@ -283,6 +288,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic, | |||
283 | if (gic_irqs > 1020) | 288 | if (gic_irqs > 1020) |
284 | gic_irqs = 1020; | 289 | gic_irqs = 1020; |
285 | 290 | ||
291 | gic->gic_irqs = gic_irqs; | ||
292 | |||
286 | /* | 293 | /* |
287 | * Set all global interrupts to be level triggered, active low. | 294 | * Set all global interrupts to be level triggered, active low. |
288 | */ | 295 | */ |
@@ -350,6 +357,180 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) | |||
350 | writel_relaxed(1, base + GIC_CPU_CTRL); | 357 | writel_relaxed(1, base + GIC_CPU_CTRL); |
351 | } | 358 | } |
352 | 359 | ||
360 | /* | ||
361 | * Saves the GIC distributor registers during suspend or idle. Must be called | ||
362 | * with interrupts disabled but before powering down the GIC. After calling | ||
363 | * this function, no interrupts will be delivered by the GIC, and another | ||
364 | * platform-specific wakeup source must be enabled. | ||
365 | */ | ||
366 | static void gic_dist_save(unsigned int gic_nr) | ||
367 | { | ||
368 | unsigned int gic_irqs; | ||
369 | void __iomem *dist_base; | ||
370 | int i; | ||
371 | |||
372 | if (gic_nr >= MAX_GIC_NR) | ||
373 | BUG(); | ||
374 | |||
375 | gic_irqs = gic_data[gic_nr].gic_irqs; | ||
376 | dist_base = gic_data[gic_nr].dist_base; | ||
377 | |||
378 | if (!dist_base) | ||
379 | return; | ||
380 | |||
381 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) | ||
382 | gic_data[gic_nr].saved_spi_conf[i] = | ||
383 | readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | ||
384 | |||
385 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
386 | gic_data[gic_nr].saved_spi_pri[i] = | ||
387 | readl_relaxed(dist_base + GIC_DIST_PRI + i * 4); | ||
388 | |||
389 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
390 | gic_data[gic_nr].saved_spi_target[i] = | ||
391 | readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); | ||
392 | |||
393 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | ||
394 | gic_data[gic_nr].saved_spi_enable[i] = | ||
395 | readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
396 | |||
397 | writel_relaxed(0, dist_base + GIC_DIST_CTRL); | ||
398 | } | ||
399 | |||
400 | /* | ||
401 | * Restores the GIC distributor registers during resume or when coming out of | ||
402 | * idle. Must be called before enabling interrupts. If a level interrupt | ||
403 | * that occured while the GIC was suspended is still present, it will be | ||
404 | * handled normally, but any edge interrupts that occured will not be seen by | ||
405 | * the GIC and need to be handled by the platform-specific wakeup source. | ||
406 | */ | ||
407 | static void gic_dist_restore(unsigned int gic_nr) | ||
408 | { | ||
409 | unsigned int gic_irqs; | ||
410 | unsigned int i; | ||
411 | void __iomem *dist_base; | ||
412 | |||
413 | if (gic_nr >= MAX_GIC_NR) | ||
414 | BUG(); | ||
415 | |||
416 | gic_irqs = gic_data[gic_nr].gic_irqs; | ||
417 | dist_base = gic_data[gic_nr].dist_base; | ||
418 | |||
419 | if (!dist_base) | ||
420 | return; | ||
421 | |||
422 | writel_relaxed(0, dist_base + GIC_DIST_CTRL); | ||
423 | |||
424 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) | ||
425 | writel_relaxed(gic_data[gic_nr].saved_spi_conf[i], | ||
426 | dist_base + GIC_DIST_CONFIG + i * 4); | ||
427 | |||
428 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
429 | writel_relaxed(gic_data[gic_nr].saved_spi_pri[i], | ||
430 | dist_base + GIC_DIST_PRI + i * 4); | ||
431 | |||
432 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) | ||
433 | writel_relaxed(gic_data[gic_nr].saved_spi_target[i], | ||
434 | dist_base + GIC_DIST_TARGET + i * 4); | ||
435 | |||
436 | for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) | ||
437 | writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], | ||
438 | dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
439 | |||
440 | writel_relaxed(1, dist_base + GIC_DIST_CTRL); | ||
441 | } | ||
442 | |||
443 | static void gic_cpu_save(unsigned int gic_nr) | ||
444 | { | ||
445 | int i; | ||
446 | u32 *ptr; | ||
447 | void __iomem *dist_base; | ||
448 | void __iomem *cpu_base; | ||
449 | |||
450 | if (gic_nr >= MAX_GIC_NR) | ||
451 | BUG(); | ||
452 | |||
453 | dist_base = gic_data[gic_nr].dist_base; | ||
454 | cpu_base = gic_data[gic_nr].cpu_base; | ||
455 | |||
456 | if (!dist_base || !cpu_base) | ||
457 | return; | ||
458 | |||
459 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | ||
460 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | ||
461 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
462 | |||
463 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | ||
464 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | ||
465 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); | ||
466 | |||
467 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_pri); | ||
468 | for (i = 0; i < DIV_ROUND_UP(32, 4); i++) | ||
469 | ptr[i] = readl_relaxed(dist_base + GIC_DIST_PRI + i * 4); | ||
470 | } | ||
471 | |||
472 | static void gic_cpu_restore(unsigned int gic_nr) | ||
473 | { | ||
474 | int i; | ||
475 | u32 *ptr; | ||
476 | void __iomem *dist_base; | ||
477 | void __iomem *cpu_base; | ||
478 | |||
479 | if (gic_nr >= MAX_GIC_NR) | ||
480 | BUG(); | ||
481 | |||
482 | dist_base = gic_data[gic_nr].dist_base; | ||
483 | cpu_base = gic_data[gic_nr].cpu_base; | ||
484 | |||
485 | if (!dist_base || !cpu_base) | ||
486 | return; | ||
487 | |||
488 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); | ||
489 | for (i = 0; i < DIV_ROUND_UP(32, 32); i++) | ||
490 | writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); | ||
491 | |||
492 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); | ||
493 | for (i = 0; i < DIV_ROUND_UP(32, 16); i++) | ||
494 | writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); | ||
495 | |||
496 | ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_pri); | ||
497 | for (i = 0; i < DIV_ROUND_UP(32, 4); i++) | ||
498 | writel_relaxed(ptr[i], dist_base + GIC_DIST_PRI + i * 4); | ||
499 | |||
500 | writel_relaxed(0xf0, cpu_base + GIC_CPU_PRIMASK); | ||
501 | writel_relaxed(1, cpu_base + GIC_CPU_CTRL); | ||
502 | } | ||
503 | |||
504 | static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) | ||
505 | { | ||
506 | int i; | ||
507 | |||
508 | for (i = 0; i < MAX_GIC_NR; i++) { | ||
509 | switch (cmd) { | ||
510 | case CPU_PM_ENTER: | ||
511 | gic_cpu_save(i); | ||
512 | break; | ||
513 | case CPU_PM_ENTER_FAILED: | ||
514 | case CPU_PM_EXIT: | ||
515 | gic_cpu_restore(i); | ||
516 | break; | ||
517 | case CPU_COMPLEX_PM_ENTER: | ||
518 | gic_dist_save(i); | ||
519 | break; | ||
520 | case CPU_COMPLEX_PM_ENTER_FAILED: | ||
521 | case CPU_COMPLEX_PM_EXIT: | ||
522 | gic_dist_restore(i); | ||
523 | break; | ||
524 | } | ||
525 | } | ||
526 | |||
527 | return NOTIFY_OK; | ||
528 | } | ||
529 | |||
530 | static struct notifier_block gic_notifier_block = { | ||
531 | .notifier_call = gic_notifier, | ||
532 | }; | ||
533 | |||
353 | void __init gic_init(unsigned int gic_nr, unsigned int irq_start, | 534 | void __init gic_init(unsigned int gic_nr, unsigned int irq_start, |
354 | void __iomem *dist_base, void __iomem *cpu_base) | 535 | void __iomem *dist_base, void __iomem *cpu_base) |
355 | { | 536 | { |
@@ -365,8 +546,23 @@ void __init gic_init(unsigned int gic_nr, unsigned int irq_start, | |||
365 | if (gic_nr == 0) | 546 | if (gic_nr == 0) |
366 | gic_cpu_base_addr = cpu_base; | 547 | gic_cpu_base_addr = cpu_base; |
367 | 548 | ||
549 | gic_chip.flags |= gic_arch_extn.flags; | ||
368 | gic_dist_init(gic, irq_start); | 550 | gic_dist_init(gic, irq_start); |
369 | gic_cpu_init(gic); | 551 | gic_cpu_init(gic); |
552 | |||
553 | gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, | ||
554 | sizeof(u32)); | ||
555 | BUG_ON(!gic->saved_ppi_enable); | ||
556 | |||
557 | gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, | ||
558 | sizeof(u32)); | ||
559 | BUG_ON(!gic->saved_ppi_conf); | ||
560 | |||
561 | gic->saved_ppi_pri = __alloc_percpu(DIV_ROUND_UP(32, 4) * 4, | ||
562 | sizeof(u32)); | ||
563 | BUG_ON(!gic->saved_ppi_pri); | ||
564 | |||
565 | cpu_pm_register_notifier(&gic_notifier_block); | ||
370 | } | 566 | } |
371 | 567 | ||
372 | void __cpuinit gic_secondary_init(unsigned int gic_nr) | 568 | void __cpuinit gic_secondary_init(unsigned int gic_nr) |
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 7a21927c52e..a7934ba9e1d 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c | |||
@@ -144,7 +144,7 @@ void it8152_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
144 | } | 144 | } |
145 | 145 | ||
146 | /* mapping for on-chip devices */ | 146 | /* mapping for on-chip devices */ |
147 | int __init it8152_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin) | 147 | int __init it8152_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
148 | { | 148 | { |
149 | if ((dev->vendor == PCI_VENDOR_ID_ITE) && | 149 | if ((dev->vendor == PCI_VENDOR_ID_ITE) && |
150 | (dev->device == PCI_DEVICE_ID_ITE_8152)) { | 150 | (dev->device == PCI_DEVICE_ID_ITE_8152)) { |
@@ -243,6 +243,12 @@ static struct resource it8152_mem = { | |||
243 | * ITE8152 chip can address up to 64MByte, so all the devices | 243 | * ITE8152 chip can address up to 64MByte, so all the devices |
244 | * connected to ITE8152 (PCI and USB) should have limited DMA window | 244 | * connected to ITE8152 (PCI and USB) should have limited DMA window |
245 | */ | 245 | */ |
246 | static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
247 | { | ||
248 | dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", | ||
249 | __func__, dma_addr, size); | ||
250 | return (dma_addr + size - PHYS_OFFSET) >= SZ_64M; | ||
251 | } | ||
246 | 252 | ||
247 | /* | 253 | /* |
248 | * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all | 254 | * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all |
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev) | |||
254 | if (dev->dma_mask) | 260 | if (dev->dma_mask) |
255 | *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; | 261 | *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; |
256 | dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; | 262 | dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; |
257 | dmabounce_register_dev(dev, 2048, 4096); | 263 | dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce); |
258 | } | 264 | } |
259 | return 0; | 265 | return 0; |
260 | } | 266 | } |
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev) | |||
267 | return 0; | 273 | return 0; |
268 | } | 274 | } |
269 | 275 | ||
270 | int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
271 | { | ||
272 | dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", | ||
273 | __func__, dma_addr, size); | ||
274 | return (dev->bus == &pci_bus_type) && | ||
275 | ((dma_addr + size - PHYS_OFFSET) >= SZ_64M); | ||
276 | } | ||
277 | |||
278 | int dma_set_coherent_mask(struct device *dev, u64 mask) | 276 | int dma_set_coherent_mask(struct device *dev, u64 mask) |
279 | { | 277 | { |
280 | if (mask >= PHYS_OFFSET + SZ_64M - 1) | 278 | if (mask >= PHYS_OFFSET + SZ_64M - 1) |
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 9c49a46a2b7..0569de6acfb 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c | |||
@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac, | |||
579 | 579 | ||
580 | sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; | 580 | sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; |
581 | } | 581 | } |
582 | #endif | ||
582 | 583 | ||
584 | #ifdef CONFIG_DMABOUNCE | ||
585 | /* | ||
586 | * According to the "Intel StrongARM SA-1111 Microprocessor Companion | ||
587 | * Chip Specification Update" (June 2000), erratum #7, there is a | ||
588 | * significant bug in the SA1111 SDRAM shared memory controller. If | ||
589 | * an access to a region of memory above 1MB relative to the bank base, | ||
590 | * it is important that address bit 10 _NOT_ be asserted. Depending | ||
591 | * on the configuration of the RAM, bit 10 may correspond to one | ||
592 | * of several different (processor-relative) address bits. | ||
593 | * | ||
594 | * This routine only identifies whether or not a given DMA address | ||
595 | * is susceptible to the bug. | ||
596 | * | ||
597 | * This should only get called for sa1111_device types due to the | ||
598 | * way we configure our device dma_masks. | ||
599 | */ | ||
600 | static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) | ||
601 | { | ||
602 | /* | ||
603 | * Section 4.6 of the "Intel StrongARM SA-1111 Development Module | ||
604 | * User's Guide" mentions that jumpers R51 and R52 control the | ||
605 | * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or | ||
606 | * SDRAM bank 1 on Neponset). The default configuration selects | ||
607 | * Assabet, so any address in bank 1 is necessarily invalid. | ||
608 | */ | ||
609 | return (machine_is_assabet() || machine_is_pfs168()) && | ||
610 | (addr >= 0xc8000000 || (addr + size) >= 0xc8000000); | ||
611 | } | ||
583 | #endif | 612 | #endif |
584 | 613 | ||
585 | static void sa1111_dev_release(struct device *_dev) | 614 | static void sa1111_dev_release(struct device *_dev) |
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, | |||
644 | dev->dev.dma_mask = &dev->dma_mask; | 673 | dev->dev.dma_mask = &dev->dma_mask; |
645 | 674 | ||
646 | if (dev->dma_mask != 0xffffffffUL) { | 675 | if (dev->dma_mask != 0xffffffffUL) { |
647 | ret = dmabounce_register_dev(&dev->dev, 1024, 4096); | 676 | ret = dmabounce_register_dev(&dev->dev, 1024, 4096, |
677 | sa1111_needs_bounce); | ||
648 | if (ret) { | 678 | if (ret) { |
649 | dev_err(&dev->dev, "SA1111: Failed to register" | 679 | dev_err(&dev->dev, "SA1111: Failed to register" |
650 | " with dmabounce\n"); | 680 | " with dmabounce\n"); |
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip) | |||
818 | kfree(sachip); | 848 | kfree(sachip); |
819 | } | 849 | } |
820 | 850 | ||
821 | /* | ||
822 | * According to the "Intel StrongARM SA-1111 Microprocessor Companion | ||
823 | * Chip Specification Update" (June 2000), erratum #7, there is a | ||
824 | * significant bug in the SA1111 SDRAM shared memory controller. If | ||
825 | * an access to a region of memory above 1MB relative to the bank base, | ||
826 | * it is important that address bit 10 _NOT_ be asserted. Depending | ||
827 | * on the configuration of the RAM, bit 10 may correspond to one | ||
828 | * of several different (processor-relative) address bits. | ||
829 | * | ||
830 | * This routine only identifies whether or not a given DMA address | ||
831 | * is susceptible to the bug. | ||
832 | * | ||
833 | * This should only get called for sa1111_device types due to the | ||
834 | * way we configure our device dma_masks. | ||
835 | */ | ||
836 | int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) | ||
837 | { | ||
838 | /* | ||
839 | * Section 4.6 of the "Intel StrongARM SA-1111 Development Module | ||
840 | * User's Guide" mentions that jumpers R51 and R52 control the | ||
841 | * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or | ||
842 | * SDRAM bank 1 on Neponset). The default configuration selects | ||
843 | * Assabet, so any address in bank 1 is necessarily invalid. | ||
844 | */ | ||
845 | return ((machine_is_assabet() || machine_is_pfs168()) && | ||
846 | (addr >= 0xc8000000 || (addr + size) >= 0xc8000000)); | ||
847 | } | ||
848 | |||
849 | struct sa1111_save_data { | 851 | struct sa1111_save_data { |
850 | unsigned int skcr; | 852 | unsigned int skcr; |
851 | unsigned int skpcr; | 853 | unsigned int skpcr; |
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c index c11af1e4bad..a07b0e763a8 100644 --- a/arch/arm/common/scoop.c +++ b/arch/arm/common/scoop.c | |||
@@ -193,7 +193,7 @@ static int __devinit scoop_probe(struct platform_device *pdev) | |||
193 | spin_lock_init(&devptr->scoop_lock); | 193 | spin_lock_init(&devptr->scoop_lock); |
194 | 194 | ||
195 | inf = pdev->dev.platform_data; | 195 | inf = pdev->dev.platform_data; |
196 | devptr->base = ioremap(mem->start, mem->end - mem->start + 1); | 196 | devptr->base = ioremap(mem->start, resource_size(mem)); |
197 | 197 | ||
198 | if (!devptr->base) { | 198 | if (!devptr->base) { |
199 | ret = -ENOMEM; | 199 | ret = -ENOMEM; |
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c index 7aa4262ada7..197f81c7735 100644 --- a/arch/arm/common/vic.c +++ b/arch/arm/common/vic.c | |||
@@ -259,7 +259,6 @@ static void __init vic_disable(void __iomem *base) | |||
259 | writel(0, base + VIC_INT_SELECT); | 259 | writel(0, base + VIC_INT_SELECT); |
260 | writel(0, base + VIC_INT_ENABLE); | 260 | writel(0, base + VIC_INT_ENABLE); |
261 | writel(~0, base + VIC_INT_ENABLE_CLEAR); | 261 | writel(~0, base + VIC_INT_ENABLE_CLEAR); |
262 | writel(0, base + VIC_IRQ_STATUS); | ||
263 | writel(0, base + VIC_ITCR); | 262 | writel(0, base + VIC_ITCR); |
264 | writel(~0, base + VIC_INT_SOFT_CLEAR); | 263 | writel(~0, base + VIC_INT_SOFT_CLEAR); |
265 | } | 264 | } |