Diffstat:
-rw-r--r--  arch/sh/boards/mach-migor/setup.c          |  16
-rw-r--r--  arch/sh/boot/compressed/cache.c            |   2
-rw-r--r--  arch/sh/include/asm/cacheflush.h           |   4
-rw-r--r--  arch/sh/include/asm/dma-register.h         |  51
-rw-r--r--  arch/sh/include/asm/dma-sh.h               |  88
-rw-r--r--  arch/sh/include/asm/dmaengine.h            |  93
-rw-r--r--  arch/sh/include/asm/io.h                   |  23
-rw-r--r--  arch/sh/include/asm/mmu.h                  |  31
-rw-r--r--  arch/sh/include/asm/siu.h                  |   2
-rw-r--r--  arch/sh/include/asm/topology.h             |   2
-rw-r--r--  arch/sh/include/cpu-sh3/cpu/dma-register.h |  41
-rw-r--r--  arch/sh/include/cpu-sh3/cpu/dma.h          |  27
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/dma-register.h | 112
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/dma-sh4a.h     |  62
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/dma.h          |  36
-rw-r--r--  arch/sh/include/mach-migor/mach/migor.h    |   1
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7722.c     | 190
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7724.c     | 186
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7780.c     | 134
-rw-r--r--  arch/sh/kernel/cpu/sh4a/setup-sh7785.c     | 134
-rw-r--r--  arch/sh/kernel/hw_breakpoint.c             |  30
-rw-r--r--  arch/sh/kernel/setup.c                     |   3
-rw-r--r--  arch/sh/kernel/time.c                      |   6
-rw-r--r--  arch/sh/lib/libgcc.h                       |   3
-rw-r--r--  arch/sh/mm/ioremap.c                       |  70
-rw-r--r--  arch/sh/mm/ioremap_fixed.c                 |  11
-rw-r--r--  arch/sh/mm/numa.c                          |   3
-rw-r--r--  arch/sh/mm/pmb.c                           | 412
-rw-r--r--  drivers/dma/shdma.c                        | 500
-rw-r--r--  drivers/dma/shdma.h                        |  26
-rw-r--r--  drivers/serial/Kconfig                     |   4
-rw-r--r--  drivers/serial/sh-sci.c                    | 616
-rw-r--r--  include/linux/serial_sci.h                 |   6
-rw-r--r--  sound/soc/sh/siu.h                         |   2
-rw-r--r--  sound/soc/sh/siu_pcm.c                     |   2
35 files changed, 2201 insertions(+), 728 deletions(-)
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index be300aaca6fe..7da0fc94a01e 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -419,6 +419,9 @@ static struct i2c_board_info migor_i2c_devices[] = {
419 I2C_BOARD_INFO("migor_ts", 0x51), 419 I2C_BOARD_INFO("migor_ts", 0x51),
420 .irq = 38, /* IRQ6 */ 420 .irq = 38, /* IRQ6 */
421 }, 421 },
422 {
423 I2C_BOARD_INFO("wm8978", 0x1a),
424 },
422}; 425};
423 426
424static struct i2c_board_info migor_i2c_camera[] = { 427static struct i2c_board_info migor_i2c_camera[] = {
@@ -619,6 +622,19 @@ static int __init migor_devices_setup(void)
619 622
620 platform_resource_setup_memory(&migor_ceu_device, "ceu", 4 << 20); 623 platform_resource_setup_memory(&migor_ceu_device, "ceu", 4 << 20);
621 624
625 /* SIU: Port B */
626 gpio_request(GPIO_FN_SIUBOLR, NULL);
627 gpio_request(GPIO_FN_SIUBOBT, NULL);
628 gpio_request(GPIO_FN_SIUBISLD, NULL);
629 gpio_request(GPIO_FN_SIUBOSLD, NULL);
630 gpio_request(GPIO_FN_SIUMCKB, NULL);
631
632 /*
633 * The original driver sets SIUB OLR/OBT, ILR/IBT, and SIUA OLR/OBT to
634 * output. Need only SIUB, set to output for master mode (table 34.2)
635 */
636 __raw_writew(__raw_readw(PORT_MSELCRA) | 1, PORT_MSELCRA);
637
622 i2c_register_board_info(0, migor_i2c_devices, 638 i2c_register_board_info(0, migor_i2c_devices,
623 ARRAY_SIZE(migor_i2c_devices)); 639 ARRAY_SIZE(migor_i2c_devices));
624 640
diff --git a/arch/sh/boot/compressed/cache.c b/arch/sh/boot/compressed/cache.c
index e27fc74f228c..d0b77b68a4d0 100644
--- a/arch/sh/boot/compressed/cache.c
+++ b/arch/sh/boot/compressed/cache.c
@@ -5,7 +5,7 @@ int cache_control(unsigned int command)
5 5
6 for (i = 0; i < (32 * 1024); i += 32) { 6 for (i = 0; i < (32 * 1024); i += 32) {
7 (void)*p; 7 (void)*p;
8 p += (32 / sizeof (int)); 8 p += (32 / sizeof(int));
9 } 9 }
10 10
11 return 0; 11 return 0;
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index da3ebec921a7..1f4e562c5e8c 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -86,8 +86,8 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
86 struct page *page, unsigned long vaddr, void *dst, const void *src, 86 struct page *page, unsigned long vaddr, void *dst, const void *src,
87 unsigned long len); 87 unsigned long len);
88 88
89#define flush_cache_vmap(start, end) flush_cache_all() 89#define flush_cache_vmap(start, end) local_flush_cache_all(NULL)
90#define flush_cache_vunmap(start, end) flush_cache_all() 90#define flush_cache_vunmap(start, end) local_flush_cache_all(NULL)
91 91
92#define flush_dcache_mmap_lock(mapping) do { } while (0) 92#define flush_dcache_mmap_lock(mapping) do { } while (0)
93#define flush_dcache_mmap_unlock(mapping) do { } while (0) 93#define flush_dcache_mmap_unlock(mapping) do { } while (0)
diff --git a/arch/sh/include/asm/dma-register.h b/arch/sh/include/asm/dma-register.h
new file mode 100644
index 000000000000..51cd78feacff
--- /dev/null
+++ b/arch/sh/include/asm/dma-register.h
@@ -0,0 +1,51 @@
1/*
2 * Common header for the legacy SH DMA driver and the new dmaengine driver
3 *
4 * extracted from arch/sh/include/asm/dma-sh.h:
5 *
6 * Copyright (C) 2000 Takashi YOSHII
7 * Copyright (C) 2003 Paul Mundt
8 *
9 * This file is subject to the terms and conditions of the GNU General Public
10 * License. See the file "COPYING" in the main directory of this archive
11 * for more details.
12 */
13#ifndef DMA_REGISTER_H
14#define DMA_REGISTER_H
15
16/* DMA register */
17#define SAR 0x00
18#define DAR 0x04
19#define TCR 0x08
20#define CHCR 0x0C
21#define DMAOR 0x40
22
23/* DMAOR definitions */
24#define DMAOR_AE 0x00000004
25#define DMAOR_NMIF 0x00000002
26#define DMAOR_DME 0x00000001
27
28/* Definitions for the SuperH DMAC */
29#define REQ_L 0x00000000
30#define REQ_E 0x00080000
31#define RACK_H 0x00000000
32#define RACK_L 0x00040000
33#define ACK_R 0x00000000
34#define ACK_W 0x00020000
35#define ACK_H 0x00000000
36#define ACK_L 0x00010000
37#define DM_INC 0x00004000
38#define DM_DEC 0x00008000
39#define DM_FIX 0x0000c000
40#define SM_INC 0x00001000
41#define SM_DEC 0x00002000
42#define SM_FIX 0x00003000
43#define RS_IN 0x00000200
44#define RS_OUT 0x00000300
45#define TS_BLK 0x00000040
46#define TM_BUR 0x00000020
47#define CHCR_DE 0x00000001
48#define CHCR_TE 0x00000002
49#define CHCR_IE 0x00000004
50
51#endif
diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h
index e934a2e66651..f3acb8e34c6b 100644
--- a/arch/sh/include/asm/dma-sh.h
+++ b/arch/sh/include/asm/dma-sh.h
@@ -11,7 +11,8 @@
11#ifndef __DMA_SH_H 11#ifndef __DMA_SH_H
12#define __DMA_SH_H 12#define __DMA_SH_H
13 13
14#include <asm/dma.h> 14#include <asm/dma-register.h>
15#include <cpu/dma-register.h>
15#include <cpu/dma.h> 16#include <cpu/dma.h>
16 17
17/* DMAOR contorl: The DMAOR access size is different by CPU.*/ 18/* DMAOR contorl: The DMAOR access size is different by CPU.*/
@@ -53,34 +54,6 @@ static int dmte_irq_map[] __maybe_unused = {
53#endif 54#endif
54}; 55};
55 56
56/* Definitions for the SuperH DMAC */
57#define REQ_L 0x00000000
58#define REQ_E 0x00080000
59#define RACK_H 0x00000000
60#define RACK_L 0x00040000
61#define ACK_R 0x00000000
62#define ACK_W 0x00020000
63#define ACK_H 0x00000000
64#define ACK_L 0x00010000
65#define DM_INC 0x00004000
66#define DM_DEC 0x00008000
67#define DM_FIX 0x0000c000
68#define SM_INC 0x00001000
69#define SM_DEC 0x00002000
70#define SM_FIX 0x00003000
71#define RS_IN 0x00000200
72#define RS_OUT 0x00000300
73#define TS_BLK 0x00000040
74#define TM_BUR 0x00000020
75#define CHCR_DE 0x00000001
76#define CHCR_TE 0x00000002
77#define CHCR_IE 0x00000004
78
79/* DMAOR definitions */
80#define DMAOR_AE 0x00000004
81#define DMAOR_NMIF 0x00000002
82#define DMAOR_DME 0x00000001
83
84/* 57/*
85 * Define the default configuration for dual address memory-memory transfer. 58 * Define the default configuration for dual address memory-memory transfer.
86 * The 0x400 value represents auto-request, external->external. 59 * The 0x400 value represents auto-request, external->external.
@@ -111,61 +84,4 @@ static u32 dma_base_addr[] __maybe_unused = {
111#endif 84#endif
112}; 85};
113 86
114/* DMA register */
115#define SAR 0x00
116#define DAR 0x04
117#define TCR 0x08
118#define CHCR 0x0C
119#define DMAOR 0x40
120
121/*
122 * for dma engine
123 *
124 * SuperH DMA mode
125 */
126#define SHDMA_MIX_IRQ (1 << 1)
127#define SHDMA_DMAOR1 (1 << 2)
128#define SHDMA_DMAE1 (1 << 3)
129
130enum sh_dmae_slave_chan_id {
131 SHDMA_SLAVE_SCIF0_TX,
132 SHDMA_SLAVE_SCIF0_RX,
133 SHDMA_SLAVE_SCIF1_TX,
134 SHDMA_SLAVE_SCIF1_RX,
135 SHDMA_SLAVE_SCIF2_TX,
136 SHDMA_SLAVE_SCIF2_RX,
137 SHDMA_SLAVE_SCIF3_TX,
138 SHDMA_SLAVE_SCIF3_RX,
139 SHDMA_SLAVE_SCIF4_TX,
140 SHDMA_SLAVE_SCIF4_RX,
141 SHDMA_SLAVE_SCIF5_TX,
142 SHDMA_SLAVE_SCIF5_RX,
143 SHDMA_SLAVE_SIUA_TX,
144 SHDMA_SLAVE_SIUA_RX,
145 SHDMA_SLAVE_SIUB_TX,
146 SHDMA_SLAVE_SIUB_RX,
147 SHDMA_SLAVE_NUMBER, /* Must stay last */
148};
149
150struct sh_dmae_slave_config {
151 enum sh_dmae_slave_chan_id slave_id;
152 dma_addr_t addr;
153 u32 chcr;
154 char mid_rid;
155};
156
157struct sh_dmae_pdata {
158 unsigned int mode;
159 struct sh_dmae_slave_config *config;
160 int config_num;
161};
162
163struct device;
164
165struct sh_dmae_slave {
166 enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */
167 struct device *dma_dev; /* Set by the platform */
168 struct sh_dmae_slave_config *config; /* Set by the driver */
169};
170
171#endif /* __DMA_SH_H */ 87#endif /* __DMA_SH_H */
diff --git a/arch/sh/include/asm/dmaengine.h b/arch/sh/include/asm/dmaengine.h
new file mode 100644
index 000000000000..bf2f30cf0a27
--- /dev/null
+++ b/arch/sh/include/asm/dmaengine.h
@@ -0,0 +1,93 @@
1/*
2 * Header for the new SH dmaengine driver
3 *
4 * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef ASM_DMAENGINE_H
11#define ASM_DMAENGINE_H
12
13#include <linux/dmaengine.h>
14#include <linux/list.h>
15
16#include <asm/dma-register.h>
17
18#define SH_DMAC_MAX_CHANNELS 6
19
20enum sh_dmae_slave_chan_id {
21 SHDMA_SLAVE_SCIF0_TX,
22 SHDMA_SLAVE_SCIF0_RX,
23 SHDMA_SLAVE_SCIF1_TX,
24 SHDMA_SLAVE_SCIF1_RX,
25 SHDMA_SLAVE_SCIF2_TX,
26 SHDMA_SLAVE_SCIF2_RX,
27 SHDMA_SLAVE_SCIF3_TX,
28 SHDMA_SLAVE_SCIF3_RX,
29 SHDMA_SLAVE_SCIF4_TX,
30 SHDMA_SLAVE_SCIF4_RX,
31 SHDMA_SLAVE_SCIF5_TX,
32 SHDMA_SLAVE_SCIF5_RX,
33 SHDMA_SLAVE_SIUA_TX,
34 SHDMA_SLAVE_SIUA_RX,
35 SHDMA_SLAVE_SIUB_TX,
36 SHDMA_SLAVE_SIUB_RX,
37 SHDMA_SLAVE_NUMBER, /* Must stay last */
38};
39
40struct sh_dmae_slave_config {
41 enum sh_dmae_slave_chan_id slave_id;
42 dma_addr_t addr;
43 u32 chcr;
44 char mid_rid;
45};
46
47struct sh_dmae_channel {
48 unsigned int offset;
49 unsigned int dmars;
50 unsigned int dmars_bit;
51};
52
53struct sh_dmae_pdata {
54 struct sh_dmae_slave_config *slave;
55 int slave_num;
56 struct sh_dmae_channel *channel;
57 int channel_num;
58 unsigned int ts_low_shift;
59 unsigned int ts_low_mask;
60 unsigned int ts_high_shift;
61 unsigned int ts_high_mask;
62 unsigned int *ts_shift;
63 int ts_shift_num;
64 u16 dmaor_init;
65};
66
67struct device;
68
69/* Used by slave DMA clients to request DMA to/from a specific peripheral */
70struct sh_dmae_slave {
71 enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */
72 struct device *dma_dev; /* Set by the platform */
73 struct sh_dmae_slave_config *config; /* Set by the driver */
74};
75
76struct sh_dmae_regs {
77 u32 sar; /* SAR / source address */
78 u32 dar; /* DAR / destination address */
79 u32 tcr; /* TCR / transfer count */
80};
81
82struct sh_desc {
83 struct sh_dmae_regs hw;
84 struct list_head node;
85 struct dma_async_tx_descriptor async_tx;
86 enum dma_data_direction direction;
87 dma_cookie_t cookie;
88 size_t partial;
89 int chunks;
90 int mark;
91};
92
93#endif
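
The new <asm/dmaengine.h> header is what generic dmaengine clients are expected to include from here on. As a rough, non-authoritative sketch of how a peripheral driver might request one of the slave channels defined above (the filter callback name sh_dmae_chan_filter and the helper request_scif0_tx are hypothetical, not part of this patch), the usual dmaengine steps apply: build a capability mask, pass a struct sh_dmae_slave through the filter, and let the shdma driver match it against its slave configs:

/* Illustrative sketch only -- not part of the patch; assumes the API above. */
#include <linux/dmaengine.h>
#include <asm/dmaengine.h>

/* Hypothetical filter: attach the slave description to the channel so the
 * sh-dma-engine driver can look up the matching sh_dmae_slave_config. */
static bool sh_dmae_chan_filter(struct dma_chan *chan, void *arg)
{
	chan->private = arg;
	return true;
}

static struct dma_chan *request_scif0_tx(struct device *dmac_dev)
{
	static struct sh_dmae_slave param = {
		.slave_id = SHDMA_SLAVE_SCIF0_TX,
	};
	dma_cap_mask_t mask;

	param.dma_dev = dmac_dev;	/* the "sh-dma-engine" platform device */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, sh_dmae_chan_filter, &param);
}
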
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 7dab7b23a5ec..f689554e17c1 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -291,21 +291,21 @@ unsigned long long poke_real_address_q(unsigned long long addr,
291 * doesn't exist, so everything must go through page tables. 291 * doesn't exist, so everything must go through page tables.
292 */ 292 */
293#ifdef CONFIG_MMU 293#ifdef CONFIG_MMU
294void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, 294void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
295 pgprot_t prot, void *caller); 295 pgprot_t prot, void *caller);
296void __iounmap(void __iomem *addr); 296void __iounmap(void __iomem *addr);
297 297
298static inline void __iomem * 298static inline void __iomem *
299__ioremap(unsigned long offset, unsigned long size, pgprot_t prot) 299__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
300{ 300{
301 return __ioremap_caller(offset, size, prot, __builtin_return_address(0)); 301 return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
302} 302}
303 303
304static inline void __iomem * 304static inline void __iomem *
305__ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot) 305__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
306{ 306{
307#ifdef CONFIG_29BIT 307#ifdef CONFIG_29BIT
308 unsigned long last_addr = offset + size - 1; 308 phys_addr_t last_addr = offset + size - 1;
309 309
310 /* 310 /*
311 * For P1 and P2 space this is trivial, as everything is already 311 * For P1 and P2 space this is trivial, as everything is already
@@ -329,7 +329,7 @@ __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot)
329} 329}
330 330
331static inline void __iomem * 331static inline void __iomem *
332__ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot) 332__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
333{ 333{
334 void __iomem *ret; 334 void __iomem *ret;
335 335
@@ -349,35 +349,32 @@ __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot)
349#define __iounmap(addr) do { } while (0) 349#define __iounmap(addr) do { } while (0)
350#endif /* CONFIG_MMU */ 350#endif /* CONFIG_MMU */
351 351
352static inline void __iomem * 352static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
353ioremap(unsigned long offset, unsigned long size)
354{ 353{
355 return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE); 354 return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
356} 355}
357 356
358static inline void __iomem * 357static inline void __iomem *
359ioremap_cache(unsigned long offset, unsigned long size) 358ioremap_cache(phys_addr_t offset, unsigned long size)
360{ 359{
361 return __ioremap_mode(offset, size, PAGE_KERNEL); 360 return __ioremap_mode(offset, size, PAGE_KERNEL);
362} 361}
363 362
364#ifdef CONFIG_HAVE_IOREMAP_PROT 363#ifdef CONFIG_HAVE_IOREMAP_PROT
365static inline void __iomem * 364static inline void __iomem *
366ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags) 365ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
367{ 366{
368 return __ioremap_mode(offset, size, __pgprot(flags)); 367 return __ioremap_mode(offset, size, __pgprot(flags));
369} 368}
370#endif 369#endif
371 370
372#ifdef CONFIG_IOREMAP_FIXED 371#ifdef CONFIG_IOREMAP_FIXED
373extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, 372extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
374 unsigned long, pgprot_t);
375extern int iounmap_fixed(void __iomem *); 373extern int iounmap_fixed(void __iomem *);
376extern void ioremap_fixed_init(void); 374extern void ioremap_fixed_init(void);
377#else 375#else
378static inline void __iomem * 376static inline void __iomem *
379ioremap_fixed(resource_size_t phys_addr, unsigned long offset, 377ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
380 unsigned long size, pgprot_t prot)
381{ 378{
382 BUG(); 379 BUG();
383 return NULL; 380 return NULL;
diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 15a05b615ba7..19fe84550b49 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -55,19 +55,29 @@ typedef struct {
55 55
56#ifdef CONFIG_PMB 56#ifdef CONFIG_PMB
57/* arch/sh/mm/pmb.c */ 57/* arch/sh/mm/pmb.c */
58long pmb_remap(unsigned long virt, unsigned long phys,
59 unsigned long size, pgprot_t prot);
60void pmb_unmap(unsigned long addr);
61void pmb_init(void);
62bool __in_29bit_mode(void); 58bool __in_29bit_mode(void);
59
60void pmb_init(void);
61int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys,
62 unsigned long size, pgprot_t prot);
63void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
64 pgprot_t prot, void *caller);
65int pmb_unmap(void __iomem *addr);
66
63#else 67#else
64static inline long pmb_remap(unsigned long virt, unsigned long phys, 68
65 unsigned long size, pgprot_t prot) 69static inline void __iomem *
70pmb_remap_caller(phys_addr_t phys, unsigned long size,
71 pgprot_t prot, void *caller)
72{
73 return NULL;
74}
75
76static inline int pmb_unmap(void __iomem *addr)
66{ 77{
67 return -EINVAL; 78 return -EINVAL;
68} 79}
69 80
70#define pmb_unmap(addr) do { } while (0)
71#define pmb_init(addr) do { } while (0) 81#define pmb_init(addr) do { } while (0)
72 82
73#ifdef CONFIG_29BIT 83#ifdef CONFIG_29BIT
@@ -77,6 +87,13 @@ static inline long pmb_remap(unsigned long virt, unsigned long phys,
77#endif 87#endif
78 88
79#endif /* CONFIG_PMB */ 89#endif /* CONFIG_PMB */
90
91static inline void __iomem *
92pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot)
93{
94 return pmb_remap_caller(phys, size, prot, __builtin_return_address(0));
95}
96
80#endif /* __ASSEMBLY__ */ 97#endif /* __ASSEMBLY__ */
81 98
82#endif /* __MMU_H */ 99#endif /* __MMU_H */
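
With this rework pmb_remap() behaves like ioremap(): it returns a void __iomem * cookie (NULL or an ERR_PTR value on failure) and pmb_unmap() takes that cookie back, returning 0 when the PMB owned the mapping. A minimal usage sketch under those assumptions (the helper names map_window/unmap_window are illustrative only, and the pattern mirrors what arch/sh/mm/ioremap.c does further down):

/* Illustrative sketch only -- not part of the patch. */
#include <linux/err.h>
#include <linux/types.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>

static void __iomem *map_window(phys_addr_t phys, unsigned long size)
{
	void __iomem *va = pmb_remap(phys, size, PAGE_KERNEL_NOCACHE);

	if (va && !IS_ERR(va))
		return va;	/* pre-faulted PMB mapping */

	return NULL;		/* caller falls back to page tables */
}

static void unmap_window(void __iomem *va)
{
	if (pmb_unmap(va) == 0)
		return;		/* the PMB owned this mapping */

	/* otherwise tear down a conventional ioremap()-style VMA here */
}
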
diff --git a/arch/sh/include/asm/siu.h b/arch/sh/include/asm/siu.h
index 57565a3b551f..f1b1e6944a5f 100644
--- a/arch/sh/include/asm/siu.h
+++ b/arch/sh/include/asm/siu.h
@@ -11,7 +11,7 @@
11#ifndef ASM_SIU_H 11#ifndef ASM_SIU_H
12#define ASM_SIU_H 12#define ASM_SIU_H
13 13
14#include <asm/dma-sh.h> 14#include <asm/dmaengine.h>
15 15
16struct device; 16struct device;
17 17
diff --git a/arch/sh/include/asm/topology.h b/arch/sh/include/asm/topology.h
index 37cdadd975ac..88e734069fa6 100644
--- a/arch/sh/include/asm/topology.h
+++ b/arch/sh/include/asm/topology.h
@@ -35,7 +35,7 @@
35 35
36#define pcibus_to_node(bus) ((void)(bus), -1) 36#define pcibus_to_node(bus) ((void)(bus), -1)
37#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 37#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
38 CPU_MASK_ALL_PTR : \ 38 cpu_all_mask : \
39 cpumask_of_node(pcibus_to_node(bus))) 39 cpumask_of_node(pcibus_to_node(bus)))
40 40
41#endif 41#endif
diff --git a/arch/sh/include/cpu-sh3/cpu/dma-register.h b/arch/sh/include/cpu-sh3/cpu/dma-register.h
new file mode 100644
index 000000000000..2349e488c9a6
--- /dev/null
+++ b/arch/sh/include/cpu-sh3/cpu/dma-register.h
@@ -0,0 +1,41 @@
1/*
2 * SH3 CPU-specific DMA definitions, used by both DMA drivers
3 *
4 * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef CPU_DMA_REGISTER_H
11#define CPU_DMA_REGISTER_H
12
13#define CHCR_TS_LOW_MASK 0x18
14#define CHCR_TS_LOW_SHIFT 3
15#define CHCR_TS_HIGH_MASK 0
16#define CHCR_TS_HIGH_SHIFT 0
17
18#define DMAOR_INIT DMAOR_DME
19
20/*
21 * The SuperH DMAC supports a number of transmit sizes, we list them here,
22 * with their respective values as they appear in the CHCR registers.
23 */
24enum {
25 XMIT_SZ_8BIT,
26 XMIT_SZ_16BIT,
27 XMIT_SZ_32BIT,
28 XMIT_SZ_128BIT,
29};
30
31/* log2(size / 8) - used to calculate number of transfers */
32#define TS_SHIFT { \
33 [XMIT_SZ_8BIT] = 0, \
34 [XMIT_SZ_16BIT] = 1, \
35 [XMIT_SZ_32BIT] = 2, \
36 [XMIT_SZ_128BIT] = 4, \
37}
38
39#define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT)
40
41#endif
diff --git a/arch/sh/include/cpu-sh3/cpu/dma.h b/arch/sh/include/cpu-sh3/cpu/dma.h
index 207811a7a650..24e28b91c9d5 100644
--- a/arch/sh/include/cpu-sh3/cpu/dma.h
+++ b/arch/sh/include/cpu-sh3/cpu/dma.h
@@ -20,31 +20,4 @@
20#define TS_32 0x00000010 20#define TS_32 0x00000010
21#define TS_128 0x00000018 21#define TS_128 0x00000018
22 22
23#define CHCR_TS_LOW_MASK 0x18
24#define CHCR_TS_LOW_SHIFT 3
25#define CHCR_TS_HIGH_MASK 0
26#define CHCR_TS_HIGH_SHIFT 0
27
28#define DMAOR_INIT DMAOR_DME
29
30/*
31 * The SuperH DMAC supports a number of transmit sizes, we list them here,
32 * with their respective values as they appear in the CHCR registers.
33 */
34enum {
35 XMIT_SZ_8BIT,
36 XMIT_SZ_16BIT,
37 XMIT_SZ_32BIT,
38 XMIT_SZ_128BIT,
39};
40
41#define TS_SHIFT { \
42 [XMIT_SZ_8BIT] = 0, \
43 [XMIT_SZ_16BIT] = 1, \
44 [XMIT_SZ_32BIT] = 2, \
45 [XMIT_SZ_128BIT] = 4, \
46}
47
48#define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT)
49
50#endif /* __ASM_CPU_SH3_DMA_H */ 23#endif /* __ASM_CPU_SH3_DMA_H */
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-register.h b/arch/sh/include/cpu-sh4/cpu/dma-register.h
new file mode 100644
index 000000000000..55f9fec082d4
--- /dev/null
+++ b/arch/sh/include/cpu-sh4/cpu/dma-register.h
@@ -0,0 +1,112 @@
1/*
2 * SH4 CPU-specific DMA definitions, used by both DMA drivers
3 *
4 * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#ifndef CPU_DMA_REGISTER_H
11#define CPU_DMA_REGISTER_H
12
13/* SH7751/7760/7780 DMA IRQ sources */
14
15#ifdef CONFIG_CPU_SH4A
16
17#define DMAOR_INIT DMAOR_DME
18
19#if defined(CONFIG_CPU_SUBTYPE_SH7343) || \
20 defined(CONFIG_CPU_SUBTYPE_SH7730)
21#define CHCR_TS_LOW_MASK 0x00000018
22#define CHCR_TS_LOW_SHIFT 3
23#define CHCR_TS_HIGH_MASK 0
24#define CHCR_TS_HIGH_SHIFT 0
25#elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \
26 defined(CONFIG_CPU_SUBTYPE_SH7724)
27#define CHCR_TS_LOW_MASK 0x00000018
28#define CHCR_TS_LOW_SHIFT 3
29#define CHCR_TS_HIGH_MASK 0x00300000
30#define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */
31#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
32 defined(CONFIG_CPU_SUBTYPE_SH7764)
33#define CHCR_TS_LOW_MASK 0x00000018
34#define CHCR_TS_LOW_SHIFT 3
35#define CHCR_TS_HIGH_MASK 0
36#define CHCR_TS_HIGH_SHIFT 0
37#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
38#define CHCR_TS_LOW_MASK 0x00000018
39#define CHCR_TS_LOW_SHIFT 3
40#define CHCR_TS_HIGH_MASK 0
41#define CHCR_TS_HIGH_SHIFT 0
42#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
43#define CHCR_TS_LOW_MASK 0x00000018
44#define CHCR_TS_LOW_SHIFT 3
45#define CHCR_TS_HIGH_MASK 0
46#define CHCR_TS_HIGH_SHIFT 0
47#else /* SH7785 */
48#define CHCR_TS_LOW_MASK 0x00000018
49#define CHCR_TS_LOW_SHIFT 3
50#define CHCR_TS_HIGH_MASK 0
51#define CHCR_TS_HIGH_SHIFT 0
52#endif
53
54/* Transmit sizes and respective CHCR register values */
55enum {
56 XMIT_SZ_8BIT = 0,
57 XMIT_SZ_16BIT = 1,
58 XMIT_SZ_32BIT = 2,
59 XMIT_SZ_64BIT = 7,
60 XMIT_SZ_128BIT = 3,
61 XMIT_SZ_256BIT = 4,
62 XMIT_SZ_128BIT_BLK = 0xb,
63 XMIT_SZ_256BIT_BLK = 0xc,
64};
65
66/* log2(size / 8) - used to calculate number of transfers */
67#define TS_SHIFT { \
68 [XMIT_SZ_8BIT] = 0, \
69 [XMIT_SZ_16BIT] = 1, \
70 [XMIT_SZ_32BIT] = 2, \
71 [XMIT_SZ_64BIT] = 3, \
72 [XMIT_SZ_128BIT] = 4, \
73 [XMIT_SZ_256BIT] = 5, \
74 [XMIT_SZ_128BIT_BLK] = 4, \
75 [XMIT_SZ_256BIT_BLK] = 5, \
76}
77
78#define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
79 ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT))
80
81#else /* CONFIG_CPU_SH4A */
82
83#define DMAOR_INIT (0x8000 | DMAOR_DME)
84
85#define CHCR_TS_LOW_MASK 0x70
86#define CHCR_TS_LOW_SHIFT 4
87#define CHCR_TS_HIGH_MASK 0
88#define CHCR_TS_HIGH_SHIFT 0
89
90/* Transmit sizes and respective CHCR register values */
91enum {
92 XMIT_SZ_8BIT = 1,
93 XMIT_SZ_16BIT = 2,
94 XMIT_SZ_32BIT = 3,
95 XMIT_SZ_64BIT = 0,
96 XMIT_SZ_256BIT = 4,
97};
98
99/* log2(size / 8) - used to calculate number of transfers */
100#define TS_SHIFT { \
101 [XMIT_SZ_8BIT] = 0, \
102 [XMIT_SZ_16BIT] = 1, \
103 [XMIT_SZ_32BIT] = 2, \
104 [XMIT_SZ_64BIT] = 3, \
105 [XMIT_SZ_256BIT] = 5, \
106}
107
108#define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT)
109
110#endif /* CONFIG_CPU_SH4A */
111
112#endif
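
These tables carry the CPU-specific encoding shared by both DMA drivers: TS_INDEX2VAL() splits a transmit-size index across the low and high CHCR fields, while TS_SHIFT gives log2(bytes per transfer unit) so a byte count can be converted into a TCR transfer count. A small sketch of the intended arithmetic, assuming the SH4A definitions above (the function and variable names are illustrative, not from the patch):

/* Illustrative sketch only -- assumes the SH4A definitions above. */
#include <linux/types.h>
#include <cpu/dma-register.h>

static unsigned int ts_shift[] = TS_SHIFT;

/* Derive the CHCR transfer-size bits and the TCR value for a transfer of
 * 'len' bytes in 32-bit units. */
static void chcr_ts_prep(size_t len, u32 *chcr_ts, u32 *tcr)
{
	int index = XMIT_SZ_32BIT;

	*chcr_ts = TS_INDEX2VAL(index);		/* low + high TS fields of CHCR */
	*tcr = len >> ts_shift[index];		/* e.g. 64 bytes -> 16 transfers */
}
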
diff --git a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
index e734ea47d8a0..9647e681fd27 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
@@ -8,20 +8,12 @@
8#define DMAE0_IRQ 78 /* DMA Error IRQ*/ 8#define DMAE0_IRQ 78 /* DMA Error IRQ*/
9#define SH_DMAC_BASE0 0xFE008020 9#define SH_DMAC_BASE0 0xFE008020
10#define SH_DMARS_BASE0 0xFE009000 10#define SH_DMARS_BASE0 0xFE009000
11#define CHCR_TS_LOW_MASK 0x00000018
12#define CHCR_TS_LOW_SHIFT 3
13#define CHCR_TS_HIGH_MASK 0
14#define CHCR_TS_HIGH_SHIFT 0
15#elif defined(CONFIG_CPU_SUBTYPE_SH7722) 11#elif defined(CONFIG_CPU_SUBTYPE_SH7722)
16#define DMTE0_IRQ 48 12#define DMTE0_IRQ 48
17#define DMTE4_IRQ 76 13#define DMTE4_IRQ 76
18#define DMAE0_IRQ 78 /* DMA Error IRQ*/ 14#define DMAE0_IRQ 78 /* DMA Error IRQ*/
19#define SH_DMAC_BASE0 0xFE008020 15#define SH_DMAC_BASE0 0xFE008020
20#define SH_DMARS_BASE0 0xFE009000 16#define SH_DMARS_BASE0 0xFE009000
21#define CHCR_TS_LOW_MASK 0x00000018
22#define CHCR_TS_LOW_SHIFT 3
23#define CHCR_TS_HIGH_MASK 0x00300000
24#define CHCR_TS_HIGH_SHIFT 20
25#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 17#elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \
26 defined(CONFIG_CPU_SUBTYPE_SH7764) 18 defined(CONFIG_CPU_SUBTYPE_SH7764)
27#define DMTE0_IRQ 34 19#define DMTE0_IRQ 34
@@ -29,10 +21,6 @@
29#define DMAE0_IRQ 38 21#define DMAE0_IRQ 38
30#define SH_DMAC_BASE0 0xFF608020 22#define SH_DMAC_BASE0 0xFF608020
31#define SH_DMARS_BASE0 0xFF609000 23#define SH_DMARS_BASE0 0xFF609000
32#define CHCR_TS_LOW_MASK 0x00000018
33#define CHCR_TS_LOW_SHIFT 3
34#define CHCR_TS_HIGH_MASK 0
35#define CHCR_TS_HIGH_SHIFT 0
36#elif defined(CONFIG_CPU_SUBTYPE_SH7723) 24#elif defined(CONFIG_CPU_SUBTYPE_SH7723)
37#define DMTE0_IRQ 48 /* DMAC0A*/ 25#define DMTE0_IRQ 48 /* DMAC0A*/
38#define DMTE4_IRQ 76 /* DMAC0B */ 26#define DMTE4_IRQ 76 /* DMAC0B */
@@ -46,10 +34,6 @@
46#define SH_DMAC_BASE0 0xFE008020 34#define SH_DMAC_BASE0 0xFE008020
47#define SH_DMAC_BASE1 0xFDC08020 35#define SH_DMAC_BASE1 0xFDC08020
48#define SH_DMARS_BASE0 0xFDC09000 36#define SH_DMARS_BASE0 0xFDC09000
49#define CHCR_TS_LOW_MASK 0x00000018
50#define CHCR_TS_LOW_SHIFT 3
51#define CHCR_TS_HIGH_MASK 0
52#define CHCR_TS_HIGH_SHIFT 0
53#elif defined(CONFIG_CPU_SUBTYPE_SH7724) 37#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
54#define DMTE0_IRQ 48 /* DMAC0A*/ 38#define DMTE0_IRQ 48 /* DMAC0A*/
55#define DMTE4_IRQ 76 /* DMAC0B */ 39#define DMTE4_IRQ 76 /* DMAC0B */
@@ -64,10 +48,6 @@
64#define SH_DMAC_BASE1 0xFDC08020 48#define SH_DMAC_BASE1 0xFDC08020
65#define SH_DMARS_BASE0 0xFE009000 49#define SH_DMARS_BASE0 0xFE009000
66#define SH_DMARS_BASE1 0xFDC09000 50#define SH_DMARS_BASE1 0xFDC09000
67#define CHCR_TS_LOW_MASK 0x00000018
68#define CHCR_TS_LOW_SHIFT 3
69#define CHCR_TS_HIGH_MASK 0x00600000
70#define CHCR_TS_HIGH_SHIFT 21
71#elif defined(CONFIG_CPU_SUBTYPE_SH7780) 51#elif defined(CONFIG_CPU_SUBTYPE_SH7780)
72#define DMTE0_IRQ 34 52#define DMTE0_IRQ 34
73#define DMTE4_IRQ 44 53#define DMTE4_IRQ 44
@@ -80,10 +60,6 @@
80#define SH_DMAC_BASE0 0xFC808020 60#define SH_DMAC_BASE0 0xFC808020
81#define SH_DMAC_BASE1 0xFC818020 61#define SH_DMAC_BASE1 0xFC818020
82#define SH_DMARS_BASE0 0xFC809000 62#define SH_DMARS_BASE0 0xFC809000
83#define CHCR_TS_LOW_MASK 0x00000018
84#define CHCR_TS_LOW_SHIFT 3
85#define CHCR_TS_HIGH_MASK 0
86#define CHCR_TS_HIGH_SHIFT 0
87#else /* SH7785 */ 63#else /* SH7785 */
88#define DMTE0_IRQ 33 64#define DMTE0_IRQ 33
89#define DMTE4_IRQ 37 65#define DMTE4_IRQ 37
@@ -97,10 +73,6 @@
97#define SH_DMAC_BASE0 0xFC808020 73#define SH_DMAC_BASE0 0xFC808020
98#define SH_DMAC_BASE1 0xFCC08020 74#define SH_DMAC_BASE1 0xFCC08020
99#define SH_DMARS_BASE0 0xFC809000 75#define SH_DMARS_BASE0 0xFC809000
100#define CHCR_TS_LOW_MASK 0x00000018
101#define CHCR_TS_LOW_SHIFT 3
102#define CHCR_TS_HIGH_MASK 0
103#define CHCR_TS_HIGH_SHIFT 0
104#endif 76#endif
105 77
106#define REQ_HE 0x000000C0 78#define REQ_HE 0x000000C0
@@ -108,38 +80,4 @@
108#define REQ_LE 0x00000040 80#define REQ_LE 0x00000040
109#define TM_BURST 0x00000020 81#define TM_BURST 0x00000020
110 82
111/*
112 * The SuperH DMAC supports a number of transmit sizes, we list them here,
113 * with their respective values as they appear in the CHCR registers.
114 *
115 * Defaults to a 64-bit transfer size.
116 */
117enum {
118 XMIT_SZ_8BIT = 0,
119 XMIT_SZ_16BIT = 1,
120 XMIT_SZ_32BIT = 2,
121 XMIT_SZ_64BIT = 7,
122 XMIT_SZ_128BIT = 3,
123 XMIT_SZ_256BIT = 4,
124 XMIT_SZ_128BIT_BLK = 0xb,
125 XMIT_SZ_256BIT_BLK = 0xc,
126};
127
128/*
129 * The DMA count is defined as the number of bytes to transfer.
130 */
131#define TS_SHIFT { \
132 [XMIT_SZ_8BIT] = 0, \
133 [XMIT_SZ_16BIT] = 1, \
134 [XMIT_SZ_32BIT] = 2, \
135 [XMIT_SZ_64BIT] = 3, \
136 [XMIT_SZ_128BIT] = 4, \
137 [XMIT_SZ_256BIT] = 5, \
138 [XMIT_SZ_128BIT_BLK] = 4, \
139 [XMIT_SZ_256BIT_BLK] = 5, \
140}
141
142#define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \
143 ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT))
144
145#endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */ 83#endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */
diff --git a/arch/sh/include/cpu-sh4/cpu/dma.h b/arch/sh/include/cpu-sh4/cpu/dma.h
index 114a369705bc..ca747e93c2ed 100644
--- a/arch/sh/include/cpu-sh4/cpu/dma.h
+++ b/arch/sh/include/cpu-sh4/cpu/dma.h
@@ -5,9 +5,8 @@
5 5
6#ifdef CONFIG_CPU_SH4A 6#ifdef CONFIG_CPU_SH4A
7 7
8#define DMAOR_INIT (DMAOR_DME)
9
10#include <cpu/dma-sh4a.h> 8#include <cpu/dma-sh4a.h>
9
11#else /* CONFIG_CPU_SH4A */ 10#else /* CONFIG_CPU_SH4A */
12/* 11/*
13 * SH7750/SH7751/SH7760 12 * SH7750/SH7751/SH7760
@@ -17,7 +16,6 @@
17#define DMTE6_IRQ 46 16#define DMTE6_IRQ 46
18#define DMAE0_IRQ 38 17#define DMAE0_IRQ 38
19 18
20#define DMAOR_INIT (0x8000|DMAOR_DME)
21#define SH_DMAC_BASE0 0xffa00000 19#define SH_DMAC_BASE0 0xffa00000
22#define SH_DMAC_BASE1 0xffa00070 20#define SH_DMAC_BASE1 0xffa00070
23/* Definitions for the SuperH DMAC */ 21/* Definitions for the SuperH DMAC */
@@ -27,40 +25,8 @@
27#define TS_32 0x00000030 25#define TS_32 0x00000030
28#define TS_64 0x00000000 26#define TS_64 0x00000000
29 27
30#define CHCR_TS_LOW_MASK 0x70
31#define CHCR_TS_LOW_SHIFT 4
32#define CHCR_TS_HIGH_MASK 0
33#define CHCR_TS_HIGH_SHIFT 0
34
35#define DMAOR_COD 0x00000008 28#define DMAOR_COD 0x00000008
36 29
37/*
38 * The SuperH DMAC supports a number of transmit sizes, we list them here,
39 * with their respective values as they appear in the CHCR registers.
40 *
41 * Defaults to a 64-bit transfer size.
42 */
43enum {
44 XMIT_SZ_8BIT = 1,
45 XMIT_SZ_16BIT = 2,
46 XMIT_SZ_32BIT = 3,
47 XMIT_SZ_64BIT = 0,
48 XMIT_SZ_256BIT = 4,
49};
50
51/*
52 * The DMA count is defined as the number of bytes to transfer.
53 */
54#define TS_SHIFT { \
55 [XMIT_SZ_8BIT] = 0, \
56 [XMIT_SZ_16BIT] = 1, \
57 [XMIT_SZ_32BIT] = 2, \
58 [XMIT_SZ_64BIT] = 3, \
59 [XMIT_SZ_256BIT] = 5, \
60}
61
62#define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT)
63
64#endif 30#endif
65 31
66#endif /* __ASM_CPU_SH4_DMA_H */ 32#endif /* __ASM_CPU_SH4_DMA_H */
diff --git a/arch/sh/include/mach-migor/mach/migor.h b/arch/sh/include/mach-migor/mach/migor.h
index cee6cb88e020..42fccf93412e 100644
--- a/arch/sh/include/mach-migor/mach/migor.h
+++ b/arch/sh/include/mach-migor/mach/migor.h
@@ -1,6 +1,7 @@
1#ifndef __ASM_SH_MIGOR_H 1#ifndef __ASM_SH_MIGOR_H
2#define __ASM_SH_MIGOR_H 2#define __ASM_SH_MIGOR_H
3 3
4#define PORT_MSELCRA 0xa4050180
4#define PORT_MSELCRB 0xa4050182 5#define PORT_MSELCRB 0xa4050182
5#define BSC_CS4BCR 0xfec10010 6#define BSC_CS4BCR 0xfec10010
6#define BSC_CS6ABCR 0xfec1001c 7#define BSC_CS6ABCR 0xfec1001c
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
index ef3f97827808..fd7e3639e845 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7722.c
@@ -7,19 +7,167 @@
7 * License. See the file "COPYING" in the main directory of this archive 7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 8 * for more details.
9 */ 9 */
10#include <linux/platform_device.h>
11#include <linux/init.h> 10#include <linux/init.h>
11#include <linux/mm.h>
12#include <linux/platform_device.h>
12#include <linux/serial.h> 13#include <linux/serial.h>
13#include <linux/serial_sci.h> 14#include <linux/serial_sci.h>
14#include <linux/mm.h> 15#include <linux/sh_timer.h>
15#include <linux/uio_driver.h> 16#include <linux/uio_driver.h>
16#include <linux/usb/m66592.h> 17#include <linux/usb/m66592.h>
17#include <linux/sh_timer.h> 18
18#include <asm/clock.h> 19#include <asm/clock.h>
20#include <asm/dmaengine.h>
19#include <asm/mmzone.h> 21#include <asm/mmzone.h>
20#include <asm/dma-sh.h> 22#include <asm/siu.h>
23
24#include <cpu/dma-register.h>
21#include <cpu/sh7722.h> 25#include <cpu/sh7722.h>
22 26
27static struct sh_dmae_slave_config sh7722_dmae_slaves[] = {
28 {
29 .slave_id = SHDMA_SLAVE_SCIF0_TX,
30 .addr = 0xffe0000c,
31 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
32 .mid_rid = 0x21,
33 }, {
34 .slave_id = SHDMA_SLAVE_SCIF0_RX,
35 .addr = 0xffe00014,
36 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
37 .mid_rid = 0x22,
38 }, {
39 .slave_id = SHDMA_SLAVE_SCIF1_TX,
40 .addr = 0xffe1000c,
41 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
42 .mid_rid = 0x25,
43 }, {
44 .slave_id = SHDMA_SLAVE_SCIF1_RX,
45 .addr = 0xffe10014,
46 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
47 .mid_rid = 0x26,
48 }, {
49 .slave_id = SHDMA_SLAVE_SCIF2_TX,
50 .addr = 0xffe2000c,
51 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
52 .mid_rid = 0x29,
53 }, {
54 .slave_id = SHDMA_SLAVE_SCIF2_RX,
55 .addr = 0xffe20014,
56 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
57 .mid_rid = 0x2a,
58 }, {
59 .slave_id = SHDMA_SLAVE_SIUA_TX,
60 .addr = 0xa454c098,
61 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
62 .mid_rid = 0xb1,
63 }, {
64 .slave_id = SHDMA_SLAVE_SIUA_RX,
65 .addr = 0xa454c090,
66 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
67 .mid_rid = 0xb2,
68 }, {
69 .slave_id = SHDMA_SLAVE_SIUB_TX,
70 .addr = 0xa454c09c,
71 .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
72 .mid_rid = 0xb5,
73 }, {
74 .slave_id = SHDMA_SLAVE_SIUB_RX,
75 .addr = 0xa454c094,
76 .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
77 .mid_rid = 0xb6,
78 },
79};
80
81static struct sh_dmae_channel sh7722_dmae_channels[] = {
82 {
83 .offset = 0,
84 .dmars = 0,
85 .dmars_bit = 0,
86 }, {
87 .offset = 0x10,
88 .dmars = 0,
89 .dmars_bit = 8,
90 }, {
91 .offset = 0x20,
92 .dmars = 4,
93 .dmars_bit = 0,
94 }, {
95 .offset = 0x30,
96 .dmars = 4,
97 .dmars_bit = 8,
98 }, {
99 .offset = 0x50,
100 .dmars = 8,
101 .dmars_bit = 0,
102 }, {
103 .offset = 0x60,
104 .dmars = 8,
105 .dmars_bit = 8,
106 }
107};
108
109static unsigned int ts_shift[] = TS_SHIFT;
110
111static struct sh_dmae_pdata dma_platform_data = {
112 .slave = sh7722_dmae_slaves,
113 .slave_num = ARRAY_SIZE(sh7722_dmae_slaves),
114 .channel = sh7722_dmae_channels,
115 .channel_num = ARRAY_SIZE(sh7722_dmae_channels),
116 .ts_low_shift = CHCR_TS_LOW_SHIFT,
117 .ts_low_mask = CHCR_TS_LOW_MASK,
118 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
119 .ts_high_mask = CHCR_TS_HIGH_MASK,
120 .ts_shift = ts_shift,
121 .ts_shift_num = ARRAY_SIZE(ts_shift),
122 .dmaor_init = DMAOR_INIT,
123};
124
125static struct resource sh7722_dmae_resources[] = {
126 [0] = {
127 /* Channel registers and DMAOR */
128 .start = 0xfe008020,
129 .end = 0xfe00808f,
130 .flags = IORESOURCE_MEM,
131 },
132 [1] = {
133 /* DMARSx */
134 .start = 0xfe009000,
135 .end = 0xfe00900b,
136 .flags = IORESOURCE_MEM,
137 },
138 {
139 /* DMA error IRQ */
140 .start = 78,
141 .end = 78,
142 .flags = IORESOURCE_IRQ,
143 },
144 {
145 /* IRQ for channels 0-3 */
146 .start = 48,
147 .end = 51,
148 .flags = IORESOURCE_IRQ,
149 },
150 {
151 /* IRQ for channels 4-5 */
152 .start = 76,
153 .end = 77,
154 .flags = IORESOURCE_IRQ,
155 },
156};
157
158struct platform_device dma_device = {
159 .name = "sh-dma-engine",
160 .id = -1,
161 .resource = sh7722_dmae_resources,
162 .num_resources = ARRAY_SIZE(sh7722_dmae_resources),
163 .dev = {
164 .platform_data = &dma_platform_data,
165 },
166 .archdata = {
167 .hwblk_id = HWBLK_DMAC,
168 },
169};
170
23/* Serial */ 171/* Serial */
24static struct plat_sci_port scif0_platform_data = { 172static struct plat_sci_port scif0_platform_data = {
25 .mapbase = 0xffe00000, 173 .mapbase = 0xffe00000,
@@ -388,15 +536,36 @@ static struct platform_device tmu2_device = {
388 }, 536 },
389}; 537};
390 538
391static struct sh_dmae_pdata dma_platform_data = { 539static struct siu_platform siu_platform_data = {
392 .mode = 0, 540 .dma_dev = &dma_device.dev,
541 .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX,
542 .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX,
543 .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
544 .dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX,
393}; 545};
394 546
395static struct platform_device dma_device = { 547static struct resource siu_resources[] = {
396 .name = "sh-dma-engine", 548 [0] = {
549 .start = 0xa4540000,
550 .end = 0xa454c10f,
551 .flags = IORESOURCE_MEM,
552 },
553 [1] = {
554 .start = 108,
555 .flags = IORESOURCE_IRQ,
556 },
557};
558
559static struct platform_device siu_device = {
560 .name = "sh_siu",
397 .id = -1, 561 .id = -1,
398 .dev = { 562 .dev = {
399 .platform_data = &dma_platform_data, 563 .platform_data = &siu_platform_data,
564 },
565 .resource = siu_resources,
566 .num_resources = ARRAY_SIZE(siu_resources),
567 .archdata = {
568 .hwblk_id = HWBLK_SIU,
400 }, 569 },
401}; 570};
402 571
@@ -414,6 +583,7 @@ static struct platform_device *sh7722_devices[] __initdata = {
414 &vpu_device, 583 &vpu_device,
415 &veu_device, 584 &veu_device,
416 &jpu_device, 585 &jpu_device,
586 &siu_device,
417 &dma_device, 587 &dma_device,
418}; 588};
419 589
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 31e3451f7e3d..e7fa2a92fc1f 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -21,22 +21,189 @@
21#include <linux/sh_timer.h> 21#include <linux/sh_timer.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/notifier.h> 23#include <linux/notifier.h>
24
24#include <asm/suspend.h> 25#include <asm/suspend.h>
25#include <asm/clock.h> 26#include <asm/clock.h>
26#include <asm/dma-sh.h> 27#include <asm/dmaengine.h>
27#include <asm/mmzone.h> 28#include <asm/mmzone.h>
29
30#include <cpu/dma-register.h>
28#include <cpu/sh7724.h> 31#include <cpu/sh7724.h>
29 32
30/* DMA */ 33/* DMA */
31static struct sh_dmae_pdata dma_platform_data = { 34static struct sh_dmae_channel sh7724_dmae0_channels[] = {
32 .mode = SHDMA_DMAOR1, 35 {
36 .offset = 0,
37 .dmars = 0,
38 .dmars_bit = 0,
39 }, {
40 .offset = 0x10,
41 .dmars = 0,
42 .dmars_bit = 8,
43 }, {
44 .offset = 0x20,
45 .dmars = 4,
46 .dmars_bit = 0,
47 }, {
48 .offset = 0x30,
49 .dmars = 4,
50 .dmars_bit = 8,
51 }, {
52 .offset = 0x50,
53 .dmars = 8,
54 .dmars_bit = 0,
55 }, {
56 .offset = 0x60,
57 .dmars = 8,
58 .dmars_bit = 8,
59 }
60};
61
62static struct sh_dmae_channel sh7724_dmae1_channels[] = {
63 {
64 .offset = 0,
65 .dmars = 0,
66 .dmars_bit = 0,
67 }, {
68 .offset = 0x10,
69 .dmars = 0,
70 .dmars_bit = 8,
71 }, {
72 .offset = 0x20,
73 .dmars = 4,
74 .dmars_bit = 0,
75 }, {
76 .offset = 0x30,
77 .dmars = 4,
78 .dmars_bit = 8,
79 }, {
80 .offset = 0x50,
81 .dmars = 8,
82 .dmars_bit = 0,
83 }, {
84 .offset = 0x60,
85 .dmars = 8,
86 .dmars_bit = 8,
87 }
88};
89
90static unsigned int ts_shift[] = TS_SHIFT;
91
92static struct sh_dmae_pdata dma0_platform_data = {
93 .channel = sh7724_dmae0_channels,
94 .channel_num = ARRAY_SIZE(sh7724_dmae0_channels),
95 .ts_low_shift = CHCR_TS_LOW_SHIFT,
96 .ts_low_mask = CHCR_TS_LOW_MASK,
97 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
98 .ts_high_mask = CHCR_TS_HIGH_MASK,
99 .ts_shift = ts_shift,
100 .ts_shift_num = ARRAY_SIZE(ts_shift),
101 .dmaor_init = DMAOR_INIT,
102};
103
104static struct sh_dmae_pdata dma1_platform_data = {
105 .channel = sh7724_dmae1_channels,
106 .channel_num = ARRAY_SIZE(sh7724_dmae1_channels),
107 .ts_low_shift = CHCR_TS_LOW_SHIFT,
108 .ts_low_mask = CHCR_TS_LOW_MASK,
109 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
110 .ts_high_mask = CHCR_TS_HIGH_MASK,
111 .ts_shift = ts_shift,
112 .ts_shift_num = ARRAY_SIZE(ts_shift),
113 .dmaor_init = DMAOR_INIT,
114};
115
116/* Resource order important! */
117static struct resource sh7724_dmae0_resources[] = {
118 {
119 /* Channel registers and DMAOR */
120 .start = 0xfe008020,
121 .end = 0xfe00808f,
122 .flags = IORESOURCE_MEM,
123 },
124 {
125 /* DMARSx */
126 .start = 0xfe009000,
127 .end = 0xfe00900b,
128 .flags = IORESOURCE_MEM,
129 },
130 {
131 /* DMA error IRQ */
132 .start = 78,
133 .end = 78,
134 .flags = IORESOURCE_IRQ,
135 },
136 {
137 /* IRQ for channels 0-3 */
138 .start = 48,
139 .end = 51,
140 .flags = IORESOURCE_IRQ,
141 },
142 {
143 /* IRQ for channels 4-5 */
144 .start = 76,
145 .end = 77,
146 .flags = IORESOURCE_IRQ,
147 },
33}; 148};
34 149
35static struct platform_device dma_device = { 150/* Resource order important! */
36 .name = "sh-dma-engine", 151static struct resource sh7724_dmae1_resources[] = {
37 .id = -1, 152 {
38 .dev = { 153 /* Channel registers and DMAOR */
39 .platform_data = &dma_platform_data, 154 .start = 0xfdc08020,
155 .end = 0xfdc0808f,
156 .flags = IORESOURCE_MEM,
157 },
158 {
159 /* DMARSx */
160 .start = 0xfdc09000,
161 .end = 0xfdc0900b,
162 .flags = IORESOURCE_MEM,
163 },
164 {
165 /* DMA error IRQ */
166 .start = 74,
167 .end = 74,
168 .flags = IORESOURCE_IRQ,
169 },
170 {
171 /* IRQ for channels 0-3 */
172 .start = 40,
173 .end = 43,
174 .flags = IORESOURCE_IRQ,
175 },
176 {
177 /* IRQ for channels 4-5 */
178 .start = 72,
179 .end = 73,
180 .flags = IORESOURCE_IRQ,
181 },
182};
183
184static struct platform_device dma0_device = {
185 .name = "sh-dma-engine",
186 .id = 0,
187 .resource = sh7724_dmae0_resources,
188 .num_resources = ARRAY_SIZE(sh7724_dmae0_resources),
189 .dev = {
190 .platform_data = &dma0_platform_data,
191 },
192 .archdata = {
193 .hwblk_id = HWBLK_DMAC0,
194 },
195};
196
197static struct platform_device dma1_device = {
198 .name = "sh-dma-engine",
199 .id = 1,
200 .resource = sh7724_dmae1_resources,
201 .num_resources = ARRAY_SIZE(sh7724_dmae1_resources),
202 .dev = {
203 .platform_data = &dma1_platform_data,
204 },
205 .archdata = {
206 .hwblk_id = HWBLK_DMAC1,
40 }, 207 },
41}; 208};
42 209
@@ -663,7 +830,8 @@ static struct platform_device *sh7724_devices[] __initdata = {
663 &tmu3_device, 830 &tmu3_device,
664 &tmu4_device, 831 &tmu4_device,
665 &tmu5_device, 832 &tmu5_device,
666 &dma_device, 833 &dma0_device,
834 &dma1_device,
667 &rtc_device, 835 &rtc_device,
668 &iic0_device, 836 &iic0_device,
669 &iic1_device, 837 &iic1_device,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
index f8f21618d785..02e792c90de6 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7780.c
@@ -13,7 +13,10 @@
13#include <linux/io.h> 13#include <linux/io.h>
14#include <linux/serial_sci.h> 14#include <linux/serial_sci.h>
15#include <linux/sh_timer.h> 15#include <linux/sh_timer.h>
16#include <asm/dma-sh.h> 16
17#include <asm/dmaengine.h>
18
19#include <cpu/dma-register.h>
17 20
18static struct plat_sci_port scif0_platform_data = { 21static struct plat_sci_port scif0_platform_data = {
19 .mapbase = 0xffe00000, 22 .mapbase = 0xffe00000,
@@ -247,15 +250,131 @@ static struct platform_device rtc_device = {
247 .resource = rtc_resources, 250 .resource = rtc_resources,
248}; 251};
249 252
250static struct sh_dmae_pdata dma_platform_data = { 253/* DMA */
251 .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1), 254static struct sh_dmae_channel sh7780_dmae0_channels[] = {
255 {
256 .offset = 0,
257 .dmars = 0,
258 .dmars_bit = 0,
259 }, {
260 .offset = 0x10,
261 .dmars = 0,
262 .dmars_bit = 8,
263 }, {
264 .offset = 0x20,
265 .dmars = 4,
266 .dmars_bit = 0,
267 }, {
268 .offset = 0x30,
269 .dmars = 4,
270 .dmars_bit = 8,
271 }, {
272 .offset = 0x50,
273 .dmars = 8,
274 .dmars_bit = 0,
275 }, {
276 .offset = 0x60,
277 .dmars = 8,
278 .dmars_bit = 8,
279 }
280};
281
282static struct sh_dmae_channel sh7780_dmae1_channels[] = {
283 {
284 .offset = 0,
285 }, {
286 .offset = 0x10,
287 }, {
288 .offset = 0x20,
289 }, {
290 .offset = 0x30,
291 }, {
292 .offset = 0x50,
293 }, {
294 .offset = 0x60,
295 }
296};
297
298static unsigned int ts_shift[] = TS_SHIFT;
299
300static struct sh_dmae_pdata dma0_platform_data = {
301 .channel = sh7780_dmae0_channels,
302 .channel_num = ARRAY_SIZE(sh7780_dmae0_channels),
303 .ts_low_shift = CHCR_TS_LOW_SHIFT,
304 .ts_low_mask = CHCR_TS_LOW_MASK,
305 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
306 .ts_high_mask = CHCR_TS_HIGH_MASK,
307 .ts_shift = ts_shift,
308 .ts_shift_num = ARRAY_SIZE(ts_shift),
309 .dmaor_init = DMAOR_INIT,
310};
311
312static struct sh_dmae_pdata dma1_platform_data = {
313 .channel = sh7780_dmae1_channels,
314 .channel_num = ARRAY_SIZE(sh7780_dmae1_channels),
315 .ts_low_shift = CHCR_TS_LOW_SHIFT,
316 .ts_low_mask = CHCR_TS_LOW_MASK,
317 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
318 .ts_high_mask = CHCR_TS_HIGH_MASK,
319 .ts_shift = ts_shift,
320 .ts_shift_num = ARRAY_SIZE(ts_shift),
321 .dmaor_init = DMAOR_INIT,
252}; 322};
253 323
254static struct platform_device dma_device = { 324static struct resource sh7780_dmae0_resources[] = {
325 [0] = {
326 /* Channel registers and DMAOR */
327 .start = 0xfc808020,
328 .end = 0xfc80808f,
329 .flags = IORESOURCE_MEM,
330 },
331 [1] = {
332 /* DMARSx */
333 .start = 0xfc809000,
334 .end = 0xfc80900b,
335 .flags = IORESOURCE_MEM,
336 },
337 {
338 /* Real DMA error IRQ is 38, and channel IRQs are 34-37, 44-45 */
339 .start = 34,
340 .end = 34,
341 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
342 },
343};
344
345static struct resource sh7780_dmae1_resources[] = {
346 [0] = {
347 /* Channel registers and DMAOR */
348 .start = 0xfc818020,
349 .end = 0xfc81808f,
350 .flags = IORESOURCE_MEM,
351 },
352 /* DMAC1 has no DMARS */
353 {
354 /* Real DMA error IRQ is 38, and channel IRQs are 46-47, 92-95 */
355 .start = 46,
356 .end = 46,
357 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
358 },
359};
360
361static struct platform_device dma0_device = {
255 .name = "sh-dma-engine", 362 .name = "sh-dma-engine",
256 .id = -1, 363 .id = 0,
364 .resource = sh7780_dmae0_resources,
365 .num_resources = ARRAY_SIZE(sh7780_dmae0_resources),
257 .dev = { 366 .dev = {
258 .platform_data = &dma_platform_data, 367 .platform_data = &dma0_platform_data,
368 },
369};
370
371static struct platform_device dma1_device = {
372 .name = "sh-dma-engine",
373 .id = 1,
374 .resource = sh7780_dmae1_resources,
375 .num_resources = ARRAY_SIZE(sh7780_dmae1_resources),
376 .dev = {
377 .platform_data = &dma1_platform_data,
259 }, 378 },
260}; 379};
261 380
@@ -269,7 +388,8 @@ static struct platform_device *sh7780_devices[] __initdata = {
269 &tmu4_device, 388 &tmu4_device,
270 &tmu5_device, 389 &tmu5_device,
271 &rtc_device, 390 &rtc_device,
272 &dma_device, 391 &dma0_device,
392 &dma1_device,
273}; 393};
274 394
275static int __init sh7780_devices_setup(void) 395static int __init sh7780_devices_setup(void)
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
index 23448d8c6711..1fcd88b1671e 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7785.c
@@ -14,9 +14,12 @@
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/mm.h> 15#include <linux/mm.h>
16#include <linux/sh_timer.h> 16#include <linux/sh_timer.h>
17#include <asm/dma-sh.h> 17
18#include <asm/dmaengine.h>
18#include <asm/mmzone.h> 19#include <asm/mmzone.h>
19 20
21#include <cpu/dma-register.h>
22
20static struct plat_sci_port scif0_platform_data = { 23static struct plat_sci_port scif0_platform_data = {
21 .mapbase = 0xffea0000, 24 .mapbase = 0xffea0000,
22 .flags = UPF_BOOT_AUTOCONF, 25 .flags = UPF_BOOT_AUTOCONF,
@@ -295,15 +298,131 @@ static struct platform_device tmu5_device = {
295 .num_resources = ARRAY_SIZE(tmu5_resources), 298 .num_resources = ARRAY_SIZE(tmu5_resources),
296}; 299};
297 300
298static struct sh_dmae_pdata dma_platform_data = { 301/* DMA */
299 .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1), 302static struct sh_dmae_channel sh7785_dmae0_channels[] = {
303 {
304 .offset = 0,
305 .dmars = 0,
306 .dmars_bit = 0,
307 }, {
308 .offset = 0x10,
309 .dmars = 0,
310 .dmars_bit = 8,
311 }, {
312 .offset = 0x20,
313 .dmars = 4,
314 .dmars_bit = 0,
315 }, {
316 .offset = 0x30,
317 .dmars = 4,
318 .dmars_bit = 8,
319 }, {
320 .offset = 0x50,
321 .dmars = 8,
322 .dmars_bit = 0,
323 }, {
324 .offset = 0x60,
325 .dmars = 8,
326 .dmars_bit = 8,
327 }
328};
329
330static struct sh_dmae_channel sh7785_dmae1_channels[] = {
331 {
332 .offset = 0,
333 }, {
334 .offset = 0x10,
335 }, {
336 .offset = 0x20,
337 }, {
338 .offset = 0x30,
339 }, {
340 .offset = 0x50,
341 }, {
342 .offset = 0x60,
343 }
344};
345
346static unsigned int ts_shift[] = TS_SHIFT;
347
348static struct sh_dmae_pdata dma0_platform_data = {
349 .channel = sh7785_dmae0_channels,
350 .channel_num = ARRAY_SIZE(sh7785_dmae0_channels),
351 .ts_low_shift = CHCR_TS_LOW_SHIFT,
352 .ts_low_mask = CHCR_TS_LOW_MASK,
353 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
354 .ts_high_mask = CHCR_TS_HIGH_MASK,
355 .ts_shift = ts_shift,
356 .ts_shift_num = ARRAY_SIZE(ts_shift),
357 .dmaor_init = DMAOR_INIT,
358};
359
360static struct sh_dmae_pdata dma1_platform_data = {
361 .channel = sh7785_dmae1_channels,
362 .channel_num = ARRAY_SIZE(sh7785_dmae1_channels),
363 .ts_low_shift = CHCR_TS_LOW_SHIFT,
364 .ts_low_mask = CHCR_TS_LOW_MASK,
365 .ts_high_shift = CHCR_TS_HIGH_SHIFT,
366 .ts_high_mask = CHCR_TS_HIGH_MASK,
367 .ts_shift = ts_shift,
368 .ts_shift_num = ARRAY_SIZE(ts_shift),
369 .dmaor_init = DMAOR_INIT,
300}; 370};
301 371
302static struct platform_device dma_device = { 372static struct resource sh7785_dmae0_resources[] = {
373 [0] = {
374 /* Channel registers and DMAOR */
375 .start = 0xfc808020,
376 .end = 0xfc80808f,
377 .flags = IORESOURCE_MEM,
378 },
379 [1] = {
380 /* DMARSx */
381 .start = 0xfc809000,
382 .end = 0xfc80900b,
383 .flags = IORESOURCE_MEM,
384 },
385 {
386 /* Real DMA error IRQ is 39, and channel IRQs are 33-38 */
387 .start = 33,
388 .end = 33,
389 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
390 },
391};
392
393static struct resource sh7785_dmae1_resources[] = {
394 [0] = {
395 /* Channel registers and DMAOR */
396 .start = 0xfcc08020,
397 .end = 0xfcc0808f,
398 .flags = IORESOURCE_MEM,
399 },
400 /* DMAC1 has no DMARS */
401 {
402 /* Real DMA error IRQ is 58, and channel IRQs are 52-57 */
403 .start = 52,
404 .end = 52,
405 .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
406 },
407};
408
409static struct platform_device dma0_device = {
303 .name = "sh-dma-engine", 410 .name = "sh-dma-engine",
304 .id = -1, 411 .id = 0,
412 .resource = sh7785_dmae0_resources,
413 .num_resources = ARRAY_SIZE(sh7785_dmae0_resources),
305 .dev = { 414 .dev = {
306 .platform_data = &dma_platform_data, 415 .platform_data = &dma0_platform_data,
416 },
417};
418
419static struct platform_device dma1_device = {
420 .name = "sh-dma-engine",
421 .id = 1,
422 .resource = sh7785_dmae1_resources,
423 .num_resources = ARRAY_SIZE(sh7785_dmae1_resources),
424 .dev = {
425 .platform_data = &dma1_platform_data,
307 }, 426 },
308}; 427};
309 428
@@ -320,7 +439,8 @@ static struct platform_device *sh7785_devices[] __initdata = {
320 &tmu3_device, 439 &tmu3_device,
321 &tmu4_device, 440 &tmu4_device,
322 &tmu5_device, 441 &tmu5_device,
323 &dma_device, 442 &dma0_device,
443 &dma1_device,
324}; 444};
325 445
326static int __init sh7785_devices_setup(void) 446static int __init sh7785_devices_setup(void)
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index e2f1753d275c..675eea7785d9 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -143,26 +143,6 @@ static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
143 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); 143 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
144} 144}
145 145
146/*
147 * Store a breakpoint's encoded address, length, and type.
148 */
149static int arch_store_info(struct perf_event *bp)
150{
151 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
152
153 /*
154 * User-space requests will always have the address field populated
155 * For kernel-addresses, either the address or symbol name can be
156 * specified.
157 */
158 if (info->name)
159 info->address = (unsigned long)kallsyms_lookup_name(info->name);
160 if (info->address)
161 return 0;
162
163 return -EINVAL;
164}
165
166int arch_bp_generic_fields(int sh_len, int sh_type, 146int arch_bp_generic_fields(int sh_len, int sh_type,
167 int *gen_len, int *gen_type) 147 int *gen_len, int *gen_type)
168{ 148{
@@ -276,10 +256,12 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp,
276 return ret; 256 return ret;
277 } 257 }
278 258
279 ret = arch_store_info(bp); 259 /*
280 260 * For kernel-addresses, either the address or symbol name can be
281 if (ret < 0) 261 * specified.
282 return ret; 262 */
263 if (info->name)
264 info->address = (unsigned long)kallsyms_lookup_name(info->name);
283 265
284 /* 266 /*
285 * Check that the low-order bits of the address are appropriate 267 * Check that the low-order bits of the address are appropriate
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 3459e70eed72..8870d6ba64bf 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -443,7 +443,7 @@ void __init setup_arch(char **cmdline_p)
443 443
444 nodes_clear(node_online_map); 444 nodes_clear(node_online_map);
445 445
446 /* Setup bootmem with available RAM */ 446 pmb_init();
447 lmb_init(); 447 lmb_init();
448 setup_memory(); 448 setup_memory();
449 sparse_init(); 449 sparse_init();
@@ -452,7 +452,6 @@ void __init setup_arch(char **cmdline_p)
452 conswitchp = &dummy_con; 452 conswitchp = &dummy_con;
453#endif 453#endif
454 paging_init(); 454 paging_init();
455 pmb_init();
456 455
457 ioremap_fixed_init(); 456 ioremap_fixed_init();
458 457
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index 953fa1613312..8a0072de2bcc 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -39,12 +39,12 @@ static int null_rtc_set_time(const time_t secs)
39void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; 39void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
40int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; 40int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
41 41
42#ifdef CONFIG_GENERIC_CMOS_UPDATE
43void read_persistent_clock(struct timespec *ts) 42void read_persistent_clock(struct timespec *ts)
44{ 43{
45 rtc_sh_get_time(ts); 44 rtc_sh_get_time(ts);
46} 45}
47 46
47#ifdef CONFIG_GENERIC_CMOS_UPDATE
48int update_persistent_clock(struct timespec now) 48int update_persistent_clock(struct timespec now)
49{ 49{
50 return rtc_sh_set_time(now.tv_sec); 50 return rtc_sh_set_time(now.tv_sec);
@@ -113,9 +113,5 @@ void __init time_init(void)
113 hwblk_init(); 113 hwblk_init();
114 clk_init(); 114 clk_init();
115 115
116 rtc_sh_get_time(&xtime);
117 set_normalized_timespec(&wall_to_monotonic,
118 -xtime.tv_sec, -xtime.tv_nsec);
119
120 late_time_init = sh_late_time_init; 116 late_time_init = sh_late_time_init;
121} 117}
diff --git a/arch/sh/lib/libgcc.h b/arch/sh/lib/libgcc.h
index 3f19d1c5d942..05909d58e2fe 100644
--- a/arch/sh/lib/libgcc.h
+++ b/arch/sh/lib/libgcc.h
@@ -17,8 +17,7 @@ struct DWstruct {
17#error I feel sick. 17#error I feel sick.
18#endif 18#endif
19 19
20typedef union 20typedef union {
21{
22 struct DWstruct s; 21 struct DWstruct s;
23 long long ll; 22 long long ll;
24} DWunion; 23} DWunion;
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index c68d2d7d00a9..1ab2385ecefe 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -34,11 +34,12 @@
34 * caller shouldn't need to know that small detail. 34 * caller shouldn't need to know that small detail.
35 */ 35 */
36void __iomem * __init_refok 36void __iomem * __init_refok
37__ioremap_caller(unsigned long phys_addr, unsigned long size, 37__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
38 pgprot_t pgprot, void *caller) 38 pgprot_t pgprot, void *caller)
39{ 39{
40 struct vm_struct *area; 40 struct vm_struct *area;
41 unsigned long offset, last_addr, addr, orig_addr; 41 unsigned long offset, last_addr, addr, orig_addr;
42 void __iomem *mapped;
42 43
43 /* Don't allow wraparound or zero size */ 44 /* Don't allow wraparound or zero size */
44 last_addr = phys_addr + size - 1; 45 last_addr = phys_addr + size - 1;
@@ -46,6 +47,20 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
46 return NULL; 47 return NULL;
47 48
48 /* 49 /*
50 * If we can't yet use the regular approach, go the fixmap route.
51 */
52 if (!mem_init_done)
53 return ioremap_fixed(phys_addr, size, pgprot);
54
55 /*
56 * First try to remap through the PMB.
57 * PMB entries are all pre-faulted.
58 */
59 mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
60 if (mapped && !IS_ERR(mapped))
61 return mapped;
62
63 /*
49 * Mappings have to be page-aligned 64 * Mappings have to be page-aligned
50 */ 65 */
51 offset = phys_addr & ~PAGE_MASK; 66 offset = phys_addr & ~PAGE_MASK;
@@ -53,12 +68,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
53 size = PAGE_ALIGN(last_addr+1) - phys_addr; 68 size = PAGE_ALIGN(last_addr+1) - phys_addr;
54 69
55 /* 70 /*
56 * If we can't yet use the regular approach, go the fixmap route.
57 */
58 if (!mem_init_done)
59 return ioremap_fixed(phys_addr, offset, size, pgprot);
60
61 /*
62 * Ok, go for it.. 71 * Ok, go for it..
63 */ 72 */
64 area = get_vm_area_caller(size, VM_IOREMAP, caller); 73 area = get_vm_area_caller(size, VM_IOREMAP, caller);
@@ -67,33 +76,10 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
67 area->phys_addr = phys_addr; 76 area->phys_addr = phys_addr;
68 orig_addr = addr = (unsigned long)area->addr; 77 orig_addr = addr = (unsigned long)area->addr;
69 78
70#ifdef CONFIG_PMB 79 if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
71 /* 80 vunmap((void *)orig_addr);
72 * First try to remap through the PMB once a valid VMA has been 81 return NULL;
73 * established. Smaller allocations (or the rest of the size
74 * remaining after a PMB mapping due to the size not being
75 * perfectly aligned on a PMB size boundary) are then mapped
76 * through the UTLB using conventional page tables.
77 *
78 * PMB entries are all pre-faulted.
79 */
80 if (unlikely(phys_addr >= P1SEG)) {
81 unsigned long mapped;
82
83 mapped = pmb_remap(addr, phys_addr, size, pgprot);
84 if (likely(mapped)) {
85 addr += mapped;
86 phys_addr += mapped;
87 size -= mapped;
88 }
89 } 82 }
90#endif
91
92 if (likely(size))
93 if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
94 vunmap((void *)orig_addr);
95 return NULL;
96 }
97 83
98 return (void __iomem *)(offset + (char *)orig_addr); 84 return (void __iomem *)(offset + (char *)orig_addr);
99} 85}
@@ -133,23 +119,11 @@ void __iounmap(void __iomem *addr)
133 if (iounmap_fixed(addr) == 0) 119 if (iounmap_fixed(addr) == 0)
134 return; 120 return;
135 121
136#ifdef CONFIG_PMB
137 /* 122 /*
138 * Purge any PMB entries that may have been established for this 123 * If the PMB handled it, there's nothing else to do.
139 * mapping, then proceed with conventional VMA teardown.
140 *
141 * XXX: Note that due to the way that remove_vm_area() does
142 * matching of the resultant VMA, we aren't able to fast-forward
143 * the address past the PMB space until the end of the VMA where
144 * the page tables reside. As such, unmap_vm_area() will be
145 * forced to linearly scan over the area until it finds the page
146 * tables where PTEs that need to be unmapped actually reside,
147 * which is far from optimal. Perhaps we need to use a separate
148 * VMA for the PMB mappings?
149 * -- PFM.
150 */ 124 */
151 pmb_unmap(vaddr); 125 if (pmb_unmap(addr) == 0)
152#endif 126 return;
153 127
154 p = remove_vm_area((void *)(vaddr & PAGE_MASK)); 128 p = remove_vm_area((void *)(vaddr & PAGE_MASK));
155 if (!p) { 129 if (!p) {
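
For reference, the reworked __ioremap_caller() now resolves a mapping in three
stages: the fixmap path while mem_init_done is still false, then a pre-faulted
PMB mapping via pmb_remap_caller(), and only then conventional page tables
through get_vm_area_caller()/ioremap_page_range(). A minimal sketch of that
ordering (a simplified restatement for orientation, not the patch text; error
handling and the page-table fall-back are elided):

	void __iomem *ioremap_order_sketch(phys_addr_t phys, unsigned long size,
					   pgprot_t prot, void *caller)
	{
		void __iomem *mapped;

		if (!mem_init_done)		/* too early for vmalloc */
			return ioremap_fixed(phys, size, prot);

		mapped = pmb_remap_caller(phys, size, prot, caller);
		if (mapped && !IS_ERR(mapped))	/* PMB covered the whole request */
			return mapped;

		/* otherwise: get_vm_area_caller() + ioremap_page_range() */
		return NULL;
	}
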
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
index 0b78b1e20ef1..7f682e5dafcf 100644
--- a/arch/sh/mm/ioremap_fixed.c
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -45,14 +45,21 @@ void __init ioremap_fixed_init(void)
45} 45}
46 46
47void __init __iomem * 47void __init __iomem *
48ioremap_fixed(resource_size_t phys_addr, unsigned long offset, 48ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
49 unsigned long size, pgprot_t prot)
50{ 49{
51 enum fixed_addresses idx0, idx; 50 enum fixed_addresses idx0, idx;
52 struct ioremap_map *map; 51 struct ioremap_map *map;
53 unsigned int nrpages; 52 unsigned int nrpages;
53 unsigned long offset;
54 int i, slot; 54 int i, slot;
55 55
56 /*
57 * Mappings have to be page-aligned
58 */
59 offset = phys_addr & ~PAGE_MASK;
60 phys_addr &= PAGE_MASK;
61 size = PAGE_ALIGN(phys_addr + size) - phys_addr;
62
56 slot = -1; 63 slot = -1;
57 for (i = 0; i < FIX_N_IOREMAPS; i++) { 64 for (i = 0; i < FIX_N_IOREMAPS; i++) {
58 map = &ioremap_maps[i]; 65 map = &ioremap_maps[i];
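
The page-alignment handling moves into ioremap_fixed() itself now that the
separate offset argument is gone. With 4 KiB pages, a request for 0x100 bytes
at physical 0x12345678 works out as (worked example, addresses made up):

	offset    = 0x12345678 & ~PAGE_MASK;			/* 0x678      */
	phys_addr = 0x12345678 &  PAGE_MASK;			/* 0x12345000 */
	size      = PAGE_ALIGN(0x12345000 + 0x100) - 0x12345000;	/* 0x1000     */

and the caller gets back the fixmap virtual address plus the 0x678 offset.
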
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index 422e92721878..961b34085e3b 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -74,6 +74,9 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
74 start_pfn = start >> PAGE_SHIFT; 74 start_pfn = start >> PAGE_SHIFT;
75 end_pfn = end >> PAGE_SHIFT; 75 end_pfn = end >> PAGE_SHIFT;
76 76
77 pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
78 PAGE_KERNEL);
79
77 lmb_add(start, end - start); 80 lmb_add(start, end - start);
78 81
79 __add_active_range(nid, start_pfn, end_pfn); 82 __add_active_range(nid, start_pfn, end_pfn);
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 198bcff5e96f..a4662e2782c3 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -23,7 +23,8 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <linux/rwlock.h> 26#include <linux/vmalloc.h>
27#include <asm/cacheflush.h>
27#include <asm/sizes.h> 28#include <asm/sizes.h>
28#include <asm/system.h> 29#include <asm/system.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
@@ -52,12 +53,24 @@ struct pmb_entry {
52 struct pmb_entry *link; 53 struct pmb_entry *link;
53}; 54};
54 55
56static struct {
57 unsigned long size;
58 int flag;
59} pmb_sizes[] = {
60 { .size = SZ_512M, .flag = PMB_SZ_512M, },
61 { .size = SZ_128M, .flag = PMB_SZ_128M, },
62 { .size = SZ_64M, .flag = PMB_SZ_64M, },
63 { .size = SZ_16M, .flag = PMB_SZ_16M, },
64};
65
55static void pmb_unmap_entry(struct pmb_entry *, int depth); 66static void pmb_unmap_entry(struct pmb_entry *, int depth);
56 67
57static DEFINE_RWLOCK(pmb_rwlock); 68static DEFINE_RWLOCK(pmb_rwlock);
58static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; 69static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
59static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); 70static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);
60 71
72static unsigned int pmb_iomapping_enabled;
73
61static __always_inline unsigned long mk_pmb_entry(unsigned int entry) 74static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
62{ 75{
63 return (entry & PMB_E_MASK) << PMB_E_SHIFT; 76 return (entry & PMB_E_MASK) << PMB_E_SHIFT;
@@ -73,6 +86,142 @@ static __always_inline unsigned long mk_pmb_data(unsigned int entry)
73 return mk_pmb_entry(entry) | PMB_DATA; 86 return mk_pmb_entry(entry) | PMB_DATA;
74} 87}
75 88
89static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
90{
91 return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
92}
93
94/*
95 * Ensure that the PMB entries match our cache configuration.
96 *
97 * When we are in 32-bit address extended mode, CCR.CB becomes
98 * invalid, so care must be taken to manually adjust cacheable
99 * translations.
100 */
101static __always_inline unsigned long pmb_cache_flags(void)
102{
103 unsigned long flags = 0;
104
105#if defined(CONFIG_CACHE_OFF)
106 flags |= PMB_WT | PMB_UB;
107#elif defined(CONFIG_CACHE_WRITETHROUGH)
108 flags |= PMB_C | PMB_WT | PMB_UB;
109#elif defined(CONFIG_CACHE_WRITEBACK)
110 flags |= PMB_C;
111#endif
112
113 return flags;
114}
115
116/*
117 * Convert typical pgprot value to the PMB equivalent
118 */
119static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
120{
121 unsigned long pmb_flags = 0;
122 u64 flags = pgprot_val(prot);
123
124 if (flags & _PAGE_CACHABLE)
125 pmb_flags |= PMB_C;
126 if (flags & _PAGE_WT)
127 pmb_flags |= PMB_WT | PMB_UB;
128
129 return pmb_flags;
130}
131
132static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
133{
134 return (b->vpn == (a->vpn + a->size)) &&
135 (b->ppn == (a->ppn + a->size)) &&
136 (b->flags == a->flags);
137}
138
139static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
140 unsigned long size)
141{
142 int i;
143
144 read_lock(&pmb_rwlock);
145
146 for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
147 struct pmb_entry *pmbe, *iter;
148 unsigned long span;
149
150 if (!test_bit(i, pmb_map))
151 continue;
152
153 pmbe = &pmb_entry_list[i];
154
155 /*
156 * See if VPN and PPN are bounded by an existing mapping.
157 */
158 if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
159 continue;
160 if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
161 continue;
162
163 /*
164 * Now see if we're in range of a simple mapping.
165 */
166 if (size <= pmbe->size) {
167 read_unlock(&pmb_rwlock);
168 return true;
169 }
170
171 span = pmbe->size;
172
173 /*
174 * Finally for sizes that involve compound mappings, walk
175 * the chain.
176 */
177 for (iter = pmbe->link; iter; iter = iter->link)
178 span += iter->size;
179
180 /*
181 * Nothing else to do if the range requirements are met.
182 */
183 if (size <= span) {
184 read_unlock(&pmb_rwlock);
185 return true;
186 }
187 }
188
189 read_unlock(&pmb_rwlock);
190 return false;
191}
192
193static bool pmb_size_valid(unsigned long size)
194{
195 int i;
196
197 for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
198 if (pmb_sizes[i].size == size)
199 return true;
200
201 return false;
202}
203
204static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
205{
206 return (addr >= P1SEG && (addr + size - 1) < P3SEG);
207}
208
209static inline bool pmb_prot_valid(pgprot_t prot)
210{
211 return (pgprot_val(prot) & _PAGE_USER) == 0;
212}
213
214static int pmb_size_to_flags(unsigned long size)
215{
216 int i;
217
218 for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
219 if (pmb_sizes[i].size == size)
220 return pmb_sizes[i].flag;
221
222 return 0;
223}
224
76static int pmb_alloc_entry(void) 225static int pmb_alloc_entry(void)
77{ 226{
78 int pos; 227 int pos;
@@ -140,33 +289,22 @@ static void pmb_free(struct pmb_entry *pmbe)
140} 289}
141 290
142/* 291/*
143 * Ensure that the PMB entries match our cache configuration. 292 * Must be run uncached.
144 *
145 * When we are in 32-bit address extended mode, CCR.CB becomes
146 * invalid, so care must be taken to manually adjust cacheable
147 * translations.
148 */ 293 */
149static __always_inline unsigned long pmb_cache_flags(void) 294static void __set_pmb_entry(struct pmb_entry *pmbe)
150{ 295{
151 unsigned long flags = 0; 296 unsigned long addr, data;
152 297
153#if defined(CONFIG_CACHE_WRITETHROUGH) 298 addr = mk_pmb_addr(pmbe->entry);
154 flags |= PMB_C | PMB_WT | PMB_UB; 299 data = mk_pmb_data(pmbe->entry);
155#elif defined(CONFIG_CACHE_WRITEBACK)
156 flags |= PMB_C;
157#endif
158 300
159 return flags; 301 jump_to_uncached();
160}
161 302
162/* 303 /* Set V-bit */
163 * Must be run uncached. 304 __raw_writel(pmbe->vpn | PMB_V, addr);
164 */ 305 __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);
165static void __set_pmb_entry(struct pmb_entry *pmbe) 306
166{ 307 back_to_cached();
167 writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry));
168 writel_uncached(pmbe->ppn | pmbe->flags | PMB_V,
169 mk_pmb_data(pmbe->entry));
170} 308}
171 309
172static void __clear_pmb_entry(struct pmb_entry *pmbe) 310static void __clear_pmb_entry(struct pmb_entry *pmbe)
@@ -194,144 +332,155 @@ static void set_pmb_entry(struct pmb_entry *pmbe)
194 spin_unlock_irqrestore(&pmbe->lock, flags); 332 spin_unlock_irqrestore(&pmbe->lock, flags);
195} 333}
196 334
197static struct { 335int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
198 unsigned long size; 336 unsigned long size, pgprot_t prot)
199 int flag;
200} pmb_sizes[] = {
201 { .size = SZ_512M, .flag = PMB_SZ_512M, },
202 { .size = SZ_128M, .flag = PMB_SZ_128M, },
203 { .size = SZ_64M, .flag = PMB_SZ_64M, },
204 { .size = SZ_16M, .flag = PMB_SZ_16M, },
205};
206
207long pmb_remap(unsigned long vaddr, unsigned long phys,
208 unsigned long size, pgprot_t prot)
209{ 337{
210 struct pmb_entry *pmbp, *pmbe; 338 struct pmb_entry *pmbp, *pmbe;
211 unsigned long wanted; 339 unsigned long orig_addr, orig_size;
212 int pmb_flags, i; 340 unsigned long flags, pmb_flags;
213 long err; 341 int i, mapped;
214 u64 flags;
215 342
216 flags = pgprot_val(prot); 343 if (!pmb_addr_valid(vaddr, size))
344 return -EFAULT;
345 if (pmb_mapping_exists(vaddr, phys, size))
346 return 0;
217 347
218 pmb_flags = PMB_WT | PMB_UB; 348 orig_addr = vaddr;
219 349 orig_size = size;
220 /* Convert typical pgprot value to the PMB equivalent */
221 if (flags & _PAGE_CACHABLE) {
222 pmb_flags |= PMB_C;
223 350
224 if ((flags & _PAGE_WT) == 0) 351 flush_tlb_kernel_range(vaddr, vaddr + size);
225 pmb_flags &= ~(PMB_WT | PMB_UB);
226 }
227 352
353 pmb_flags = pgprot_to_pmb_flags(prot);
228 pmbp = NULL; 354 pmbp = NULL;
229 wanted = size;
230 355
231again: 356 do {
232 for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { 357 for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
233 unsigned long flags; 358 if (size < pmb_sizes[i].size)
359 continue;
360
361 pmbe = pmb_alloc(vaddr, phys, pmb_flags |
362 pmb_sizes[i].flag, PMB_NO_ENTRY);
363 if (IS_ERR(pmbe)) {
364 pmb_unmap_entry(pmbp, mapped);
365 return PTR_ERR(pmbe);
366 }
234 367
235 if (size < pmb_sizes[i].size) 368 spin_lock_irqsave(&pmbe->lock, flags);
236 continue;
237 369
238 pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, 370 pmbe->size = pmb_sizes[i].size;
239 PMB_NO_ENTRY);
240 if (IS_ERR(pmbe)) {
241 err = PTR_ERR(pmbe);
242 goto out;
243 }
244 371
245 spin_lock_irqsave(&pmbe->lock, flags); 372 __set_pmb_entry(pmbe);
246 373
247 __set_pmb_entry(pmbe); 374 phys += pmbe->size;
375 vaddr += pmbe->size;
376 size -= pmbe->size;
248 377
249 phys += pmb_sizes[i].size; 378 /*
250 vaddr += pmb_sizes[i].size; 379 * Link adjacent entries that span multiple PMB
251 size -= pmb_sizes[i].size; 380 * entries for easier tear-down.
381 */
382 if (likely(pmbp)) {
383 spin_lock(&pmbp->lock);
384 pmbp->link = pmbe;
385 spin_unlock(&pmbp->lock);
386 }
252 387
253 pmbe->size = pmb_sizes[i].size; 388 pmbp = pmbe;
254 389
255 /* 390 /*
256 * Link adjacent entries that span multiple PMB entries 391 * Instead of trying smaller sizes on every
257 * for easier tear-down. 392 * iteration (even if we succeed in allocating
258 */ 393 * space), try using pmb_sizes[i].size again.
259 if (likely(pmbp)) { 394 */
260 spin_lock(&pmbp->lock); 395 i--;
261 pmbp->link = pmbe; 396 mapped++;
262 spin_unlock(&pmbp->lock); 397
398 spin_unlock_irqrestore(&pmbe->lock, flags);
263 } 399 }
400 } while (size >= SZ_16M);
264 401
265 pmbp = pmbe; 402 flush_cache_vmap(orig_addr, orig_addr + orig_size);
266 403
267 /* 404 return 0;
268 * Instead of trying smaller sizes on every iteration 405}
269 * (even if we succeed in allocating space), try using
270 * pmb_sizes[i].size again.
271 */
272 i--;
273 406
274 spin_unlock_irqrestore(&pmbe->lock, flags); 407void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
275 } 408 pgprot_t prot, void *caller)
409{
410 unsigned long vaddr;
411 phys_addr_t offset, last_addr;
412 phys_addr_t align_mask;
413 unsigned long aligned;
414 struct vm_struct *area;
415 int i, ret;
276 416
277 if (size >= SZ_16M) 417 if (!pmb_iomapping_enabled)
278 goto again; 418 return NULL;
279 419
280 return wanted - size; 420 /*
421 * Small mappings need to go through the TLB.
422 */
423 if (size < SZ_16M)
424 return ERR_PTR(-EINVAL);
425 if (!pmb_prot_valid(prot))
426 return ERR_PTR(-EINVAL);
281 427
282out: 428 for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
283 pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); 429 if (size >= pmb_sizes[i].size)
430 break;
431
432 last_addr = phys + size;
433 align_mask = ~(pmb_sizes[i].size - 1);
434 offset = phys & ~align_mask;
435 phys &= align_mask;
436 aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;
437
438 /*
439 * XXX: This should really start from uncached_end, but this
440 * causes the MMU to reset, so for now we restrict it to the
441 * 0xb000...0xc000 range.
442 */
443 area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
444 P3SEG, caller);
445 if (!area)
446 return NULL;
447
448 area->phys_addr = phys;
449 vaddr = (unsigned long)area->addr;
450
451 ret = pmb_bolt_mapping(vaddr, phys, size, prot);
452 if (unlikely(ret != 0))
453 return ERR_PTR(ret);
284 454
285 return err; 455 return (void __iomem *)(offset + (char *)vaddr);
286} 456}
287 457
288void pmb_unmap(unsigned long addr) 458int pmb_unmap(void __iomem *addr)
289{ 459{
290 struct pmb_entry *pmbe = NULL; 460 struct pmb_entry *pmbe = NULL;
291 int i; 461 unsigned long vaddr = (unsigned long __force)addr;
462 int i, found = 0;
292 463
293 read_lock(&pmb_rwlock); 464 read_lock(&pmb_rwlock);
294 465
295 for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { 466 for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
296 if (test_bit(i, pmb_map)) { 467 if (test_bit(i, pmb_map)) {
297 pmbe = &pmb_entry_list[i]; 468 pmbe = &pmb_entry_list[i];
298 if (pmbe->vpn == addr) 469 if (pmbe->vpn == vaddr) {
470 found = 1;
299 break; 471 break;
472 }
300 } 473 }
301 } 474 }
302 475
303 read_unlock(&pmb_rwlock); 476 read_unlock(&pmb_rwlock);
304 477
305 pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); 478 if (found) {
306} 479 pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
307 480 return 0;
308static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) 481 }
309{
310 return (b->vpn == (a->vpn + a->size)) &&
311 (b->ppn == (a->ppn + a->size)) &&
312 (b->flags == a->flags);
313}
314
315static bool pmb_size_valid(unsigned long size)
316{
317 int i;
318
319 for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
320 if (pmb_sizes[i].size == size)
321 return true;
322
323 return false;
324}
325
326static int pmb_size_to_flags(unsigned long size)
327{
328 int i;
329
330 for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
331 if (pmb_sizes[i].size == size)
332 return pmb_sizes[i].flag;
333 482
334 return 0; 483 return -EINVAL;
335} 484}
336 485
337static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) 486static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
@@ -351,6 +500,8 @@ static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
351 */ 500 */
352 __clear_pmb_entry(pmbe); 501 __clear_pmb_entry(pmbe);
353 502
503 flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);
504
354 pmbe = pmblink->link; 505 pmbe = pmblink->link;
355 506
356 pmb_free(pmblink); 507 pmb_free(pmblink);
@@ -369,11 +520,6 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
369 write_unlock_irqrestore(&pmb_rwlock, flags); 520 write_unlock_irqrestore(&pmb_rwlock, flags);
370} 521}
371 522
372static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
373{
374 return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
375}
376
377static void __init pmb_notify(void) 523static void __init pmb_notify(void)
378{ 524{
379 int i; 525 int i;
@@ -625,6 +771,18 @@ static void __init pmb_resize(void)
625} 771}
626#endif 772#endif
627 773
774static int __init early_pmb(char *p)
775{
776 if (!p)
777 return 0;
778
779 if (strstr(p, "iomap"))
780 pmb_iomapping_enabled = 1;
781
782 return 0;
783}
784early_param("pmb", early_pmb);
785
628void __init pmb_init(void) 786void __init pmb_init(void)
629{ 787{
630 /* Synchronize software state */ 788 /* Synchronize software state */
@@ -713,7 +871,7 @@ static int __init pmb_debugfs_init(void)
713 871
714 return 0; 872 return 0;
715} 873}
716postcore_initcall(pmb_debugfs_init); 874subsys_initcall(pmb_debugfs_init);
717 875
718#ifdef CONFIG_PM 876#ifdef CONFIG_PM
719static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state) 877static int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
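
The PMB side is now opt-in for ioremap: pmb_remap_caller() bails out unless the
kernel was booted with "pmb=iomap" on the command line (the early_pmb() early
param above sets pmb_iomapping_enabled), and requests smaller than 16 MiB are
always left to the TLB. When it does run, the request is padded out to whole
PMB sections before pmb_bolt_mapping() greedily assigns 512M/128M/64M/16M
entries. A sketch of that rounding (illustration only; the helper name is
hypothetical and the size table mirrors pmb_sizes[] above):

	static unsigned long pmb_round_sketch(phys_addr_t phys, unsigned long size)
	{
		static const unsigned long sizes[] = {
			SZ_512M, SZ_128M, SZ_64M, SZ_16M,
		};
		phys_addr_t last = phys + size;
		phys_addr_t align_mask;
		int i;

		if (size < SZ_16M)
			return 0;	/* too small for the PMB; page tables handle it */

		for (i = 0; i < ARRAY_SIZE(sizes); i++)
			if (size >= sizes[i])
				break;

		align_mask = ~(phys_addr_t)(sizes[i] - 1);
		phys &= align_mask;			/* align the base down */
		return ALIGN(last, sizes[i]) - phys;	/* padded length to bolt */
	}
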
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index b75ce8b84c46..5d17e09cb625 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -24,8 +24,10 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <cpu/dma.h> 27#include <linux/pm_runtime.h>
28#include <asm/dma-sh.h> 28
29#include <asm/dmaengine.h>
30
29#include "shdma.h" 31#include "shdma.h"
30 32
31/* DMA descriptor control */ 33/* DMA descriptor control */
@@ -38,30 +40,32 @@ enum sh_dmae_desc_status {
38}; 40};
39 41
40#define NR_DESCS_PER_CHANNEL 32 42#define NR_DESCS_PER_CHANNEL 32
41/* 43/* Default MEMCPY transfer size = 2^2 = 4 bytes */
42 * Define the default configuration for dual address memory-memory transfer. 44#define LOG2_DEFAULT_XFER_SIZE 2
43 * The 0x400 value represents auto-request, external->external.
44 *
45 * And this driver set 4byte burst mode.
46 * If you want to change mode, you need to change RS_DEFAULT of value.
47 * (ex 1byte burst mode -> (RS_DUAL & ~TS_32)
48 */
49#define RS_DEFAULT (RS_DUAL)
50 45
51/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ 46/* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
52static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; 47static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
53 48
54static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); 49static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
55 50
56#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id])
57static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) 51static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
58{ 52{
59 ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg); 53 __raw_writel(data, sh_dc->base + reg / sizeof(u32));
60} 54}
61 55
62static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) 56static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
63{ 57{
64 return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); 58 return __raw_readl(sh_dc->base + reg / sizeof(u32));
59}
60
61static u16 dmaor_read(struct sh_dmae_device *shdev)
62{
63 return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32));
64}
65
66static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
67{
68 __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32));
65} 69}
66 70
67/* 71/*
@@ -69,24 +73,23 @@ static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg)
69 * 73 *
70 * SH7780 has two DMAOR register 74 * SH7780 has two DMAOR register
71 */ 75 */
72static void sh_dmae_ctl_stop(int id) 76static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
73{ 77{
74 unsigned short dmaor = dmaor_read_reg(id); 78 unsigned short dmaor = dmaor_read(shdev);
75 79
76 dmaor &= ~(DMAOR_NMIF | DMAOR_AE); 80 dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
77 dmaor_write_reg(id, dmaor);
78} 81}
79 82
80static int sh_dmae_rst(int id) 83static int sh_dmae_rst(struct sh_dmae_device *shdev)
81{ 84{
82 unsigned short dmaor; 85 unsigned short dmaor;
83 86
84 sh_dmae_ctl_stop(id); 87 sh_dmae_ctl_stop(shdev);
85 dmaor = dmaor_read_reg(id) | DMAOR_INIT; 88 dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;
86 89
87 dmaor_write_reg(id, dmaor); 90 dmaor_write(shdev, dmaor);
88 if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) { 91 if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
89 pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); 92 pr_warning("dma-sh: Can't initialize DMAOR.\n");
90 return -EINVAL; 93 return -EINVAL;
91 } 94 }
92 return 0; 95 return 0;
@@ -102,13 +105,36 @@ static bool dmae_is_busy(struct sh_dmae_chan *sh_chan)
102 return false; /* waiting */ 105 return false; /* waiting */
103} 106}
104 107
105static unsigned int ts_shift[] = TS_SHIFT; 108static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr)
106static inline unsigned int calc_xmit_shift(u32 chcr)
107{ 109{
108 int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | 110 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
109 ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); 111 struct sh_dmae_device, common);
112 struct sh_dmae_pdata *pdata = shdev->pdata;
113 int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) |
114 ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift);
115
116 if (cnt >= pdata->ts_shift_num)
117 cnt = 0;
110 118
111 return ts_shift[cnt]; 119 return pdata->ts_shift[cnt];
120}
121
122static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size)
123{
124 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
125 struct sh_dmae_device, common);
126 struct sh_dmae_pdata *pdata = shdev->pdata;
127 int i;
128
129 for (i = 0; i < pdata->ts_shift_num; i++)
130 if (pdata->ts_shift[i] == l2size)
131 break;
132
133 if (i == pdata->ts_shift_num)
134 i = 0;
135
136 return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) |
137 ((i << pdata->ts_high_shift) & pdata->ts_high_mask);
112} 138}
113 139
114static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) 140static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw)
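
calc_xmit_shift() and the new log2size_to_chcr() translate between the CHCR TS
bit-field and a per-SoC table supplied through sh_dmae_pdata (ts_shift[] plus
the ts_low/ts_high masks and shifts) instead of the old hard-coded TS_SHIFT
array. The table stores log2 of the transfer unit, roughly like this (the
table contents here are hypothetical, not any particular SoC's):

	/* ts_shift[] = { 3, 0, 1, 2, 5 }: a TS field value of 3 selects
	 * log2 size 2, i.e. 1 << 2 = 4-byte units, so a TCR count of N
	 * moves N * 4 bytes -- which is also why the partial-transfer
	 * length computed later in sh_dmae_terminate_all() is
	 * (tcr_start - TCR) << xmit_shift. */
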
@@ -136,8 +162,13 @@ static void dmae_halt(struct sh_dmae_chan *sh_chan)
136 162
137static void dmae_init(struct sh_dmae_chan *sh_chan) 163static void dmae_init(struct sh_dmae_chan *sh_chan)
138{ 164{
139 u32 chcr = RS_DEFAULT; /* default is DUAL mode */ 165 /*
140 sh_chan->xmit_shift = calc_xmit_shift(chcr); 166 * Default configuration for dual address memory-memory transfer.
167 * 0x400 represents auto-request.
168 */
169 u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan,
170 LOG2_DEFAULT_XFER_SIZE);
171 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr);
141 sh_dmae_writel(sh_chan, chcr, CHCR); 172 sh_dmae_writel(sh_chan, chcr, CHCR);
142} 173}
143 174
@@ -147,37 +178,26 @@ static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
147 if (dmae_is_busy(sh_chan)) 178 if (dmae_is_busy(sh_chan))
148 return -EBUSY; 179 return -EBUSY;
149 180
150 sh_chan->xmit_shift = calc_xmit_shift(val); 181 sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val);
151 sh_dmae_writel(sh_chan, val, CHCR); 182 sh_dmae_writel(sh_chan, val, CHCR);
152 183
153 return 0; 184 return 0;
154} 185}
155 186
156#define DMARS_SHIFT 8
157#define DMARS_CHAN_MSK 0x01
158static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) 187static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
159{ 188{
160 u32 addr; 189 struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
161 int shift = 0; 190 struct sh_dmae_device, common);
191 struct sh_dmae_pdata *pdata = shdev->pdata;
192 struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
193 u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
194 int shift = chan_pdata->dmars_bit;
162 195
163 if (dmae_is_busy(sh_chan)) 196 if (dmae_is_busy(sh_chan))
164 return -EBUSY; 197 return -EBUSY;
165 198
166 if (sh_chan->id & DMARS_CHAN_MSK) 199 __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
167 shift = DMARS_SHIFT; 200 addr);
168
169 if (sh_chan->id < 6)
170 /* DMA0RS0 - DMA0RS2 */
171 addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4;
172#ifdef SH_DMARS_BASE1
173 else if (sh_chan->id < 12)
174 /* DMA1RS0 - DMA1RS2 */
175 addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4;
176#endif
177 else
178 return -EINVAL;
179
180 ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr);
181 201
182 return 0; 202 return 0;
183} 203}
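
dmae_set_dmars() likewise stops deriving register addresses from the channel
number: the DMARS offset and bit position now come from the per-channel
platform data. Each 16-bit DMARS register carries the request IDs of two
channels, so the read-modify-write above keeps the neighbouring channel's
byte intact. Worked example (register contents made up):

	/* even channel, dmars_bit == 0:
	 *   (0xab00 & (0xff00 >> 0)) | (0x42 << 0) == 0xab42
	 * odd channel, dmars_bit == 8:
	 *   (0x00cd & (0xff00 >> 8)) | (0x42 << 8) == 0x42cd */
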
@@ -251,15 +271,15 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
251 struct dma_device *dma_dev = sh_chan->common.device; 271 struct dma_device *dma_dev = sh_chan->common.device;
252 struct sh_dmae_device *shdev = container_of(dma_dev, 272 struct sh_dmae_device *shdev = container_of(dma_dev,
253 struct sh_dmae_device, common); 273 struct sh_dmae_device, common);
254 struct sh_dmae_pdata *pdata = &shdev->pdata; 274 struct sh_dmae_pdata *pdata = shdev->pdata;
255 int i; 275 int i;
256 276
257 if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) 277 if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
258 return NULL; 278 return NULL;
259 279
260 for (i = 0; i < pdata->config_num; i++) 280 for (i = 0; i < pdata->slave_num; i++)
261 if (pdata->config[i].slave_id == slave_id) 281 if (pdata->slave[i].slave_id == slave_id)
262 return pdata->config + i; 282 return pdata->slave + i;
263 283
264 return NULL; 284 return NULL;
265} 285}
@@ -270,6 +290,8 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
270 struct sh_desc *desc; 290 struct sh_desc *desc;
271 struct sh_dmae_slave *param = chan->private; 291 struct sh_dmae_slave *param = chan->private;
272 292
293 pm_runtime_get_sync(sh_chan->dev);
294
273 /* 295 /*
274 * This relies on the guarantee from dmaengine that alloc_chan_resources 296 * This relies on the guarantee from dmaengine that alloc_chan_resources
275 * never runs concurrently with itself or free_chan_resources. 297 * never runs concurrently with itself or free_chan_resources.
@@ -288,9 +310,8 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
288 310
289 dmae_set_dmars(sh_chan, cfg->mid_rid); 311 dmae_set_dmars(sh_chan, cfg->mid_rid);
290 dmae_set_chcr(sh_chan, cfg->chcr); 312 dmae_set_chcr(sh_chan, cfg->chcr);
291 } else { 313 } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
292 if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400) 314 dmae_init(sh_chan);
293 dmae_set_chcr(sh_chan, RS_DEFAULT);
294 } 315 }
295 316
296 spin_lock_bh(&sh_chan->desc_lock); 317 spin_lock_bh(&sh_chan->desc_lock);
@@ -312,6 +333,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
312 } 333 }
313 spin_unlock_bh(&sh_chan->desc_lock); 334 spin_unlock_bh(&sh_chan->desc_lock);
314 335
336 if (!sh_chan->descs_allocated)
337 pm_runtime_put(sh_chan->dev);
338
315 return sh_chan->descs_allocated; 339 return sh_chan->descs_allocated;
316} 340}
317 341
@@ -323,6 +347,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
323 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 347 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
324 struct sh_desc *desc, *_desc; 348 struct sh_desc *desc, *_desc;
325 LIST_HEAD(list); 349 LIST_HEAD(list);
350 int descs = sh_chan->descs_allocated;
326 351
327 dmae_halt(sh_chan); 352 dmae_halt(sh_chan);
328 353
@@ -343,6 +368,9 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
343 368
344 spin_unlock_bh(&sh_chan->desc_lock); 369 spin_unlock_bh(&sh_chan->desc_lock);
345 370
371 if (descs > 0)
372 pm_runtime_put(sh_chan->dev);
373
346 list_for_each_entry_safe(desc, _desc, &list, node) 374 list_for_each_entry_safe(desc, _desc, &list, node)
347 kfree(desc); 375 kfree(desc);
348} 376}
@@ -559,6 +587,19 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
559 if (!chan) 587 if (!chan)
560 return; 588 return;
561 589
590 dmae_halt(sh_chan);
591
592 spin_lock_bh(&sh_chan->desc_lock);
593 if (!list_empty(&sh_chan->ld_queue)) {
594 /* Record partial transfer */
595 struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
596 struct sh_desc, node);
597 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
598 sh_chan->xmit_shift;
599
600 }
601 spin_unlock_bh(&sh_chan->desc_lock);
602
562 sh_dmae_chan_ld_cleanup(sh_chan, true); 603 sh_dmae_chan_ld_cleanup(sh_chan, true);
563} 604}
564 605
@@ -661,7 +702,7 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
661 702
662static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 703static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
663{ 704{
664 struct sh_desc *sd; 705 struct sh_desc *desc;
665 706
666 spin_lock_bh(&sh_chan->desc_lock); 707 spin_lock_bh(&sh_chan->desc_lock);
667 /* DMA work check */ 708 /* DMA work check */
@@ -671,10 +712,13 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
671 } 712 }
672 713
673 /* Find the first not transferred desciptor */ 714 /* Find the first not transferred desciptor */
674 list_for_each_entry(sd, &sh_chan->ld_queue, node) 715 list_for_each_entry(desc, &sh_chan->ld_queue, node)
675 if (sd->mark == DESC_SUBMITTED) { 716 if (desc->mark == DESC_SUBMITTED) {
717 dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
718 desc->async_tx.cookie, sh_chan->id,
719 desc->hw.tcr, desc->hw.sar, desc->hw.dar);
676 /* Get the ld start address from ld_queue */ 720 /* Get the ld start address from ld_queue */
677 dmae_set_reg(sh_chan, &sd->hw); 721 dmae_set_reg(sh_chan, &desc->hw);
678 dmae_start(sh_chan); 722 dmae_start(sh_chan);
679 break; 723 break;
680 } 724 }
@@ -696,6 +740,7 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
696 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 740 struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
697 dma_cookie_t last_used; 741 dma_cookie_t last_used;
698 dma_cookie_t last_complete; 742 dma_cookie_t last_complete;
743 enum dma_status status;
699 744
700 sh_dmae_chan_ld_cleanup(sh_chan, false); 745 sh_dmae_chan_ld_cleanup(sh_chan, false);
701 746
@@ -709,7 +754,27 @@ static enum dma_status sh_dmae_is_complete(struct dma_chan *chan,
709 if (used) 754 if (used)
710 *used = last_used; 755 *used = last_used;
711 756
712 return dma_async_is_complete(cookie, last_complete, last_used); 757 spin_lock_bh(&sh_chan->desc_lock);
758
759 status = dma_async_is_complete(cookie, last_complete, last_used);
760
761 /*
762 * If we don't find cookie on the queue, it has been aborted and we have
763 * to report error
764 */
765 if (status != DMA_SUCCESS) {
766 struct sh_desc *desc;
767 status = DMA_ERROR;
768 list_for_each_entry(desc, &sh_chan->ld_queue, node)
769 if (desc->cookie == cookie) {
770 status = DMA_IN_PROGRESS;
771 break;
772 }
773 }
774
775 spin_unlock_bh(&sh_chan->desc_lock);
776
777 return status;
713} 778}
714 779
715static irqreturn_t sh_dmae_interrupt(int irq, void *data) 780static irqreturn_t sh_dmae_interrupt(int irq, void *data)
@@ -732,40 +797,32 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
732#if defined(CONFIG_CPU_SH4) 797#if defined(CONFIG_CPU_SH4)
733static irqreturn_t sh_dmae_err(int irq, void *data) 798static irqreturn_t sh_dmae_err(int irq, void *data)
734{ 799{
735 int err = 0;
736 struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; 800 struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
801 int i;
737 802
738 /* IRQ Multi */ 803 /* halt the dma controller */
739 if (shdev->pdata.mode & SHDMA_MIX_IRQ) { 804 sh_dmae_ctl_stop(shdev);
740 int __maybe_unused cnt = 0; 805
741 switch (irq) { 806 /* We cannot detect, which channel caused the error, have to reset all */
742#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) 807 for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
743 case DMTE6_IRQ: 808 struct sh_dmae_chan *sh_chan = shdev->chan[i];
744 cnt++; 809 if (sh_chan) {
745#endif 810 struct sh_desc *desc;
746 case DMTE0_IRQ: 811 /* Stop the channel */
747 if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { 812 dmae_halt(sh_chan);
748 disable_irq(irq); 813 /* Complete all */
749 return IRQ_HANDLED; 814 list_for_each_entry(desc, &sh_chan->ld_queue, node) {
815 struct dma_async_tx_descriptor *tx = &desc->async_tx;
816 desc->mark = DESC_IDLE;
817 if (tx->callback)
818 tx->callback(tx->callback_param);
750 } 819 }
751 default: 820 list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
752 return IRQ_NONE;
753 } 821 }
754 } else {
755 /* reset dma controller */
756 err = sh_dmae_rst(0);
757 if (err)
758 return err;
759#ifdef SH_DMAC_BASE1
760 if (shdev->pdata.mode & SHDMA_DMAOR1) {
761 err = sh_dmae_rst(1);
762 if (err)
763 return err;
764 }
765#endif
766 disable_irq(irq);
767 return IRQ_HANDLED;
768 } 822 }
823 sh_dmae_rst(shdev);
824
825 return IRQ_HANDLED;
769} 826}
770#endif 827#endif
771 828
@@ -796,19 +853,12 @@ static void dmae_do_tasklet(unsigned long data)
796 sh_dmae_chan_ld_cleanup(sh_chan, false); 853 sh_dmae_chan_ld_cleanup(sh_chan, false);
797} 854}
798 855
799static unsigned int get_dmae_irq(unsigned int id) 856static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
800{ 857 int irq, unsigned long flags)
801 unsigned int irq = 0;
802 if (id < ARRAY_SIZE(dmte_irq_map))
803 irq = dmte_irq_map[id];
804 return irq;
805}
806
807static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
808{ 858{
809 int err; 859 int err;
810 unsigned int irq = get_dmae_irq(id); 860 struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
811 unsigned long irqflags = IRQF_DISABLED; 861 struct platform_device *pdev = to_platform_device(shdev->common.dev);
812 struct sh_dmae_chan *new_sh_chan; 862 struct sh_dmae_chan *new_sh_chan;
813 863
814 /* alloc channel */ 864 /* alloc channel */
@@ -819,8 +869,13 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
819 return -ENOMEM; 869 return -ENOMEM;
820 } 870 }
821 871
872 /* copy struct dma_device */
873 new_sh_chan->common.device = &shdev->common;
874
822 new_sh_chan->dev = shdev->common.dev; 875 new_sh_chan->dev = shdev->common.dev;
823 new_sh_chan->id = id; 876 new_sh_chan->id = id;
877 new_sh_chan->irq = irq;
878 new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32);
824 879
825 /* Init DMA tasklet */ 880 /* Init DMA tasklet */
826 tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, 881 tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
@@ -835,29 +890,20 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id)
835 INIT_LIST_HEAD(&new_sh_chan->ld_queue); 890 INIT_LIST_HEAD(&new_sh_chan->ld_queue);
836 INIT_LIST_HEAD(&new_sh_chan->ld_free); 891 INIT_LIST_HEAD(&new_sh_chan->ld_free);
837 892
838 /* copy struct dma_device */
839 new_sh_chan->common.device = &shdev->common;
840
841 /* Add the channel to DMA device channel list */ 893 /* Add the channel to DMA device channel list */
842 list_add_tail(&new_sh_chan->common.device_node, 894 list_add_tail(&new_sh_chan->common.device_node,
843 &shdev->common.channels); 895 &shdev->common.channels);
844 shdev->common.chancnt++; 896 shdev->common.chancnt++;
845 897
846 if (shdev->pdata.mode & SHDMA_MIX_IRQ) { 898 if (pdev->id >= 0)
847 irqflags = IRQF_SHARED; 899 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
848#if defined(DMTE6_IRQ) 900 "sh-dmae%d.%d", pdev->id, new_sh_chan->id);
849 if (irq >= DMTE6_IRQ) 901 else
850 irq = DMTE6_IRQ; 902 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
851 else 903 "sh-dma%d", new_sh_chan->id);
852#endif
853 irq = DMTE0_IRQ;
854 }
855
856 snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id),
857 "sh-dmae%d", new_sh_chan->id);
858 904
859 /* set up channel irq */ 905 /* set up channel irq */
860 err = request_irq(irq, &sh_dmae_interrupt, irqflags, 906 err = request_irq(irq, &sh_dmae_interrupt, flags,
861 new_sh_chan->dev_id, new_sh_chan); 907 new_sh_chan->dev_id, new_sh_chan);
862 if (err) { 908 if (err) {
863 dev_err(shdev->common.dev, "DMA channel %d request_irq error " 909 dev_err(shdev->common.dev, "DMA channel %d request_irq error "
@@ -881,12 +927,12 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
881 927
882 for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { 928 for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) {
883 if (shdev->chan[i]) { 929 if (shdev->chan[i]) {
884 struct sh_dmae_chan *shchan = shdev->chan[i]; 930 struct sh_dmae_chan *sh_chan = shdev->chan[i];
885 if (!(shdev->pdata.mode & SHDMA_MIX_IRQ))
886 free_irq(dmte_irq_map[i], shchan);
887 931
888 list_del(&shchan->common.device_node); 932 free_irq(sh_chan->irq, sh_chan);
889 kfree(shchan); 933
934 list_del(&sh_chan->common.device_node);
935 kfree(sh_chan);
890 shdev->chan[i] = NULL; 936 shdev->chan[i] = NULL;
891 } 937 }
892 } 938 }
@@ -895,47 +941,84 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
895 941
896static int __init sh_dmae_probe(struct platform_device *pdev) 942static int __init sh_dmae_probe(struct platform_device *pdev)
897{ 943{
898 int err = 0, cnt, ecnt; 944 struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
899 unsigned long irqflags = IRQF_DISABLED; 945 unsigned long irqflags = IRQF_DISABLED,
900#if defined(CONFIG_CPU_SH4) 946 chan_flag[SH_DMAC_MAX_CHANNELS] = {};
901 int eirq[] = { DMAE0_IRQ, 947 int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
902#if defined(DMAE1_IRQ) 948 int err, i, irq_cnt = 0, irqres = 0;
903 DMAE1_IRQ
904#endif
905 };
906#endif
907 struct sh_dmae_device *shdev; 949 struct sh_dmae_device *shdev;
950 struct resource *chan, *dmars, *errirq_res, *chanirq_res;
908 951
909 /* get platform data */ 952 /* get platform data */
910 if (!pdev->dev.platform_data) 953 if (!pdata || !pdata->channel_num)
911 return -ENODEV; 954 return -ENODEV;
912 955
956 chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
957 /* DMARS area is optional, if absent, this controller cannot do slave DMA */
958 dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
959 /*
960 * IRQ resources:
961 * 1. there always must be at least one IRQ IO-resource. On SH4 it is
962 * the error IRQ, in which case it is the only IRQ in this resource:
963 * start == end. If it is the only IRQ resource, all channels also
964 * use the same IRQ.
965 * 2. DMA channel IRQ resources can be specified one per resource or in
966 * ranges (start != end)
967 * 3. iff all events (channels and, optionally, error) on this
968 * controller use the same IRQ, only one IRQ resource can be
969 * specified, otherwise there must be one IRQ per channel, even if
970 * some of them are equal
971 * 4. if all IRQs on this controller are equal or if some specific IRQs
972 * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be
973 * requested with the IRQF_SHARED flag
974 */
975 errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
976 if (!chan || !errirq_res)
977 return -ENODEV;
978
979 if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) {
980 dev_err(&pdev->dev, "DMAC register region already claimed\n");
981 return -EBUSY;
982 }
983
984 if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) {
985 dev_err(&pdev->dev, "DMAC DMARS region already claimed\n");
986 err = -EBUSY;
987 goto ermrdmars;
988 }
989
990 err = -ENOMEM;
913 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); 991 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
914 if (!shdev) { 992 if (!shdev) {
915 dev_err(&pdev->dev, "No enough memory\n"); 993 dev_err(&pdev->dev, "Not enough memory\n");
916 return -ENOMEM; 994 goto ealloc;
995 }
996
997 shdev->chan_reg = ioremap(chan->start, resource_size(chan));
998 if (!shdev->chan_reg)
999 goto emapchan;
1000 if (dmars) {
1001 shdev->dmars = ioremap(dmars->start, resource_size(dmars));
1002 if (!shdev->dmars)
1003 goto emapdmars;
917 } 1004 }
918 1005
919 /* platform data */ 1006 /* platform data */
920 memcpy(&shdev->pdata, pdev->dev.platform_data, 1007 shdev->pdata = pdata;
921 sizeof(struct sh_dmae_pdata)); 1008
1009 pm_runtime_enable(&pdev->dev);
1010 pm_runtime_get_sync(&pdev->dev);
922 1011
923 /* reset dma controller */ 1012 /* reset dma controller */
924 err = sh_dmae_rst(0); 1013 err = sh_dmae_rst(shdev);
925 if (err) 1014 if (err)
926 goto rst_err; 1015 goto rst_err;
927 1016
928 /* SH7780/85/23 has DMAOR1 */
929 if (shdev->pdata.mode & SHDMA_DMAOR1) {
930 err = sh_dmae_rst(1);
931 if (err)
932 goto rst_err;
933 }
934
935 INIT_LIST_HEAD(&shdev->common.channels); 1017 INIT_LIST_HEAD(&shdev->common.channels);
936 1018
937 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); 1019 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
938 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); 1020 if (dmars)
1021 dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
939 1022
940 shdev->common.device_alloc_chan_resources 1023 shdev->common.device_alloc_chan_resources
941 = sh_dmae_alloc_chan_resources; 1024 = sh_dmae_alloc_chan_resources;
@@ -950,37 +1033,72 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
950 1033
951 shdev->common.dev = &pdev->dev; 1034 shdev->common.dev = &pdev->dev;
952 /* Default transfer size of 32 bytes requires 32-byte alignment */ 1035 /* Default transfer size of 32 bytes requires 32-byte alignment */
953 shdev->common.copy_align = 5; 1036 shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
954 1037
955#if defined(CONFIG_CPU_SH4) 1038#if defined(CONFIG_CPU_SH4)
956 /* Non Mix IRQ mode SH7722/SH7730 etc... */ 1039 chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
957 if (shdev->pdata.mode & SHDMA_MIX_IRQ) { 1040
1041 if (!chanirq_res)
1042 chanirq_res = errirq_res;
1043 else
1044 irqres++;
1045
1046 if (chanirq_res == errirq_res ||
1047 (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE)
958 irqflags = IRQF_SHARED; 1048 irqflags = IRQF_SHARED;
959 eirq[0] = DMTE0_IRQ; 1049
960#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) 1050 errirq = errirq_res->start;
961 eirq[1] = DMTE6_IRQ; 1051
962#endif 1052 err = request_irq(errirq, sh_dmae_err, irqflags,
1053 "DMAC Address Error", shdev);
1054 if (err) {
1055 dev_err(&pdev->dev,
1056 "DMA failed requesting irq #%d, error %d\n",
1057 errirq, err);
1058 goto eirq_err;
963 } 1059 }
964 1060
965 for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) { 1061#else
966 err = request_irq(eirq[ecnt], sh_dmae_err, irqflags, 1062 chanirq_res = errirq_res;
967 "DMAC Address Error", shdev); 1063#endif /* CONFIG_CPU_SH4 */
968 if (err) { 1064
969 dev_err(&pdev->dev, "DMA device request_irq" 1065 if (chanirq_res->start == chanirq_res->end &&
970 "error (irq %d) with return %d\n", 1066 !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
971 eirq[ecnt], err); 1067 /* Special case - all multiplexed */
972 goto eirq_err; 1068 for (; irq_cnt < pdata->channel_num; irq_cnt++) {
1069 chan_irq[irq_cnt] = chanirq_res->start;
1070 chan_flag[irq_cnt] = IRQF_SHARED;
973 } 1071 }
1072 } else {
1073 do {
1074 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
1075 if ((errirq_res->flags & IORESOURCE_BITS) ==
1076 IORESOURCE_IRQ_SHAREABLE)
1077 chan_flag[irq_cnt] = IRQF_SHARED;
1078 else
1079 chan_flag[irq_cnt] = IRQF_DISABLED;
1080 dev_dbg(&pdev->dev,
1081 "Found IRQ %d for channel %d\n",
1082 i, irq_cnt);
1083 chan_irq[irq_cnt++] = i;
1084 }
1085 chanirq_res = platform_get_resource(pdev,
1086 IORESOURCE_IRQ, ++irqres);
1087 } while (irq_cnt < pdata->channel_num && chanirq_res);
974 } 1088 }
975#endif /* CONFIG_CPU_SH4 */ 1089
1090 if (irq_cnt < pdata->channel_num)
1091 goto eirqres;
976 1092
977 /* Create DMA Channel */ 1093 /* Create DMA Channel */
978 for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) { 1094 for (i = 0; i < pdata->channel_num; i++) {
979 err = sh_dmae_chan_probe(shdev, cnt); 1095 err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
980 if (err) 1096 if (err)
981 goto chan_probe_err; 1097 goto chan_probe_err;
982 } 1098 }
983 1099
1100 pm_runtime_put(&pdev->dev);
1101
984 platform_set_drvdata(pdev, shdev); 1102 platform_set_drvdata(pdev, shdev);
985 dma_async_device_register(&shdev->common); 1103 dma_async_device_register(&shdev->common);
986 1104
@@ -988,13 +1106,24 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
988 1106
989chan_probe_err: 1107chan_probe_err:
990 sh_dmae_chan_remove(shdev); 1108 sh_dmae_chan_remove(shdev);
991 1109eirqres:
1110#if defined(CONFIG_CPU_SH4)
1111 free_irq(errirq, shdev);
992eirq_err: 1112eirq_err:
993 for (ecnt-- ; ecnt >= 0; ecnt--) 1113#endif
994 free_irq(eirq[ecnt], shdev);
995
996rst_err: 1114rst_err:
1115 pm_runtime_put(&pdev->dev);
1116 if (dmars)
1117 iounmap(shdev->dmars);
1118emapdmars:
1119 iounmap(shdev->chan_reg);
1120emapchan:
997 kfree(shdev); 1121 kfree(shdev);
1122ealloc:
1123 if (dmars)
1124 release_mem_region(dmars->start, resource_size(dmars));
1125ermrdmars:
1126 release_mem_region(chan->start, resource_size(chan));
998 1127
999 return err; 1128 return err;
1000} 1129}
@@ -1002,36 +1131,39 @@ rst_err:
1002static int __exit sh_dmae_remove(struct platform_device *pdev) 1131static int __exit sh_dmae_remove(struct platform_device *pdev)
1003{ 1132{
1004 struct sh_dmae_device *shdev = platform_get_drvdata(pdev); 1133 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1134 struct resource *res;
1135 int errirq = platform_get_irq(pdev, 0);
1005 1136
1006 dma_async_device_unregister(&shdev->common); 1137 dma_async_device_unregister(&shdev->common);
1007 1138
1008 if (shdev->pdata.mode & SHDMA_MIX_IRQ) { 1139 if (errirq > 0)
1009 free_irq(DMTE0_IRQ, shdev); 1140 free_irq(errirq, shdev);
1010#if defined(DMTE6_IRQ)
1011 free_irq(DMTE6_IRQ, shdev);
1012#endif
1013 }
1014 1141
1015 /* channel data remove */ 1142 /* channel data remove */
1016 sh_dmae_chan_remove(shdev); 1143 sh_dmae_chan_remove(shdev);
1017 1144
1018 if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) { 1145 pm_runtime_disable(&pdev->dev);
1019 free_irq(DMAE0_IRQ, shdev); 1146
1020#if defined(DMAE1_IRQ) 1147 if (shdev->dmars)
1021 free_irq(DMAE1_IRQ, shdev); 1148 iounmap(shdev->dmars);
1022#endif 1149 iounmap(shdev->chan_reg);
1023 } 1150
1024 kfree(shdev); 1151 kfree(shdev);
1025 1152
1153 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1154 if (res)
1155 release_mem_region(res->start, resource_size(res));
1156 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1157 if (res)
1158 release_mem_region(res->start, resource_size(res));
1159
1026 return 0; 1160 return 0;
1027} 1161}
1028 1162
1029static void sh_dmae_shutdown(struct platform_device *pdev) 1163static void sh_dmae_shutdown(struct platform_device *pdev)
1030{ 1164{
1031 struct sh_dmae_device *shdev = platform_get_drvdata(pdev); 1165 struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
1032 sh_dmae_ctl_stop(0); 1166 sh_dmae_ctl_stop(shdev);
1033 if (shdev->pdata.mode & SHDMA_DMAOR1)
1034 sh_dmae_ctl_stop(1);
1035} 1167}
1036 1168
1037static struct platform_driver sh_dmae_driver = { 1169static struct platform_driver sh_dmae_driver = {
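
The probe path now discovers everything from platform resources instead of the
old compile-time constants: MEM resource 0 is the channel register block, the
optional MEM resource 1 is DMARS (without it the controller loses DMA_SLAVE
capability), IRQ resource 0 is the error interrupt, and further IRQ resources
(single or ranged, optionally IORESOURCE_IRQ_SHAREABLE) supply the channel
interrupts. A board description would look roughly like this (sketch only;
addresses, IRQ numbers and the variable name are made up):

	static struct resource sh_dmae_resources_sketch[] = {
		{	/* channel register block */
			.start	= 0xfe008020,
			.end	= 0xfe00808f,
			.flags	= IORESOURCE_MEM,
		}, {	/* DMARS block -- optional, enables slave DMA */
			.start	= 0xfe009000,
			.end	= 0xfe00900b,
			.flags	= IORESOURCE_MEM,
		}, {	/* error IRQ -- always the first IRQ resource */
			.start	= 34,
			.end	= 34,
			.flags	= IORESOURCE_IRQ,
		}, {	/* channel IRQs as one shared range for six channels */
			.start	= 48,
			.end	= 53,
			.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE,
		},
	};
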
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 7e227f3c87c4..153609a1e96c 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,23 +17,9 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/list.h> 18#include <linux/list.h>
19 19
20#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ 20#include <asm/dmaengine.h>
21
22struct sh_dmae_regs {
23 u32 sar; /* SAR / source address */
24 u32 dar; /* DAR / destination address */
25 u32 tcr; /* TCR / transfer count */
26};
27 21
28struct sh_desc { 22#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */
29 struct sh_dmae_regs hw;
30 struct list_head node;
31 struct dma_async_tx_descriptor async_tx;
32 enum dma_data_direction direction;
33 dma_cookie_t cookie;
34 int chunks;
35 int mark;
36};
37 23
38struct device; 24struct device;
39 25
@@ -47,14 +33,18 @@ struct sh_dmae_chan {
47 struct tasklet_struct tasklet; /* Tasklet */ 33 struct tasklet_struct tasklet; /* Tasklet */
48 int descs_allocated; /* desc count */ 34 int descs_allocated; /* desc count */
49 int xmit_shift; /* log_2(bytes_per_xfer) */ 35 int xmit_shift; /* log_2(bytes_per_xfer) */
36 int irq;
50 int id; /* Raw id of this channel */ 37 int id; /* Raw id of this channel */
38 u32 __iomem *base;
51 char dev_id[16]; /* unique name per DMAC of channel */ 39 char dev_id[16]; /* unique name per DMAC of channel */
52}; 40};
53 41
54struct sh_dmae_device { 42struct sh_dmae_device {
55 struct dma_device common; 43 struct dma_device common;
56 struct sh_dmae_chan *chan[MAX_DMA_CHANNELS]; 44 struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS];
57 struct sh_dmae_pdata pdata; 45 struct sh_dmae_pdata *pdata;
46 u32 __iomem *chan_reg;
47 u16 __iomem *dmars;
58}; 48};
59 49
60#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) 50#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 746e07033dce..d6ff73395623 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1009,6 +1009,10 @@ config SERIAL_SH_SCI_CONSOLE
1009 depends on SERIAL_SH_SCI=y 1009 depends on SERIAL_SH_SCI=y
1010 select SERIAL_CORE_CONSOLE 1010 select SERIAL_CORE_CONSOLE
1011 1011
1012config SERIAL_SH_SCI_DMA
1013 bool "DMA support"
1014 depends on SERIAL_SH_SCI && SH_DMAE && EXPERIMENTAL
1015
1012config SERIAL_PNX8XXX 1016config SERIAL_PNX8XXX
1013 bool "Enable PNX8XXX SoCs' UART Support" 1017 bool "Enable PNX8XXX SoCs' UART Support"
1014 depends on MIPS && (SOC_PNX8550 || SOC_PNX833X) 1018 depends on MIPS && (SOC_PNX8550 || SOC_PNX833X)
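
The new SERIAL_SH_SCI_DMA option lets the SCI(F) driver hand its FIFO traffic
to the shdma engine; the port then carries per-direction sh_dmae_slave
parameters, descriptors and cookies, as the struct additions below show. For
orientation, a slave channel is typically obtained through the generic
dmaengine filter mechanism, roughly like this (a rough sketch of the pattern,
not the driver's exact code; the slave id is only an example):

	static bool sci_dma_filter_sketch(struct dma_chan *chan, void *arg)
	{
		struct sh_dmae_slave *param = arg;

		chan->private = param;	/* shdma reads the slave config here */
		return true;
	}

	/* ... in the port setup ... */
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	s->param_tx.slave_id = SHDMA_SLAVE_SCIF0_TX;	/* example id */
	s->chan_tx = dma_request_channel(mask, sci_dma_filter_sketch,
					 &s->param_tx);
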
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 42f3333c4ad0..980f39449ee5 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -48,6 +48,9 @@
48#include <linux/ctype.h> 48#include <linux/ctype.h>
49#include <linux/err.h> 49#include <linux/err.h>
50#include <linux/list.h> 50#include <linux/list.h>
51#include <linux/dmaengine.h>
52#include <linux/scatterlist.h>
53#include <linux/timer.h>
51 54
52#ifdef CONFIG_SUPERH 55#ifdef CONFIG_SUPERH
53#include <asm/sh_bios.h> 56#include <asm/sh_bios.h>
@@ -84,6 +87,27 @@ struct sci_port {
84 struct clk *dclk; 87 struct clk *dclk;
85 88
86 struct list_head node; 89 struct list_head node;
90 struct dma_chan *chan_tx;
91 struct dma_chan *chan_rx;
92#ifdef CONFIG_SERIAL_SH_SCI_DMA
93 struct device *dma_dev;
94 enum sh_dmae_slave_chan_id slave_tx;
95 enum sh_dmae_slave_chan_id slave_rx;
96 struct dma_async_tx_descriptor *desc_tx;
97 struct dma_async_tx_descriptor *desc_rx[2];
98 dma_cookie_t cookie_tx;
99 dma_cookie_t cookie_rx[2];
100 dma_cookie_t active_rx;
101 struct scatterlist sg_tx;
102 unsigned int sg_len_tx;
103 struct scatterlist sg_rx[2];
104 size_t buf_len_rx;
105 struct sh_dmae_slave param_tx;
106 struct sh_dmae_slave param_rx;
107 struct work_struct work_tx;
108 struct work_struct work_rx;
109 struct timer_list rx_timer;
110#endif
87}; 111};
88 112
89struct sh_sci_priv { 113struct sh_sci_priv {
@@ -269,29 +293,44 @@ static inline void sci_init_pins(struct uart_port *port, unsigned int cflag)
269 defined(CONFIG_CPU_SUBTYPE_SH7780) || \ 293 defined(CONFIG_CPU_SUBTYPE_SH7780) || \
270 defined(CONFIG_CPU_SUBTYPE_SH7785) || \ 294 defined(CONFIG_CPU_SUBTYPE_SH7785) || \
271 defined(CONFIG_CPU_SUBTYPE_SH7786) 295 defined(CONFIG_CPU_SUBTYPE_SH7786)
272static inline int scif_txroom(struct uart_port *port) 296static int scif_txfill(struct uart_port *port)
273{ 297{
274 return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); 298 return sci_in(port, SCTFDR) & 0xff;
275} 299}
276 300
277static inline int scif_rxroom(struct uart_port *port) 301static int scif_txroom(struct uart_port *port)
302{
303 return SCIF_TXROOM_MAX - scif_txfill(port);
304}
305
306static int scif_rxfill(struct uart_port *port)
278{ 307{
279 return sci_in(port, SCRFDR) & 0xff; 308 return sci_in(port, SCRFDR) & 0xff;
280} 309}
281#elif defined(CONFIG_CPU_SUBTYPE_SH7763) 310#elif defined(CONFIG_CPU_SUBTYPE_SH7763)
282static inline int scif_txroom(struct uart_port *port) 311static int scif_txfill(struct uart_port *port)
283{ 312{
284 if ((port->mapbase == 0xffe00000) || 313 if (port->mapbase == 0xffe00000 ||
285 (port->mapbase == 0xffe08000)) { 314 port->mapbase == 0xffe08000)
286 /* SCIF0/1*/ 315 /* SCIF0/1*/
287 return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); 316 return sci_in(port, SCTFDR) & 0xff;
288 } else { 317 else
289 /* SCIF2 */ 318 /* SCIF2 */
290 return SCIF2_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); 319 return sci_in(port, SCFDR) >> 8;
291 }
292} 320}
293 321
294static inline int scif_rxroom(struct uart_port *port) 322static int scif_txroom(struct uart_port *port)
323{
324 if (port->mapbase == 0xffe00000 ||
325 port->mapbase == 0xffe08000)
326 /* SCIF0/1*/
327 return SCIF_TXROOM_MAX - scif_txfill(port);
328 else
329 /* SCIF2 */
330 return SCIF2_TXROOM_MAX - scif_txfill(port);
331}
332
333static int scif_rxfill(struct uart_port *port)
295{ 334{
296 if ((port->mapbase == 0xffe00000) || 335 if ((port->mapbase == 0xffe00000) ||
297 (port->mapbase == 0xffe08000)) { 336 (port->mapbase == 0xffe08000)) {
@@ -303,23 +342,33 @@ static inline int scif_rxroom(struct uart_port *port)
303 } 342 }
304} 343}
305#else 344#else
306static inline int scif_txroom(struct uart_port *port) 345static int scif_txfill(struct uart_port *port)
346{
347 return sci_in(port, SCFDR) >> 8;
348}
349
350static int scif_txroom(struct uart_port *port)
307{ 351{
308 return SCIF_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); 352 return SCIF_TXROOM_MAX - scif_txfill(port);
309} 353}
310 354
311static inline int scif_rxroom(struct uart_port *port) 355static int scif_rxfill(struct uart_port *port)
312{ 356{
313 return sci_in(port, SCFDR) & SCIF_RFDC_MASK; 357 return sci_in(port, SCFDR) & SCIF_RFDC_MASK;
314} 358}
315#endif 359#endif
316 360
317static inline int sci_txroom(struct uart_port *port) 361static int sci_txfill(struct uart_port *port)
318{ 362{
319 return (sci_in(port, SCxSR) & SCI_TDRE) != 0; 363 return !(sci_in(port, SCxSR) & SCI_TDRE);
320} 364}
321 365
322static inline int sci_rxroom(struct uart_port *port) 366static int sci_txroom(struct uart_port *port)
367{
368 return !sci_txfill(port);
369}
370
371static int sci_rxfill(struct uart_port *port)
323{ 372{
324 return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; 373 return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
325} 374}
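The refactoring above splits the old *room helpers into fill/room pairs: *fill() reports how many characters are currently in the FIFO (or, for the FIFO-less SCI, whether the single holding register is occupied), and *room() is simply the FIFO depth minus the fill. The receive path below can then ask the right question, namely how many characters are waiting, instead of how much space is left. A standalone sketch of that relationship, with the register read and FIFO depth faked:

#include <stdio.h>

#define FIFO_DEPTH 16			/* stands in for SCIF_TXROOM_MAX */

static int fifo_count_reg(void)		/* pretend "sci_in(port, SCTFDR) & 0xff" */
{
	return 5;
}

static int txfill(void) { return fifo_count_reg(); }
static int txroom(void) { return FIFO_DEPTH - txfill(); }

int main(void)
{
	printf("fill=%d room=%d\n", txfill(), txroom());	/* fill=5 room=11 */
	return 0;
}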
@@ -406,9 +455,9 @@ static inline void sci_receive_chars(struct uart_port *port)
406 455
407 while (1) { 456 while (1) {
408 if (port->type == PORT_SCI) 457 if (port->type == PORT_SCI)
409 count = sci_rxroom(port); 458 count = sci_rxfill(port);
410 else 459 else
411 count = scif_rxroom(port); 460 count = scif_rxfill(port);
412 461
413 /* Don't copy more bytes than there is room for in the buffer */ 462 /* Don't copy more bytes than there is room for in the buffer */
414 count = tty_buffer_request_room(tty, count); 463 count = tty_buffer_request_room(tty, count);
@@ -453,10 +502,10 @@ static inline void sci_receive_chars(struct uart_port *port)
453 } 502 }
454 503
455 /* Store data and status */ 504 /* Store data and status */
456 if (status&SCxSR_FER(port)) { 505 if (status & SCxSR_FER(port)) {
457 flag = TTY_FRAME; 506 flag = TTY_FRAME;
458 dev_notice(port->dev, "frame error\n"); 507 dev_notice(port->dev, "frame error\n");
459 } else if (status&SCxSR_PER(port)) { 508 } else if (status & SCxSR_PER(port)) {
460 flag = TTY_PARITY; 509 flag = TTY_PARITY;
461 dev_notice(port->dev, "parity error\n"); 510 dev_notice(port->dev, "parity error\n");
462 } else 511 } else
@@ -618,13 +667,39 @@ static inline int sci_handle_breaks(struct uart_port *port)
618 return copied; 667 return copied;
619} 668}
620 669
621static irqreturn_t sci_rx_interrupt(int irq, void *port) 670static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
622{ 671{
672#ifdef CONFIG_SERIAL_SH_SCI_DMA
673 struct uart_port *port = ptr;
674 struct sci_port *s = to_sci_port(port);
675
676 if (s->chan_rx) {
677 unsigned long tout;
678 u16 scr = sci_in(port, SCSCR);
679 u16 ssr = sci_in(port, SCxSR);
680
681 /* Disable future Rx interrupts */
682 sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
683 /* Clear current interrupt */
684 sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
685 /* Calculate delay for 1.5 DMA buffers */
686 tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
687 port->fifosize / 2;
688 dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
689 tout * 1000 / HZ);
690 if (tout < 2)
691 tout = 2;
692 mod_timer(&s->rx_timer, jiffies + tout);
693
694 return IRQ_HANDLED;
695 }
696#endif
697
623 /* I think sci_receive_chars has to be called irrespective 698 /* I think sci_receive_chars has to be called irrespective
624 * of whether the I_IXOFF is set, otherwise, how is the interrupt 699 * of whether the I_IXOFF is set, otherwise, how is the interrupt
625 * to be disabled? 700 * to be disabled?
626 */ 701 */
627 sci_receive_chars(port); 702 sci_receive_chars(ptr);
628 703
629 return IRQ_HANDLED; 704 return IRQ_HANDLED;
630} 705}
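When a DMA channel owns reception, the interrupt handler above does not drain the FIFO itself: it masks further Rx interrupts, clears the flag, and re-arms the polling timer for roughly 1.5 DMA buffers' worth of character time. Since the serial core sets port->timeout to roughly one FIFO drain time plus HZ/50 of slack, subtracting that slack and scaling by buf_len_rx / fifosize gives the time for one DMA buffer. A worked example with assumed numbers (HZ, FIFO depth, buffer size and timeout are illustrative only):

#include <stdio.h>

int main(void)
{
	const unsigned long HZ = 100;
	const unsigned long port_timeout = 7;	/* jiffies: FIFO drain time + HZ/50 */
	const unsigned long fifosize = 64;	/* e.g. a SCIFA port */
	const unsigned long buf_len_rx = 128;	/* one DMA Rx buffer */
	unsigned long tout;

	/* 1.5 buffers' worth of character time, in jiffies */
	tout = (port_timeout - HZ / 50) * buf_len_rx * 3 / fifosize / 2;
	if (tout < 2)
		tout = 2;

	printf("re-arm Rx timer in %lu jiffies (~%lu ms)\n", tout, tout * 1000 / HZ);
	return 0;
}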
@@ -680,6 +755,7 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
680{ 755{
681 unsigned short ssr_status, scr_status, err_enabled; 756 unsigned short ssr_status, scr_status, err_enabled;
682 struct uart_port *port = ptr; 757 struct uart_port *port = ptr;
758 struct sci_port *s = to_sci_port(port);
683 irqreturn_t ret = IRQ_NONE; 759 irqreturn_t ret = IRQ_NONE;
684 760
685 ssr_status = sci_in(port, SCxSR); 761 ssr_status = sci_in(port, SCxSR);
@@ -687,10 +763,15 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
687 err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); 763 err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE);
688 764
689 /* Tx Interrupt */ 765 /* Tx Interrupt */
690 if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE)) 766 if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) &&
767 !s->chan_tx)
691 ret = sci_tx_interrupt(irq, ptr); 768 ret = sci_tx_interrupt(irq, ptr);
692 /* Rx Interrupt */ 769 /*
693 if ((ssr_status & SCxSR_RDxF(port)) && (scr_status & SCI_CTRL_FLAGS_RIE)) 770 * Rx Interrupt: if we're using DMA, the DMA controller clears RDF /
771 * DR flags
772 */
773 if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) &&
774 (scr_status & SCI_CTRL_FLAGS_RIE))
694 ret = sci_rx_interrupt(irq, ptr); 775 ret = sci_rx_interrupt(irq, ptr);
695 /* Error Interrupt */ 776 /* Error Interrupt */
696 if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) 777 if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled)
@@ -699,6 +780,10 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
699 if ((ssr_status & SCxSR_BRK(port)) && err_enabled) 780 if ((ssr_status & SCxSR_BRK(port)) && err_enabled)
700 ret = sci_br_interrupt(irq, ptr); 781 ret = sci_br_interrupt(irq, ptr);
701 782
783 WARN_ONCE(ret == IRQ_NONE,
784 "%s: %d IRQ %d, status %x, control %x\n", __func__,
785 irq, port->line, ssr_status, scr_status);
786
702 return ret; 787 return ret;
703} 788}
704 789
@@ -800,7 +885,9 @@ static void sci_free_irq(struct sci_port *port)
800static unsigned int sci_tx_empty(struct uart_port *port) 885static unsigned int sci_tx_empty(struct uart_port *port)
801{ 886{
802 unsigned short status = sci_in(port, SCxSR); 887 unsigned short status = sci_in(port, SCxSR);
803 return status & SCxSR_TEND(port) ? TIOCSER_TEMT : 0; 888 unsigned short in_tx_fifo = scif_txfill(port);
889
890 return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
804} 891}
805 892
806static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) 893static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
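The stricter tx_empty() test above reports the transmitter idle only when the shift register has finished (TEND) and the Tx FIFO holds no queued bytes. A trivially small sketch of the combined check, with the register values faked:

#include <stdio.h>

int main(void)
{
	int tend = 1;		/* pretend SCxSR TEND: shift register finished */
	int in_tx_fifo = 3;	/* pretend scif_txfill(): bytes still queued */

	printf("transmitter empty: %s\n", (tend && !in_tx_fifo) ? "yes" : "no");
	return 0;
}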
@@ -812,16 +899,297 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
812 899
813static unsigned int sci_get_mctrl(struct uart_port *port) 900static unsigned int sci_get_mctrl(struct uart_port *port)
814{ 901{
815 /* This routine is used for geting signals of: DTR, DCD, DSR, RI, 902 /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
816 and CTS/RTS */ 903 and CTS/RTS */
817 904
818 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; 905 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR;
819} 906}
820 907
908#ifdef CONFIG_SERIAL_SH_SCI_DMA
909static void sci_dma_tx_complete(void *arg)
910{
911 struct sci_port *s = arg;
912 struct uart_port *port = &s->port;
913 struct circ_buf *xmit = &port->state->xmit;
914 unsigned long flags;
915
916 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
917
918 spin_lock_irqsave(&port->lock, flags);
919
920 xmit->tail += s->sg_tx.length;
921 xmit->tail &= UART_XMIT_SIZE - 1;
922
923 port->icount.tx += s->sg_tx.length;
924
925 async_tx_ack(s->desc_tx);
926 s->cookie_tx = -EINVAL;
927 s->desc_tx = NULL;
928
929 spin_unlock_irqrestore(&port->lock, flags);
930
931 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
932 uart_write_wakeup(port);
933
934 if (uart_circ_chars_pending(xmit))
935 schedule_work(&s->work_tx);
936}
937
938/* Locking: called with port lock held */
939static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty,
940 size_t count)
941{
942 struct uart_port *port = &s->port;
943 int i, active, room;
944
945 room = tty_buffer_request_room(tty, count);
946
947 if (s->active_rx == s->cookie_rx[0]) {
948 active = 0;
949 } else if (s->active_rx == s->cookie_rx[1]) {
950 active = 1;
951 } else {
952 dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
953 return 0;
954 }
955
956 if (room < count)
957 dev_warn(port->dev, "Rx overrun: dropping %u bytes\n",
958 count - room);
959 if (!room)
960 return room;
961
962 for (i = 0; i < room; i++)
963 tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i],
964 TTY_NORMAL);
965
966 port->icount.rx += room;
967
968 return room;
969}
970
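sci_dma_rx_push() above copies a completed DMA buffer into the tty flip buffer, but only as many bytes as the tty layer grants room for; anything beyond that is reported as an overrun. A standalone sketch of that min/drop logic with fabricated buffers:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dma_buf[32] = "payload from the DMA buffer";
	char tty_buf[16];
	size_t count = sizeof(dma_buf);		/* bytes the DMA transfer produced */
	size_t room = sizeof(tty_buf);		/* what tty_buffer_request_room() granted */
	size_t copy = room < count ? room : count;

	memcpy(tty_buf, dma_buf, copy);
	if (copy < count)
		printf("Rx overrun: dropping %zu bytes\n", count - copy);
	printf("pushed %zu bytes to the tty\n", copy);
	return 0;
}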
971static void sci_dma_rx_complete(void *arg)
972{
973 struct sci_port *s = arg;
974 struct uart_port *port = &s->port;
975 struct tty_struct *tty = port->state->port.tty;
976 unsigned long flags;
977 int count;
978
979 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
980
981 spin_lock_irqsave(&port->lock, flags);
982
983 count = sci_dma_rx_push(s, tty, s->buf_len_rx);
984
985 mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
986
987 spin_unlock_irqrestore(&port->lock, flags);
988
989 if (count)
990 tty_flip_buffer_push(tty);
991
992 schedule_work(&s->work_rx);
993}
994
995static void sci_start_rx(struct uart_port *port);
996static void sci_start_tx(struct uart_port *port);
997
998static void sci_rx_dma_release(struct sci_port *s, bool enable_pio)
999{
1000 struct dma_chan *chan = s->chan_rx;
1001 struct uart_port *port = &s->port;
1002
1003 s->chan_rx = NULL;
1004 s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL;
1005 dma_release_channel(chan);
1006 dma_free_coherent(port->dev, s->buf_len_rx * 2,
1007 sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0]));
1008 if (enable_pio)
1009 sci_start_rx(port);
1010}
1011
1012static void sci_tx_dma_release(struct sci_port *s, bool enable_pio)
1013{
1014 struct dma_chan *chan = s->chan_tx;
1015 struct uart_port *port = &s->port;
1016
1017 s->chan_tx = NULL;
1018 s->cookie_tx = -EINVAL;
1019 dma_release_channel(chan);
1020 if (enable_pio)
1021 sci_start_tx(port);
1022}
1023
1024static void sci_submit_rx(struct sci_port *s)
1025{
1026 struct dma_chan *chan = s->chan_rx;
1027 int i;
1028
1029 for (i = 0; i < 2; i++) {
1030 struct scatterlist *sg = &s->sg_rx[i];
1031 struct dma_async_tx_descriptor *desc;
1032
1033 desc = chan->device->device_prep_slave_sg(chan,
1034 sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
1035
1036 if (desc) {
1037 s->desc_rx[i] = desc;
1038 desc->callback = sci_dma_rx_complete;
1039 desc->callback_param = s;
1040 s->cookie_rx[i] = desc->tx_submit(desc);
1041 }
1042
1043 if (!desc || s->cookie_rx[i] < 0) {
1044 if (i) {
1045 async_tx_ack(s->desc_rx[0]);
1046 s->cookie_rx[0] = -EINVAL;
1047 }
1048 if (desc) {
1049 async_tx_ack(desc);
1050 s->cookie_rx[i] = -EINVAL;
1051 }
1052 dev_warn(s->port.dev,
1053 "failed to re-start DMA, using PIO\n");
1054 sci_rx_dma_release(s, true);
1055 return;
1056 }
1057 }
1058
1059 s->active_rx = s->cookie_rx[0];
1060
1061 dma_async_issue_pending(chan);
1062}
1063
1064static void work_fn_rx(struct work_struct *work)
1065{
1066 struct sci_port *s = container_of(work, struct sci_port, work_rx);
1067 struct uart_port *port = &s->port;
1068 struct dma_async_tx_descriptor *desc;
1069 int new;
1070
1071 if (s->active_rx == s->cookie_rx[0]) {
1072 new = 0;
1073 } else if (s->active_rx == s->cookie_rx[1]) {
1074 new = 1;
1075 } else {
1076 dev_err(port->dev, "cookie %d not found!\n", s->active_rx);
1077 return;
1078 }
1079 desc = s->desc_rx[new];
1080
1081 if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) !=
1082 DMA_SUCCESS) {
1083 /* Handle incomplete DMA receive */
1084 struct tty_struct *tty = port->state->port.tty;
1085 struct dma_chan *chan = s->chan_rx;
1086 struct sh_desc *sh_desc = container_of(desc, struct sh_desc,
1087 async_tx);
1088 unsigned long flags;
1089 int count;
1090
1091 chan->device->device_terminate_all(chan);
1092 dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
1093 sh_desc->partial, sh_desc->cookie);
1094
1095 spin_lock_irqsave(&port->lock, flags);
1096 count = sci_dma_rx_push(s, tty, sh_desc->partial);
1097 spin_unlock_irqrestore(&port->lock, flags);
1098
1099 if (count)
1100 tty_flip_buffer_push(tty);
1101
1102 sci_submit_rx(s);
1103
1104 return;
1105 }
1106
1107 s->cookie_rx[new] = desc->tx_submit(desc);
1108 if (s->cookie_rx[new] < 0) {
1109 dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n");
1110 sci_rx_dma_release(s, true);
1111 return;
1112 }
1113
1114 dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
1115 s->cookie_rx[new], new);
1116
1117 s->active_rx = s->cookie_rx[!new];
1118}
1119
1120static void work_fn_tx(struct work_struct *work)
1121{
1122 struct sci_port *s = container_of(work, struct sci_port, work_tx);
1123 struct dma_async_tx_descriptor *desc;
1124 struct dma_chan *chan = s->chan_tx;
1125 struct uart_port *port = &s->port;
1126 struct circ_buf *xmit = &port->state->xmit;
1127 struct scatterlist *sg = &s->sg_tx;
1128
1129 /*
1130 * DMA is idle now.
1131 * Port xmit buffer is already mapped, and it is one page... Just adjust
1132 * offsets and lengths. Since it is a circular buffer, we have to
1133 * transmit till the end, and then the rest. Take the port lock to get a
1134 * consistent xmit buffer state.
1135 */
1136 spin_lock_irq(&port->lock);
1137 sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
1138 sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
1139 sg->offset;
1140 sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
1141 CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
1142 sg->dma_length = sg->length;
1143 spin_unlock_irq(&port->lock);
1144
1145 BUG_ON(!sg->length);
1146
1147 desc = chan->device->device_prep_slave_sg(chan,
1148 sg, s->sg_len_tx, DMA_TO_DEVICE,
1149 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1150 if (!desc) {
1151 /* switch to PIO */
1152 sci_tx_dma_release(s, true);
1153 return;
1154 }
1155
1156 dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE);
1157
1158 spin_lock_irq(&port->lock);
1159 s->desc_tx = desc;
1160 desc->callback = sci_dma_tx_complete;
1161 desc->callback_param = s;
1162 spin_unlock_irq(&port->lock);
1163 s->cookie_tx = desc->tx_submit(desc);
1164 if (s->cookie_tx < 0) {
1165 dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
1166 /* switch to PIO */
1167 sci_tx_dma_release(s, true);
1168 return;
1169 }
1170
1171 dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__,
1172 xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
1173
1174 dma_async_issue_pending(chan);
1175}
1176#endif
1177
821static void sci_start_tx(struct uart_port *port) 1178static void sci_start_tx(struct uart_port *port)
822{ 1179{
823 unsigned short ctrl; 1180 unsigned short ctrl;
824 1181
1182#ifdef CONFIG_SERIAL_SH_SCI_DMA
1183 struct sci_port *s = to_sci_port(port);
1184
1185 if (s->chan_tx) {
1186 if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
1187 schedule_work(&s->work_tx);
1188
1189 return;
1190 }
1191#endif
1192
825 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ 1193 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
826 ctrl = sci_in(port, SCSCR); 1194 ctrl = sci_in(port, SCSCR);
827 ctrl |= SCI_CTRL_FLAGS_TIE; 1195 ctrl |= SCI_CTRL_FLAGS_TIE;
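work_fn_tx() above maps at most one contiguous run of the circular transmit buffer per DMA transfer: the run starts at the tail offset within the page-sized, power-of-two buffer and ends either at the head or at the end of the buffer, whichever comes first, which is what CIRC_CNT_TO_END() yields. A standalone sketch of that arithmetic with a made-up head/tail pair; circ_cnt() and circ_cnt_to_end() are simplified stand-ins for the kernel macros.

#include <stdio.h>

#define XMIT_SIZE 4096	/* stands in for UART_XMIT_SIZE: one page, power of two */

static int circ_cnt(int head, int tail)
{
	return (head - tail) & (XMIT_SIZE - 1);
}

static int circ_cnt_to_end(int head, int tail)
{
	int to_end = XMIT_SIZE - tail;
	int cnt = circ_cnt(head, tail);

	return cnt < to_end ? cnt : to_end;
}

int main(void)
{
	int head = 100, tail = 4000;	/* the ring has wrapped around */
	int offset = tail & (XMIT_SIZE - 1);
	int len = circ_cnt_to_end(head, tail);

	printf("this pass: offset %d, length %d (pending in total: %d)\n",
	       offset, len, circ_cnt(head, tail));
	return 0;
}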
@@ -838,13 +1206,12 @@ static void sci_stop_tx(struct uart_port *port)
838 sci_out(port, SCSCR, ctrl); 1206 sci_out(port, SCSCR, ctrl);
839} 1207}
840 1208
841static void sci_start_rx(struct uart_port *port, unsigned int tty_start) 1209static void sci_start_rx(struct uart_port *port)
842{ 1210{
843 unsigned short ctrl; 1211 unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE;
844 1212
845 /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ 1213 /* Set RIE (Receive Interrupt Enable) bit in SCSCR */
846 ctrl = sci_in(port, SCSCR); 1214 ctrl |= sci_in(port, SCSCR);
847 ctrl |= SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE;
848 sci_out(port, SCSCR, ctrl); 1215 sci_out(port, SCSCR, ctrl);
849} 1216}
850 1217
@@ -868,16 +1235,154 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
868 /* Nothing here yet .. */ 1235 /* Nothing here yet .. */
869} 1236}
870 1237
1238#ifdef CONFIG_SERIAL_SH_SCI_DMA
1239static bool filter(struct dma_chan *chan, void *slave)
1240{
1241 struct sh_dmae_slave *param = slave;
1242
1243 dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__,
1244 param->slave_id);
1245
1246 if (param->dma_dev == chan->device->dev) {
1247 chan->private = param;
1248 return true;
1249 } else {
1250 return false;
1251 }
1252}
1253
1254static void rx_timer_fn(unsigned long arg)
1255{
1256 struct sci_port *s = (struct sci_port *)arg;
1257 struct uart_port *port = &s->port;
1258
1259 u16 scr = sci_in(port, SCSCR);
1260 sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
1261 dev_dbg(port->dev, "DMA Rx timed out\n");
1262 schedule_work(&s->work_rx);
1263}
1264
1265static void sci_request_dma(struct uart_port *port)
1266{
1267 struct sci_port *s = to_sci_port(port);
1268 struct sh_dmae_slave *param;
1269 struct dma_chan *chan;
1270 dma_cap_mask_t mask;
1271 int nent;
1272
1273 dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__,
1274 port->line, s->dma_dev);
1275
1276 if (!s->dma_dev)
1277 return;
1278
1279 dma_cap_zero(mask);
1280 dma_cap_set(DMA_SLAVE, mask);
1281
1282 param = &s->param_tx;
1283
1284 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */
1285 param->slave_id = s->slave_tx;
1286 param->dma_dev = s->dma_dev;
1287
1288 s->cookie_tx = -EINVAL;
1289 chan = dma_request_channel(mask, filter, param);
1290 dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
1291 if (chan) {
1292 s->chan_tx = chan;
1293 sg_init_table(&s->sg_tx, 1);
1294 /* UART circular tx buffer is an aligned page. */
1295 BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
1296 sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
1297 UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK);
1298 nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
1299 if (!nent)
1300 sci_tx_dma_release(s, false);
1301 else
1302 dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
1303 sg_dma_len(&s->sg_tx),
1304 port->state->xmit.buf, sg_dma_address(&s->sg_tx));
1305
1306 s->sg_len_tx = nent;
1307
1308 INIT_WORK(&s->work_tx, work_fn_tx);
1309 }
1310
1311 param = &s->param_rx;
1312
1313 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */
1314 param->slave_id = s->slave_rx;
1315 param->dma_dev = s->dma_dev;
1316
1317 chan = dma_request_channel(mask, filter, param);
1318 dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
1319 if (chan) {
1320 dma_addr_t dma[2];
1321 void *buf[2];
1322 int i;
1323
1324 s->chan_rx = chan;
1325
1326 s->buf_len_rx = 2 * max(16, (int)port->fifosize);
1327 buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2,
1328 &dma[0], GFP_KERNEL);
1329
1330 if (!buf[0]) {
1331 dev_warn(port->dev,
1332 "failed to allocate dma buffer, using PIO\n");
1333 sci_rx_dma_release(s, true);
1334 return;
1335 }
1336
1337 buf[1] = buf[0] + s->buf_len_rx;
1338 dma[1] = dma[0] + s->buf_len_rx;
1339
1340 for (i = 0; i < 2; i++) {
1341 struct scatterlist *sg = &s->sg_rx[i];
1342
1343 sg_init_table(sg, 1);
1344 sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
1345 (int)buf[i] & ~PAGE_MASK);
1346 sg->dma_address = dma[i];
1347 sg->dma_length = sg->length;
1348 }
1349
1350 INIT_WORK(&s->work_rx, work_fn_rx);
1351 setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
1352
1353 sci_submit_rx(s);
1354 }
1355}
1356
1357static void sci_free_dma(struct uart_port *port)
1358{
1359 struct sci_port *s = to_sci_port(port);
1360
1361 if (!s->dma_dev)
1362 return;
1363
1364 if (s->chan_tx)
1365 sci_tx_dma_release(s, false);
1366 if (s->chan_rx)
1367 sci_rx_dma_release(s, false);
1368}
1369#endif
1370
871static int sci_startup(struct uart_port *port) 1371static int sci_startup(struct uart_port *port)
872{ 1372{
873 struct sci_port *s = to_sci_port(port); 1373 struct sci_port *s = to_sci_port(port);
874 1374
1375 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1376
875 if (s->enable) 1377 if (s->enable)
876 s->enable(port); 1378 s->enable(port);
877 1379
878 sci_request_irq(s); 1380 sci_request_irq(s);
1381#ifdef CONFIG_SERIAL_SH_SCI_DMA
1382 sci_request_dma(port);
1383#endif
879 sci_start_tx(port); 1384 sci_start_tx(port);
880 sci_start_rx(port, 1); 1385 sci_start_rx(port);
881 1386
882 return 0; 1387 return 0;
883} 1388}
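sci_request_dma() above obtains its channels with dma_request_channel(), passing filter() as the selection callback: a candidate channel is accepted only if it belongs to the DMA controller named in the platform data, and on a match the slave configuration (the sh_dmae_slave carrying the SCIF's slave ID) is attached via chan->private. A simplified, self-contained sketch of that filter pattern follows; the types and names here are stand-ins, not the dmaengine API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct fake_chan {
	const char *controller;		/* which DMAC this channel belongs to */
	const void *private_cfg;	/* slave config attached on a match */
};

struct fake_slave {
	const char *controller;		/* wanted DMAC, from platform data */
	int slave_id;			/* e.g. a "SCIF0 Tx" request line */
};

static bool filter(struct fake_chan *chan, void *arg)
{
	struct fake_slave *param = arg;

	if (strcmp(param->controller, chan->controller))
		return false;		/* channel lives on a different DMAC */

	chan->private_cfg = param;	/* hand the slave config to the channel */
	return true;
}

int main(void)
{
	struct fake_chan chan = { "dma0", NULL };
	struct fake_slave tx = { "dma0", 3 };

	printf("channel accepted: %d\n", filter(&chan, &tx));
	return 0;
}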
@@ -886,8 +1391,13 @@ static void sci_shutdown(struct uart_port *port)
886{ 1391{
887 struct sci_port *s = to_sci_port(port); 1392 struct sci_port *s = to_sci_port(port);
888 1393
1394 dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
1395
889 sci_stop_rx(port); 1396 sci_stop_rx(port);
890 sci_stop_tx(port); 1397 sci_stop_tx(port);
1398#ifdef CONFIG_SERIAL_SH_SCI_DMA
1399 sci_free_dma(port);
1400#endif
891 sci_free_irq(s); 1401 sci_free_irq(s);
892 1402
893 if (s->disable) 1403 if (s->disable)
@@ -937,6 +1447,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
937 1447
938 sci_out(port, SCSMR, smr_val); 1448 sci_out(port, SCSMR, smr_val);
939 1449
1450 dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t,
1451 SCSCR_INIT(port));
1452
940 if (t > 0) { 1453 if (t > 0) {
941 if (t >= 256) { 1454 if (t >= 256) {
942 sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); 1455 sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1);
@@ -954,7 +1467,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
954 sci_out(port, SCSCR, SCSCR_INIT(port)); 1467 sci_out(port, SCSCR, SCSCR_INIT(port));
955 1468
956 if ((termios->c_cflag & CREAD) != 0) 1469 if ((termios->c_cflag & CREAD) != 0)
957 sci_start_rx(port, 0); 1470 sci_start_rx(port);
958} 1471}
959 1472
960static const char *sci_type(struct uart_port *port) 1473static const char *sci_type(struct uart_port *port)
@@ -1049,19 +1562,21 @@ static void __devinit sci_init_single(struct platform_device *dev,
1049 unsigned int index, 1562 unsigned int index,
1050 struct plat_sci_port *p) 1563 struct plat_sci_port *p)
1051{ 1564{
1052 sci_port->port.ops = &sci_uart_ops; 1565 struct uart_port *port = &sci_port->port;
1053 sci_port->port.iotype = UPIO_MEM; 1566
1054 sci_port->port.line = index; 1567 port->ops = &sci_uart_ops;
1568 port->iotype = UPIO_MEM;
1569 port->line = index;
1055 1570
1056 switch (p->type) { 1571 switch (p->type) {
1057 case PORT_SCIFA: 1572 case PORT_SCIFA:
1058 sci_port->port.fifosize = 64; 1573 port->fifosize = 64;
1059 break; 1574 break;
1060 case PORT_SCIF: 1575 case PORT_SCIF:
1061 sci_port->port.fifosize = 16; 1576 port->fifosize = 16;
1062 break; 1577 break;
1063 default: 1578 default:
1064 sci_port->port.fifosize = 1; 1579 port->fifosize = 1;
1065 break; 1580 break;
1066 } 1581 }
1067 1582
@@ -1070,19 +1585,28 @@ static void __devinit sci_init_single(struct platform_device *dev,
1070 sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); 1585 sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
1071 sci_port->enable = sci_clk_enable; 1586 sci_port->enable = sci_clk_enable;
1072 sci_port->disable = sci_clk_disable; 1587 sci_port->disable = sci_clk_disable;
1073 sci_port->port.dev = &dev->dev; 1588 port->dev = &dev->dev;
1074 } 1589 }
1075 1590
1076 sci_port->break_timer.data = (unsigned long)sci_port; 1591 sci_port->break_timer.data = (unsigned long)sci_port;
1077 sci_port->break_timer.function = sci_break_timer; 1592 sci_port->break_timer.function = sci_break_timer;
1078 init_timer(&sci_port->break_timer); 1593 init_timer(&sci_port->break_timer);
1079 1594
1080 sci_port->port.mapbase = p->mapbase; 1595 port->mapbase = p->mapbase;
1081 sci_port->port.membase = p->membase; 1596 port->membase = p->membase;
1082 1597
1083 sci_port->port.irq = p->irqs[SCIx_TXI_IRQ]; 1598 port->irq = p->irqs[SCIx_TXI_IRQ];
1084 sci_port->port.flags = p->flags; 1599 port->flags = p->flags;
1085 sci_port->type = sci_port->port.type = p->type; 1600 sci_port->type = port->type = p->type;
1601
1602#ifdef CONFIG_SERIAL_SH_SCI_DMA
1603 sci_port->dma_dev = p->dma_dev;
1604 sci_port->slave_tx = p->dma_slave_tx;
1605 sci_port->slave_rx = p->dma_slave_rx;
1606
1607 dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__,
1608 p->dma_dev, p->dma_slave_tx, p->dma_slave_rx);
1609#endif
1086 1610
1087 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs)); 1611 memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
1088} 1612}
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 1c297ddc9d5a..1b177d29a7f0 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -2,6 +2,7 @@
2#define __LINUX_SERIAL_SCI_H 2#define __LINUX_SERIAL_SCI_H
3 3
4#include <linux/serial_core.h> 4#include <linux/serial_core.h>
5#include <asm/dmaengine.h>
5 6
6/* 7/*
7 * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) 8 * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts)
@@ -16,6 +17,8 @@ enum {
16 SCIx_NR_IRQS, 17 SCIx_NR_IRQS,
17}; 18};
18 19
20struct device;
21
19/* 22/*
20 * Platform device specific platform_data struct 23 * Platform device specific platform_data struct
21 */ 24 */
@@ -26,6 +29,9 @@ struct plat_sci_port {
26 unsigned int type; /* SCI / SCIF / IRDA */ 29 unsigned int type; /* SCI / SCIF / IRDA */
27 upf_t flags; /* UPF_* flags */ 30 upf_t flags; /* UPF_* flags */
28 char *clk; /* clock string */ 31 char *clk; /* clock string */
32 struct device *dma_dev;
33 enum sh_dmae_slave_chan_id dma_slave_tx;
34 enum sh_dmae_slave_chan_id dma_slave_rx;
29}; 35};
30 36
31#endif /* __LINUX_SERIAL_SCI_H */ 37#endif /* __LINUX_SERIAL_SCI_H */
diff --git a/sound/soc/sh/siu.h b/sound/soc/sh/siu.h
index 9cc04ab2bce7..c0bfab8fed3d 100644
--- a/sound/soc/sh/siu.h
+++ b/sound/soc/sh/siu.h
@@ -72,7 +72,7 @@ struct siu_firmware {
72#include <linux/interrupt.h> 72#include <linux/interrupt.h>
73#include <linux/io.h> 73#include <linux/io.h>
74 74
75#include <asm/dma-sh.h> 75#include <asm/dmaengine.h>
76 76
77#include <sound/core.h> 77#include <sound/core.h>
78#include <sound/pcm.h> 78#include <sound/pcm.h>
diff --git a/sound/soc/sh/siu_pcm.c b/sound/soc/sh/siu_pcm.c
index c5efc30f0136..ba7f8d05d977 100644
--- a/sound/soc/sh/siu_pcm.c
+++ b/sound/soc/sh/siu_pcm.c
@@ -32,7 +32,7 @@
32#include <sound/pcm_params.h> 32#include <sound/pcm_params.h>
33#include <sound/soc-dai.h> 33#include <sound/soc-dai.h>
34 34
35#include <asm/dma-sh.h> 35#include <asm/dmaengine.h>
36#include <asm/siu.h> 36#include <asm/siu.h>
37 37
38#include "siu.h" 38#include "siu.h"