Diffstat (limited to 'arch/mips/au1000/common')
-rw-r--r--   arch/mips/au1000/common/au1xxx_irqmap.c |  32
-rw-r--r--   arch/mips/au1000/common/cputable.c      |   3
-rw-r--r--   arch/mips/au1000/common/dbdma.c         | 298
-rw-r--r--   arch/mips/au1000/common/irq.c           |   4
-rw-r--r--   arch/mips/au1000/common/usbdev.c        |  12
5 files changed, 244 insertions, 105 deletions
diff --git a/arch/mips/au1000/common/au1xxx_irqmap.c b/arch/mips/au1000/common/au1xxx_irqmap.c
index 8a0f39f67c59..0b2c03c52319 100644
--- a/arch/mips/au1000/common/au1xxx_irqmap.c
+++ b/arch/mips/au1000/common/au1xxx_irqmap.c
@@ -173,14 +173,14 @@ au1xxx_irq_map_t au1xxx_ic0_map[] = {
 	{ AU1550_PSC1_INT, INTC_INT_HIGH_LEVEL, 0},
 	{ AU1550_PSC2_INT, INTC_INT_HIGH_LEVEL, 0},
 	{ AU1550_PSC3_INT, INTC_INT_HIGH_LEVEL, 0},
-	{ AU1550_TOY_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1550_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1550_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1550_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
-	{ AU1550_RTC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1550_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1550_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1550_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
+	{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
 	{ AU1550_NAND_INT, INTC_INT_RISE_EDGE, 0},
 	{ AU1550_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
 	{ AU1550_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
@@ -201,14 +201,14 @@ au1xxx_irq_map_t au1xxx_ic0_map[] = {
 	{ AU1200_PSC1_INT, INTC_INT_HIGH_LEVEL, 0},
 	{ AU1200_AES_INT, INTC_INT_HIGH_LEVEL, 0},
 	{ AU1200_CAMERA_INT, INTC_INT_HIGH_LEVEL, 0},
-	{ AU1200_TOY_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1200_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1200_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1200_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
-	{ AU1200_RTC_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1200_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1200_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
-	{ AU1200_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
+	{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
+	{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
 	{ AU1200_NAND_INT, INTC_INT_RISE_EDGE, 0},
 	{ AU1200_USB_INT, INTC_INT_HIGH_LEVEL, 0 },
 	{ AU1200_LCD_INT, INTC_INT_HIGH_LEVEL, 0},
diff --git a/arch/mips/au1000/common/cputable.c b/arch/mips/au1000/common/cputable.c
index f5521dfccfd6..4dbde82c8215 100644
--- a/arch/mips/au1000/common/cputable.c
+++ b/arch/mips/au1000/common/cputable.c
@@ -37,7 +37,8 @@ struct cpu_spec cpu_specs[] = {
 	{ 0xffffffff, 0x02030203, "Au1100 BD", 0, 1 },
 	{ 0xffffffff, 0x02030204, "Au1100 BE", 0, 1 },
 	{ 0xffffffff, 0x03030200, "Au1550 AA", 0, 1 },
-	{ 0xffffffff, 0x04030200, "Au1200 AA", 0, 1 },
+	{ 0xffffffff, 0x04030200, "Au1200 AB", 0, 0 },
+	{ 0xffffffff, 0x04030201, "Au1200 AC", 0, 1 },
 	{ 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0 },
 };
 
diff --git a/arch/mips/au1000/common/dbdma.c b/arch/mips/au1000/common/dbdma.c
index adfc3172aace..cf10dc246f82 100644
--- a/arch/mips/au1000/common/dbdma.c
+++ b/arch/mips/au1000/common/dbdma.c
@@ -29,6 +29,7 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  *
  */
+
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -42,6 +43,8 @@
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
 #include <asm/system.h>
 
+/* #include <linux/module.h> */
+
 #if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
 
 /*
@@ -55,43 +58,16 @@
  * functions. The drivers allocate the data buffers and assign them
  * to the descriptors.
  */
-static DEFINE_SPINLOCK(au1xxx_dbdma_spin_lock);
+static spinlock_t au1xxx_dbdma_spin_lock = SPIN_LOCK_UNLOCKED;
 
 /* I couldn't find a macro that did this......
  */
 #define ALIGN_ADDR(x, a) ((((u32)(x)) + (a-1)) & ~(a-1))
 
-static volatile dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE;
-static int dbdma_initialized;
+static dbdma_global_t *dbdma_gptr = (dbdma_global_t *)DDMA_GLOBAL_BASE;
+static int dbdma_initialized=0;
 static void au1xxx_dbdma_init(void);
 
-typedef struct dbdma_device_table {
-	u32 dev_id;
-	u32 dev_flags;
-	u32 dev_tsize;
-	u32 dev_devwidth;
-	u32 dev_physaddr; /* If FIFO */
-	u32 dev_intlevel;
-	u32 dev_intpolarity;
-} dbdev_tab_t;
-
-typedef struct dbdma_chan_config {
-	u32 chan_flags;
-	u32 chan_index;
-	dbdev_tab_t *chan_src;
-	dbdev_tab_t *chan_dest;
-	au1x_dma_chan_t *chan_ptr;
-	au1x_ddma_desc_t *chan_desc_base;
-	au1x_ddma_desc_t *get_ptr, *put_ptr, *cur_ptr;
-	void *chan_callparam;
-	void (*chan_callback)(int, void *, struct pt_regs *);
-} chan_tab_t;
-
-#define DEV_FLAGS_INUSE (1 << 0)
-#define DEV_FLAGS_ANYUSE (1 << 1)
-#define DEV_FLAGS_OUT (1 << 2)
-#define DEV_FLAGS_IN (1 << 3)
-
 static dbdev_tab_t dbdev_tab[] = {
 #ifdef CONFIG_SOC_AU1550
 	/* UARTS */
@@ -157,13 +133,13 @@ static dbdev_tab_t dbdev_tab[] = {
 	{ DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 	{ DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 
-	{ DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
-	{ DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
-	{ DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
-	{ DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+	{ DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 },
+	{ DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 },
+	{ DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 },
+	{ DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 },
 
-	{ DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 },
-	{ DSCR_CMD0_AES_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+	{ DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 },
+	{ DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 },
 
 	{ DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 },
 	{ DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 },
@@ -173,9 +149,9 @@ static dbdev_tab_t dbdev_tab[] = {
 	{ DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 },
 	{ DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 
-	{ DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
-	{ DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
-	{ DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
+	{ DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 },
+	{ DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 },
+	{ DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 },
 	{ DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 
 	{ DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 },
@@ -184,6 +160,24 @@ static dbdev_tab_t dbdev_tab[] = {
 
 	{ DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
 	{ DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 },
+
+	/* Provide 16 user definable device types */
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 },
 };
 
 #define DBDEV_TAB_SIZE (sizeof(dbdev_tab) / sizeof(dbdev_tab_t))
@@ -203,6 +197,30 @@ find_dbdev_id (u32 id)
 	return NULL;
 }
 
+u32
+au1xxx_ddma_add_device(dbdev_tab_t *dev)
+{
+	u32 ret = 0;
+	dbdev_tab_t *p=NULL;
+	static u16 new_id=0x1000;
+
+	p = find_dbdev_id(0);
+	if ( NULL != p )
+	{
+		memcpy(p, dev, sizeof(dbdev_tab_t));
+		p->dev_id = DSCR_DEV2CUSTOM_ID(new_id,dev->dev_id);
+		ret = p->dev_id;
+		new_id++;
+#if 0
+		printk("add_device: id:%x flags:%x padd:%x\n",
+				p->dev_id, p->dev_flags, p->dev_physaddr );
+#endif
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(au1xxx_ddma_add_device);
+
 /* Allocate a channel and return a non-zero descriptor if successful.
  */
 u32
@@ -215,7 +233,7 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 	int i;
 	dbdev_tab_t *stp, *dtp;
 	chan_tab_t *ctp;
-	volatile au1x_dma_chan_t *cp;
+	au1x_dma_chan_t *cp;
 
 	/* We do the intialization on the first channel allocation.
 	 * We have to wait because of the interrupt handler initialization
@@ -225,9 +243,6 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 		au1xxx_dbdma_init();
 	dbdma_initialized = 1;
 
-	if ((srcid > DSCR_NDEV_IDS) || (destid > DSCR_NDEV_IDS))
-		return 0;
-
 	if ((stp = find_dbdev_id(srcid)) == NULL) return 0;
 	if ((dtp = find_dbdev_id(destid)) == NULL) return 0;
 
@@ -269,9 +284,9 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 			/* If kmalloc fails, it is caught below same
 			 * as a channel not available.
 			 */
-			ctp = kmalloc(sizeof(chan_tab_t), GFP_KERNEL);
+			ctp = (chan_tab_t *)
+				kmalloc(sizeof(chan_tab_t), GFP_KERNEL);
 			chan_tab_ptr[i] = ctp;
-			ctp->chan_index = chan = i;
 			break;
 		}
 	}
@@ -279,10 +294,11 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 
 	if (ctp != NULL) {
 		memset(ctp, 0, sizeof(chan_tab_t));
+		ctp->chan_index = chan = i;
 		dcp = DDMA_CHANNEL_BASE;
 		dcp += (0x0100 * chan);
 		ctp->chan_ptr = (au1x_dma_chan_t *)dcp;
-		cp = (volatile au1x_dma_chan_t *)dcp;
+		cp = (au1x_dma_chan_t *)dcp;
 		ctp->chan_src = stp;
 		ctp->chan_dest = dtp;
 		ctp->chan_callback = callback;
@@ -299,6 +315,9 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 			i |= DDMA_CFG_DED;
 		if (dtp->dev_intpolarity)
 			i |= DDMA_CFG_DP;
+		if ((stp->dev_flags & DEV_FLAGS_SYNC) ||
+			(dtp->dev_flags & DEV_FLAGS_SYNC))
+			i |= DDMA_CFG_SYNC;
 		cp->ddma_cfg = i;
 		au_sync();
 
@@ -309,14 +328,14 @@ au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
 			rv = (u32)(&chan_tab_ptr[chan]);
 		}
 		else {
-			/* Release devices.
-			 */
+			/* Release devices */
 			stp->dev_flags &= ~DEV_FLAGS_INUSE;
 			dtp->dev_flags &= ~DEV_FLAGS_INUSE;
 		}
 	}
 	return rv;
 }
+EXPORT_SYMBOL(au1xxx_dbdma_chan_alloc);
 
 /* Set the device width if source or destination is a FIFO.
  * Should be 8, 16, or 32 bits.
@@ -344,6 +363,7 @@ au1xxx_dbdma_set_devwidth(u32 chanid, int bits)
 
 	return rv;
 }
+EXPORT_SYMBOL(au1xxx_dbdma_set_devwidth);
 
 /* Allocate a descriptor ring, initializing as much as possible.
  */
@@ -370,7 +390,8 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 	 * and if we try that first we are likely to not waste larger
 	 * slabs of memory.
 	 */
-	desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t), GFP_KERNEL);
+	desc_base = (u32)kmalloc(entries * sizeof(au1x_ddma_desc_t),
+			GFP_KERNEL|GFP_DMA);
 	if (desc_base == 0)
 		return 0;
 
@@ -381,7 +402,7 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 		kfree((const void *)desc_base);
 		i = entries * sizeof(au1x_ddma_desc_t);
 		i += (sizeof(au1x_ddma_desc_t) - 1);
-		if ((desc_base = (u32)kmalloc(i, GFP_KERNEL)) == 0)
+		if ((desc_base = (u32)kmalloc(i, GFP_KERNEL|GFP_DMA)) == 0)
 			return 0;
 
 		desc_base = ALIGN_ADDR(desc_base, sizeof(au1x_ddma_desc_t));
@@ -461,9 +482,14 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 	/* If source input is fifo, set static address.
 	 */
 	if (stp->dev_flags & DEV_FLAGS_IN) {
-		src0 = stp->dev_physaddr;
+		if ( stp->dev_flags & DEV_FLAGS_BURSTABLE )
+			src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
+		else
 		src1 |= DSCR_SRC1_SAM(DSCR_xAM_STATIC);
+
 	}
+	if (stp->dev_physaddr)
+		src0 = stp->dev_physaddr;
 
 	/* Set up dest1. For now, assume no stride and increment.
 	 * A channel attribute update can change this later.
@@ -487,10 +513,18 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 	/* If destination output is fifo, set static address.
 	 */
 	if (dtp->dev_flags & DEV_FLAGS_OUT) {
-		dest0 = dtp->dev_physaddr;
+		if ( dtp->dev_flags & DEV_FLAGS_BURSTABLE )
+			dest1 |= DSCR_DEST1_DAM(DSCR_xAM_BURST);
+		else
 		dest1 |= DSCR_DEST1_DAM(DSCR_xAM_STATIC);
 	}
+	if (dtp->dev_physaddr)
+		dest0 = dtp->dev_physaddr;
 
+#if 0
+	printk("did:%x sid:%x cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
+			dtp->dev_id, stp->dev_id, cmd0, cmd1, src0, src1, dest0, dest1 );
+#endif
 	for (i=0; i<entries; i++) {
 		dp->dscr_cmd0 = cmd0;
 		dp->dscr_cmd1 = cmd1;
@@ -499,6 +533,7 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 		dp->dscr_dest0 = dest0;
 		dp->dscr_dest1 = dest1;
 		dp->dscr_stat = 0;
+		dp->sw_context = dp->sw_status = 0;
 		dp->dscr_nxtptr = DSCR_NXTPTR(virt_to_phys(dp + 1));
 		dp++;
 	}
@@ -511,13 +546,14 @@ au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
 
 	return (u32)(ctp->chan_desc_base);
 }
+EXPORT_SYMBOL(au1xxx_dbdma_ring_alloc);
 
 /* Put a source buffer into the DMA ring.
  * This updates the source pointer and byte count. Normally used
  * for memory to fifo transfers.
  */
 u32
-au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes)
+_au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes, u32 flags)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
@@ -544,24 +580,40 @@ au1xxx_dbdma_put_source(u32 chanid, void *buf, int nbytes)
 	 */
 	dp->dscr_source0 = virt_to_phys(buf);
 	dp->dscr_cmd1 = nbytes;
-	dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
-	ctp->chan_ptr->ddma_dbell = 0xffffffff; /* Make it go */
-
+	/* Check flags */
+	if (flags & DDMA_FLAGS_IE)
+		dp->dscr_cmd0 |= DSCR_CMD0_IE;
+	if (flags & DDMA_FLAGS_NOIE)
+		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
 	/* Get next descriptor pointer.
 	 */
 	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 
+	/*
+	 * There is an errata on the Au1200/Au1550 parts that could result
+	 * in "stale" data being DMA'd. It has to do with the snoop logic on
+	 * the dache eviction buffer. NONCOHERENT_IO is on by default for
+	 * these parts. If it is fixedin the future, these dma_cache_inv will
+	 * just be nothing more than empty macros. See io.h.
+	 * */
+	dma_cache_wback_inv(buf,nbytes);
+	dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
+	au_sync();
+	dma_cache_wback_inv(dp, sizeof(dp));
+	ctp->chan_ptr->ddma_dbell = 0;
+
 	/* return something not zero.
 	 */
 	return nbytes;
 }
+EXPORT_SYMBOL(_au1xxx_dbdma_put_source);
 
 /* Put a destination buffer into the DMA ring.
  * This updates the destination pointer and byte count. Normally used
  * to place an empty buffer into the ring for fifo to memory transfers.
  */
 u32
-au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes)
+_au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes, u32 flags)
 {
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
@@ -583,11 +635,33 @@ au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes)
 	if (dp->dscr_cmd0 & DSCR_CMD0_V)
 		return 0;
 
-	/* Load up buffer address and byte count.
-	 */
+	/* Load up buffer address and byte count */
+
+	/* Check flags */
+	if (flags & DDMA_FLAGS_IE)
+		dp->dscr_cmd0 |= DSCR_CMD0_IE;
+	if (flags & DDMA_FLAGS_NOIE)
+		dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+
 	dp->dscr_dest0 = virt_to_phys(buf);
 	dp->dscr_cmd1 = nbytes;
+#if 0
+	printk("cmd0:%x cmd1:%x source0:%x source1:%x dest0:%x dest1:%x\n",
+			dp->dscr_cmd0, dp->dscr_cmd1, dp->dscr_source0,
+			dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1 );
+#endif
+	/*
+	 * There is an errata on the Au1200/Au1550 parts that could result in
+	 * "stale" data being DMA'd. It has to do with the snoop logic on the
+	 * dache eviction buffer. NONCOHERENT_IO is on by default for these
+	 * parts. If it is fixedin the future, these dma_cache_inv will just
+	 * be nothing more than empty macros. See io.h.
+	 * */
+	dma_cache_inv(buf,nbytes);
 	dp->dscr_cmd0 |= DSCR_CMD0_V; /* Let it rip */
+	au_sync();
+	dma_cache_wback_inv(dp, sizeof(dp));
+	ctp->chan_ptr->ddma_dbell = 0;
 
 	/* Get next descriptor pointer.
 	 */
@@ -597,6 +671,7 @@ au1xxx_dbdma_put_dest(u32 chanid, void *buf, int nbytes)
 	 */
 	return nbytes;
 }
+EXPORT_SYMBOL(_au1xxx_dbdma_put_dest);
 
 /* Get a destination buffer into the DMA ring.
  * Normally used to get a full buffer from the ring during fifo
@@ -646,7 +721,7 @@ void
 au1xxx_dbdma_stop(u32 chanid)
 {
 	chan_tab_t *ctp;
-	volatile au1x_dma_chan_t *cp;
+	au1x_dma_chan_t *cp;
 	int halt_timeout = 0;
 
 	ctp = *((chan_tab_t **)chanid);
@@ -666,6 +741,7 @@ au1xxx_dbdma_stop(u32 chanid)
 	cp->ddma_stat |= (DDMA_STAT_DB | DDMA_STAT_V);
 	au_sync();
 }
+EXPORT_SYMBOL(au1xxx_dbdma_stop);
 
 /* Start using the current descriptor pointer. If the dbdma encounters
  * a not valid descriptor, it will stop. In this case, we can just
@@ -675,17 +751,17 @@ void
 au1xxx_dbdma_start(u32 chanid)
 {
 	chan_tab_t *ctp;
-	volatile au1x_dma_chan_t *cp;
+	au1x_dma_chan_t *cp;
 
 	ctp = *((chan_tab_t **)chanid);
-
 	cp = ctp->chan_ptr;
 	cp->ddma_desptr = virt_to_phys(ctp->cur_ptr);
 	cp->ddma_cfg |= DDMA_CFG_EN; /* Enable channel */
 	au_sync();
-	cp->ddma_dbell = 0xffffffff; /* Make it go */
+	cp->ddma_dbell = 0;
 	au_sync();
 }
+EXPORT_SYMBOL(au1xxx_dbdma_start);
 
 void
 au1xxx_dbdma_reset(u32 chanid)
@@ -704,15 +780,21 @@ au1xxx_dbdma_reset(u32 chanid)
 
 	do {
 		dp->dscr_cmd0 &= ~DSCR_CMD0_V;
+		/* reset our SW status -- this is used to determine
+		 * if a descriptor is in use by upper level SW. Since
+		 * posting can reset 'V' bit.
+		 */
+		dp->sw_status = 0;
 		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 	} while (dp != ctp->chan_desc_base);
 }
+EXPORT_SYMBOL(au1xxx_dbdma_reset);
 
 u32
 au1xxx_get_dma_residue(u32 chanid)
 {
 	chan_tab_t *ctp;
-	volatile au1x_dma_chan_t *cp;
+	au1x_dma_chan_t *cp;
 	u32 rv;
 
 	ctp = *((chan_tab_t **)chanid);
@@ -747,15 +829,16 @@ au1xxx_dbdma_chan_free(u32 chanid)
 
 	kfree(ctp);
 }
+EXPORT_SYMBOL(au1xxx_dbdma_chan_free);
 
-static irqreturn_t
+static void
 dbdma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-	u32 intstat;
+	u32 intstat, flags;
 	u32 chan_index;
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
-	volatile au1x_dma_chan_t *cp;
+	au1x_dma_chan_t *cp;
 
 	intstat = dbdma_gptr->ddma_intstat;
 	au_sync();
@@ -774,19 +857,26 @@ dbdma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 		(ctp->chan_callback)(irq, ctp->chan_callparam, regs);
 
 	ctp->cur_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
-
-	return IRQ_HANDLED;
 }
 
-static void
-au1xxx_dbdma_init(void)
+static void au1xxx_dbdma_init(void)
 {
+	int irq_nr;
+
 	dbdma_gptr->ddma_config = 0;
 	dbdma_gptr->ddma_throttle = 0;
 	dbdma_gptr->ddma_inten = 0xffff;
 	au_sync();
 
-	if (request_irq(AU1550_DDMA_INT, dbdma_interrupt, SA_INTERRUPT,
+#if defined(CONFIG_SOC_AU1550)
+	irq_nr = AU1550_DDMA_INT;
+#elif defined(CONFIG_SOC_AU1200)
+	irq_nr = AU1200_DDMA_INT;
+#else
+#error Unknown Au1x00 SOC
+#endif
+
+	if (request_irq(irq_nr, dbdma_interrupt, SA_INTERRUPT,
 		"Au1xxx dbdma", (void *)dbdma_gptr))
 		printk("Can't get 1550 dbdma irq");
 }
@@ -797,7 +887,8 @@ au1xxx_dbdma_dump(u32 chanid)
 	chan_tab_t *ctp;
 	au1x_ddma_desc_t *dp;
 	dbdev_tab_t *stp, *dtp;
-	volatile au1x_dma_chan_t *cp;
+	au1x_dma_chan_t *cp;
+	u32 i = 0;
 
 	ctp = *((chan_tab_t **)chanid);
 	stp = ctp->chan_src;
@@ -822,15 +913,64 @@ au1xxx_dbdma_dump(u32 chanid)
 	dp = ctp->chan_desc_base;
 
 	do {
-		printk("dp %08x, cmd0 %08x, cmd1 %08x\n",
-			(u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
-		printk("src0 %08x, src1 %08x, dest0 %08x\n",
-			dp->dscr_source0, dp->dscr_source1, dp->dscr_dest0);
-		printk("dest1 %08x, stat %08x, nxtptr %08x\n",
-			dp->dscr_dest1, dp->dscr_stat, dp->dscr_nxtptr);
+		printk("Dp[%d]= %08x, cmd0 %08x, cmd1 %08x\n",
+			i++, (u32)dp, dp->dscr_cmd0, dp->dscr_cmd1);
+		printk("src0 %08x, src1 %08x, dest0 %08x, dest1 %08x\n",
+			dp->dscr_source0, dp->dscr_source1, dp->dscr_dest0, dp->dscr_dest1);
+		printk("stat %08x, nxtptr %08x\n",
+			dp->dscr_stat, dp->dscr_nxtptr);
 		dp = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
 	} while (dp != ctp->chan_desc_base);
 }
 
+/* Put a descriptor into the DMA ring.
+ * This updates the source/destination pointers and byte count.
+ */
+u32
+au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr )
+{
+	chan_tab_t *ctp;
+	au1x_ddma_desc_t *dp;
+	u32 nbytes=0;
+
+	/* I guess we could check this to be within the
+	 * range of the table......
+	 */
+	ctp = *((chan_tab_t **)chanid);
+
+	/* We should have multiple callers for a particular channel,
+	 * an interrupt doesn't affect this pointer nor the descriptor,
+	 * so no locking should be needed.
+	 */
+	dp = ctp->put_ptr;
+
+	/* If the descriptor is valid, we are way ahead of the DMA
+	 * engine, so just return an error condition.
+	 */
+	if (dp->dscr_cmd0 & DSCR_CMD0_V)
+		return 0;
+
+	/* Load up buffer addresses and byte count.
+	 */
+	dp->dscr_dest0 = dscr->dscr_dest0;
+	dp->dscr_source0 = dscr->dscr_source0;
+	dp->dscr_dest1 = dscr->dscr_dest1;
+	dp->dscr_source1 = dscr->dscr_source1;
+	dp->dscr_cmd1 = dscr->dscr_cmd1;
+	nbytes = dscr->dscr_cmd1;
+	/* Allow the caller to specifiy if an interrupt is generated */
+	dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
+	dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
+	ctp->chan_ptr->ddma_dbell = 0;
+
+	/* Get next descriptor pointer.
+	 */
+	ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
+
+	/* return something not zero.
+	 */
+	return nbytes;
+}
+
 #endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */
 
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index 0b912f739feb..ebf93bdbad14 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -488,7 +488,7 @@ void intc0_req0_irqdispatch(struct pt_regs *regs)
 	intc0_req0 |= au_readl(IC0_REQ0INT);
 
 	if (!intc0_req0) return;
-
+#ifdef AU1000_USB_DEV_REQ_INT
 	/*
 	 * Because of the tight timing of SETUP token to reply
 	 * transactions, the USB devices-side packet complete
@@ -499,7 +499,7 @@ void intc0_req0_irqdispatch(struct pt_regs *regs)
 		do_IRQ(AU1000_USB_DEV_REQ_INT, regs);
 		return;
 	}
-
+#endif
 	irq = au_ffs(intc0_req0) - 1;
 	intc0_req0 &= ~(1<<irq);
 	do_IRQ(irq, regs);
diff --git a/arch/mips/au1000/common/usbdev.c b/arch/mips/au1000/common/usbdev.c
index 447a9a4612a8..0b21bed7ee55 100644
--- a/arch/mips/au1000/common/usbdev.c
+++ b/arch/mips/au1000/common/usbdev.c
@@ -1005,11 +1005,11 @@ process_ep0_receive (struct usb_dev* dev)
 #endif
 		dev->ep0_stage = SETUP_STAGE;
 		break;
-	}
-
-	spin_unlock(&ep0->lock);
-	// we're done processing the packet, free it
-	kfree(pkt);
+	}
+
+	spin_unlock(&ep0->lock);
+	// we're done processing the packet, free it
+	kfree(pkt);
 }
 
 
@@ -1072,8 +1072,7 @@ dma_done_ep0_intr(int irq, void *dev_id, struct pt_regs *regs)
 	clear_dma_done1(ep0->indma);
 
 	pkt = send_packet_complete(ep0);
-	if (pkt)
-		kfree(pkt);
+	kfree(pkt);
 }
 
 /*
@@ -1302,8 +1301,7 @@ usbdev_exit(void)
 		endpoint_flush(ep);
 	}
 
-	if (usbdev.full_conf_desc)
-		kfree(usbdev.full_conf_desc);
+	kfree(usbdev.full_conf_desc);
 }
 
 int