Diffstat (limited to 'arch/blackfin')
-rw-r--r--   arch/blackfin/include/asm/dma.h      |  2
-rw-r--r--   arch/blackfin/kernel/bfin_dma_5xx.c  | 81
-rw-r--r--   arch/blackfin/kernel/setup.c         | 51
3 files changed, 111 insertions(+), 23 deletions(-)
diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h
index e4f7b8043f02..46c56185417a 100644
--- a/arch/blackfin/include/asm/dma.h
+++ b/arch/blackfin/include/asm/dma.h
@@ -253,5 +253,7 @@ static inline void clear_dma_irqstat(unsigned int channel)
 void *dma_memcpy(void *dest, const void *src, size_t count);
 void *safe_dma_memcpy(void *dest, const void *src, size_t count);
 void blackfin_dma_early_init(void);
+void early_dma_memcpy(void *dest, const void *src, size_t count);
+void early_dma_memcpy_done(void);
 
 #endif
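
The two new prototypes work as a pair: early_dma_memcpy() queues a copy on one of the two on-chip memory-DMA streams, and early_dma_memcpy_done() spins until every queued copy has completed. A minimal usage sketch (dst_a/src_a/len_a and friends are hypothetical; all pointers and sizes must be 4-byte aligned, as the BUG_ON() checks in the implementation below enforce):

	/* Queue two copies back to back -- each grabs a free MDMA
	 * stream, so both transfers can be in flight at once.  A third
	 * call would simply spin until one of the streams frees up. */
	early_dma_memcpy(dst_a, src_a, len_a);
	early_dma_memcpy(dst_b, src_b, len_b);

	/* Block until both MDMA streams report DMA_DONE. */
	early_dma_memcpy_done();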
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 8531693fb48d..704419e4da20 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -232,6 +232,87 @@ void blackfin_dma_resume(void)
 void __init blackfin_dma_early_init(void)
 {
 	bfin_write_MDMA_S0_CONFIG(0);
+	bfin_write_MDMA_S1_CONFIG(0);
+}
+
+void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
+	struct dma_register *dst_ch, *src_ch;
+
+	/* We assume that everything is 4 byte aligned, so include
+	 * a basic sanity check
+	 */
+	BUG_ON(dst % 4);
+	BUG_ON(src % 4);
+	BUG_ON(size % 4);
+
+	/* Force a sync in case a previous config reset on this channel
+	 * occurred.  This is needed so subsequent writes to DMA registers
+	 * are not spuriously lost/corrupted.
+	 */
+	__builtin_bfin_ssync();
+
+	src_ch = 0;
+	/* Find an available memDMA channel */
+	while (1) {
+		if (!src_ch || src_ch == (struct dma_register *)MDMA_S1_NEXT_DESC_PTR) {
+			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
+			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
+		} else {
+			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
+			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
+		}
+
+		if (!bfin_read16(&src_ch->cfg)) {
+			break;
+		} else {
+			if (bfin_read16(&src_ch->irq_status) & DMA_DONE)
+				bfin_write16(&src_ch->cfg, 0);
+		}
+
+	}
+
+	/* Destination */
+	bfin_write32(&dst_ch->start_addr, dst);
+	bfin_write16(&dst_ch->x_count, size >> 2);
+	bfin_write16(&dst_ch->x_modify, 1 << 2);
+	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);
+
+	/* Source */
+	bfin_write32(&src_ch->start_addr, src);
+	bfin_write16(&src_ch->x_count, size >> 2);
+	bfin_write16(&src_ch->x_modify, 1 << 2);
+	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);
+
+	/* Enable */
+	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
+	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);
+
+	/* Since we are atomic now, don't use the workaround ssync */
+	__builtin_bfin_ssync();
+}
+
+void __init early_dma_memcpy_done(void)
+{
+	while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
+	       (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
+		continue;
+
+	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	/*
+	 * Now that DMA is done, we would normally flush cache, but
+	 * i/d cache isn't running this early, so we don't bother,
+	 * and just clear out the DMA channel for next time
+	 */
+	bfin_write_MDMA_S0_CONFIG(0);
+	bfin_write_MDMA_S1_CONFIG(0);
+	bfin_write_MDMA_D0_CONFIG(0);
+	bfin_write_MDMA_D1_CONFIG(0);
+
+	__builtin_bfin_ssync();
 }
 
 /**
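
A note on the channel hunt in early_dma_memcpy() above: the while (1) loop ping-pongs between MDMA stream 0 and stream 1, breaking as soon as it finds a stream whose cfg register reads zero (idle), and reclaiming any stream that has raised DMA_DONE by clearing its cfg on the way past. The transfer itself moves size >> 2 words of 4 bytes, which is why x_modify is 1 << 2: a 0x800-byte copy becomes x_count = 512 transfers, each advancing both addresses by 4. A condensed model of the selection logic, with hypothetical mdma_idle()/mdma_done()/mdma_reclaim() helpers standing in for the bfin_read16()/bfin_write16() register accesses:

	int ch = 0;			/* 0 = MDMA stream 0, 1 = stream 1 */

	for (;;) {
		if (mdma_idle(ch))	/* cfg == 0: stream is free to use */
			break;
		if (mdma_done(ch))	/* previous copy finished, so */
			mdma_reclaim(ch);	/* clear cfg to release it */
		ch ^= 1;		/* otherwise try the other stream */
	}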
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index a58687bdee6a..0838eafed172 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -150,40 +150,45 @@ void __init bfin_relocate_l1_mem(void)
 	unsigned long l1_data_b_length;
 	unsigned long l2_length;
 
+	/*
+	 * due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
+	 * we know that everything about l1 text/data is nice and aligned,
+	 * so copy by 4 byte chunks and don't worry about overlapping
+	 * src/dest.
+	 *
+	 * We can't use the dma_memcpy functions, since they can call
+	 * scheduler functions which might be in L1 :( and core writes
+	 * into L1 instruction SRAM cause bad access errors, so we are
+	 * stuck -- we are required to use DMA, but can't use the common
+	 * dma functions.  We can't use memcpy either, as that might
+	 * itself be in the L1 memory being relocated.
+	 */
+
 	blackfin_dma_early_init();
 
+	/* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
 	l1_code_length = _etext_l1 - _stext_l1;
-	if (l1_code_length > L1_CODE_LENGTH)
-		panic("L1 Instruction SRAM Overflow\n");
-	/* cannot complain as printk is not available as yet.
-	 * But we can continue booting and complain later!
-	 */
-
-	/* Copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
-	dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
+	if (l1_code_length)
+		early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
 
+	/* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
 	l1_data_a_length = _sbss_l1 - _sdata_l1;
-	if (l1_data_a_length > L1_DATA_A_LENGTH)
-		panic("L1 Data SRAM Bank A Overflow\n");
-
-	/* Copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
-	dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
+	if (l1_data_a_length)
+		early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
 
+	/* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
 	l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
-	if (l1_data_b_length > L1_DATA_B_LENGTH)
-		panic("L1 Data SRAM Bank B Overflow\n");
-
-	/* Copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
-	dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
+	if (l1_data_b_length)
+		early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
 			l1_data_a_length, l1_data_b_length);
 
+	early_dma_memcpy_done();
+
+	/* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */
 	if (L2_LENGTH != 0) {
 		l2_length = _sbss_l2 - _stext_l2;
-		if (l2_length > L2_LENGTH)
-			panic("L2 SRAM Overflow\n");
-
-		/* Copy _stext_l2 to _edata_l2 to L2 SRAM */
-		dma_memcpy(_stext_l2, _l2_lma_start, l2_length);
+		if (l2_length)
+			memcpy(_stext_l2, _l2_lma_start, l2_length);
 	}
 }
 
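
The load image at _l1_lma_start packs the three L1 regions contiguously (code, then data bank A, then data bank B), so each source address is just the running sum of the lengths already copied; the copies are only queued here, and early_dma_memcpy_done() is what actually waits for them. L2 SRAM, by contrast, accepts ordinary core writes, so a plain memcpy() suffices there. A sketch of the same offset arithmetic (illustrative only, not a drop-in replacement for the code above):

	char *lma = (char *)_l1_lma_start;	/* packed load image */

	early_dma_memcpy(_stext_l1, lma, l1_code_length);
	lma += l1_code_length;			/* bank A image follows the code */

	early_dma_memcpy(_sdata_l1, lma, l1_data_a_length);
	lma += l1_data_a_length;		/* bank B image follows bank A */

	early_dma_memcpy(_sdata_b_l1, lma, l1_data_b_length);

	early_dma_memcpy_done();		/* up to two copies in flight at once */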