Diffstat (limited to 'arch/ppc/8xx_io/commproc.c')
-rw-r--r--  arch/ppc/8xx_io/commproc.c  464
1 files changed, 464 insertions, 0 deletions

diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c
new file mode 100644
index 000000000000..0cc2e7a9cb11
--- /dev/null
+++ b/arch/ppc/8xx_io/commproc.c
@@ -0,0 +1,464 @@
/*
 * General Purpose functions for the global management of the
 * Communication Processor Module.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * In addition to the individual control of the communication
 * channels, there are a few functions that globally affect the
 * communication processor.
 *
 * Buffer descriptors must be allocated from the dual ported memory
 * space.  The allocator for that is here.  When the communication
 * processor is reset, we reclaim the memory available.  There is
 * currently no deallocator for this memory.
 * The amount of space available is platform dependent.  On the
 * MBX, the EPPC software loads additional microcode into the
 * communication processor, and uses some of the DP ram for this
 * purpose.  Currently, the first 512 bytes and the last 256 bytes of
 * memory are used.  Right now I am conservative and only use the
 * memory that can never be used for microcode.  If there are
 * applications that require more DP ram, we can expand the boundaries
 * but then we have to be careful of any downloaded microcode.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <asm/mpc8xx.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/8xx_immap.h>
#include <asm/commproc.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/rheap.h>

extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);

static void m8xx_cpm_dpinit(void);
static uint host_buffer;        /* One page of host buffer */
static uint host_end;           /* end + 1 */
cpm8xx_t *cpmp;                 /* Pointer to comm processor space */

/* CPM interrupt vector functions.
 */
struct cpm_action {
        void (*handler)(void *, struct pt_regs *regs);
        void *dev_id;
};
static struct cpm_action cpm_vecs[CPMVEC_NR];
static irqreturn_t cpm_interrupt(int irq, void *dev, struct pt_regs *regs);
static irqreturn_t cpm_error_interrupt(int irq, void *dev, struct pt_regs *regs);
static void alloc_host_memory(void);

/* Define a table of names to identify CPM interrupt handlers in
 * /proc/interrupts.
 */
const char *cpm_int_name[] = {
        "error",      "PC4",        "PC5",        "SMC2",
        "SMC1",       "SPI",        "PC6",        "Timer 4",
        "",           "PC7",        "PC8",        "PC9",
        "Timer 3",    "",           "PC10",       "PC11",
        "I2C",        "RISC Timer", "Timer 2",    "",
        "IDMA2",      "IDMA1",      "SDMA error", "PC12",
        "PC13",       "Timer 1",    "PC14",       "SCC4",
        "SCC3",       "SCC2",       "SCC1",       "PC15"
};

static void
cpm_mask_irq(unsigned int irq)
{
        int cpm_vec = irq - CPM_IRQ_OFFSET;

        ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr &= ~(1 << cpm_vec);
}

static void
cpm_unmask_irq(unsigned int irq)
{
        int cpm_vec = irq - CPM_IRQ_OFFSET;

        ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr |= (1 << cpm_vec);
}

static void
cpm_ack(unsigned int irq)
{
        /* We do not need to do anything here. */
}

static void
cpm_eoi(unsigned int irq)
{
        int cpm_vec = irq - CPM_IRQ_OFFSET;

        ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cisr = (1 << cpm_vec);
}

struct hw_interrupt_type cpm_pic = {
        .typename = " CPM ",
        .enable   = cpm_unmask_irq,
        .disable  = cpm_mask_irq,
        .ack      = cpm_ack,
        .end      = cpm_eoi,
};

extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

void
m8xx_cpm_reset(uint bootpage)
{
        volatile immap_t *imp;
        volatile cpm8xx_t *commproc;
        pte_t *pte;

        imp = (immap_t *)IMAP_ADDR;
        commproc = (cpm8xx_t *)&imp->im_cpm;

#ifdef CONFIG_UCODE_PATCH
        /* Perform a reset.
         */
        commproc->cp_cpcr = (CPM_CR_RST | CPM_CR_FLG);

        /* Wait for it.
         */
        while (commproc->cp_cpcr & CPM_CR_FLG);

        cpm_load_patch(imp);
#endif

        /* Set SDMA Bus Request priority 5.
         * On 860T, this also enables FEC priority 6.  I am not sure
         * this is what we really want for some applications, but the
         * manual recommends it.
         * Bit 25, FAM can also be set to use FEC aggressive mode (860T).
         */
        imp->im_siu_conf.sc_sdcr = 1;

        /* Reclaim the DP memory for our use. */
        m8xx_cpm_dpinit();

        /* get the PTE for the bootpage */
        if (!get_pteptr(&init_mm, bootpage, &pte))
                panic("get_pteptr failed\n");

        /* and make it uncacheable */
        pte_val(*pte) |= _PAGE_NO_CACHE;
        _tlbie(bootpage);

        host_buffer = bootpage;
        host_end = host_buffer + PAGE_SIZE;

        /* Tell everyone where the comm processor resides.
         */
        cpmp = (cpm8xx_t *)commproc;
}

/* We used to do this earlier, but have to postpone as long as possible
 * to ensure the kernel VM is now running.
 */
static void
alloc_host_memory(void)
{
        dma_addr_t physaddr;

        /* Set the host page for allocation.
         */
        host_buffer = (uint)dma_alloc_coherent(NULL, PAGE_SIZE, &physaddr,
                                               GFP_KERNEL);
        host_end = host_buffer + PAGE_SIZE;
}

/* This is called during init_IRQ.  We used to do it above, but this
 * was too early since init_IRQ was not yet called.
 */
static struct irqaction cpm_error_irqaction = {
        .handler = cpm_error_interrupt,
        .mask = CPU_MASK_NONE,
};
static struct irqaction cpm_interrupt_irqaction = {
        .handler = cpm_interrupt,
        .mask = CPU_MASK_NONE,
        .name = "CPM cascade",
};

void
cpm_interrupt_init(void)
{
        int i;

        /* Initialize the CPM interrupt controller.
         */
        ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr =
            (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) |
                ((CPM_INTERRUPT/2) << 13) | CICR_HP_MASK;
        ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cimr = 0;

        /* install the CPM interrupt controller routines for the CPM
         * interrupt vectors
         */
        for (i = CPM_IRQ_OFFSET; i < CPM_IRQ_OFFSET + NR_CPM_INTS; i++)
                irq_desc[i].handler = &cpm_pic;

        /* Set our interrupt handler with the core CPU. */
        if (setup_irq(CPM_INTERRUPT, &cpm_interrupt_irqaction))
                panic("Could not allocate CPM IRQ!");

        /* Install our own error handler. */
        cpm_error_irqaction.name = cpm_int_name[CPMVEC_ERROR];
        if (setup_irq(CPM_IRQ_OFFSET + CPMVEC_ERROR, &cpm_error_irqaction))
                panic("Could not allocate CPM error IRQ!");

        ((immap_t *)IMAP_ADDR)->im_cpic.cpic_cicr |= CICR_IEN;
}

/*
 * Get the CPM interrupt vector.
 */
int
cpm_get_irq(struct pt_regs *regs)
{
        int cpm_vec;

        /* Get the vector by setting the ACK bit and then reading
         * the register.
         */
        ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr = 1;
        cpm_vec = ((volatile immap_t *)IMAP_ADDR)->im_cpic.cpic_civr;
        cpm_vec >>= 11;

        return cpm_vec;
}

/* CPM interrupt controller cascade interrupt.
 */
static irqreturn_t
cpm_interrupt(int irq, void *dev, struct pt_regs *regs)
{
        /* This interrupt handler never actually gets called.  It is
         * installed only to unmask the CPM cascade interrupt in the SIU
         * and to make the CPM cascade interrupt visible in /proc/interrupts.
         */
        return IRQ_HANDLED;
}

/* The CPM can generate the error interrupt when there is a race condition
 * between generating and masking interrupts.  All we have to do is ACK it
 * and return.  This is a no-op function so we don't need any special
 * tests in the interrupt handler.
 */
static irqreturn_t
cpm_error_interrupt(int irq, void *dev, struct pt_regs *regs)
{
        return IRQ_HANDLED;
}

/* A helper function to translate the handler prototype required by
 * request_irq() to the handler prototype required by cpm_install_handler().
 */
static irqreturn_t
cpm_handler_helper(int irq, void *dev_id, struct pt_regs *regs)
{
        int cpm_vec = irq - CPM_IRQ_OFFSET;

        (*cpm_vecs[cpm_vec].handler)(dev_id, regs);

        return IRQ_HANDLED;
}

/* Install a CPM interrupt handler.
 * This routine accepts a CPM interrupt vector in the range 0 to 31.
 * This routine is retained for backward compatibility.  Rather than using
 * this routine to install a CPM interrupt handler, you can now use
 * request_irq() with an IRQ in the range CPM_IRQ_OFFSET to
 * CPM_IRQ_OFFSET + NR_CPM_INTS - 1 (16 to 47).
 *
 * Notice that the prototype of the interrupt handler function must be
 * different depending on whether you install the handler with
 * request_irq() or cpm_install_handler().
 */
void
cpm_install_handler(int cpm_vec, void (*handler)(void *, struct pt_regs *regs),
                    void *dev_id)
{
        int err;

        /* If null handler, assume we are trying to free the IRQ.
         */
        if (!handler) {
                free_irq(CPM_IRQ_OFFSET + cpm_vec, dev_id);
                return;
        }

        if (cpm_vecs[cpm_vec].handler != 0)
                printk(KERN_INFO "CPM interrupt %x replacing %x\n",
                       (uint)handler, (uint)cpm_vecs[cpm_vec].handler);
        cpm_vecs[cpm_vec].handler = handler;
        cpm_vecs[cpm_vec].dev_id = dev_id;

        if ((err = request_irq(CPM_IRQ_OFFSET + cpm_vec, cpm_handler_helper,
                               0, cpm_int_name[cpm_vec], dev_id)))
                printk(KERN_ERR "request_irq() returned %d for CPM vector %d\n",
                       err, cpm_vec);
}
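
/* Usage sketch for the two registration styles above.  The "foo" names
 * and the choice of CPMVEC_SCC2 are hypothetical; only the prototypes
 * and the vector/IRQ arithmetic are the point here.
 *
 *      // Legacy style: raw CPM vector, handler(void *, struct pt_regs *)
 *      static void foo_cpm_handler(void *dev_id, struct pt_regs *regs);
 *      cpm_install_handler(CPMVEC_SCC2, foo_cpm_handler, foo_dev);
 *
 *      // Generic IRQ style: offset IRQ number, irqreturn_t handler
 *      static irqreturn_t foo_irq_handler(int irq, void *dev_id,
 *                                         struct pt_regs *regs);
 *      request_irq(CPM_IRQ_OFFSET + CPMVEC_SCC2, foo_irq_handler, 0,
 *                  "foo", foo_dev);
 */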

/* Free a CPM interrupt handler.
 * This routine accepts a CPM interrupt vector in the range 0 to 31.
 * This routine is retained for backward compatibility.
 */
void
cpm_free_handler(int cpm_vec)
{
        request_irq(CPM_IRQ_OFFSET + cpm_vec, NULL, 0, 0,
                    cpm_vecs[cpm_vec].dev_id);

        cpm_vecs[cpm_vec].handler = NULL;
        cpm_vecs[cpm_vec].dev_id = NULL;
}

/* We also own one page of host buffer space for the allocation of
 * UART "fifos" and the like.
 */
uint
m8xx_cpm_hostalloc(uint size)
{
        uint retloc;

        if (host_buffer == 0)
                alloc_host_memory();

        if ((host_buffer + size) >= host_end)
                return(0);

        retloc = host_buffer;
        host_buffer += size;

        return(retloc);
}
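
/* Caller-side sketch (hypothetical size): the return value is a kernel
 * virtual address into the single host page, and 0 means the page is
 * exhausted, so callers should check for it.
 *
 *      uint fifo = m8xx_cpm_hostalloc(64);
 *      if (fifo == 0)
 *              return -ENOMEM;
 */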

/* Set a baud rate generator.  This needs lots of work.  There are
 * four BRGs, any of which can be wired to any channel.
 * The internal baud rate clock is the system clock divided by 16.
 * This assumes the baud rate is 16x oversampled by the UART.
 */
#define BRG_INT_CLK             (((bd_t *)__res)->bi_intfreq)
#define BRG_UART_CLK            (BRG_INT_CLK/16)
#define BRG_UART_CLK_DIV16      (BRG_UART_CLK/16)

void
cpm_setbrg(uint brg, uint rate)
{
        volatile uint *bp;

        /* This is good enough to get SMCs running.....
         */
        bp = (uint *)&cpmp->cp_brgc1;
        bp += brg;
        /* The BRG has a 12-bit counter.  For really slow baud rates (or
         * really fast processors), we may have to further divide by 16.
         */
        if (((BRG_UART_CLK / rate) - 1) < 4096)
                *bp = (((BRG_UART_CLK / rate) - 1) << 1) | CPM_BRG_EN;
        else
                *bp = (((BRG_UART_CLK_DIV16 / rate) - 1) << 1) |
                        CPM_BRG_EN | CPM_BRG_DIV16;
}
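
/* Worked example, assuming a hypothetical 48 MHz bi_intfreq, which makes
 * BRG_UART_CLK 3 MHz: cpm_setbrg(0, 9600) computes 3000000/9600 - 1 = 311,
 * which fits the 12-bit counter, so BRG1 is programmed with
 * (311 << 1) | CPM_BRG_EN.  A very slow rate such as 50 baud would give
 * 59999, so the divide-by-16 path is taken instead:
 * 187500/50 - 1 = 3749, programmed with CPM_BRG_DIV16 also set.
 */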

/*
 * dpalloc / dpfree bits.
 */
static spinlock_t cpm_dpmem_lock;
/*
 * 16 blocks should be enough to satisfy all requests
 * until the memory subsystem goes up...
 */
static rh_block_t cpm_boot_dpmem_rh_block[16];
static rh_info_t cpm_dpmem_info;

#define CPM_DPMEM_ALIGNMENT     8

void m8xx_cpm_dpinit(void)
{
        cpm8xx_t *cp = &((immap_t *)IMAP_ADDR)->im_cpm;

        spin_lock_init(&cpm_dpmem_lock);

        /* Initialize the info header */
        rh_init(&cpm_dpmem_info, CPM_DPMEM_ALIGNMENT,
                sizeof(cpm_boot_dpmem_rh_block) /
                sizeof(cpm_boot_dpmem_rh_block[0]),
                cpm_boot_dpmem_rh_block);

        /*
         * Attach the usable dpmem area.
         * XXX: This is actually crap.  CPM_DATAONLY_BASE and
         * CPM_DATAONLY_SIZE are a subset of the available dpram.  It varies
         * with the processor and the microcode patches applied / activated.
         * But the following should be at least safe.
         */
        rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
}

/*
 * Allocate the requested size worth of DP memory.
 * This function used to return an index into the DPRAM area.
 * Now it returns the actual physical address of that area.
 * Use m8xx_cpm_dpram_offset() to get the index.
 */
uint cpm_dpalloc(uint size, uint align)
{
        void *start;
        unsigned long flags;

        spin_lock_irqsave(&cpm_dpmem_lock, flags);
        cpm_dpmem_info.alignment = align;
        start = rh_alloc(&cpm_dpmem_info, size, "commproc");
        spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

        return (uint)start;
}
EXPORT_SYMBOL(cpm_dpalloc);

int cpm_dpfree(uint offset)
{
        int ret;
        unsigned long flags;

        spin_lock_irqsave(&cpm_dpmem_lock, flags);
        ret = rh_free(&cpm_dpmem_info, (void *)offset);
        spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

        return ret;
}
EXPORT_SYMBOL(cpm_dpfree);

uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
{
        void *start;
        unsigned long flags;

        spin_lock_irqsave(&cpm_dpmem_lock, flags);
        cpm_dpmem_info.alignment = align;
        start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
        spin_unlock_irqrestore(&cpm_dpmem_lock, flags);

        return (uint)start;
}
EXPORT_SYMBOL(cpm_dpalloc_fixed);

void cpm_dpdump(void)
{
        rh_dump(&cpm_dpmem_info);
}
EXPORT_SYMBOL(cpm_dpdump);

void *cpm_dpram_addr(uint offset)
{
        return ((immap_t *)IMAP_ADDR)->im_cpm.cp_dpmem + offset;
}
EXPORT_SYMBOL(cpm_dpram_addr);
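
/* Allocation sketch (hypothetical size and alignment): a driver needing a
 * pair of buffer descriptors might do
 *
 *      uint bd_off = cpm_dpalloc(2 * sizeof(cbd_t), 8);
 *      cbd_t *bdp = (cbd_t *)cpm_dpram_addr(bd_off);
 *      ...
 *      cpm_dpfree(bd_off);
 *
 * Whatever value cpm_dpalloc() hands back (it is simply what rh_alloc()
 * returned) must be passed unchanged to cpm_dpfree().
 */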