author	Timur Tabi <timur@freescale.com>	2007-10-03 12:34:59 -0400
committer	Kumar Gala <galak@kernel.crashing.org>	2007-10-08 09:38:15 -0400
commit	6b0b594bb81f86dbc7b0829ee5102abaab242913 (patch)
tree	707463987ab05d04596763afa9db1c63cbde4c4a /include/asm-powerpc/immap_qe.h
parent	6039680705906f270411435c05c869ac4f59ef10 (diff)
[POWERPC] qe: miscellaneous code improvements and fixes to the QE library
This patch makes numerous miscellaneous code improvements to the QE library.

1. Remove struct ucc_common and merge ucc_init_guemr() into ucc_set_type()
   (every caller of ucc_init_guemr() also calls ucc_set_type()). Modify all
   callers of ucc_set_type() accordingly.
2. Remove the unused enum ucc_pram_initial_offset.
3. Refactor qe_setbrg() and implement a work-around for errata QE_General4.
4. Add the missing terminating \n to several printk() calls.
5. Add __iomem where needed, and change u16 to __be16 and u32 to __be32
   where appropriate.
6. Fix ucc_slow_init(), which programmed the RBASE and TBASE registers in
   the PRAM with the wrong values.
7. Add the protocol type to struct us_info and update ucc_slow_init() to use
   it, instead of always programming QE_CR_PROTOCOL_UNSPECIFIED.
8. Rename ucc_slow_restart_x() to ucc_slow_restart_tx().
9. Add several macros to qe.h (mostly for slow UCC support, but also to
   standardize some naming conventions) and remove several unused macros.
10. Update ucc_geth.c to use the new macros.
11. Add ucc_slow_info.protocol to specify which QE_CR_PROTOCOL_xxx protocol
    to use when initializing the UCC in ucc_slow_init().
12. Rename ucc_slow_pram.rfcr to rbmr and ucc_slow_pram.tfcr to tbmr, since
    these are the real names of the registers.
13. Use the setbits, clrbits, and clrsetbits macros where appropriate.
14. Refactor ucc_set_qe_mux_rxtx().
15. Remove all instances of 'volatile'.
16. Simplify get_cmxucr_reg().
17. Replace qe_mux.cmxucrX with qe_mux.cmxucr[] (a sketch of the resulting
    lookup follows this message).
18. Update struct ucc_geth, because struct ucc_fast is no longer padded.

Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
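As an illustration of items 16 and 17, here is a minimal sketch (not the kernel's get_cmxucr_reg()) of how collapsing the four cmxucrX fields into a cmxucr[4] array turns per-UCC register selection into a table lookup. The helper name cmxucr_for_ucc and the zero-based ucc_num parameter are assumptions for this example; the index table follows the pairing documented in the comments removed from struct qe_mux (cmxucr1: UCC1/UCC3, cmxucr2: UCC5/UCC7, cmxucr3: UCC2/UCC4, cmxucr4: UCC6/UCC8).

	/* Hypothetical helper, for illustration only: map a zero-based UCC
	 * number (0 = UCC1 ... 7 = UCC8) to its CMX UCC clock route
	 * register now that struct qe_mux holds them as cmxucr[4]. */
	static inline __be32 __iomem *cmxucr_for_ucc(struct qe_mux __iomem *mux,
						     unsigned int ucc_num)
	{
		/* cmxucr[0] = UCC1/UCC3, cmxucr[1] = UCC5/UCC7,
		 * cmxucr[2] = UCC2/UCC4, cmxucr[3] = UCC6/UCC8 */
		static const u8 idx[8] = { 0, 2, 0, 2, 1, 3, 1, 3 };

		return &mux->cmxucr[idx[ucc_num]];
	}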
Diffstat (limited to 'include/asm-powerpc/immap_qe.h')
-rw-r--r--	include/asm-powerpc/immap_qe.h	30
1 file changed, 10 insertions(+), 20 deletions(-)
diff --git a/include/asm-powerpc/immap_qe.h b/include/asm-powerpc/immap_qe.h
index 02548f74ccb7..aba9806b31c9 100644
--- a/include/asm-powerpc/immap_qe.h
+++ b/include/asm-powerpc/immap_qe.h
@@ -97,10 +97,7 @@ struct qe_mux {
 	__be32	cmxsi1cr_l;	/* CMX SI1 clock route low register */
 	__be32	cmxsi1cr_h;	/* CMX SI1 clock route high register */
 	__be32	cmxsi1syr;	/* CMX SI1 SYNC route register */
-	__be32	cmxucr1;	/* CMX UCC1, UCC3 clock route register */
-	__be32	cmxucr2;	/* CMX UCC5, UCC7 clock route register */
-	__be32	cmxucr3;	/* CMX UCC2, UCC4 clock route register */
-	__be32	cmxucr4;	/* CMX UCC6, UCC8 clock route register */
+	__be32	cmxucr[4];	/* CMX UCCx clock route registers */
 	__be32	cmxupcr;	/* CMX UPC clock route register */
 	u8	res0[0x1C];
 } __attribute__ ((packed));
@@ -261,7 +258,6 @@ struct ucc_slow {
 	__be16	utpt;
 	u8	res4[0x52];
 	u8	guemr;		/* UCC general extended mode register */
-	u8	res5[0x200 - 0x091];
 } __attribute__ ((packed));
 
 /* QE UCC Fast */
@@ -294,21 +290,13 @@ struct ucc_fast {
 	__be32	urtry;		/* UCC retry counter register */
 	u8	res8[0x4C];
 	u8	guemr;		/* UCC general extended mode register */
-	u8	res9[0x100 - 0x091];
-} __attribute__ ((packed));
-
-/* QE UCC */
-struct ucc_common {
-	u8	res1[0x90];
-	u8	guemr;
-	u8	res2[0x200 - 0x091];
 } __attribute__ ((packed));
 
 struct ucc {
 	union {
 		struct ucc_slow slow;
 		struct ucc_fast fast;
-		struct ucc_common common;
+		u8	res[0x200];	/* UCC blocks are 512 bytes each */
 	};
 } __attribute__ ((packed));
 
@@ -407,7 +395,7 @@ struct dbg {
 
 /* RISC Special Registers (Trap and Breakpoint) */
 struct rsp {
-	u8	fixme[0x100];
+	u32	reg[0x40];	/* 64 32-bit registers */
 } __attribute__ ((packed));
 
 struct qe_immap {
@@ -436,11 +424,13 @@ struct qe_immap {
 	u8	res13[0x600];
 	struct upc	upc2;		/* MultiPHY UTOPIA POS Ctrlr 2*/
 	struct sdma	sdma;		/* SDMA */
-	struct dbg	dbg;		/* Debug Space */
-	struct rsp	rsp[0x2];	/* RISC Special Registers
+	struct dbg	dbg;		/* 0x104080 - 0x1040FF
+					   Debug Space */
+	struct rsp	rsp[0x2];	/* 0x104100 - 0x1042FF
+					   RISC Special Registers
 					   (Trap and Breakpoint) */
-	u8	res14[0x300];
-	u8	res15[0x3A00];
+	u8	res14[0x300];		/* 0x104300 - 0x1045FF */
+	u8	res15[0x3A00];		/* 0x104600 - 0x107FFF */
 	u8	res16[0x8000];		/* 0x108000 - 0x110000 */
 	u8	muram[0xC000];		/* 0x110000 - 0x11C000
 					   Multi-user RAM */
@@ -451,7 +441,7 @@ struct qe_immap {
 extern struct qe_immap *qe_immr;
 extern phys_addr_t get_qe_base(void);
 
-static inline unsigned long immrbar_virt_to_phys(volatile void * address)
+static inline unsigned long immrbar_virt_to_phys(void *address)
 {
 	if ( ((u32)address >= (u32)qe_immr) &&
 		((u32)address < ((u32)qe_immr + QE_IMMAP_SIZE)) )
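The last hunk reflects item 15 in this header: the parameter of immrbar_virt_to_phys() is no longer volatile-qualified, so callers pass plain pointers into the ioremapped immap. A hypothetical usage sketch, assuming the pointer lies inside the region mapped at qe_immr (the muram_phys variable name is illustrative only):

	/* translate a plain pointer into the QE multi-user RAM
	 * back to its physical address */
	unsigned long muram_phys = immrbar_virt_to_phys(&qe_immr->muram[0]);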