author     Timur Tabi <timur@freescale.com>        2007-10-03 12:34:59 -0400
committer  Kumar Gala <galak@kernel.crashing.org>  2007-10-08 09:38:15 -0400
commit     6b0b594bb81f86dbc7b0829ee5102abaab242913 (patch)
tree       707463987ab05d04596763afa9db1c63cbde4c4a /arch
parent     6039680705906f270411435c05c869ac4f59ef10 (diff)
[POWERPC] qe: miscellaneous code improvements and fixes to the QE library
This patch makes numerous miscellaneous code improvements to the QE library.
1. Remove struct ucc_common and merge ucc_init_guemr() into ucc_set_type()
(every caller of ucc_init_guemr() also calls ucc_set_type()). Modify all
callers of ucc_set_type() accordingly.
2. Remove the unused enum ucc_pram_initial_offset.
3. Refactor qe_setbrg() and implement a work-around for errata QE_General4 (a
usage sketch follows this list).
4. Add the missing terminating \n to several printk() calls.
5. Add __iomem where needed, and change u16 to __be16 and u32 to __be32 where
appropriate.
6. In ucc_slow_init() the RBASE and TBASE registers in the PRAM were programmed
with the wrong value.
7. Add the protocol type to struct us_info and update ucc_slow_init() to
use it, instead of always programming QE_CR_PROTOCOL_UNSPECIFIED.
8. Rename ucc_slow_restart_x() to ucc_slow_restart_tx().
9. Add several macros in qe.h (mostly for slow UCC support, but also to
standardize some naming conventions) and remove several unused macros.
10. Update ucc_geth.c to use the new macros.
11. Add ucc_slow_info.protocol to specify which QE_CR_PROTOCOL_xxx protocol
to use when initializing the UCC in ucc_slow_init().
12. Rename ucc_slow_pram.rfcr to rbmr and ucc_slow_pram.tfcr to tbmr, since
these are the real names of the registers.
13. Use the setbits, clrbits, and clrsetbits macros where appropriate.
14. Refactor ucc_set_qe_mux_rxtx().
15. Remove all instances of 'volatile'.
16. Simplify get_cmxucr_reg().
17. Replace qe_mux.cmxucrX with qe_mux.cmxucr[].
18. Update struct ucc_geth because struct ucc_fast is not padded any more.
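
A usage sketch for item 3 (illustration only, not part of the patch): the BRG
number is now 1-based and the caller passes the RDCR/TDCR oversampling factor
explicitly, while qe_setbrg() handles the divide-by-16 fallback and the
QE_General4 even-divisor adjustment internally. The values below are
hypothetical.

        unsigned int baud = 115200;     /* hypothetical desired bit rate */

        /* BRG1 feeds a slow UCC UART that samples at 16x, so the multiplier
         * argument must be 16 to match the RDCR/TDCR setting in GUMR_L.
         */
        qe_setbrg(1, baud, 16);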
Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe.c       |  36
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c    |   2
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_io.c    |  35
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc.c      | 270
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_fast.c | 127
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_slow.c |  48
6 files changed, 236 insertions, 282 deletions
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 90f87408b5d5..3d57d3835b04 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(qe_issue_cmd);
  * 16 BRGs, which can be connected to the QE channels or output
  * as clocks. The BRGs are in two different block of internal
  * memory mapped space.
- * The baud rate clock is the system clock divided by something.
+ * The BRG clock is the QE clock divided by 2.
  * It was set up long ago during the initial boot phase and is
  * is given to us.
  * Baud rate clocks are zero-based in the driver code (as that maps
@@ -165,28 +165,38 @@ unsigned int get_brg_clk(void)
         return brg_clk;
 }
 
-/* This function is used by UARTS, or anything else that uses a 16x
- * oversampled clock.
+/* Program the BRG to the given sampling rate and multiplier
+ *
+ * @brg: the BRG, 1-16
+ * @rate: the desired sampling rate
+ * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+ * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+ * then 'multiplier' should be 8.
+ *
+ * Also note that the value programmed into the BRGC register must be even.
  */
-void qe_setbrg(u32 brg, u32 rate)
+void qe_setbrg(unsigned int brg, unsigned int rate, unsigned int multiplier)
 {
-        volatile u32 *bp;
         u32 divisor, tempval;
-        int div16 = 0;
+        u32 div16 = 0;
 
-        bp = &qe_immr->brg.brgc[brg];
-
-        divisor = (get_brg_clk() / rate);
+        divisor = get_brg_clk() / (rate * multiplier);
+
         if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
-                div16 = 1;
+                div16 = QE_BRGC_DIV16;
                 divisor /= 16;
         }
 
-        tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
-        if (div16)
-                tempval |= QE_BRGC_DIV16;
+        /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+           that the BRG divisor must be even if you're not using divide-by-16
+           mode. */
+        if (!div16 && (divisor & 1))
+                divisor++;
+
+        tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+                QE_BRGC_ENABLE | div16;
 
-        out_be32(bp, tempval);
+        out_be32(&qe_immr->brg.brgc[brg - 1], tempval);
 }
 
 /* Initialize SNUMs (thread serial numbers) according to
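
To make the arithmetic in the reworked qe_setbrg() above concrete, here is a
small stand-alone sketch (illustration only; BRG_CLK and QE_BRGC_DIVISOR_MAX
are assumed example values, the real ones come from get_brg_clk() and the QE
register definitions). It mirrors the divisor selection, the divide-by-16
fallback, and the QE_General4 even-divisor rounding:

        #include <stdio.h>

        /* Assumed values, for illustration only. */
        #define BRG_CLK                 200000000   /* example: 200 MHz BRG clock */
        #define QE_BRGC_DIVISOR_MAX     0xFFF       /* assumed 12-bit divider field */

        /* Mirrors the divisor selection in the reworked qe_setbrg(). */
        static unsigned int pick_divisor(unsigned int rate, unsigned int multiplier,
                                         int *div16)
        {
                unsigned int divisor = BRG_CLK / (rate * multiplier);

                *div16 = 0;
                if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
                        *div16 = 1;
                        divisor /= 16;
                }

                /* Errata QE_General4: the divisor must be even unless
                 * divide-by-16 mode is in use.
                 */
                if (!*div16 && (divisor & 1))
                        divisor++;

                return divisor;
        }

        int main(void)
        {
                int div16;
                unsigned int d = pick_divisor(115200, 16, &div16);

                /* 200000000 / (115200 * 16) truncates to 108: already even,
                 * and small enough that divide-by-16 mode is not needed.
                 */
                printf("divisor=%u div16=%d\n", d, div16);
                return 0;
        }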
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 55e6f394af82..9a2d1edd050e 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -405,8 +405,6 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags)
                 set_irq_data(qe_ic->virq_high, qe_ic);
                 set_irq_chained_handler(qe_ic->virq_high, qe_ic_cascade_high);
         }
-
-        printk("QEIC (%d IRQ sources) at %p\n", NR_QE_IC_INTS, qe_ic->regs);
 }
 
 void qe_ic_set_highest_priority(unsigned int virq, int high)
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index e32b45bf9ff5..a114cb0c572f 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -195,29 +195,22 @@ EXPORT_SYMBOL(par_io_of_config);
 #ifdef DEBUG
 static void dump_par_io(void)
 {
-        int i;
+        unsigned int i;
 
-        printk(KERN_INFO "PAR IO registars:\n");
-        printk(KERN_INFO "Base address: 0x%08x\n", (u32) par_io);
+        printk(KERN_INFO "%s: par_io=%p\n", __FUNCTION__, par_io);
         for (i = 0; i < num_par_io_ports; i++) {
-                printk(KERN_INFO "cpodr[%d] : addr - 0x%08x, val - 0x%08x\n",
-                        i, (u32) & par_io[i].cpodr,
-                        in_be32(&par_io[i].cpodr));
-                printk(KERN_INFO "cpdata[%d]: addr - 0x%08x, val - 0x%08x\n",
-                        i, (u32) & par_io[i].cpdata,
-                        in_be32(&par_io[i].cpdata));
-                printk(KERN_INFO "cpdir1[%d]: addr - 0x%08x, val - 0x%08x\n",
-                        i, (u32) & par_io[i].cpdir1,
-                        in_be32(&par_io[i].cpdir1));
-                printk(KERN_INFO "cpdir2[%d]: addr - 0x%08x, val - 0x%08x\n",
-                        i, (u32) & par_io[i].cpdir2,
-                        in_be32(&par_io[i].cpdir2));
-                printk(KERN_INFO "cppar1[%d]: addr - 0x%08x, val - 0x%08x\n",
-                        i, (u32) & par_io[i].cppar1,
-                        in_be32(&par_io[i].cppar1));
-                printk(KERN_INFO "cppar2[%d]: addr - 0x%08x, val - 0x%08x\n",
-                        i, (u32) & par_io[i].cppar2,
-                        in_be32(&par_io[i].cppar2));
+                printk(KERN_INFO " cpodr[%u]=%08x\n", i,
+                        in_be32(&par_io[i].cpodr));
+                printk(KERN_INFO " cpdata[%u]=%08x\n", i,
+                        in_be32(&par_io[i].cpdata));
+                printk(KERN_INFO " cpdir1[%u]=%08x\n", i,
+                        in_be32(&par_io[i].cpdir1));
+                printk(KERN_INFO " cpdir2[%u]=%08x\n", i,
+                        in_be32(&par_io[i].cpdir2));
+                printk(KERN_INFO " cppar1[%u]=%08x\n", i,
+                        in_be32(&par_io[i].cppar1));
+                printk(KERN_INFO " cppar2[%u]=%08x\n", i,
+                        in_be32(&par_io[i].cppar2));
         }
 
 }
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index f970e5415ac0..0e348d9af8a6 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -28,228 +28,188 @@
 
 static DEFINE_SPINLOCK(ucc_lock);
 
-int ucc_set_qe_mux_mii_mng(int ucc_num)
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
 {
         unsigned long flags;
 
+        if (ucc_num > UCC_MAX_NUM - 1)
+                return -EINVAL;
+
         spin_lock_irqsave(&ucc_lock, flags);
-        out_be32(&qe_immr->qmx.cmxgcr,
-                 ((in_be32(&qe_immr->qmx.cmxgcr) &
-                   ~QE_CMXGCR_MII_ENET_MNG) |
-                  (ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT)));
+        clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+                ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
         spin_unlock_irqrestore(&ucc_lock, flags);
 
         return 0;
 }
 EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
 
-int ucc_set_type(int ucc_num, struct ucc_common *regs,
-                 enum ucc_speed_type speed)
-{
-        u8 guemr = 0;
-
-        /* check if the UCC number is in range. */
-        if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
-                return -EINVAL;
-
-        guemr = regs->guemr;
-        guemr &= ~(UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX);
-        switch (speed) {
-        case UCC_SPEED_TYPE_SLOW:
-                guemr |= (UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
-                break;
-        case UCC_SPEED_TYPE_FAST:
-                guemr |= (UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX);
-                break;
-        default:
-                return -EINVAL;
-        }
-        regs->guemr = guemr;
-
-        return 0;
-}
-
-int ucc_init_guemr(struct ucc_common *regs)
+/* Configure the UCC to either Slow or Fast.
+ *
+ * A given UCC can be figured to support either "slow" devices (e.g. UART)
+ * or "fast" devices (e.g. Ethernet).
+ *
+ * 'ucc_num' is the UCC number, from 0 - 7.
+ *
+ * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
+ * must always be set to 1.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
 {
-        u8 guemr = 0;
-
-        if (!regs)
-                return -EINVAL;
-
-        /* Set bit 3 (which is reserved in the GUEMR register) to 1 */
-        guemr = UCC_GUEMR_SET_RESERVED3;
-
-        regs->guemr = guemr;
-
-        return 0;
-}
+        u8 __iomem *guemr;
 
-static void get_cmxucr_reg(int ucc_num, volatile u32 ** p_cmxucr, u8 * reg_num,
-        u8 * shift)
-{
+        /* The GUEMR register is at the same location for both slow and fast
+           devices, so we just use uccX.slow.guemr. */
         switch (ucc_num) {
-        case 0: *p_cmxucr = &(qe_immr->qmx.cmxucr1);
-                *reg_num = 1;
-                *shift = 16;
+        case 0: guemr = &qe_immr->ucc1.slow.guemr;
                 break;
-        case 2: *p_cmxucr = &(qe_immr->qmx.cmxucr1);
-                *reg_num = 1;
-                *shift = 0;
+        case 1: guemr = &qe_immr->ucc2.slow.guemr;
                 break;
-        case 4: *p_cmxucr = &(qe_immr->qmx.cmxucr2);
-                *reg_num = 2;
-                *shift = 16;
+        case 2: guemr = &qe_immr->ucc3.slow.guemr;
                 break;
-        case 6: *p_cmxucr = &(qe_immr->qmx.cmxucr2);
-                *reg_num = 2;
-                *shift = 0;
+        case 3: guemr = &qe_immr->ucc4.slow.guemr;
                 break;
-        case 1: *p_cmxucr = &(qe_immr->qmx.cmxucr3);
-                *reg_num = 3;
-                *shift = 16;
+        case 4: guemr = &qe_immr->ucc5.slow.guemr;
                 break;
-        case 3: *p_cmxucr = &(qe_immr->qmx.cmxucr3);
-                *reg_num = 3;
-                *shift = 0;
+        case 5: guemr = &qe_immr->ucc6.slow.guemr;
                 break;
-        case 5: *p_cmxucr = &(qe_immr->qmx.cmxucr4);
-                *reg_num = 4;
-                *shift = 16;
+        case 6: guemr = &qe_immr->ucc7.slow.guemr;
                 break;
-        case 7: *p_cmxucr = &(qe_immr->qmx.cmxucr4);
-                *reg_num = 4;
-                *shift = 0;
+        case 7: guemr = &qe_immr->ucc8.slow.guemr;
                 break;
         default:
-                break;
+                return -EINVAL;
         }
+
+        clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+                UCC_GUEMR_SET_RESERVED3 | speed);
+
+        return 0;
+}
+
+static void get_cmxucr_reg(unsigned int ucc_num, __be32 **cmxucr,
+        unsigned int *reg_num, unsigned int *shift)
+{
+        unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
+
+        *reg_num = cmx + 1;
+        *cmxucr = &qe_immr->qmx.cmxucr[cmx];
+        *shift = 16 - 8 * (ucc_num & 2);
 }
 
-int ucc_mux_set_grant_tsa_bkpt(int ucc_num, int set, u32 mask)
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
 {
-        volatile u32 *p_cmxucr;
-        u8 reg_num;
-        u8 shift;
+        __be32 *cmxucr;
+        unsigned int reg_num;
+        unsigned int shift;
 
         /* check if the UCC number is in range. */
-        if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+        if (ucc_num > UCC_MAX_NUM - 1)
                 return -EINVAL;
 
-        get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
+        get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
 
         if (set)
-                out_be32(p_cmxucr, in_be32(p_cmxucr) | (mask << shift));
+                setbits32(cmxucr, mask << shift);
         else
-                out_be32(p_cmxucr, in_be32(p_cmxucr) & ~(mask << shift));
+                clrbits32(cmxucr, mask << shift);
 
         return 0;
 }
 
-int ucc_set_qe_mux_rxtx(int ucc_num, enum qe_clock clock, enum comm_dir mode)
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+        enum comm_dir mode)
 {
-        volatile u32 *p_cmxucr;
-        u8 reg_num;
-        u8 shift;
-        u32 clock_bits;
-        u32 clock_mask;
-        int source = -1;
+        __be32 *cmxucr;
+        unsigned int reg_num;
+        unsigned int shift;
+        u32 clock_bits = 0;
 
         /* check if the UCC number is in range. */
-        if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+        if (ucc_num > UCC_MAX_NUM - 1)
                 return -EINVAL;
 
-        if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) {
-                printk(KERN_ERR
-                       "ucc_set_qe_mux_rxtx: bad comm mode type passed.");
+        /* The communications direction must be RX or TX */
+        if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
                 return -EINVAL;
-        }
 
-        get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
+        get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
 
         switch (reg_num) {
         case 1:
                 switch (clock) {
-                case QE_BRG1: source = 1; break;
-                case QE_BRG2: source = 2; break;
-                case QE_BRG7: source = 3; break;
-                case QE_BRG8: source = 4; break;
-                case QE_CLK9: source = 5; break;
-                case QE_CLK10: source = 6; break;
-                case QE_CLK11: source = 7; break;
-                case QE_CLK12: source = 8; break;
-                case QE_CLK15: source = 9; break;
-                case QE_CLK16: source = 10; break;
-                default: source = -1; break;
+                case QE_BRG1: clock_bits = 1; break;
+                case QE_BRG2: clock_bits = 2; break;
+                case QE_BRG7: clock_bits = 3; break;
+                case QE_BRG8: clock_bits = 4; break;
+                case QE_CLK9: clock_bits = 5; break;
+                case QE_CLK10: clock_bits = 6; break;
+                case QE_CLK11: clock_bits = 7; break;
+                case QE_CLK12: clock_bits = 8; break;
+                case QE_CLK15: clock_bits = 9; break;
+                case QE_CLK16: clock_bits = 10; break;
+                default: break;
                 }
                 break;
         case 2:
                 switch (clock) {
-                case QE_BRG5: source = 1; break;
-                case QE_BRG6: source = 2; break;
-                case QE_BRG7: source = 3; break;
-                case QE_BRG8: source = 4; break;
-                case QE_CLK13: source = 5; break;
-                case QE_CLK14: source = 6; break;
-                case QE_CLK19: source = 7; break;
-                case QE_CLK20: source = 8; break;
-                case QE_CLK15: source = 9; break;
-                case QE_CLK16: source = 10; break;
-                default: source = -1; break;
+                case QE_BRG5: clock_bits = 1; break;
+                case QE_BRG6: clock_bits = 2; break;
+                case QE_BRG7: clock_bits = 3; break;
+                case QE_BRG8: clock_bits = 4; break;
+                case QE_CLK13: clock_bits = 5; break;
+                case QE_CLK14: clock_bits = 6; break;
+                case QE_CLK19: clock_bits = 7; break;
+                case QE_CLK20: clock_bits = 8; break;
+                case QE_CLK15: clock_bits = 9; break;
+                case QE_CLK16: clock_bits = 10; break;
+                default: break;
                 }
                 break;
         case 3:
                 switch (clock) {
-                case QE_BRG9: source = 1; break;
-                case QE_BRG10: source = 2; break;
-                case QE_BRG15: source = 3; break;
-                case QE_BRG16: source = 4; break;
-                case QE_CLK3: source = 5; break;
-                case QE_CLK4: source = 6; break;
-                case QE_CLK17: source = 7; break;
-                case QE_CLK18: source = 8; break;
-                case QE_CLK7: source = 9; break;
-                case QE_CLK8: source = 10; break;
-                case QE_CLK16: source = 11; break;
-                default: source = -1; break;
+                case QE_BRG9: clock_bits = 1; break;
+                case QE_BRG10: clock_bits = 2; break;
+                case QE_BRG15: clock_bits = 3; break;
+                case QE_BRG16: clock_bits = 4; break;
+                case QE_CLK3: clock_bits = 5; break;
+                case QE_CLK4: clock_bits = 6; break;
+                case QE_CLK17: clock_bits = 7; break;
+                case QE_CLK18: clock_bits = 8; break;
+                case QE_CLK7: clock_bits = 9; break;
+                case QE_CLK8: clock_bits = 10; break;
+                case QE_CLK16: clock_bits = 11; break;
+                default: break;
                 }
                 break;
         case 4:
                 switch (clock) {
-                case QE_BRG13: source = 1; break;
-                case QE_BRG14: source = 2; break;
-                case QE_BRG15: source = 3; break;
-                case QE_BRG16: source = 4; break;
-                case QE_CLK5: source = 5; break;
-                case QE_CLK6: source = 6; break;
-                case QE_CLK21: source = 7; break;
-                case QE_CLK22: source = 8; break;
-                case QE_CLK7: source = 9; break;
-                case QE_CLK8: source = 10; break;
-                case QE_CLK16: source = 11; break;
-                default: source = -1; break;
+                case QE_BRG13: clock_bits = 1; break;
+                case QE_BRG14: clock_bits = 2; break;
+                case QE_BRG15: clock_bits = 3; break;
+                case QE_BRG16: clock_bits = 4; break;
+                case QE_CLK5: clock_bits = 5; break;
+                case QE_CLK6: clock_bits = 6; break;
+                case QE_CLK21: clock_bits = 7; break;
+                case QE_CLK22: clock_bits = 8; break;
+                case QE_CLK7: clock_bits = 9; break;
+                case QE_CLK8: clock_bits = 10; break;
+                case QE_CLK16: clock_bits = 11; break;
+                default: break;
                 }
                 break;
-        default:
-                source = -1;
-                break;
+        default: break;
         }
 
-        if (source == -1) {
-                printk(KERN_ERR
-                       "ucc_set_qe_mux_rxtx: Bad combination of clock and UCC.");
+        /* Check for invalid combination of clock and UCC number */
+        if (!clock_bits)
                 return -ENOENT;
-        }
 
-        clock_bits = (u32) source;
-        clock_mask = QE_CMXUCR_TX_CLK_SRC_MASK;
-        if (mode == COMM_DIR_RX) {
-                clock_bits <<= 4;       /* Rx field is 4 bits to left of Tx field */
-                clock_mask <<= 4;       /* Rx field is 4 bits to left of Tx field */
-        }
-        clock_bits <<= shift;
-        clock_mask <<= shift;
+        if (mode == COMM_DIR_RX)
+                shift += 4;
 
-        out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clock_mask) | clock_bits);
+        clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+                clock_bits << shift);
 
         return 0;
 }
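
The index arithmetic in the new get_cmxucr_reg() above is terse; this
stand-alone sketch (illustration only, not part of the patch) recomputes the
mapping and checks it against the CMXUCR register/shift table that the removed
switch statement encoded:

        #include <assert.h>
        #include <stdio.h>

        /* Expected CMXUCR register number (1-4) and shift for each UCC number,
         * exactly as listed in the switch statement that the patch removes.
         */
        static const unsigned int old_reg[8]   = { 1, 3, 1, 3, 2, 4, 2, 4 };
        static const unsigned int old_shift[8] = { 16, 16, 0, 0, 16, 16, 0, 0 };

        int main(void)
        {
                unsigned int ucc_num;

                for (ucc_num = 0; ucc_num < 8; ucc_num++) {
                        /* Same arithmetic as the new get_cmxucr_reg() */
                        unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
                        unsigned int reg_num = cmx + 1;
                        unsigned int shift = 16 - 8 * (ucc_num & 2);

                        assert(reg_num == old_reg[ucc_num]);
                        assert(shift == old_shift[ucc_num]);
                        printf("UCC%u -> CMXUCR%u, shift %u\n",
                               ucc_num + 1, reg_num, shift);
                }
                return 0;
        }

Each UCC owns a 4-bit TX clock field (with the RX field four bits to its left)
in one of the four CMXUCR registers, which is why the old per-UCC switch
collapses to this closed form.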
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index 3df202e8d332..3223acbc39e5 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -30,46 +30,45 @@
 
 void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
 {
-        printk(KERN_INFO "UCC%d Fast registers:", uccf->uf_info->ucc_num);
-        printk(KERN_INFO "Base address: 0x%08x", (u32) uccf->uf_regs);
+        printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
+        printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
 
-        printk(KERN_INFO "gumr : addr - 0x%08x, val - 0x%08x",
-                (u32) & uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
-        printk(KERN_INFO "upsmr : addr - 0x%08x, val - 0x%08x",
-                (u32) & uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
-        printk(KERN_INFO "utodr : addr - 0x%08x, val - 0x%04x",
-                (u32) & uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
-        printk(KERN_INFO "udsr : addr - 0x%08x, val - 0x%04x",
-                (u32) & uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
-        printk(KERN_INFO "ucce : addr - 0x%08x, val - 0x%08x",
-                (u32) & uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
-        printk(KERN_INFO "uccm : addr - 0x%08x, val - 0x%08x",
-                (u32) & uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
-        printk(KERN_INFO "uccs : addr - 0x%08x, val - 0x%02x",
-                (u32) & uccf->uf_regs->uccs, uccf->uf_regs->uccs);
-        printk(KERN_INFO "urfb : addr - 0x%08x, val - 0x%08x",
-                (u32) & uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
-        printk(KERN_INFO "urfs : addr - 0x%08x, val - 0x%04x",
-                (u32) & uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
-        printk(KERN_INFO "urfet : addr - 0x%08x, val - 0x%04x",
-                (u32) & uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
-        printk(KERN_INFO "urfset: addr - 0x%08x, val - 0x%04x",
-                (u32) & uccf->uf_regs->urfset,
-                in_be16(&uccf->uf_regs->urfset));
-        printk(KERN_INFO "utfb : addr - 0x%08x, val - 0x%08x",
-                (u32) & uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
-        printk(KERN_INFO "utfs : addr - 0x%08x, val - 0x%04x",
-                (u32) & uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
-        printk(KERN_INFO "utfet : addr - 0x%08x, val - 0x%04x",
-                (u32) & uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
-        printk(KERN_INFO "utftt : addr - 0x%08x, val - 0x%04x",
-                (u32) & uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
-        printk(KERN_INFO "utpt : addr - 0x%08x, val - 0x%04x",
-                (u32) & uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
-        printk(KERN_INFO "urtry : addr - 0x%08x, val - 0x%08x",
-                (u32) & uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
-        printk(KERN_INFO "guemr : addr - 0x%08x, val - 0x%02x",
-                (u32) & uccf->uf_regs->guemr, uccf->uf_regs->guemr);
+        printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
+                &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+        printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+                &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+        printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+                &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+        printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
+                &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+        printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
+                &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+        printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
+                &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+        printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
+                &uccf->uf_regs->uccs, uccf->uf_regs->uccs);
+        printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
+                &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+        printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
+                &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+        printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+                &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+        printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+                &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+        printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
+                &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+        printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
+                &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+        printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+                &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+        printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+                &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+        printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
+                &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+        printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+                &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+        printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+                &uccf->uf_regs->guemr, uccf->uf_regs->guemr);
 }
 EXPORT_SYMBOL(ucc_fast_dump_regs);
 
@@ -149,55 +148,57 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 
         /* check if the UCC port number is in range. */
         if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
-                printk(KERN_ERR "%s: illegal UCC number", __FUNCTION__);
+                printk(KERN_ERR "%s: illegal UCC number\n", __FUNCTION__);
                 return -EINVAL;
         }
 
         /* Check that 'max_rx_buf_length' is properly aligned (4). */
         if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
-                printk(KERN_ERR "%s: max_rx_buf_length not aligned", __FUNCTION__);
+                printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
+                        __FUNCTION__);
                 return -EINVAL;
         }
 
         /* Validate Virtual Fifo register values */
         if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
-                printk(KERN_ERR "%s: urfs is too small", __FUNCTION__);
+                printk(KERN_ERR "%s: urfs is too small\n", __FUNCTION__);
                 return -EINVAL;
         }
 
         if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-                printk(KERN_ERR "%s: urfs is not aligned", __FUNCTION__);
+                printk(KERN_ERR "%s: urfs is not aligned\n", __FUNCTION__);
                 return -EINVAL;
         }
 
         if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-                printk(KERN_ERR "%s: urfet is not aligned.", __FUNCTION__);
+                printk(KERN_ERR "%s: urfet is not aligned.\n", __FUNCTION__);
                 return -EINVAL;
         }
 
         if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-                printk(KERN_ERR "%s: urfset is not aligned", __FUNCTION__);
+                printk(KERN_ERR "%s: urfset is not aligned\n", __FUNCTION__);
                 return -EINVAL;
         }
 
         if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-                printk(KERN_ERR "%s: utfs is not aligned", __FUNCTION__);
+                printk(KERN_ERR "%s: utfs is not aligned\n", __FUNCTION__);
                 return -EINVAL;
         }
 
         if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-                printk(KERN_ERR "%s: utfet is not aligned", __FUNCTION__);
+                printk(KERN_ERR "%s: utfet is not aligned\n", __FUNCTION__);
                 return -EINVAL;
         }
 
         if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
-                printk(KERN_ERR "%s: utftt is not aligned", __FUNCTION__);
+                printk(KERN_ERR "%s: utftt is not aligned\n", __FUNCTION__);
                 return -EINVAL;
         }
 
         uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
         if (!uccf) {
-                printk(KERN_ERR "%s: Cannot allocate private data", __FUNCTION__);
+                printk(KERN_ERR "%s: Cannot allocate private data\n",
+                        __FUNCTION__);
                 return -ENOMEM;
         }
 
@@ -206,7 +207,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         /* Set the PHY base address */
         uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
         if (uccf->uf_regs == NULL) {
-                printk(KERN_ERR "%s: Cannot map UCC registers", __FUNCTION__);
+                printk(KERN_ERR "%s: Cannot map UCC registers\n", __FUNCTION__);
                 return -ENOMEM;
         }
 
@@ -226,18 +227,10 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         uccf->rx_discarded = 0;
 #endif /* STATISTICS */
 
-        /* Init Guemr register */
-        if ((ret = ucc_init_guemr((struct ucc_common *) (uf_regs)))) {
-                printk(KERN_ERR "%s: cannot init GUEMR", __FUNCTION__);
-                ucc_fast_free(uccf);
-                return ret;
-        }
-
         /* Set UCC to fast type */
-        if ((ret = ucc_set_type(uf_info->ucc_num,
-                                (struct ucc_common *) (uf_regs),
-                                UCC_SPEED_TYPE_FAST))) {
-                printk(KERN_ERR "%s: cannot set UCC type", __FUNCTION__);
+        ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
+        if (ret) {
+                printk(KERN_ERR "%s: cannot set UCC type\n", __FUNCTION__);
                 ucc_fast_free(uccf);
                 return ret;
         }
@@ -276,7 +269,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         uccf->ucc_fast_tx_virtual_fifo_base_offset =
                 qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
         if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
-                printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO", __FUNCTION__);
+                printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+                        __FUNCTION__);
                 uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
                 ucc_fast_free(uccf);
                 return -ENOMEM;
@@ -288,7 +282,8 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
                         UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
                         UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
         if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
-                printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO", __FUNCTION__);
+                printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+                        __FUNCTION__);
                 uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
                 ucc_fast_free(uccf);
                 return -ENOMEM;
@@ -318,7 +313,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         if ((uf_info->rx_clock != QE_CLK_NONE) &&
             ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
                                 COMM_DIR_RX)) {
-                printk(KERN_ERR "%s: illegal value for RX clock",
+                printk(KERN_ERR "%s: illegal value for RX clock\n",
                        __FUNCTION__);
                 ucc_fast_free(uccf);
                 return -EINVAL;
@@ -327,7 +322,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
         if ((uf_info->tx_clock != QE_CLK_NONE) &&
             ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
                                 COMM_DIR_TX)) {
-                printk(KERN_ERR "%s: illegal value for TX clock",
+                printk(KERN_ERR "%s: illegal value for TX clock\n",
                        __FUNCTION__);
                 ucc_fast_free(uccf);
                 return -EINVAL;
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index 1f65c26ce63f..0174b3aeef8f 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -115,11 +115,15 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
         out_be32(&us_regs->gumr_l, gumr_l);
 }
 
+/* Initialize the UCC for Slow operations
+ *
+ * The caller should initialize the following us_info
+ */
 int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
 {
         struct ucc_slow_private *uccs;
         u32 i;
-        struct ucc_slow *us_regs;
+        struct ucc_slow __iomem *us_regs;
         u32 gumr;
         struct qe_bd *bd;
         u32 id;
@@ -131,7 +135,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 
         /* check if the UCC port number is in range. */
         if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
-                printk(KERN_ERR "%s: illegal UCC number", __FUNCTION__);
+                printk(KERN_ERR "%s: illegal UCC number\n", __FUNCTION__);
                 return -EINVAL;
         }
 
@@ -143,13 +147,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
          */
         if ((!us_info->rfw) &&
                 (us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
-                printk(KERN_ERR "max_rx_buf_length not aligned.");
+                printk(KERN_ERR "max_rx_buf_length not aligned.\n");
                 return -EINVAL;
         }
 
         uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
         if (!uccs) {
-                printk(KERN_ERR "%s: Cannot allocate private data", __FUNCTION__);
+                printk(KERN_ERR "%s: Cannot allocate private data\n",
+                        __FUNCTION__);
                 return -ENOMEM;
         }
 
@@ -158,7 +163,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
         /* Set the PHY base address */
         uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
         if (uccs->us_regs == NULL) {
-                printk(KERN_ERR "%s: Cannot map UCC registers", __FUNCTION__);
+                printk(KERN_ERR "%s: Cannot map UCC registers\n", __FUNCTION__);
                 return -ENOMEM;
         }
 
@@ -182,22 +187,14 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                 return -ENOMEM;
         }
         id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
-        qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, QE_CR_PROTOCOL_UNSPECIFIED,
+        qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
                      uccs->us_pram_offset);
 
         uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
 
-        /* Init Guemr register */
-        if ((ret = ucc_init_guemr((struct ucc_common *) us_regs))) {
-                printk(KERN_ERR "%s: cannot init GUEMR", __FUNCTION__);
-                ucc_slow_free(uccs);
-                return ret;
-        }
-
         /* Set UCC to slow type */
-        if ((ret = ucc_set_type(us_info->ucc_num,
-                                (struct ucc_common *) us_regs,
-                                UCC_SPEED_TYPE_SLOW))) {
+        ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
+        if (ret) {
                 printk(KERN_ERR "%s: cannot set UCC type", __FUNCTION__);
                 ucc_slow_free(uccs);
                 return ret;
@@ -212,7 +209,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                 qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
                                QE_ALIGNMENT_OF_BD);
         if (IS_ERR_VALUE(uccs->rx_base_offset)) {
-                printk(KERN_ERR "%s: cannot allocate RX BDs", __FUNCTION__);
+                printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __FUNCTION__,
+                        us_info->rx_bd_ring_len);
                 uccs->rx_base_offset = 0;
                 ucc_slow_free(uccs);
                 return -ENOMEM;
@@ -292,12 +290,12 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 
         /* if the data is in cachable memory, the 'global' */
         /* in the function code should be set. */
-        uccs->us_pram->tfcr = uccs->us_pram->rfcr =
-                us_info->data_mem_part | QE_BMR_BYTE_ORDER_BO_MOT;
+        uccs->us_pram->tbmr = UCC_BMR_BO_BE;
+        uccs->us_pram->rbmr = UCC_BMR_BO_BE;
 
         /* rbase, tbase are offsets from MURAM base */
-        out_be16(&uccs->us_pram->rbase, uccs->us_pram_offset);
-        out_be16(&uccs->us_pram->tbase, uccs->us_pram_offset);
+        out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
+        out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
 
         /* Mux clocking */
         /* Grant Support */
@@ -311,7 +309,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
         /* Rx clock routing */
         if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
                                 COMM_DIR_RX)) {
-                printk(KERN_ERR "%s: illegal value for RX clock",
+                printk(KERN_ERR "%s: illegal value for RX clock\n",
                        __FUNCTION__);
                 ucc_slow_free(uccs);
                 return -EINVAL;
@@ -319,7 +317,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
         /* Tx clock routing */
         if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
                                 COMM_DIR_TX)) {
-                printk(KERN_ERR "%s: illegal value for TX clock",
+                printk(KERN_ERR "%s: illegal value for TX clock\n",
                        __FUNCTION__);
                 ucc_slow_free(uccs);
                 return -EINVAL;
@@ -343,8 +341,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
                 command = QE_INIT_TX;
         else
                 command = QE_INIT_RX;   /* We know at least one is TRUE */
-        id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
-        qe_issue_cmd(command, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+        qe_issue_cmd(command, id, us_info->protocol, 0);
 
         *uccs_ret = uccs;
         return 0;