diff options
author | Timur Tabi <timur@freescale.com> | 2007-10-03 12:34:59 -0400 |
---|---|---|
committer | Kumar Gala <galak@kernel.crashing.org> | 2007-10-08 09:38:15 -0400 |
commit | 6b0b594bb81f86dbc7b0829ee5102abaab242913 (patch) | |
tree | 707463987ab05d04596763afa9db1c63cbde4c4a /arch/powerpc/sysdev/qe_lib/ucc.c | |
parent | 6039680705906f270411435c05c869ac4f59ef10 (diff) |
[POWERPC] qe: miscellaneous code improvements and fixes to the QE library
This patch makes numerous miscellaneous code improvements to the QE library.
1. Remove struct ucc_common and merge ucc_init_guemr() into ucc_set_type()
(every caller of ucc_init_guemr() also calls ucc_set_type()). Modify all
callers of ucc_set_type() accordingly.
2. Remove the unused enum ucc_pram_initial_offset.
3. Refactor qe_setbrg(), also implement work-around for errata QE_General4.
4. Several printk() calls were missing the terminating \n.
5. Add __iomem where needed, and change u16 to __be16 and u32 to __be32 where
appropriate.
6. In ucc_slow_init() the RBASE and TBASE registers in the PRAM were programmed
with the wrong value.
7. Add the protocol type to struct us_info and updated ucc_slow_init() to
use it, instead of always programming QE_CR_PROTOCOL_UNSPECIFIED.
8. Rename ucc_slow_restart_x() to ucc_slow_restart_tx()
9. Add several macros in qe.h (mostly for slow UCC support, but also to
standardize some naming convention) and remove several unused macros.
10. Update ucc_geth.c to use the new macros.
11. Add ucc_slow_info.protocol to specify which QE_CR_PROTOCOL_xxx protocol
to use when initializing the UCC in ucc_slow_init().
12. Rename ucc_slow_pram.rfcr to rbmr and ucc_slow_pram.tfcr to tbmr, since
these are the real names of the registers.
13. Use the setbits, clrbits, and clrsetbits where appropriate.
14. Refactor ucc_set_qe_mux_rxtx().
15. Remove all instances of 'volatile'.
16. Simplify get_cmxucr_reg().
17. Replace qe_mux.cmxucrX with qe_mux.cmxucr[].
18. Updated struct ucc_geth because struct ucc_fast is not padded any more.
Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/sysdev/qe_lib/ucc.c')
-rw-r--r-- | arch/powerpc/sysdev/qe_lib/ucc.c | 270 |
1 files changed, 115 insertions, 155 deletions
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c index f970e5415ac0..0e348d9af8a6 100644 --- a/arch/powerpc/sysdev/qe_lib/ucc.c +++ b/arch/powerpc/sysdev/qe_lib/ucc.c | |||
@@ -28,228 +28,188 @@ | |||
28 | 28 | ||
29 | static DEFINE_SPINLOCK(ucc_lock); | 29 | static DEFINE_SPINLOCK(ucc_lock); |
30 | 30 | ||
31 | int ucc_set_qe_mux_mii_mng(int ucc_num) | 31 | int ucc_set_qe_mux_mii_mng(unsigned int ucc_num) |
32 | { | 32 | { |
33 | unsigned long flags; | 33 | unsigned long flags; |
34 | 34 | ||
35 | if (ucc_num > UCC_MAX_NUM - 1) | ||
36 | return -EINVAL; | ||
37 | |||
35 | spin_lock_irqsave(&ucc_lock, flags); | 38 | spin_lock_irqsave(&ucc_lock, flags); |
36 | out_be32(&qe_immr->qmx.cmxgcr, | 39 | clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG, |
37 | ((in_be32(&qe_immr->qmx.cmxgcr) & | 40 | ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT); |
38 | ~QE_CMXGCR_MII_ENET_MNG) | | ||
39 | (ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT))); | ||
40 | spin_unlock_irqrestore(&ucc_lock, flags); | 41 | spin_unlock_irqrestore(&ucc_lock, flags); |
41 | 42 | ||
42 | return 0; | 43 | return 0; |
43 | } | 44 | } |
44 | EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng); | 45 | EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng); |
45 | 46 | ||
46 | int ucc_set_type(int ucc_num, struct ucc_common *regs, | 47 | /* Configure the UCC to either Slow or Fast. |
47 | enum ucc_speed_type speed) | 48 | * |
48 | { | 49 | * A given UCC can be figured to support either "slow" devices (e.g. UART) |
49 | u8 guemr = 0; | 50 | * or "fast" devices (e.g. Ethernet). |
50 | 51 | * | |
51 | /* check if the UCC number is in range. */ | 52 | * 'ucc_num' is the UCC number, from 0 - 7. |
52 | if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) | 53 | * |
53 | return -EINVAL; | 54 | * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit |
54 | 55 | * must always be set to 1. | |
55 | guemr = regs->guemr; | 56 | */ |
56 | guemr &= ~(UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX); | 57 | int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed) |
57 | switch (speed) { | ||
58 | case UCC_SPEED_TYPE_SLOW: | ||
59 | guemr |= (UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX); | ||
60 | break; | ||
61 | case UCC_SPEED_TYPE_FAST: | ||
62 | guemr |= (UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX); | ||
63 | break; | ||
64 | default: | ||
65 | return -EINVAL; | ||
66 | } | ||
67 | regs->guemr = guemr; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | int ucc_init_guemr(struct ucc_common *regs) | ||
73 | { | 58 | { |
74 | u8 guemr = 0; | 59 | u8 __iomem *guemr; |
75 | |||
76 | if (!regs) | ||
77 | return -EINVAL; | ||
78 | |||
79 | /* Set bit 3 (which is reserved in the GUEMR register) to 1 */ | ||
80 | guemr = UCC_GUEMR_SET_RESERVED3; | ||
81 | |||
82 | regs->guemr = guemr; | ||
83 | |||
84 | return 0; | ||
85 | } | ||
86 | 60 | ||
87 | static void get_cmxucr_reg(int ucc_num, volatile u32 ** p_cmxucr, u8 * reg_num, | 61 | /* The GUEMR register is at the same location for both slow and fast |
88 | u8 * shift) | 62 | devices, so we just use uccX.slow.guemr. */ |
89 | { | ||
90 | switch (ucc_num) { | 63 | switch (ucc_num) { |
91 | case 0: *p_cmxucr = &(qe_immr->qmx.cmxucr1); | 64 | case 0: guemr = &qe_immr->ucc1.slow.guemr; |
92 | *reg_num = 1; | ||
93 | *shift = 16; | ||
94 | break; | 65 | break; |
95 | case 2: *p_cmxucr = &(qe_immr->qmx.cmxucr1); | 66 | case 1: guemr = &qe_immr->ucc2.slow.guemr; |
96 | *reg_num = 1; | ||
97 | *shift = 0; | ||
98 | break; | 67 | break; |
99 | case 4: *p_cmxucr = &(qe_immr->qmx.cmxucr2); | 68 | case 2: guemr = &qe_immr->ucc3.slow.guemr; |
100 | *reg_num = 2; | ||
101 | *shift = 16; | ||
102 | break; | 69 | break; |
103 | case 6: *p_cmxucr = &(qe_immr->qmx.cmxucr2); | 70 | case 3: guemr = &qe_immr->ucc4.slow.guemr; |
104 | *reg_num = 2; | ||
105 | *shift = 0; | ||
106 | break; | 71 | break; |
107 | case 1: *p_cmxucr = &(qe_immr->qmx.cmxucr3); | 72 | case 4: guemr = &qe_immr->ucc5.slow.guemr; |
108 | *reg_num = 3; | ||
109 | *shift = 16; | ||
110 | break; | 73 | break; |
111 | case 3: *p_cmxucr = &(qe_immr->qmx.cmxucr3); | 74 | case 5: guemr = &qe_immr->ucc6.slow.guemr; |
112 | *reg_num = 3; | ||
113 | *shift = 0; | ||
114 | break; | 75 | break; |
115 | case 5: *p_cmxucr = &(qe_immr->qmx.cmxucr4); | 76 | case 6: guemr = &qe_immr->ucc7.slow.guemr; |
116 | *reg_num = 4; | ||
117 | *shift = 16; | ||
118 | break; | 77 | break; |
119 | case 7: *p_cmxucr = &(qe_immr->qmx.cmxucr4); | 78 | case 7: guemr = &qe_immr->ucc8.slow.guemr; |
120 | *reg_num = 4; | ||
121 | *shift = 0; | ||
122 | break; | 79 | break; |
123 | default: | 80 | default: |
124 | break; | 81 | return -EINVAL; |
125 | } | 82 | } |
83 | |||
84 | clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK, | ||
85 | UCC_GUEMR_SET_RESERVED3 | speed); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static void get_cmxucr_reg(unsigned int ucc_num, __be32 **cmxucr, | ||
91 | unsigned int *reg_num, unsigned int *shift) | ||
92 | { | ||
93 | unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3); | ||
94 | |||
95 | *reg_num = cmx + 1; | ||
96 | *cmxucr = &qe_immr->qmx.cmxucr[cmx]; | ||
97 | *shift = 16 - 8 * (ucc_num & 2); | ||
126 | } | 98 | } |
127 | 99 | ||
128 | int ucc_mux_set_grant_tsa_bkpt(int ucc_num, int set, u32 mask) | 100 | int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask) |
129 | { | 101 | { |
130 | volatile u32 *p_cmxucr; | 102 | __be32 *cmxucr; |
131 | u8 reg_num; | 103 | unsigned int reg_num; |
132 | u8 shift; | 104 | unsigned int shift; |
133 | 105 | ||
134 | /* check if the UCC number is in range. */ | 106 | /* check if the UCC number is in range. */ |
135 | if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) | 107 | if (ucc_num > UCC_MAX_NUM - 1) |
136 | return -EINVAL; | 108 | return -EINVAL; |
137 | 109 | ||
138 | get_cmxucr_reg(ucc_num, &p_cmxucr, ®_num, &shift); | 110 | get_cmxucr_reg(ucc_num, &cmxucr, ®_num, &shift); |
139 | 111 | ||
140 | if (set) | 112 | if (set) |
141 | out_be32(p_cmxucr, in_be32(p_cmxucr) | (mask << shift)); | 113 | setbits32(cmxucr, mask << shift); |
142 | else | 114 | else |
143 | out_be32(p_cmxucr, in_be32(p_cmxucr) & ~(mask << shift)); | 115 | clrbits32(cmxucr, mask << shift); |
144 | 116 | ||
145 | return 0; | 117 | return 0; |
146 | } | 118 | } |
147 | 119 | ||
148 | int ucc_set_qe_mux_rxtx(int ucc_num, enum qe_clock clock, enum comm_dir mode) | 120 | int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock, |
121 | enum comm_dir mode) | ||
149 | { | 122 | { |
150 | volatile u32 *p_cmxucr; | 123 | __be32 *cmxucr; |
151 | u8 reg_num; | 124 | unsigned int reg_num; |
152 | u8 shift; | 125 | unsigned int shift; |
153 | u32 clock_bits; | 126 | u32 clock_bits = 0; |
154 | u32 clock_mask; | ||
155 | int source = -1; | ||
156 | 127 | ||
157 | /* check if the UCC number is in range. */ | 128 | /* check if the UCC number is in range. */ |
158 | if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0)) | 129 | if (ucc_num > UCC_MAX_NUM - 1) |
159 | return -EINVAL; | 130 | return -EINVAL; |
160 | 131 | ||
161 | if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) { | 132 | /* The communications direction must be RX or TX */ |
162 | printk(KERN_ERR | 133 | if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) |
163 | "ucc_set_qe_mux_rxtx: bad comm mode type passed."); | ||
164 | return -EINVAL; | 134 | return -EINVAL; |
165 | } | ||
166 | 135 | ||
167 | get_cmxucr_reg(ucc_num, &p_cmxucr, ®_num, &shift); | 136 | get_cmxucr_reg(ucc_num, &cmxucr, ®_num, &shift); |
168 | 137 | ||
169 | switch (reg_num) { | 138 | switch (reg_num) { |
170 | case 1: | 139 | case 1: |
171 | switch (clock) { | 140 | switch (clock) { |
172 | case QE_BRG1: source = 1; break; | 141 | case QE_BRG1: clock_bits = 1; break; |
173 | case QE_BRG2: source = 2; break; | 142 | case QE_BRG2: clock_bits = 2; break; |
174 | case QE_BRG7: source = 3; break; | 143 | case QE_BRG7: clock_bits = 3; break; |
175 | case QE_BRG8: source = 4; break; | 144 | case QE_BRG8: clock_bits = 4; break; |
176 | case QE_CLK9: source = 5; break; | 145 | case QE_CLK9: clock_bits = 5; break; |
177 | case QE_CLK10: source = 6; break; | 146 | case QE_CLK10: clock_bits = 6; break; |
178 | case QE_CLK11: source = 7; break; | 147 | case QE_CLK11: clock_bits = 7; break; |
179 | case QE_CLK12: source = 8; break; | 148 | case QE_CLK12: clock_bits = 8; break; |
180 | case QE_CLK15: source = 9; break; | 149 | case QE_CLK15: clock_bits = 9; break; |
181 | case QE_CLK16: source = 10; break; | 150 | case QE_CLK16: clock_bits = 10; break; |
182 | default: source = -1; break; | 151 | default: break; |
183 | } | 152 | } |
184 | break; | 153 | break; |
185 | case 2: | 154 | case 2: |
186 | switch (clock) { | 155 | switch (clock) { |
187 | case QE_BRG5: source = 1; break; | 156 | case QE_BRG5: clock_bits = 1; break; |
188 | case QE_BRG6: source = 2; break; | 157 | case QE_BRG6: clock_bits = 2; break; |
189 | case QE_BRG7: source = 3; break; | 158 | case QE_BRG7: clock_bits = 3; break; |
190 | case QE_BRG8: source = 4; break; | 159 | case QE_BRG8: clock_bits = 4; break; |
191 | case QE_CLK13: source = 5; break; | 160 | case QE_CLK13: clock_bits = 5; break; |
192 | case QE_CLK14: source = 6; break; | 161 | case QE_CLK14: clock_bits = 6; break; |
193 | case QE_CLK19: source = 7; break; | 162 | case QE_CLK19: clock_bits = 7; break; |
194 | case QE_CLK20: source = 8; break; | 163 | case QE_CLK20: clock_bits = 8; break; |
195 | case QE_CLK15: source = 9; break; | 164 | case QE_CLK15: clock_bits = 9; break; |
196 | case QE_CLK16: source = 10; break; | 165 | case QE_CLK16: clock_bits = 10; break; |
197 | default: source = -1; break; | 166 | default: break; |
198 | } | 167 | } |
199 | break; | 168 | break; |
200 | case 3: | 169 | case 3: |
201 | switch (clock) { | 170 | switch (clock) { |
202 | case QE_BRG9: source = 1; break; | 171 | case QE_BRG9: clock_bits = 1; break; |
203 | case QE_BRG10: source = 2; break; | 172 | case QE_BRG10: clock_bits = 2; break; |
204 | case QE_BRG15: source = 3; break; | 173 | case QE_BRG15: clock_bits = 3; break; |
205 | case QE_BRG16: source = 4; break; | 174 | case QE_BRG16: clock_bits = 4; break; |
206 | case QE_CLK3: source = 5; break; | 175 | case QE_CLK3: clock_bits = 5; break; |
207 | case QE_CLK4: source = 6; break; | 176 | case QE_CLK4: clock_bits = 6; break; |
208 | case QE_CLK17: source = 7; break; | 177 | case QE_CLK17: clock_bits = 7; break; |
209 | case QE_CLK18: source = 8; break; | 178 | case QE_CLK18: clock_bits = 8; break; |
210 | case QE_CLK7: source = 9; break; | 179 | case QE_CLK7: clock_bits = 9; break; |
211 | case QE_CLK8: source = 10; break; | 180 | case QE_CLK8: clock_bits = 10; break; |
212 | case QE_CLK16: source = 11; break; | 181 | case QE_CLK16: clock_bits = 11; break; |
213 | default: source = -1; break; | 182 | default: break; |
214 | } | 183 | } |
215 | break; | 184 | break; |
216 | case 4: | 185 | case 4: |
217 | switch (clock) { | 186 | switch (clock) { |
218 | case QE_BRG13: source = 1; break; | 187 | case QE_BRG13: clock_bits = 1; break; |
219 | case QE_BRG14: source = 2; break; | 188 | case QE_BRG14: clock_bits = 2; break; |
220 | case QE_BRG15: source = 3; break; | 189 | case QE_BRG15: clock_bits = 3; break; |
221 | case QE_BRG16: source = 4; break; | 190 | case QE_BRG16: clock_bits = 4; break; |
222 | case QE_CLK5: source = 5; break; | 191 | case QE_CLK5: clock_bits = 5; break; |
223 | case QE_CLK6: source = 6; break; | 192 | case QE_CLK6: clock_bits = 6; break; |
224 | case QE_CLK21: source = 7; break; | 193 | case QE_CLK21: clock_bits = 7; break; |
225 | case QE_CLK22: source = 8; break; | 194 | case QE_CLK22: clock_bits = 8; break; |
226 | case QE_CLK7: source = 9; break; | 195 | case QE_CLK7: clock_bits = 9; break; |
227 | case QE_CLK8: source = 10; break; | 196 | case QE_CLK8: clock_bits = 10; break; |
228 | case QE_CLK16: source = 11; break; | 197 | case QE_CLK16: clock_bits = 11; break; |
229 | default: source = -1; break; | 198 | default: break; |
230 | } | 199 | } |
231 | break; | 200 | break; |
232 | default: | 201 | default: break; |
233 | source = -1; | ||
234 | break; | ||
235 | } | 202 | } |
236 | 203 | ||
237 | if (source == -1) { | 204 | /* Check for invalid combination of clock and UCC number */ |
238 | printk(KERN_ERR | 205 | if (!clock_bits) |
239 | "ucc_set_qe_mux_rxtx: Bad combination of clock and UCC."); | ||
240 | return -ENOENT; | 206 | return -ENOENT; |
241 | } | ||
242 | 207 | ||
243 | clock_bits = (u32) source; | 208 | if (mode == COMM_DIR_RX) |
244 | clock_mask = QE_CMXUCR_TX_CLK_SRC_MASK; | 209 | shift += 4; |
245 | if (mode == COMM_DIR_RX) { | ||
246 | clock_bits <<= 4; /* Rx field is 4 bits to left of Tx field */ | ||
247 | clock_mask <<= 4; /* Rx field is 4 bits to left of Tx field */ | ||
248 | } | ||
249 | clock_bits <<= shift; | ||
250 | clock_mask <<= shift; | ||
251 | 210 | ||
252 | out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clock_mask) | clock_bits); | 211 | clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift, |
212 | clock_bits << shift); | ||
253 | 213 | ||
254 | return 0; | 214 | return 0; |
255 | } | 215 | } |