author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ppc/8260_io
Linux-2.6.12-rc2  v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ppc/8260_io')
-rw-r--r--   arch/ppc/8260_io/Kconfig      |   65
-rw-r--r--   arch/ppc/8260_io/Makefile     |    6
-rw-r--r--   arch/ppc/8260_io/enet.c       |  867
-rw-r--r--   arch/ppc/8260_io/fcc_enet.c   | 2395
4 files changed, 3333 insertions, 0 deletions
diff --git a/arch/ppc/8260_io/Kconfig b/arch/ppc/8260_io/Kconfig
new file mode 100644
index 000000000000..ea9651e2dd6a
--- /dev/null
+++ b/arch/ppc/8260_io/Kconfig
@@ -0,0 +1,65 @@
1 | # | ||
2 | # CPM2 Communication options | ||
3 | # | ||
4 | |||
5 | menu "CPM2 Options" | ||
6 | depends on CPM2 | ||
7 | |||
8 | config SCC_ENET | ||
9 | bool "CPM SCC Ethernet" | ||
10 | depends on NET_ETHERNET | ||
11 | |||
12 | # | ||
13 | # CONFIG_FEC_ENET is only used to get netdevices to call our init | ||
14 | # function. Any combination of FCC1, FCC2 and FCC3 is supported. | ||
15 | # | ||
16 | config FEC_ENET | ||
17 | bool "FCC Ethernet" | ||
18 | depends on NET_ETHERNET | ||
19 | |||
20 | config FCC1_ENET | ||
21 | bool "Ethernet on FCC1" | ||
22 | depends on FEC_ENET | ||
23 | help | ||
24 | Use CPM2 fast Ethernet controller 1 to drive Ethernet (default). | ||
25 | |||
26 | config FCC2_ENET | ||
27 | bool "Ethernet on FCC2" | ||
28 | depends on FEC_ENET | ||
29 | help | ||
30 | Use CPM2 fast Ethernet controller 2 to drive Ethernet. | ||
31 | |||
32 | config FCC3_ENET | ||
33 | bool "Ethernet on FCC3" | ||
34 | depends on FEC_ENET | ||
35 | help | ||
36 | Use CPM2 fast Ethernet controller 3 to drive Ethernet. | ||
37 | |||
38 | config USE_MDIO | ||
39 | bool "Use MDIO for PHY configuration" | ||
40 | depends on FEC_ENET | ||
41 | |||
42 | choice | ||
43 | prompt "Type of PHY" | ||
44 | depends on 8260 && USE_MDIO | ||
45 | default FCC_GENERIC_PHY | ||
46 | |||
47 | config FCC_LXT970 | ||
48 | bool "LXT970" | ||
49 | |||
50 | config FCC_LXT971 | ||
51 | bool "LXT971" | ||
52 | |||
53 | config FCC_QS6612 | ||
54 | bool "QS6612" | ||
55 | |||
56 | config FCC_DM9131 | ||
57 | bool "DM9131" | ||
58 | |||
59 | config FCC_DM9161 | ||
60 | bool "DM9161" | ||
61 | |||
62 | config FCC_GENERIC_PHY | ||
63 | bool "Generic" | ||
64 | endchoice | ||
65 | endmenu | ||
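For reference, a minimal sketch of how the options above might be combined in a board .config. The symbol names come straight from this Kconfig; the particular choice of FCC2 with an LXT971 PHY is only an illustrative assumption, and the platform is assumed to already select CONFIG_8260/CONFIG_CPM2 and CONFIG_NET_ETHERNET:

    CONFIG_8260=y
    CONFIG_CPM2=y
    CONFIG_NET_ETHERNET=y
    CONFIG_FEC_ENET=y
    CONFIG_FCC2_ENET=y
    CONFIG_USE_MDIO=y
    CONFIG_FCC_LXT971=y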
diff --git a/arch/ppc/8260_io/Makefile b/arch/ppc/8260_io/Makefile
new file mode 100644
index 000000000000..971f292c5d48
--- /dev/null
+++ b/arch/ppc/8260_io/Makefile
@@ -0,0 +1,6 @@
1 | # | ||
2 | # Makefile for the linux ppc-specific parts of comm processor (v2) | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_FEC_ENET) += fcc_enet.o | ||
6 | obj-$(CONFIG_SCC_ENET) += enet.o | ||
diff --git a/arch/ppc/8260_io/enet.c b/arch/ppc/8260_io/enet.c
new file mode 100644
index 000000000000..ac6d55fe2235
--- /dev/null
+++ b/arch/ppc/8260_io/enet.c
@@ -0,0 +1,867 @@
1 | /* | ||
2 | * Ethernet driver for Motorola MPC8260. | ||
3 | * Copyright (c) 1999 Dan Malek (dmalek@jlc.net) | ||
4 | * Copyright (c) 2000 MontaVista Software Inc. (source@mvista.com) | ||
5 | * 2.3.99 Updates | ||
6 | * | ||
7 | * I copied this from the 8xx CPM Ethernet driver, so follow the | ||
8 | * credits back through that. | ||
9 | * | ||
10 | * This version of the driver is somewhat selectable for the different | ||
11 | * processor/board combinations. It works for the boards I know about | ||
12 | * now, and should be easily modified to include others. Some of the | ||
13 | * configuration information is contained in <asm/commproc.h> and the | ||
14 | * remainder is here. | ||
15 | * | ||
16 | * Buffer descriptors are kept in the CPM dual port RAM, and the frame | ||
17 | * buffers are in the host memory. | ||
18 | * | ||
19 | * Right now, I am very wasteful with the buffers. I allocate memory | ||
20 | * pages and then divide them into 2K frame buffers. This way I know I | ||
21 | * have buffers large enough to hold one frame within one buffer descriptor. | ||
22 | * Once I get this working, I will use 64 or 128 byte CPM buffers, which | ||
23 | * will be much more memory efficient and will easily handle lots of | ||
24 | * small packets. | ||
25 | * | ||
26 | */ | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/string.h> | ||
30 | #include <linux/ptrace.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/ioport.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/interrupt.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/init.h> | ||
37 | #include <linux/delay.h> | ||
38 | #include <linux/netdevice.h> | ||
39 | #include <linux/etherdevice.h> | ||
40 | #include <linux/skbuff.h> | ||
41 | #include <linux/spinlock.h> | ||
42 | #include <linux/bitops.h> | ||
43 | |||
44 | #include <asm/immap_cpm2.h> | ||
45 | #include <asm/pgtable.h> | ||
46 | #include <asm/mpc8260.h> | ||
47 | #include <asm/uaccess.h> | ||
48 | #include <asm/cpm2.h> | ||
49 | #include <asm/irq.h> | ||
50 | |||
51 | /* | ||
52 | * Theory of Operation | ||
53 | * | ||
54 | * The MPC8260 CPM performs the Ethernet processing on an SCC. It can use | ||
55 | * an arbitrary number of buffers on byte boundaries, but must have at | ||
56 | * least two receive buffers to prevent constant overrun conditions. | ||
57 | * | ||
58 | * The buffer descriptors are allocated from the CPM dual port memory | ||
59 | * with the data buffers allocated from host memory, just like all other | ||
60 | * serial communication protocols. The host memory buffers are allocated | ||
61 | * from the free page pool, and then divided into smaller receive and | ||
62 | * transmit buffers. The size of the buffers should be a power of two, | ||
63 | * since that nicely divides the page. This creates a ring buffer | ||
64 | * structure similar to the LANCE and other controllers. | ||
65 | * | ||
66 | * Like the LANCE driver: | ||
67 | * The driver runs as two independent, single-threaded flows of control. One | ||
68 | * is the send-packet routine, which enforces single-threaded use by the | ||
69 | * cep->tx_busy flag. The other thread is the interrupt handler, which is | ||
70 | * single threaded by the hardware and other software. | ||
71 | */ | ||
72 | |||
73 | /* The transmitter timeout | ||
74 | */ | ||
75 | #define TX_TIMEOUT (2*HZ) | ||
76 | |||
77 | /* The number of Tx and Rx buffers. These are allocated from the page | ||
78 | * pool. The code may assume these are a power of two, so it is best | ||
79 | * to keep them that size. | ||
80 | * We don't need to allocate pages for the transmitter. We just use | ||
81 | * the skbuffer directly. | ||
82 | */ | ||
83 | #define CPM_ENET_RX_PAGES 4 | ||
84 | #define CPM_ENET_RX_FRSIZE 2048 | ||
85 | #define CPM_ENET_RX_FRPPG (PAGE_SIZE / CPM_ENET_RX_FRSIZE) | ||
86 | #define RX_RING_SIZE (CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES) | ||
87 | #define TX_RING_SIZE 8 /* Must be power of two */ | ||
88 | #define TX_RING_MOD_MASK 7 /* for this to work */ | ||
89 | |||
90 | /* The CPM stores dest/src/type, data, and checksum for receive packets. | ||
91 | */ | ||
92 | #define PKT_MAXBUF_SIZE 1518 | ||
93 | #define PKT_MINBUF_SIZE 64 | ||
94 | #define PKT_MAXBLR_SIZE 1520 | ||
95 | |||
96 | /* The CPM buffer descriptors track the ring buffers. The rx_bd_base and | ||
97 | * tx_bd_base always point to the base of the buffer descriptors. The | ||
98 | * cur_rx and cur_tx point to the currently available buffer. | ||
99 | * The dirty_tx tracks the current buffer that is being sent by the | ||
100 | * controller. The cur_tx and dirty_tx are equal under both completely | ||
101 | * empty and completely full conditions. The empty/ready indicator in | ||
102 | * the buffer descriptor determines the actual condition. | ||
103 | */ | ||
104 | struct scc_enet_private { | ||
105 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | ||
106 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | ||
107 | ushort skb_cur; | ||
108 | ushort skb_dirty; | ||
109 | |||
110 | /* CPM dual port RAM relative addresses. | ||
111 | */ | ||
112 | cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ | ||
113 | cbd_t *tx_bd_base; | ||
114 | cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ | ||
115 | cbd_t *dirty_tx; /* The ring entries to be free()ed. */ | ||
116 | scc_t *sccp; | ||
117 | struct net_device_stats stats; | ||
118 | uint tx_full; | ||
119 | spinlock_t lock; | ||
120 | }; | ||
121 | |||
122 | static int scc_enet_open(struct net_device *dev); | ||
123 | static int scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
124 | static int scc_enet_rx(struct net_device *dev); | ||
125 | static irqreturn_t scc_enet_interrupt(int irq, void *dev_id, struct pt_regs *); | ||
126 | static int scc_enet_close(struct net_device *dev); | ||
127 | static struct net_device_stats *scc_enet_get_stats(struct net_device *dev); | ||
128 | static void set_multicast_list(struct net_device *dev); | ||
129 | |||
130 | /* These will be configurable for the SCC choice. | ||
131 | */ | ||
132 | #define CPM_ENET_BLOCK CPM_CR_SCC1_SBLOCK | ||
133 | #define CPM_ENET_PAGE CPM_CR_SCC1_PAGE | ||
134 | #define PROFF_ENET PROFF_SCC1 | ||
135 | #define SCC_ENET 0 | ||
136 | #define SIU_INT_ENET SIU_INT_SCC1 | ||
137 | |||
138 | /* These are both board and SCC dependent.... | ||
139 | */ | ||
140 | #define PD_ENET_RXD ((uint)0x00000001) | ||
141 | #define PD_ENET_TXD ((uint)0x00000002) | ||
142 | #define PD_ENET_TENA ((uint)0x00000004) | ||
143 | #define PC_ENET_RENA ((uint)0x00020000) | ||
144 | #define PC_ENET_CLSN ((uint)0x00000004) | ||
145 | #define PC_ENET_TXCLK ((uint)0x00000800) | ||
146 | #define PC_ENET_RXCLK ((uint)0x00000400) | ||
147 | #define CMX_CLK_ROUTE ((uint)0x25000000) | ||
148 | #define CMX_CLK_MASK ((uint)0xff000000) | ||
149 | |||
150 | /* Specific to a board. | ||
151 | */ | ||
152 | #define PC_EST8260_ENET_LOOPBACK ((uint)0x80000000) | ||
153 | #define PC_EST8260_ENET_SQE ((uint)0x40000000) | ||
154 | #define PC_EST8260_ENET_NOTFD ((uint)0x20000000) | ||
155 | |||
156 | static int | ||
157 | scc_enet_open(struct net_device *dev) | ||
158 | { | ||
159 | |||
160 | /* I should reset the ring buffers here, but I don't yet know | ||
161 | * a simple way to do that. | ||
162 | */ | ||
163 | netif_start_queue(dev); | ||
164 | return 0; /* Always succeed */ | ||
165 | } | ||
166 | |||
167 | static int | ||
168 | scc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
169 | { | ||
170 | struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv; | ||
171 | volatile cbd_t *bdp; | ||
172 | |||
173 | |||
174 | /* Fill in a Tx ring entry */ | ||
175 | bdp = cep->cur_tx; | ||
176 | |||
177 | #ifndef final_version | ||
178 | if (bdp->cbd_sc & BD_ENET_TX_READY) { | ||
179 | /* Ooops. All transmit buffers are full. Bail out. | ||
180 | * This should not happen, since cep->tx_full should be set. | ||
181 | */ | ||
182 | printk("%s: tx queue full!\n", dev->name); | ||
183 | return 1; | ||
184 | } | ||
185 | #endif | ||
186 | |||
187 | /* Clear all of the status flags. | ||
188 | */ | ||
189 | bdp->cbd_sc &= ~BD_ENET_TX_STATS; | ||
190 | |||
191 | /* If the frame is short, tell CPM to pad it. | ||
192 | */ | ||
193 | if (skb->len <= ETH_ZLEN) | ||
194 | bdp->cbd_sc |= BD_ENET_TX_PAD; | ||
195 | else | ||
196 | bdp->cbd_sc &= ~BD_ENET_TX_PAD; | ||
197 | |||
198 | /* Set buffer length and buffer pointer. | ||
199 | */ | ||
200 | bdp->cbd_datlen = skb->len; | ||
201 | bdp->cbd_bufaddr = __pa(skb->data); | ||
202 | |||
203 | /* Save skb pointer. | ||
204 | */ | ||
205 | cep->tx_skbuff[cep->skb_cur] = skb; | ||
206 | |||
207 | cep->stats.tx_bytes += skb->len; | ||
208 | cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK; | ||
209 | |||
210 | spin_lock_irq(&cep->lock); | ||
211 | |||
212 | /* Send it on its way. Tell CPM it's ready, interrupt when done, | ||
213 | * it's the last BD of the frame, and to put the CRC on the end. | ||
214 | */ | ||
215 | bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC); | ||
216 | |||
217 | dev->trans_start = jiffies; | ||
218 | |||
219 | /* If this was the last BD in the ring, start at the beginning again. | ||
220 | */ | ||
221 | if (bdp->cbd_sc & BD_ENET_TX_WRAP) | ||
222 | bdp = cep->tx_bd_base; | ||
223 | else | ||
224 | bdp++; | ||
225 | |||
226 | if (bdp->cbd_sc & BD_ENET_TX_READY) { | ||
227 | netif_stop_queue(dev); | ||
228 | cep->tx_full = 1; | ||
229 | } | ||
230 | |||
231 | cep->cur_tx = (cbd_t *)bdp; | ||
232 | |||
233 | spin_unlock_irq(&cep->lock); | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | static void | ||
239 | scc_enet_timeout(struct net_device *dev) | ||
240 | { | ||
241 | struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv; | ||
242 | |||
243 | printk("%s: transmit timed out.\n", dev->name); | ||
244 | cep->stats.tx_errors++; | ||
245 | #ifndef final_version | ||
246 | { | ||
247 | int i; | ||
248 | cbd_t *bdp; | ||
249 | printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n", | ||
250 | cep->cur_tx, cep->tx_full ? " (full)" : "", | ||
251 | cep->cur_rx); | ||
252 | bdp = cep->tx_bd_base; | ||
253 | printk(" Tx @base %p :\n", bdp); | ||
254 | for (i = 0 ; i < TX_RING_SIZE; i++, bdp++) | ||
255 | printk("%04x %04x %08x\n", | ||
256 | bdp->cbd_sc, | ||
257 | bdp->cbd_datlen, | ||
258 | bdp->cbd_bufaddr); | ||
259 | bdp = cep->rx_bd_base; | ||
260 | printk(" Rx @base %p :\n", bdp); | ||
261 | for (i = 0 ; i < RX_RING_SIZE; i++, bdp++) | ||
262 | printk("%04x %04x %08x\n", | ||
263 | bdp->cbd_sc, | ||
264 | bdp->cbd_datlen, | ||
265 | bdp->cbd_bufaddr); | ||
266 | } | ||
267 | #endif | ||
268 | if (!cep->tx_full) | ||
269 | netif_wake_queue(dev); | ||
270 | } | ||
271 | |||
272 | /* The interrupt handler. | ||
273 | * This is called from the CPM handler, not the MPC core interrupt. | ||
274 | */ | ||
275 | static irqreturn_t | ||
276 | scc_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs) | ||
277 | { | ||
278 | struct net_device *dev = dev_id; | ||
279 | volatile struct scc_enet_private *cep; | ||
280 | volatile cbd_t *bdp; | ||
281 | ushort int_events; | ||
282 | int must_restart; | ||
283 | |||
284 | cep = (struct scc_enet_private *)dev->priv; | ||
285 | |||
286 | /* Get the interrupt events that caused us to be here. | ||
287 | */ | ||
288 | int_events = cep->sccp->scc_scce; | ||
289 | cep->sccp->scc_scce = int_events; | ||
290 | must_restart = 0; | ||
291 | |||
292 | /* Handle receive event in its own function. | ||
293 | */ | ||
294 | if (int_events & SCCE_ENET_RXF) | ||
295 | scc_enet_rx(dev_id); | ||
296 | |||
297 | /* Check for a transmit error. The manual is a little unclear | ||
298 | * about this, so the debug code stays until I get it figured out. It | ||
299 | * appears that if TXE is set, then TXB is not set. However, | ||
300 | * if carrier sense is lost during frame transmission, the TXE | ||
301 | * bit is set, "and continues the buffer transmission normally." | ||
302 | * I don't know if "normally" implies TXB is set when the buffer | ||
303 | * descriptor is closed.....trial and error :-). | ||
304 | */ | ||
305 | |||
306 | /* Transmit OK, or non-fatal error. Update the buffer descriptors. | ||
307 | */ | ||
308 | if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) { | ||
309 | spin_lock(&cep->lock); | ||
310 | bdp = cep->dirty_tx; | ||
311 | while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) { | ||
312 | if ((bdp==cep->cur_tx) && (cep->tx_full == 0)) | ||
313 | break; | ||
314 | |||
315 | if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ | ||
316 | cep->stats.tx_heartbeat_errors++; | ||
317 | if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ | ||
318 | cep->stats.tx_window_errors++; | ||
319 | if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ | ||
320 | cep->stats.tx_aborted_errors++; | ||
321 | if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ | ||
322 | cep->stats.tx_fifo_errors++; | ||
323 | if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ | ||
324 | cep->stats.tx_carrier_errors++; | ||
325 | |||
326 | |||
327 | /* No heartbeat or Lost carrier are not really bad errors. | ||
328 | * The others require a restart transmit command. | ||
329 | */ | ||
330 | if (bdp->cbd_sc & | ||
331 | (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { | ||
332 | must_restart = 1; | ||
333 | cep->stats.tx_errors++; | ||
334 | } | ||
335 | |||
336 | cep->stats.tx_packets++; | ||
337 | |||
338 | /* Deferred means some collisions occurred during transmit, | ||
339 | * but we eventually sent the packet OK. | ||
340 | */ | ||
341 | if (bdp->cbd_sc & BD_ENET_TX_DEF) | ||
342 | cep->stats.collisions++; | ||
343 | |||
344 | /* Free the sk buffer associated with this last transmit. | ||
345 | */ | ||
346 | dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]); | ||
347 | cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK; | ||
348 | |||
349 | /* Update pointer to next buffer descriptor to be transmitted. | ||
350 | */ | ||
351 | if (bdp->cbd_sc & BD_ENET_TX_WRAP) | ||
352 | bdp = cep->tx_bd_base; | ||
353 | else | ||
354 | bdp++; | ||
355 | |||
356 | /* I don't know if we can be held off from processing these | ||
357 | * interrupts for more than one frame time. I really hope | ||
358 | * not. In such a case, we would now want to check the | ||
359 | * currently available BD (cur_tx) and determine if any | ||
360 | * buffers between the dirty_tx and cur_tx have also been | ||
361 | * sent. We would want to process anything in between that | ||
362 | * does not have BD_ENET_TX_READY set. | ||
363 | */ | ||
364 | |||
365 | /* Since we have freed up a buffer, the ring is no longer | ||
366 | * full. | ||
367 | */ | ||
368 | if (cep->tx_full) { | ||
369 | cep->tx_full = 0; | ||
370 | if (netif_queue_stopped(dev)) { | ||
371 | netif_wake_queue(dev); | ||
372 | } | ||
373 | } | ||
374 | |||
375 | cep->dirty_tx = (cbd_t *)bdp; | ||
376 | } | ||
377 | |||
378 | if (must_restart) { | ||
379 | volatile cpm_cpm2_t *cp; | ||
380 | |||
381 | /* Some transmit errors cause the transmitter to shut | ||
382 | * down. We now issue a restart transmit. Since the | ||
383 | * errors close the BD and update the pointers, the restart | ||
384 | * _should_ pick up without having to reset any of our | ||
385 | * pointers either. | ||
386 | */ | ||
387 | |||
388 | cp = cpmp; | ||
389 | cp->cp_cpcr = | ||
390 | mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0, | ||
391 | CPM_CR_RESTART_TX) | CPM_CR_FLG; | ||
392 | while (cp->cp_cpcr & CPM_CR_FLG); | ||
393 | } | ||
394 | spin_unlock(&cep->lock); | ||
395 | } | ||
396 | |||
397 | /* Check for receive busy, i.e. packets coming but no place to | ||
398 | * put them. This "can't happen" because the receive interrupt | ||
399 | * is tossing previous frames. | ||
400 | */ | ||
401 | if (int_events & SCCE_ENET_BSY) { | ||
402 | cep->stats.rx_dropped++; | ||
403 | printk("SCC ENET: BSY can't happen.\n"); | ||
404 | } | ||
405 | |||
406 | return IRQ_HANDLED; | ||
407 | } | ||
408 | |||
409 | /* During a receive, the cur_rx points to the current incoming buffer. | ||
410 | * When we update through the ring, if the next incoming buffer has | ||
411 | * not been given to the system, we just set the empty indicator, | ||
412 | * effectively tossing the packet. | ||
413 | */ | ||
414 | static int | ||
415 | scc_enet_rx(struct net_device *dev) | ||
416 | { | ||
417 | struct scc_enet_private *cep; | ||
418 | volatile cbd_t *bdp; | ||
419 | struct sk_buff *skb; | ||
420 | ushort pkt_len; | ||
421 | |||
422 | cep = (struct scc_enet_private *)dev->priv; | ||
423 | |||
424 | /* First, grab all of the stats for the incoming packet. | ||
425 | * These get messed up if we get called due to a busy condition. | ||
426 | */ | ||
427 | bdp = cep->cur_rx; | ||
428 | |||
429 | for (;;) { | ||
430 | if (bdp->cbd_sc & BD_ENET_RX_EMPTY) | ||
431 | break; | ||
432 | |||
433 | #ifndef final_version | ||
434 | /* Since we have allocated space to hold a complete frame, both | ||
435 | * the first and last indicators should be set. | ||
436 | */ | ||
437 | if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) != | ||
438 | (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) | ||
439 | printk("CPM ENET: rcv is not first+last\n"); | ||
440 | #endif | ||
441 | |||
442 | /* Frame too long or too short. | ||
443 | */ | ||
444 | if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) | ||
445 | cep->stats.rx_length_errors++; | ||
446 | if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */ | ||
447 | cep->stats.rx_frame_errors++; | ||
448 | if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */ | ||
449 | cep->stats.rx_crc_errors++; | ||
450 | if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */ | ||
451 | cep->stats.rx_crc_errors++; | ||
452 | |||
453 | /* Report late collisions as a frame error. | ||
454 | * On this error, the BD is closed, but we don't know what we | ||
455 | * have in the buffer. So, just drop this frame on the floor. | ||
456 | */ | ||
457 | if (bdp->cbd_sc & BD_ENET_RX_CL) { | ||
458 | cep->stats.rx_frame_errors++; | ||
459 | } | ||
460 | else { | ||
461 | |||
462 | /* Process the incoming frame. | ||
463 | */ | ||
464 | cep->stats.rx_packets++; | ||
465 | pkt_len = bdp->cbd_datlen; | ||
466 | cep->stats.rx_bytes += pkt_len; | ||
467 | |||
468 | /* This does 16 byte alignment, much more than we need. | ||
469 | * The packet length includes FCS, but we don't want to | ||
470 | * include that when passing upstream as it messes up | ||
471 | * bridging applications. | ||
472 | */ | ||
473 | skb = dev_alloc_skb(pkt_len-4); | ||
474 | |||
475 | if (skb == NULL) { | ||
476 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); | ||
477 | cep->stats.rx_dropped++; | ||
478 | } | ||
479 | else { | ||
480 | skb->dev = dev; | ||
481 | skb_put(skb,pkt_len-4); /* Make room */ | ||
482 | eth_copy_and_sum(skb, | ||
483 | (unsigned char *)__va(bdp->cbd_bufaddr), | ||
484 | pkt_len-4, 0); | ||
485 | skb->protocol=eth_type_trans(skb,dev); | ||
486 | netif_rx(skb); | ||
487 | } | ||
488 | } | ||
489 | |||
490 | /* Clear the status flags for this buffer. | ||
491 | */ | ||
492 | bdp->cbd_sc &= ~BD_ENET_RX_STATS; | ||
493 | |||
494 | /* Mark the buffer empty. | ||
495 | */ | ||
496 | bdp->cbd_sc |= BD_ENET_RX_EMPTY; | ||
497 | |||
498 | /* Update BD pointer to next entry. | ||
499 | */ | ||
500 | if (bdp->cbd_sc & BD_ENET_RX_WRAP) | ||
501 | bdp = cep->rx_bd_base; | ||
502 | else | ||
503 | bdp++; | ||
504 | |||
505 | } | ||
506 | cep->cur_rx = (cbd_t *)bdp; | ||
507 | |||
508 | return 0; | ||
509 | } | ||
510 | |||
511 | static int | ||
512 | scc_enet_close(struct net_device *dev) | ||
513 | { | ||
514 | /* Don't know what to do yet. | ||
515 | */ | ||
516 | netif_stop_queue(dev); | ||
517 | |||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static struct net_device_stats *scc_enet_get_stats(struct net_device *dev) | ||
522 | { | ||
523 | struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv; | ||
524 | |||
525 | return &cep->stats; | ||
526 | } | ||
527 | |||
528 | /* Set or clear the multicast filter for this adaptor. | ||
529 | * Skeleton taken from sunlance driver. | ||
530 | * The CPM Ethernet implementation allows Multicast as well as individual | ||
531 | * MAC address filtering. Some of the drivers check to make sure it is | ||
532 | * a group multicast address, and discard those that are not. I guess I | ||
533 | * will do the same for now, but just remove the test if you want | ||
534 | * individual filtering as well (do the upper net layers want or support | ||
535 | * this kind of feature?). | ||
536 | */ | ||
537 | |||
538 | static void set_multicast_list(struct net_device *dev) | ||
539 | { | ||
540 | struct scc_enet_private *cep; | ||
541 | struct dev_mc_list *dmi; | ||
542 | u_char *mcptr, *tdptr; | ||
543 | volatile scc_enet_t *ep; | ||
544 | int i, j; | ||
545 | cep = (struct scc_enet_private *)dev->priv; | ||
546 | |||
547 | /* Get pointer to SCC area in parameter RAM. | ||
548 | */ | ||
549 | ep = (scc_enet_t *)dev->base_addr; | ||
550 | |||
551 | if (dev->flags&IFF_PROMISC) { | ||
552 | |||
553 | /* Log any net taps. */ | ||
554 | printk("%s: Promiscuous mode enabled.\n", dev->name); | ||
555 | cep->sccp->scc_psmr |= SCC_PSMR_PRO; | ||
556 | } else { | ||
557 | |||
558 | cep->sccp->scc_psmr &= ~SCC_PSMR_PRO; | ||
559 | |||
560 | if (dev->flags & IFF_ALLMULTI) { | ||
561 | /* Catch all multicast addresses, so set the | ||
562 | * filter to all 1's. | ||
563 | */ | ||
564 | ep->sen_gaddr1 = 0xffff; | ||
565 | ep->sen_gaddr2 = 0xffff; | ||
566 | ep->sen_gaddr3 = 0xffff; | ||
567 | ep->sen_gaddr4 = 0xffff; | ||
568 | } | ||
569 | else { | ||
570 | /* Clear filter and add the addresses in the list. | ||
571 | */ | ||
572 | ep->sen_gaddr1 = 0; | ||
573 | ep->sen_gaddr2 = 0; | ||
574 | ep->sen_gaddr3 = 0; | ||
575 | ep->sen_gaddr4 = 0; | ||
576 | |||
577 | dmi = dev->mc_list; | ||
578 | |||
579 | for (i=0; i<dev->mc_count; i++) { | ||
580 | |||
581 | /* Only support group multicast for now. | ||
582 | */ | ||
583 | if (!(dmi->dmi_addr[0] & 1)) | ||
584 | continue; | ||
585 | |||
586 | /* The address in dmi_addr is LSB first, | ||
587 | * and taddr is MSB first. We have to | ||
588 | * copy bytes MSB first from dmi_addr. | ||
589 | */ | ||
590 | mcptr = (u_char *)dmi->dmi_addr + 5; | ||
591 | tdptr = (u_char *)&ep->sen_taddrh; | ||
592 | for (j=0; j<6; j++) | ||
593 | *tdptr++ = *mcptr--; | ||
594 | |||
595 | /* Ask CPM to run CRC and set bit in | ||
596 | * filter mask. | ||
597 | */ | ||
598 | cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE, | ||
599 | CPM_ENET_BLOCK, 0, | ||
600 | CPM_CR_SET_GADDR) | CPM_CR_FLG; | ||
601 | /* this delay is necessary here -- Cort */ | ||
602 | udelay(10); | ||
603 | while (cpmp->cp_cpcr & CPM_CR_FLG); | ||
604 | } | ||
605 | } | ||
606 | } | ||
607 | } | ||
608 | |||
609 | /* Initialize the CPM Ethernet on SCC. | ||
610 | */ | ||
611 | static int __init scc_enet_init(void) | ||
612 | { | ||
613 | struct net_device *dev; | ||
614 | struct scc_enet_private *cep; | ||
615 | int i, j, err; | ||
616 | uint dp_offset; | ||
617 | unsigned char *eap; | ||
618 | unsigned long mem_addr; | ||
619 | bd_t *bd; | ||
620 | volatile cbd_t *bdp; | ||
621 | volatile cpm_cpm2_t *cp; | ||
622 | volatile scc_t *sccp; | ||
623 | volatile scc_enet_t *ep; | ||
624 | volatile cpm2_map_t *immap; | ||
625 | volatile iop_cpm2_t *io; | ||
626 | |||
627 | cp = cpmp; /* Get pointer to Communication Processor */ | ||
628 | |||
629 | immap = (cpm2_map_t *)CPM_MAP_ADDR; /* and to internal registers */ | ||
630 | io = &immap->im_ioport; | ||
631 | |||
632 | bd = (bd_t *)__res; | ||
633 | |||
634 | /* Create an Ethernet device instance. | ||
635 | */ | ||
636 | dev = alloc_etherdev(sizeof(*cep)); | ||
637 | if (!dev) | ||
638 | return -ENOMEM; | ||
639 | |||
640 | cep = dev->priv; | ||
641 | spin_lock_init(&cep->lock); | ||
642 | |||
643 | /* Get pointer to SCC area in parameter RAM. | ||
644 | */ | ||
645 | ep = (scc_enet_t *)(&immap->im_dprambase[PROFF_ENET]); | ||
646 | |||
647 | /* And another to the SCC register area. | ||
648 | */ | ||
649 | sccp = (volatile scc_t *)(&immap->im_scc[SCC_ENET]); | ||
650 | cep->sccp = (scc_t *)sccp; /* Keep the pointer handy */ | ||
651 | |||
652 | /* Disable receive and transmit in case someone left it running. | ||
653 | */ | ||
654 | sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT); | ||
655 | |||
656 | /* Configure port C and D pins for SCC Ethernet. This | ||
657 | * won't work for all SCC possibilities....it will be | ||
658 | * board/port specific. | ||
659 | */ | ||
660 | io->iop_pparc |= | ||
661 | (PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK); | ||
662 | io->iop_pdirc &= | ||
663 | ~(PC_ENET_RENA | PC_ENET_CLSN | PC_ENET_TXCLK | PC_ENET_RXCLK); | ||
664 | io->iop_psorc &= | ||
665 | ~(PC_ENET_RENA | PC_ENET_TXCLK | PC_ENET_RXCLK); | ||
666 | io->iop_psorc |= PC_ENET_CLSN; | ||
667 | |||
668 | io->iop_ppard |= (PD_ENET_RXD | PD_ENET_TXD | PD_ENET_TENA); | ||
669 | io->iop_pdird |= (PD_ENET_TXD | PD_ENET_TENA); | ||
670 | io->iop_pdird &= ~PD_ENET_RXD; | ||
671 | io->iop_psord |= PD_ENET_TXD; | ||
672 | io->iop_psord &= ~(PD_ENET_RXD | PD_ENET_TENA); | ||
673 | |||
674 | /* Configure Serial Interface clock routing. | ||
675 | * First, clear all SCC bits to zero, then set the ones we want. | ||
676 | */ | ||
677 | immap->im_cpmux.cmx_scr &= ~CMX_CLK_MASK; | ||
678 | immap->im_cpmux.cmx_scr |= CMX_CLK_ROUTE; | ||
679 | |||
680 | /* Allocate space for the buffer descriptors in the DP ram. | ||
681 | * These are relative offsets in the DP ram address space. | ||
682 | * Initialize base addresses for the buffer descriptors. | ||
683 | */ | ||
684 | dp_offset = cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE, 8); | ||
685 | ep->sen_genscc.scc_rbase = dp_offset; | ||
686 | cep->rx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset); | ||
687 | |||
688 | dp_offset = cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE, 8); | ||
689 | ep->sen_genscc.scc_tbase = dp_offset; | ||
690 | cep->tx_bd_base = (cbd_t *)cpm_dpram_addr(dp_offset); | ||
691 | |||
692 | cep->dirty_tx = cep->cur_tx = cep->tx_bd_base; | ||
693 | cep->cur_rx = cep->rx_bd_base; | ||
694 | |||
695 | ep->sen_genscc.scc_rfcr = CPMFCR_GBL | CPMFCR_EB; | ||
696 | ep->sen_genscc.scc_tfcr = CPMFCR_GBL | CPMFCR_EB; | ||
697 | |||
698 | /* Set maximum bytes per receive buffer. | ||
699 | * This appears to be an Ethernet frame size, not the buffer | ||
700 | * fragment size. It must be a multiple of four. | ||
701 | */ | ||
702 | ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE; | ||
703 | |||
704 | /* Set CRC preset and mask. | ||
705 | */ | ||
706 | ep->sen_cpres = 0xffffffff; | ||
707 | ep->sen_cmask = 0xdebb20e3; | ||
708 | |||
709 | ep->sen_crcec = 0; /* CRC Error counter */ | ||
710 | ep->sen_alec = 0; /* alignment error counter */ | ||
711 | ep->sen_disfc = 0; /* discard frame counter */ | ||
712 | |||
713 | ep->sen_pads = 0x8888; /* Tx short frame pad character */ | ||
714 | ep->sen_retlim = 15; /* Retry limit threshold */ | ||
715 | |||
716 | ep->sen_maxflr = PKT_MAXBUF_SIZE; /* maximum frame length register */ | ||
717 | ep->sen_minflr = PKT_MINBUF_SIZE; /* minimum frame length register */ | ||
718 | |||
719 | ep->sen_maxd1 = PKT_MAXBLR_SIZE; /* maximum DMA1 length */ | ||
720 | ep->sen_maxd2 = PKT_MAXBLR_SIZE; /* maximum DMA2 length */ | ||
721 | |||
722 | /* Clear hash tables. | ||
723 | */ | ||
724 | ep->sen_gaddr1 = 0; | ||
725 | ep->sen_gaddr2 = 0; | ||
726 | ep->sen_gaddr3 = 0; | ||
727 | ep->sen_gaddr4 = 0; | ||
728 | ep->sen_iaddr1 = 0; | ||
729 | ep->sen_iaddr2 = 0; | ||
730 | ep->sen_iaddr3 = 0; | ||
731 | ep->sen_iaddr4 = 0; | ||
732 | |||
733 | /* Set Ethernet station address. | ||
734 | * | ||
735 | * This is supplied in the board information structure, so we | ||
736 | * copy that into the controller. | ||
737 | */ | ||
738 | eap = (unsigned char *)&(ep->sen_paddrh); | ||
739 | for (i=5; i>=0; i--) | ||
740 | *eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i]; | ||
741 | |||
742 | ep->sen_pper = 0; /* 'cause the book says so */ | ||
743 | ep->sen_taddrl = 0; /* temp address (LSB) */ | ||
744 | ep->sen_taddrm = 0; | ||
745 | ep->sen_taddrh = 0; /* temp address (MSB) */ | ||
746 | |||
747 | /* Now allocate the host memory pages and initialize the | ||
748 | * buffer descriptors. | ||
749 | */ | ||
750 | bdp = cep->tx_bd_base; | ||
751 | for (i=0; i<TX_RING_SIZE; i++) { | ||
752 | |||
753 | /* Initialize the BD for every fragment in the page. | ||
754 | */ | ||
755 | bdp->cbd_sc = 0; | ||
756 | bdp->cbd_bufaddr = 0; | ||
757 | bdp++; | ||
758 | } | ||
759 | |||
760 | /* Set the last buffer to wrap. | ||
761 | */ | ||
762 | bdp--; | ||
763 | bdp->cbd_sc |= BD_SC_WRAP; | ||
764 | |||
765 | bdp = cep->rx_bd_base; | ||
766 | for (i=0; i<CPM_ENET_RX_PAGES; i++) { | ||
767 | |||
768 | /* Allocate a page. | ||
769 | */ | ||
770 | mem_addr = __get_free_page(GFP_KERNEL); | ||
771 | /* BUG: no check for failure */ | ||
772 | |||
773 | /* Initialize the BD for every fragment in the page. | ||
774 | */ | ||
775 | for (j=0; j<CPM_ENET_RX_FRPPG; j++) { | ||
776 | bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR; | ||
777 | bdp->cbd_bufaddr = __pa(mem_addr); | ||
778 | mem_addr += CPM_ENET_RX_FRSIZE; | ||
779 | bdp++; | ||
780 | } | ||
781 | } | ||
782 | |||
783 | /* Set the last buffer to wrap. | ||
784 | */ | ||
785 | bdp--; | ||
786 | bdp->cbd_sc |= BD_SC_WRAP; | ||
787 | |||
788 | /* Let's re-initialize the channel now. We have to do it later | ||
789 | * than the manual describes because we have just now finished | ||
790 | * the BD initialization. | ||
791 | */ | ||
792 | cpmp->cp_cpcr = mk_cr_cmd(CPM_ENET_PAGE, CPM_ENET_BLOCK, 0, | ||
793 | CPM_CR_INIT_TRX) | CPM_CR_FLG; | ||
794 | while (cp->cp_cpcr & CPM_CR_FLG); | ||
795 | |||
796 | cep->skb_cur = cep->skb_dirty = 0; | ||
797 | |||
798 | sccp->scc_scce = 0xffff; /* Clear any pending events */ | ||
799 | |||
800 | /* Enable interrupts for transmit error, complete frame | ||
801 | * received, and any transmit buffer we have also set the | ||
802 | * interrupt flag. | ||
803 | */ | ||
804 | sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB); | ||
805 | |||
806 | /* Install our interrupt handler. | ||
807 | */ | ||
808 | request_irq(SIU_INT_ENET, scc_enet_interrupt, 0, "enet", dev); | ||
809 | /* BUG: no check for failure */ | ||
810 | |||
811 | /* Set GSMR_H to enable all normal operating modes. | ||
812 | * Set GSMR_L to enable Ethernet to MC68160. | ||
813 | */ | ||
814 | sccp->scc_gsmrh = 0; | ||
815 | sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET); | ||
816 | |||
817 | /* Set sync/delimiters. | ||
818 | */ | ||
819 | sccp->scc_dsr = 0xd555; | ||
820 | |||
821 | /* Set processing mode. Use Ethernet CRC, catch broadcast, and | ||
822 | * start frame search 22 bit times after RENA. | ||
823 | */ | ||
824 | sccp->scc_psmr = (SCC_PSMR_ENCRC | SCC_PSMR_NIB22); | ||
825 | |||
826 | /* It is now OK to enable the Ethernet transmitter. | ||
827 | * Unfortunately, there are board implementation differences here. | ||
828 | */ | ||
829 | io->iop_pparc &= ~(PC_EST8260_ENET_LOOPBACK | | ||
830 | PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD); | ||
831 | io->iop_psorc &= ~(PC_EST8260_ENET_LOOPBACK | | ||
832 | PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD); | ||
833 | io->iop_pdirc |= (PC_EST8260_ENET_LOOPBACK | | ||
834 | PC_EST8260_ENET_SQE | PC_EST8260_ENET_NOTFD); | ||
835 | io->iop_pdatc &= ~(PC_EST8260_ENET_LOOPBACK | PC_EST8260_ENET_SQE); | ||
836 | io->iop_pdatc |= PC_EST8260_ENET_NOTFD; | ||
837 | |||
838 | dev->base_addr = (unsigned long)ep; | ||
839 | |||
840 | /* The CPM Ethernet specific entries in the device structure. */ | ||
841 | dev->open = scc_enet_open; | ||
842 | dev->hard_start_xmit = scc_enet_start_xmit; | ||
843 | dev->tx_timeout = scc_enet_timeout; | ||
844 | dev->watchdog_timeo = TX_TIMEOUT; | ||
845 | dev->stop = scc_enet_close; | ||
846 | dev->get_stats = scc_enet_get_stats; | ||
847 | dev->set_multicast_list = set_multicast_list; | ||
848 | |||
849 | /* And last, enable the transmit and receive processing. | ||
850 | */ | ||
851 | sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT); | ||
852 | |||
853 | err = register_netdev(dev); | ||
854 | if (err) { | ||
855 | free_netdev(dev); | ||
856 | return err; | ||
857 | } | ||
858 | |||
859 | printk("%s: SCC ENET Version 0.1, ", dev->name); | ||
860 | for (i=0; i<5; i++) | ||
861 | printk("%02x:", dev->dev_addr[i]); | ||
862 | printk("%02x\n", dev->dev_addr[5]); | ||
863 | |||
864 | return 0; | ||
865 | } | ||
866 | |||
867 | module_init(scc_enet_init); | ||
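Before the second driver, a brief aside on the ring bookkeeping described in the comments above: cur_tx is where the next frame is queued, dirty_tx is the oldest descriptor still owned by the controller, and the READY and WRAP bits in cbd_sc decide ownership and wrap-around. The stand-alone C sketch below models that scheme with a free-descriptor count (much as fcc_enet.c below does with tx_free); it is an illustration of the technique only, not code from this commit.

#include <stdio.h>

#define RING_SIZE 8
#define BD_READY  0x8000        /* descriptor currently owned by the "controller" */
#define BD_WRAP   0x2000        /* marks the last descriptor in the ring */

struct bd { unsigned short sc; unsigned short len; };

static struct bd ring[RING_SIZE];
static struct bd *cur_tx = ring, *dirty_tx = ring;
static int tx_free = RING_SIZE;

/* Advance a ring pointer, honouring the WRAP bit as the drivers do. */
static struct bd *bd_next(struct bd *bdp)
{
        return (bdp->sc & BD_WRAP) ? ring : bdp + 1;
}

/* Queue a frame: hand the descriptor to the controller by setting READY. */
static int tx_queue(unsigned short len)
{
        if (!tx_free || (cur_tx->sc & BD_READY))
                return -1;      /* ring full: the real driver stops the net queue here */
        cur_tx->len = len;
        cur_tx->sc |= BD_READY;
        cur_tx = bd_next(cur_tx);
        tx_free--;
        return 0;
}

/* Completion path: reclaim descriptors the controller has finished with. */
static void tx_reclaim(void)
{
        while (tx_free < RING_SIZE && (dirty_tx->sc & BD_READY) == 0) {
                printf("completed frame of %u bytes\n", (unsigned)dirty_tx->len);
                dirty_tx = bd_next(dirty_tx);
                tx_free++;
        }
}

int main(void)
{
        ring[RING_SIZE - 1].sc = BD_WRAP;   /* mark the last BD, as the init code does */
        tx_queue(60);
        tx_queue(1514);
        ring[0].sc &= ~BD_READY;            /* pretend the controller sent the first frame */
        tx_reclaim();
        return 0;
}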
diff --git a/arch/ppc/8260_io/fcc_enet.c b/arch/ppc/8260_io/fcc_enet.c
new file mode 100644
index 000000000000..2086c6ad1147
--- /dev/null
+++ b/arch/ppc/8260_io/fcc_enet.c
@@ -0,0 +1,2395 @@
1 | /* | ||
2 | * Fast Ethernet Controller (FCC) driver for Motorola MPC8260. | ||
3 | * Copyright (c) 2000 MontaVista Software, Inc. Dan Malek (dmalek@jlc.net) | ||
4 | * | ||
5 | * This version of the driver is a combination of the 8xx fec and | ||
6 | * 8260 SCC Ethernet drivers. This version has some additional | ||
7 | * configuration options, which should probably be moved out of | ||
8 | * here. This driver currently works for the EST SBC8260, | ||
9 | * SBS Diablo/BCM, Embedded Planet RPX6, TQM8260, and others. | ||
10 | * | ||
11 | * Right now, I am very wasteful with the buffers. I allocate memory | ||
12 | * pages and then divide them into 2K frame buffers. This way I know I | ||
13 | * have buffers large enough to hold one frame within one buffer descriptor. | ||
14 | * Once I get this working, I will use 64 or 128 byte CPM buffers, which | ||
15 | * will be much more memory efficient and will easily handle lots of | ||
16 | * small packets. Since this is a cache coherent processor and CPM, | ||
17 | * I could also preallocate SKB's and use them directly on the interface. | ||
18 | * | ||
19 | * 2004-12 Leo Li (leoli@freescale.com) | ||
20 | * - Rework the FCC clock configuration part, make it easier to configure. | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/config.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/sched.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/ioport.h> | ||
31 | #include <linux/slab.h> | ||
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/pci.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/delay.h> | ||
36 | #include <linux/netdevice.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | #include <linux/skbuff.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/mii.h> | ||
41 | #include <linux/workqueue.h> | ||
42 | #include <linux/bitops.h> | ||
43 | |||
44 | #include <asm/immap_cpm2.h> | ||
45 | #include <asm/pgtable.h> | ||
46 | #include <asm/mpc8260.h> | ||
47 | #include <asm/irq.h> | ||
48 | #include <asm/uaccess.h> | ||
49 | #include <asm/signal.h> | ||
50 | |||
51 | /* We can't use the PHY interrupt if we aren't using MDIO. */ | ||
52 | #if !defined(CONFIG_USE_MDIO) | ||
53 | #undef PHY_INTERRUPT | ||
54 | #endif | ||
55 | |||
56 | /* If we have a PHY interrupt, we will advertise both full-duplex and half- | ||
57 | * duplex capabilities. If we don't have a PHY interrupt, then we will only | ||
58 | * advertise half-duplex capabilities. | ||
59 | */ | ||
60 | #define MII_ADVERTISE_HALF (ADVERTISE_100HALF | ADVERTISE_10HALF | \ | ||
61 | ADVERTISE_CSMA) | ||
62 | #define MII_ADVERTISE_ALL (ADVERTISE_100FULL | ADVERTISE_10FULL | \ | ||
63 | MII_ADVERTISE_HALF) | ||
64 | #ifdef PHY_INTERRUPT | ||
65 | #define MII_ADVERTISE_DEFAULT MII_ADVERTISE_ALL | ||
66 | #else | ||
67 | #define MII_ADVERTISE_DEFAULT MII_ADVERTISE_HALF | ||
68 | #endif | ||
69 | #include <asm/cpm2.h> | ||
70 | |||
71 | /* The transmitter timeout | ||
72 | */ | ||
73 | #define TX_TIMEOUT (2*HZ) | ||
74 | |||
75 | #ifdef CONFIG_USE_MDIO | ||
76 | /* Forward declarations of some structures to support different PHYs */ | ||
77 | |||
78 | typedef struct { | ||
79 | uint mii_data; | ||
80 | void (*funct)(uint mii_reg, struct net_device *dev); | ||
81 | } phy_cmd_t; | ||
82 | |||
83 | typedef struct { | ||
84 | uint id; | ||
85 | char *name; | ||
86 | |||
87 | const phy_cmd_t *config; | ||
88 | const phy_cmd_t *startup; | ||
89 | const phy_cmd_t *ack_int; | ||
90 | const phy_cmd_t *shutdown; | ||
91 | } phy_info_t; | ||
92 | |||
93 | /* values for phy_status */ | ||
94 | |||
95 | #define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */ | ||
96 | #define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */ | ||
97 | #define PHY_CONF_SPMASK 0x00f0 /* mask for speed */ | ||
98 | #define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */ | ||
99 | #define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */ | ||
100 | #define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */ | ||
101 | #define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */ | ||
102 | |||
103 | #define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */ | ||
104 | #define PHY_STAT_FAULT 0x0200 /* 1 remote fault */ | ||
105 | #define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */ | ||
106 | #define PHY_STAT_SPMASK 0xf000 /* mask for speed */ | ||
107 | #define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */ | ||
108 | #define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */ | ||
109 | #define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */ | ||
110 | #define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */ | ||
111 | #endif /* CONFIG_USE_MDIO */ | ||
112 | |||
113 | /* The number of Tx and Rx buffers. These are allocated from the page | ||
114 | * pool. The code may assume these are a power of two, so it is best | ||
115 | * to keep them that size. | ||
116 | * We don't need to allocate pages for the transmitter. We just use | ||
117 | * the skbuffer directly. | ||
118 | */ | ||
119 | #define FCC_ENET_RX_PAGES 16 | ||
120 | #define FCC_ENET_RX_FRSIZE 2048 | ||
121 | #define FCC_ENET_RX_FRPPG (PAGE_SIZE / FCC_ENET_RX_FRSIZE) | ||
122 | #define RX_RING_SIZE (FCC_ENET_RX_FRPPG * FCC_ENET_RX_PAGES) | ||
123 | #define TX_RING_SIZE 16 /* Must be power of two */ | ||
124 | #define TX_RING_MOD_MASK 15 /* for this to work */ | ||
125 | |||
126 | /* The FCC stores dest/src/type, data, and checksum for receive packets. | ||
127 | * size includes support for VLAN | ||
128 | */ | ||
129 | #define PKT_MAXBUF_SIZE 1522 | ||
130 | #define PKT_MINBUF_SIZE 64 | ||
131 | |||
132 | /* Maximum input DMA size. Must be (should be?) a multiple of 4. | ||
133 | * size includes support for VLAN | ||
134 | */ | ||
135 | #define PKT_MAXDMA_SIZE 1524 | ||
136 | |||
137 | /* Maximum input buffer size. Must be a multiple of 32. | ||
138 | */ | ||
139 | #define PKT_MAXBLR_SIZE 1536 | ||
140 | |||
141 | static int fcc_enet_open(struct net_device *dev); | ||
142 | static int fcc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
143 | static int fcc_enet_rx(struct net_device *dev); | ||
144 | static irqreturn_t fcc_enet_interrupt(int irq, void *dev_id, struct pt_regs *); | ||
145 | static int fcc_enet_close(struct net_device *dev); | ||
146 | static struct net_device_stats *fcc_enet_get_stats(struct net_device *dev); | ||
147 | /* static void set_multicast_list(struct net_device *dev); */ | ||
148 | static void fcc_restart(struct net_device *dev, int duplex); | ||
149 | static void fcc_stop(struct net_device *dev); | ||
150 | static int fcc_enet_set_mac_address(struct net_device *dev, void *addr); | ||
151 | |||
152 | /* These will be configurable for the FCC choice. | ||
153 | * Multiple ports can be configured. There is little choice among the | ||
154 | * I/O pins to the PHY, except the clocks. We will need some board | ||
155 | * dependent clock selection. | ||
156 | * Why in the hell did I put these inside #ifdef's? I dunno, maybe to | ||
157 | * help show what pins are used for each device. | ||
158 | */ | ||
159 | |||
160 | /* Since the CLK setting changes greatly from board to board, I changed | ||
161 | * it to an easier scheme: you just specify which CLK number to use. | ||
162 | * Note that only limited choices can be made on each port. | ||
163 | */ | ||
164 | |||
165 | /* FCC1 Clock Source Configuration. These are board specific. | ||
166 | Can only choose from CLK9-12 */ | ||
167 | #ifdef CONFIG_SBC82xx | ||
168 | #define F1_RXCLK 9 | ||
169 | #define F1_TXCLK 10 | ||
170 | #elif defined(CONFIG_ADS8272) | ||
171 | #define F1_RXCLK 11 | ||
172 | #define F1_TXCLK 10 | ||
173 | #else | ||
174 | #define F1_RXCLK 12 | ||
175 | #define F1_TXCLK 11 | ||
176 | #endif | ||
177 | |||
178 | /* FCC2 Clock Source Configuration. These are board specific. | ||
179 | Can only choose from CLK13-16 */ | ||
180 | #ifdef CONFIG_ADS8272 | ||
181 | #define F2_RXCLK 15 | ||
182 | #define F2_TXCLK 16 | ||
183 | #else | ||
184 | #define F2_RXCLK 13 | ||
185 | #define F2_TXCLK 14 | ||
186 | #endif | ||
187 | |||
188 | /* FCC3 Clock Source Configuration. These are board specific. | ||
189 | Can only choose from CLK13-16 */ | ||
190 | #define F3_RXCLK 15 | ||
191 | #define F3_TXCLK 16 | ||
192 | |||
193 | /* Automatically generates register configurations */ | ||
194 | #define PC_CLK(x) ((uint)(1<<(x-1))) /* FCC CLK I/O ports */ | ||
195 | |||
196 | #define CMXFCR_RF1CS(x) ((uint)((x-5)<<27)) /* FCC1 Receive Clock Source */ | ||
197 | #define CMXFCR_TF1CS(x) ((uint)((x-5)<<24)) /* FCC1 Transmit Clock Source */ | ||
198 | #define CMXFCR_RF2CS(x) ((uint)((x-9)<<19)) /* FCC2 Receive Clock Source */ | ||
199 | #define CMXFCR_TF2CS(x) ((uint)((x-9)<<16)) /* FCC2 Transmit Clock Source */ | ||
200 | #define CMXFCR_RF3CS(x) ((uint)((x-9)<<11)) /* FCC3 Receive Clock Source */ | ||
201 | #define CMXFCR_TF3CS(x) ((uint)((x-9)<<8)) /* FCC3 Transmit Clock Source */ | ||
202 | |||
203 | #define PC_F1RXCLK PC_CLK(F1_RXCLK) | ||
204 | #define PC_F1TXCLK PC_CLK(F1_TXCLK) | ||
205 | #define CMX1_CLK_ROUTE (CMXFCR_RF1CS(F1_RXCLK) | CMXFCR_TF1CS(F1_TXCLK)) | ||
206 | #define CMX1_CLK_MASK ((uint)0xff000000) | ||
207 | |||
208 | #define PC_F2RXCLK PC_CLK(F2_RXCLK) | ||
209 | #define PC_F2TXCLK PC_CLK(F2_TXCLK) | ||
210 | #define CMX2_CLK_ROUTE (CMXFCR_RF2CS(F2_RXCLK) | CMXFCR_TF2CS(F2_TXCLK)) | ||
211 | #define CMX2_CLK_MASK ((uint)0x00ff0000) | ||
212 | |||
213 | #define PC_F3RXCLK PC_CLK(F3_RXCLK) | ||
214 | #define PC_F3TXCLK PC_CLK(F3_TXCLK) | ||
215 | #define CMX3_CLK_ROUTE (CMXFCR_RF3CS(F3_RXCLK) | CMXFCR_TF3CS(F3_TXCLK)) | ||
216 | #define CMX3_CLK_MASK ((uint)0x0000ff00) | ||
217 | |||
218 | |||
219 | /* I/O Pin assignment for FCC1. I don't yet know the best way to do this, | ||
220 | * but there is little variation among the choices. | ||
221 | */ | ||
222 | #define PA1_COL ((uint)0x00000001) | ||
223 | #define PA1_CRS ((uint)0x00000002) | ||
224 | #define PA1_TXER ((uint)0x00000004) | ||
225 | #define PA1_TXEN ((uint)0x00000008) | ||
226 | #define PA1_RXDV ((uint)0x00000010) | ||
227 | #define PA1_RXER ((uint)0x00000020) | ||
228 | #define PA1_TXDAT ((uint)0x00003c00) | ||
229 | #define PA1_RXDAT ((uint)0x0003c000) | ||
230 | #define PA1_PSORA_BOUT (PA1_RXDAT | PA1_TXDAT) | ||
231 | #define PA1_PSORA_BIN (PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \ | ||
232 | PA1_RXDV | PA1_RXER) | ||
233 | #define PA1_DIRA_BOUT (PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV) | ||
234 | #define PA1_DIRA_BIN (PA1_TXDAT | PA1_TXEN | PA1_TXER) | ||
235 | |||
236 | |||
237 | /* I/O Pin assignment for FCC2. I don't yet know the best way to do this, | ||
238 | * but there is little variation among the choices. | ||
239 | */ | ||
240 | #define PB2_TXER ((uint)0x00000001) | ||
241 | #define PB2_RXDV ((uint)0x00000002) | ||
242 | #define PB2_TXEN ((uint)0x00000004) | ||
243 | #define PB2_RXER ((uint)0x00000008) | ||
244 | #define PB2_COL ((uint)0x00000010) | ||
245 | #define PB2_CRS ((uint)0x00000020) | ||
246 | #define PB2_TXDAT ((uint)0x000003c0) | ||
247 | #define PB2_RXDAT ((uint)0x00003c00) | ||
248 | #define PB2_PSORB_BOUT (PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \ | ||
249 | PB2_RXER | PB2_RXDV | PB2_TXER) | ||
250 | #define PB2_PSORB_BIN (PB2_TXEN) | ||
251 | #define PB2_DIRB_BOUT (PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV) | ||
252 | #define PB2_DIRB_BIN (PB2_TXDAT | PB2_TXEN | PB2_TXER) | ||
253 | |||
254 | |||
255 | /* I/O Pin assignment for FCC3. I don't yet know the best way to do this, | ||
256 | * but there is little variation among the choices. | ||
257 | */ | ||
258 | #define PB3_RXDV ((uint)0x00004000) | ||
259 | #define PB3_RXER ((uint)0x00008000) | ||
260 | #define PB3_TXER ((uint)0x00010000) | ||
261 | #define PB3_TXEN ((uint)0x00020000) | ||
262 | #define PB3_COL ((uint)0x00040000) | ||
263 | #define PB3_CRS ((uint)0x00080000) | ||
264 | #ifndef CONFIG_RPX8260 | ||
265 | #define PB3_TXDAT ((uint)0x0f000000) | ||
266 | #define PC3_TXDAT ((uint)0x00000000) | ||
267 | #else | ||
268 | #define PB3_TXDAT ((uint)0x0f000000) | ||
269 | #define PC3_TXDAT 0 | ||
270 | #endif | ||
271 | #define PB3_RXDAT ((uint)0x00f00000) | ||
272 | #define PB3_PSORB_BOUT (PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \ | ||
273 | PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN) | ||
274 | #define PB3_PSORB_BIN (0) | ||
275 | #define PB3_DIRB_BOUT (PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV) | ||
276 | #define PB3_DIRB_BIN (PB3_TXDAT | PB3_TXEN | PB3_TXER) | ||
277 | |||
278 | #define PC3_PSORC_BOUT (PC3_TXDAT) | ||
279 | #define PC3_PSORC_BIN (0) | ||
280 | #define PC3_DIRC_BOUT (0) | ||
281 | #define PC3_DIRC_BIN (PC3_TXDAT) | ||
282 | |||
283 | |||
284 | /* MII status/control serial interface. | ||
285 | */ | ||
286 | #if defined(CONFIG_RPX8260) | ||
287 | /* The EP8260 doesn't use Port C for MDIO */ | ||
288 | #define PC_MDIO ((uint)0x00000000) | ||
289 | #define PC_MDCK ((uint)0x00000000) | ||
290 | #elif defined(CONFIG_TQM8260) | ||
291 | /* TQM8260 has MDIO and MDCK on PC30 and PC31 respectively */ | ||
292 | #define PC_MDIO ((uint)0x00000002) | ||
293 | #define PC_MDCK ((uint)0x00000001) | ||
294 | #elif defined(CONFIG_ADS8272) | ||
295 | #define PC_MDIO ((uint)0x00002000) | ||
296 | #define PC_MDCK ((uint)0x00001000) | ||
297 | #elif defined(CONFIG_EST8260) || defined(CONFIG_ADS8260) || defined(CONFIG_PQ2FADS) | ||
298 | #define PC_MDIO ((uint)0x00400000) | ||
299 | #define PC_MDCK ((uint)0x00200000) | ||
300 | #else | ||
301 | #define PC_MDIO ((uint)0x00000004) | ||
302 | #define PC_MDCK ((uint)0x00000020) | ||
303 | #endif | ||
304 | |||
305 | #if defined(CONFIG_USE_MDIO) && (!defined(PC_MDIO) || !defined(PC_MDCK)) | ||
306 | #error "Must define PC_MDIO and PC_MDCK if using MDIO" | ||
307 | #endif | ||
308 | |||
309 | /* PHY addresses */ | ||
310 | /* default to dynamic config of phy addresses */ | ||
311 | #define FCC1_PHY_ADDR 0 | ||
312 | #ifdef CONFIG_PQ2FADS | ||
313 | #define FCC2_PHY_ADDR 0 | ||
314 | #else | ||
315 | #define FCC2_PHY_ADDR 2 | ||
316 | #endif | ||
317 | #define FCC3_PHY_ADDR 3 | ||
318 | |||
319 | /* A table of information for supporting FCCs. This does two things. | ||
320 | * First, we know how many FCCs we have and they are always externally | ||
321 | * numbered from zero. Second, it holds control register and I/O | ||
322 | * information that could be different among board designs. | ||
323 | */ | ||
324 | typedef struct fcc_info { | ||
325 | uint fc_fccnum; | ||
326 | uint fc_phyaddr; | ||
327 | uint fc_cpmblock; | ||
328 | uint fc_cpmpage; | ||
329 | uint fc_proff; | ||
330 | uint fc_interrupt; | ||
331 | uint fc_trxclocks; | ||
332 | uint fc_clockroute; | ||
333 | uint fc_clockmask; | ||
334 | uint fc_mdio; | ||
335 | uint fc_mdck; | ||
336 | } fcc_info_t; | ||
337 | |||
338 | static fcc_info_t fcc_ports[] = { | ||
339 | #ifdef CONFIG_FCC1_ENET | ||
340 | { 0, FCC1_PHY_ADDR, CPM_CR_FCC1_SBLOCK, CPM_CR_FCC1_PAGE, PROFF_FCC1, SIU_INT_FCC1, | ||
341 | (PC_F1RXCLK | PC_F1TXCLK), CMX1_CLK_ROUTE, CMX1_CLK_MASK, | ||
342 | PC_MDIO, PC_MDCK }, | ||
343 | #endif | ||
344 | #ifdef CONFIG_FCC2_ENET | ||
345 | { 1, FCC2_PHY_ADDR, CPM_CR_FCC2_SBLOCK, CPM_CR_FCC2_PAGE, PROFF_FCC2, SIU_INT_FCC2, | ||
346 | (PC_F2RXCLK | PC_F2TXCLK), CMX2_CLK_ROUTE, CMX2_CLK_MASK, | ||
347 | PC_MDIO, PC_MDCK }, | ||
348 | #endif | ||
349 | #ifdef CONFIG_FCC3_ENET | ||
350 | { 2, FCC3_PHY_ADDR, CPM_CR_FCC3_SBLOCK, CPM_CR_FCC3_PAGE, PROFF_FCC3, SIU_INT_FCC3, | ||
351 | (PC_F3RXCLK | PC_F3TXCLK), CMX3_CLK_ROUTE, CMX3_CLK_MASK, | ||
352 | PC_MDIO, PC_MDCK }, | ||
353 | #endif | ||
354 | }; | ||
355 | |||
356 | /* The FCC buffer descriptors track the ring buffers. The rx_bd_base and | ||
357 | * tx_bd_base always point to the base of the buffer descriptors. The | ||
358 | * cur_rx and cur_tx point to the currently available buffer. | ||
359 | * The dirty_tx tracks the current buffer that is being sent by the | ||
360 | * controller. The cur_tx and dirty_tx are equal under both completely | ||
361 | * empty and completely full conditions. The empty/ready indicator in | ||
362 | * the buffer descriptor determines the actual condition. | ||
363 | */ | ||
364 | struct fcc_enet_private { | ||
365 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | ||
366 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | ||
367 | ushort skb_cur; | ||
368 | ushort skb_dirty; | ||
369 | |||
370 | /* CPM dual port RAM relative addresses. | ||
371 | */ | ||
372 | cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ | ||
373 | cbd_t *tx_bd_base; | ||
374 | cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ | ||
375 | cbd_t *dirty_tx; /* The ring entries to be free()ed. */ | ||
376 | volatile fcc_t *fccp; | ||
377 | volatile fcc_enet_t *ep; | ||
378 | struct net_device_stats stats; | ||
379 | uint tx_free; | ||
380 | spinlock_t lock; | ||
381 | |||
382 | #ifdef CONFIG_USE_MDIO | ||
383 | uint phy_id; | ||
384 | uint phy_id_done; | ||
385 | uint phy_status; | ||
386 | phy_info_t *phy; | ||
387 | struct work_struct phy_relink; | ||
388 | struct work_struct phy_display_config; | ||
389 | |||
390 | uint sequence_done; | ||
391 | |||
392 | uint phy_addr; | ||
393 | #endif /* CONFIG_USE_MDIO */ | ||
394 | |||
395 | int link; | ||
396 | int old_link; | ||
397 | int full_duplex; | ||
398 | |||
399 | fcc_info_t *fip; | ||
400 | }; | ||
401 | |||
402 | static void init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep, | ||
403 | volatile cpm2_map_t *immap); | ||
404 | static void init_fcc_startup(fcc_info_t *fip, struct net_device *dev); | ||
405 | static void init_fcc_ioports(fcc_info_t *fip, volatile iop_cpm2_t *io, | ||
406 | volatile cpm2_map_t *immap); | ||
407 | static void init_fcc_param(fcc_info_t *fip, struct net_device *dev, | ||
408 | volatile cpm2_map_t *immap); | ||
409 | |||
410 | #ifdef CONFIG_USE_MDIO | ||
411 | static int mii_queue(struct net_device *dev, int request, void (*func)(uint, struct net_device *)); | ||
412 | static uint mii_send_receive(fcc_info_t *fip, uint cmd); | ||
413 | static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c); | ||
414 | |||
415 | /* Make MII read/write commands for the FCC. | ||
416 | */ | ||
417 | #define mk_mii_read(REG) (0x60020000 | (((REG) & 0x1f) << 18)) | ||
418 | #define mk_mii_write(REG, VAL) (0x50020000 | (((REG) & 0x1f) << 18) | \ | ||
419 | ((VAL) & 0xffff)) | ||
420 | #define mk_mii_end 0 | ||
421 | #endif /* CONFIG_USE_MDIO */ | ||
422 | |||
423 | |||
424 | static int | ||
425 | fcc_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
426 | { | ||
427 | struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv; | ||
428 | volatile cbd_t *bdp; | ||
429 | |||
430 | /* Fill in a Tx ring entry */ | ||
431 | bdp = cep->cur_tx; | ||
432 | |||
433 | #ifndef final_version | ||
434 | if (!cep->tx_free || (bdp->cbd_sc & BD_ENET_TX_READY)) { | ||
435 | /* Ooops. All transmit buffers are full. Bail out. | ||
436 | * This should not happen, since the tx queue should be stopped. | ||
437 | */ | ||
438 | printk("%s: tx queue full!\n", dev->name); | ||
439 | return 1; | ||
440 | } | ||
441 | #endif | ||
442 | |||
443 | /* Clear all of the status flags. */ | ||
444 | bdp->cbd_sc &= ~BD_ENET_TX_STATS; | ||
445 | |||
446 | /* If the frame is short, tell CPM to pad it. */ | ||
447 | if (skb->len <= ETH_ZLEN) | ||
448 | bdp->cbd_sc |= BD_ENET_TX_PAD; | ||
449 | else | ||
450 | bdp->cbd_sc &= ~BD_ENET_TX_PAD; | ||
451 | |||
452 | /* Set buffer length and buffer pointer. */ | ||
453 | bdp->cbd_datlen = skb->len; | ||
454 | bdp->cbd_bufaddr = __pa(skb->data); | ||
455 | |||
456 | spin_lock_irq(&cep->lock); | ||
457 | |||
458 | /* Save skb pointer. */ | ||
459 | cep->tx_skbuff[cep->skb_cur] = skb; | ||
460 | |||
461 | cep->stats.tx_bytes += skb->len; | ||
462 | cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK; | ||
463 | |||
464 | /* Send it on its way. Tell CPM it's ready, interrupt when done, | ||
465 | * it's the last BD of the frame, and to put the CRC on the end. | ||
466 | */ | ||
467 | bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC); | ||
468 | |||
469 | #if 0 | ||
470 | /* Errata says don't do this. */ | ||
471 | cep->fccp->fcc_ftodr = 0x8000; | ||
472 | #endif | ||
473 | dev->trans_start = jiffies; | ||
474 | |||
475 | /* If this was the last BD in the ring, start at the beginning again. */ | ||
476 | if (bdp->cbd_sc & BD_ENET_TX_WRAP) | ||
477 | bdp = cep->tx_bd_base; | ||
478 | else | ||
479 | bdp++; | ||
480 | |||
481 | if (!--cep->tx_free) | ||
482 | netif_stop_queue(dev); | ||
483 | |||
484 | cep->cur_tx = (cbd_t *)bdp; | ||
485 | |||
486 | spin_unlock_irq(&cep->lock); | ||
487 | |||
488 | return 0; | ||
489 | } | ||
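The skb_cur arithmetic above relies on the ring size being a power of two, so that masking with TX_RING_MOD_MASK (assumed here to be TX_RING_SIZE - 1; the real definitions live earlier in this file) behaves like a modulo. A minimal sketch of that wrap-around:

#include <assert.h>

#define TX_RING_SIZE     16                      /* illustrative value only */
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)

int main(void)
{
        unsigned int skb_cur = TX_RING_SIZE - 1; /* last slot in the ring */

        /* Same advance as in fcc_enet_start_xmit(): instead of reaching
         * TX_RING_SIZE, the index wraps back to slot 0. */
        skb_cur = (skb_cur + 1) & TX_RING_MOD_MASK;
        assert(skb_cur == 0);
        return 0;
}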
490 | |||
491 | |||
492 | static void | ||
493 | fcc_enet_timeout(struct net_device *dev) | ||
494 | { | ||
495 | struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv; | ||
496 | |||
497 | printk("%s: transmit timed out.\n", dev->name); | ||
498 | cep->stats.tx_errors++; | ||
499 | #ifndef final_version | ||
500 | { | ||
501 | int i; | ||
502 | cbd_t *bdp; | ||
503 | printk(" Ring data dump: cur_tx %p tx_free %d cur_rx %p.\n", | ||
504 | cep->cur_tx, cep->tx_free, | ||
505 | cep->cur_rx); | ||
506 | bdp = cep->tx_bd_base; | ||
507 | printk(" Tx @base %p :\n", bdp); | ||
508 | for (i = 0 ; i < TX_RING_SIZE; i++, bdp++) | ||
509 | printk("%04x %04x %08x\n", | ||
510 | bdp->cbd_sc, | ||
511 | bdp->cbd_datlen, | ||
512 | bdp->cbd_bufaddr); | ||
513 | bdp = cep->rx_bd_base; | ||
514 | printk(" Rx @base %p :\n", bdp); | ||
515 | for (i = 0 ; i < RX_RING_SIZE; i++, bdp++) | ||
516 | printk("%04x %04x %08x\n", | ||
517 | bdp->cbd_sc, | ||
518 | bdp->cbd_datlen, | ||
519 | bdp->cbd_bufaddr); | ||
520 | } | ||
521 | #endif | ||
522 | if (cep->tx_free) | ||
523 | netif_wake_queue(dev); | ||
524 | } | ||
525 | |||
526 | /* The interrupt handler. */ | ||
527 | static irqreturn_t | ||
528 | fcc_enet_interrupt(int irq, void * dev_id, struct pt_regs * regs) | ||
529 | { | ||
530 | struct net_device *dev = dev_id; | ||
531 | volatile struct fcc_enet_private *cep; | ||
532 | volatile cbd_t *bdp; | ||
533 | ushort int_events; | ||
534 | int must_restart; | ||
535 | |||
536 | cep = (struct fcc_enet_private *)dev->priv; | ||
537 | |||
538 | /* Get the interrupt events that caused us to be here. | ||
539 | */ | ||
540 | int_events = cep->fccp->fcc_fcce; | ||
541 | cep->fccp->fcc_fcce = (int_events & cep->fccp->fcc_fccm); | ||
542 | must_restart = 0; | ||
543 | |||
544 | #ifdef PHY_INTERRUPT | ||
545 | /* We have to be careful here to make sure that we aren't | ||
546 | * interrupted by a PHY interrupt. | ||
547 | */ | ||
548 | disable_irq_nosync(PHY_INTERRUPT); | ||
549 | #endif | ||
550 | |||
551 | /* Handle receive event in its own function. | ||
552 | */ | ||
553 | if (int_events & FCC_ENET_RXF) | ||
554 | fcc_enet_rx(dev_id); | ||
555 | |||
556 | /* Check for a transmit error. The manual is a little unclear | ||
557 | * about this, so the debug code stays until I get it figured out. It | ||
558 | * appears that if TXE is set, then TXB is not set. However, | ||
559 | * if carrier sense is lost during frame transmission, the TXE | ||
560 | * bit is set, "and continues the buffer transmission normally." | ||
561 | * I don't know if "normally" implies TXB is set when the buffer | ||
562 | * descriptor is closed.....trial and error :-). | ||
563 | */ | ||
564 | |||
565 | /* Transmit OK, or non-fatal error. Update the buffer descriptors. | ||
566 | */ | ||
567 | if (int_events & (FCC_ENET_TXE | FCC_ENET_TXB)) { | ||
568 | spin_lock(&cep->lock); | ||
569 | bdp = cep->dirty_tx; | ||
570 | while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) { | ||
571 | if (cep->tx_free == TX_RING_SIZE) | ||
572 | break; | ||
573 | |||
574 | if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */ | ||
575 | cep->stats.tx_heartbeat_errors++; | ||
576 | if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */ | ||
577 | cep->stats.tx_window_errors++; | ||
578 | if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */ | ||
579 | cep->stats.tx_aborted_errors++; | ||
580 | if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */ | ||
581 | cep->stats.tx_fifo_errors++; | ||
582 | if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */ | ||
583 | cep->stats.tx_carrier_errors++; | ||
584 | |||
585 | |||
586 | /* Lost heartbeat and lost carrier are not really serious errors. | ||
587 | * The others require a restart transmit command. | ||
588 | */ | ||
589 | if (bdp->cbd_sc & | ||
590 | (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) { | ||
591 | must_restart = 1; | ||
592 | cep->stats.tx_errors++; | ||
593 | } | ||
594 | |||
595 | cep->stats.tx_packets++; | ||
596 | |||
597 | /* Deferred means some collisions occurred during transmit, | ||
598 | * but we eventually sent the packet OK. | ||
599 | */ | ||
600 | if (bdp->cbd_sc & BD_ENET_TX_DEF) | ||
601 | cep->stats.collisions++; | ||
602 | |||
603 | /* Free the sk buffer associated with this last transmit. */ | ||
604 | dev_kfree_skb_irq(cep->tx_skbuff[cep->skb_dirty]); | ||
605 | cep->tx_skbuff[cep->skb_dirty] = NULL; | ||
606 | cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK; | ||
607 | |||
608 | /* Update pointer to next buffer descriptor to be transmitted. */ | ||
609 | if (bdp->cbd_sc & BD_ENET_TX_WRAP) | ||
610 | bdp = cep->tx_bd_base; | ||
611 | else | ||
612 | bdp++; | ||
613 | |||
614 | /* I don't know if we can be held off from processing these | ||
615 | * interrupts for more than one frame time. I really hope | ||
616 | * not. In such a case, we would now want to check the | ||
617 | * currently available BD (cur_tx) and determine if any | ||
618 | * buffers between the dirty_tx and cur_tx have also been | ||
619 | * sent. We would want to process anything in between that | ||
620 | * does not have BD_ENET_TX_READY set. | ||
621 | */ | ||
622 | |||
623 | /* Since we have freed up a buffer, the ring is no longer | ||
624 | * full. | ||
625 | */ | ||
626 | if (!cep->tx_free++) { | ||
627 | if (netif_queue_stopped(dev)) { | ||
628 | netif_wake_queue(dev); | ||
629 | } | ||
630 | } | ||
631 | |||
632 | cep->dirty_tx = (cbd_t *)bdp; | ||
633 | } | ||
634 | |||
635 | if (must_restart) { | ||
636 | volatile cpm_cpm2_t *cp; | ||
637 | |||
638 | /* Some transmit errors cause the transmitter to shut | ||
639 | * down. We now issue a restart transmit. Since the | ||
640 | * errors close the BD and update the pointers, the restart | ||
641 | * _should_ pick up without having to reset any of our | ||
642 | * pointers either. Also, to work around 8260 device erratum | ||
643 | * CPM37, we must disable and then re-enable the transmitter | ||
644 | * following a Late Collision, Underrun, or Retry Limit error. | ||
645 | */ | ||
646 | cep->fccp->fcc_gfmr &= ~FCC_GFMR_ENT; | ||
647 | udelay(10); /* wait a few microseconds just on principle */ | ||
648 | cep->fccp->fcc_gfmr |= FCC_GFMR_ENT; | ||
649 | |||
650 | cp = cpmp; | ||
651 | cp->cp_cpcr = | ||
652 | mk_cr_cmd(cep->fip->fc_cpmpage, cep->fip->fc_cpmblock, | ||
653 | 0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG; | ||
654 | while (cp->cp_cpcr & CPM_CR_FLG); | ||
655 | } | ||
656 | spin_unlock(&cep->lock); | ||
657 | } | ||
658 | |||
659 | /* Check for receive busy, i.e. packets coming but no place to | ||
660 | * put them. | ||
661 | */ | ||
662 | if (int_events & FCC_ENET_BSY) { | ||
663 | cep->fccp->fcc_fcce = FCC_ENET_BSY; | ||
664 | cep->stats.rx_dropped++; | ||
665 | } | ||
666 | |||
667 | #ifdef PHY_INTERRUPT | ||
668 | enable_irq(PHY_INTERRUPT); | ||
669 | #endif | ||
670 | return IRQ_HANDLED; | ||
671 | } | ||
672 | |||
673 | /* During a receive, cur_rx points to the current incoming buffer. | ||
674 | * When we update through the ring, if the next incoming buffer has | ||
675 | * not been given to the system, we just set the empty indicator, | ||
676 | * effectively tossing the packet. | ||
677 | */ | ||
678 | static int | ||
679 | fcc_enet_rx(struct net_device *dev) | ||
680 | { | ||
681 | struct fcc_enet_private *cep; | ||
682 | volatile cbd_t *bdp; | ||
683 | struct sk_buff *skb; | ||
684 | ushort pkt_len; | ||
685 | |||
686 | cep = (struct fcc_enet_private *)dev->priv; | ||
687 | |||
688 | /* First, grab all of the stats for the incoming packet. | ||
689 | * These get messed up if we get called due to a busy condition. | ||
690 | */ | ||
691 | bdp = cep->cur_rx; | ||
692 | |||
693 | for (;;) { | ||
694 | if (bdp->cbd_sc & BD_ENET_RX_EMPTY) | ||
695 | break; | ||
696 | |||
697 | #ifndef final_version | ||
698 | /* Since we have allocated space to hold a complete frame, both | ||
699 | * the first and last indicators should be set. | ||
700 | */ | ||
701 | if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) != | ||
702 | (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) | ||
703 | printk("CPM ENET: rcv is not first+last\n"); | ||
704 | #endif | ||
705 | |||
706 | /* Frame too long or too short. */ | ||
707 | if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) | ||
708 | cep->stats.rx_length_errors++; | ||
709 | if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */ | ||
710 | cep->stats.rx_frame_errors++; | ||
711 | if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */ | ||
712 | cep->stats.rx_crc_errors++; | ||
713 | if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */ | ||
714 | cep->stats.rx_crc_errors++; | ||
715 | if (bdp->cbd_sc & BD_ENET_RX_CL) /* Late Collision */ | ||
716 | cep->stats.rx_frame_errors++; | ||
717 | |||
718 | if (!(bdp->cbd_sc & | ||
719 | (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | BD_ENET_RX_CR | ||
720 | | BD_ENET_RX_OV | BD_ENET_RX_CL))) | ||
721 | { | ||
722 | /* Process the incoming frame. */ | ||
723 | cep->stats.rx_packets++; | ||
724 | |||
725 | /* Remove the FCS from the packet length. */ | ||
726 | pkt_len = bdp->cbd_datlen - 4; | ||
727 | cep->stats.rx_bytes += pkt_len; | ||
728 | |||
729 | /* This does 16 byte alignment, much more than we need. */ | ||
730 | skb = dev_alloc_skb(pkt_len); | ||
731 | |||
732 | if (skb == NULL) { | ||
733 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); | ||
734 | cep->stats.rx_dropped++; | ||
735 | } | ||
736 | else { | ||
737 | skb->dev = dev; | ||
738 | skb_put(skb,pkt_len); /* Make room */ | ||
739 | eth_copy_and_sum(skb, | ||
740 | (unsigned char *)__va(bdp->cbd_bufaddr), | ||
741 | pkt_len, 0); | ||
742 | skb->protocol=eth_type_trans(skb,dev); | ||
743 | netif_rx(skb); | ||
744 | } | ||
745 | } | ||
746 | |||
747 | /* Clear the status flags for this buffer. */ | ||
748 | bdp->cbd_sc &= ~BD_ENET_RX_STATS; | ||
749 | |||
750 | /* Mark the buffer empty. */ | ||
751 | bdp->cbd_sc |= BD_ENET_RX_EMPTY; | ||
752 | |||
753 | /* Update BD pointer to next entry. */ | ||
754 | if (bdp->cbd_sc & BD_ENET_RX_WRAP) | ||
755 | bdp = cep->rx_bd_base; | ||
756 | else | ||
757 | bdp++; | ||
758 | |||
759 | } | ||
760 | cep->cur_rx = (cbd_t *)bdp; | ||
761 | |||
762 | return 0; | ||
763 | } | ||
764 | |||
765 | static int | ||
766 | fcc_enet_close(struct net_device *dev) | ||
767 | { | ||
768 | #ifdef CONFIG_USE_MDIO | ||
769 | struct fcc_enet_private *fep = dev->priv; | ||
770 | #endif | ||
771 | |||
772 | netif_stop_queue(dev); | ||
773 | fcc_stop(dev); | ||
774 | #ifdef CONFIG_USE_MDIO | ||
775 | if (fep->phy) | ||
776 | mii_do_cmd(dev, fep->phy->shutdown); | ||
777 | #endif | ||
778 | |||
779 | return 0; | ||
780 | } | ||
781 | |||
782 | static struct net_device_stats *fcc_enet_get_stats(struct net_device *dev) | ||
783 | { | ||
784 | struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv; | ||
785 | |||
786 | return &cep->stats; | ||
787 | } | ||
788 | |||
789 | #ifdef CONFIG_USE_MDIO | ||
790 | |||
791 | /* NOTE: Most of the following comes from the FEC driver for 860. The | ||
792 | * overall structure of MII code has been retained (as it's proved stable | ||
793 | * and well-tested), but actual transfer requests are processed "at once" | ||
794 | * instead of being queued (there's no interrupt-driven MII transfer | ||
795 | * mechanism; one has to toggle the data/clock bits manually). | ||
796 | */ | ||
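To make the "toggle the data/clock bits manually" remark concrete, here is a rough, self-contained sketch of clocking out one of the 32-bit command words built by the macros above. It is only an illustration: set_mdc()/set_mdio()/get_mdio() are hypothetical stand-ins for the port C pin accesses that the real mii_send_receive() performs, and the 32-bit preamble of ones that real PHYs expect is omitted.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical pin accessors; here they only trace the driven bits so the
 * example can run on a host. */
static void set_mdc(int level)  { (void)level; }
static void set_mdio(int level) { putchar(level ? '1' : '0'); }
static int  get_mdio(void)      { return 0; }    /* pretend the PHY answers 0 */

static uint16_t mdio_xfer(uint32_t cmd)
{
        uint16_t data = 0;
        int i;
        int is_read = ((cmd >> 28) & 0x3) == 0x2;    /* opcode bits: 10 = read */

        for (i = 31; i >= 18; i--) {                 /* ST, OP, PHYAD, REGAD */
                set_mdio((cmd >> i) & 1);
                set_mdc(0); set_mdc(1);              /* one clock pulse per bit */
        }
        for (i = 17; i >= 0; i--) {                  /* turnaround + 16 data bits */
                if (is_read) {                       /* sample instead of drive */
                        set_mdc(0); set_mdc(1);
                        if (i < 16)
                                data = (uint16_t)((data << 1) | (get_mdio() & 1));
                } else {
                        set_mdio((cmd >> i) & 1);
                        set_mdc(0); set_mdc(1);
                }
        }
        return data;
}

int main(void)
{
        mdio_xfer(0x60060000u);                      /* mk_mii_read(MII_BMSR), PHY 0 */
        putchar('\n');
        return 0;
}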
797 | static int | ||
798 | mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *)) | ||
799 | { | ||
800 | struct fcc_enet_private *fep; | ||
801 | int retval, tmp; | ||
802 | |||
803 | /* Add PHY address to register command. */ | ||
804 | fep = dev->priv; | ||
805 | regval |= fep->phy_addr << 23; | ||
806 | |||
807 | retval = 0; | ||
808 | |||
809 | tmp = mii_send_receive(fep->fip, regval); | ||
810 | if (func) | ||
811 | func(tmp, dev); | ||
812 | |||
813 | return retval; | ||
814 | } | ||
815 | |||
816 | static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c) | ||
817 | { | ||
818 | int k; | ||
819 | |||
820 | if(!c) | ||
821 | return; | ||
822 | |||
823 | for(k = 0; (c+k)->mii_data != mk_mii_end; k++) | ||
824 | mii_queue(dev, (c+k)->mii_data, (c+k)->funct); | ||
825 | } | ||
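mii_do_cmd() above drives a sentinel-terminated array of { command, callback } pairs, and every phy_info_t below is built from four such arrays (config, startup, ack_int, shutdown). A toy host-side re-creation of the pattern, with made-up command values, may make the data-driven structure easier to follow:

#include <stdio.h>

typedef struct {
        unsigned int mii_data;                   /* 0 terminates, like mk_mii_end */
        void (*funct)(unsigned int reg_val);
} toy_cmd_t;

static unsigned int toy_send_receive(unsigned int cmd)
{
        return cmd & 0xffff;                     /* fake "PHY register" contents */
}

static void toy_parse(unsigned int reg_val)
{
        printf("parsed 0x%04x\n", reg_val);
}

static void toy_do_cmd(const toy_cmd_t *c)
{
        int k;

        for (k = 0; c[k].mii_data != 0; k++) {   /* walk until the sentinel */
                unsigned int v = toy_send_receive(c[k].mii_data);
                if (c[k].funct)
                        c[k].funct(v);
        }
}

int main(void)
{
        static const toy_cmd_t startup[] = {     /* values are illustrative only */
                { 0x50021200, NULL },            /* "write BMCR, restart autoneg" */
                { 0x60060000, toy_parse },       /* "read BMSR" */
                { 0, NULL }
        };

        toy_do_cmd(startup);
        return 0;
}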
826 | |||
827 | static void mii_parse_sr(uint mii_reg, struct net_device *dev) | ||
828 | { | ||
829 | volatile struct fcc_enet_private *fep = dev->priv; | ||
830 | uint s = fep->phy_status; | ||
831 | |||
832 | s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC); | ||
833 | |||
834 | if (mii_reg & BMSR_LSTATUS) | ||
835 | s |= PHY_STAT_LINK; | ||
836 | if (mii_reg & BMSR_RFAULT) | ||
837 | s |= PHY_STAT_FAULT; | ||
838 | if (mii_reg & BMSR_ANEGCOMPLETE) | ||
839 | s |= PHY_STAT_ANC; | ||
840 | |||
841 | fep->phy_status = s; | ||
842 | } | ||
843 | |||
844 | static void mii_parse_cr(uint mii_reg, struct net_device *dev) | ||
845 | { | ||
846 | volatile struct fcc_enet_private *fep = dev->priv; | ||
847 | uint s = fep->phy_status; | ||
848 | |||
849 | s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP); | ||
850 | |||
851 | if (mii_reg & BMCR_ANENABLE) | ||
852 | s |= PHY_CONF_ANE; | ||
853 | if (mii_reg & BMCR_LOOPBACK) | ||
854 | s |= PHY_CONF_LOOP; | ||
855 | |||
856 | fep->phy_status = s; | ||
857 | } | ||
858 | |||
859 | static void mii_parse_anar(uint mii_reg, struct net_device *dev) | ||
860 | { | ||
861 | volatile struct fcc_enet_private *fep = dev->priv; | ||
862 | uint s = fep->phy_status; | ||
863 | |||
864 | s &= ~(PHY_CONF_SPMASK); | ||
865 | |||
866 | if (mii_reg & ADVERTISE_10HALF) | ||
867 | s |= PHY_CONF_10HDX; | ||
868 | if (mii_reg & ADVERTISE_10FULL) | ||
869 | s |= PHY_CONF_10FDX; | ||
870 | if (mii_reg & ADVERTISE_100HALF) | ||
871 | s |= PHY_CONF_100HDX; | ||
872 | if (mii_reg & ADVERTISE_100FULL) | ||
873 | s |= PHY_CONF_100FDX; | ||
874 | |||
875 | fep->phy_status = s; | ||
876 | } | ||
877 | |||
878 | /* ------------------------------------------------------------------------- */ | ||
879 | /* Generic PHY support. Should work for all PHYs, but does not support link | ||
880 | * change interrupts. | ||
881 | */ | ||
882 | #ifdef CONFIG_FCC_GENERIC_PHY | ||
883 | |||
884 | static phy_info_t phy_info_generic = { | ||
885 | 0x00000000, /* 0-->match any PHY */ | ||
886 | "GENERIC", | ||
887 | |||
888 | (const phy_cmd_t []) { /* config */ | ||
889 | /* advertise only half-duplex capabilities */ | ||
890 | { mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_HALF), | ||
891 | mii_parse_anar }, | ||
892 | |||
893 | /* enable auto-negotiation */ | ||
894 | { mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr }, | ||
895 | { mk_mii_end, } | ||
896 | }, | ||
897 | (const phy_cmd_t []) { /* startup */ | ||
898 | /* restart auto-negotiation */ | ||
899 | { mk_mii_write(MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART), | ||
900 | NULL }, | ||
901 | { mk_mii_end, } | ||
902 | }, | ||
903 | (const phy_cmd_t []) { /* ack_int */ | ||
904 | /* We don't actually use the ack_int table with a generic | ||
905 | * PHY, but putting a reference to mii_parse_sr here keeps | ||
906 | * us from getting a compiler warning about unused static | ||
907 | * functions in the case where we only compile in generic | ||
908 | * PHY support. | ||
909 | */ | ||
910 | { mk_mii_read(MII_BMSR), mii_parse_sr }, | ||
911 | { mk_mii_end, } | ||
912 | }, | ||
913 | (const phy_cmd_t []) { /* shutdown */ | ||
914 | { mk_mii_end, } | ||
915 | }, | ||
916 | }; | ||
917 | #endif /* ifdef CONFIG_FCC_GENERIC_PHY */ | ||
918 | |||
919 | /* ------------------------------------------------------------------------- */ | ||
920 | /* The Level One LXT970 is used by many boards */ | ||
921 | |||
922 | #ifdef CONFIG_FCC_LXT970 | ||
923 | |||
924 | #define MII_LXT970_MIRROR 16 /* Mirror register */ | ||
925 | #define MII_LXT970_IER 17 /* Interrupt Enable Register */ | ||
926 | #define MII_LXT970_ISR 18 /* Interrupt Status Register */ | ||
927 | #define MII_LXT970_CONFIG 19 /* Configuration Register */ | ||
928 | #define MII_LXT970_CSR 20 /* Chip Status Register */ | ||
929 | |||
930 | static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev) | ||
931 | { | ||
932 | volatile struct fcc_enet_private *fep = dev->priv; | ||
933 | uint s = fep->phy_status; | ||
934 | |||
935 | s &= ~(PHY_STAT_SPMASK); | ||
936 | |||
937 | if (mii_reg & 0x0800) { | ||
938 | if (mii_reg & 0x1000) | ||
939 | s |= PHY_STAT_100FDX; | ||
940 | else | ||
941 | s |= PHY_STAT_100HDX; | ||
942 | } else { | ||
943 | if (mii_reg & 0x1000) | ||
944 | s |= PHY_STAT_10FDX; | ||
945 | else | ||
946 | s |= PHY_STAT_10HDX; | ||
947 | } | ||
948 | |||
949 | fep->phy_status = s; | ||
950 | } | ||
951 | |||
952 | static phy_info_t phy_info_lxt970 = { | ||
953 | 0x07810000, | ||
954 | "LXT970", | ||
955 | |||
956 | (const phy_cmd_t []) { /* config */ | ||
957 | #if 0 | ||
958 | // { mk_mii_write(MII_ADVERTISE, 0x0021), NULL }, | ||
959 | |||
960 | /* Set default operation of 100-TX....for some reason | ||
961 | * some of these bits are set on power up, which is wrong. | ||
962 | */ | ||
963 | { mk_mii_write(MII_LXT970_CONFIG, 0), NULL }, | ||
964 | #endif | ||
965 | { mk_mii_read(MII_BMCR), mii_parse_cr }, | ||
966 | { mk_mii_read(MII_ADVERTISE), mii_parse_anar }, | ||
967 | { mk_mii_end, } | ||
968 | }, | ||
969 | (const phy_cmd_t []) { /* startup - enable interrupts */ | ||
970 | { mk_mii_write(MII_LXT970_IER, 0x0002), NULL }, | ||
971 | { mk_mii_write(MII_BMCR, 0x1200), NULL }, /* autonegotiate */ | ||
972 | { mk_mii_end, } | ||
973 | }, | ||
974 | (const phy_cmd_t []) { /* ack_int */ | ||
975 | /* read SR and ISR to acknowledge */ | ||
976 | |||
977 | { mk_mii_read(MII_BMSR), mii_parse_sr }, | ||
978 | { mk_mii_read(MII_LXT970_ISR), NULL }, | ||
979 | |||
980 | /* find out the current status */ | ||
981 | |||
982 | { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr }, | ||
983 | { mk_mii_end, } | ||
984 | }, | ||
985 | (const phy_cmd_t []) { /* shutdown - disable interrupts */ | ||
986 | { mk_mii_write(MII_LXT970_IER, 0x0000), NULL }, | ||
987 | { mk_mii_end, } | ||
988 | }, | ||
989 | }; | ||
990 | |||
991 | #endif /* CONFIG_FCC_LXT970 */ | ||
992 | |||
993 | /* ------------------------------------------------------------------------- */ | ||
994 | /* The Level One LXT971 is used on some of my custom boards */ | ||
995 | |||
996 | #ifdef CONFIG_FCC_LXT971 | ||
997 | |||
998 | /* register definitions for the 971 */ | ||
999 | |||
1000 | #define MII_LXT971_PCR 16 /* Port Control Register */ | ||
1001 | #define MII_LXT971_SR2 17 /* Status Register 2 */ | ||
1002 | #define MII_LXT971_IER 18 /* Interrupt Enable Register */ | ||
1003 | #define MII_LXT971_ISR 19 /* Interrupt Status Register */ | ||
1004 | #define MII_LXT971_LCR 20 /* LED Control Register */ | ||
1005 | #define MII_LXT971_TCR 30 /* Transmit Control Register */ | ||
1006 | |||
1007 | /* | ||
1008 | * I had some nice ideas of running the MDIO faster... | ||
1009 | * The 971 should support 8MHz and I tried it, but things acted really | ||
1010 | * weird, so 2.5 MHz ought to be enough for anyone... | ||
1011 | */ | ||
1012 | |||
1013 | static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev) | ||
1014 | { | ||
1015 | volatile struct fcc_enet_private *fep = dev->priv; | ||
1016 | uint s = fep->phy_status; | ||
1017 | |||
1018 | s &= ~(PHY_STAT_SPMASK); | ||
1019 | |||
1020 | if (mii_reg & 0x4000) { | ||
1021 | if (mii_reg & 0x0200) | ||
1022 | s |= PHY_STAT_100FDX; | ||
1023 | else | ||
1024 | s |= PHY_STAT_100HDX; | ||
1025 | } else { | ||
1026 | if (mii_reg & 0x0200) | ||
1027 | s |= PHY_STAT_10FDX; | ||
1028 | else | ||
1029 | s |= PHY_STAT_10HDX; | ||
1030 | } | ||
1031 | if (mii_reg & 0x0008) | ||
1032 | s |= PHY_STAT_FAULT; | ||
1033 | |||
1034 | fep->phy_status = s; | ||
1035 | } | ||
1036 | |||
1037 | static phy_info_t phy_info_lxt971 = { | ||
1038 | 0x0001378e, | ||
1039 | "LXT971", | ||
1040 | |||
1041 | (const phy_cmd_t []) { /* config */ | ||
1042 | /* configure link capabilities to advertise */ | ||
1043 | { mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_DEFAULT), | ||
1044 | mii_parse_anar }, | ||
1045 | |||
1046 | /* enable auto-negotiation */ | ||
1047 | { mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr }, | ||
1048 | { mk_mii_end, } | ||
1049 | }, | ||
1050 | (const phy_cmd_t []) { /* startup - enable interrupts */ | ||
1051 | { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL }, | ||
1052 | |||
1053 | /* restart auto-negotiation */ | ||
1054 | { mk_mii_write(MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART), | ||
1055 | NULL }, | ||
1056 | { mk_mii_end, } | ||
1057 | }, | ||
1058 | (const phy_cmd_t []) { /* ack_int */ | ||
1059 | /* find out the current status */ | ||
1060 | { mk_mii_read(MII_BMSR), NULL }, | ||
1061 | { mk_mii_read(MII_BMSR), mii_parse_sr }, | ||
1062 | { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 }, | ||
1063 | |||
1064 | /* we only need to read ISR to acknowledge */ | ||
1065 | { mk_mii_read(MII_LXT971_ISR), NULL }, | ||
1066 | { mk_mii_end, } | ||
1067 | }, | ||
1068 | (const phy_cmd_t []) { /* shutdown - disable interrupts */ | ||
1069 | { mk_mii_write(MII_LXT971_IER, 0x0000), NULL }, | ||
1070 | { mk_mii_end, } | ||
1071 | }, | ||
1072 | }; | ||
1073 | |||
1074 | #endif /* CONFIG_FCC_LXT971 */ | ||
1075 | |||
1076 | /* ------------------------------------------------------------------------- */ | ||
1077 | /* The Quality Semiconductor QS6612 is used on the RPX CLLF */ | ||
1078 | |||
1079 | #ifdef CONFIG_FCC_QS6612 | ||
1080 | |||
1081 | /* register definitions */ | ||
1082 | |||
1083 | #define MII_QS6612_MCR 17 /* Mode Control Register */ | ||
1084 | #define MII_QS6612_FTR 27 /* Factory Test Register */ | ||
1085 | #define MII_QS6612_MCO 28 /* Misc. Control Register */ | ||
1086 | #define MII_QS6612_ISR 29 /* Interrupt Source Register */ | ||
1087 | #define MII_QS6612_IMR 30 /* Interrupt Mask Register */ | ||
1088 | #define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */ | ||
1089 | |||
1090 | static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev) | ||
1091 | { | ||
1092 | volatile struct fcc_enet_private *fep = dev->priv; | ||
1093 | uint s = fep->phy_status; | ||
1094 | |||
1095 | s &= ~(PHY_STAT_SPMASK); | ||
1096 | |||
1097 | switch((mii_reg >> 2) & 7) { | ||
1098 | case 1: s |= PHY_STAT_10HDX; break; | ||
1099 | case 2: s |= PHY_STAT_100HDX; break; | ||
1100 | case 5: s |= PHY_STAT_10FDX; break; | ||
1101 | case 6: s |= PHY_STAT_100FDX; break; | ||
1102 | } | ||
1103 | |||
1104 | fep->phy_status = s; | ||
1105 | } | ||
1106 | |||
1107 | static phy_info_t phy_info_qs6612 = { | ||
1108 | 0x00181440, | ||
1109 | "QS6612", | ||
1110 | |||
1111 | (const phy_cmd_t []) { /* config */ | ||
1112 | // { mk_mii_write(MII_ADVERTISE, 0x061), NULL }, /* 10 Mbps */ | ||
1113 | |||
1114 | /* The PHY powers up isolated on the RPX, | ||
1115 | * so send a command to allow operation. | ||
1116 | */ | ||
1117 | |||
1118 | { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL }, | ||
1119 | |||
1120 | /* parse cr and anar to get some info */ | ||
1121 | |||
1122 | { mk_mii_read(MII_BMCR), mii_parse_cr }, | ||
1123 | { mk_mii_read(MII_ADVERTISE), mii_parse_anar }, | ||
1124 | { mk_mii_end, } | ||
1125 | }, | ||
1126 | (const phy_cmd_t []) { /* startup - enable interrupts */ | ||
1127 | { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL }, | ||
1128 | { mk_mii_write(MII_BMCR, 0x1200), NULL }, /* autonegotiate */ | ||
1129 | { mk_mii_end, } | ||
1130 | }, | ||
1131 | (const phy_cmd_t []) { /* ack_int */ | ||
1132 | |||
1133 | /* we need to read ISR, SR and ANER to acknowledge */ | ||
1134 | |||
1135 | { mk_mii_read(MII_QS6612_ISR), NULL }, | ||
1136 | { mk_mii_read(MII_BMSR), mii_parse_sr }, | ||
1137 | { mk_mii_read(MII_EXPANSION), NULL }, | ||
1138 | |||
1139 | /* read pcr to get info */ | ||
1140 | |||
1141 | { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr }, | ||
1142 | { mk_mii_end, } | ||
1143 | }, | ||
1144 | (const phy_cmd_t []) { /* shutdown - disable interrupts */ | ||
1145 | { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL }, | ||
1146 | { mk_mii_end, } | ||
1147 | }, | ||
1148 | }; | ||
1149 | |||
1150 | |||
1151 | #endif /* CONFIG_FCC_QS6612 */ | ||
1152 | |||
1153 | |||
1154 | /* ------------------------------------------------------------------------- */ | ||
1155 | /* The Davicom DM9131 is used on the HYMOD board */ | ||
1156 | |||
1157 | #ifdef CONFIG_FCC_DM9131 | ||
1158 | |||
1159 | /* register definitions */ | ||
1160 | |||
1161 | #define MII_DM9131_ACR 16 /* Aux. Config Register */ | ||
1162 | #define MII_DM9131_ACSR 17 /* Aux. Config/Status Register */ | ||
1163 | #define MII_DM9131_10TCSR 18 /* 10BaseT Config/Status Reg. */ | ||
1164 | #define MII_DM9131_INTR 21 /* Interrupt Register */ | ||
1165 | #define MII_DM9131_RECR 22 /* Receive Error Counter Reg. */ | ||
1166 | #define MII_DM9131_DISCR 23 /* Disconnect Counter Register */ | ||
1167 | |||
1168 | static void mii_parse_dm9131_acsr(uint mii_reg, struct net_device *dev) | ||
1169 | { | ||
1170 | volatile struct fcc_enet_private *fep = dev->priv; | ||
1171 | uint s = fep->phy_status; | ||
1172 | |||
1173 | s &= ~(PHY_STAT_SPMASK); | ||
1174 | |||
1175 | switch ((mii_reg >> 12) & 0xf) { | ||
1176 | case 1: s |= PHY_STAT_10HDX; break; | ||
1177 | case 2: s |= PHY_STAT_10FDX; break; | ||
1178 | case 4: s |= PHY_STAT_100HDX; break; | ||
1179 | case 8: s |= PHY_STAT_100FDX; break; | ||
1180 | } | ||
1181 | |||
1182 | fep->phy_status = s; | ||
1183 | } | ||
1184 | |||
1185 | static phy_info_t phy_info_dm9131 = { | ||
1186 | 0x00181b80, | ||
1187 | "DM9131", | ||
1188 | |||
1189 | (const phy_cmd_t []) { /* config */ | ||
1190 | /* parse cr and anar to get some info */ | ||
1191 | { mk_mii_read(MII_BMCR), mii_parse_cr }, | ||
1192 | { mk_mii_read(MII_ADVERTISE), mii_parse_anar }, | ||
1193 | { mk_mii_end, } | ||
1194 | }, | ||
1195 | (const phy_cmd_t []) { /* startup - enable interrupts */ | ||
1196 | { mk_mii_write(MII_DM9131_INTR, 0x0002), NULL }, | ||
1197 | { mk_mii_write(MII_BMCR, 0x1200), NULL }, /* autonegotiate */ | ||
1198 | { mk_mii_end, } | ||
1199 | }, | ||
1200 | (const phy_cmd_t []) { /* ack_int */ | ||
1201 | |||
1202 | /* we need to read INTR, SR and ANER to acknowledge */ | ||
1203 | |||
1204 | { mk_mii_read(MII_DM9131_INTR), NULL }, | ||
1205 | { mk_mii_read(MII_BMSR), mii_parse_sr }, | ||
1206 | { mk_mii_read(MII_EXPANSION), NULL }, | ||
1207 | |||
1208 | /* read acsr to get info */ | ||
1209 | |||
1210 | { mk_mii_read(MII_DM9131_ACSR), mii_parse_dm9131_acsr }, | ||
1211 | { mk_mii_end, } | ||
1212 | }, | ||
1213 | (const phy_cmd_t []) { /* shutdown - disable interrupts */ | ||
1214 | { mk_mii_write(MII_DM9131_INTR, 0x0f00), NULL }, | ||
1215 | { mk_mii_end, } | ||
1216 | }, | ||
1217 | }; | ||
1218 | |||
1219 | |||
1220 | #endif /* CONFIG_FCC_DM9131 */ | ||
1221 | #ifdef CONFIG_FCC_DM9161 | ||
1222 | /* ------------------------------------------------------------------------- */ | ||
1223 | /* DM9161 Control register values */ | ||
1224 | #define MIIM_DM9161_CR_STOP 0x0400 | ||
1225 | #define MIIM_DM9161_CR_RSTAN 0x1200 | ||
1226 | |||
1227 | #define MIIM_DM9161_SCR 0x10 | ||
1228 | #define MIIM_DM9161_SCR_INIT 0x0610 | ||
1229 | |||
1230 | /* DM9161 Specified Configuration and Status Register */ | ||
1231 | #define MIIM_DM9161_SCSR 0x11 | ||
1232 | #define MIIM_DM9161_SCSR_100F 0x8000 | ||
1233 | #define MIIM_DM9161_SCSR_100H 0x4000 | ||
1234 | #define MIIM_DM9161_SCSR_10F 0x2000 | ||
1235 | #define MIIM_DM9161_SCSR_10H 0x1000 | ||
1236 | /* DM9161 10BT register */ | ||
1237 | #define MIIM_DM9161_10BTCSR 0x12 | ||
1238 | #define MIIM_DM9161_10BTCSR_INIT 0x7800 | ||
1239 | /* DM9161 Interrupt Register */ | ||
1240 | #define MIIM_DM9161_INTR 0x15 | ||
1241 | #define MIIM_DM9161_INTR_PEND 0x8000 | ||
1242 | #define MIIM_DM9161_INTR_DPLX_MASK 0x0800 | ||
1243 | #define MIIM_DM9161_INTR_SPD_MASK 0x0400 | ||
1244 | #define MIIM_DM9161_INTR_LINK_MASK 0x0200 | ||
1245 | #define MIIM_DM9161_INTR_MASK 0x0100 | ||
1246 | #define MIIM_DM9161_INTR_DPLX_CHANGE 0x0010 | ||
1247 | #define MIIM_DM9161_INTR_SPD_CHANGE 0x0008 | ||
1248 | #define MIIM_DM9161_INTR_LINK_CHANGE 0x0004 | ||
1249 | #define MIIM_DM9161_INTR_INIT 0x0000 | ||
1250 | #define MIIM_DM9161_INTR_STOP \ | ||
1251 | (MIIM_DM9161_INTR_DPLX_MASK | MIIM_DM9161_INTR_SPD_MASK \ | ||
1252 | | MIIM_DM9161_INTR_LINK_MASK | MIIM_DM9161_INTR_MASK) | ||
1253 | |||
1254 | static void mii_parse_dm9161_sr(uint mii_reg, struct net_device * dev) | ||
1255 | { | ||
1256 | volatile struct fcc_enet_private *fep = dev->priv; | ||
1257 | uint regstat, timeout=0xffff; | ||
1258 | |||
1259 | while(!(mii_reg & 0x0020) && timeout--) | ||
1260 | { | ||
1261 | regstat=mk_mii_read(MII_BMSR); | ||
1262 | regstat |= fep->phy_addr <<23; | ||
1263 | mii_reg = mii_send_receive(fep->fip,regstat); | ||
1264 | } | ||
1265 | |||
1266 | mii_parse_sr(mii_reg, dev); | ||
1267 | } | ||
1268 | |||
1269 | static void mii_parse_dm9161_scsr(uint mii_reg, struct net_device * dev) | ||
1270 | { | ||
1271 | volatile struct fcc_enet_private *fep = dev->priv; | ||
1272 | uint s = fep->phy_status; | ||
1273 | |||
1274 | s &= ~(PHY_STAT_SPMASK); | ||
1275 | switch((mii_reg >>12) & 0xf) { | ||
1276 | case 1: | ||
1277 | { | ||
1278 | s |= PHY_STAT_10HDX; | ||
1279 | printk("10BaseT Half Duplex\n"); | ||
1280 | break; | ||
1281 | } | ||
1282 | case 2: | ||
1283 | { | ||
1284 | s |= PHY_STAT_10FDX; | ||
1285 | printk("10BaseT Full Duplex\n"); | ||
1286 | break; | ||
1287 | } | ||
1288 | case 4: | ||
1289 | { | ||
1290 | s |= PHY_STAT_100HDX; | ||
1291 | printk("100BaseT Half Duplex\n"); | ||
1292 | break; | ||
1293 | } | ||
1294 | case 8: | ||
1295 | { | ||
1296 | s |= PHY_STAT_100FDX; | ||
1297 | printk("100BaseT Full Duplex\n"); | ||
1298 | break; | ||
1299 | } | ||
1300 | } | ||
1301 | |||
1302 | fep->phy_status = s; | ||
1303 | |||
1304 | } | ||
1305 | |||
1306 | static void mii_dm9161_wait(uint mii_reg, struct net_device *dev) | ||
1307 | { | ||
1308 | int timeout = HZ; | ||
1309 | |||
1310 | /* Davicom takes a bit to come up after a reset, | ||
1311 | * so wait here for a bit */ | ||
1312 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1313 | schedule_timeout(timeout); | ||
1314 | } | ||
1315 | |||
1316 | static phy_info_t phy_info_dm9161 = { | ||
1317 | 0x00181b88, | ||
1318 | "Davicom DM9161E", | ||
1319 | (const phy_cmd_t[]) { /* config */ | ||
1320 | { mk_mii_write(MII_BMCR, MIIM_DM9161_CR_STOP), NULL}, | ||
1321 | /* Do not bypass the scrambler/descrambler */ | ||
1322 | { mk_mii_write(MIIM_DM9161_SCR, MIIM_DM9161_SCR_INIT), NULL}, | ||
1323 | /* Configure 10BTCSR register */ | ||
1324 | { mk_mii_write(MIIM_DM9161_10BTCSR, MIIM_DM9161_10BTCSR_INIT),NULL}, | ||
1325 | /* Configure some basic stuff */ | ||
1326 | { mk_mii_write(MII_BMCR, 0x1000), NULL}, | ||
1327 | { mk_mii_read(MII_BMCR), mii_parse_cr }, | ||
1328 | { mk_mii_read(MII_ADVERTISE), mii_parse_anar }, | ||
1329 | { mk_mii_end,} | ||
1330 | }, | ||
1331 | (const phy_cmd_t[]) { /* startup */ | ||
1332 | /* Restart Auto Negotiation */ | ||
1333 | { mk_mii_write(MII_BMCR, MIIM_DM9161_CR_RSTAN), NULL}, | ||
1334 | /* Status is read once to clear old link state */ | ||
1335 | { mk_mii_read(MII_BMSR), mii_dm9161_wait}, | ||
1336 | /* Auto-negotiate */ | ||
1337 | { mk_mii_read(MII_BMSR), mii_parse_dm9161_sr}, | ||
1338 | /* Read the status */ | ||
1339 | { mk_mii_read(MIIM_DM9161_SCSR), mii_parse_dm9161_scsr}, | ||
1340 | /* Clear any pending interrupts */ | ||
1341 | { mk_mii_read(MIIM_DM9161_INTR), NULL}, | ||
1342 | /* Enable Interrupts */ | ||
1343 | { mk_mii_write(MIIM_DM9161_INTR, MIIM_DM9161_INTR_INIT), NULL}, | ||
1344 | { mk_mii_end,} | ||
1345 | }, | ||
1346 | (const phy_cmd_t[]) { /* ack_int */ | ||
1347 | { mk_mii_read(MIIM_DM9161_INTR), NULL}, | ||
1348 | #if 0 | ||
1349 | { mk_mii_read(MII_BMSR), NULL}, | ||
1350 | { mk_mii_read(MII_BMSR), mii_parse_dm9161_sr}, | ||
1351 | { mk_mii_read(MIIM_DM9161_SCSR), mii_parse_dm9161_scsr}, | ||
1352 | #endif | ||
1353 | { mk_mii_end,} | ||
1354 | }, | ||
1355 | (const phy_cmd_t[]) { /* shutdown */ | ||
1356 | { mk_mii_read(MIIM_DM9161_INTR),NULL}, | ||
1357 | { mk_mii_write(MIIM_DM9161_INTR, MIIM_DM9161_INTR_STOP), NULL}, | ||
1358 | { mk_mii_end,} | ||
1359 | }, | ||
1360 | }; | ||
1361 | #endif /* CONFIG_FCC_DM9161 */ | ||
1362 | |||
1363 | static phy_info_t *phy_info[] = { | ||
1364 | |||
1365 | #ifdef CONFIG_FCC_LXT970 | ||
1366 | &phy_info_lxt970, | ||
1367 | #endif /* CONFIG_FCC_LXT970 */ | ||
1368 | |||
1369 | #ifdef CONFIG_FCC_LXT971 | ||
1370 | &phy_info_lxt971, | ||
1371 | #endif /* CONFIG_FCC_LXT971 */ | ||
1372 | |||
1373 | #ifdef CONFIG_FCC_QS6612 | ||
1374 | &phy_info_qs6612, | ||
1375 | #endif /* CONFIG_FCC_QS6612 */ | ||
1376 | |||
1377 | #ifdef CONFIG_FCC_DM9131 | ||
1378 | &phy_info_dm9131, | ||
1379 | #endif /* CONFIG_FCC_DM9131 */ | ||
1380 | |||
1381 | #ifdef CONFIG_FCC_DM9161 | ||
1382 | &phy_info_dm9161, | ||
1383 | #endif /* CONFIG_FCC_DM9161 */ | ||
1384 | |||
1385 | #ifdef CONFIG_FCC_GENERIC_PHY | ||
1386 | /* Generic PHY support. This must be the last PHY in the table. | ||
1387 | * It will be used to support any PHY that doesn't match a previous | ||
1388 | * entry in the table. | ||
1389 | */ | ||
1390 | &phy_info_generic, | ||
1391 | #endif /* CONFIG_FCC_GENERIC_PHY */ | ||
1392 | |||
1393 | NULL | ||
1394 | }; | ||
1395 | |||
1396 | static void mii_display_status(void *data) | ||
1397 | { | ||
1398 | struct net_device *dev = data; | ||
1399 | volatile struct fcc_enet_private *fep = dev->priv; | ||
1400 | uint s = fep->phy_status; | ||
1401 | |||
1402 | if (!fep->link && !fep->old_link) { | ||
1403 | /* Link is still down - don't print anything */ | ||
1404 | return; | ||
1405 | } | ||
1406 | |||
1407 | printk("%s: status: ", dev->name); | ||
1408 | |||
1409 | if (!fep->link) { | ||
1410 | printk("link down"); | ||
1411 | } else { | ||
1412 | printk("link up"); | ||
1413 | |||
1414 | switch(s & PHY_STAT_SPMASK) { | ||
1415 | case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break; | ||
1416 | case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break; | ||
1417 | case PHY_STAT_10FDX: printk(", 10 Mbps Full Duplex"); break; | ||
1418 | case PHY_STAT_10HDX: printk(", 10 Mbps Half Duplex"); break; | ||
1419 | default: | ||
1420 | printk(", Unknown speed/duplex"); | ||
1421 | } | ||
1422 | |||
1423 | if (s & PHY_STAT_ANC) | ||
1424 | printk(", auto-negotiation complete"); | ||
1425 | } | ||
1426 | |||
1427 | if (s & PHY_STAT_FAULT) | ||
1428 | printk(", remote fault"); | ||
1429 | |||
1430 | printk(".\n"); | ||
1431 | } | ||
1432 | |||
1433 | static void mii_display_config(void *data) | ||
1434 | { | ||
1435 | struct net_device *dev = data; | ||
1436 | volatile struct fcc_enet_private *fep = dev->priv; | ||
1437 | uint s = fep->phy_status; | ||
1438 | |||
1439 | printk("%s: config: auto-negotiation ", dev->name); | ||
1440 | |||
1441 | if (s & PHY_CONF_ANE) | ||
1442 | printk("on"); | ||
1443 | else | ||
1444 | printk("off"); | ||
1445 | |||
1446 | if (s & PHY_CONF_100FDX) | ||
1447 | printk(", 100FDX"); | ||
1448 | if (s & PHY_CONF_100HDX) | ||
1449 | printk(", 100HDX"); | ||
1450 | if (s & PHY_CONF_10FDX) | ||
1451 | printk(", 10FDX"); | ||
1452 | if (s & PHY_CONF_10HDX) | ||
1453 | printk(", 10HDX"); | ||
1454 | if (!(s & PHY_CONF_SPMASK)) | ||
1455 | printk(", No speed/duplex selected?"); | ||
1456 | |||
1457 | if (s & PHY_CONF_LOOP) | ||
1458 | printk(", loopback enabled"); | ||
1459 | |||
1460 | printk(".\n"); | ||
1461 | |||
1462 | fep->sequence_done = 1; | ||
1463 | } | ||
1464 | |||
1465 | static void mii_relink(struct net_device *dev) | ||
1466 | { | ||
1467 | struct fcc_enet_private *fep = dev->priv; | ||
1468 | int duplex = 0; | ||
1469 | |||
1470 | fep->old_link = fep->link; | ||
1471 | fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0; | ||
1472 | |||
1473 | #ifdef MDIO_DEBUG | ||
1474 | printk(" mii_relink: link=%d\n", fep->link); | ||
1475 | #endif | ||
1476 | |||
1477 | if (fep->link) { | ||
1478 | if (fep->phy_status | ||
1479 | & (PHY_STAT_100FDX | PHY_STAT_10FDX)) | ||
1480 | duplex = 1; | ||
1481 | fcc_restart(dev, duplex); | ||
1482 | #ifdef MDIO_DEBUG | ||
1483 | printk(" mii_relink: duplex=%d\n", duplex); | ||
1484 | #endif | ||
1485 | } | ||
1486 | } | ||
1487 | |||
1488 | static void mii_queue_relink(uint mii_reg, struct net_device *dev) | ||
1489 | { | ||
1490 | struct fcc_enet_private *fep = dev->priv; | ||
1491 | |||
1492 | mii_relink(dev); | ||
1493 | |||
1494 | schedule_work(&fep->phy_relink); | ||
1495 | } | ||
1496 | |||
1497 | static void mii_queue_config(uint mii_reg, struct net_device *dev) | ||
1498 | { | ||
1499 | struct fcc_enet_private *fep = dev->priv; | ||
1500 | |||
1501 | schedule_work(&fep->phy_display_config); | ||
1502 | } | ||
1503 | |||
1504 | phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_BMCR), mii_queue_relink }, | ||
1505 | { mk_mii_end, } }; | ||
1506 | phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_BMCR), mii_queue_config }, | ||
1507 | { mk_mii_end, } }; | ||
1508 | |||
1509 | |||
1510 | /* Read remainder of PHY ID. | ||
1511 | */ | ||
1512 | static void | ||
1513 | mii_discover_phy3(uint mii_reg, struct net_device *dev) | ||
1514 | { | ||
1515 | struct fcc_enet_private *fep; | ||
1516 | int i; | ||
1517 | |||
1518 | fep = dev->priv; | ||
1519 | printk("mii_reg: %08x\n", mii_reg); | ||
1520 | fep->phy_id |= (mii_reg & 0xffff); | ||
1521 | |||
1522 | for(i = 0; phy_info[i]; i++) | ||
1523 | if((phy_info[i]->id == (fep->phy_id >> 4)) || !phy_info[i]->id) | ||
1524 | break; | ||
1525 | |||
1526 | if(!phy_info[i]) | ||
1527 | panic("%s: PHY id 0x%08x is not supported!\n", | ||
1528 | dev->name, fep->phy_id); | ||
1529 | |||
1530 | fep->phy = phy_info[i]; | ||
1531 | fep->phy_id_done = 1; | ||
1532 | |||
1533 | printk("%s: Phy @ 0x%x, type %s (0x%08x)\n", | ||
1534 | dev->name, fep->phy_addr, fep->phy->name, fep->phy_id); | ||
1535 | } | ||
1536 | |||
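A worked example of the ID matching done in mii_discover_phy3() above, assuming an LXT971 whose PHYSID1/PHYSID2 registers read 0x0013 and 0x78e2 (the low revision nibble is illustrative): shifting the assembled ID right by four bits drops the revision, so it matches the 0x0001378e entry in phy_info_lxt971.

#include <assert.h>

int main(void)
{
        unsigned int physid1 = 0x0013;           /* illustrative LXT971 readings */
        unsigned int physid2 = 0x78e2;
        unsigned int phy_id  = (physid1 << 16) | (physid2 & 0xffff);

        /* Same comparison as in mii_discover_phy3(): the low 4 bits hold the
         * silicon revision and are masked off before the table lookup. */
        assert((phy_id >> 4) == 0x0001378e);
        return 0;
}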
1537 | /* Scan all of the MII PHY addresses looking for someone to respond | ||
1538 | * with a valid ID. This usually happens quickly. | ||
1539 | */ | ||
1540 | static void | ||
1541 | mii_discover_phy(uint mii_reg, struct net_device *dev) | ||
1542 | { | ||
1543 | struct fcc_enet_private *fep; | ||
1544 | uint phytype; | ||
1545 | |||
1546 | fep = dev->priv; | ||
1547 | |||
1548 | if ((phytype = (mii_reg & 0xffff)) != 0xffff) { | ||
1549 | |||
1550 | /* Got first part of ID, now get remainder. */ | ||
1551 | fep->phy_id = phytype << 16; | ||
1552 | mii_queue(dev, mk_mii_read(MII_PHYSID2), mii_discover_phy3); | ||
1553 | } else { | ||
1554 | fep->phy_addr++; | ||
1555 | if (fep->phy_addr < 32) { | ||
1556 | mii_queue(dev, mk_mii_read(MII_PHYSID1), | ||
1557 | mii_discover_phy); | ||
1558 | } else { | ||
1559 | printk("fec: No PHY device found.\n"); | ||
1560 | } | ||
1561 | } | ||
1562 | } | ||
1563 | #endif /* CONFIG_USE_MDIO */ | ||
1564 | |||
1565 | #ifdef PHY_INTERRUPT | ||
1566 | /* This interrupt occurs when the PHY detects a link change. */ | ||
1567 | static irqreturn_t | ||
1568 | mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs) | ||
1569 | { | ||
1570 | struct net_device *dev = dev_id; | ||
1571 | struct fcc_enet_private *fep = dev->priv; | ||
1572 | fcc_info_t *fip = fep->fip; | ||
1573 | |||
1574 | if (fep->phy) { | ||
1575 | /* We don't want to be interrupted by an FCC | ||
1576 | * interrupt here. | ||
1577 | */ | ||
1578 | disable_irq_nosync(fip->fc_interrupt); | ||
1579 | |||
1580 | mii_do_cmd(dev, fep->phy->ack_int); | ||
1581 | /* restart and display status */ | ||
1582 | mii_do_cmd(dev, phy_cmd_relink); | ||
1583 | |||
1584 | enable_irq(fip->fc_interrupt); | ||
1585 | } | ||
1586 | return IRQ_HANDLED; | ||
1587 | } | ||
1588 | #endif /* ifdef PHY_INTERRUPT */ | ||
1589 | |||
1590 | #if 0 /* This should be fixed someday */ | ||
1591 | /* Set or clear the multicast filter for this adaptor. | ||
1592 | * Skeleton taken from sunlance driver. | ||
1593 | * The CPM Ethernet implementation allows Multicast as well as individual | ||
1594 | * MAC address filtering. Some of the drivers check to make sure it is | ||
1595 | * a group multicast address, and discard those that are not. I guess I | ||
1596 | * will do the same for now, but just remove the test if you want | ||
1597 | * individual filtering as well (do the upper net layers want or support | ||
1598 | * this kind of feature?). | ||
1599 | */ | ||
1600 | static void | ||
1601 | set_multicast_list(struct net_device *dev) | ||
1602 | { | ||
1603 | struct fcc_enet_private *cep; | ||
1604 | struct dev_mc_list *dmi; | ||
1605 | u_char *mcptr, *tdptr; | ||
1606 | volatile fcc_enet_t *ep; | ||
1607 | int i, j; | ||
1608 | |||
1609 | cep = (struct fcc_enet_private *)dev->priv; | ||
1610 | |||
1611 | return; | ||
1612 | /* Get pointer to FCC area in parameter RAM. | ||
1613 | */ | ||
1614 | ep = (fcc_enet_t *)dev->base_addr; | ||
1615 | |||
1616 | if (dev->flags&IFF_PROMISC) { | ||
1617 | |||
1618 | /* Log any net taps. */ | ||
1619 | printk("%s: Promiscuous mode enabled.\n", dev->name); | ||
1620 | cep->fccp->fcc_fpsmr |= FCC_PSMR_PRO; | ||
1621 | } else { | ||
1622 | |||
1623 | cep->fccp->fcc_fpsmr &= ~FCC_PSMR_PRO; | ||
1624 | |||
1625 | if (dev->flags & IFF_ALLMULTI) { | ||
1626 | /* Catch all multicast addresses, so set the | ||
1627 | * filter to all 1's. | ||
1628 | */ | ||
1629 | ep->fen_gaddrh = 0xffffffff; | ||
1630 | ep->fen_gaddrl = 0xffffffff; | ||
1631 | } | ||
1632 | else { | ||
1633 | /* Clear filter and add the addresses in the list. | ||
1634 | */ | ||
1635 | ep->fen_gaddrh = 0; | ||
1636 | ep->fen_gaddrl = 0; | ||
1637 | |||
1638 | dmi = dev->mc_list; | ||
1639 | |||
1640 | for (i=0; i<dev->mc_count; i++, dmi = dmi->next) { | ||
1641 | |||
1642 | /* Only support group multicast for now. | ||
1643 | */ | ||
1644 | if (!(dmi->dmi_addr[0] & 1)) | ||
1645 | continue; | ||
1646 | |||
1647 | /* The address in dmi_addr is LSB first, | ||
1648 | * and taddr is MSB first. We have to | ||
1649 | * copy bytes MSB first from dmi_addr. | ||
1650 | */ | ||
1651 | mcptr = (u_char *)dmi->dmi_addr + 5; | ||
1652 | tdptr = (u_char *)&ep->fen_taddrh; | ||
1653 | for (j=0; j<6; j++) | ||
1654 | *tdptr++ = *mcptr--; | ||
1655 | |||
1656 | /* Ask CPM to run CRC and set bit in | ||
1657 | * filter mask. | ||
1658 | */ | ||
1659 | cpmp->cp_cpcr = mk_cr_cmd(cep->fip->fc_cpmpage, | ||
1660 | cep->fip->fc_cpmblock, 0x0c, | ||
1661 | CPM_CR_SET_GADDR) | CPM_CR_FLG; | ||
1662 | udelay(10); | ||
1663 | while (cpmp->cp_cpcr & CPM_CR_FLG); | ||
1664 | } | ||
1665 | } | ||
1666 | } | ||
1667 | } | ||
1668 | #endif /* if 0 */ | ||
1669 | |||
1670 | |||
1671 | /* Set the individual MAC address. | ||
1672 | */ | ||
1673 | int fcc_enet_set_mac_address(struct net_device *dev, void *p) | ||
1674 | { | ||
1675 | struct sockaddr *addr= (struct sockaddr *) p; | ||
1676 | struct fcc_enet_private *cep; | ||
1677 | volatile fcc_enet_t *ep; | ||
1678 | unsigned char *eap; | ||
1679 | int i; | ||
1680 | |||
1681 | cep = (struct fcc_enet_private *)(dev->priv); | ||
1682 | ep = cep->ep; | ||
1683 | |||
1684 | if (netif_running(dev)) | ||
1685 | return -EBUSY; | ||
1686 | |||
1687 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
1688 | |||
1689 | eap = (unsigned char *) &(ep->fen_paddrh); | ||
1690 | for (i=5; i>=0; i--) | ||
1691 | *eap++ = addr->sa_data[i]; | ||
1692 | |||
1693 | return 0; | ||
1694 | } | ||
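The copy loop above stores the station address into the paddr registers in reverse byte order (sa_data[5] first). A minimal host-side illustration of that reversal, with a plain byte array standing in for the memory-mapped fen_paddrh/m/l registers:

#include <stdio.h>

int main(void)
{
        unsigned char sa_data[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        unsigned char paddr[6];                  /* stand-in for fen_paddrh/m/l */
        unsigned char *eap = paddr;
        int i;

        for (i = 5; i >= 0; i--)                 /* same loop as above */
                *eap++ = sa_data[i];

        for (i = 0; i < 6; i++)
                printf("%02x ", paddr[i]);       /* prints: 55 44 33 22 11 00 */
        printf("\n");
        return 0;
}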
1695 | |||
1696 | |||
1697 | /* Initialize the CPM Ethernet on FCC. | ||
1698 | */ | ||
1699 | static int __init fec_enet_init(void) | ||
1700 | { | ||
1701 | struct net_device *dev; | ||
1702 | struct fcc_enet_private *cep; | ||
1703 | fcc_info_t *fip; | ||
1704 | int i, np, err; | ||
1705 | volatile cpm2_map_t *immap; | ||
1706 | volatile iop_cpm2_t *io; | ||
1707 | |||
1708 | immap = (cpm2_map_t *)CPM_MAP_ADDR; /* and to internal registers */ | ||
1709 | io = &immap->im_ioport; | ||
1710 | |||
1711 | np = sizeof(fcc_ports) / sizeof(fcc_info_t); | ||
1712 | fip = fcc_ports; | ||
1713 | |||
1714 | while (np-- > 0) { | ||
1715 | /* Create an Ethernet device instance. | ||
1716 | */ | ||
1717 | dev = alloc_etherdev(sizeof(*cep)); | ||
1718 | if (!dev) | ||
1719 | return -ENOMEM; | ||
1720 | |||
1721 | cep = dev->priv; | ||
1722 | spin_lock_init(&cep->lock); | ||
1723 | cep->fip = fip; | ||
1724 | |||
1725 | init_fcc_shutdown(fip, cep, immap); | ||
1726 | init_fcc_ioports(fip, io, immap); | ||
1727 | init_fcc_param(fip, dev, immap); | ||
1728 | |||
1729 | dev->base_addr = (unsigned long)(cep->ep); | ||
1730 | |||
1731 | /* The CPM Ethernet specific entries in the device | ||
1732 | * structure. | ||
1733 | */ | ||
1734 | dev->open = fcc_enet_open; | ||
1735 | dev->hard_start_xmit = fcc_enet_start_xmit; | ||
1736 | dev->tx_timeout = fcc_enet_timeout; | ||
1737 | dev->watchdog_timeo = TX_TIMEOUT; | ||
1738 | dev->stop = fcc_enet_close; | ||
1739 | dev->get_stats = fcc_enet_get_stats; | ||
1740 | /* dev->set_multicast_list = set_multicast_list; */ | ||
1741 | dev->set_mac_address = fcc_enet_set_mac_address; | ||
1742 | |||
1743 | init_fcc_startup(fip, dev); | ||
1744 | |||
1745 | err = register_netdev(dev); | ||
1746 | if (err) { | ||
1747 | free_netdev(dev); | ||
1748 | return err; | ||
1749 | } | ||
1750 | |||
1751 | printk("%s: FCC ENET Version 0.3, ", dev->name); | ||
1752 | for (i=0; i<5; i++) | ||
1753 | printk("%02x:", dev->dev_addr[i]); | ||
1754 | printk("%02x\n", dev->dev_addr[5]); | ||
1755 | |||
1756 | #ifdef CONFIG_USE_MDIO | ||
1757 | /* Queue up command to detect the PHY and initialize the | ||
1758 | * remainder of the interface. | ||
1759 | */ | ||
1760 | cep->phy_id_done = 0; | ||
1761 | cep->phy_addr = fip->fc_phyaddr; | ||
1762 | mii_queue(dev, mk_mii_read(MII_PHYSID1), mii_discover_phy); | ||
1763 | INIT_WORK(&cep->phy_relink, mii_display_status, dev); | ||
1764 | INIT_WORK(&cep->phy_display_config, mii_display_config, dev); | ||
1765 | #endif /* CONFIG_USE_MDIO */ | ||
1766 | |||
1767 | fip++; | ||
1768 | } | ||
1769 | |||
1770 | return 0; | ||
1771 | } | ||
1772 | module_init(fec_enet_init); | ||
1773 | |||
1774 | /* Make sure the device is shut down during initialization. | ||
1775 | */ | ||
1776 | static void __init | ||
1777 | init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep, | ||
1778 | volatile cpm2_map_t *immap) | ||
1779 | { | ||
1780 | volatile fcc_enet_t *ep; | ||
1781 | volatile fcc_t *fccp; | ||
1782 | |||
1783 | /* Get pointer to FCC area in parameter RAM. | ||
1784 | */ | ||
1785 | ep = (fcc_enet_t *)(&immap->im_dprambase[fip->fc_proff]); | ||
1786 | |||
1787 | /* And another to the FCC register area. | ||
1788 | */ | ||
1789 | fccp = (volatile fcc_t *)(&immap->im_fcc[fip->fc_fccnum]); | ||
1790 | cep->fccp = fccp; /* Keep the pointers handy */ | ||
1791 | cep->ep = ep; | ||
1792 | |||
1793 | /* Disable receive and transmit in case someone left it running. | ||
1794 | */ | ||
1795 | fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT); | ||
1796 | } | ||
1797 | |||
1798 | /* Initialize the I/O pins for the FCC Ethernet. | ||
1799 | */ | ||
1800 | static void __init | ||
1801 | init_fcc_ioports(fcc_info_t *fip, volatile iop_cpm2_t *io, | ||
1802 | volatile cpm2_map_t *immap) | ||
1803 | { | ||
1804 | |||
1805 | /* FCC1 pins are on port A/C. FCC2/3 are port B/C. | ||
1806 | */ | ||
1807 | if (fip->fc_proff == PROFF_FCC1) { | ||
1808 | /* Configure port A and C pins for FCC1 Ethernet. | ||
1809 | */ | ||
1810 | io->iop_pdira &= ~PA1_DIRA_BOUT; | ||
1811 | io->iop_pdira |= PA1_DIRA_BIN; | ||
1812 | io->iop_psora &= ~PA1_PSORA_BOUT; | ||
1813 | io->iop_psora |= PA1_PSORA_BIN; | ||
1814 | io->iop_ppara |= (PA1_DIRA_BOUT | PA1_DIRA_BIN); | ||
1815 | } | ||
1816 | if (fip->fc_proff == PROFF_FCC2) { | ||
1817 | /* Configure port B and C pins for FCC Ethernet. | ||
1818 | */ | ||
1819 | io->iop_pdirb &= ~PB2_DIRB_BOUT; | ||
1820 | io->iop_pdirb |= PB2_DIRB_BIN; | ||
1821 | io->iop_psorb &= ~PB2_PSORB_BOUT; | ||
1822 | io->iop_psorb |= PB2_PSORB_BIN; | ||
1823 | io->iop_pparb |= (PB2_DIRB_BOUT | PB2_DIRB_BIN); | ||
1824 | } | ||
1825 | if (fip->fc_proff == PROFF_FCC3) { | ||
1826 | /* Configure port B and C pins for FCC Ethernet. | ||
1827 | */ | ||
1828 | io->iop_pdirb &= ~PB3_DIRB_BOUT; | ||
1829 | io->iop_pdirb |= PB3_DIRB_BIN; | ||
1830 | io->iop_psorb &= ~PB3_PSORB_BOUT; | ||
1831 | io->iop_psorb |= PB3_PSORB_BIN; | ||
1832 | io->iop_pparb |= (PB3_DIRB_BOUT | PB3_DIRB_BIN); | ||
1833 | |||
1834 | io->iop_pdirc &= ~PC3_DIRC_BOUT; | ||
1835 | io->iop_pdirc |= PC3_DIRC_BIN; | ||
1836 | io->iop_psorc &= ~PC3_PSORC_BOUT; | ||
1837 | io->iop_psorc |= PC3_PSORC_BIN; | ||
1838 | io->iop_pparc |= (PC3_DIRC_BOUT | PC3_DIRC_BIN); | ||
1839 | |||
1840 | } | ||
1841 | |||
1842 | /* Port C has clocks...... | ||
1843 | */ | ||
1844 | io->iop_psorc &= ~(fip->fc_trxclocks); | ||
1845 | io->iop_pdirc &= ~(fip->fc_trxclocks); | ||
1846 | io->iop_pparc |= fip->fc_trxclocks; | ||
1847 | |||
1848 | #ifdef CONFIG_USE_MDIO | ||
1849 | /* ....and the MII serial clock/data. | ||
1850 | */ | ||
1851 | io->iop_pdatc |= (fip->fc_mdio | fip->fc_mdck); | ||
1852 | io->iop_podrc &= ~(fip->fc_mdio | fip->fc_mdck); | ||
1853 | io->iop_pdirc |= (fip->fc_mdio | fip->fc_mdck); | ||
1854 | io->iop_pparc &= ~(fip->fc_mdio | fip->fc_mdck); | ||
1855 | #endif /* CONFIG_USE_MDIO */ | ||
1856 | |||
1857 | /* Configure Serial Interface clock routing. | ||
1858 | * First, clear all FCC bits to zero, | ||
1859 | * then set the ones we want. | ||
1860 | */ | ||
1861 | immap->im_cpmux.cmx_fcr &= ~(fip->fc_clockmask); | ||
1862 | immap->im_cpmux.cmx_fcr |= fip->fc_clockroute; | ||
1863 | } | ||
1864 | |||
1865 | static void __init | ||
1866 | init_fcc_param(fcc_info_t *fip, struct net_device *dev, | ||
1867 | volatile cpm2_map_t *immap) | ||
1868 | { | ||
1869 | unsigned char *eap; | ||
1870 | unsigned long mem_addr; | ||
1871 | bd_t *bd; | ||
1872 | int i, j; | ||
1873 | struct fcc_enet_private *cep; | ||
1874 | volatile fcc_enet_t *ep; | ||
1875 | volatile cbd_t *bdp; | ||
1876 | volatile cpm_cpm2_t *cp; | ||
1877 | |||
1878 | cep = (struct fcc_enet_private *)(dev->priv); | ||
1879 | ep = cep->ep; | ||
1880 | cp = cpmp; | ||
1881 | |||
1882 | bd = (bd_t *)__res; | ||
1883 | |||
1884 | /* Zero the whole thing. I must have missed clearing something | ||
1885 | * individually, because it works when I do this. | ||
1886 | */ | ||
1887 | memset((char *)ep, 0, sizeof(fcc_enet_t)); | ||
1888 | |||
1889 | /* Allocate space for the buffer descriptors from regular memory. | ||
1890 | * Initialize base addresses for the buffer descriptors. | ||
1891 | */ | ||
1892 | cep->rx_bd_base = (cbd_t *)kmalloc(sizeof(cbd_t) * RX_RING_SIZE, | ||
1893 | GFP_KERNEL | GFP_DMA); | ||
1894 | ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base); | ||
1895 | cep->tx_bd_base = (cbd_t *)kmalloc(sizeof(cbd_t) * TX_RING_SIZE, | ||
1896 | GFP_KERNEL | GFP_DMA); | ||
1897 | ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base); | ||
1898 | |||
1899 | cep->dirty_tx = cep->cur_tx = cep->tx_bd_base; | ||
1900 | cep->cur_rx = cep->rx_bd_base; | ||
1901 | |||
1902 | ep->fen_genfcc.fcc_rstate = (CPMFCR_GBL | CPMFCR_EB) << 24; | ||
1903 | ep->fen_genfcc.fcc_tstate = (CPMFCR_GBL | CPMFCR_EB) << 24; | ||
1904 | |||
1905 | /* Set maximum bytes per receive buffer. | ||
1906 | * It must be a multiple of 32. | ||
1907 | */ | ||
1908 | ep->fen_genfcc.fcc_mrblr = PKT_MAXBLR_SIZE; | ||
1909 | |||
1910 | /* Allocate space in the reserved FCC area of DPRAM for the | ||
1911 | * internal buffers. No one uses this space (yet), so we | ||
1912 | * can do this. Later, we will add resource management for | ||
1913 | * this area. | ||
1914 | */ | ||
1915 | mem_addr = CPM_FCC_SPECIAL_BASE + (fip->fc_fccnum * 128); | ||
1916 | ep->fen_genfcc.fcc_riptr = mem_addr; | ||
1917 | ep->fen_genfcc.fcc_tiptr = mem_addr+32; | ||
1918 | ep->fen_padptr = mem_addr+64; | ||
1919 | memset((char *)(&(immap->im_dprambase[(mem_addr+64)])), 0x88, 32); | ||
1920 | |||
1921 | ep->fen_genfcc.fcc_rbptr = 0; | ||
1922 | ep->fen_genfcc.fcc_tbptr = 0; | ||
1923 | ep->fen_genfcc.fcc_rcrc = 0; | ||
1924 | ep->fen_genfcc.fcc_tcrc = 0; | ||
1925 | ep->fen_genfcc.fcc_res1 = 0; | ||
1926 | ep->fen_genfcc.fcc_res2 = 0; | ||
1927 | |||
1928 | ep->fen_camptr = 0; /* CAM isn't used in this driver */ | ||
1929 | |||
1930 | /* Set CRC preset and mask. | ||
1931 | */ | ||
1932 | ep->fen_cmask = 0xdebb20e3; | ||
1933 | ep->fen_cpres = 0xffffffff; | ||
1934 | |||
1935 | ep->fen_crcec = 0; /* CRC Error counter */ | ||
1936 | ep->fen_alec = 0; /* alignment error counter */ | ||
1937 | ep->fen_disfc = 0; /* discard frame counter */ | ||
1938 | ep->fen_retlim = 15; /* Retry limit threshold */ | ||
1939 | ep->fen_pper = 0; /* Normal persistence */ | ||
1940 | |||
1941 | /* Clear hash filter tables. | ||
1942 | */ | ||
1943 | ep->fen_gaddrh = 0; | ||
1944 | ep->fen_gaddrl = 0; | ||
1945 | ep->fen_iaddrh = 0; | ||
1946 | ep->fen_iaddrl = 0; | ||
1947 | |||
1948 | /* Clear the Out-of-sequence TxBD. | ||
1949 | */ | ||
1950 | ep->fen_tfcstat = 0; | ||
1951 | ep->fen_tfclen = 0; | ||
1952 | ep->fen_tfcptr = 0; | ||
1953 | |||
1954 | ep->fen_mflr = PKT_MAXBUF_SIZE; /* maximum frame length register */ | ||
1955 | ep->fen_minflr = PKT_MINBUF_SIZE; /* minimum frame length register */ | ||
1956 | |||
1957 | /* Set Ethernet station address. | ||
1958 | * | ||
1959 | * This is supplied in the board information structure, so we | ||
1960 | * copy that into the controller. | ||
1961 | * So far, we have only been given one Ethernet address. We make | ||
1962 | * it unique by setting a few bits in the upper byte of the | ||
1963 | * non-static part of the address. | ||
1964 | */ | ||
1965 | eap = (unsigned char *)&(ep->fen_paddrh); | ||
1966 | for (i=5; i>=0; i--) { | ||
1967 | |||
1968 | /* | ||
1969 | * The EP8260 only uses FCC3, so we can safely give it the real | ||
1970 | * MAC address. | ||
1971 | */ | ||
1972 | #ifdef CONFIG_SBC82xx | ||
1973 | if (i == 5) { | ||
1974 | /* bd->bi_enetaddr holds the SCC0 address; the FCC | ||
1975 | devices count up from there */ | ||
1976 | dev->dev_addr[i] = bd->bi_enetaddr[i] & ~3; | ||
1977 | dev->dev_addr[i] += 1 + fip->fc_fccnum; | ||
1978 | *eap++ = dev->dev_addr[i]; | ||
1979 | } | ||
1980 | #else | ||
1981 | #ifndef CONFIG_RPX8260 | ||
1982 | if (i == 3) { | ||
1983 | dev->dev_addr[i] = bd->bi_enetaddr[i]; | ||
1984 | dev->dev_addr[i] |= (1 << (7 - fip->fc_fccnum)); | ||
1985 | *eap++ = dev->dev_addr[i]; | ||
1986 | } else | ||
1987 | #endif | ||
1988 | { | ||
1989 | *eap++ = dev->dev_addr[i] = bd->bi_enetaddr[i]; | ||
1990 | } | ||
1991 | #endif | ||
1992 | } | ||
1993 | |||
1994 | ep->fen_taddrh = 0; | ||
1995 | ep->fen_taddrm = 0; | ||
1996 | ep->fen_taddrl = 0; | ||
1997 | |||
1998 | ep->fen_maxd1 = PKT_MAXDMA_SIZE; /* maximum DMA1 length */ | ||
1999 | ep->fen_maxd2 = PKT_MAXDMA_SIZE; /* maximum DMA2 length */ | ||
2000 | |||
2001 | /* Clear stat counters, in case we ever enable RMON. | ||
2002 | */ | ||
2003 | ep->fen_octc = 0; | ||
2004 | ep->fen_colc = 0; | ||
2005 | ep->fen_broc = 0; | ||
2006 | ep->fen_mulc = 0; | ||
2007 | ep->fen_uspc = 0; | ||
2008 | ep->fen_frgc = 0; | ||
2009 | ep->fen_ospc = 0; | ||
2010 | ep->fen_jbrc = 0; | ||
2011 | ep->fen_p64c = 0; | ||
2012 | ep->fen_p65c = 0; | ||
2013 | ep->fen_p128c = 0; | ||
2014 | ep->fen_p256c = 0; | ||
2015 | ep->fen_p512c = 0; | ||
2016 | ep->fen_p1024c = 0; | ||
2017 | |||
2018 | ep->fen_rfthr = 0; /* Suggested by manual */ | ||
2019 | ep->fen_rfcnt = 0; | ||
2020 | ep->fen_cftype = 0; | ||
2021 | |||
2022 | /* Now allocate the host memory pages and initialize the | ||
2023 | * buffer descriptors. | ||
2024 | */ | ||
2025 | bdp = cep->tx_bd_base; | ||
2026 | for (i=0; i<TX_RING_SIZE; i++) { | ||
2027 | |||
2028 | /* Initialize every BD in the transmit ring. | ||
2029 | */ | ||
2030 | bdp->cbd_sc = 0; | ||
2031 | bdp->cbd_datlen = 0; | ||
2032 | bdp->cbd_bufaddr = 0; | ||
2033 | bdp++; | ||
2034 | } | ||
2035 | |||
2036 | /* Set the last buffer to wrap. | ||
2037 | */ | ||
2038 | bdp--; | ||
2039 | bdp->cbd_sc |= BD_SC_WRAP; | ||
2040 | |||
2041 | bdp = cep->rx_bd_base; | ||
2042 | for (i=0; i<FCC_ENET_RX_PAGES; i++) { | ||
2043 | |||
2044 | /* Allocate a page. | ||
2045 | */ | ||
2046 | mem_addr = __get_free_page(GFP_KERNEL); | ||
2047 | |||
2048 | /* Initialize the BD for every fragment in the page. | ||
2049 | */ | ||
2050 | for (j=0; j<FCC_ENET_RX_FRPPG; j++) { | ||
2051 | bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR; | ||
2052 | bdp->cbd_datlen = 0; | ||
2053 | bdp->cbd_bufaddr = __pa(mem_addr); | ||
2054 | mem_addr += FCC_ENET_RX_FRSIZE; | ||
2055 | bdp++; | ||
2056 | } | ||
2057 | } | ||
2058 | |||
2059 | /* Set the last buffer to wrap. | ||
2060 | */ | ||
2061 | bdp--; | ||
2062 | bdp->cbd_sc |= BD_SC_WRAP; | ||
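
	/*
	 * Illustration only: each page allocated above is carved into
	 * FCC_ENET_RX_FRPPG fragments of FCC_ENET_RX_FRSIZE bytes, one
	 * fragment per buffer descriptor, so the ring holds
	 * FCC_ENET_RX_PAGES * FCC_ENET_RX_FRPPG descriptors in total.
	 * Only this final descriptor carries BD_SC_WRAP, so the FCC wraps
	 * back to the base of the ring after using it.
	 */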
2063 | |||
2064 | /* Let's re-initialize the channel now. We have to do it later | ||
2065 | * than the manual describes because we have just now finished | ||
2066 | * the BD initialization. | ||
2067 | */ | ||
2068 | cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock, 0x0c, | ||
2069 | CPM_CR_INIT_TRX) | CPM_CR_FLG; | ||
2070 | while (cp->cp_cpcr & CPM_CR_FLG); | ||
2071 | |||
2072 | cep->skb_cur = cep->skb_dirty = 0; | ||
2073 | } | ||
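
/*
 * Illustrative sketch, not part of this driver: how a buffer-descriptor
 * ring terminated with BD_SC_WRAP, like the TX and RX rings built in
 * init_fcc_param() above, is walked.  next_bd() is a hypothetical
 * helper name.
 */
static volatile cbd_t *
next_bd(volatile cbd_t *bdp, volatile cbd_t *base)
{
	/* The descriptor carrying BD_SC_WRAP is the last one in the ring;
	 * after it, processing starts over at the base.
	 */
	if (bdp->cbd_sc & BD_SC_WRAP)
		return base;
	return bdp + 1;
}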
2074 | |||
2075 | /* Let 'er rip. | ||
2076 | */ | ||
2077 | static void __init | ||
2078 | init_fcc_startup(fcc_info_t *fip, struct net_device *dev) | ||
2079 | { | ||
2080 | volatile fcc_t *fccp; | ||
2081 | struct fcc_enet_private *cep; | ||
2082 | |||
2083 | cep = (struct fcc_enet_private *)(dev->priv); | ||
2084 | fccp = cep->fccp; | ||
2085 | |||
2086 | #ifdef CONFIG_RPX8260 | ||
2087 | #ifdef PHY_INTERRUPT | ||
2088 | /* Route PHY interrupt to IRQ. The following code only works for | ||
2089 | * IRQ1 - IRQ7. It does not work for Port C interrupts. | ||
2090 | */ | ||
2091 | *((volatile u_char *) (RPX_CSR_ADDR + 13)) &= ~BCSR13_FETH_IRQMASK; | ||
2092 | *((volatile u_char *) (RPX_CSR_ADDR + 13)) |= | ||
2093 | ((PHY_INTERRUPT - SIU_INT_IRQ1 + 1) << 4); | ||
2094 | #endif | ||
2095 | /* Initialize MDIO pins. */ | ||
2096 | *((volatile u_char *) (RPX_CSR_ADDR + 4)) &= ~BCSR4_MII_MDC; | ||
2097 | *((volatile u_char *) (RPX_CSR_ADDR + 4)) |= | ||
2098 | BCSR4_MII_READ | BCSR4_MII_MDIO; | ||
2099 | /* Enable external LXT971 PHY. */ | ||
2100 | *((volatile u_char *) (RPX_CSR_ADDR + 4)) |= BCSR4_EN_PHY; | ||
2101 | udelay(1000); | ||
2102 | *((volatile u_char *) (RPX_CSR_ADDR+ 4)) |= BCSR4_EN_MII; | ||
2103 | udelay(1000); | ||
2104 | #endif /* ifdef CONFIG_RPX8260 */ | ||
2105 | |||
2106 | fccp->fcc_fcce = 0xffff; /* Clear any pending events */ | ||
2107 | |||
2108 | /* Leave FCC interrupts masked for now. Will be unmasked by | ||
2109 | * fcc_restart(). | ||
2110 | */ | ||
2111 | fccp->fcc_fccm = 0; | ||
2112 | |||
2113 | /* Install our interrupt handler. | ||
2114 | */ | ||
2115 | if (request_irq(fip->fc_interrupt, fcc_enet_interrupt, 0, "fenet", | ||
2116 | dev) < 0) | ||
2117 | printk("Can't get FCC IRQ %d\n", fip->fc_interrupt); | ||
2118 | |||
2119 | #ifdef PHY_INTERRUPT | ||
2120 | #ifdef CONFIG_ADS8272 | ||
2121 | if (request_irq(PHY_INTERRUPT, mii_link_interrupt, SA_SHIRQ, | ||
2122 | "mii", dev) < 0) | ||
2123 | printk(KERN_CRIT "Can't get MII IRQ %d\n", PHY_INTERRUPT); | ||
2124 | #else | ||
2125 | /* Make IRQn edge triggered. This does not work if PHY_INTERRUPT is | ||
2126 | * on Port C. | ||
2127 | */ | ||
2128 | ((volatile cpm2_map_t *) CPM_MAP_ADDR)->im_intctl.ic_siexr |= | ||
2129 | (1 << (14 - (PHY_INTERRUPT - SIU_INT_IRQ1))); | ||
2130 | |||
2131 | if (request_irq(PHY_INTERRUPT, mii_link_interrupt, 0, | ||
2132 | "mii", dev) < 0) | ||
2133 | printk(KERN_CRIT "Can't get MII IRQ %d\n", PHY_INTERRUPT); | ||
2134 | #endif | ||
2135 | #endif /* PHY_INTERRUPT */ | ||
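
	/*
	 * Illustration only: the SIEXR bit computed above selects
	 * edge-sensitive mode for the external IRQ line used by the PHY.
	 * For example, if PHY_INTERRUPT were SIU_INT_IRQ5, the expression
	 * would set bit 1 << (14 - 4), i.e. bit 10, in im_intctl.ic_siexr.
	 */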
2136 | |||
2137 | /* Set GFMR to enable Ethernet operating mode. | ||
2138 | */ | ||
2139 | fccp->fcc_gfmr = (FCC_GFMR_TCI | FCC_GFMR_MODE_ENET); | ||
2140 | |||
2141 | /* Set sync/delimiters. | ||
2142 | */ | ||
2143 | fccp->fcc_fdsr = 0xd555; | ||
2144 | |||
2145 | /* Set protocol specific processing mode for Ethernet. | ||
2146 | * This will have to be adjusted for full-duplex operation once we | ||
2147 | * can determine how to detect that. | ||
2148 | */ | ||
2149 | fccp->fcc_fpsmr = FCC_PSMR_ENCRC; | ||
2150 | |||
2151 | #ifdef CONFIG_PQ2ADS | ||
2152 | /* Enable the PHY. */ | ||
2153 | *(volatile uint *)(BCSR_ADDR + 4) &= ~BCSR1_FETHIEN; | ||
2154 | *(volatile uint *)(BCSR_ADDR + 4) |= BCSR1_FETH_RST; | ||
2155 | #endif | ||
2156 | #if defined(CONFIG_PQ2ADS) || defined(CONFIG_PQ2FADS) | ||
2157 | /* Enable the 2nd PHY. */ | ||
2158 | *(volatile uint *)(BCSR_ADDR + 12) &= ~BCSR3_FETHIEN2; | ||
2159 | *(volatile uint *)(BCSR_ADDR + 12) |= BCSR3_FETH2_RST; | ||
2160 | #endif | ||
2161 | |||
2162 | #if defined(CONFIG_USE_MDIO) || defined(CONFIG_TQM8260) | ||
2163 | /* start in full duplex mode, and negotiate speed | ||
2164 | */ | ||
2165 | fcc_restart (dev, 1); | ||
2166 | #else | ||
2167 | /* start in half duplex mode | ||
2168 | */ | ||
2169 | fcc_restart (dev, 0); | ||
2170 | #endif | ||
2171 | } | ||
2172 | |||
2173 | #ifdef CONFIG_USE_MDIO | ||
2174 | /* MII command/status interface. | ||
2175 | * I'm not going to describe all of the details. You can find the | ||
2176 | * protocol definition in many other places, including the data sheets | ||
2177 | * of most PHY parts. | ||
2178 | * I wonder what "they" were thinking (maybe they weren't) when they | ||
2179 | * left the I2C in the CPM but made me toggle these bits by hand. | ||
2180 | */ | ||
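
/*
 * Illustrative sketch, not part of this driver: the layout of the
 * 32-bit IEEE 802.3 clause-22 management frame that mii_send_receive()
 * below shifts out on MDIO.  mk_mii_frame() is a hypothetical helper;
 * the driver builds its command words with its own macros elsewhere.
 *
 *   bits 31-30  start bits (01)
 *   bits 29-28  opcode (10 = read, 01 = write)
 *   bits 27-23  PHY address
 *   bits 22-18  register address
 *   bits 17-16  turnaround (driven only for writes)
 *   bits 15-0   data (writes only)
 *
 * A read frame therefore starts with 0110 (0x6) in the top nibble,
 * which is exactly what mii_send_receive() tests for, and only its
 * first 14 bits (start + opcode + addresses) are driven before the
 * line is turned around to the PHY.
 */
static uint
mk_mii_frame(int read_op, uint phy_addr, uint reg_addr, uint val)
{
	uint cmd = 0x40000000;				/* start bits 01 */

	cmd |= (read_op ? 2 : 1) << 28;			/* opcode */
	cmd |= (phy_addr & 0x1f) << 23;			/* PHY address */
	cmd |= (reg_addr & 0x1f) << 18;			/* register */
	if (!read_op)
		cmd |= (2 << 16) | (val & 0xffff);	/* TA + data */
	return cmd;
}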
2181 | #ifdef CONFIG_RPX8260 | ||
2182 | /* The EP8260 has the MDIO pins in a BCSR instead of on Port C | ||
2183 | * like most other boards. | ||
2184 | */ | ||
2185 | #define MDIO_ADDR ((volatile u_char *)(RPX_CSR_ADDR + 4)) | ||
2186 | #define MAKE_MDIO_OUTPUT *MDIO_ADDR &= ~BCSR4_MII_READ | ||
2187 | #define MAKE_MDIO_INPUT *MDIO_ADDR |= BCSR4_MII_READ | BCSR4_MII_MDIO | ||
2188 | #define OUT_MDIO(bit) \ | ||
2189 | if (bit) \ | ||
2190 | *MDIO_ADDR |= BCSR4_MII_MDIO; \ | ||
2191 | else \ | ||
2192 | *MDIO_ADDR &= ~BCSR4_MII_MDIO; | ||
2193 | #define IN_MDIO (*MDIO_ADDR & BCSR4_MII_MDIO) | ||
2194 | #define OUT_MDC(bit) \ | ||
2195 | if (bit) \ | ||
2196 | *MDIO_ADDR |= BCSR4_MII_MDC; \ | ||
2197 | else \ | ||
2198 | *MDIO_ADDR &= ~BCSR4_MII_MDC; | ||
2199 | #else /* ifdef CONFIG_RPX8260 */ | ||
2200 | /* This is for the usual case where the MDIO pins are on Port C. | ||
2201 | */ | ||
2202 | #define MDIO_ADDR (((volatile cpm2_map_t *)CPM_MAP_ADDR)->im_ioport) | ||
2203 | #define MAKE_MDIO_OUTPUT MDIO_ADDR.iop_pdirc |= fip->fc_mdio | ||
2204 | #define MAKE_MDIO_INPUT MDIO_ADDR.iop_pdirc &= ~fip->fc_mdio | ||
2205 | #define OUT_MDIO(bit) \ | ||
2206 | if (bit) \ | ||
2207 | MDIO_ADDR.iop_pdatc |= fip->fc_mdio; \ | ||
2208 | else \ | ||
2209 | MDIO_ADDR.iop_pdatc &= ~fip->fc_mdio; | ||
2210 | #define IN_MDIO ((MDIO_ADDR.iop_pdatc) & fip->fc_mdio) | ||
2211 | #define OUT_MDC(bit) \ | ||
2212 | if (bit) \ | ||
2213 | MDIO_ADDR.iop_pdatc |= fip->fc_mdck; \ | ||
2214 | else \ | ||
2215 | MDIO_ADDR.iop_pdatc &= ~fip->fc_mdck; | ||
2216 | #endif /* ifdef CONFIG_RPX8260 */ | ||
2217 | |||
2218 | static uint | ||
2219 | mii_send_receive(fcc_info_t *fip, uint cmd) | ||
2220 | { | ||
2221 | uint retval; | ||
2222 | int read_op, i, off; | ||
2223 | const int us = 1; | ||
2224 | |||
2225 | read_op = ((cmd & 0xf0000000) == 0x60000000); | ||
2226 | |||
2227 | /* Write preamble | ||
2228 | */ | ||
2229 | OUT_MDIO(1); | ||
2230 | MAKE_MDIO_OUTPUT; | ||
2231 | OUT_MDIO(1); | ||
2232 | for (i = 0; i < 32; i++) | ||
2233 | { | ||
2234 | udelay(us); | ||
2235 | OUT_MDC(1); | ||
2236 | udelay(us); | ||
2237 | OUT_MDC(0); | ||
2238 | } | ||
2239 | |||
2240 | /* Write data | ||
2241 | */ | ||
2242 | for (i = 0, off = 31; i < (read_op ? 14 : 32); i++, --off) | ||
2243 | { | ||
2244 | OUT_MDIO((cmd >> off) & 0x00000001); | ||
2245 | udelay(us); | ||
2246 | OUT_MDC(1); | ||
2247 | udelay(us); | ||
2248 | OUT_MDC(0); | ||
2249 | } | ||
2250 | |||
2251 | retval = cmd; | ||
2252 | |||
2253 | if (read_op) | ||
2254 | { | ||
2255 | retval >>= 16; | ||
2256 | |||
2257 | MAKE_MDIO_INPUT; | ||
2258 | udelay(us); | ||
2259 | OUT_MDC(1); | ||
2260 | udelay(us); | ||
2261 | OUT_MDC(0); | ||
2262 | |||
2263 | for (i = 0; i < 16; i++) | ||
2264 | { | ||
2265 | udelay(us); | ||
2266 | OUT_MDC(1); | ||
2267 | udelay(us); | ||
2268 | retval <<= 1; | ||
2269 | if (IN_MDIO) | ||
2270 | retval++; | ||
2271 | OUT_MDC(0); | ||
2272 | } | ||
2273 | } | ||
2274 | |||
2275 | MAKE_MDIO_INPUT; | ||
2276 | udelay(us); | ||
2277 | OUT_MDC(1); | ||
2278 | udelay(us); | ||
2279 | OUT_MDC(0); | ||
2280 | |||
2281 | return retval; | ||
2282 | } | ||
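
/*
 * Illustrative usage sketch, not part of this driver: reading a PHY
 * register through mii_send_receive() with the hypothetical
 * mk_mii_frame() helper sketched above.  Register 1 is the standard MII
 * status register (BMSR); bit 2 (0x0004) reports link status.
 */
static int
phy_link_up(fcc_info_t *fip, uint phy_addr)
{
	uint bmsr;

	/* Clause-22 read of register 1 from the given PHY address. */
	bmsr = mii_send_receive(fip, mk_mii_frame(1, phy_addr, 1, 0));

	return (bmsr & 0x0004) != 0;
}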
2283 | #endif /* CONFIG_USE_MDIO */ | ||
2284 | |||
2285 | static void | ||
2286 | fcc_stop(struct net_device *dev) | ||
2287 | { | ||
2288 | struct fcc_enet_private *fep= (struct fcc_enet_private *)(dev->priv); | ||
2289 | volatile fcc_t *fccp = fep->fccp; | ||
2290 | fcc_info_t *fip = fep->fip; | ||
2291 | volatile fcc_enet_t *ep = fep->ep; | ||
2292 | volatile cpm_cpm2_t *cp = cpmp; | ||
2293 | volatile cbd_t *bdp; | ||
2294 | int i; | ||
2295 | |||
2296 | if ((fccp->fcc_gfmr & (FCC_GFMR_ENR | FCC_GFMR_ENT)) == 0) | ||
2297 | return; /* already down */ | ||
2298 | |||
2299 | fccp->fcc_fccm = 0; | ||
2300 | |||
2301 | /* issue the graceful stop tx command */ | ||
2302 | while (cp->cp_cpcr & CPM_CR_FLG); | ||
2303 | cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock, | ||
2304 | 0x0c, CPM_CR_GRA_STOP_TX) | CPM_CR_FLG; | ||
2305 | while (cp->cp_cpcr & CPM_CR_FLG); | ||
2306 | |||
2307 | /* Disable transmit/receive */ | ||
2308 | fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT); | ||
2309 | |||
2310 | /* issue the restart tx command */ | ||
2311 | fccp->fcc_fcce = FCC_ENET_GRA; | ||
2312 | while (cp->cp_cpcr & CPM_CR_FLG); | ||
2313 | cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock, | ||
2314 | 0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG; | ||
2315 | while (cp->cp_cpcr & CPM_CR_FLG); | ||
2316 | |||
2317 | /* free tx buffers */ | ||
2318 | fep->skb_cur = fep->skb_dirty = 0; | ||
2319 | for (i=0; i<=TX_RING_MOD_MASK; i++) { | ||
2320 | if (fep->tx_skbuff[i] != NULL) { | ||
2321 | dev_kfree_skb(fep->tx_skbuff[i]); | ||
2322 | fep->tx_skbuff[i] = NULL; | ||
2323 | } | ||
2324 | } | ||
2325 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; | ||
2326 | fep->tx_free = TX_RING_SIZE; | ||
2327 | ep->fen_genfcc.fcc_tbptr = ep->fen_genfcc.fcc_tbase; | ||
2328 | |||
2329 | /* Initialize the tx buffer descriptors. */ | ||
2330 | bdp = fep->tx_bd_base; | ||
2331 | for (i=0; i<TX_RING_SIZE; i++) { | ||
2332 | bdp->cbd_sc = 0; | ||
2333 | bdp->cbd_datlen = 0; | ||
2334 | bdp->cbd_bufaddr = 0; | ||
2335 | bdp++; | ||
2336 | } | ||
2337 | /* Set the last buffer to wrap. */ | ||
2338 | bdp--; | ||
2339 | bdp->cbd_sc |= BD_SC_WRAP; | ||
2340 | } | ||
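
/*
 * Illustrative sketch, not part of this driver: the "post a CPM command
 * and spin until the communications processor has consumed it" pattern
 * that fcc_stop() above uses for CPM_CR_GRA_STOP_TX and
 * CPM_CR_RESTART_TX, and that init_fcc_param() uses for CPM_CR_INIT_TRX.
 * cpm2_fcc_cmd() is a hypothetical helper name; the 0x0c value simply
 * matches the channel field this driver passes to mk_cr_cmd() elsewhere.
 */
static void
cpm2_fcc_cmd(volatile cpm_cpm2_t *cp, fcc_info_t *fip, uint op)
{
	/* Wait for any previous command to be accepted. */
	while (cp->cp_cpcr & CPM_CR_FLG);

	/* Post the new command for this FCC's CPM page/block. */
	cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock,
				0x0c, op) | CPM_CR_FLG;

	/* The CP clears the flag when the command has completed. */
	while (cp->cp_cpcr & CPM_CR_FLG);
}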
2341 | |||
2342 | static void | ||
2343 | fcc_restart(struct net_device *dev, int duplex) | ||
2344 | { | ||
2345 | struct fcc_enet_private *fep = (struct fcc_enet_private *)(dev->priv); | ||
2346 | volatile fcc_t *fccp = fep->fccp; | ||
2347 | |||
2348 | /* stop any transmissions in progress */ | ||
2349 | fcc_stop(dev); | ||
2350 | |||
2351 | if (duplex) | ||
2352 | fccp->fcc_fpsmr |= FCC_PSMR_FDE | FCC_PSMR_LPB; | ||
2353 | else | ||
2354 | fccp->fcc_fpsmr &= ~(FCC_PSMR_FDE | FCC_PSMR_LPB); | ||
2355 | |||
2356 | /* Enable interrupts for transmit errors, completed received | ||
2357 | * frames, and any transmit buffers for which we have also set | ||
2358 | * the interrupt flag. | ||
2359 | */ | ||
2360 | fccp->fcc_fccm = (FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB); | ||
2361 | |||
2362 | /* Enable transmit/receive */ | ||
2363 | fccp->fcc_gfmr |= FCC_GFMR_ENR | FCC_GFMR_ENT; | ||
2364 | } | ||
2365 | |||
2366 | static int | ||
2367 | fcc_enet_open(struct net_device *dev) | ||
2368 | { | ||
2369 | struct fcc_enet_private *fep = dev->priv; | ||
2370 | |||
2371 | #ifdef CONFIG_USE_MDIO | ||
2372 | fep->sequence_done = 0; | ||
2373 | fep->link = 0; | ||
2374 | |||
2375 | if (fep->phy) { | ||
2376 | fcc_restart(dev, 0); /* always start in half-duplex */ | ||
2377 | mii_do_cmd(dev, fep->phy->ack_int); | ||
2378 | mii_do_cmd(dev, fep->phy->config); | ||
2379 | mii_do_cmd(dev, phy_cmd_config); /* display configuration */ | ||
2380 | while(!fep->sequence_done) | ||
2381 | schedule(); | ||
2382 | |||
2383 | mii_do_cmd(dev, fep->phy->startup); | ||
2384 | netif_start_queue(dev); | ||
2385 | return 0; /* Success */ | ||
2386 | } | ||
2387 | return -ENODEV; /* No PHY we understand */ | ||
2388 | #else | ||
2389 | fep->link = 1; | ||
2390 | fcc_restart(dev, 0); /* always start in half-duplex */ | ||
2391 | netif_start_queue(dev); | ||
2392 | return 0; /* Always succeed */ | ||
2393 | #endif /* CONFIG_USE_MDIO */ | ||
2394 | } | ||
2395 | |||