author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/mv643xx_eth.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--  drivers/net/mv643xx_eth.c  3033
1 file changed, 3033 insertions, 0 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
new file mode 100644
index 000000000000..d6de213720f4
--- /dev/null
+++ b/drivers/net/mv643xx_eth.c
@@ -0,0 +1,3033 @@
1 | /* | ||
2 | * drivers/net/mv643xx_eth.c - Driver for MV643XX ethernet ports | ||
3 | * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> | ||
4 | * | ||
5 | * Based on the 64360 driver from: | ||
6 | * Copyright (C) 2002 rabeeh@galileo.co.il | ||
7 | * | ||
8 | * Copyright (C) 2003 PMC-Sierra, Inc., | ||
9 | * written by Manish Lachwani (lachwani@pmc-sierra.com) | ||
10 | * | ||
11 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> | ||
12 | * | ||
13 | * Copyright (C) 2004-2005 MontaVista Software, Inc. | ||
14 | * Dale Farnsworth <dale@farnsworth.org> | ||
15 | * | ||
16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> | ||
17 | * <sjhill@realitydiluted.com> | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or | ||
20 | * modify it under the terms of the GNU General Public License | ||
21 | * as published by the Free Software Foundation; either version 2 | ||
22 | * of the License, or (at your option) any later version. | ||
23 | * | ||
24 | * This program is distributed in the hope that it will be useful, | ||
25 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
26 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
27 | * GNU General Public License for more details. | ||
28 | * | ||
29 | * You should have received a copy of the GNU General Public License | ||
30 | * along with this program; if not, write to the Free Software | ||
31 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
32 | */ | ||
33 | #include <linux/init.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/tcp.h> | ||
36 | #include <linux/udp.h> | ||
37 | #include <linux/etherdevice.h> | ||
38 | |||
39 | #include <linux/bitops.h> | ||
40 | #include <linux/delay.h> | ||
41 | #include <linux/ethtool.h> | ||
42 | #include <asm/io.h> | ||
43 | #include <asm/types.h> | ||
44 | #include <asm/pgtable.h> | ||
45 | #include <asm/system.h> | ||
46 | #include <asm/delay.h> | ||
47 | #include "mv643xx_eth.h" | ||
48 | |||
49 | /* | ||
50 | * The first part is the high-level driver of the gigabit ethernet ports. | ||
51 | */ | ||
52 | |||
53 | /* Constants */ | ||
54 | #define VLAN_HLEN 4 | ||
55 | #define FCS_LEN 4 | ||
56 | #define WRAP (NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN) | ||
57 | #define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7) | ||
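/*
 * Worked example (assuming the usual NET_IP_ALIGN of 2 and ETH_HLEN of 14):
 * for the default 1500-byte MTU, WRAP = 2 + 14 + 4 + 4 = 24 bytes, and
 * RX_SKB_SIZE rounds 1500 + 24 = 1524 up to the next 8-byte boundary, 1528.
 */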
58 | |||
59 | #define INT_CAUSE_UNMASK_ALL 0x0007ffff | ||
60 | #define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff | ||
61 | #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK | ||
62 | #define INT_CAUSE_MASK_ALL 0x00000000 | ||
63 | #define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL | ||
64 | #define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT | ||
65 | #endif | ||
66 | |||
67 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
68 | #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) | ||
69 | #else | ||
70 | #define MAX_DESCS_PER_SKB 1 | ||
71 | #endif | ||
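/*
 * Why MAX_SKB_FRAGS + 1: a scatter/gather skb consumes one descriptor per
 * page fragment plus one more for the linear header portion, which
 * mv643xx_eth_start_xmit() below sends as the first descriptor.
 */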
72 | |||
73 | #define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */ | ||
74 | #define PHY_WAIT_MICRO_SECONDS 10 | ||
75 | |||
76 | /* Static function declarations */ | ||
77 | static int eth_port_link_is_up(unsigned int eth_port_num); | ||
78 | static void eth_port_uc_addr_get(struct net_device *dev, | ||
79 | unsigned char *MacAddr); | ||
80 | static int mv643xx_eth_real_open(struct net_device *); | ||
81 | static int mv643xx_eth_real_stop(struct net_device *); | ||
82 | static int mv643xx_eth_change_mtu(struct net_device *, int); | ||
83 | static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *); | ||
84 | static void eth_port_init_mac_tables(unsigned int eth_port_num); | ||
85 | #ifdef MV643XX_NAPI | ||
86 | static int mv643xx_poll(struct net_device *dev, int *budget); | ||
87 | #endif | ||
88 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | ||
89 | static int ethernet_phy_detect(unsigned int eth_port_num); | ||
90 | static struct ethtool_ops mv643xx_ethtool_ops; | ||
91 | |||
92 | static char mv643xx_driver_name[] = "mv643xx_eth"; | ||
93 | static char mv643xx_driver_version[] = "1.0"; | ||
94 | |||
95 | static void __iomem *mv643xx_eth_shared_base; | ||
96 | |||
97 | /* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */ | ||
98 | static spinlock_t mv643xx_eth_phy_lock = SPIN_LOCK_UNLOCKED; | ||
99 | |||
100 | static inline u32 mv_read(int offset) | ||
101 | { | ||
102 | void __iomem *reg_base; | ||
103 | |||
104 | reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS; | ||
105 | |||
106 | return readl(reg_base + offset); | ||
107 | } | ||
108 | |||
109 | static inline void mv_write(int offset, u32 data) | ||
110 | { | ||
111 | void __iomem *reg_base; | ||
112 | |||
113 | reg_base = mv643xx_eth_shared_base - MV643XX_ETH_SHARED_REGS; | ||
114 | writel(data, reg_base + offset); | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * Changes MTU (maximum transmission unit) of the gigabit ethernet port | ||
119 | * | ||
120 | * Input : pointer to ethernet interface network device structure | ||
121 | * new mtu size | ||
122 | * Output : 0 upon success, -EINVAL upon failure | ||
123 | */ | ||
124 | static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) | ||
125 | { | ||
126 | struct mv643xx_private *mp = netdev_priv(dev); | ||
127 | unsigned long flags; | ||
128 | |||
129 | spin_lock_irqsave(&mp->lock, flags); | ||
130 | |||
131 | if ((new_mtu > 9500) || (new_mtu < 64)) { | ||
132 | spin_unlock_irqrestore(&mp->lock, flags); | ||
133 | return -EINVAL; | ||
134 | } | ||
135 | |||
136 | dev->mtu = new_mtu; | ||
137 | /* | ||
138 | * Stop then re-open the interface. This will allocate RX skb's with | ||
139 | * the new MTU. | ||
140 | * There is a possible danger that the open will not succeed if | ||
141 | * memory is exhausted, causing the open call to fail. | ||
142 | */ | ||
143 | if (netif_running(dev)) { | ||
144 | if (mv643xx_eth_real_stop(dev)) | ||
145 | printk(KERN_ERR | ||
146 | "%s: Fatal error on stopping device\n", | ||
147 | dev->name); | ||
148 | if (mv643xx_eth_real_open(dev)) | ||
149 | printk(KERN_ERR | ||
150 | "%s: Fatal error on opening device\n", | ||
151 | dev->name); | ||
152 | } | ||
153 | |||
154 | spin_unlock_irqrestore(&mp->lock, flags); | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * mv643xx_eth_rx_task | ||
160 | * | ||
161 | * Fills / refills RX queue on a certain gigabit ethernet port | ||
162 | * | ||
163 | * Input : pointer to ethernet interface network device structure | ||
164 | * Output : N/A | ||
165 | */ | ||
166 | static void mv643xx_eth_rx_task(void *data) | ||
167 | { | ||
168 | struct net_device *dev = (struct net_device *)data; | ||
169 | struct mv643xx_private *mp = netdev_priv(dev); | ||
170 | struct pkt_info pkt_info; | ||
171 | struct sk_buff *skb; | ||
172 | |||
173 | if (test_and_set_bit(0, &mp->rx_task_busy)) | ||
174 | panic("%s: Error in test_set_bit / clear_bit", dev->name); | ||
175 | |||
176 | while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) { | ||
177 | skb = dev_alloc_skb(RX_SKB_SIZE); | ||
178 | if (!skb) | ||
179 | break; | ||
180 | mp->rx_ring_skbs++; | ||
181 | pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; | ||
182 | pkt_info.byte_cnt = RX_SKB_SIZE; | ||
183 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE, | ||
184 | DMA_FROM_DEVICE); | ||
185 | pkt_info.return_info = skb; | ||
186 | if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) { | ||
187 | printk(KERN_ERR | ||
188 | "%s: Error allocating RX Ring\n", dev->name); | ||
189 | break; | ||
190 | } | ||
191 | skb_reserve(skb, 2); | ||
192 | } | ||
193 | clear_bit(0, &mp->rx_task_busy); | ||
194 | /* | ||
195 | * If the RX ring is empty of skbs, set a timer to try allocating | ||
196 | * again at a later time. | ||
197 | */ | ||
198 | if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) { | ||
199 | printk(KERN_INFO "%s: Rx ring is empty\n", dev->name); | ||
200 | /* After 100mSec */ | ||
201 | mp->timeout.expires = jiffies + (HZ / 10); | ||
202 | add_timer(&mp->timeout); | ||
203 | mp->rx_timer_flag = 1; | ||
204 | } | ||
205 | #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK | ||
206 | else { | ||
207 | /* Return interrupts */ | ||
208 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num), | ||
209 | INT_CAUSE_UNMASK_ALL); | ||
210 | } | ||
211 | #endif | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * mv643xx_eth_rx_task_timer_wrapper | ||
216 | * | ||
217 | * Timer routine to wake up the RX queue filling task. This function | ||
218 | * is used only when the RX queue is empty and all skb allocations | ||
219 | * have failed (due to an out-of-memory event). | ||
220 | * | ||
221 | * Input : pointer to ethernet interface network device structure | ||
222 | * Output : N/A | ||
223 | */ | ||
224 | static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data) | ||
225 | { | ||
226 | struct net_device *dev = (struct net_device *)data; | ||
227 | struct mv643xx_private *mp = netdev_priv(dev); | ||
228 | |||
229 | mp->rx_timer_flag = 0; | ||
230 | mv643xx_eth_rx_task((void *)data); | ||
231 | } | ||
232 | |||
233 | /* | ||
234 | * mv643xx_eth_update_mac_address | ||
235 | * | ||
236 | * Update the MAC address of the port in the address table | ||
237 | * | ||
238 | * Input : pointer to ethernet interface network device structure | ||
239 | * Output : N/A | ||
240 | */ | ||
241 | static void mv643xx_eth_update_mac_address(struct net_device *dev) | ||
242 | { | ||
243 | struct mv643xx_private *mp = netdev_priv(dev); | ||
244 | unsigned int port_num = mp->port_num; | ||
245 | |||
246 | eth_port_init_mac_tables(port_num); | ||
247 | memcpy(mp->port_mac_addr, dev->dev_addr, 6); | ||
248 | eth_port_uc_addr_set(port_num, mp->port_mac_addr); | ||
249 | } | ||
250 | |||
251 | /* | ||
252 | * mv643xx_eth_set_rx_mode | ||
253 | * | ||
254 | * Change from promiscuous to regular rx mode | ||
255 | * | ||
256 | * Input : pointer to ethernet interface network device structure | ||
257 | * Output : N/A | ||
258 | */ | ||
259 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) | ||
260 | { | ||
261 | struct mv643xx_private *mp = netdev_priv(dev); | ||
262 | u32 config_reg; | ||
263 | |||
264 | config_reg = ethernet_get_config_reg(mp->port_num); | ||
265 | if (dev->flags & IFF_PROMISC) | ||
266 | config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | ||
267 | else | ||
268 | config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | ||
269 | ethernet_set_config_reg(mp->port_num, config_reg); | ||
270 | } | ||
271 | |||
272 | /* | ||
273 | * mv643xx_eth_set_mac_address | ||
274 | * | ||
275 | * Change the interface's mac address. | ||
276 | * No special hardware handling is needed because the interface is | ||
277 | * always put in promiscuous mode. | ||
278 | * | ||
279 | * Input : pointer to ethernet interface network device structure and | ||
280 | * a pointer to the designated entry to be added to the cache. | ||
281 | * Output : zero upon success, negative upon failure | ||
282 | */ | ||
283 | static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr) | ||
284 | { | ||
285 | int i; | ||
286 | |||
287 | for (i = 0; i < 6; i++) | ||
288 | /* +2 is for the offset of the HW addr type */ | ||
289 | dev->dev_addr[i] = ((unsigned char *)addr)[i + 2]; | ||
290 | mv643xx_eth_update_mac_address(dev); | ||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * mv643xx_eth_tx_timeout | ||
296 | * | ||
297 | * Called upon a timeout on transmitting a packet | ||
298 | * | ||
299 | * Input : pointer to ethernet interface network device structure. | ||
300 | * Output : N/A | ||
301 | */ | ||
302 | static void mv643xx_eth_tx_timeout(struct net_device *dev) | ||
303 | { | ||
304 | struct mv643xx_private *mp = netdev_priv(dev); | ||
305 | |||
306 | printk(KERN_INFO "%s: TX timeout ", dev->name); | ||
307 | |||
308 | /* Do the reset outside of interrupt context */ | ||
309 | schedule_work(&mp->tx_timeout_task); | ||
310 | } | ||
311 | |||
312 | /* | ||
313 | * mv643xx_eth_tx_timeout_task | ||
314 | * | ||
315 | * Actual routine to reset the adapter when a timeout on Tx has occurred | ||
316 | */ | ||
317 | static void mv643xx_eth_tx_timeout_task(struct net_device *dev) | ||
318 | { | ||
319 | struct mv643xx_private *mp = netdev_priv(dev); | ||
320 | |||
321 | netif_device_detach(dev); | ||
322 | eth_port_reset(mp->port_num); | ||
323 | eth_port_start(mp); | ||
324 | netif_device_attach(dev); | ||
325 | } | ||
326 | |||
327 | /* | ||
328 | * mv643xx_eth_free_tx_queue | ||
329 | * | ||
330 | * Input : dev - a pointer to the required interface | ||
331 | * | ||
332 | * Output : 0 if was able to release skb , nonzero otherwise | ||
333 | */ | ||
334 | static int mv643xx_eth_free_tx_queue(struct net_device *dev, | ||
335 | unsigned int eth_int_cause_ext) | ||
336 | { | ||
337 | struct mv643xx_private *mp = netdev_priv(dev); | ||
338 | struct net_device_stats *stats = &mp->stats; | ||
339 | struct pkt_info pkt_info; | ||
340 | int released = 1; | ||
341 | |||
342 | if (!(eth_int_cause_ext & (BIT0 | BIT8))) | ||
343 | return released; | ||
344 | |||
345 | spin_lock(&mp->lock); | ||
346 | |||
347 | /* Check only queue 0 */ | ||
348 | while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { | ||
349 | if (pkt_info.cmd_sts & BIT0) { | ||
350 | printk(KERN_ERR "%s: Error in TX\n", dev->name); | ||
351 | stats->tx_errors++; | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * If return_info is non-zero, release the skb. | ||
356 | * return_info is non-zero only for the last descriptor of | ||
357 | * a scatter/gather packet, where only the last skb | ||
358 | * releases the whole chain. | ||
359 | */ | ||
360 | if (pkt_info.return_info) { | ||
361 | if (skb_shinfo(pkt_info.return_info)->nr_frags) | ||
362 | dma_unmap_page(NULL, pkt_info.buf_ptr, | ||
363 | pkt_info.byte_cnt, | ||
364 | DMA_TO_DEVICE); | ||
365 | else | ||
366 | dma_unmap_single(NULL, pkt_info.buf_ptr, | ||
367 | pkt_info.byte_cnt, | ||
368 | DMA_TO_DEVICE); | ||
369 | |||
370 | dev_kfree_skb_irq(pkt_info.return_info); | ||
371 | released = 0; | ||
372 | |||
373 | /* | ||
374 | * Decrement the number of outstanding skbs counter on | ||
375 | * the TX queue. | ||
376 | */ | ||
377 | if (mp->tx_ring_skbs == 0) | ||
378 | panic("ERROR - TX outstanding SKBs" | ||
379 | " counter is corrupted"); | ||
380 | mp->tx_ring_skbs--; | ||
381 | } else | ||
382 | dma_unmap_page(NULL, pkt_info.buf_ptr, | ||
383 | pkt_info.byte_cnt, DMA_TO_DEVICE); | ||
384 | } | ||
385 | |||
386 | spin_unlock(&mp->lock); | ||
387 | |||
388 | return released; | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * mv643xx_eth_receive | ||
393 | * | ||
394 | * This function forwards packets received from the port's queues | ||
395 | * to the kernel core, or FastRoutes them to another interface. | ||
396 | * | ||
397 | * Input : dev - a pointer to the required interface | ||
398 | * max - maximum number to receive (0 means unlimited) | ||
399 | * | ||
400 | * Output : number of served packets | ||
401 | */ | ||
402 | #ifdef MV643XX_NAPI | ||
403 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | ||
404 | #else | ||
405 | static int mv643xx_eth_receive_queue(struct net_device *dev) | ||
406 | #endif | ||
407 | { | ||
408 | struct mv643xx_private *mp = netdev_priv(dev); | ||
409 | struct net_device_stats *stats = &mp->stats; | ||
410 | unsigned int received_packets = 0; | ||
411 | struct sk_buff *skb; | ||
412 | struct pkt_info pkt_info; | ||
413 | |||
414 | #ifdef MV643XX_NAPI | ||
415 | while (eth_port_receive(mp, &pkt_info) == ETH_OK && budget > 0) { | ||
416 | #else | ||
417 | while (eth_port_receive(mp, &pkt_info) == ETH_OK) { | ||
418 | #endif | ||
419 | mp->rx_ring_skbs--; | ||
420 | received_packets++; | ||
421 | #ifdef MV643XX_NAPI | ||
422 | budget--; | ||
423 | #endif | ||
424 | /* Update statistics. Note: byte count includes the 4-byte CRC */ | ||
425 | stats->rx_packets++; | ||
426 | stats->rx_bytes += pkt_info.byte_cnt; | ||
427 | skb = pkt_info.return_info; | ||
428 | /* | ||
429 | * If a packet was received without the first/last bits set, or | ||
430 | * with the error summary bit set, the packet needs to be dropped. | ||
431 | */ | ||
432 | if (((pkt_info.cmd_sts | ||
433 | & (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) != | ||
434 | (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) | ||
435 | || (pkt_info.cmd_sts & ETH_ERROR_SUMMARY)) { | ||
436 | stats->rx_dropped++; | ||
437 | if ((pkt_info.cmd_sts & (ETH_RX_FIRST_DESC | | ||
438 | ETH_RX_LAST_DESC)) != | ||
439 | (ETH_RX_FIRST_DESC | ETH_RX_LAST_DESC)) { | ||
440 | if (net_ratelimit()) | ||
441 | printk(KERN_ERR | ||
442 | "%s: Received packet spread " | ||
443 | "on multiple descriptors\n", | ||
444 | dev->name); | ||
445 | } | ||
446 | if (pkt_info.cmd_sts & ETH_ERROR_SUMMARY) | ||
447 | stats->rx_errors++; | ||
448 | |||
449 | dev_kfree_skb_irq(skb); | ||
450 | } else { | ||
451 | /* | ||
452 | * The -4 is for the CRC in the trailer of the | ||
453 | * received packet | ||
454 | */ | ||
455 | skb_put(skb, pkt_info.byte_cnt - 4); | ||
456 | skb->dev = dev; | ||
457 | |||
458 | if (pkt_info.cmd_sts & ETH_LAYER_4_CHECKSUM_OK) { | ||
459 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
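/* Bits 3..18 of cmd_sts (the 0x0007fff8 mask below) carry the
 * hardware-computed 16-bit frame checksum. */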
460 | skb->csum = htons( | ||
461 | (pkt_info.cmd_sts & 0x0007fff8) >> 3); | ||
462 | } | ||
463 | skb->protocol = eth_type_trans(skb, dev); | ||
464 | #ifdef MV643XX_NAPI | ||
465 | netif_receive_skb(skb); | ||
466 | #else | ||
467 | netif_rx(skb); | ||
468 | #endif | ||
469 | } | ||
470 | } | ||
471 | |||
472 | return received_packets; | ||
473 | } | ||
474 | |||
475 | /* | ||
476 | * mv643xx_eth_int_handler | ||
477 | * | ||
478 | * Main interrupt handler for the gigabit ethernet ports | ||
479 | * | ||
480 | * Input : irq - irq number (not used) | ||
481 | * dev_id - a pointer to the required interface's data structure | ||
482 | * regs - not used | ||
483 | * Output : N/A | ||
484 | */ | ||
485 | |||
486 | static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id, | ||
487 | struct pt_regs *regs) | ||
488 | { | ||
489 | struct net_device *dev = (struct net_device *)dev_id; | ||
490 | struct mv643xx_private *mp = netdev_priv(dev); | ||
491 | u32 eth_int_cause, eth_int_cause_ext = 0; | ||
492 | unsigned int port_num = mp->port_num; | ||
493 | |||
494 | /* Read interrupt cause registers */ | ||
495 | eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & | ||
496 | INT_CAUSE_UNMASK_ALL; | ||
497 | |||
498 | if (eth_int_cause & BIT1) | ||
499 | eth_int_cause_ext = mv_read( | ||
500 | MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & | ||
501 | INT_CAUSE_UNMASK_ALL_EXT; | ||
502 | |||
503 | #ifdef MV643XX_NAPI | ||
504 | if (!(eth_int_cause & 0x0007fffd)) { | ||
505 | /* Don't ack the Rx interrupt */ | ||
506 | #endif | ||
507 | /* | ||
508 | * Clear specific ethernet port interrupt registers by | ||
509 | * acknowledging the relevant bits. | ||
510 | */ | ||
511 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), | ||
512 | ~eth_int_cause); | ||
513 | if (eth_int_cause_ext != 0x0) | ||
514 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG | ||
515 | (port_num), ~eth_int_cause_ext); | ||
516 | |||
517 | /* UDP change : We may need this */ | ||
518 | if ((eth_int_cause_ext & 0x0000ffff) && | ||
519 | (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) && | ||
520 | (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)) | ||
521 | netif_wake_queue(dev); | ||
522 | #ifdef MV643XX_NAPI | ||
523 | } else { | ||
524 | if (netif_rx_schedule_prep(dev)) { | ||
525 | /* Mask all the interrupts */ | ||
526 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); | ||
527 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG | ||
528 | (port_num), 0); | ||
529 | __netif_rx_schedule(dev); | ||
530 | } | ||
531 | #else | ||
532 | if (eth_int_cause & (BIT2 | BIT11)) | ||
533 | mv643xx_eth_receive_queue(dev, 0); | ||
534 | |||
535 | /* | ||
536 | * After forwarding received packets to the upper layers, add a | ||
537 | * task in an interrupt-enabled context that refills the RX ring | ||
538 | * with skbs. | ||
539 | */ | ||
540 | #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK | ||
541 | /* Unmask all interrupts on ethernet port */ | ||
542 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | ||
543 | INT_CAUSE_MASK_ALL); | ||
544 | queue_task(&mp->rx_task, &tq_immediate); | ||
545 | mark_bh(IMMEDIATE_BH); | ||
546 | #else | ||
547 | mp->rx_task.func(dev); | ||
548 | #endif | ||
549 | #endif | ||
550 | } | ||
551 | /* PHY status changed */ | ||
552 | if (eth_int_cause_ext & (BIT16 | BIT20)) { | ||
553 | if (eth_port_link_is_up(port_num)) { | ||
554 | netif_carrier_on(dev); | ||
555 | netif_wake_queue(dev); | ||
556 | /* Start TX queue */ | ||
557 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG | ||
558 | (port_num), 1); | ||
559 | } else { | ||
560 | netif_carrier_off(dev); | ||
561 | netif_stop_queue(dev); | ||
562 | } | ||
563 | } | ||
564 | |||
565 | /* | ||
566 | * If no real interrupt occurred, exit. | ||
567 | * This can happen when using the gigE interrupt coalescing mechanism. | ||
568 | */ | ||
569 | if ((eth_int_cause == 0x0) && (eth_int_cause_ext == 0x0)) | ||
570 | return IRQ_NONE; | ||
571 | |||
572 | return IRQ_HANDLED; | ||
573 | } | ||
574 | |||
575 | #ifdef MV643XX_COAL | ||
576 | |||
577 | /* | ||
578 | * eth_port_set_rx_coal - Sets coalescing interrupt mechanism on RX path | ||
579 | * | ||
580 | * DESCRIPTION: | ||
581 | * This routine sets the RX coalescing interrupt mechanism parameter. | ||
582 | * The parameter is a timeout counter that counts in chunks of | ||
583 | * 64 t_clk cycles; when the timeout expires, a maskable | ||
584 | * interrupt is asserted. | ||
585 | * The parameter is calculated from the t_clk frequency of the | ||
586 | * MV-643xx chip and the required interrupt delay in usec. | ||
587 | * | ||
588 | * INPUT: | ||
589 | * unsigned int eth_port_num Ethernet port number | ||
590 | * unsigned int t_clk t_clk of the MV-643xx chip in HZ units | ||
591 | * unsigned int delay Delay in usec | ||
592 | * | ||
593 | * OUTPUT: | ||
594 | * Interrupt coalescing mechanism value is set in MV-643xx chip. | ||
595 | * | ||
596 | * RETURN: | ||
597 | * The interrupt coalescing value set in the gigE port. | ||
598 | * | ||
599 | */ | ||
600 | static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num, | ||
601 | unsigned int t_clk, unsigned int delay) | ||
602 | { | ||
603 | unsigned int coal = ((t_clk / 1000000) * delay) / 64; | ||
604 | |||
605 | /* Set RX Coalescing mechanism */ | ||
606 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num), | ||
607 | ((coal & 0x3fff) << 8) | | ||
608 | (mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num)) | ||
609 | & 0xffc000ff)); | ||
610 | |||
611 | return coal; | ||
612 | } | ||
613 | #endif | ||
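/*
 * Worked example of the coalescing arithmetic above, using the 133 MHz
 * t_clk this driver passes in at open time: for a hypothetical delay of
 * 20 usec, coal = ((133000000 / 1000000) * 20) / 64 = 41, i.e. the
 * interrupt is delayed by roughly 41 * 64 = 2624 t_clk cycles (~20 usec).
 */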
614 | |||
615 | /* | ||
616 | * eth_port_set_tx_coal - Sets coalescing interrupt mechanism on TX path | ||
617 | * | ||
618 | * DESCRIPTION: | ||
619 | * This routine sets the TX coalescing interrupt mechanism parameter. | ||
620 | * The parameter is a timeout counter that counts in chunks of | ||
621 | * 64 t_clk cycles; when the timeout expires, a maskable | ||
622 | * interrupt is asserted. | ||
623 | * The parameter is calculated from the t_clk frequency of the | ||
624 | * MV-643xx chip and the required interrupt delay in usec. | ||
625 | * | ||
626 | * INPUT: | ||
627 | * unsigned int eth_port_num Ethernet port number | ||
628 | * unsigned int t_clk t_clk of the MV-643xx chip in HZ units | ||
629 | * unsigned int delay Delay in uSeconds | ||
630 | * | ||
631 | * OUTPUT: | ||
632 | * Interrupt coalescing mechanism value is set in MV-643xx chip. | ||
633 | * | ||
634 | * RETURN: | ||
635 | * The interrupt coalescing value set in the gigE port. | ||
636 | * | ||
637 | */ | ||
638 | static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num, | ||
639 | unsigned int t_clk, unsigned int delay) | ||
640 | { | ||
641 | unsigned int coal; | ||
642 | coal = ((t_clk / 1000000) * delay) / 64; | ||
643 | /* Set TX Coalescing mechanism */ | ||
644 | mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), | ||
645 | coal << 4); | ||
646 | return coal; | ||
647 | } | ||
648 | |||
649 | /* | ||
650 | * mv643xx_eth_open | ||
651 | * | ||
652 | * This function is called when opening the network device. The function | ||
653 | * should initialize all the hardware, initialize cyclic Rx/Tx | ||
654 | * descriptors chain and buffers and allocate an IRQ to the network | ||
655 | * device. | ||
656 | * | ||
657 | * Input : a pointer to the network device structure | ||
658 | * | ||
659 | * Output : zero on success, nonzero on failure. | ||
660 | */ | ||
661 | |||
662 | static int mv643xx_eth_open(struct net_device *dev) | ||
663 | { | ||
664 | struct mv643xx_private *mp = netdev_priv(dev); | ||
665 | unsigned int port_num = mp->port_num; | ||
666 | int err; | ||
667 | |||
668 | spin_lock_irq(&mp->lock); | ||
669 | |||
670 | err = request_irq(dev->irq, mv643xx_eth_int_handler, | ||
671 | SA_INTERRUPT | SA_SAMPLE_RANDOM, dev->name, dev); | ||
672 | |||
673 | if (err) { | ||
674 | printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n", | ||
675 | port_num); | ||
676 | err = -EAGAIN; | ||
677 | goto out; | ||
678 | } | ||
679 | |||
680 | if (mv643xx_eth_real_open(dev)) { | ||
681 | printk("%s: Error opening interface\n", dev->name); | ||
682 | err = -EBUSY; | ||
683 | goto out_free; | ||
684 | } | ||
685 | |||
686 | spin_unlock_irq(&mp->lock); | ||
687 | |||
688 | return 0; | ||
689 | |||
690 | out_free: | ||
691 | free_irq(dev->irq, dev); | ||
692 | |||
693 | out: | ||
694 | spin_unlock_irq(&mp->lock); | ||
695 | |||
696 | return err; | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * ether_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory. | ||
701 | * | ||
702 | * DESCRIPTION: | ||
703 | * This function prepares a Rx chained list of descriptors and packet | ||
704 | * buffers in a form of a ring. The routine must be called after port | ||
705 | * initialization routine and before port start routine. | ||
706 | * The Ethernet SDMA engine uses CPU bus addresses to access the various | ||
707 | * devices in the system (i.e. DRAM). This function uses the ethernet | ||
708 | * struct 'virtual to physical' routine (set by the user) to set the ring | ||
709 | * with physical addresses. | ||
710 | * | ||
711 | * INPUT: | ||
712 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
713 | * | ||
714 | * OUTPUT: | ||
715 | * The routine updates the Ethernet port control struct with information | ||
716 | * regarding the Rx descriptors and buffers. | ||
717 | * | ||
718 | * RETURN: | ||
719 | * None. | ||
720 | */ | ||
721 | static void ether_init_rx_desc_ring(struct mv643xx_private *mp) | ||
722 | { | ||
723 | volatile struct eth_rx_desc *p_rx_desc; | ||
724 | int rx_desc_num = mp->rx_ring_size; | ||
725 | int i; | ||
726 | |||
727 | /* initialize the next_desc_ptr links in the Rx descriptors ring */ | ||
728 | p_rx_desc = (struct eth_rx_desc *)mp->p_rx_desc_area; | ||
729 | for (i = 0; i < rx_desc_num; i++) { | ||
730 | p_rx_desc[i].next_desc_ptr = mp->rx_desc_dma + | ||
731 | ((i + 1) % rx_desc_num) * sizeof(struct eth_rx_desc); | ||
732 | } | ||
733 | |||
734 | /* Save Rx desc pointer to driver struct. */ | ||
735 | mp->rx_curr_desc_q = 0; | ||
736 | mp->rx_used_desc_q = 0; | ||
737 | |||
738 | mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); | ||
739 | |||
740 | /* Add the queue to the list of RX queues of this port */ | ||
741 | mp->port_rx_queue_command |= 1; | ||
742 | } | ||
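/*
 * Ring-wrap illustration for the loop above: with rx_ring_size == 8,
 * descriptor 7 gets next_desc_ptr = rx_desc_dma + ((7 + 1) % 8) *
 * sizeof(struct eth_rx_desc) == rx_desc_dma, i.e. it points back at
 * descriptor 0, closing the ring.
 */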
743 | |||
744 | /* | ||
745 | * ether_init_tx_desc_ring - Carve a Tx chain desc list and buffer in memory. | ||
746 | * | ||
747 | * DESCRIPTION: | ||
748 | * This function prepares a Tx chained list of descriptors and packet | ||
749 | * buffers in a form of a ring. The routine must be called after port | ||
750 | * initialization routine and before port start routine. | ||
751 | * The Ethernet SDMA engine uses CPU bus addresses to access the various | ||
752 | * devices in the system (i.e. DRAM). This function uses the ethernet | ||
753 | * struct 'virtual to physical' routine (set by the user) to set the ring | ||
754 | * with physical addresses. | ||
755 | * | ||
756 | * INPUT: | ||
757 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
758 | * | ||
759 | * OUTPUT: | ||
760 | * The routine updates the Ethernet port control struct with information | ||
761 | * regarding the Tx descriptors and buffers. | ||
762 | * | ||
763 | * RETURN: | ||
764 | * None. | ||
765 | */ | ||
766 | static void ether_init_tx_desc_ring(struct mv643xx_private *mp) | ||
767 | { | ||
768 | int tx_desc_num = mp->tx_ring_size; | ||
769 | struct eth_tx_desc *p_tx_desc; | ||
770 | int i; | ||
771 | |||
772 | /* Initialize the next_desc_ptr links in the Tx descriptors ring */ | ||
773 | p_tx_desc = (struct eth_tx_desc *)mp->p_tx_desc_area; | ||
774 | for (i = 0; i < tx_desc_num; i++) { | ||
775 | p_tx_desc[i].next_desc_ptr = mp->tx_desc_dma + | ||
776 | ((i + 1) % tx_desc_num) * sizeof(struct eth_tx_desc); | ||
777 | } | ||
778 | |||
779 | mp->tx_curr_desc_q = 0; | ||
780 | mp->tx_used_desc_q = 0; | ||
781 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
782 | mp->tx_first_desc_q = 0; | ||
783 | #endif | ||
784 | |||
785 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); | ||
786 | |||
787 | /* Add the queue to the list of Tx queues of this port */ | ||
788 | mp->port_tx_queue_command |= 1; | ||
789 | } | ||
790 | |||
791 | /* Helper function for mv643xx_eth_open */ | ||
792 | static int mv643xx_eth_real_open(struct net_device *dev) | ||
793 | { | ||
794 | struct mv643xx_private *mp = netdev_priv(dev); | ||
795 | unsigned int port_num = mp->port_num; | ||
796 | unsigned int size; | ||
797 | |||
798 | /* Stop RX Queues */ | ||
799 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | ||
800 | |||
801 | /* Clear the ethernet port interrupts */ | ||
802 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | ||
803 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
804 | |||
805 | /* Unmask RX buffer and TX end interrupt */ | ||
806 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | ||
807 | INT_CAUSE_UNMASK_ALL); | ||
808 | |||
809 | /* Unmask phy and link status changes interrupts */ | ||
810 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), | ||
811 | INT_CAUSE_UNMASK_ALL_EXT); | ||
812 | |||
813 | /* Set the MAC Address */ | ||
814 | memcpy(mp->port_mac_addr, dev->dev_addr, 6); | ||
815 | |||
816 | eth_port_init(mp); | ||
817 | |||
818 | INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev); | ||
819 | |||
820 | memset(&mp->timeout, 0, sizeof(struct timer_list)); | ||
821 | mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper; | ||
822 | mp->timeout.data = (unsigned long)dev; | ||
823 | |||
824 | mp->rx_task_busy = 0; | ||
825 | mp->rx_timer_flag = 0; | ||
826 | |||
827 | /* Allocate RX and TX skb rings */ | ||
828 | mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size, | ||
829 | GFP_KERNEL); | ||
830 | if (!mp->rx_skb) { | ||
831 | printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name); | ||
832 | return -ENOMEM; | ||
833 | } | ||
834 | mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size, | ||
835 | GFP_KERNEL); | ||
836 | if (!mp->tx_skb) { | ||
837 | printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name); | ||
838 | kfree(mp->rx_skb); | ||
839 | return -ENOMEM; | ||
840 | } | ||
841 | |||
842 | /* Allocate TX ring */ | ||
843 | mp->tx_ring_skbs = 0; | ||
844 | size = mp->tx_ring_size * sizeof(struct eth_tx_desc); | ||
845 | mp->tx_desc_area_size = size; | ||
846 | |||
847 | if (mp->tx_sram_size) { | ||
848 | mp->p_tx_desc_area = ioremap(mp->tx_sram_addr, | ||
849 | mp->tx_sram_size); | ||
850 | mp->tx_desc_dma = mp->tx_sram_addr; | ||
851 | } else | ||
852 | mp->p_tx_desc_area = dma_alloc_coherent(NULL, size, | ||
853 | &mp->tx_desc_dma, | ||
854 | GFP_KERNEL); | ||
855 | |||
856 | if (!mp->p_tx_desc_area) { | ||
857 | printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n", | ||
858 | dev->name, size); | ||
859 | kfree(mp->rx_skb); | ||
860 | kfree(mp->tx_skb); | ||
861 | return -ENOMEM; | ||
862 | } | ||
863 | BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */ | ||
864 | memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size); | ||
865 | |||
866 | ether_init_tx_desc_ring(mp); | ||
867 | |||
868 | /* Allocate RX ring */ | ||
869 | mp->rx_ring_skbs = 0; | ||
870 | size = mp->rx_ring_size * sizeof(struct eth_rx_desc); | ||
871 | mp->rx_desc_area_size = size; | ||
872 | |||
873 | if (mp->rx_sram_size) { | ||
874 | mp->p_rx_desc_area = ioremap(mp->rx_sram_addr, | ||
875 | mp->rx_sram_size); | ||
876 | mp->rx_desc_dma = mp->rx_sram_addr; | ||
877 | } else | ||
878 | mp->p_rx_desc_area = dma_alloc_coherent(NULL, size, | ||
879 | &mp->rx_desc_dma, | ||
880 | GFP_KERNEL); | ||
881 | |||
882 | if (!mp->p_rx_desc_area) { | ||
883 | printk(KERN_ERR "%s: Cannot allocate Rx ring (size %d bytes)\n", | ||
884 | dev->name, size); | ||
885 | printk(KERN_ERR "%s: Freeing previously allocated TX queues...\n", | ||
886 | dev->name); | ||
887 | if (mp->rx_sram_size) | ||
888 | iounmap(mp->p_rx_desc_area); | ||
889 | else | ||
890 | dma_free_coherent(NULL, mp->tx_desc_area_size, | ||
891 | mp->p_tx_desc_area, mp->tx_desc_dma); | ||
892 | kfree(mp->rx_skb); | ||
893 | kfree(mp->tx_skb); | ||
894 | return -ENOMEM; | ||
895 | } | ||
896 | memset((void *)mp->p_rx_desc_area, 0, size); | ||
897 | |||
898 | ether_init_rx_desc_ring(mp); | ||
899 | |||
900 | mv643xx_eth_rx_task(dev); /* Fill RX ring with skb's */ | ||
901 | |||
902 | eth_port_start(mp); | ||
903 | |||
904 | /* Interrupt Coalescing */ | ||
905 | |||
906 | #ifdef MV643XX_COAL | ||
907 | mp->rx_int_coal = | ||
908 | eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL); | ||
909 | #endif | ||
910 | |||
911 | mp->tx_int_coal = | ||
912 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); | ||
913 | |||
914 | netif_start_queue(dev); | ||
915 | |||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) | ||
920 | { | ||
921 | struct mv643xx_private *mp = netdev_priv(dev); | ||
922 | unsigned int port_num = mp->port_num; | ||
923 | unsigned int curr; | ||
924 | |||
925 | /* Stop Tx Queues */ | ||
926 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | ||
927 | |||
928 | /* Free outstanding skb's on TX rings */ | ||
929 | for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) { | ||
930 | if (mp->tx_skb[curr]) { | ||
931 | dev_kfree_skb(mp->tx_skb[curr]); | ||
932 | mp->tx_ring_skbs--; | ||
933 | } | ||
934 | } | ||
935 | if (mp->tx_ring_skbs) | ||
936 | printk(KERN_ERR "%s: Error on Tx descriptor free - could not free %d" | ||
937 | " descriptors\n", dev->name, mp->tx_ring_skbs); | ||
938 | |||
939 | /* Free TX ring */ | ||
940 | if (mp->tx_sram_size) | ||
941 | iounmap(mp->p_tx_desc_area); | ||
942 | else | ||
943 | dma_free_coherent(NULL, mp->tx_desc_area_size, | ||
944 | mp->p_tx_desc_area, mp->tx_desc_dma); | ||
945 | } | ||
946 | |||
947 | static void mv643xx_eth_free_rx_rings(struct net_device *dev) | ||
948 | { | ||
949 | struct mv643xx_private *mp = netdev_priv(dev); | ||
950 | unsigned int port_num = mp->port_num; | ||
951 | int curr; | ||
952 | |||
953 | /* Stop RX Queues */ | ||
954 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | ||
955 | |||
956 | /* Free preallocated skb's on RX rings */ | ||
957 | for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) { | ||
958 | if (mp->rx_skb[curr]) { | ||
959 | dev_kfree_skb(mp->rx_skb[curr]); | ||
960 | mp->rx_ring_skbs--; | ||
961 | } | ||
962 | } | ||
963 | |||
964 | if (mp->rx_ring_skbs) | ||
965 | printk(KERN_ERR | ||
966 | "%s: Error in freeing Rx Ring. %d skb's still" | ||
967 | " stuck in RX Ring - ignoring them\n", dev->name, | ||
968 | mp->rx_ring_skbs); | ||
969 | /* Free RX ring */ | ||
970 | if (mp->rx_sram_size) | ||
971 | iounmap(mp->p_rx_desc_area); | ||
972 | else | ||
973 | dma_free_coherent(NULL, mp->rx_desc_area_size, | ||
974 | mp->p_rx_desc_area, mp->rx_desc_dma); | ||
975 | } | ||
976 | |||
977 | /* | ||
978 | * mv643xx_eth_stop | ||
979 | * | ||
980 | * This function is used when closing the network device. | ||
981 | * It updates the hardware, releases all memory that holds buffers | ||
982 | * and descriptors, and releases the IRQ. | ||
983 | * Input : a pointer to the device structure | ||
984 | * Output : zero on success, nonzero on failure | ||
985 | */ | ||
986 | |||
987 | /* Helper function for mv643xx_eth_stop */ | ||
988 | |||
989 | static int mv643xx_eth_real_stop(struct net_device *dev) | ||
990 | { | ||
991 | struct mv643xx_private *mp = netdev_priv(dev); | ||
992 | unsigned int port_num = mp->port_num; | ||
993 | |||
994 | netif_carrier_off(dev); | ||
995 | netif_stop_queue(dev); | ||
996 | |||
997 | mv643xx_eth_free_tx_rings(dev); | ||
998 | mv643xx_eth_free_rx_rings(dev); | ||
999 | |||
1000 | eth_port_reset(mp->port_num); | ||
1001 | |||
1002 | /* Disable ethernet port interrupts */ | ||
1003 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | ||
1004 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
1005 | |||
1006 | /* Mask RX buffer and TX end interrupt */ | ||
1007 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); | ||
1008 | |||
1009 | /* Mask phy and link status changes interrupts */ | ||
1010 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0); | ||
1011 | |||
1012 | return 0; | ||
1013 | } | ||
1014 | |||
1015 | static int mv643xx_eth_stop(struct net_device *dev) | ||
1016 | { | ||
1017 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1018 | |||
1019 | spin_lock_irq(&mp->lock); | ||
1020 | |||
1021 | mv643xx_eth_real_stop(dev); | ||
1022 | |||
1023 | free_irq(dev->irq, dev); | ||
1024 | spin_unlock_irq(&mp->lock); | ||
1025 | |||
1026 | return 0; | ||
1027 | } | ||
1028 | |||
1029 | #ifdef MV643XX_NAPI | ||
1030 | static void mv643xx_tx(struct net_device *dev) | ||
1031 | { | ||
1032 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1033 | struct pkt_info pkt_info; | ||
1034 | |||
1035 | while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { | ||
1036 | if (pkt_info.return_info) { | ||
1037 | if (skb_shinfo(pkt_info.return_info)->nr_frags) | ||
1038 | dma_unmap_page(NULL, pkt_info.buf_ptr, | ||
1039 | pkt_info.byte_cnt, | ||
1040 | DMA_TO_DEVICE); | ||
1041 | else | ||
1042 | dma_unmap_single(NULL, pkt_info.buf_ptr, | ||
1043 | pkt_info.byte_cnt, | ||
1044 | DMA_TO_DEVICE); | ||
1045 | |||
1046 | dev_kfree_skb_irq(pkt_info.return_info); | ||
1047 | |||
1048 | if (mp->tx_ring_skbs) | ||
1049 | mp->tx_ring_skbs--; | ||
1050 | } else | ||
1051 | dma_unmap_page(NULL, pkt_info.buf_ptr, | ||
1052 | pkt_info.byte_cnt, DMA_TO_DEVICE); | ||
1053 | } | ||
1054 | |||
1055 | if (netif_queue_stopped(dev) && | ||
1056 | mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB) | ||
1057 | netif_wake_queue(dev); | ||
1058 | } | ||
1059 | |||
1060 | /* | ||
1061 | * mv643xx_poll | ||
1062 | * | ||
1063 | * This function is used when NAPI is enabled | ||
1064 | */ | ||
1065 | static int mv643xx_poll(struct net_device *dev, int *budget) | ||
1066 | { | ||
1067 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1068 | int done = 1, orig_budget, work_done; | ||
1069 | unsigned int port_num = mp->port_num; | ||
1070 | unsigned long flags; | ||
1071 | |||
1072 | #ifdef MV643XX_TX_FAST_REFILL | ||
1073 | if (++mp->tx_clean_threshold > 5) { | ||
1074 | spin_lock_irqsave(&mp->lock, flags); | ||
1075 | mv643xx_tx(dev); | ||
1076 | mp->tx_clean_threshold = 0; | ||
1077 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1078 | } | ||
1079 | #endif | ||
1080 | |||
1081 | if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) | ||
1082 | != (u32) mp->rx_used_desc_q) { | ||
1083 | orig_budget = *budget; | ||
1084 | if (orig_budget > dev->quota) | ||
1085 | orig_budget = dev->quota; | ||
1086 | work_done = mv643xx_eth_receive_queue(dev, orig_budget); | ||
1087 | mp->rx_task.func(dev); | ||
1088 | *budget -= work_done; | ||
1089 | dev->quota -= work_done; | ||
1090 | if (work_done >= orig_budget) | ||
1091 | done = 0; | ||
1092 | } | ||
1093 | |||
1094 | if (done) { | ||
1095 | spin_lock_irqsave(&mp->lock, flags); | ||
1096 | __netif_rx_complete(dev); | ||
1097 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | ||
1098 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
1099 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | ||
1100 | INT_CAUSE_UNMASK_ALL); | ||
1101 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), | ||
1102 | INT_CAUSE_UNMASK_ALL_EXT); | ||
1103 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1104 | } | ||
1105 | |||
1106 | return done ? 0 : 1; | ||
1107 | } | ||
1108 | #endif | ||
1109 | |||
1110 | /* | ||
1111 | * mv643xx_eth_start_xmit | ||
1112 | * | ||
1113 | * This function queues a packet in the Tx descriptors of the | ||
1114 | * required port. | ||
1115 | * | ||
1116 | * Input : skb - a pointer to socket buffer | ||
1117 | * dev - a pointer to the required port | ||
1118 | * | ||
1119 | * Output : zero upon success | ||
1120 | */ | ||
1121 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1122 | { | ||
1123 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1124 | struct net_device_stats *stats = &mp->stats; | ||
1125 | ETH_FUNC_RET_STATUS status; | ||
1126 | unsigned long flags; | ||
1127 | struct pkt_info pkt_info; | ||
1128 | |||
1129 | if (netif_queue_stopped(dev)) { | ||
1130 | printk(KERN_ERR | ||
1131 | "%s: Tried sending packet when interface is stopped\n", | ||
1132 | dev->name); | ||
1133 | return 1; | ||
1134 | } | ||
1135 | |||
1136 | /* This is a hard error, log it. */ | ||
1137 | if ((mp->tx_ring_size - mp->tx_ring_skbs) <= | ||
1138 | (skb_shinfo(skb)->nr_frags + 1)) { | ||
1139 | netif_stop_queue(dev); | ||
1140 | printk(KERN_ERR | ||
1141 | "%s: Bug in mv643xx_eth - Trying to transmit when" | ||
1142 | " queue full !\n", dev->name); | ||
1143 | return 1; | ||
1144 | } | ||
1145 | |||
1146 | /* Paranoid check - this shouldn't happen */ | ||
1147 | if (skb == NULL) { | ||
1148 | stats->tx_dropped++; | ||
1149 | printk(KERN_ERR "mv64320_eth paranoid check failed\n"); | ||
1150 | return 1; | ||
1151 | } | ||
1152 | |||
1153 | spin_lock_irqsave(&mp->lock, flags); | ||
1154 | |||
1155 | /* Update packet info data structure -- DMA owned, first last */ | ||
1156 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
1157 | if (!skb_shinfo(skb)->nr_frags) { | ||
1158 | linear: | ||
1159 | if (skb->ip_summed != CHECKSUM_HW) { | ||
1160 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | ||
1161 | ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC; | ||
1162 | pkt_info.l4i_chk = 0; | ||
1163 | } else { | ||
1164 | u32 ipheader = skb->nh.iph->ihl << 11; | ||
1165 | |||
1166 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | ||
1167 | ETH_TX_FIRST_DESC | ETH_TX_LAST_DESC | | ||
1168 | ETH_GEN_TCP_UDP_CHECKSUM | | ||
1169 | ETH_GEN_IP_V_4_CHECKSUM | ipheader; | ||
1170 | /* CPU already calculated pseudo header checksum. */ | ||
1171 | if (skb->nh.iph->protocol == IPPROTO_UDP) { | ||
1172 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1173 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1174 | } else if (skb->nh.iph->protocol == IPPROTO_TCP) | ||
1175 | pkt_info.l4i_chk = skb->h.th->check; | ||
1176 | else { | ||
1177 | printk(KERN_ERR | ||
1178 | "%s: chksum proto != TCP or UDP\n", | ||
1179 | dev->name); | ||
1180 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1181 | return 1; | ||
1182 | } | ||
1183 | } | ||
1184 | pkt_info.byte_cnt = skb->len; | ||
1185 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1186 | DMA_TO_DEVICE); | ||
1187 | pkt_info.return_info = skb; | ||
1188 | mp->tx_ring_skbs++; | ||
1189 | status = eth_port_send(mp, &pkt_info); | ||
1190 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1191 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1192 | dev->name); | ||
1193 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1194 | } else { | ||
1195 | unsigned int frag; | ||
1196 | u32 ipheader; | ||
1197 | |||
1198 | /* Since hardware can't handle unaligned fragments smaller | ||
1199 | * than 9 bytes, if we find any, we linearize the skb | ||
1200 | * and start again. When I've seen it, it's always been | ||
1201 | * the first frag (probably near the end of the page), | ||
1202 | * but we check all frags to be safe. | ||
1203 | */ | ||
1204 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | ||
1205 | skb_frag_t *fragp; | ||
1206 | |||
1207 | fragp = &skb_shinfo(skb)->frags[frag]; | ||
1208 | if (fragp->size <= 8 && fragp->page_offset & 0x7) { | ||
1209 | skb_linearize(skb, GFP_ATOMIC); | ||
1210 | printk(KERN_DEBUG "%s: unaligned tiny fragment " | ||
1211 | "%d of %d, fixed\n", | ||
1212 | dev->name, frag, | ||
1213 | skb_shinfo(skb)->nr_frags); | ||
1214 | goto linear; | ||
1215 | } | ||
1216 | } | ||
1217 | |||
1218 | /* first frag which is skb header */ | ||
1219 | pkt_info.byte_cnt = skb_headlen(skb); | ||
1220 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, | ||
1221 | skb_headlen(skb), | ||
1222 | DMA_TO_DEVICE); | ||
1223 | pkt_info.l4i_chk = 0; | ||
1224 | pkt_info.return_info = NULL; | ||
1225 | pkt_info.cmd_sts = ETH_TX_FIRST_DESC; | ||
1226 | |||
1227 | if (skb->ip_summed == CHECKSUM_HW) { | ||
1228 | ipheader = skb->nh.iph->ihl << 11; | ||
1229 | pkt_info.cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | | ||
1230 | ETH_GEN_IP_V_4_CHECKSUM | ipheader; | ||
1231 | /* CPU already calculated pseudo header checksum. */ | ||
1232 | if (skb->nh.iph->protocol == IPPROTO_UDP) { | ||
1233 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1234 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1235 | } else if (skb->nh.iph->protocol == IPPROTO_TCP) | ||
1236 | pkt_info.l4i_chk = skb->h.th->check; | ||
1237 | else { | ||
1238 | printk(KERN_ERR | ||
1239 | "%s: chksum proto != TCP or UDP\n", | ||
1240 | dev->name); | ||
1241 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1242 | return 1; | ||
1243 | } | ||
1244 | } | ||
1245 | |||
1246 | status = eth_port_send(mp, &pkt_info); | ||
1247 | if (status != ETH_OK) { | ||
1248 | if ((status == ETH_ERROR)) | ||
1249 | printk(KERN_ERR | ||
1250 | "%s: Error on transmitting packet\n", | ||
1251 | dev->name); | ||
1252 | if (status == ETH_QUEUE_FULL) | ||
1253 | printk("Error on Queue Full\n"); | ||
1254 | if (status == ETH_QUEUE_LAST_RESOURCE) | ||
1255 | printk("Tx resource error\n"); | ||
1256 | } | ||
1257 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1258 | |||
1259 | /* Check for the remaining frags */ | ||
1260 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | ||
1261 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | ||
1262 | pkt_info.l4i_chk = 0x0000; | ||
1263 | pkt_info.cmd_sts = 0x00000000; | ||
1264 | |||
1265 | /* Last Frag enables interrupt and frees the skb */ | ||
1266 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1267 | pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT | | ||
1268 | ETH_TX_LAST_DESC; | ||
1269 | pkt_info.return_info = skb; | ||
1270 | mp->tx_ring_skbs++; | ||
1271 | } else { | ||
1272 | pkt_info.return_info = NULL; | ||
1273 | } | ||
1274 | pkt_info.l4i_chk = 0; | ||
1275 | pkt_info.byte_cnt = this_frag->size; | ||
1276 | |||
1277 | pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page, | ||
1278 | this_frag->page_offset, | ||
1279 | this_frag->size, | ||
1280 | DMA_TO_DEVICE); | ||
1281 | |||
1282 | status = eth_port_send(mp, &pkt_info); | ||
1283 | |||
1284 | if (status != ETH_OK) { | ||
1285 | if ((status == ETH_ERROR)) | ||
1286 | printk(KERN_ERR "%s: Error on " | ||
1287 | "transmitting packet\n", | ||
1288 | dev->name); | ||
1289 | |||
1290 | if (status == ETH_QUEUE_LAST_RESOURCE) | ||
1291 | printk("Tx resource error \n"); | ||
1292 | |||
1293 | if (status == ETH_QUEUE_FULL) | ||
1294 | printk("Queue is full \n"); | ||
1295 | } | ||
1296 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1297 | } | ||
1298 | } | ||
1299 | #else | ||
1300 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | | ||
1301 | ETH_TX_LAST_DESC; | ||
1302 | pkt_info.l4i_chk = 0; | ||
1303 | pkt_info.byte_cnt = skb->len; | ||
1304 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1305 | DMA_TO_DEVICE); | ||
1306 | pkt_info.return_info = skb; | ||
1307 | mp->tx_ring_skbs++; | ||
1308 | status = eth_port_send(mp, &pkt_info); | ||
1309 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1310 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1311 | dev->name); | ||
1312 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1313 | #endif | ||
1314 | |||
1315 | /* Check if TX queue can handle another skb. If not, then | ||
1316 | * signal higher layers to stop requesting TX | ||
1317 | */ | ||
1318 | if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB)) | ||
1319 | /* | ||
1320 | * Stop getting skb's from upper layers. | ||
1321 | * Getting skb's from upper layers will be enabled again after | ||
1322 | * packets are released. | ||
1323 | */ | ||
1324 | netif_stop_queue(dev); | ||
1325 | |||
1326 | /* Update statistics and start of transmission time */ | ||
1327 | stats->tx_packets++; | ||
1328 | dev->trans_start = jiffies; | ||
1329 | |||
1330 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1331 | |||
1332 | return 0; /* success */ | ||
1333 | } | ||
1334 | |||
1335 | /* | ||
1336 | * mv643xx_eth_get_stats | ||
1337 | * | ||
1338 | * Returns a pointer to the interface statistics. | ||
1339 | * | ||
1340 | * Input : dev - a pointer to the required interface | ||
1341 | * | ||
1342 | * Output : a pointer to the interface's statistics | ||
1343 | */ | ||
1344 | |||
1345 | static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) | ||
1346 | { | ||
1347 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1348 | |||
1349 | return &mp->stats; | ||
1350 | } | ||
1351 | |||
1352 | /* | ||
1353 | * mv643xx_eth_probe | ||
1354 | * | ||
1355 | * First function called after registering the network device. | ||
1356 | * Its purpose is to initialize the device as an ethernet device, | ||
1357 | * fill the ethernet device structure with pointers to functions, | ||
1358 | * and set the MAC address of the interface. | ||
1359 | * | ||
1360 | * Input : struct device * | ||
1361 | * Output : -ENOMEM on failure, 0 on success | ||
1362 | */ | ||
1363 | static int mv643xx_eth_probe(struct device *ddev) | ||
1364 | { | ||
1365 | struct platform_device *pdev = to_platform_device(ddev); | ||
1366 | struct mv643xx_eth_platform_data *pd; | ||
1367 | int port_num = pdev->id; | ||
1368 | struct mv643xx_private *mp; | ||
1369 | struct net_device *dev; | ||
1370 | u8 *p; | ||
1371 | struct resource *res; | ||
1372 | int err; | ||
1373 | |||
1374 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); | ||
1375 | if (!dev) | ||
1376 | return -ENOMEM; | ||
1377 | |||
1378 | dev_set_drvdata(ddev, dev); | ||
1379 | |||
1380 | mp = netdev_priv(dev); | ||
1381 | |||
1382 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
1383 | BUG_ON(!res); | ||
1384 | dev->irq = res->start; | ||
1385 | |||
1386 | mp->port_num = port_num; | ||
1387 | |||
1388 | dev->open = mv643xx_eth_open; | ||
1389 | dev->stop = mv643xx_eth_stop; | ||
1390 | dev->hard_start_xmit = mv643xx_eth_start_xmit; | ||
1391 | dev->get_stats = mv643xx_eth_get_stats; | ||
1392 | dev->set_mac_address = mv643xx_eth_set_mac_address; | ||
1393 | dev->set_multicast_list = mv643xx_eth_set_rx_mode; | ||
1394 | |||
1395 | /* No need to Tx Timeout */ | ||
1396 | dev->tx_timeout = mv643xx_eth_tx_timeout; | ||
1397 | #ifdef MV643XX_NAPI | ||
1398 | dev->poll = mv643xx_poll; | ||
1399 | dev->weight = 64; | ||
1400 | #endif | ||
1401 | |||
1402 | dev->watchdog_timeo = 2 * HZ; | ||
1403 | dev->tx_queue_len = mp->tx_ring_size; | ||
1404 | dev->base_addr = 0; | ||
1405 | dev->change_mtu = mv643xx_eth_change_mtu; | ||
1406 | SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops); | ||
1407 | |||
1408 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
1409 | #ifdef MAX_SKB_FRAGS | ||
1410 | /* | ||
1411 | * Zero copy can only work if we use Discovery II memory. Else, we will | ||
1412 | * have to map the buffers to ISA memory which is only 16 MB | ||
1413 | */ | ||
1414 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM; | ||
1415 | #endif | ||
1416 | #endif | ||
1417 | |||
1418 | /* Configure the timeout task */ | ||
1419 | INIT_WORK(&mp->tx_timeout_task, | ||
1420 | (void (*)(void *))mv643xx_eth_tx_timeout_task, dev); | ||
1421 | |||
1422 | spin_lock_init(&mp->lock); | ||
1423 | |||
1424 | /* set default config values */ | ||
1425 | eth_port_uc_addr_get(dev, dev->dev_addr); | ||
1426 | mp->port_config = MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE; | ||
1427 | mp->port_config_extend = MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE; | ||
1428 | mp->port_sdma_config = MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE; | ||
1429 | mp->port_serial_control = MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE; | ||
1430 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; | ||
1431 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; | ||
1432 | |||
1433 | pd = pdev->dev.platform_data; | ||
1434 | if (pd) { | ||
1435 | if (pd->mac_addr != NULL) | ||
1436 | memcpy(dev->dev_addr, pd->mac_addr, 6); | ||
1437 | |||
1438 | if (pd->phy_addr || pd->force_phy_addr) | ||
1439 | ethernet_phy_set(port_num, pd->phy_addr); | ||
1440 | |||
1441 | if (pd->port_config || pd->force_port_config) | ||
1442 | mp->port_config = pd->port_config; | ||
1443 | |||
1444 | if (pd->port_config_extend || pd->force_port_config_extend) | ||
1445 | mp->port_config_extend = pd->port_config_extend; | ||
1446 | |||
1447 | if (pd->port_sdma_config || pd->force_port_sdma_config) | ||
1448 | mp->port_sdma_config = pd->port_sdma_config; | ||
1449 | |||
1450 | if (pd->port_serial_control || pd->force_port_serial_control) | ||
1451 | mp->port_serial_control = pd->port_serial_control; | ||
1452 | |||
1453 | if (pd->rx_queue_size) | ||
1454 | mp->rx_ring_size = pd->rx_queue_size; | ||
1455 | |||
1456 | if (pd->tx_queue_size) | ||
1457 | mp->tx_ring_size = pd->tx_queue_size; | ||
1458 | |||
1459 | if (pd->tx_sram_size) { | ||
1460 | mp->tx_sram_size = pd->tx_sram_size; | ||
1461 | mp->tx_sram_addr = pd->tx_sram_addr; | ||
1462 | } | ||
1463 | |||
1464 | if (pd->rx_sram_size) { | ||
1465 | mp->rx_sram_size = pd->rx_sram_size; | ||
1466 | mp->rx_sram_addr = pd->rx_sram_addr; | ||
1467 | } | ||
1468 | } | ||
1469 | |||
1470 | err = ethernet_phy_detect(port_num); | ||
1471 | if (err) { | ||
1472 | pr_debug("MV643xx ethernet port %d: " | ||
1473 | "No PHY detected at addr %d\n", | ||
1474 | port_num, ethernet_phy_get(port_num)); | ||
1475 | goto out; | ||
1476 | } | ||
1477 | |||
1478 | err = register_netdev(dev); | ||
1479 | if (err) | ||
1480 | goto out; | ||
1481 | |||
1482 | p = dev->dev_addr; | ||
1483 | printk(KERN_NOTICE | ||
1484 | "%s: port %d with MAC address %02x:%02x:%02x:%02x:%02x:%02x\n", | ||
1485 | dev->name, port_num, p[0], p[1], p[2], p[3], p[4], p[5]); | ||
1486 | |||
1487 | if (dev->features & NETIF_F_SG) | ||
1488 | printk(KERN_NOTICE "%s: Scatter Gather Enabled\n", dev->name); | ||
1489 | |||
1490 | if (dev->features & NETIF_F_IP_CSUM) | ||
1491 | printk(KERN_NOTICE "%s: TX TCP/IP Checksumming Supported\n", | ||
1492 | dev->name); | ||
1493 | |||
1494 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
1495 | printk(KERN_NOTICE "%s: RX TCP/UDP Checksum Offload ON \n", dev->name); | ||
1496 | #endif | ||
1497 | |||
1498 | #ifdef MV643XX_COAL | ||
1499 | printk(KERN_NOTICE "%s: TX and RX Interrupt Coalescing ON\n", | ||
1500 | dev->name); | ||
1501 | #endif | ||
1502 | |||
1503 | #ifdef MV643XX_NAPI | ||
1504 | printk(KERN_NOTICE "%s: RX NAPI Enabled\n", dev->name); | ||
1505 | #endif | ||
1506 | |||
1507 | return 0; | ||
1508 | |||
1509 | out: | ||
1510 | free_netdev(dev); | ||
1511 | |||
1512 | return err; | ||
1513 | } | ||
1514 | |||
1515 | static int mv643xx_eth_remove(struct device *ddev) | ||
1516 | { | ||
1517 | struct net_device *dev = dev_get_drvdata(ddev); | ||
1518 | |||
1519 | unregister_netdev(dev); | ||
1520 | flush_scheduled_work(); | ||
1521 | |||
1522 | free_netdev(dev); | ||
1523 | dev_set_drvdata(ddev, NULL); | ||
1524 | return 0; | ||
1525 | } | ||
1526 | |||
1527 | static int mv643xx_eth_shared_probe(struct device *ddev) | ||
1528 | { | ||
1529 | struct platform_device *pdev = to_platform_device(ddev); | ||
1530 | struct resource *res; | ||
1531 | |||
1532 | printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); | ||
1533 | |||
1534 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1535 | if (res == NULL) | ||
1536 | return -ENODEV; | ||
1537 | |||
1538 | mv643xx_eth_shared_base = ioremap(res->start, | ||
1539 | MV643XX_ETH_SHARED_REGS_SIZE); | ||
1540 | if (mv643xx_eth_shared_base == NULL) | ||
1541 | return -ENOMEM; | ||
1542 | |||
1543 | return 0; | ||
1544 | |||
1545 | } | ||
1546 | |||
1547 | static int mv643xx_eth_shared_remove(struct device *ddev) | ||
1548 | { | ||
1549 | iounmap(mv643xx_eth_shared_base); | ||
1550 | mv643xx_eth_shared_base = NULL; | ||
1551 | |||
1552 | return 0; | ||
1553 | } | ||
1554 | |||
1555 | static struct device_driver mv643xx_eth_driver = { | ||
1556 | .name = MV643XX_ETH_NAME, | ||
1557 | .bus = &platform_bus_type, | ||
1558 | .probe = mv643xx_eth_probe, | ||
1559 | .remove = mv643xx_eth_remove, | ||
1560 | }; | ||
1561 | |||
1562 | static struct device_driver mv643xx_eth_shared_driver = { | ||
1563 | .name = MV643XX_ETH_SHARED_NAME, | ||
1564 | .bus = &platform_bus_type, | ||
1565 | .probe = mv643xx_eth_shared_probe, | ||
1566 | .remove = mv643xx_eth_shared_remove, | ||
1567 | }; | ||
1568 | |||
1569 | /* | ||
1570 | * mv643xx_init_module | ||
1571 | * | ||
1572 | * Registers the network drivers into the Linux kernel | ||
1573 | * | ||
1574 | * Input : N/A | ||
1575 | * | ||
1576 | * Output : N/A | ||
1577 | */ | ||
1578 | static int __init mv643xx_init_module(void) | ||
1579 | { | ||
1580 | int rc; | ||
1581 | |||
1582 | rc = driver_register(&mv643xx_eth_shared_driver); | ||
1583 | if (!rc) { | ||
1584 | rc = driver_register(&mv643xx_eth_driver); | ||
1585 | if (rc) | ||
1586 | driver_unregister(&mv643xx_eth_shared_driver); | ||
1587 | } | ||
1588 | return rc; | ||
1589 | } | ||
1590 | |||
1591 | /* | ||
1592 | * mv643xx_cleanup_module | ||
1593 | * | ||
1594 | * Unregisters the network drivers from the Linux kernel | ||
1595 | * | ||
1596 | * Input : N/A | ||
1597 | * | ||
1598 | * Output : N/A | ||
1599 | */ | ||
1600 | static void __exit mv643xx_cleanup_module(void) | ||
1601 | { | ||
1602 | driver_unregister(&mv643xx_eth_driver); | ||
1603 | driver_unregister(&mv643xx_eth_shared_driver); | ||
1604 | } | ||
1605 | |||
1606 | module_init(mv643xx_init_module); | ||
1607 | module_exit(mv643xx_cleanup_module); | ||
1608 | |||
1609 | MODULE_LICENSE("GPL"); | ||
1610 | MODULE_AUTHOR("Rabeeh Khoury, Assaf Hoffman, Matthew Dharm, Manish Lachwani" | ||
1611 | " and Dale Farnsworth"); | ||
1612 | MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); | ||
1613 | |||
1614 | /* | ||
1615 | * The second part is the low level driver of the gigE ethernet ports. | ||
1616 | */ | ||
1617 | |||
1618 | /* | ||
1619 | * Marvell's Gigabit Ethernet controller low level driver | ||
1620 | * | ||
1621 | * DESCRIPTION: | ||
1622 | * This file introduces the low level API to Marvell's Gigabit Ethernet | ||
1623 | * controller. This Gigabit Ethernet Controller driver API controls | ||
1624 | * 1) Operations (i.e. port init, start, reset etc.). | ||
1625 | * 2) Data flow (i.e. port send, receive etc.). | ||
1626 | * Each Gigabit Ethernet port is controlled via | ||
1627 | * struct mv643xx_private. | ||
1628 | * This struct includes user configuration information as well as | ||
1629 | * driver internal data needed for its operations. | ||
1630 | * | ||
1631 | * Supported Features: | ||
1632 | * - This low level driver is OS independent. Allocating memory for | ||
1633 | *   the descriptor rings and buffers is not within the scope of | ||
1634 | *   this driver. | ||
1635 | * - The user is free from Rx/Tx queue management. | ||
1636 | * - This low level driver introduces a functionality API that enables | ||
1637 | *   the user to operate Marvell's Gigabit Ethernet Controller in a | ||
1638 | *   convenient way. | ||
1639 | * - Simple Gigabit Ethernet port operation API. | ||
1640 | * - Simple Gigabit Ethernet port data flow API. | ||
1641 | * - Data flow and operation APIs support per-queue functionality. | ||
1642 | * - Supports cached descriptors for better performance. | ||
1643 | * - Enables access to all four DRAM banks and internal SRAM memory | ||
1644 | * spaces. | ||
1645 | * - PHY access and control API. | ||
1646 | * - Port control register configuration API. | ||
1647 | * - Full control over Unicast and Multicast MAC configurations. | ||
1648 | * | ||
1649 | * Operation flow: | ||
1650 | * | ||
1651 | * Initialization phase | ||
1652 | *   This phase completes the initialization of the | ||
1653 | *   mv643xx_private struct. | ||
1654 | * User information regarding port configuration has to be set | ||
1655 | * prior to calling the port initialization routine. | ||
1656 | * | ||
1657 | *   In this phase any port Tx/Rx activity is halted, MIB counters | ||
1658 | *   are cleared, the PHY address is set according to user parameters, | ||
1659 | *   and access to DRAM and internal SRAM memory spaces is enabled. | ||
1660 | * | ||
1661 | * Driver ring initialization | ||
1662 | * Allocating memory for the descriptor rings and buffers is not | ||
1663 | * within the scope of this driver. Thus, the user is required to | ||
1664 | * allocate memory for the descriptors ring and buffers. Those | ||
1665 | * memory parameters are used by the Rx and Tx ring initialization | ||
1666 | *   routines in order to arrange the descriptor linked list in the | ||
1667 | *   form of a ring. | ||
1668 | *   Note: Pay special attention to alignment issues when using | ||
1669 | *   cached descriptors/buffers. In this phase the driver stores | ||
1670 | *   information in the mv643xx_private struct regarding each queue | ||
1671 | *   ring. | ||
1672 | * | ||
1673 | * Driver start | ||
1674 | * This phase prepares the Ethernet port for Rx and Tx activity. | ||
1675 | * It uses the information stored in the mv643xx_private struct to | ||
1676 | * initialize the various port registers. | ||
1677 | * | ||
1678 | * Data flow: | ||
1679 | * All packet references to/from the driver are done using | ||
1680 | * struct pkt_info. | ||
1681 | * This struct is a unified struct used with Rx and Tx operations. | ||
1682 | *       This way the user is not required to be familiar with either | ||
1683 | *       the Tx or Rx descriptor structures. | ||
1684 | *       The driver's descriptor rings are managed by indexes. | ||
1685 | *       Those indexes control the ring resources and are used to | ||
1686 | *       indicate a SW resource error: | ||
1687 | * 'current' | ||
1688 | * This index points to the current available resource for use. For | ||
1689 | * example in Rx process this index will point to the descriptor | ||
1690 | * that will be passed to the user upon calling the receive | ||
1691 | * routine. In Tx process, this index will point to the descriptor | ||
1692 | * that will be assigned with the user packet info and transmitted. | ||
1693 | * 'used' | ||
1694 | *       This index points to the descriptor that needs to restore its | ||
1695 | * resources. For example in Rx process, using the Rx buffer return | ||
1696 | * API will attach the buffer returned in packet info to the | ||
1697 | * descriptor pointed by 'used'. In Tx process, using the Tx | ||
1698 | * descriptor return will merely return the user packet info with | ||
1699 | * the command status of the transmitted buffer pointed by the | ||
1700 | * 'used' index. Nevertheless, it is essential to use this routine | ||
1701 | * to update the 'used' index. | ||
1702 | * 'first' | ||
1703 | * This index supports Tx Scatter-Gather. It points to the first | ||
1704 | * descriptor of a packet assembled of multiple buffers. For | ||
1705 | *       example, if a Tx resource error occurs in the middle of such a | ||
1706 | *       packet, the 'curr' index gets the value of 'first' to indicate | ||
1707 | *       that the ring returned to its state before trying to transmit | ||
1708 | *       this packet. | ||
1709 | * | ||
1710 | * Receive operation: | ||
1711 | *       The eth_port_receive API sets the packet information struct, | ||
1712 | *       passed by the caller, with received information from the | ||
1713 | *       'current' SDMA descriptor. | ||
1714 | *       It is the user's responsibility to return this resource back | ||
1715 | *       to the Rx descriptor ring to enable the reuse of this resource. | ||
1716 | *       Returning an Rx resource is done using the eth_rx_return_buff API. | ||
1717 | * | ||
1718 | * Transmit operation: | ||
1719 | *       The eth_port_send API supports Scatter-Gather, which enables | ||
1720 | *       sending a packet spanned over multiple buffers. This means that | ||
1721 | *       each packet info structure given by the user and put into the | ||
1722 | *       Tx descriptors ring will be transmitted only once the 'LAST' | ||
1723 | *       bit is set in the packet info command status field. This API | ||
1724 | *       also considers restrictions regarding buffer alignments and | ||
1725 | *       sizes. | ||
1726 | * The user must return a Tx resource after ensuring the buffer | ||
1727 | * has been transmitted to enable the Tx ring indexes to update. | ||
1728 | * | ||
1729 | * BOARD LAYOUT | ||
1730 | * This device is on-board. No jumper diagram is necessary. | ||
1731 | * | ||
1732 | * EXTERNAL INTERFACE | ||
1733 | * | ||
1734 | * Prior to calling the initialization routine eth_port_init() the user | ||
1735 | * must set the following fields under mv643xx_private struct: | ||
1736 | * port_num User Ethernet port number. | ||
1737 | * port_mac_addr[6] User defined port MAC address. | ||
1738 | * port_config User port configuration value. | ||
1739 | * port_config_extend User port config extend value. | ||
1740 | * port_sdma_config User port SDMA config value. | ||
1741 | * port_serial_control User port serial control value. | ||
1742 | * | ||
1743 | * This driver data flow is done using the struct pkt_info which | ||
1744 | * is a unified struct for Rx and Tx operations: | ||
1745 | * | ||
1746 | * byte_cnt Tx/Rx descriptor buffer byte count. | ||
1747 | * l4i_chk CPU provided TCP Checksum. For Tx operation | ||
1748 | * only. | ||
1749 | * cmd_sts Tx/Rx descriptor command status. | ||
1750 | * buf_ptr Tx/Rx descriptor buffer pointer. | ||
1751 | * return_info Tx/Rx user resource return information. | ||
1752 | */ | ||
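| |||
| /* | ||
|  * Illustrative sketch of the pkt_info data flow described above, on the | ||
|  * Rx side. This is a hypothetical helper kept out of the build (the | ||
|  * names example_rx_recycle_one, new_skb, new_buf and buf_size are for | ||
|  * illustration only); the driver's real receive path also handles skb | ||
|  * allocation, DMA mapping and the error cases elided here. | ||
|  */ | ||
| #if 0 | ||
| static void example_rx_recycle_one(struct mv643xx_private *mp, | ||
| struct sk_buff *new_skb, | ||
| dma_addr_t new_buf, int buf_size) | ||
| { | ||
| struct pkt_info pkt_info; | ||
| |||
| /* Take one 'current' descriptor, if the hardware is done with it */ | ||
| if (eth_port_receive(mp, &pkt_info) != ETH_OK) | ||
| return; | ||
| |||
| /* pkt_info.return_info is the skb attached at refill time; it | ||
|  * would be handed to the network stack here. */ | ||
| |||
| /* Recycle the ring slot: attach the fresh buffer to the 'used' | ||
|  * descriptor and hand it back to DMA ownership. */ | ||
| pkt_info.byte_cnt = buf_size; | ||
| pkt_info.buf_ptr = new_buf; | ||
| pkt_info.return_info = new_skb; | ||
| eth_rx_return_buff(mp, &pkt_info); | ||
| } | ||
| #endif | ||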
1753 | |||
1754 | /* defines */ | ||
1755 | /* SDMA command macros */ | ||
1756 | #define ETH_ENABLE_TX_QUEUE(eth_port) \ | ||
1757 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1) | ||
1758 | |||
1759 | /* locals */ | ||
1760 | |||
1761 | /* PHY routines */ | ||
1762 | static int ethernet_phy_get(unsigned int eth_port_num); | ||
1763 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | ||
1764 | |||
1765 | /* Ethernet Port routines */ | ||
1766 | static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | ||
1767 | int option); | ||
1768 | |||
1769 | /* | ||
1770 | * eth_port_init - Initialize the Ethernet port driver | ||
1771 | * | ||
1772 | * DESCRIPTION: | ||
1773 | * This function prepares the ethernet port to start its activity: | ||
1774 | * 1) Completes the ethernet port driver struct initialization toward the | ||
1775 | *    port start routine. | ||
1776 | * 2) Resets the device to a quiescent state in case of warm reboot. | ||
1777 | * 3) Enables SDMA access to all four DRAM banks as well as internal SRAM. | ||
1778 | * 4) Cleans MAC tables. The reset status of those tables is unknown. | ||
1779 | * 5) Sets the PHY address. | ||
1780 | * Note: Call this routine prior to eth_port_start routine and after | ||
1781 | * setting user values in the user fields of Ethernet port control | ||
1782 | * struct. | ||
1783 | * | ||
1784 | * INPUT: | ||
1785 | * struct mv643xx_private *mp Ethernet port control struct | ||
1786 | * | ||
1787 | * OUTPUT: | ||
1788 | * See description. | ||
1789 | * | ||
1790 | * RETURN: | ||
1791 | * None. | ||
1792 | */ | ||
1793 | static void eth_port_init(struct mv643xx_private *mp) | ||
1794 | { | ||
1795 | mp->port_rx_queue_command = 0; | ||
1796 | mp->port_tx_queue_command = 0; | ||
1797 | |||
1798 | mp->rx_resource_err = 0; | ||
1799 | mp->tx_resource_err = 0; | ||
1800 | |||
1801 | eth_port_reset(mp->port_num); | ||
1802 | |||
1803 | eth_port_init_mac_tables(mp->port_num); | ||
1804 | |||
1805 | ethernet_phy_reset(mp->port_num); | ||
1806 | } | ||
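| |||
| /* | ||
|  * A minimal bring-up sketch implied by the note above. This is a | ||
|  * hypothetical helper kept out of the build; the driver's open path | ||
|  * performs the real sequence, including the ring setup elided in | ||
|  * step 3. | ||
|  */ | ||
| #if 0 | ||
| static void example_port_bringup(struct mv643xx_private *mp) | ||
| { | ||
| /* 1) user fields (port_num, port_mac_addr, port_*_config) set */ | ||
| eth_port_init(mp); /* 2) reset, clean MAC tables, reset PHY */ | ||
| /* 3) ether_init_tx_desc_ring() / ether_init_rx_desc_ring() here */ | ||
| eth_port_start(mp); /* 4) program port registers and enable Rx */ | ||
| } | ||
| #endif | ||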
1807 | |||
1808 | /* | ||
1809 | * eth_port_start - Start the Ethernet port activity. | ||
1810 | * | ||
1811 | * DESCRIPTION: | ||
1812 | * This routine prepares the Ethernet port for Rx and Tx activity: | ||
1813 | * 1. Initialize Tx and Rx Current Descriptor Pointer for each queue | ||
1814 | *      that has had its descriptor ring initialized (using | ||
1815 | *      ether_init_tx_desc_ring for Tx and ether_init_rx_desc_ring for Rx) | ||
1816 | * 2. Initialize and enable the Ethernet configuration port by writing to | ||
1817 | * the port's configuration and command registers. | ||
1818 | * 3. Initialize and enable the SDMA by writing to the SDMA's | ||
1819 | * configuration and command registers. After completing these steps, | ||
1820 | *      the ethernet port SDMA can start to perform Rx and Tx activities. | ||
1821 | * | ||
1822 | * Note: Each Rx and Tx queue descriptor's list must be initialized prior | ||
1823 | * to calling this function (use ether_init_tx_desc_ring for Tx queues | ||
1824 | * and ether_init_rx_desc_ring for Rx queues). | ||
1825 | * | ||
1826 | * INPUT: | ||
1827 | * struct mv643xx_private *mp Ethernet port control struct | ||
1828 | * | ||
1829 | * OUTPUT: | ||
1830 | * Ethernet port is ready to receive and transmit. | ||
1831 | * | ||
1832 | * RETURN: | ||
1833 | * None. | ||
1834 | */ | ||
1835 | static void eth_port_start(struct mv643xx_private *mp) | ||
1836 | { | ||
1837 | unsigned int port_num = mp->port_num; | ||
1838 | int tx_curr_desc, rx_curr_desc; | ||
1839 | |||
1840 | /* Assignment of Tx CTRP of given queue */ | ||
1841 | tx_curr_desc = mp->tx_curr_desc_q; | ||
1842 | mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num), | ||
1843 | (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); | ||
1844 | |||
1845 | /* Assignment of Rx CRDP of given queue */ | ||
1846 | rx_curr_desc = mp->rx_curr_desc_q; | ||
1847 | mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num), | ||
1848 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); | ||
1849 | |||
1850 | /* Add the assigned Ethernet address to the port's address table */ | ||
1851 | eth_port_uc_addr_set(port_num, mp->port_mac_addr); | ||
1852 | |||
1853 | /* Assign port configuration and command. */ | ||
1854 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), mp->port_config); | ||
1855 | |||
1856 | mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), | ||
1857 | mp->port_config_extend); | ||
1858 | |||
1859 | |||
1860 | /* Increase the Rx side buffer size if supporting GigE */ | ||
1861 | if (mp->port_serial_control & MV643XX_ETH_SET_GMII_SPEED_TO_1000) | ||
1862 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
1863 | (mp->port_serial_control & 0xfff1ffff) | (0x5 << 17)); | ||
1864 | else | ||
1865 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
1866 | mp->port_serial_control); | ||
1867 | |||
1868 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
1869 | mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)) | | ||
1870 | MV643XX_ETH_SERIAL_PORT_ENABLE); | ||
1871 | |||
1872 | /* Assign port SDMA configuration */ | ||
1873 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), | ||
1874 | mp->port_sdma_config); | ||
1875 | |||
1876 | /* Enable port Rx. */ | ||
1877 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | ||
1878 | mp->port_rx_queue_command); | ||
1879 | } | ||
1880 | |||
1881 | /* | ||
1882 | * eth_port_uc_addr_set - This function sets the port Unicast address. | ||
1883 | * | ||
1884 | * DESCRIPTION: | ||
1885 | * This function sets the port Ethernet MAC address. | ||
1886 | * | ||
1887 | * INPUT: | ||
1888 | * unsigned int eth_port_num Port number. | ||
1889 | * char * p_addr Address to be set | ||
1890 | * | ||
1891 | * OUTPUT: | ||
1892 | * Sets MAC address low and high registers. Also calls eth_port_uc_addr() | ||
1893 | * to set the unicast table with the proper information. | ||
1894 | * | ||
1895 | * RETURN: | ||
1896 | * N/A. | ||
1897 | * | ||
1898 | */ | ||
1899 | static void eth_port_uc_addr_set(unsigned int eth_port_num, | ||
1900 | unsigned char *p_addr) | ||
1901 | { | ||
1902 | unsigned int mac_h; | ||
1903 | unsigned int mac_l; | ||
1904 | |||
1905 | mac_l = (p_addr[4] << 8) | (p_addr[5]); | ||
1906 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | | ||
1907 | (p_addr[3] << 0); | ||
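| /* e.g. 00:11:22:33:44:55 -> mac_h = 0x00112233, mac_l = 0x00004455 */ | ||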
1908 | |||
1909 | mv_write(MV643XX_ETH_MAC_ADDR_LOW(eth_port_num), mac_l); | ||
1910 | mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h); | ||
1911 | |||
1912 | /* Accept frames of this address */ | ||
1913 | eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR); | ||
1914 | |||
1915 | return; | ||
1916 | } | ||
1917 | |||
1918 | /* | ||
1919 | * eth_port_uc_addr_get - This function retrieves the port Unicast address | ||
1920 | * (MAC address) from the ethernet hw registers. | ||
1921 | * | ||
1922 | * DESCRIPTION: | ||
1923 | * This function retrieves the port Ethernet MAC address. | ||
1924 | * | ||
1925 | * INPUT: | ||
1926 | * struct net_device *dev Network device struct. | ||
1927 | * char *p_addr Pointer where the MAC address is stored. | ||
1928 | * | ||
1929 | * OUTPUT: | ||
1930 | * Copy the MAC address to the location pointed to by p_addr. | ||
1931 | * | ||
1932 | * RETURN: | ||
1933 | * N/A. | ||
1934 | * | ||
1935 | */ | ||
1936 | static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr) | ||
1937 | { | ||
1938 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1939 | unsigned int mac_h; | ||
1940 | unsigned int mac_l; | ||
1941 | |||
1942 | mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(mp->port_num)); | ||
1943 | mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(mp->port_num)); | ||
1944 | |||
1945 | p_addr[0] = (mac_h >> 24) & 0xff; | ||
1946 | p_addr[1] = (mac_h >> 16) & 0xff; | ||
1947 | p_addr[2] = (mac_h >> 8) & 0xff; | ||
1948 | p_addr[3] = mac_h & 0xff; | ||
1949 | p_addr[4] = (mac_l >> 8) & 0xff; | ||
1950 | p_addr[5] = mac_l & 0xff; | ||
1951 | } | ||
1952 | |||
1953 | /* | ||
1954 | * eth_port_uc_addr - This function sets the port unicast address table | ||
1955 | * | ||
1956 | * DESCRIPTION: | ||
1957 | * This function locates the proper entry in the Unicast table for the | ||
1958 | * specified MAC nibble and sets its properties according to function | ||
1959 | * parameters. | ||
1960 | * | ||
1961 | * INPUT: | ||
1962 | * unsigned int eth_port_num Port number. | ||
1963 | * unsigned char uc_nibble Unicast MAC Address last nibble. | ||
1964 | * int option ACCEPT_MAC_ADDR or REJECT_MAC_ADDR. | ||
1965 | * | ||
1966 | * OUTPUT: | ||
1967 | * This function adds/removes MAC addresses from the port unicast address | ||
1968 | * table. | ||
1969 | * | ||
1970 | * RETURN: | ||
1971 | * 1 if the operation succeeded. | ||
1972 | * 0 if the option parameter is invalid. | ||
1973 | * | ||
1974 | */ | ||
1975 | static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | ||
1976 | int option) | ||
1977 | { | ||
1978 | unsigned int unicast_reg; | ||
1979 | unsigned int tbl_offset; | ||
1980 | unsigned int reg_offset; | ||
1981 | |||
1982 | /* Locate the Unicast table entry */ | ||
1983 | uc_nibble = (0xf & uc_nibble); | ||
1984 | tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */ | ||
1985 | reg_offset = uc_nibble % 4; /* Entry offset within the above register */ | ||
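| /* e.g. uc_nibble 0x5 -> tbl_offset = 4, reg_offset = 1: the entry | ||
|  * lives in byte 1 of the second 32-bit table register */ | ||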
1986 | |||
1987 | switch (option) { | ||
1988 | case REJECT_MAC_ADDR: | ||
1989 | /* Clear accepts frame bit at given unicast DA table entry */ | ||
1990 | unicast_reg = mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1991 | (eth_port_num) + tbl_offset)); | ||
1992 | |||
1993 | unicast_reg &= ~(0x01 << (8 * reg_offset)); | ||
1994 | |||
1995 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1996 | (eth_port_num) + tbl_offset), unicast_reg); | ||
1997 | break; | ||
1998 | |||
1999 | case ACCEPT_MAC_ADDR: | ||
2000 | /* Set accepts frame bit at unicast DA filter table entry */ | ||
2001 | unicast_reg = | ||
2002 | mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
2003 | (eth_port_num) + tbl_offset)); | ||
2004 | |||
2005 | unicast_reg |= (0x01 << (8 * reg_offset)); | ||
2006 | |||
2007 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
2008 | (eth_port_num) + tbl_offset), unicast_reg); | ||
2009 | |||
2010 | break; | ||
2011 | |||
2012 | default: | ||
2013 | return 0; | ||
2014 | } | ||
2015 | |||
2016 | return 1; | ||
2017 | } | ||
2018 | |||
2019 | /* | ||
2020 | * eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables | ||
2021 | * | ||
2022 | * DESCRIPTION: | ||
2023 | * Go through all the DA filter tables (Unicast, Special Multicast & | ||
2024 | * Other Multicast) and set each entry to 0. | ||
2025 | * | ||
2026 | * INPUT: | ||
2027 | * unsigned int eth_port_num Ethernet Port number. | ||
2028 | * | ||
2029 | * OUTPUT: | ||
2030 | * Multicast and Unicast packets are rejected. | ||
2031 | * | ||
2032 | * RETURN: | ||
2033 | * None. | ||
2034 | */ | ||
2035 | static void eth_port_init_mac_tables(unsigned int eth_port_num) | ||
2036 | { | ||
2037 | int table_index; | ||
2038 | |||
2039 | /* Clear DA filter unicast table (Ex_dFUT) */ | ||
2040 | for (table_index = 0; table_index <= 0xC; table_index += 4) | ||
2041 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
2042 | (eth_port_num) + table_index), 0); | ||
2043 | |||
2044 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | ||
2045 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | ||
2046 | mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE | ||
2047 | (eth_port_num) + table_index), 0); | ||
2048 | /* Clear DA filter other multicast table (Ex_dFOMT) */ | ||
2049 | mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE | ||
2050 | (eth_port_num) + table_index), 0); | ||
2051 | } | ||
2052 | } | ||
2053 | |||
2054 | /* | ||
2055 | * eth_clear_mib_counters - Clear all MIB counters | ||
2056 | * | ||
2057 | * DESCRIPTION: | ||
2058 | * This function clears all MIB counters of a specific ethernet port. | ||
2059 | * A read from the MIB counter will reset the counter. | ||
2060 | * | ||
2061 | * INPUT: | ||
2062 | * unsigned int eth_port_num Ethernet Port number. | ||
2063 | * | ||
2064 | * OUTPUT: | ||
2065 | * After reading all MIB counters, the counters reset. | ||
2066 | * | ||
2067 | * RETURN: | ||
2068 | * None. | ||
2069 | * | ||
2070 | */ | ||
2071 | static void eth_clear_mib_counters(unsigned int eth_port_num) | ||
2072 | { | ||
2073 | int i; | ||
2074 | |||
2075 | /* Perform dummy reads from MIB counters */ | ||
2076 | for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i <= ETH_MIB_LATE_COLLISION; | ||
2077 | i += 4) | ||
2078 | mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i); | ||
2079 | } | ||
2080 | |||
2081 | static inline u32 read_mib(struct mv643xx_private *mp, int offset) | ||
2082 | { | ||
2083 | return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset); | ||
2084 | } | ||
2085 | |||
2086 | static void eth_update_mib_counters(struct mv643xx_private *mp) | ||
2087 | { | ||
2088 | struct mv643xx_mib_counters *p = &mp->mib_counters; | ||
2089 | int offset; | ||
2090 | |||
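| /* The MIB registers are clear-on-read, so each 64-bit octet counter | ||
|  * is accumulated from its LOW/HIGH 32-bit register pair */ | ||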
2091 | p->good_octets_received += | ||
2092 | read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_LOW); | ||
2093 | p->good_octets_received += | ||
2094 | (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_RECEIVED_HIGH) << 32; | ||
2095 | |||
2096 | for (offset = ETH_MIB_BAD_OCTETS_RECEIVED; | ||
2097 | offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS; | ||
2098 | offset += 4) | ||
2099 | *(u32 *)((char *)p + offset) = read_mib(mp, offset); | ||
2100 | |||
2101 | p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW); | ||
2102 | p->good_octets_sent += | ||
2103 | (u64)read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_HIGH) << 32; | ||
2104 | |||
2105 | for (offset = ETH_MIB_GOOD_FRAMES_SENT; | ||
2106 | offset <= ETH_MIB_LATE_COLLISION; | ||
2107 | offset += 4) | ||
2108 | *(u32 *)((char *)p + offset) = read_mib(mp, offset); | ||
2109 | } | ||
2110 | |||
2111 | /* | ||
2112 | * ethernet_phy_detect - Detect whether a phy is present | ||
2113 | * | ||
2114 | * DESCRIPTION: | ||
2115 | * This function tests whether there is a PHY present on | ||
2116 | * the specified port. | ||
2117 | * | ||
2118 | * INPUT: | ||
2119 | * unsigned int eth_port_num Ethernet Port number. | ||
2120 | * | ||
2121 | * OUTPUT: | ||
2122 | * None | ||
2123 | * | ||
2124 | * RETURN: | ||
2125 | * 0 on success | ||
2126 | * -ENODEV on failure | ||
2127 | * | ||
2128 | */ | ||
2129 | static int ethernet_phy_detect(unsigned int port_num) | ||
2130 | { | ||
2131 | unsigned int phy_reg_data0; | ||
2132 | int auto_neg; | ||
2133 | |||
2134 | eth_port_read_smi_reg(port_num, 0, &phy_reg_data0); | ||
2135 | auto_neg = phy_reg_data0 & 0x1000; | ||
2136 | phy_reg_data0 ^= 0x1000; /* invert auto_neg */ | ||
2137 | eth_port_write_smi_reg(port_num, 0, phy_reg_data0); | ||
2138 | |||
2139 | eth_port_read_smi_reg(port_num, 0, &phy_reg_data0); | ||
2140 | if ((phy_reg_data0 & 0x1000) == auto_neg) | ||
2141 | return -ENODEV; /* change didn't take */ | ||
2142 | |||
2143 | phy_reg_data0 ^= 0x1000; | ||
2144 | eth_port_write_smi_reg(port_num, 0, phy_reg_data0); | ||
2145 | return 0; | ||
2146 | } | ||
2147 | |||
2148 | /* | ||
2149 | * ethernet_phy_get - Get the ethernet port PHY address. | ||
2150 | * | ||
2151 | * DESCRIPTION: | ||
2152 | * This routine returns the given ethernet port PHY address. | ||
2153 | * | ||
2154 | * INPUT: | ||
2155 | * unsigned int eth_port_num Ethernet Port number. | ||
2156 | * | ||
2157 | * OUTPUT: | ||
2158 | * None. | ||
2159 | * | ||
2160 | * RETURN: | ||
2161 | * PHY address. | ||
2162 | * | ||
2163 | */ | ||
2164 | static int ethernet_phy_get(unsigned int eth_port_num) | ||
2165 | { | ||
2166 | unsigned int reg_data; | ||
2167 | |||
2168 | reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); | ||
2169 | |||
2170 | return ((reg_data >> (5 * eth_port_num)) & 0x1f); | ||
2171 | } | ||
2172 | |||
2173 | /* | ||
2174 | * ethernet_phy_set - Set the ethernet port PHY address. | ||
2175 | * | ||
2176 | * DESCRIPTION: | ||
2177 | * This routine sets the given ethernet port PHY address. | ||
2178 | * | ||
2179 | * INPUT: | ||
2180 | * unsigned int eth_port_num Ethernet Port number. | ||
2181 | * int phy_addr PHY address. | ||
2182 | * | ||
2183 | * OUTPUT: | ||
2184 | * None. | ||
2185 | * | ||
2186 | * RETURN: | ||
2187 | * None. | ||
2188 | * | ||
2189 | */ | ||
2190 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr) | ||
2191 | { | ||
2192 | u32 reg_data; | ||
2193 | int addr_shift = 5 * eth_port_num; | ||
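| /* five PHY address bits per port, e.g. port 1 -> bits 9:5 */ | ||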
2194 | |||
2195 | reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG); | ||
2196 | reg_data &= ~(0x1f << addr_shift); | ||
2197 | reg_data |= (phy_addr & 0x1f) << addr_shift; | ||
2198 | mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data); | ||
2199 | } | ||
2200 | |||
2201 | /* | ||
2202 | * ethernet_phy_reset - Reset Ethernet port PHY. | ||
2203 | * | ||
2204 | * DESCRIPTION: | ||
2205 | * This routine utilizes the SMI interface to reset the ethernet port PHY. | ||
2206 | * | ||
2207 | * INPUT: | ||
2208 | * unsigned int eth_port_num Ethernet Port number. | ||
2209 | * | ||
2210 | * OUTPUT: | ||
2211 | * The PHY is reset. | ||
2212 | * | ||
2213 | * RETURN: | ||
2214 | * None. | ||
2215 | * | ||
2216 | */ | ||
2217 | static void ethernet_phy_reset(unsigned int eth_port_num) | ||
2218 | { | ||
2219 | unsigned int phy_reg_data; | ||
2220 | |||
2221 | /* Reset the PHY */ | ||
2222 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); | ||
2223 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ | ||
2224 | eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); | ||
2225 | } | ||
2226 | |||
2227 | /* | ||
2228 | * eth_port_reset - Reset Ethernet port | ||
2229 | * | ||
2230 | * DESCRIPTION: | ||
2231 | * This routine resets the chip by aborting any SDMA engine activity and | ||
2232 | * clearing the MIB counters. The Receiver and the Transmit unit are in | ||
2233 | * idle state after this command is performed and the port is disabled. | ||
2234 | * | ||
2235 | * INPUT: | ||
2236 | * unsigned int eth_port_num Ethernet Port number. | ||
2237 | * | ||
2238 | * OUTPUT: | ||
2239 | * Channel activity is halted. | ||
2240 | * | ||
2241 | * RETURN: | ||
2242 | * None. | ||
2243 | * | ||
2244 | */ | ||
2245 | static void eth_port_reset(unsigned int port_num) | ||
2246 | { | ||
2247 | unsigned int reg_data; | ||
2248 | |||
2249 | /* Stop Tx port activity. Check port Tx activity. */ | ||
2250 | reg_data = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)); | ||
2251 | |||
2252 | if (reg_data & 0xFF) { | ||
2253 | /* Issue stop command for active channels only */ | ||
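| /* The per-queue stop bits sit 8 above the enable bits, so shifting | ||
|  * the active-queue mask left by 8 requests a stop of those queues */ | ||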
2254 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), | ||
2255 | (reg_data << 8)); | ||
2256 | |||
2257 | /* Wait for all Tx activity to terminate. */ | ||
2258 | /* Check port cause register that all Tx queues are stopped */ | ||
2259 | while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2260 | & 0xFF) | ||
2261 | udelay(10); | ||
2262 | } | ||
2263 | |||
2264 | /* Stop Rx port activity. Check port Rx activity. */ | ||
2265 | reg_data = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)); | ||
2266 | |||
2267 | if (reg_data & 0xFF) { | ||
2268 | /* Issue stop command for active channels only */ | ||
2269 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | ||
2270 | (reg_data << 8)); | ||
2271 | |||
2272 | /* Wait for all Rx activity to terminate. */ | ||
2273 | /* Check port cause register that all Rx queues are stopped */ | ||
2274 | while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2275 | & 0xFF) | ||
2276 | udelay(10); | ||
2277 | } | ||
2278 | |||
2279 | /* Clear all MIB counters */ | ||
2280 | eth_clear_mib_counters(port_num); | ||
2281 | |||
2282 | /* Reset the Enable bit in the Configuration Register */ | ||
2283 | reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
2284 | reg_data &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; | ||
2285 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); | ||
2286 | } | ||
2287 | |||
2288 | /* | ||
2289 | * ethernet_set_config_reg - Set specified bits in configuration register. | ||
2290 | * | ||
2291 | * DESCRIPTION: | ||
2292 | * This function sets specified bits in the given ethernet | ||
2293 | * configuration register. | ||
2294 | * | ||
2295 | * INPUT: | ||
2296 | * unsigned int eth_port_num Ethernet Port number. | ||
2297 | * unsigned int value 32 bit value. | ||
2298 | * | ||
2299 | * OUTPUT: | ||
2300 | * The set bits in the value parameter are set in the configuration | ||
2301 | * register. | ||
2302 | * | ||
2303 | * RETURN: | ||
2304 | * None. | ||
2305 | * | ||
2306 | */ | ||
2307 | static void ethernet_set_config_reg(unsigned int eth_port_num, | ||
2308 | unsigned int value) | ||
2309 | { | ||
2310 | unsigned int eth_config_reg; | ||
2311 | |||
2312 | eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num)); | ||
2313 | eth_config_reg |= value; | ||
2314 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(eth_port_num), eth_config_reg); | ||
2315 | } | ||
2316 | |||
2317 | static int eth_port_autoneg_supported(unsigned int eth_port_num) | ||
2318 | { | ||
2319 | unsigned int phy_reg_data0; | ||
2320 | |||
2321 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data0); | ||
2322 | |||
2323 | return phy_reg_data0 & 0x1000; | ||
2324 | } | ||
2325 | |||
2326 | static int eth_port_link_is_up(unsigned int eth_port_num) | ||
2327 | { | ||
2328 | unsigned int phy_reg_data1; | ||
2329 | |||
2330 | eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data1); | ||
2331 | |||
2332 | if (eth_port_autoneg_supported(eth_port_num)) { | ||
2333 | if (phy_reg_data1 & 0x20) /* auto-neg complete */ | ||
2334 | return 1; | ||
2335 | } else if (phy_reg_data1 & 0x4) /* link up */ | ||
2336 | return 1; | ||
2337 | |||
2338 | return 0; | ||
2339 | } | ||
2340 | |||
2341 | /* | ||
2342 | * ethernet_get_config_reg - Get the port configuration register | ||
2343 | * | ||
2344 | * DESCRIPTION: | ||
2345 | * This function returns the configuration register value of the given | ||
2346 | * ethernet port. | ||
2347 | * | ||
2348 | * INPUT: | ||
2349 | * unsigned int eth_port_num Ethernet Port number. | ||
2350 | * | ||
2351 | * OUTPUT: | ||
2352 | * None. | ||
2353 | * | ||
2354 | * RETURN: | ||
2355 | * Port configuration register value. | ||
2356 | */ | ||
2357 | static unsigned int ethernet_get_config_reg(unsigned int eth_port_num) | ||
2358 | { | ||
2359 | unsigned int eth_config_reg; | ||
2360 | |||
2361 | eth_config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG | ||
2362 | (eth_port_num)); | ||
2363 | return eth_config_reg; | ||
2364 | } | ||
2365 | |||
2366 | /* | ||
2367 | * eth_port_read_smi_reg - Read PHY registers | ||
2368 | * | ||
2369 | * DESCRIPTION: | ||
2370 | * This routine utilizes the SMI interface to interact with the PHY in | ||
2371 | * order to perform PHY register read. | ||
2372 | * | ||
2373 | * INPUT: | ||
2374 | * unsigned int port_num Ethernet Port number. | ||
2375 | * unsigned int phy_reg PHY register address offset. | ||
2376 | * unsigned int *value Register value buffer. | ||
2377 | * | ||
2378 | * OUTPUT: | ||
2379 | * Write the value of a specified PHY register into given buffer. | ||
2380 | * | ||
2381 | * RETURN: | ||
2382 | * None. On a PHY busy or read timeout, an error is logged and | ||
2383 | * *value is left unchanged. | ||
2384 | * | ||
2385 | */ | ||
2386 | static void eth_port_read_smi_reg(unsigned int port_num, | ||
2387 | unsigned int phy_reg, unsigned int *value) | ||
2388 | { | ||
2389 | int phy_addr = ethernet_phy_get(port_num); | ||
2390 | unsigned long flags; | ||
2391 | int i; | ||
2392 | |||
2393 | /* the SMI register is a shared resource */ | ||
2394 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); | ||
2395 | |||
2396 | /* wait for the SMI register to become available */ | ||
2397 | for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { | ||
2398 | if (i == PHY_WAIT_ITERATIONS) { | ||
2399 | printk(KERN_WARNING "mv643xx PHY busy timeout, port %d\n", port_num); | ||
2400 | goto out; | ||
2401 | } | ||
2402 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2403 | } | ||
2404 | |||
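| /* SMI register layout: data in bits [15:0], PHY address in [20:16], | ||
|  * PHY register in [25:21], opcode in the bits above (per datasheet) */ | ||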
2405 | mv_write(MV643XX_ETH_SMI_REG, | ||
2406 | (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); | ||
2407 | |||
2408 | /* now wait for the data to be valid */ | ||
2409 | for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) { | ||
2410 | if (i == PHY_WAIT_ITERATIONS) { | ||
2411 | printk(KERN_WARNING "mv643xx PHY read timeout, port %d\n", port_num); | ||
2412 | goto out; | ||
2413 | } | ||
2414 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2415 | } | ||
2416 | |||
2417 | *value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff; | ||
2418 | out: | ||
2419 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); | ||
2420 | } | ||
2421 | |||
2422 | /* | ||
2423 | * eth_port_write_smi_reg - Write to PHY registers | ||
2424 | * | ||
2425 | * DESCRIPTION: | ||
2426 | * This routine utilizes the SMI interface to interact with the PHY in | ||
2427 | * order to perform writes to PHY registers. | ||
2428 | * | ||
2429 | * INPUT: | ||
2430 | * unsigned int eth_port_num Ethernet Port number. | ||
2431 | * unsigned int phy_reg PHY register address offset. | ||
2432 | * unsigned int value Register value. | ||
2433 | * | ||
2434 | * OUTPUT: | ||
2435 | * Write the given value to the specified PHY register. | ||
2436 | * | ||
2437 | * RETURN: | ||
2438 | * None. On a PHY busy timeout, an error is logged and the write | ||
2439 | * is skipped. | ||
2440 | * | ||
2441 | */ | ||
2442 | static void eth_port_write_smi_reg(unsigned int eth_port_num, | ||
2443 | unsigned int phy_reg, unsigned int value) | ||
2444 | { | ||
2445 | int phy_addr; | ||
2446 | int i; | ||
2447 | unsigned long flags; | ||
2448 | |||
2449 | phy_addr = ethernet_phy_get(eth_port_num); | ||
2450 | |||
2451 | /* the SMI register is a shared resource */ | ||
2452 | spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); | ||
2453 | |||
2454 | /* wait for the SMI register to become available */ | ||
2455 | for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) { | ||
2456 | if (i == PHY_WAIT_ITERATIONS) { | ||
2457 | printk(KERN_WARNING "mv643xx PHY busy timeout, port %d\n", | ||
2458 | eth_port_num); | ||
2459 | goto out; | ||
2460 | } | ||
2461 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2462 | } | ||
2463 | |||
2464 | mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) | | ||
2465 | ETH_SMI_OPCODE_WRITE | (value & 0xffff)); | ||
2466 | out: | ||
2467 | spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); | ||
2468 | } | ||
2469 | |||
2470 | /* | ||
2471 | * eth_port_send - Send an Ethernet packet | ||
2472 | * | ||
2473 | * DESCRIPTION: | ||
2474 | * This routine sends a given packet described by the p_pkt_info | ||
2475 | * parameter. It supports transmitting a packet spanned over multiple | ||
2476 | * buffers. The routine updates the 'curr' and 'first' indexes according | ||
2477 | * to the packet segment passed to the routine. In case the packet | ||
2478 | * segment is first, the 'first' index is updated. In any case, the | ||
2479 | * 'curr' index is updated. If the routine gets into a Tx resource | ||
2480 | * error, it assigns the 'curr' index as 'first'. This way the function | ||
2481 | * can abort the Tx process of multiple descriptors per packet. | ||
2482 | * | ||
2483 | * INPUT: | ||
2484 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
2485 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2486 | * | ||
2487 | * OUTPUT: | ||
2488 | * Tx ring 'curr' and 'first' indexes are updated. | ||
2489 | * | ||
2490 | * RETURN: | ||
2491 | * ETH_QUEUE_FULL in case of Tx resource error. | ||
2492 | * ETH_ERROR in case the routine can not access Tx desc ring. | ||
2493 | * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource. | ||
2494 | * ETH_OK otherwise. | ||
2495 | * | ||
2496 | */ | ||
2497 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
2498 | /* | ||
2499 | * Modified to include the first descriptor pointer in case of SG | ||
2500 | */ | ||
2501 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
2502 | struct pkt_info *p_pkt_info) | ||
2503 | { | ||
2504 | int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc; | ||
2505 | struct eth_tx_desc *current_descriptor; | ||
2506 | struct eth_tx_desc *first_descriptor; | ||
2507 | u32 command; | ||
2508 | |||
2509 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2510 | if (mp->tx_resource_err) | ||
2511 | return ETH_QUEUE_FULL; | ||
2512 | |||
2513 | /* | ||
2514 | * The hardware requires that each buffer that is <= 8 bytes | ||
2515 | * in length must be aligned on an 8 byte boundary. | ||
2516 | */ | ||
2517 | if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) { | ||
2518 | printk(KERN_ERR | ||
2519 | "mv643xx_eth port %d: packet size <= 8 problem\n", | ||
2520 | mp->port_num); | ||
2521 | return ETH_ERROR; | ||
2522 | } | ||
2523 | |||
2524 | /* Get the Tx Desc ring indexes */ | ||
2525 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2526 | tx_desc_used = mp->tx_used_desc_q; | ||
2527 | |||
2528 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2529 | |||
2530 | tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2531 | |||
2532 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2533 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2534 | current_descriptor->l4i_chk = p_pkt_info->l4i_chk; | ||
2535 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2536 | |||
2537 | command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC | | ||
2538 | ETH_BUFFER_OWNED_BY_DMA; | ||
2539 | if (command & ETH_TX_FIRST_DESC) { | ||
2540 | tx_first_desc = tx_desc_curr; | ||
2541 | mp->tx_first_desc_q = tx_first_desc; | ||
2542 | first_descriptor = current_descriptor; | ||
2543 | mp->tx_first_command = command; | ||
2544 | } else { | ||
2545 | tx_first_desc = mp->tx_first_desc_q; | ||
2546 | first_descriptor = &mp->p_tx_desc_area[tx_first_desc]; | ||
2547 | BUG_ON(first_descriptor == NULL); | ||
2548 | current_descriptor->cmd_sts = command; | ||
2549 | } | ||
2550 | |||
2551 | if (command & ETH_TX_LAST_DESC) { | ||
2552 | wmb(); | ||
2553 | first_descriptor->cmd_sts = mp->tx_first_command; | ||
2554 | |||
2555 | wmb(); | ||
2556 | ETH_ENABLE_TX_QUEUE(mp->port_num); | ||
2557 | |||
2558 | /* | ||
2559 | * Finish Tx packet. Update first desc in case of Tx resource error. | ||
2560 | */ | ||
2561 | tx_first_desc = tx_next_desc; | ||
2562 | mp->tx_first_desc_q = tx_first_desc; | ||
2563 | } | ||
2564 | |||
2565 | /* Check for ring index overlap in the Tx desc ring */ | ||
2566 | if (tx_next_desc == tx_desc_used) { | ||
2567 | mp->tx_resource_err = 1; | ||
2568 | mp->tx_curr_desc_q = tx_first_desc; | ||
2569 | |||
2570 | return ETH_QUEUE_LAST_RESOURCE; | ||
2571 | } | ||
2572 | |||
2573 | mp->tx_curr_desc_q = tx_next_desc; | ||
2574 | |||
2575 | return ETH_OK; | ||
2576 | } | ||
2577 | #else | ||
2578 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
2579 | struct pkt_info *p_pkt_info) | ||
2580 | { | ||
2581 | int tx_desc_curr; | ||
2582 | int tx_desc_used; | ||
2583 | struct eth_tx_desc *current_descriptor; | ||
2584 | unsigned int command_status; | ||
2585 | |||
2586 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2587 | if (mp->tx_resource_err) | ||
2588 | return ETH_QUEUE_FULL; | ||
2589 | |||
2590 | /* Get the Tx Desc ring indexes */ | ||
2591 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2592 | tx_desc_used = mp->tx_used_desc_q; | ||
2593 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2594 | |||
2595 | command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC; | ||
2596 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2597 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2598 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2599 | |||
2600 | /* Set last desc with DMA ownership and interrupt enable. */ | ||
2601 | wmb(); | ||
2602 | current_descriptor->cmd_sts = command_status | | ||
2603 | ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT; | ||
2604 | |||
2605 | wmb(); | ||
2606 | ETH_ENABLE_TX_QUEUE(mp->port_num); | ||
2607 | |||
2608 | /* Finish Tx packet. Update first desc in case of Tx resource error */ | ||
2609 | tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2610 | |||
2611 | /* Update the current descriptor */ | ||
2612 | mp->tx_curr_desc_q = tx_desc_curr; | ||
2613 | |||
2614 | /* Check for ring index overlap in the Tx desc ring */ | ||
2615 | if (tx_desc_curr == tx_desc_used) { | ||
2616 | mp->tx_resource_err = 1; | ||
2617 | return ETH_QUEUE_LAST_RESOURCE; | ||
2618 | } | ||
2619 | |||
2620 | return ETH_OK; | ||
2621 | } | ||
2622 | #endif | ||
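| |||
| /* | ||
|  * Illustrative sketch of a Scatter-Gather submission (hypothetical | ||
|  * helper kept out of the build): queueing a two-fragment packet | ||
|  * through eth_port_send() in the checksum-offload build. The fragment | ||
|  * carrying ETH_TX_FIRST_DESC latches the 'first' index; the one | ||
|  * carrying ETH_TX_LAST_DESC hands the chain to the DMA and kicks the | ||
|  * Tx queue. | ||
|  */ | ||
| #if 0 | ||
| static ETH_FUNC_RET_STATUS example_sg_send(struct mv643xx_private *mp, | ||
| struct pkt_info *head, | ||
| struct pkt_info *tail) | ||
| { | ||
| ETH_FUNC_RET_STATUS ret; | ||
| |||
| head->cmd_sts |= ETH_TX_FIRST_DESC; | ||
| ret = eth_port_send(mp, head); | ||
| if (ret != ETH_OK) | ||
| return ret; | ||
| |||
| tail->cmd_sts |= ETH_TX_LAST_DESC | ETH_TX_ENABLE_INTERRUPT; | ||
| return eth_port_send(mp, tail); | ||
| } | ||
| #endif | ||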
2623 | |||
2624 | /* | ||
2625 | * eth_tx_return_desc - Free all used Tx descriptors | ||
2626 | * | ||
2627 | * DESCRIPTION: | ||
2628 | * This routine returns the transmitted packet information to the caller. | ||
2629 | * It uses the 'first' index to support Tx desc return in case a transmit | ||
2630 | * of a packet spanned over multiple buffers is still in process. | ||
2631 | * In case the Tx queue was in "resource error" condition, where there are | ||
2632 | * no available Tx resources, the function resets the resource error flag. | ||
2633 | * | ||
2634 | * INPUT: | ||
2635 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
2636 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2637 | * | ||
2638 | * OUTPUT: | ||
2639 | * Tx ring 'first' and 'used' indexes are updated. | ||
2640 | * | ||
2641 | * RETURN: | ||
2642 | * ETH_ERROR in case the routine can not access Tx desc ring. | ||
2643 | * ETH_RETRY in case there is transmission in process. | ||
2644 | * ETH_END_OF_JOB if the routine has nothing to release. | ||
2645 | * ETH_OK otherwise. | ||
2646 | * | ||
2647 | */ | ||
2648 | static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, | ||
2649 | struct pkt_info *p_pkt_info) | ||
2650 | { | ||
2651 | int tx_desc_used; | ||
2652 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
2653 | int tx_busy_desc = mp->tx_first_desc_q; | ||
2654 | #else | ||
2655 | int tx_busy_desc = mp->tx_curr_desc_q; | ||
2656 | #endif | ||
2657 | struct eth_tx_desc *p_tx_desc_used; | ||
2658 | unsigned int command_status; | ||
2659 | |||
2660 | /* Get the Tx Desc ring indexes */ | ||
2661 | tx_desc_used = mp->tx_used_desc_q; | ||
2662 | |||
2663 | p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; | ||
2664 | |||
2665 | /* Sanity check */ | ||
2666 | if (p_tx_desc_used == NULL) | ||
2667 | return ETH_ERROR; | ||
2668 | |||
2669 | /* Stop release. About to overlap the current available Tx descriptor */ | ||
2670 | if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) | ||
2671 | return ETH_END_OF_JOB; | ||
2672 | |||
2673 | command_status = p_tx_desc_used->cmd_sts; | ||
2674 | |||
2675 | /* Still transmitting... */ | ||
2676 | if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) | ||
2677 | return ETH_RETRY; | ||
2678 | |||
2679 | /* Pass the packet information to the caller */ | ||
2680 | p_pkt_info->cmd_sts = command_status; | ||
2681 | p_pkt_info->return_info = mp->tx_skb[tx_desc_used]; | ||
2682 | mp->tx_skb[tx_desc_used] = NULL; | ||
2683 | |||
2684 | /* Update the next descriptor to release. */ | ||
2685 | mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size; | ||
2686 | |||
2687 | /* Any Tx return cancels the Tx resource error status */ | ||
2688 | mp->tx_resource_err = 0; | ||
2689 | |||
2690 | return ETH_OK; | ||
2691 | } | ||
2692 | |||
2693 | /* | ||
2694 | * eth_port_receive - Get received information from Rx ring. | ||
2695 | * | ||
2696 | * DESCRIPTION: | ||
2697 | * This routine returns the received data to the caller. There is no | ||
2698 | * data copying during routine operation. All information is returned | ||
2699 | * using pointer to packet information struct passed from the caller. | ||
2700 | * If the routine exhausts Rx ring resources then the resource error flag | ||
2701 | * is set. | ||
2702 | * | ||
2703 | * INPUT: | ||
2704 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
2705 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2706 | * | ||
2707 | * OUTPUT: | ||
2708 | * Rx ring current and used indexes are updated. | ||
2709 | * | ||
2710 | * RETURN: | ||
2711 | * ETH_ERROR in case the routine can not access Rx desc ring. | ||
2712 | * ETH_QUEUE_FULL if Rx ring resources are exhausted. | ||
2713 | * ETH_END_OF_JOB if there is no received data. | ||
2714 | * ETH_OK otherwise. | ||
2715 | */ | ||
2716 | static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, | ||
2717 | struct pkt_info *p_pkt_info) | ||
2718 | { | ||
2719 | int rx_next_curr_desc, rx_curr_desc, rx_used_desc; | ||
2720 | volatile struct eth_rx_desc *p_rx_desc; | ||
2721 | unsigned int command_status; | ||
2722 | |||
2723 | /* Do not process Rx ring in case of Rx ring resource error */ | ||
2724 | if (mp->rx_resource_err) | ||
2725 | return ETH_QUEUE_FULL; | ||
2726 | |||
2727 | /* Get the Rx Desc ring 'curr' and 'used' indexes */ | ||
2728 | rx_curr_desc = mp->rx_curr_desc_q; | ||
2729 | rx_used_desc = mp->rx_used_desc_q; | ||
2730 | |||
2731 | p_rx_desc = &mp->p_rx_desc_area[rx_curr_desc]; | ||
2732 | |||
2733 | /* The following parameters are used to save readings from memory */ | ||
2734 | command_status = p_rx_desc->cmd_sts; | ||
2735 | rmb(); | ||
2736 | |||
2737 | /* Nothing to receive... */ | ||
2738 | if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) | ||
2739 | return ETH_END_OF_JOB; | ||
2740 | |||
2741 | p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET; | ||
2742 | p_pkt_info->cmd_sts = command_status; | ||
2743 | p_pkt_info->buf_ptr = (p_rx_desc->buf_ptr) + RX_BUF_OFFSET; | ||
2744 | p_pkt_info->return_info = mp->rx_skb[rx_curr_desc]; | ||
2745 | p_pkt_info->l4i_chk = p_rx_desc->buf_size; | ||
2746 | |||
2747 | /* Clean the return info field to indicate that the packet has been */ | ||
2748 | /* moved to the upper layers */ | ||
2749 | mp->rx_skb[rx_curr_desc] = NULL; | ||
2750 | |||
2751 | /* Update current index in data structure */ | ||
2752 | rx_next_curr_desc = (rx_curr_desc + 1) % mp->rx_ring_size; | ||
2753 | mp->rx_curr_desc_q = rx_next_curr_desc; | ||
2754 | |||
2755 | /* Rx descriptors exhausted. Set the Rx ring resource error flag */ | ||
2756 | if (rx_next_curr_desc == rx_used_desc) | ||
2757 | mp->rx_resource_err = 1; | ||
2758 | |||
2759 | return ETH_OK; | ||
2760 | } | ||
2761 | |||
2762 | /* | ||
2763 | * eth_rx_return_buff - Returns a Rx buffer back to the Rx ring. | ||
2764 | * | ||
2765 | * DESCRIPTION: | ||
2766 | * This routine returns a Rx buffer back to the Rx ring. It retrieves the | ||
2767 | * next 'used' descriptor and attaches the returned buffer to it. | ||
2768 | * In case the Rx ring was in "resource error" condition, where there are | ||
2769 | * no available Rx resources, the function resets the resource error flag. | ||
2770 | * | ||
2771 | * INPUT: | ||
2772 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
2773 | * struct pkt_info *p_pkt_info Information on returned buffer. | ||
2774 | * | ||
2775 | * OUTPUT: | ||
2776 | * New available Rx resource in Rx descriptor ring. | ||
2777 | * | ||
2778 | * RETURN: | ||
2779 | * ETH_ERROR in case the routine can not access Rx desc ring. | ||
2780 | * ETH_OK otherwise. | ||
2781 | */ | ||
2782 | static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, | ||
2783 | struct pkt_info *p_pkt_info) | ||
2784 | { | ||
2785 | int used_rx_desc; /* Where to return Rx resource */ | ||
2786 | volatile struct eth_rx_desc *p_used_rx_desc; | ||
2787 | |||
2788 | /* Get 'used' Rx descriptor */ | ||
2789 | used_rx_desc = mp->rx_used_desc_q; | ||
2790 | p_used_rx_desc = &mp->p_rx_desc_area[used_rx_desc]; | ||
2791 | |||
2792 | p_used_rx_desc->buf_ptr = p_pkt_info->buf_ptr; | ||
2793 | p_used_rx_desc->buf_size = p_pkt_info->byte_cnt; | ||
2794 | mp->rx_skb[used_rx_desc] = p_pkt_info->return_info; | ||
2795 | |||
2796 | /* Flush the write pipe and return the descriptor to DMA ownership */ | ||
2799 | wmb(); | ||
2800 | p_used_rx_desc->cmd_sts = | ||
2801 | ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT; | ||
2802 | wmb(); | ||
2803 | |||
2804 | /* Move the used descriptor pointer to the next descriptor */ | ||
2805 | mp->rx_used_desc_q = (used_rx_desc + 1) % mp->rx_ring_size; | ||
2806 | |||
2807 | /* Any Rx return cancels the Rx resource error status */ | ||
2808 | mp->rx_resource_err = 0; | ||
2809 | |||
2810 | return ETH_OK; | ||
2811 | } | ||
2812 | |||
2813 | /************* Begin ethtool support *************************/ | ||
2814 | |||
2815 | struct mv643xx_stats { | ||
2816 | char stat_string[ETH_GSTRING_LEN]; | ||
2817 | int sizeof_stat; | ||
2818 | int stat_offset; | ||
2819 | }; | ||
2820 | |||
2821 | #define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \ | ||
2822 | offsetof(struct mv643xx_private, m) | ||
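| /* e.g. MV643XX_STAT(stats.rx_packets) expands to the value pair | ||
|  * "sizeof(((struct mv643xx_private *)0)->stats.rx_packets), | ||
|  *  offsetof(struct mv643xx_private, stats.rx_packets)", filling the | ||
|  * sizeof_stat and stat_offset fields of a table entry below */ | ||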
2823 | |||
2824 | static const struct mv643xx_stats mv643xx_gstrings_stats[] = { | ||
2825 | { "rx_packets", MV643XX_STAT(stats.rx_packets) }, | ||
2826 | { "tx_packets", MV643XX_STAT(stats.tx_packets) }, | ||
2827 | { "rx_bytes", MV643XX_STAT(stats.rx_bytes) }, | ||
2828 | { "tx_bytes", MV643XX_STAT(stats.tx_bytes) }, | ||
2829 | { "rx_errors", MV643XX_STAT(stats.rx_errors) }, | ||
2830 | { "tx_errors", MV643XX_STAT(stats.tx_errors) }, | ||
2831 | { "rx_dropped", MV643XX_STAT(stats.rx_dropped) }, | ||
2832 | { "tx_dropped", MV643XX_STAT(stats.tx_dropped) }, | ||
2833 | { "good_octets_received", MV643XX_STAT(mib_counters.good_octets_received) }, | ||
2834 | { "bad_octets_received", MV643XX_STAT(mib_counters.bad_octets_received) }, | ||
2835 | { "internal_mac_transmit_err", MV643XX_STAT(mib_counters.internal_mac_transmit_err) }, | ||
2836 | { "good_frames_received", MV643XX_STAT(mib_counters.good_frames_received) }, | ||
2837 | { "bad_frames_received", MV643XX_STAT(mib_counters.bad_frames_received) }, | ||
2838 | { "broadcast_frames_received", MV643XX_STAT(mib_counters.broadcast_frames_received) }, | ||
2839 | { "multicast_frames_received", MV643XX_STAT(mib_counters.multicast_frames_received) }, | ||
2840 | { "frames_64_octets", MV643XX_STAT(mib_counters.frames_64_octets) }, | ||
2841 | { "frames_65_to_127_octets", MV643XX_STAT(mib_counters.frames_65_to_127_octets) }, | ||
2842 | { "frames_128_to_255_octets", MV643XX_STAT(mib_counters.frames_128_to_255_octets) }, | ||
2843 | { "frames_256_to_511_octets", MV643XX_STAT(mib_counters.frames_256_to_511_octets) }, | ||
2844 | { "frames_512_to_1023_octets", MV643XX_STAT(mib_counters.frames_512_to_1023_octets) }, | ||
2845 | { "frames_1024_to_max_octets", MV643XX_STAT(mib_counters.frames_1024_to_max_octets) }, | ||
2846 | { "good_octets_sent", MV643XX_STAT(mib_counters.good_octets_sent) }, | ||
2847 | { "good_frames_sent", MV643XX_STAT(mib_counters.good_frames_sent) }, | ||
2848 | { "excessive_collision", MV643XX_STAT(mib_counters.excessive_collision) }, | ||
2849 | { "multicast_frames_sent", MV643XX_STAT(mib_counters.multicast_frames_sent) }, | ||
2850 | { "broadcast_frames_sent", MV643XX_STAT(mib_counters.broadcast_frames_sent) }, | ||
2851 | { "unrec_mac_control_received", MV643XX_STAT(mib_counters.unrec_mac_control_received) }, | ||
2852 | { "fc_sent", MV643XX_STAT(mib_counters.fc_sent) }, | ||
2853 | { "good_fc_received", MV643XX_STAT(mib_counters.good_fc_received) }, | ||
2854 | { "bad_fc_received", MV643XX_STAT(mib_counters.bad_fc_received) }, | ||
2855 | { "undersize_received", MV643XX_STAT(mib_counters.undersize_received) }, | ||
2856 | { "fragments_received", MV643XX_STAT(mib_counters.fragments_received) }, | ||
2857 | { "oversize_received", MV643XX_STAT(mib_counters.oversize_received) }, | ||
2858 | { "jabber_received", MV643XX_STAT(mib_counters.jabber_received) }, | ||
2859 | { "mac_receive_error", MV643XX_STAT(mib_counters.mac_receive_error) }, | ||
2860 | { "bad_crc_event", MV643XX_STAT(mib_counters.bad_crc_event) }, | ||
2861 | { "collision", MV643XX_STAT(mib_counters.collision) }, | ||
2862 | { "late_collision", MV643XX_STAT(mib_counters.late_collision) }, | ||
2863 | }; | ||
2864 | |||
2865 | #define MV643XX_STATS_LEN \ | ||
2866 | (sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats)) | ||
2867 | |||
2868 | static int | ||
2869 | mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
2870 | { | ||
2871 | struct mv643xx_private *mp = netdev_priv(netdev); | ||
2872 | int port_num = mp->port_num; | ||
2873 | int autoneg = eth_port_autoneg_supported(port_num); | ||
2874 | int mode_10_bit; | ||
2875 | int auto_duplex; | ||
2876 | int half_duplex = 0; | ||
2877 | int full_duplex = 0; | ||
2878 | int auto_speed; | ||
2879 | int speed_10 = 0; | ||
2880 | int speed_100 = 0; | ||
2881 | int speed_1000 = 0; | ||
2882 | |||
2883 | u32 pcs = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
2884 | u32 psr = mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)); | ||
2885 | |||
2886 | mode_10_bit = psr & MV643XX_ETH_PORT_STATUS_MODE_10_BIT; | ||
2887 | |||
2888 | if (mode_10_bit) { | ||
2889 | ecmd->supported = SUPPORTED_10baseT_Half; | ||
2890 | } else { | ||
2891 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
2892 | SUPPORTED_10baseT_Full | | ||
2893 | SUPPORTED_100baseT_Half | | ||
2894 | SUPPORTED_100baseT_Full | | ||
2895 | SUPPORTED_1000baseT_Full | | ||
2896 | (autoneg ? SUPPORTED_Autoneg : 0) | | ||
2897 | SUPPORTED_TP); | ||
2898 | |||
2899 | auto_duplex = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX); | ||
2900 | auto_speed = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII); | ||
2901 | |||
2902 | ecmd->advertising = ADVERTISED_TP; | ||
2903 | |||
2904 | if (autoneg) { | ||
2905 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
2906 | |||
2907 | if (auto_duplex) { | ||
2908 | half_duplex = 1; | ||
2909 | full_duplex = 1; | ||
2910 | } else { | ||
2911 | if (pcs & MV643XX_ETH_SET_FULL_DUPLEX_MODE) | ||
2912 | full_duplex = 1; | ||
2913 | else | ||
2914 | half_duplex = 1; | ||
2915 | } | ||
2916 | |||
2917 | if (auto_speed) { | ||
2918 | speed_10 = 1; | ||
2919 | speed_100 = 1; | ||
2920 | speed_1000 = 1; | ||
2921 | } else { | ||
2922 | if (pcs & MV643XX_ETH_SET_GMII_SPEED_TO_1000) | ||
2923 | speed_1000 = 1; | ||
2924 | else if (pcs & MV643XX_ETH_SET_MII_SPEED_TO_100) | ||
2925 | speed_100 = 1; | ||
2926 | else | ||
2927 | speed_10 = 1; | ||
2928 | } | ||
2929 | |||
2930 | if (speed_10 && half_duplex) | ||
2931 | ecmd->advertising |= ADVERTISED_10baseT_Half; | ||
2932 | if (speed_10 && full_duplex) | ||
2933 | ecmd->advertising |= ADVERTISED_10baseT_Full; | ||
2934 | if (speed_100 && half_duplex) | ||
2935 | ecmd->advertising |= ADVERTISED_100baseT_Half; | ||
2936 | if (speed_100 && full_duplex) | ||
2937 | ecmd->advertising |= ADVERTISED_100baseT_Full; | ||
2938 | if (speed_1000) | ||
2939 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | ||
2940 | } | ||
2941 | } | ||
2942 | |||
2943 | ecmd->port = PORT_TP; | ||
2944 | ecmd->phy_address = ethernet_phy_get(port_num); | ||
2945 | |||
2946 | ecmd->transceiver = XCVR_EXTERNAL; | ||
2947 | |||
2948 | if (netif_carrier_ok(netdev)) { | ||
2949 | if (mode_10_bit) | ||
2950 | ecmd->speed = SPEED_10; | ||
2951 | else { | ||
2952 | if (psr & MV643XX_ETH_PORT_STATUS_GMII_1000) | ||
2953 | ecmd->speed = SPEED_1000; | ||
2954 | else if (psr & MV643XX_ETH_PORT_STATUS_MII_100) | ||
2955 | ecmd->speed = SPEED_100; | ||
2956 | else | ||
2957 | ecmd->speed = SPEED_10; | ||
2958 | } | ||
2959 | |||
2960 | if (psr & MV643XX_ETH_PORT_STATUS_FULL_DUPLEX) | ||
2961 | ecmd->duplex = DUPLEX_FULL; | ||
2962 | else | ||
2963 | ecmd->duplex = DUPLEX_HALF; | ||
2964 | } else { | ||
2965 | ecmd->speed = -1; | ||
2966 | ecmd->duplex = -1; | ||
2967 | } | ||
2968 | |||
2969 | ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; | ||
2970 | return 0; | ||
2971 | } | ||
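For context, the ETHTOOL_GSET request that reaches mv643xx_get_settings() originates in userspace through the SIOCETHTOOL ioctl. A minimal sketch of that caller side, built against the same 2.6-era headers (the "eth0" name and the thin error handling are illustrative):

    /* Hypothetical userland caller for the GSET path above; not part of
     * the driver. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&ecmd;

            if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("speed %u, %s duplex, autoneg %s\n", ecmd.speed,
                           ecmd.duplex == DUPLEX_FULL ? "full" : "half",
                           ecmd.autoneg == AUTONEG_ENABLE ? "on" : "off");
            if (fd >= 0)
                    close(fd);
            return 0;
    }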
2972 | |||
2973 | static void | ||
2974 | mv643xx_get_drvinfo(struct net_device *netdev, | ||
2975 | struct ethtool_drvinfo *drvinfo) | ||
2976 | { | ||
2977 | strncpy(drvinfo->driver, mv643xx_driver_name, 32); | ||
2978 | strncpy(drvinfo->version, mv643xx_driver_version, 32); | ||
2979 | strncpy(drvinfo->fw_version, "N/A", 32); | ||
2980 | strncpy(drvinfo->bus_info, "mv643xx", 32); | ||
2981 | drvinfo->n_stats = MV643XX_STATS_LEN; | ||
2982 | } | ||
2983 | |||
2984 | static int | ||
2985 | mv643xx_get_stats_count(struct net_device *netdev) | ||
2986 | { | ||
2987 | return MV643XX_STATS_LEN; | ||
2988 | } | ||
2989 | |||
2990 | static void | ||
2991 | mv643xx_get_ethtool_stats(struct net_device *netdev, | ||
2992 | struct ethtool_stats *stats, uint64_t *data) | ||
2993 | { | ||
2994 | struct mv643xx_private *mp = netdev->priv; | ||
2995 | int i; | ||
2996 | |||
2997 | eth_update_mib_counters(mp); | ||
2998 | |||
2999 | for (i = 0; i < MV643XX_STATS_LEN; i++) { | ||
3000 | char *p = (char *)mp + mv643xx_gstrings_stats[i].stat_offset; | ||
3001 | data[i] = (mv643xx_gstrings_stats[i].sizeof_stat == | ||
3002 | sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; | ||
3003 | } | ||
3004 | } | ||
3005 | |||
3006 | static void | ||
3007 | mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) | ||
3008 | { | ||
3009 | int i; | ||
3010 | |||
3011 | switch (stringset) { | ||
3012 | case ETH_SS_STATS: | ||
3013 | for (i = 0; i < MV643XX_STATS_LEN; i++) { | ||
3014 | memcpy(data + i * ETH_GSTRING_LEN, | ||
3015 | mv643xx_gstrings_stats[i].stat_string, | ||
3016 | ETH_GSTRING_LEN); | ||
3017 | } | ||
3018 | break; | ||
3019 | } | ||
3020 | } | ||
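Together, mv643xx_get_strings() and mv643xx_get_ethtool_stats() back the `ethtool -S` command: userspace first fetches the names with ETHTOOL_GSTRINGS, then the values with ETHTOOL_GSTATS, and pairs them by index. A sketch of that consumer side (the stat count would normally be discovered from the n_stats field that mv643xx_get_drvinfo() fills in; here it is passed in, and ioctl error handling is elided):

    /* Sketch of the `ethtool -S` consumer; not part of the driver. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    static void dump_stats(int fd, const char *ifname, unsigned int n_stats)
    {
            struct ethtool_gstrings *names;
            struct ethtool_stats *vals;
            struct ifreq ifr;
            unsigned int i;

            names = calloc(1, sizeof(*names) + n_stats * ETH_GSTRING_LEN);
            vals = calloc(1, sizeof(*vals) + n_stats * sizeof(__u64));
            if (!names || !vals)
                    goto out;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

            names->cmd = ETHTOOL_GSTRINGS;
            names->string_set = ETH_SS_STATS;
            names->len = n_stats;
            ifr.ifr_data = (void *)names;
            ioctl(fd, SIOCETHTOOL, &ifr);

            vals->cmd = ETHTOOL_GSTATS;
            vals->n_stats = n_stats;
            ifr.ifr_data = (void *)vals;
            ioctl(fd, SIOCETHTOOL, &ifr);

            for (i = 0; i < n_stats; i++)
                    printf("%-32.32s %llu\n",
                           (const char *)names->data + i * ETH_GSTRING_LEN,
                           (unsigned long long)vals->data[i]);
    out:
            free(names);
            free(vals);
    }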
3021 | |||
3022 | static struct ethtool_ops mv643xx_ethtool_ops = { | ||
3023 | .get_settings = mv643xx_get_settings, | ||
3024 | .get_drvinfo = mv643xx_get_drvinfo, | ||
3025 | .get_link = ethtool_op_get_link, | ||
3026 | .get_sg = ethtool_op_get_sg, | ||
3027 | .set_sg = ethtool_op_set_sg, | ||
3028 | .get_strings = mv643xx_get_strings, | ||
3029 | .get_stats_count = mv643xx_get_stats_count, | ||
3030 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | ||
3031 | }; | ||
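For the ops table to take effect, the driver's probe path must point its net_device at it before registration. A minimal fragment under 2.6.12 conventions (the probe function name here is illustrative, not this driver's):

    /* Illustrative probe fragment: attach the ethtool ops before
     * register_netdev() makes the interface visible. */
    static int example_probe(struct net_device *dev)
    {
            dev->ethtool_ops = &mv643xx_ethtool_ops;
            return register_netdev(dev);
    }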
3032 | |||
3033 | /************* End ethtool support *************************/ | ||