Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--  drivers/net/mv643xx_eth.c | 1558
1 file changed, 573 insertions(+), 985 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index c0998ef938e0..9f2661355a4a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -10,7 +10,7 @@ | |||
10 | * | 10 | * |
11 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> | 11 | * Copyright (C) 2003 Ralf Baechle <ralf@linux-mips.org> |
12 | * | 12 | * |
13 | * Copyright (C) 2004-2005 MontaVista Software, Inc. | 13 | * Copyright (C) 2004-2006 MontaVista Software, Inc. |
14 | * Dale Farnsworth <dale@farnsworth.org> | 14 | * Dale Farnsworth <dale@farnsworth.org> |
15 | * | 15 | * |
16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> | 16 | * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> |
@@ -37,8 +37,6 @@ | |||
37 | #include <linux/tcp.h> | 37 | #include <linux/tcp.h> |
38 | #include <linux/udp.h> | 38 | #include <linux/udp.h> |
39 | #include <linux/etherdevice.h> | 39 | #include <linux/etherdevice.h> |
40 | #include <linux/in.h> | ||
41 | #include <linux/ip.h> | ||
42 | 40 | ||
43 | #include <linux/bitops.h> | 41 | #include <linux/bitops.h> |
44 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
@@ -52,39 +50,16 @@ | |||
52 | #include <asm/delay.h> | 50 | #include <asm/delay.h> |
53 | #include "mv643xx_eth.h" | 51 | #include "mv643xx_eth.h" |
54 | 52 | ||
55 | /* | ||
56 | * The first part is the high level driver of the gigE ethernet ports. | ||
57 | */ | ||
58 | |||
59 | /* Constants */ | ||
60 | #define VLAN_HLEN 4 | ||
61 | #define FCS_LEN 4 | ||
62 | #define DMA_ALIGN 8 /* hw requires 8-byte alignment */ | ||
63 | #define HW_IP_ALIGN 2 /* hw aligns IP header */ | ||
64 | #define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN | ||
65 | #define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7) | ||
66 | |||
67 | #define INT_UNMASK_ALL 0x0007ffff | ||
68 | #define INT_UNMASK_ALL_EXT 0x0011ffff | ||
69 | #define INT_MASK_ALL 0x00000000 | ||
70 | #define INT_MASK_ALL_EXT 0x00000000 | ||
71 | #define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL | ||
72 | #define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT | ||
73 | |||
74 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
75 | #define MAX_DESCS_PER_SKB (MAX_SKB_FRAGS + 1) | ||
76 | #else | ||
77 | #define MAX_DESCS_PER_SKB 1 | ||
78 | #endif | ||
79 | |||
80 | #define PHY_WAIT_ITERATIONS 1000 /* 1000 iterations * 10uS = 10mS max */ | ||
81 | #define PHY_WAIT_MICRO_SECONDS 10 | ||
82 | |||
83 | /* Static function declarations */ | 53 | /* Static function declarations */ |
84 | static int eth_port_link_is_up(unsigned int eth_port_num); | ||
85 | static void eth_port_uc_addr_get(struct net_device *dev, | 54 | static void eth_port_uc_addr_get(struct net_device *dev, |
86 | unsigned char *MacAddr); | 55 | unsigned char *MacAddr); |
87 | static void eth_port_set_multicast_list(struct net_device *); | 56 | static void eth_port_set_multicast_list(struct net_device *); |
57 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, | ||
58 | unsigned int queues); | ||
59 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, | ||
60 | unsigned int queues); | ||
61 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num); | ||
62 | static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num); | ||
88 | static int mv643xx_eth_open(struct net_device *); | 63 | static int mv643xx_eth_open(struct net_device *); |
89 | static int mv643xx_eth_stop(struct net_device *); | 64 | static int mv643xx_eth_stop(struct net_device *); |
90 | static int mv643xx_eth_change_mtu(struct net_device *, int); | 65 | static int mv643xx_eth_change_mtu(struct net_device *, int); |
@@ -93,8 +68,12 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num); | |||
93 | #ifdef MV643XX_NAPI | 68 | #ifdef MV643XX_NAPI |
94 | static int mv643xx_poll(struct net_device *dev, int *budget); | 69 | static int mv643xx_poll(struct net_device *dev, int *budget); |
95 | #endif | 70 | #endif |
71 | static int ethernet_phy_get(unsigned int eth_port_num); | ||
96 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | 72 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); |
97 | static int ethernet_phy_detect(unsigned int eth_port_num); | 73 | static int ethernet_phy_detect(unsigned int eth_port_num); |
74 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location); | ||
75 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val); | ||
76 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | ||
98 | static struct ethtool_ops mv643xx_ethtool_ops; | 77 | static struct ethtool_ops mv643xx_ethtool_ops; |
99 | 78 | ||
100 | static char mv643xx_driver_name[] = "mv643xx_eth"; | 79 | static char mv643xx_driver_name[] = "mv643xx_eth"; |
@@ -153,67 +132,53 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu) | |||
153 | } | 132 | } |
154 | 133 | ||
155 | /* | 134 | /* |
156 | * mv643xx_eth_rx_task | 135 | * mv643xx_eth_rx_refill_descs |
157 | * | 136 | * |
158 | * Fills / refills RX queue on a certain gigabit ethernet port | 137 | * Fills / refills RX queue on a certain gigabit ethernet port |
159 | * | 138 | * |
160 | * Input : pointer to ethernet interface network device structure | 139 | * Input : pointer to ethernet interface network device structure |
161 | * Output : N/A | 140 | * Output : N/A |
162 | */ | 141 | */ |
163 | static void mv643xx_eth_rx_task(void *data) | 142 | static void mv643xx_eth_rx_refill_descs(struct net_device *dev) |
164 | { | 143 | { |
165 | struct net_device *dev = (struct net_device *)data; | ||
166 | struct mv643xx_private *mp = netdev_priv(dev); | 144 | struct mv643xx_private *mp = netdev_priv(dev); |
167 | struct pkt_info pkt_info; | 145 | struct pkt_info pkt_info; |
168 | struct sk_buff *skb; | 146 | struct sk_buff *skb; |
169 | int unaligned; | 147 | int unaligned; |
170 | 148 | ||
171 | if (test_and_set_bit(0, &mp->rx_task_busy)) | 149 | while (mp->rx_desc_count < mp->rx_ring_size) { |
172 | panic("%s: Error in test_set_bit / clear_bit", dev->name); | 150 | skb = dev_alloc_skb(ETH_RX_SKB_SIZE + ETH_DMA_ALIGN); |
173 | |||
174 | while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) { | ||
175 | skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN); | ||
176 | if (!skb) | 151 | if (!skb) |
177 | break; | 152 | break; |
178 | mp->rx_ring_skbs++; | 153 | mp->rx_desc_count++; |
179 | unaligned = (u32)skb->data & (DMA_ALIGN - 1); | 154 | unaligned = (u32)skb->data & (ETH_DMA_ALIGN - 1); |
180 | if (unaligned) | 155 | if (unaligned) |
181 | skb_reserve(skb, DMA_ALIGN - unaligned); | 156 | skb_reserve(skb, ETH_DMA_ALIGN - unaligned); |
182 | pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; | 157 | pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT; |
183 | pkt_info.byte_cnt = RX_SKB_SIZE; | 158 | pkt_info.byte_cnt = ETH_RX_SKB_SIZE; |
184 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE, | 159 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, |
185 | DMA_FROM_DEVICE); | 160 | ETH_RX_SKB_SIZE, DMA_FROM_DEVICE); |
186 | pkt_info.return_info = skb; | 161 | pkt_info.return_info = skb; |
187 | if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) { | 162 | if (eth_rx_return_buff(mp, &pkt_info) != ETH_OK) { |
188 | printk(KERN_ERR | 163 | printk(KERN_ERR |
189 | "%s: Error allocating RX Ring\n", dev->name); | 164 | "%s: Error allocating RX Ring\n", dev->name); |
190 | break; | 165 | break; |
191 | } | 166 | } |
192 | skb_reserve(skb, HW_IP_ALIGN); | 167 | skb_reserve(skb, ETH_HW_IP_ALIGN); |
193 | } | 168 | } |
194 | clear_bit(0, &mp->rx_task_busy); | ||
195 | /* | 169 | /* |
196 | * If RX ring is empty of SKB, set a timer to try allocating | 170 | * If RX ring is empty of SKB, set a timer to try allocating |
197 | * again in a later time . | 171 | * again at a later time. |
198 | */ | 172 | */ |
199 | if ((mp->rx_ring_skbs == 0) && (mp->rx_timer_flag == 0)) { | 173 | if (mp->rx_desc_count == 0) { |
200 | printk(KERN_INFO "%s: Rx ring is empty\n", dev->name); | 174 | printk(KERN_INFO "%s: Rx ring is empty\n", dev->name); |
201 | /* After 100mSec */ | 175 | mp->timeout.expires = jiffies + (HZ / 10); /* 100 mSec */ |
202 | mp->timeout.expires = jiffies + (HZ / 10); | ||
203 | add_timer(&mp->timeout); | 176 | add_timer(&mp->timeout); |
204 | mp->rx_timer_flag = 1; | ||
205 | } | ||
206 | #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK | ||
207 | else { | ||
208 | /* Return interrupts */ | ||
209 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num), | ||
210 | INT_UNMASK_ALL); | ||
211 | } | 177 | } |
212 | #endif | ||
213 | } | 178 | } |
214 | 179 | ||
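Two alignment constraints meet in this refill loop: the SDMA engine requires receive buffers on an 8-byte boundary, and the controller writes two padding bytes ahead of each frame so the IP header lands naturally aligned. The driver over-allocates each skb by ETH_DMA_ALIGN, reserves up to the boundary before the DMA mapping, and only after queueing the buffer reserves the two hardware pad bytes. A standalone sketch of the alignment arithmetic, with illustrative names (only the constant values come from mv643xx_eth.h):

#include <stdint.h>

#define DMA_ALIGN_BYTES   8     /* controller requires 8-byte aligned buffers */
#define HW_IP_ALIGN_BYTES 2     /* hw pad bytes; skipped later via skb_reserve() */

static uint8_t *dma_align(uint8_t *data)
{
        uintptr_t misalign = (uintptr_t)data & (DMA_ALIGN_BYTES - 1);

        if (misalign)           /* plays the role of skb_reserve(skb, ...) */
                data += DMA_ALIGN_BYTES - misalign;
        return data;
}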
215 | /* | 180 | /* |
216 | * mv643xx_eth_rx_task_timer_wrapper | 181 | * mv643xx_eth_rx_refill_descs_timer_wrapper |
217 | * | 182 | * |
218 | * Timer routine to wake up RX queue filling task. This function is | 183 | * Timer routine to wake up RX queue filling task. This function is |
219 | * used only in case the RX queue is empty, and all alloc_skb has | 184 | * used only in case the RX queue is empty, and all alloc_skb has |
@@ -222,13 +187,9 @@ static void mv643xx_eth_rx_task(void *data) | |||
222 | * Input : pointer to ethernet interface network device structure | 187 | * Input : pointer to ethernet interface network device structure |
223 | * Output : N/A | 188 | * Output : N/A |
224 | */ | 189 | */ |
225 | static void mv643xx_eth_rx_task_timer_wrapper(unsigned long data) | 190 | static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data) |
226 | { | 191 | { |
227 | struct net_device *dev = (struct net_device *)data; | 192 | mv643xx_eth_rx_refill_descs((struct net_device *)data); |
228 | struct mv643xx_private *mp = netdev_priv(dev); | ||
229 | |||
230 | mp->rx_timer_flag = 0; | ||
231 | mv643xx_eth_rx_task((void *)data); | ||
232 | } | 193 | } |
233 | 194 | ||
234 | /* | 195 | /* |
@@ -245,8 +206,7 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev) | |||
245 | unsigned int port_num = mp->port_num; | 206 | unsigned int port_num = mp->port_num; |
246 | 207 | ||
247 | eth_port_init_mac_tables(port_num); | 208 | eth_port_init_mac_tables(port_num); |
248 | memcpy(mp->port_mac_addr, dev->dev_addr, 6); | 209 | eth_port_uc_addr_set(port_num, dev->dev_addr); |
249 | eth_port_uc_addr_set(port_num, mp->port_mac_addr); | ||
250 | } | 210 | } |
251 | 211 | ||
252 | /* | 212 | /* |
@@ -260,13 +220,14 @@ static void mv643xx_eth_update_mac_address(struct net_device *dev) | |||
260 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) | 220 | static void mv643xx_eth_set_rx_mode(struct net_device *dev) |
261 | { | 221 | { |
262 | struct mv643xx_private *mp = netdev_priv(dev); | 222 | struct mv643xx_private *mp = netdev_priv(dev); |
223 | u32 config_reg; | ||
263 | 224 | ||
225 | config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num)); | ||
264 | if (dev->flags & IFF_PROMISC) | 226 | if (dev->flags & IFF_PROMISC) |
265 | mp->port_config |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | 227 | config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; |
266 | else | 228 | else |
267 | mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; | 229 | config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE; |
268 | 230 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg); | |
269 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config); | ||
270 | 231 | ||
271 | eth_port_set_multicast_list(dev); | 232 | eth_port_set_multicast_list(dev); |
272 | } | 233 | } |
@@ -322,53 +283,82 @@ static void mv643xx_eth_tx_timeout_task(struct net_device *dev) | |||
322 | 283 | ||
323 | netif_device_detach(dev); | 284 | netif_device_detach(dev); |
324 | eth_port_reset(mp->port_num); | 285 | eth_port_reset(mp->port_num); |
325 | eth_port_start(mp); | 286 | eth_port_start(dev); |
326 | netif_device_attach(dev); | 287 | netif_device_attach(dev); |
327 | } | 288 | } |
328 | 289 | ||
329 | /* | 290 | /** |
330 | * mv643xx_eth_free_tx_queue | 291 | * mv643xx_eth_free_tx_descs - Free the tx desc data for completed descriptors |
331 | * | ||
332 | * Input : dev - a pointer to the required interface | ||
333 | * | 292 | * |
334 | * Output : 0 if was able to release skb , nonzero otherwise | 293 | * If force is non-zero, frees uncompleted descriptors as well |
335 | */ | 294 | */ |
336 | static int mv643xx_eth_free_tx_queue(struct net_device *dev, | 295 | int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) |
337 | unsigned int eth_int_cause_ext) | ||
338 | { | 296 | { |
339 | struct mv643xx_private *mp = netdev_priv(dev); | 297 | struct mv643xx_private *mp = netdev_priv(dev); |
340 | struct net_device_stats *stats = &mp->stats; | 298 | struct eth_tx_desc *desc; |
341 | struct pkt_info pkt_info; | 299 | u32 cmd_sts; |
342 | int released = 1; | 300 | struct sk_buff *skb; |
301 | unsigned long flags; | ||
302 | int tx_index; | ||
303 | dma_addr_t addr; | ||
304 | int count; | ||
305 | int released = 0; | ||
306 | |||
307 | while (mp->tx_desc_count > 0) { | ||
308 | spin_lock_irqsave(&mp->lock, flags); | ||
309 | tx_index = mp->tx_used_desc_q; | ||
310 | desc = &mp->p_tx_desc_area[tx_index]; | ||
311 | cmd_sts = desc->cmd_sts; | ||
312 | |||
313 | if (!force && (cmd_sts & ETH_BUFFER_OWNED_BY_DMA)) { | ||
314 | spin_unlock_irqrestore(&mp->lock, flags); | ||
315 | return released; | ||
316 | } | ||
343 | 317 | ||
344 | if (!(eth_int_cause_ext & (BIT0 | BIT8))) | 318 | mp->tx_used_desc_q = (tx_index + 1) % mp->tx_ring_size; |
345 | return released; | 319 | mp->tx_desc_count--; |
346 | 320 | ||
347 | /* Check only queue 0 */ | 321 | addr = desc->buf_ptr; |
348 | while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { | 322 | count = desc->byte_cnt; |
349 | if (pkt_info.cmd_sts & BIT0) { | 323 | skb = mp->tx_skb[tx_index]; |
324 | if (skb) | ||
325 | mp->tx_skb[tx_index] = NULL; | ||
326 | |||
327 | spin_unlock_irqrestore(&mp->lock, flags); | ||
328 | |||
329 | if (cmd_sts & ETH_ERROR_SUMMARY) { | ||
350 | printk("%s: Error in TX\n", dev->name); | 330 | printk("%s: Error in TX\n", dev->name); |
351 | stats->tx_errors++; | 331 | mp->stats.tx_errors++; |
352 | } | 332 | } |
353 | 333 | ||
354 | if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC) | 334 | if (cmd_sts & ETH_TX_FIRST_DESC) |
355 | dma_unmap_single(NULL, pkt_info.buf_ptr, | 335 | dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); |
356 | pkt_info.byte_cnt, | ||
357 | DMA_TO_DEVICE); | ||
358 | else | 336 | else |
359 | dma_unmap_page(NULL, pkt_info.buf_ptr, | 337 | dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE); |
360 | pkt_info.byte_cnt, | ||
361 | DMA_TO_DEVICE); | ||
362 | 338 | ||
363 | if (pkt_info.return_info) { | 339 | if (skb) |
364 | dev_kfree_skb_irq(pkt_info.return_info); | 340 | dev_kfree_skb_irq(skb); |
365 | released = 0; | 341 | |
366 | } | 342 | released = 1; |
367 | } | 343 | } |
368 | 344 | ||
369 | return released; | 345 | return released; |
370 | } | 346 | } |
371 | 347 | ||
348 | static void mv643xx_eth_free_completed_tx_descs(struct net_device *dev) | ||
349 | { | ||
350 | struct mv643xx_private *mp = netdev_priv(dev); | ||
351 | |||
352 | if (mv643xx_eth_free_tx_descs(dev, 0) && | ||
353 | mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) | ||
354 | netif_wake_queue(dev); | ||
355 | } | ||
356 | |||
357 | static void mv643xx_eth_free_all_tx_descs(struct net_device *dev) | ||
358 | { | ||
359 | mv643xx_eth_free_tx_descs(dev, 1); | ||
360 | } | ||
361 | |||
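The new reaper replaces the old eth_tx_return_desc() loop: it walks tx_used_desc_q forward, stopping at the first descriptor the DMA engine still owns unless force is set, and takes mp->lock per descriptor so the unmap and dev_kfree_skb_irq() run outside the lock. The ownership handshake, reduced to a standalone sketch (names are illustrative; the real loop also unmaps buffers and frees skbs):

#include <stdint.h>

#define OWNED_BY_DMA 0x80000000u        /* mirrors ETH_BUFFER_OWNED_BY_DMA */

struct txd { volatile uint32_t cmd_sts; };

/* Advance 'used' past completed descriptors; returns how many were reaped. */
static int reap_tx(struct txd *ring, int size, int *used, int count, int force)
{
        int reaped = 0;

        while (count-- > 0) {
                if (!force && (ring[*used].cmd_sts & OWNED_BY_DMA))
                        break;          /* hardware is still transmitting this one */
                *used = (*used + 1) % size;
                reaped++;
        }
        return reaped;
}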
372 | /* | 362 | /* |
373 | * mv643xx_eth_receive | 363 | * mv643xx_eth_receive |
374 | * | 364 | * |
@@ -380,11 +370,7 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev, | |||
380 | * | 370 | * |
381 | * Output : number of served packets | 371 | * Output : number of served packets |
382 | */ | 372 | */ |
383 | #ifdef MV643XX_NAPI | ||
384 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) | 373 | static int mv643xx_eth_receive_queue(struct net_device *dev, int budget) |
385 | #else | ||
386 | static int mv643xx_eth_receive_queue(struct net_device *dev) | ||
387 | #endif | ||
388 | { | 374 | { |
389 | struct mv643xx_private *mp = netdev_priv(dev); | 375 | struct mv643xx_private *mp = netdev_priv(dev); |
390 | struct net_device_stats *stats = &mp->stats; | 376 | struct net_device_stats *stats = &mp->stats; |
@@ -392,15 +378,14 @@ static int mv643xx_eth_receive_queue(struct net_device *dev) | |||
392 | struct sk_buff *skb; | 378 | struct sk_buff *skb; |
393 | struct pkt_info pkt_info; | 379 | struct pkt_info pkt_info; |
394 | 380 | ||
395 | #ifdef MV643XX_NAPI | ||
396 | while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) { | 381 | while (budget-- > 0 && eth_port_receive(mp, &pkt_info) == ETH_OK) { |
397 | #else | 382 | mp->rx_desc_count--; |
398 | while (eth_port_receive(mp, &pkt_info) == ETH_OK) { | ||
399 | #endif | ||
400 | mp->rx_ring_skbs--; | ||
401 | received_packets++; | 383 | received_packets++; |
402 | 384 | ||
403 | /* Update statistics. Note byte count includes 4 byte CRC count */ | 385 | /* |
386 | * Update statistics. | ||
387 | * Note byte count includes 4 byte CRC count | ||
388 | */ | ||
404 | stats->rx_packets++; | 389 | stats->rx_packets++; |
405 | stats->rx_bytes += pkt_info.byte_cnt; | 390 | stats->rx_bytes += pkt_info.byte_cnt; |
406 | skb = pkt_info.return_info; | 391 | skb = pkt_info.return_info; |
@@ -448,10 +433,61 @@ static int mv643xx_eth_receive_queue(struct net_device *dev) | |||
448 | } | 433 | } |
449 | dev->last_rx = jiffies; | 434 | dev->last_rx = jiffies; |
450 | } | 435 | } |
436 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ | ||
451 | 437 | ||
452 | return received_packets; | 438 | return received_packets; |
453 | } | 439 | } |
454 | 440 | ||
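Note that the refill now happens inline at the tail of the receive path instead of through the old deferred-task machinery, and the loop honours the NAPI budget. The budget contract in miniature (rx_ready()/deliver_one() are placeholders, not driver functions):

static int pending = 3;                 /* stand-in for frames waiting on the ring */

static int rx_ready(void)    { return pending > 0; }
static void deliver_one(void) { pending--; }

/* Serve at most 'budget' packets, report how many were actually served. */
static int rx_up_to(int budget)
{
        int served = 0;

        while (budget-- > 0 && rx_ready()) {
                deliver_one();
                served++;
        }
        return served;
}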
441 | /* Set the mv643xx port configuration register for the speed/duplex mode. */ | ||
442 | static void mv643xx_eth_update_pscr(struct net_device *dev, | ||
443 | struct ethtool_cmd *ecmd) | ||
444 | { | ||
445 | struct mv643xx_private *mp = netdev_priv(dev); | ||
446 | int port_num = mp->port_num; | ||
447 | u32 o_pscr, n_pscr; | ||
448 | unsigned int queues; | ||
449 | |||
450 | o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
451 | n_pscr = o_pscr; | ||
452 | |||
453 | /* clear speed, duplex and rx buffer size fields */ | ||
454 | n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 | | ||
455 | MV643XX_ETH_SET_GMII_SPEED_TO_1000 | | ||
456 | MV643XX_ETH_SET_FULL_DUPLEX_MODE | | ||
457 | MV643XX_ETH_MAX_RX_PACKET_MASK); | ||
458 | |||
459 | if (ecmd->duplex == DUPLEX_FULL) | ||
460 | n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE; | ||
461 | |||
462 | if (ecmd->speed == SPEED_1000) | ||
463 | n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 | | ||
464 | MV643XX_ETH_MAX_RX_PACKET_9700BYTE; | ||
465 | else { | ||
466 | if (ecmd->speed == SPEED_100) | ||
467 | n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100; | ||
468 | n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE; | ||
469 | } | ||
470 | |||
471 | if (n_pscr != o_pscr) { | ||
472 | if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0) | ||
473 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
474 | n_pscr); | ||
475 | else { | ||
476 | queues = mv643xx_eth_port_disable_tx(port_num); | ||
477 | |||
478 | o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; | ||
479 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
480 | o_pscr); | ||
481 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
482 | n_pscr); | ||
483 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | ||
484 | n_pscr); | ||
485 | if (queues) | ||
486 | mv643xx_eth_port_enable_tx(port_num, queues); | ||
487 | } | ||
488 | } | ||
489 | } | ||
490 | |||
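mv643xx_eth_update_pscr() never rewrites a live serial-control register directly: when the port is enabled it disables the TX queues, clears the enable bit, writes the new value (with enable preserved, and written twice as the code above shows), then restores whatever queues were running. The quiesce-then-update shape, as a standalone sketch (reg_read()/reg_write() stand in for mv_read()/mv_write(); the bit position is illustrative):

#include <stdint.h>

#define ENABLE (1u << 0)                /* illustrative SERIAL_PORT_ENABLE bit */

static uint32_t pscr;                   /* stand-in for the hardware register */
static uint32_t reg_read(void)      { return pscr; }
static void reg_write(uint32_t v)   { pscr = v; }

static void update_pscr(uint32_t new_val)
{
        uint32_t old = reg_read();

        if (!(old & ENABLE)) {
                reg_write(new_val);     /* port idle: safe to write directly */
                return;
        }
        /* quiesce first, then apply the new value with ENABLE kept set */
        reg_write(old & ~ENABLE);
        reg_write(new_val | ENABLE);
}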
455 | /* | 491 | /* |
456 | * mv643xx_eth_int_handler | 492 | * mv643xx_eth_int_handler |
457 | * | 493 | * |
@@ -473,78 +509,52 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id, | |||
473 | 509 | ||
474 | /* Read interrupt cause registers */ | 510 | /* Read interrupt cause registers */ |
475 | eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & | 511 | eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) & |
476 | INT_UNMASK_ALL; | 512 | ETH_INT_UNMASK_ALL; |
477 | 513 | if (eth_int_cause & ETH_INT_CAUSE_EXT) { | |
478 | if (eth_int_cause & BIT1) | ||
479 | eth_int_cause_ext = mv_read( | 514 | eth_int_cause_ext = mv_read( |
480 | MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & | 515 | MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) & |
481 | INT_UNMASK_ALL_EXT; | 516 | ETH_INT_UNMASK_ALL_EXT; |
517 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), | ||
518 | ~eth_int_cause_ext); | ||
519 | } | ||
482 | 520 | ||
483 | #ifdef MV643XX_NAPI | 521 | /* PHY status changed */ |
484 | if (!(eth_int_cause & 0x0007fffd)) { | 522 | if (eth_int_cause_ext & ETH_INT_CAUSE_PHY) { |
485 | /* Dont ack the Rx interrupt */ | 523 | struct ethtool_cmd cmd; |
486 | #endif | 524 | |
487 | /* | 525 | if (mii_link_ok(&mp->mii)) { |
488 | * Clear specific ethernet port intrerrupt registers by | 526 | mii_ethtool_gset(&mp->mii, &cmd); |
489 | * acknowleding relevant bits. | 527 | mv643xx_eth_update_pscr(dev, &cmd); |
490 | */ | 528 | mv643xx_eth_port_enable_tx(port_num, |
491 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), | 529 | ETH_TX_QUEUES_ENABLED); |
492 | ~eth_int_cause); | 530 | if (!netif_carrier_ok(dev)) { |
493 | if (eth_int_cause_ext != 0x0) | 531 | netif_carrier_on(dev); |
494 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG | 532 | if (mp->tx_ring_size - mp->tx_desc_count >= |
495 | (port_num), ~eth_int_cause_ext); | 533 | MAX_DESCS_PER_SKB) |
496 | 534 | netif_wake_queue(dev); | |
497 | /* UDP change : We may need this */ | 535 | } |
498 | if ((eth_int_cause_ext & 0x0000ffff) && | 536 | } else if (netif_carrier_ok(dev)) { |
499 | (mv643xx_eth_free_tx_queue(dev, eth_int_cause_ext) == 0) && | 537 | netif_stop_queue(dev); |
500 | (mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB)) | 538 | netif_carrier_off(dev); |
501 | netif_wake_queue(dev); | ||
502 | #ifdef MV643XX_NAPI | ||
503 | } else { | ||
504 | if (netif_rx_schedule_prep(dev)) { | ||
505 | /* Mask all the interrupts */ | ||
506 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | ||
507 | INT_MASK_ALL); | ||
508 | /* wait for previous write to complete */ | ||
509 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | ||
510 | __netif_rx_schedule(dev); | ||
511 | } | 539 | } |
512 | #else | 540 | } |
513 | if (eth_int_cause & (BIT2 | BIT11)) | ||
514 | mv643xx_eth_receive_queue(dev, 0); | ||
515 | 541 | ||
516 | /* | 542 | #ifdef MV643XX_NAPI |
517 | * After forwarded received packets to upper layer, add a task | 543 | if (eth_int_cause & ETH_INT_CAUSE_RX) { |
518 | * in an interrupts enabled context that refills the RX ring | 544 | /* schedule the NAPI poll routine to maintain port */ |
519 | * with skb's. | ||
520 | */ | ||
521 | #ifdef MV643XX_RX_QUEUE_FILL_ON_TASK | ||
522 | /* Mask all interrupts on ethernet port */ | ||
523 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | 545 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), |
524 | INT_MASK_ALL); | 546 | ETH_INT_MASK_ALL); |
525 | /* wait for previous write to take effect */ | 547 | /* wait for previous write to complete */ |
526 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 548 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
527 | 549 | ||
528 | queue_task(&mp->rx_task, &tq_immediate); | 550 | netif_rx_schedule(dev); |
529 | mark_bh(IMMEDIATE_BH); | 551 | } |
530 | #else | 552 | #else |
531 | mp->rx_task.func(dev); | 553 | if (eth_int_cause & ETH_INT_CAUSE_RX) |
554 | mv643xx_eth_receive_queue(dev, INT_MAX); | ||
555 | if (eth_int_cause_ext & ETH_INT_CAUSE_TX) | ||
556 | mv643xx_eth_free_completed_tx_descs(dev); | ||
532 | #endif | 557 | #endif |
533 | #endif | ||
534 | } | ||
535 | /* PHY status changed */ | ||
536 | if (eth_int_cause_ext & (BIT16 | BIT20)) { | ||
537 | if (eth_port_link_is_up(port_num)) { | ||
538 | netif_carrier_on(dev); | ||
539 | netif_wake_queue(dev); | ||
540 | /* Start TX queue */ | ||
541 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG | ||
542 | (port_num), 1); | ||
543 | } else { | ||
544 | netif_carrier_off(dev); | ||
545 | netif_stop_queue(dev); | ||
546 | } | ||
547 | } | ||
548 | 558 | ||
549 | /* | 559 | /* |
550 | * If no real interrupt occurred, exit. | 560 | * If no real interrupt occurred, exit. |
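Two idioms in the rewritten handler deserve a note: extended-cause bits are acknowledged by writing back the complement of the sampled value, and RX work is handed to NAPI only after the port's interrupt mask is cleared and read back to flush the posted write. The acknowledge idiom is safe precisely because the cause register behaves as write-zero-to-clear, which is what the ~eth_int_cause_ext write implies; a toy model of that assumed register semantics (all names invented):

#include <stdint.h>

static uint32_t cause_reg = 0x00000104;         /* pretend two events are pending */

static uint32_t read_cause(void)    { return cause_reg; }
static void write_cause(uint32_t v) { cause_reg &= v; }  /* write-0-to-clear */

static uint32_t ack_events(uint32_t unmask)
{
        uint32_t cause = read_cause() & unmask;

        write_cause(~cause);    /* clear exactly the bits we sampled, no more */
        return cause;           /* caller dispatches on these */
}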
@@ -670,9 +680,6 @@ static void ether_init_rx_desc_ring(struct mv643xx_private *mp) | |||
670 | mp->rx_used_desc_q = 0; | 680 | mp->rx_used_desc_q = 0; |
671 | 681 | ||
672 | mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); | 682 | mp->rx_desc_area_size = rx_desc_num * sizeof(struct eth_rx_desc); |
673 | |||
674 | /* Add the queue to the list of RX queues of this port */ | ||
675 | mp->port_rx_queue_command |= 1; | ||
676 | } | 683 | } |
677 | 684 | ||
678 | /* | 685 | /* |
@@ -712,14 +719,36 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp) | |||
712 | 719 | ||
713 | mp->tx_curr_desc_q = 0; | 720 | mp->tx_curr_desc_q = 0; |
714 | mp->tx_used_desc_q = 0; | 721 | mp->tx_used_desc_q = 0; |
715 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
716 | mp->tx_first_desc_q = 0; | ||
717 | #endif | ||
718 | 722 | ||
719 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); | 723 | mp->tx_desc_area_size = tx_desc_num * sizeof(struct eth_tx_desc); |
724 | } | ||
720 | 725 | ||
721 | /* Add the queue to the list of Tx queues of this port */ | 726 | static int mv643xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
722 | mp->port_tx_queue_command |= 1; | 727 | { |
728 | struct mv643xx_private *mp = netdev_priv(dev); | ||
729 | int err; | ||
730 | |||
731 | spin_lock_irq(&mp->lock); | ||
732 | err = mii_ethtool_sset(&mp->mii, cmd); | ||
733 | spin_unlock_irq(&mp->lock); | ||
734 | |||
735 | return err; | ||
736 | } | ||
737 | |||
738 | static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
739 | { | ||
740 | struct mv643xx_private *mp = netdev_priv(dev); | ||
741 | int err; | ||
742 | |||
743 | spin_lock_irq(&mp->lock); | ||
744 | err = mii_ethtool_gset(&mp->mii, cmd); | ||
745 | spin_unlock_irq(&mp->lock); | ||
746 | |||
747 | /* The PHY may support 1000baseT_Half, but the mv643xx does not */ | ||
748 | cmd->supported &= ~SUPPORTED_1000baseT_Half; | ||
749 | cmd->advertising &= ~ADVERTISED_1000baseT_Half; | ||
750 | |||
751 | return err; | ||
723 | } | 752 | } |
724 | 753 | ||
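mv643xx_get_settings() queries the PHY through the generic MII layer under mp->lock and then strips 1000baseT_Half from both the supported and advertised masks, since the MAC cannot run gigabit half duplex even when the PHY offers it. The mask-after-query idea in miniature (the bit value is illustrative, not the real ethtool constant):

#include <stdint.h>

#define SUP_1000_HALF (1u << 8)         /* illustrative mode bit */

/* Drop link modes the PHY supports but the MAC cannot terminate. */
static uint32_t filter_modes(uint32_t phy_modes)
{
        return phy_modes & ~SUP_1000_HALF;
}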
725 | /* | 754 | /* |
@@ -750,23 +779,12 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
750 | return -EAGAIN; | 779 | return -EAGAIN; |
751 | } | 780 | } |
752 | 781 | ||
753 | /* Stop RX Queues */ | ||
754 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | ||
755 | |||
756 | /* Set the MAC Address */ | ||
757 | memcpy(mp->port_mac_addr, dev->dev_addr, 6); | ||
758 | |||
759 | eth_port_init(mp); | 782 | eth_port_init(mp); |
760 | 783 | ||
761 | INIT_WORK(&mp->rx_task, (void (*)(void *))mv643xx_eth_rx_task, dev); | ||
762 | |||
763 | memset(&mp->timeout, 0, sizeof(struct timer_list)); | 784 | memset(&mp->timeout, 0, sizeof(struct timer_list)); |
764 | mp->timeout.function = mv643xx_eth_rx_task_timer_wrapper; | 785 | mp->timeout.function = mv643xx_eth_rx_refill_descs_timer_wrapper; |
765 | mp->timeout.data = (unsigned long)dev; | 786 | mp->timeout.data = (unsigned long)dev; |
766 | 787 | ||
767 | mp->rx_task_busy = 0; | ||
768 | mp->rx_timer_flag = 0; | ||
769 | |||
770 | /* Allocate RX and TX skb rings */ | 788 | /* Allocate RX and TX skb rings */ |
771 | mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size, | 789 | mp->rx_skb = kmalloc(sizeof(*mp->rx_skb) * mp->rx_ring_size, |
772 | GFP_KERNEL); | 790 | GFP_KERNEL); |
@@ -784,7 +802,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
784 | } | 802 | } |
785 | 803 | ||
786 | /* Allocate TX ring */ | 804 | /* Allocate TX ring */ |
787 | mp->tx_ring_skbs = 0; | 805 | mp->tx_desc_count = 0; |
788 | size = mp->tx_ring_size * sizeof(struct eth_tx_desc); | 806 | size = mp->tx_ring_size * sizeof(struct eth_tx_desc); |
789 | mp->tx_desc_area_size = size; | 807 | mp->tx_desc_area_size = size; |
790 | 808 | ||
@@ -809,7 +827,7 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
809 | ether_init_tx_desc_ring(mp); | 827 | ether_init_tx_desc_ring(mp); |
810 | 828 | ||
811 | /* Allocate RX ring */ | 829 | /* Allocate RX ring */ |
812 | mp->rx_ring_skbs = 0; | 830 | mp->rx_desc_count = 0; |
813 | size = mp->rx_ring_size * sizeof(struct eth_rx_desc); | 831 | size = mp->rx_ring_size * sizeof(struct eth_rx_desc); |
814 | mp->rx_desc_area_size = size; | 832 | mp->rx_desc_area_size = size; |
815 | 833 | ||
@@ -839,9 +857,13 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
839 | 857 | ||
840 | ether_init_rx_desc_ring(mp); | 858 | ether_init_rx_desc_ring(mp); |
841 | 859 | ||
842 | mv643xx_eth_rx_task(dev); /* Fill RX ring with skb's */ | 860 | mv643xx_eth_rx_refill_descs(dev); /* Fill RX ring with skb's */ |
861 | |||
862 | /* Clear any pending ethernet port interrupts */ | ||
863 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | ||
864 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
843 | 865 | ||
844 | eth_port_start(mp); | 866 | eth_port_start(dev); |
845 | 867 | ||
846 | /* Interrupt Coalescing */ | 868 | /* Interrupt Coalescing */ |
847 | 869 | ||
@@ -853,16 +875,13 @@ static int mv643xx_eth_open(struct net_device *dev) | |||
853 | mp->tx_int_coal = | 875 | mp->tx_int_coal = |
854 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); | 876 | eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); |
855 | 877 | ||
856 | /* Clear any pending ethernet port interrupts */ | ||
857 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | ||
858 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | ||
859 | |||
860 | /* Unmask phy and link status changes interrupts */ | 878 | /* Unmask phy and link status changes interrupts */ |
861 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), | 879 | mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), |
862 | INT_UNMASK_ALL_EXT); | 880 | ETH_INT_UNMASK_ALL_EXT); |
863 | 881 | ||
864 | /* Unmask RX buffer and TX end interrupt */ | 882 | /* Unmask RX buffer and TX end interrupt */ |
865 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); | 883 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
884 | |||
866 | return 0; | 885 | return 0; |
867 | 886 | ||
868 | out_free_tx_skb: | 887 | out_free_tx_skb: |
@@ -878,25 +897,14 @@ out_free_irq: | |||
878 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) | 897 | static void mv643xx_eth_free_tx_rings(struct net_device *dev) |
879 | { | 898 | { |
880 | struct mv643xx_private *mp = netdev_priv(dev); | 899 | struct mv643xx_private *mp = netdev_priv(dev); |
881 | unsigned int port_num = mp->port_num; | ||
882 | unsigned int curr; | ||
883 | struct sk_buff *skb; | ||
884 | 900 | ||
885 | /* Stop Tx Queues */ | 901 | /* Stop Tx Queues */ |
886 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | 902 | mv643xx_eth_port_disable_tx(mp->port_num); |
887 | 903 | ||
888 | /* Free outstanding skb's on TX rings */ | 904 | /* Free outstanding skb's on TX ring */ |
889 | for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) { | 905 | mv643xx_eth_free_all_tx_descs(dev); |
890 | skb = mp->tx_skb[curr]; | 906 | |
891 | if (skb) { | 907 | BUG_ON(mp->tx_used_desc_q != mp->tx_curr_desc_q); |
892 | mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags; | ||
893 | dev_kfree_skb(skb); | ||
894 | mp->tx_ring_skbs--; | ||
895 | } | ||
896 | } | ||
897 | if (mp->tx_ring_skbs) | ||
898 | printk("%s: Error on Tx descriptor free - could not free %d" | ||
899 | " descriptors\n", dev->name, mp->tx_ring_skbs); | ||
900 | 908 | ||
901 | /* Free TX ring */ | 909 | /* Free TX ring */ |
902 | if (mp->tx_sram_size) | 910 | if (mp->tx_sram_size) |
@@ -913,21 +921,21 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev) | |||
913 | int curr; | 921 | int curr; |
914 | 922 | ||
915 | /* Stop RX Queues */ | 923 | /* Stop RX Queues */ |
916 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00); | 924 | mv643xx_eth_port_disable_rx(port_num); |
917 | 925 | ||
918 | /* Free preallocated skb's on RX rings */ | 926 | /* Free preallocated skb's on RX rings */ |
919 | for (curr = 0; mp->rx_ring_skbs && curr < mp->rx_ring_size; curr++) { | 927 | for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) { |
920 | if (mp->rx_skb[curr]) { | 928 | if (mp->rx_skb[curr]) { |
921 | dev_kfree_skb(mp->rx_skb[curr]); | 929 | dev_kfree_skb(mp->rx_skb[curr]); |
922 | mp->rx_ring_skbs--; | 930 | mp->rx_desc_count--; |
923 | } | 931 | } |
924 | } | 932 | } |
925 | 933 | ||
926 | if (mp->rx_ring_skbs) | 934 | if (mp->rx_desc_count) |
927 | printk(KERN_ERR | 935 | printk(KERN_ERR |
928 | "%s: Error in freeing Rx Ring. %d skb's still" | 936 | "%s: Error in freeing Rx Ring. %d skb's still" |
929 | " stuck in RX Ring - ignoring them\n", dev->name, | 937 | " stuck in RX Ring - ignoring them\n", dev->name, |
930 | mp->rx_ring_skbs); | 938 | mp->rx_desc_count); |
931 | /* Free RX ring */ | 939 | /* Free RX ring */ |
932 | if (mp->rx_sram_size) | 940 | if (mp->rx_sram_size) |
933 | iounmap(mp->p_rx_desc_area); | 941 | iounmap(mp->p_rx_desc_area); |
@@ -952,7 +960,7 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
952 | unsigned int port_num = mp->port_num; | 960 | unsigned int port_num = mp->port_num; |
953 | 961 | ||
954 | /* Mask all interrupts on ethernet port */ | 962 | /* Mask all interrupts on ethernet port */ |
955 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); | 963 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
956 | /* wait for previous write to complete */ | 964 | /* wait for previous write to complete */ |
957 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 965 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
958 | 966 | ||
@@ -977,30 +985,6 @@ static int mv643xx_eth_stop(struct net_device *dev) | |||
977 | } | 985 | } |
978 | 986 | ||
979 | #ifdef MV643XX_NAPI | 987 | #ifdef MV643XX_NAPI |
980 | static void mv643xx_tx(struct net_device *dev) | ||
981 | { | ||
982 | struct mv643xx_private *mp = netdev_priv(dev); | ||
983 | struct pkt_info pkt_info; | ||
984 | |||
985 | while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) { | ||
986 | if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC) | ||
987 | dma_unmap_single(NULL, pkt_info.buf_ptr, | ||
988 | pkt_info.byte_cnt, | ||
989 | DMA_TO_DEVICE); | ||
990 | else | ||
991 | dma_unmap_page(NULL, pkt_info.buf_ptr, | ||
992 | pkt_info.byte_cnt, | ||
993 | DMA_TO_DEVICE); | ||
994 | |||
995 | if (pkt_info.return_info) | ||
996 | dev_kfree_skb_irq(pkt_info.return_info); | ||
997 | } | ||
998 | |||
999 | if (netif_queue_stopped(dev) && | ||
1000 | mp->tx_ring_size > mp->tx_ring_skbs + MAX_DESCS_PER_SKB) | ||
1001 | netif_wake_queue(dev); | ||
1002 | } | ||
1003 | |||
1004 | /* | 988 | /* |
1005 | * mv643xx_poll | 989 | * mv643xx_poll |
1006 | * | 990 | * |
@@ -1014,7 +998,7 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1014 | 998 | ||
1015 | #ifdef MV643XX_TX_FAST_REFILL | 999 | #ifdef MV643XX_TX_FAST_REFILL |
1016 | if (++mp->tx_clean_threshold > 5) { | 1000 | if (++mp->tx_clean_threshold > 5) { |
1017 | mv643xx_tx(dev); | 1001 | mv643xx_eth_free_completed_tx_descs(dev); |
1018 | mp->tx_clean_threshold = 0; | 1002 | mp->tx_clean_threshold = 0; |
1019 | } | 1003 | } |
1020 | #endif | 1004 | #endif |
@@ -1025,7 +1009,6 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1025 | if (orig_budget > dev->quota) | 1009 | if (orig_budget > dev->quota) |
1026 | orig_budget = dev->quota; | 1010 | orig_budget = dev->quota; |
1027 | work_done = mv643xx_eth_receive_queue(dev, orig_budget); | 1011 | work_done = mv643xx_eth_receive_queue(dev, orig_budget); |
1028 | mp->rx_task.func(dev); | ||
1029 | *budget -= work_done; | 1012 | *budget -= work_done; |
1030 | dev->quota -= work_done; | 1013 | dev->quota -= work_done; |
1031 | if (work_done >= orig_budget) | 1014 | if (work_done >= orig_budget) |
@@ -1037,14 +1020,17 @@ static int mv643xx_poll(struct net_device *dev, int *budget) | |||
1037 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); | 1020 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); |
1038 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); | 1021 | mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); |
1039 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), | 1022 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), |
1040 | INT_UNMASK_ALL); | 1023 | ETH_INT_UNMASK_ALL); |
1041 | } | 1024 | } |
1042 | 1025 | ||
1043 | return done ? 0 : 1; | 1026 | return done ? 0 : 1; |
1044 | } | 1027 | } |
1045 | #endif | 1028 | #endif |
1046 | 1029 | ||
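mv643xx_poll() follows the pre-2.6.24 two-counter NAPI contract: serve at most min(*budget, dev->quota) packets, subtract the work done from both counters, and return 0 only once the ring is drained and interrupts have been re-enabled; returning 1 asks the core to poll again. A minimal model of that accounting (do_rx() stands in for mv643xx_eth_receive_queue()):

static int do_rx(int max) { return max < 2 ? max : 2; }  /* pretend 2 frames wait */

static int poll(int *budget, int *quota)
{
        int limit = (*budget < *quota) ? *budget : *quota;
        int done = do_rx(limit);

        *budget -= done;
        *quota  -= done;
        if (done >= limit)
                return 1;               /* ring not drained: keep polling */
        /* drained: the caller acks causes and unmasks interrupts here */
        return 0;
}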
1047 | /* Hardware can't handle unaligned fragments smaller than 9 bytes. | 1030 | /** |
1031 | * has_tiny_unaligned_frags - check if skb has any small, unaligned fragments | ||
1032 | * | ||
1033 | * Hardware can't handle unaligned fragments smaller than 9 bytes. | ||
1048 | * This helper function detects that case. | 1034 | * This helper function detects that case. |
1049 | */ | 1035 | */ |
1050 | 1036 | ||
@@ -1061,223 +1047,166 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb) | |||
1061 | return 0; | 1047 | return 0; |
1062 | } | 1048 | } |
1063 | 1049 | ||
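The helper's body is unchanged by this patch, so only its comment and tail appear above; the check it performs is, in essence, a scan for fragments that are both short and misaligned. A standalone sketch under that reading (field names simplified from skb_frag_t):

struct frag { unsigned int size; unsigned int page_offset; };

static int has_tiny_unaligned(const struct frag *f, int nfrags)
{
        int i;

        for (i = 0; i < nfrags; i++)
                if (f[i].size <= 8 && (f[i].page_offset & 0x7))
                        return 1;       /* caller linearizes the skb instead */
        return 0;
}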
1050 | /** | ||
1051 | * eth_alloc_tx_desc_index - return the index of the next available tx desc | ||
1052 | */ | ||
1053 | static int eth_alloc_tx_desc_index(struct mv643xx_private *mp) | ||
1054 | { | ||
1055 | int tx_desc_curr; | ||
1064 | 1056 | ||
1065 | /* | 1057 | BUG_ON(mp->tx_desc_count >= mp->tx_ring_size); |
1066 | * mv643xx_eth_start_xmit | 1058 | |
1067 | * | 1059 | tx_desc_curr = mp->tx_curr_desc_q; |
1068 | * This function is queues a packet in the Tx descriptor for | 1060 | mp->tx_curr_desc_q = (tx_desc_curr + 1) % mp->tx_ring_size; |
1069 | * required port. | 1061 | |
1070 | * | 1062 | BUG_ON(mp->tx_curr_desc_q == mp->tx_used_desc_q); |
1071 | * Input : skb - a pointer to socket buffer | 1063 | |
1072 | * dev - a pointer to the required port | 1064 | return tx_desc_curr; |
1065 | } | ||
1066 | |||
1067 | /** | ||
1068 | * eth_tx_fill_frag_descs - fill tx hw descriptors for an skb's fragments. | ||
1073 | * | 1069 | * |
1074 | * Output : zero upon success | 1070 | * Ensure the data for each fragment to be transmitted is mapped properly, |
1071 | * then fill in descriptors in the tx hw queue. | ||
1075 | */ | 1072 | */ |
1076 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) | 1073 | static void eth_tx_fill_frag_descs(struct mv643xx_private *mp, |
1074 | struct sk_buff *skb) | ||
1077 | { | 1075 | { |
1078 | struct mv643xx_private *mp = netdev_priv(dev); | 1076 | int frag; |
1079 | struct net_device_stats *stats = &mp->stats; | 1077 | int tx_index; |
1080 | ETH_FUNC_RET_STATUS status; | 1078 | struct eth_tx_desc *desc; |
1081 | unsigned long flags; | ||
1082 | struct pkt_info pkt_info; | ||
1083 | 1079 | ||
1084 | if (netif_queue_stopped(dev)) { | 1080 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { |
1085 | printk(KERN_ERR | 1081 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; |
1086 | "%s: Tried sending packet when interface is stopped\n", | 1082 | |
1087 | dev->name); | 1083 | tx_index = eth_alloc_tx_desc_index(mp); |
1088 | return 1; | 1084 | desc = &mp->p_tx_desc_area[tx_index]; |
1085 | |||
1086 | desc->cmd_sts = ETH_BUFFER_OWNED_BY_DMA; | ||
1087 | /* Last Frag enables interrupt and frees the skb */ | ||
1088 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1089 | desc->cmd_sts |= ETH_ZERO_PADDING | | ||
1090 | ETH_TX_LAST_DESC | | ||
1091 | ETH_TX_ENABLE_INTERRUPT; | ||
1092 | mp->tx_skb[tx_index] = skb; | ||
1093 | } else | ||
1094 | mp->tx_skb[tx_index] = 0; | ||
1095 | |||
1096 | desc = &mp->p_tx_desc_area[tx_index]; | ||
1097 | desc->l4i_chk = 0; | ||
1098 | desc->byte_cnt = this_frag->size; | ||
1099 | desc->buf_ptr = dma_map_page(NULL, this_frag->page, | ||
1100 | this_frag->page_offset, | ||
1101 | this_frag->size, | ||
1102 | DMA_TO_DEVICE); | ||
1089 | } | 1103 | } |
1104 | } | ||
1090 | 1105 | ||
1091 | /* This is a hard error, log it. */ | 1106 | /** |
1092 | if ((mp->tx_ring_size - mp->tx_ring_skbs) <= | 1107 | * eth_tx_submit_descs_for_skb - submit data from an skb to the tx hw |
1093 | (skb_shinfo(skb)->nr_frags + 1)) { | 1108 | * |
1094 | netif_stop_queue(dev); | 1109 | * Ensure the data for an skb to be transmitted is mapped properly, |
1095 | printk(KERN_ERR | 1110 | * then fill in descriptors in the tx hw queue and start the hardware. |
1096 | "%s: Bug in mv643xx_eth - Trying to transmit when" | 1111 | */ |
1097 | " queue full !\n", dev->name); | 1112 | static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp, |
1098 | return 1; | 1113 | struct sk_buff *skb) |
1099 | } | 1114 | { |
1115 | int tx_index; | ||
1116 | struct eth_tx_desc *desc; | ||
1117 | u32 cmd_sts; | ||
1118 | int length; | ||
1119 | int nr_frags = skb_shinfo(skb)->nr_frags; | ||
1100 | 1120 | ||
1101 | /* Paranoid check - this shouldn't happen */ | 1121 | cmd_sts = ETH_TX_FIRST_DESC | ETH_GEN_CRC | ETH_BUFFER_OWNED_BY_DMA; |
1102 | if (skb == NULL) { | ||
1103 | stats->tx_dropped++; | ||
1104 | printk(KERN_ERR "mv64320_eth paranoid check failed\n"); | ||
1105 | return 1; | ||
1106 | } | ||
1107 | 1122 | ||
1108 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | 1123 | tx_index = eth_alloc_tx_desc_index(mp); |
1109 | if (has_tiny_unaligned_frags(skb)) { | 1124 | desc = &mp->p_tx_desc_area[tx_index]; |
1110 | if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { | ||
1111 | stats->tx_dropped++; | ||
1112 | printk(KERN_DEBUG "%s: failed to linearize tiny " | ||
1113 | "unaligned fragment\n", dev->name); | ||
1114 | return 1; | ||
1115 | } | ||
1116 | } | ||
1117 | 1125 | ||
1118 | spin_lock_irqsave(&mp->lock, flags); | 1126 | if (nr_frags) { |
1127 | eth_tx_fill_frag_descs(mp, skb); | ||
1119 | 1128 | ||
1120 | if (!skb_shinfo(skb)->nr_frags) { | 1129 | length = skb_headlen(skb); |
1121 | if (skb->ip_summed != CHECKSUM_HW) { | 1130 | mp->tx_skb[tx_index] = 0; |
1122 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | ||
1123 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | ||
1124 | ETH_TX_FIRST_DESC | | ||
1125 | ETH_TX_LAST_DESC | | ||
1126 | 5 << ETH_TX_IHL_SHIFT; | ||
1127 | pkt_info.l4i_chk = 0; | ||
1128 | } else { | ||
1129 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | | ||
1130 | ETH_TX_FIRST_DESC | | ||
1131 | ETH_TX_LAST_DESC | | ||
1132 | ETH_GEN_TCP_UDP_CHECKSUM | | ||
1133 | ETH_GEN_IP_V_4_CHECKSUM | | ||
1134 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | ||
1135 | /* CPU already calculated pseudo header checksum. */ | ||
1136 | if ((skb->protocol == ETH_P_IP) && | ||
1137 | (skb->nh.iph->protocol == IPPROTO_UDP) ) { | ||
1138 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1139 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1140 | } else if ((skb->protocol == ETH_P_IP) && | ||
1141 | (skb->nh.iph->protocol == IPPROTO_TCP)) | ||
1142 | pkt_info.l4i_chk = skb->h.th->check; | ||
1143 | else { | ||
1144 | printk(KERN_ERR | ||
1145 | "%s: chksum proto != IPv4 TCP or UDP\n", | ||
1146 | dev->name); | ||
1147 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1148 | return 1; | ||
1149 | } | ||
1150 | } | ||
1151 | pkt_info.byte_cnt = skb->len; | ||
1152 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1153 | DMA_TO_DEVICE); | ||
1154 | pkt_info.return_info = skb; | ||
1155 | status = eth_port_send(mp, &pkt_info); | ||
1156 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1157 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1158 | dev->name); | ||
1159 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1160 | } else { | 1131 | } else { |
1161 | unsigned int frag; | 1132 | cmd_sts |= ETH_ZERO_PADDING | |
1133 | ETH_TX_LAST_DESC | | ||
1134 | ETH_TX_ENABLE_INTERRUPT; | ||
1135 | length = skb->len; | ||
1136 | mp->tx_skb[tx_index] = skb; | ||
1137 | } | ||
1162 | 1138 | ||
1163 | /* first frag which is skb header */ | 1139 | desc->byte_cnt = length; |
1164 | pkt_info.byte_cnt = skb_headlen(skb); | 1140 | desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE); |
1165 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, | ||
1166 | skb_headlen(skb), | ||
1167 | DMA_TO_DEVICE); | ||
1168 | pkt_info.l4i_chk = 0; | ||
1169 | pkt_info.return_info = 0; | ||
1170 | |||
1171 | if (skb->ip_summed != CHECKSUM_HW) | ||
1172 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | ||
1173 | pkt_info.cmd_sts = ETH_TX_FIRST_DESC | | ||
1174 | 5 << ETH_TX_IHL_SHIFT; | ||
1175 | else { | ||
1176 | pkt_info.cmd_sts = ETH_TX_FIRST_DESC | | ||
1177 | ETH_GEN_TCP_UDP_CHECKSUM | | ||
1178 | ETH_GEN_IP_V_4_CHECKSUM | | ||
1179 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; | ||
1180 | /* CPU already calculated pseudo header checksum. */ | ||
1181 | if ((skb->protocol == ETH_P_IP) && | ||
1182 | (skb->nh.iph->protocol == IPPROTO_UDP)) { | ||
1183 | pkt_info.cmd_sts |= ETH_UDP_FRAME; | ||
1184 | pkt_info.l4i_chk = skb->h.uh->check; | ||
1185 | } else if ((skb->protocol == ETH_P_IP) && | ||
1186 | (skb->nh.iph->protocol == IPPROTO_TCP)) | ||
1187 | pkt_info.l4i_chk = skb->h.th->check; | ||
1188 | else { | ||
1189 | printk(KERN_ERR | ||
1190 | "%s: chksum proto != IPv4 TCP or UDP\n", | ||
1191 | dev->name); | ||
1192 | spin_unlock_irqrestore(&mp->lock, flags); | ||
1193 | return 1; | ||
1194 | } | ||
1195 | } | ||
1196 | 1141 | ||
1197 | status = eth_port_send(mp, &pkt_info); | 1142 | if (skb->ip_summed == CHECKSUM_HW) { |
1198 | if (status != ETH_OK) { | 1143 | BUG_ON(skb->protocol != ETH_P_IP); |
1199 | if ((status == ETH_ERROR)) | 1144 | |
1200 | printk(KERN_ERR | 1145 | cmd_sts |= ETH_GEN_TCP_UDP_CHECKSUM | |
1201 | "%s: Error on transmitting packet\n", | 1146 | ETH_GEN_IP_V_4_CHECKSUM | |
1202 | dev->name); | 1147 | skb->nh.iph->ihl << ETH_TX_IHL_SHIFT; |
1203 | if (status == ETH_QUEUE_FULL) | 1148 | |
1204 | printk("Error on Queue Full \n"); | 1149 | switch (skb->nh.iph->protocol) { |
1205 | if (status == ETH_QUEUE_LAST_RESOURCE) | 1150 | case IPPROTO_UDP: |
1206 | printk("Tx resource error \n"); | 1151 | cmd_sts |= ETH_UDP_FRAME; |
1152 | desc->l4i_chk = skb->h.uh->check; | ||
1153 | break; | ||
1154 | case IPPROTO_TCP: | ||
1155 | desc->l4i_chk = skb->h.th->check; | ||
1156 | break; | ||
1157 | default: | ||
1158 | BUG(); | ||
1207 | } | 1159 | } |
1208 | stats->tx_bytes += pkt_info.byte_cnt; | 1160 | } else { |
1209 | 1161 | /* Errata BTS #50, IHL must be 5 if no HW checksum */ | |
1210 | /* Check for the remaining frags */ | 1162 | cmd_sts |= 5 << ETH_TX_IHL_SHIFT; |
1211 | for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { | 1163 | desc->l4i_chk = 0; |
1212 | skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; | 1164 | } |
1213 | pkt_info.l4i_chk = 0x0000; | 1165 | |
1214 | pkt_info.cmd_sts = 0x00000000; | 1166 | /* ensure all other descriptors are written before first cmd_sts */ |
1215 | 1167 | wmb(); | |
1216 | /* Last Frag enables interrupt and frees the skb */ | 1168 | desc->cmd_sts = cmd_sts; |
1217 | if (frag == (skb_shinfo(skb)->nr_frags - 1)) { | ||
1218 | pkt_info.cmd_sts |= ETH_TX_ENABLE_INTERRUPT | | ||
1219 | ETH_TX_LAST_DESC; | ||
1220 | pkt_info.return_info = skb; | ||
1221 | } else { | ||
1222 | pkt_info.return_info = 0; | ||
1223 | } | ||
1224 | pkt_info.l4i_chk = 0; | ||
1225 | pkt_info.byte_cnt = this_frag->size; | ||
1226 | 1169 | ||
1227 | pkt_info.buf_ptr = dma_map_page(NULL, this_frag->page, | 1170 | /* ensure all descriptors are written before poking hardware */ |
1228 | this_frag->page_offset, | 1171 | wmb(); |
1229 | this_frag->size, | 1172 | mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED); |
1230 | DMA_TO_DEVICE); | ||
1231 | 1173 | ||
1232 | status = eth_port_send(mp, &pkt_info); | 1174 | mp->tx_desc_count += nr_frags + 1; |
1175 | } | ||
1233 | 1176 | ||
1234 | if (status != ETH_OK) { | 1177 | /** |
1235 | if ((status == ETH_ERROR)) | 1178 | * mv643xx_eth_start_xmit - queue an skb to the hardware for transmission |
1236 | printk(KERN_ERR "%s: Error on " | 1179 | * |
1237 | "transmitting packet\n", | 1180 | */ |
1238 | dev->name); | 1181 | static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1182 | { | ||
1183 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1184 | struct net_device_stats *stats = &mp->stats; | ||
1185 | unsigned long flags; | ||
1239 | 1186 | ||
1240 | if (status == ETH_QUEUE_LAST_RESOURCE) | 1187 | BUG_ON(netif_queue_stopped(dev)); |
1241 | printk("Tx resource error \n"); | 1188 | BUG_ON(skb == NULL); |
1189 | BUG_ON(mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB); | ||
1242 | 1190 | ||
1243 | if (status == ETH_QUEUE_FULL) | 1191 | if (has_tiny_unaligned_frags(skb)) { |
1244 | printk("Queue is full \n"); | 1192 | if ((skb_linearize(skb, GFP_ATOMIC) != 0)) { |
1245 | } | 1193 | stats->tx_dropped++; |
1246 | stats->tx_bytes += pkt_info.byte_cnt; | 1194 | printk(KERN_DEBUG "%s: failed to linearize tiny " |
1195 | "unaligned fragment\n", dev->name); | ||
1196 | return 1; | ||
1247 | } | 1197 | } |
1248 | } | 1198 | } |
1249 | #else | ||
1250 | spin_lock_irqsave(&mp->lock, flags); | ||
1251 | 1199 | ||
1252 | pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC | | 1200 | spin_lock_irqsave(&mp->lock, flags); |
1253 | ETH_TX_LAST_DESC; | ||
1254 | pkt_info.l4i_chk = 0; | ||
1255 | pkt_info.byte_cnt = skb->len; | ||
1256 | pkt_info.buf_ptr = dma_map_single(NULL, skb->data, skb->len, | ||
1257 | DMA_TO_DEVICE); | ||
1258 | pkt_info.return_info = skb; | ||
1259 | status = eth_port_send(mp, &pkt_info); | ||
1260 | if ((status == ETH_ERROR) || (status == ETH_QUEUE_FULL)) | ||
1261 | printk(KERN_ERR "%s: Error on transmitting packet\n", | ||
1262 | dev->name); | ||
1263 | stats->tx_bytes += pkt_info.byte_cnt; | ||
1264 | #endif | ||
1265 | |||
1266 | /* Check if TX queue can handle another skb. If not, then | ||
1267 | * signal higher layers to stop requesting TX | ||
1268 | */ | ||
1269 | if (mp->tx_ring_size <= (mp->tx_ring_skbs + MAX_DESCS_PER_SKB)) | ||
1270 | /* | ||
1271 | * Stop getting skb's from upper layers. | ||
1272 | * Getting skb's from upper layers will be enabled again after | ||
1273 | * packets are released. | ||
1274 | */ | ||
1275 | netif_stop_queue(dev); | ||
1276 | 1201 | ||
1277 | /* Update statistics and start of transmittion time */ | 1202 | eth_tx_submit_descs_for_skb(mp, skb); |
1203 | stats->tx_bytes = skb->len; | ||
1278 | stats->tx_packets++; | 1204 | stats->tx_packets++; |
1279 | dev->trans_start = jiffies; | 1205 | dev->trans_start = jiffies; |
1280 | 1206 | ||
1207 | if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) | ||
1208 | netif_stop_queue(dev); | ||
1209 | |||
1281 | spin_unlock_irqrestore(&mp->lock, flags); | 1210 | spin_unlock_irqrestore(&mp->lock, flags); |
1282 | 1211 | ||
1283 | return 0; /* success */ | 1212 | return 0; /* success */ |
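The delicate part of the new submit path is ordering: fragment descriptors are filled first, a wmb() makes them visible, only then does the first descriptor's cmd_sts hand ownership to the DMA engine, and a second wmb() precedes the doorbell write that enables the TX queue. Because the engine starts from the first descriptor, it can never observe a half-built chain. A toy model of the sequence (a compiler barrier stands in for wmb(); flags and layout are simplified):

#include <stdint.h>

struct desc { volatile uint32_t cmd_sts; uint32_t buf, len; };

#define OWNED_BY_DMA 0x80000000u
#define barrier() __asm__ __volatile__("" ::: "memory")  /* wmb() stand-in */

static void submit(struct desc *first, struct desc *frags, int n, uint32_t fl)
{
        int i;

        for (i = 0; i < n; i++)                 /* fragment descriptors first */
                frags[i].cmd_sts = OWNED_BY_DMA;
        barrier();                              /* all visible before...      */
        first->cmd_sts = fl | OWNED_BY_DMA;     /* ...ownership is handed off */
        barrier();                              /* handoff visible before...  */
        /* ...the caller writes the TX-enable doorbell register here */
}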
@@ -1306,16 +1235,45 @@ static void mv643xx_netpoll(struct net_device *netdev) | |||
1306 | struct mv643xx_private *mp = netdev_priv(netdev); | 1235 | struct mv643xx_private *mp = netdev_priv(netdev); |
1307 | int port_num = mp->port_num; | 1236 | int port_num = mp->port_num; |
1308 | 1237 | ||
1309 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL); | 1238 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); |
1310 | /* wait for previous write to complete */ | 1239 | /* wait for previous write to complete */ |
1311 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); | 1240 | mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num)); |
1312 | 1241 | ||
1313 | mv643xx_eth_int_handler(netdev->irq, netdev, NULL); | 1242 | mv643xx_eth_int_handler(netdev->irq, netdev, NULL); |
1314 | 1243 | ||
1315 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL); | 1244 | mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); |
1316 | } | 1245 | } |
1317 | #endif | 1246 | #endif |
1318 | 1247 | ||
1248 | static void mv643xx_init_ethtool_cmd(struct net_device *dev, int phy_address, | ||
1249 | int speed, int duplex, | ||
1250 | struct ethtool_cmd *cmd) | ||
1251 | { | ||
1252 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1253 | |||
1254 | memset(cmd, 0, sizeof(*cmd)); | ||
1255 | |||
1256 | cmd->port = PORT_MII; | ||
1257 | cmd->transceiver = XCVR_INTERNAL; | ||
1258 | cmd->phy_address = phy_address; | ||
1259 | |||
1260 | if (speed == 0) { | ||
1261 | cmd->autoneg = AUTONEG_ENABLE; | ||
1262 | /* mii lib checks, but doesn't use speed on AUTONEG_ENABLE */ | ||
1263 | cmd->speed = SPEED_100; | ||
1264 | cmd->advertising = ADVERTISED_10baseT_Half | | ||
1265 | ADVERTISED_10baseT_Full | | ||
1266 | ADVERTISED_100baseT_Half | | ||
1267 | ADVERTISED_100baseT_Full; | ||
1268 | if (mp->mii.supports_gmii) | ||
1269 | cmd->advertising |= ADVERTISED_1000baseT_Full; | ||
1270 | } else { | ||
1271 | cmd->autoneg = AUTONEG_DISABLE; | ||
1272 | cmd->speed = speed; | ||
1273 | cmd->duplex = duplex; | ||
1274 | } | ||
1275 | } | ||
1276 | |||
1319 | /*/ | 1277 | /*/ |
1320 | * mv643xx_eth_probe | 1278 | * mv643xx_eth_probe |
1321 | * | 1279 | * |
@@ -1336,6 +1294,9 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1336 | u8 *p; | 1294 | u8 *p; |
1337 | struct resource *res; | 1295 | struct resource *res; |
1338 | int err; | 1296 | int err; |
1297 | struct ethtool_cmd cmd; | ||
1298 | int duplex = DUPLEX_HALF; | ||
1299 | int speed = 0; /* default to auto-negotiation */ | ||
1339 | 1300 | ||
1340 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); | 1301 | dev = alloc_etherdev(sizeof(struct mv643xx_private)); |
1341 | if (!dev) | 1302 | if (!dev) |
@@ -1373,6 +1334,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1373 | dev->tx_queue_len = mp->tx_ring_size; | 1334 | dev->tx_queue_len = mp->tx_ring_size; |
1374 | dev->base_addr = 0; | 1335 | dev->base_addr = 0; |
1375 | dev->change_mtu = mv643xx_eth_change_mtu; | 1336 | dev->change_mtu = mv643xx_eth_change_mtu; |
1337 | dev->do_ioctl = mv643xx_eth_do_ioctl; | ||
1376 | SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops); | 1338 | SET_ETHTOOL_OPS(dev, &mv643xx_ethtool_ops); |
1377 | 1339 | ||
1378 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | 1340 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX |
@@ -1393,33 +1355,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1393 | 1355 | ||
1394 | /* set default config values */ | 1356 | /* set default config values */ |
1395 | eth_port_uc_addr_get(dev, dev->dev_addr); | 1357 | eth_port_uc_addr_get(dev, dev->dev_addr); |
1396 | mp->port_config = MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE; | ||
1397 | mp->port_config_extend = MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE; | ||
1398 | mp->port_sdma_config = MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE; | ||
1399 | mp->port_serial_control = MV643XX_ETH_PORT_SERIAL_CONTROL_DEFAULT_VALUE; | ||
1400 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; | 1358 | mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE; |
1401 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; | 1359 | mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; |
1402 | 1360 | ||
1403 | pd = pdev->dev.platform_data; | 1361 | pd = pdev->dev.platform_data; |
1404 | if (pd) { | 1362 | if (pd) { |
1405 | if (pd->mac_addr != NULL) | 1363 | if (pd->mac_addr) |
1406 | memcpy(dev->dev_addr, pd->mac_addr, 6); | 1364 | memcpy(dev->dev_addr, pd->mac_addr, 6); |
1407 | 1365 | ||
1408 | if (pd->phy_addr || pd->force_phy_addr) | 1366 | if (pd->phy_addr || pd->force_phy_addr) |
1409 | ethernet_phy_set(port_num, pd->phy_addr); | 1367 | ethernet_phy_set(port_num, pd->phy_addr); |
1410 | 1368 | ||
1411 | if (pd->port_config || pd->force_port_config) | ||
1412 | mp->port_config = pd->port_config; | ||
1413 | |||
1414 | if (pd->port_config_extend || pd->force_port_config_extend) | ||
1415 | mp->port_config_extend = pd->port_config_extend; | ||
1416 | |||
1417 | if (pd->port_sdma_config || pd->force_port_sdma_config) | ||
1418 | mp->port_sdma_config = pd->port_sdma_config; | ||
1419 | |||
1420 | if (pd->port_serial_control || pd->force_port_serial_control) | ||
1421 | mp->port_serial_control = pd->port_serial_control; | ||
1422 | |||
1423 | if (pd->rx_queue_size) | 1369 | if (pd->rx_queue_size) |
1424 | mp->rx_ring_size = pd->rx_queue_size; | 1370 | mp->rx_ring_size = pd->rx_queue_size; |
1425 | 1371 | ||
@@ -1435,16 +1381,33 @@ static int mv643xx_eth_probe(struct platform_device *pdev) | |||
1435 | mp->rx_sram_size = pd->rx_sram_size; | 1381 | mp->rx_sram_size = pd->rx_sram_size; |
1436 | mp->rx_sram_addr = pd->rx_sram_addr; | 1382 | mp->rx_sram_addr = pd->rx_sram_addr; |
1437 | } | 1383 | } |
1384 | |||
1385 | duplex = pd->duplex; | ||
1386 | speed = pd->speed; | ||
1438 | } | 1387 | } |
1439 | 1388 | ||
1389 | /* Hook up MII support for ethtool */ | ||
1390 | mp->mii.dev = dev; | ||
1391 | mp->mii.mdio_read = mv643xx_mdio_read; | ||
1392 | mp->mii.mdio_write = mv643xx_mdio_write; | ||
1393 | mp->mii.phy_id = ethernet_phy_get(port_num); | ||
1394 | mp->mii.phy_id_mask = 0x3f; | ||
1395 | mp->mii.reg_num_mask = 0x1f; | ||
1396 | |||
1440 | err = ethernet_phy_detect(port_num); | 1397 | err = ethernet_phy_detect(port_num); |
1441 | if (err) { | 1398 | if (err) { |
1442 | pr_debug("MV643xx ethernet port %d: " | 1399 | pr_debug("MV643xx ethernet port %d: " |
1443 | "No PHY detected at addr %d\n", | 1400 | "No PHY detected at addr %d\n", |
1444 | port_num, ethernet_phy_get(port_num)); | 1401 | port_num, ethernet_phy_get(port_num)); |
1445 | return err; | 1402 | goto out; |
1446 | } | 1403 | } |
1447 | 1404 | ||
1405 | ethernet_phy_reset(port_num); | ||
1406 | mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); | ||
1407 | mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); | ||
1408 | mv643xx_eth_update_pscr(dev, &cmd); | ||
1409 | mv643xx_set_settings(dev, &cmd); | ||
1410 | |||
1448 | err = register_netdev(dev); | 1411 | err = register_netdev(dev); |
1449 | if (err) | 1412 | if (err) |
1450 | goto out; | 1413 | goto out; |
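
The probe path above reads speed and duplex from platform data, with speed == 0 selecting auto-negotiation. A hypothetical board-support snippet showing how those fields would be populated (the struct and field names are the ones this probe path dereferences; the values are illustrative only):

	static struct mv643xx_eth_platform_data example_eth_pd = {
		.phy_addr	= 8,		/* PHY at SMI address 8 (example) */
		.force_phy_addr	= 1,		/* use phy_addr even if it is zero */
		.speed		= 0,		/* 0 requests auto-negotiation */
		.duplex		= DUPLEX_HALF,	/* ignored while speed == 0 */
	};
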
@@ -1689,26 +1652,9 @@ MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); | |||
1689 | * to the Rx descriptor ring to enable the reuse of this resource. | 1652 | * to the Rx descriptor ring to enable the reuse of this resource. |
1690 | * Returning an Rx resource is done using the eth_rx_return_buff API. | 1653 | * Returning an Rx resource is done using the eth_rx_return_buff API. |
1691 | * | 1654 | * |
1692 | * Transmit operation: | ||
1693 | * The eth_port_send API supports Scatter-Gather, which enables | ||
1694 | * sending a packet spanned over multiple buffers. This means that | ||
1695 | * each packet info structure given by the user and put into the | ||
1696 | * Tx descriptor ring will be transmitted only when the 'LAST' bit | ||
1697 | * is set in the packet info command status field. This API also | ||
1698 | * considers restrictions regarding buffer alignments and | ||
1699 | * sizes. | ||
1700 | * The user must return a Tx resource after ensuring the buffer | ||
1701 | * has been transmitted to enable the Tx ring indexes to update. | ||
1702 | * | ||
1703 | * BOARD LAYOUT | ||
1704 | * This device is on-board. No jumper diagram is necessary. | ||
1705 | * | ||
1706 | * EXTERNAL INTERFACE | ||
1707 | * | ||
1708 | * Prior to calling the initialization routine eth_port_init() the user | 1655 | * Prior to calling the initialization routine eth_port_init() the user |
1709 | * must set the following fields in the mv643xx_private struct: | 1656 | * must set the following fields in the mv643xx_private struct: |
1710 | * port_num User Ethernet port number. | 1657 | * port_num User Ethernet port number. |
1711 | * port_mac_addr[6] User defined port MAC address. | ||
1712 | * port_config User port configuration value. | 1658 | * port_config User port configuration value. |
1713 | * port_config_extend User port config extend value. | 1659 | * port_config_extend User port config extend value. |
1714 | * port_sdma_config User port SDMA config value. | 1660 | * port_sdma_config User port SDMA config value. |
@@ -1725,20 +1671,12 @@ MODULE_DESCRIPTION("Ethernet driver for Marvell MV643XX"); | |||
1725 | * return_info Tx/Rx user resource return information. | 1671 | * return_info Tx/Rx user resource return information. |
1726 | */ | 1672 | */ |
1727 | 1673 | ||
1728 | /* defines */ | ||
1729 | /* SDMA command macros */ | ||
1730 | #define ETH_ENABLE_TX_QUEUE(eth_port) \ | ||
1731 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(eth_port), 1) | ||
1732 | |||
1733 | /* locals */ | ||
1734 | |||
1735 | /* PHY routines */ | 1674 | /* PHY routines */ |
1736 | static int ethernet_phy_get(unsigned int eth_port_num); | 1675 | static int ethernet_phy_get(unsigned int eth_port_num); |
1737 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); | 1676 | static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); |
1738 | 1677 | ||
1739 | /* Ethernet Port routines */ | 1678 | /* Ethernet Port routines */ |
1740 | static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | 1679 | static void eth_port_set_filter_table_entry(int table, unsigned char entry); |
1741 | int option); | ||
1742 | 1680 | ||
1743 | /* | 1681 | /* |
1744 | * eth_port_init - Initialize the Ethernet port driver | 1682 | * eth_port_init - Initialize the Ethernet port driver |
@@ -1766,17 +1704,11 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | |||
1766 | */ | 1704 | */ |
1767 | static void eth_port_init(struct mv643xx_private *mp) | 1705 | static void eth_port_init(struct mv643xx_private *mp) |
1768 | { | 1706 | { |
1769 | mp->port_rx_queue_command = 0; | ||
1770 | mp->port_tx_queue_command = 0; | ||
1771 | |||
1772 | mp->rx_resource_err = 0; | 1707 | mp->rx_resource_err = 0; |
1773 | mp->tx_resource_err = 0; | ||
1774 | 1708 | ||
1775 | eth_port_reset(mp->port_num); | 1709 | eth_port_reset(mp->port_num); |
1776 | 1710 | ||
1777 | eth_port_init_mac_tables(mp->port_num); | 1711 | eth_port_init_mac_tables(mp->port_num); |
1778 | |||
1779 | ethernet_phy_reset(mp->port_num); | ||
1780 | } | 1712 | } |
1781 | 1713 | ||
1782 | /* | 1714 | /* |
@@ -1798,7 +1730,7 @@ static void eth_port_init(struct mv643xx_private *mp) | |||
1798 | * and ether_init_rx_desc_ring for Rx queues). | 1730 | * and ether_init_rx_desc_ring for Rx queues). |
1799 | * | 1731 | * |
1800 | * INPUT: | 1732 | * INPUT: |
1801 | * struct mv643xx_private *mp Ethernet port control struct | 1733 | * struct net_device *dev A pointer to the required interface |
1802 | * | 1734 | * |
1803 | * OUTPUT: | 1735 | * OUTPUT: |
1804 | * Ethernet port is ready to receive and transmit. | 1736 | * Ethernet port is ready to receive and transmit. |
@@ -1806,10 +1738,13 @@ static void eth_port_init(struct mv643xx_private *mp) | |||
1806 | * RETURN: | 1738 | * RETURN: |
1807 | * None. | 1739 | * None. |
1808 | */ | 1740 | */ |
1809 | static void eth_port_start(struct mv643xx_private *mp) | 1741 | static void eth_port_start(struct net_device *dev) |
1810 | { | 1742 | { |
1743 | struct mv643xx_private *mp = netdev_priv(dev); | ||
1811 | unsigned int port_num = mp->port_num; | 1744 | unsigned int port_num = mp->port_num; |
1812 | int tx_curr_desc, rx_curr_desc; | 1745 | int tx_curr_desc, rx_curr_desc; |
1746 | u32 pscr; | ||
1747 | struct ethtool_cmd ethtool_cmd; | ||
1813 | 1748 | ||
1814 | /* Assignment of Tx CTRP of given queue */ | 1749 | /* Assignment of Tx CTRP of given queue */ |
1815 | tx_curr_desc = mp->tx_curr_desc_q; | 1750 | tx_curr_desc = mp->tx_curr_desc_q; |
@@ -1822,37 +1757,45 @@ static void eth_port_start(struct mv643xx_private *mp) | |||
1822 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); | 1757 | (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); |
1823 | 1758 | ||
1824 | /* Add the assigned Ethernet address to the port's address table */ | 1759 | /* Add the assigned Ethernet address to the port's address table */ |
1825 | eth_port_uc_addr_set(port_num, mp->port_mac_addr); | 1760 | eth_port_uc_addr_set(port_num, dev->dev_addr); |
1826 | 1761 | ||
1827 | /* Assign port configuration and command. */ | 1762 | /* Assign port configuration and command. */ |
1828 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), mp->port_config); | 1763 | mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num), |
1764 | MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE); | ||
1829 | 1765 | ||
1830 | mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), | 1766 | mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num), |
1831 | mp->port_config_extend); | 1767 | MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE); |
1832 | 1768 | ||
1769 | pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
1833 | 1770 | ||
1834 | /* Increase the Rx side buffer size if supporting GigE */ | 1771 | pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS); |
1835 | if (mp->port_serial_control & MV643XX_ETH_SET_GMII_SPEED_TO_1000) | 1772 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); |
1836 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1773 | |
1837 | (mp->port_serial_control & 0xfff1ffff) | (0x5 << 17)); | 1774 | pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL | |
1838 | else | 1775 | MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII | |
1839 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1776 | MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX | |
1840 | mp->port_serial_control); | 1777 | MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | |
1778 | MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED; | ||
1841 | 1779 | ||
1842 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), | 1780 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); |
1843 | mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)) | | 1781 | |
1844 | MV643XX_ETH_SERIAL_PORT_ENABLE); | 1782 | pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE; |
1783 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr); | ||
1845 | 1784 | ||
1846 | /* Assign port SDMA configuration */ | 1785 | /* Assign port SDMA configuration */ |
1847 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), | 1786 | mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num), |
1848 | mp->port_sdma_config); | 1787 | MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE); |
1849 | 1788 | ||
1850 | /* Enable port Rx. */ | 1789 | /* Enable port Rx. */ |
1851 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | 1790 | mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); |
1852 | mp->port_rx_queue_command); | ||
1853 | 1791 | ||
1854 | /* Disable port bandwidth limits by clearing MTU register */ | 1792 | /* Disable port bandwidth limits by clearing MTU register */ |
1855 | mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); | 1793 | mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0); |
1794 | |||
1795 | /* save phy settings across reset */ | ||
1796 | mv643xx_get_settings(dev, ðtool_cmd); | ||
1797 | ethernet_phy_reset(mp->port_num); | ||
1798 | mv643xx_set_settings(dev, ðtool_cmd); | ||
1856 | } | 1799 | } |
1857 | 1800 | ||
1858 | /* | 1801 | /* |
@@ -1866,8 +1809,9 @@ static void eth_port_start(struct mv643xx_private *mp) | |||
1866 | * char * p_addr Address to be set | 1809 | * char * p_addr Address to be set |
1867 | * | 1810 | * |
1868 | * OUTPUT: | 1811 | * OUTPUT: |
1869 | * Set MAC address low and high registers. Also calls eth_port_uc_addr() | 1812 | * Set MAC address low and high registers. Also calls |
1870 | * To set the unicast table with the proper information. | 1813 | * eth_port_set_filter_table_entry() to set the unicast |
1814 | * table with the proper information. | ||
1871 | * | 1815 | * |
1872 | * RETURN: | 1816 | * RETURN: |
1873 | * N/A. | 1817 | * N/A. |
@@ -1878,6 +1822,7 @@ static void eth_port_uc_addr_set(unsigned int eth_port_num, | |||
1878 | { | 1822 | { |
1879 | unsigned int mac_h; | 1823 | unsigned int mac_h; |
1880 | unsigned int mac_l; | 1824 | unsigned int mac_l; |
1825 | int table; | ||
1881 | 1826 | ||
1882 | mac_l = (p_addr[4] << 8) | (p_addr[5]); | 1827 | mac_l = (p_addr[4] << 8) | (p_addr[5]); |
1883 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | | 1828 | mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | |
@@ -1887,9 +1832,8 @@ static void eth_port_uc_addr_set(unsigned int eth_port_num, | |||
1887 | mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h); | 1832 | mv_write(MV643XX_ETH_MAC_ADDR_HIGH(eth_port_num), mac_h); |
1888 | 1833 | ||
1889 | /* Accept frames of this address */ | 1834 | /* Accept frames of this address */ |
1890 | eth_port_uc_addr(eth_port_num, p_addr[5], ACCEPT_MAC_ADDR); | 1835 | table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(eth_port_num); |
1891 | 1836 | eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); | |
1892 | return; | ||
1893 | } | 1837 | } |
1894 | 1838 | ||
1895 | /* | 1839 | /* |
@@ -1928,72 +1872,6 @@ static void eth_port_uc_addr_get(struct net_device *dev, unsigned char *p_addr) | |||
1928 | } | 1872 | } |
1929 | 1873 | ||
1930 | /* | 1874 | /* |
1931 | * eth_port_uc_addr - This function Set the port unicast address table | ||
1932 | * | ||
1933 | * DESCRIPTION: | ||
1934 | * This function locates the proper entry in the Unicast table for the | ||
1935 | * specified MAC nibble and sets its properties according to function | ||
1936 | * parameters. | ||
1937 | * | ||
1938 | * INPUT: | ||
1939 | * unsigned int eth_port_num Port number. | ||
1940 | * unsigned char uc_nibble Unicast MAC Address last nibble. | ||
1941 | * int option 0 = Add, 1 = remove address. | ||
1942 | * | ||
1943 | * OUTPUT: | ||
1944 | * This function adds/removes MAC addresses from the port unicast address | ||
1945 | * table. | ||
1946 | * | ||
1947 | * RETURN: | ||
1948 | * true if output succeeded. | ||
1949 | * false if option parameter is invalid. | ||
1950 | * | ||
1951 | */ | ||
1952 | static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble, | ||
1953 | int option) | ||
1954 | { | ||
1955 | unsigned int unicast_reg; | ||
1956 | unsigned int tbl_offset; | ||
1957 | unsigned int reg_offset; | ||
1958 | |||
1959 | /* Locate the Unicast table entry */ | ||
1960 | uc_nibble = (0xf & uc_nibble); | ||
1961 | tbl_offset = (uc_nibble / 4) * 4; /* Register offset from unicast table base */ | ||
1962 | reg_offset = uc_nibble % 4; /* Entry offset within the above register */ | ||
1963 | |||
1964 | switch (option) { | ||
1965 | case REJECT_MAC_ADDR: | ||
1966 | /* Clear accepts frame bit at given unicast DA table entry */ | ||
1967 | unicast_reg = mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1968 | (eth_port_num) + tbl_offset)); | ||
1969 | |||
1970 | unicast_reg &= (0x0E << (8 * reg_offset)); | ||
1971 | |||
1972 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1973 | (eth_port_num) + tbl_offset), unicast_reg); | ||
1974 | break; | ||
1975 | |||
1976 | case ACCEPT_MAC_ADDR: | ||
1977 | /* Set accepts frame bit at unicast DA filter table entry */ | ||
1978 | unicast_reg = | ||
1979 | mv_read((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1980 | (eth_port_num) + tbl_offset)); | ||
1981 | |||
1982 | unicast_reg |= (0x01 << (8 * reg_offset)); | ||
1983 | |||
1984 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | ||
1985 | (eth_port_num) + tbl_offset), unicast_reg); | ||
1986 | |||
1987 | break; | ||
1988 | |||
1989 | default: | ||
1990 | return 0; | ||
1991 | } | ||
1992 | |||
1993 | return 1; | ||
1994 | } | ||
1995 | |||
1996 | /* | ||
1997 | * The entries in each table are indexed by a hash of a packet's MAC | 1875 | * The entries in each table are indexed by a hash of a packet's MAC |
1998 | * address. One bit in each entry determines whether the packet is | 1876 | * address. One bit in each entry determines whether the packet is |
1999 | * accepted. There are 4 entries (each 8 bits wide) in each register | 1877 | * accepted. There are 4 entries (each 8 bits wide) in each register |
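
Given the layout described above (4 entries of 8 bits each per 32-bit register), the new eth_port_set_filter_table_entry() only needs to locate the register and byte lane for an entry and set its accept bit. A minimal sketch consistent with that description, using the driver's own mv_read()/mv_write() register accessors:

	static void eth_port_set_filter_table_entry(int table, unsigned char entry)
	{
		unsigned int table_reg;
		unsigned int tbl_offset;
		unsigned int reg_offset;

		tbl_offset = (entry / 4) * 4;	/* register offset of the entry */
		reg_offset = entry % 4;		/* byte lane within that register */

		/* Set the "accept frame" bit for the specified table entry */
		table_reg = mv_read(table + tbl_offset);
		table_reg |= 0x01 << (8 * reg_offset);
		mv_write(table + tbl_offset, table_reg);
	}
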
@@ -2205,8 +2083,8 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num) | |||
2205 | 2083 | ||
2206 | /* Clear DA filter unicast table (Ex_dFUT) */ | 2084 | /* Clear DA filter unicast table (Ex_dFUT) */ |
2207 | for (table_index = 0; table_index <= 0xC; table_index += 4) | 2085 | for (table_index = 0; table_index <= 0xC; table_index += 4) |
2208 | mv_write((MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE | 2086 | mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE |
2209 | (eth_port_num) + table_index), 0); | 2087 | (eth_port_num) + table_index, 0); |
2210 | 2088 | ||
2211 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { | 2089 | for (table_index = 0; table_index <= 0xFC; table_index += 4) { |
2212 | /* Clear DA filter special multicast table (Ex_dFSMT) */ | 2090 | /* Clear DA filter special multicast table (Ex_dFSMT) */ |
@@ -2389,6 +2267,73 @@ static void ethernet_phy_reset(unsigned int eth_port_num) | |||
2389 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); | 2267 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); |
2390 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ | 2268 | phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ |
2391 | eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); | 2269 | eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); |
2270 | |||
2271 | /* wait for PHY to come out of reset */ | ||
2272 | do { | ||
2273 | udelay(1); | ||
2274 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); | ||
2275 | } while (phy_reg_data & 0x8000); | ||
2276 | } | ||
2277 | |||
2278 | static void mv643xx_eth_port_enable_tx(unsigned int port_num, | ||
2279 | unsigned int queues) | ||
2280 | { | ||
2281 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); | ||
2282 | } | ||
2283 | |||
2284 | static void mv643xx_eth_port_enable_rx(unsigned int port_num, | ||
2285 | unsigned int queues) | ||
2286 | { | ||
2287 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues); | ||
2288 | } | ||
2289 | |||
2290 | static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) | ||
2291 | { | ||
2292 | u32 queues; | ||
2293 | |||
2294 | /* Stop Tx port activity. Check port Tx activity. */ | ||
2295 | queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2296 | & 0xFF; | ||
2297 | if (queues) { | ||
2298 | /* Issue stop command for active queues only */ | ||
2299 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), | ||
2300 | (queues << 8)); | ||
2301 | |||
2302 | /* Wait for all Tx activity to terminate. */ | ||
2303 | /* Check port cause register that all Tx queues are stopped */ | ||
2304 | while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2305 | & 0xFF) | ||
2306 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2307 | |||
2308 | /* Wait for Tx FIFO to empty */ | ||
2309 | while (!(mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) & | ||
2310 | ETH_PORT_TX_FIFO_EMPTY)) | ||
2311 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2312 | } | ||
2313 | |||
2314 | return queues; | ||
2315 | } | ||
2316 | |||
2317 | static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num) | ||
2318 | { | ||
2319 | u32 queues; | ||
2320 | |||
2321 | /* Stop Rx port activity. Check port Rx activity. */ | ||
2322 | queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2323 | & 0xFF; | ||
2324 | if (queues) { | ||
2325 | /* Issue stop command for active queues only */ | ||
2326 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | ||
2327 | (queues << 8)); | ||
2328 | |||
2329 | /* Wait for all Rx activity to terminate. */ | ||
2330 | /* Check port cause register that all Rx queues are stopped */ | ||
2331 | while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2332 | & 0xFF) | ||
2333 | udelay(PHY_WAIT_MICRO_SECONDS); | ||
2334 | } | ||
2335 | |||
2336 | return queues; | ||
2392 | } | 2337 | } |
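
The enable/disable pairs above treat the low byte of the queue command registers as a bitmask of enabled queues, and issue stop commands by writing that same mask shifted into the high byte. A hedged usage sketch (a hypothetical quiesce/resume helper, not code from this patch), showing why the disable routines return the previously active mask:

	/* Hypothetical helper: quiesce the port, reprogram it, then
	 * re-enable only the queues that were previously active.
	 */
	static void example_port_quiesce_resume(struct mv643xx_private *mp)
	{
		unsigned int tx_queues, rx_queues;

		tx_queues = mv643xx_eth_port_disable_tx(mp->port_num);
		rx_queues = mv643xx_eth_port_disable_rx(mp->port_num);

		/* ... reprogram descriptors or port registers here ... */

		mv643xx_eth_port_enable_tx(mp->port_num, tx_queues);
		mv643xx_eth_port_enable_rx(mp->port_num, rx_queues);
	}
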
2393 | 2338 | ||
2394 | /* | 2339 | /* |
@@ -2413,70 +2358,21 @@ static void eth_port_reset(unsigned int port_num) | |||
2413 | { | 2358 | { |
2414 | unsigned int reg_data; | 2359 | unsigned int reg_data; |
2415 | 2360 | ||
2416 | /* Stop Tx port activity. Check port Tx activity. */ | 2361 | mv643xx_eth_port_disable_tx(port_num); |
2417 | reg_data = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)); | 2362 | mv643xx_eth_port_disable_rx(port_num); |
2418 | |||
2419 | if (reg_data & 0xFF) { | ||
2420 | /* Issue stop command for active channels only */ | ||
2421 | mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), | ||
2422 | (reg_data << 8)); | ||
2423 | |||
2424 | /* Wait for all Tx activity to terminate. */ | ||
2425 | /* Check port cause register that all Tx queues are stopped */ | ||
2426 | while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num)) | ||
2427 | & 0xFF) | ||
2428 | udelay(10); | ||
2429 | } | ||
2430 | |||
2431 | /* Stop Rx port activity. Check port Rx activity. */ | ||
2432 | reg_data = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)); | ||
2433 | |||
2434 | if (reg_data & 0xFF) { | ||
2435 | /* Issue stop command for active channels only */ | ||
2436 | mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), | ||
2437 | (reg_data << 8)); | ||
2438 | |||
2439 | /* Wait for all Rx activity to terminate. */ | ||
2440 | /* Check port cause register that all Rx queues are stopped */ | ||
2441 | while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num)) | ||
2442 | & 0xFF) | ||
2443 | udelay(10); | ||
2444 | } | ||
2445 | 2363 | ||
2446 | /* Clear all MIB counters */ | 2364 | /* Clear all MIB counters */ |
2447 | eth_clear_mib_counters(port_num); | 2365 | eth_clear_mib_counters(port_num); |
2448 | 2366 | ||
2449 | /* Reset the Enable bit in the Configuration Register */ | 2367 | /* Reset the Enable bit in the Configuration Register */ |
2450 | reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | 2368 | reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); |
2451 | reg_data &= ~MV643XX_ETH_SERIAL_PORT_ENABLE; | 2369 | reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | |
2370 | MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL | | ||
2371 | MV643XX_ETH_FORCE_LINK_PASS); | ||
2452 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); | 2372 | mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data); |
2453 | } | 2373 | } |
2454 | 2374 | ||
2455 | 2375 | ||
2456 | static int eth_port_autoneg_supported(unsigned int eth_port_num) | ||
2457 | { | ||
2458 | unsigned int phy_reg_data0; | ||
2459 | |||
2460 | eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data0); | ||
2461 | |||
2462 | return phy_reg_data0 & 0x1000; | ||
2463 | } | ||
2464 | |||
2465 | static int eth_port_link_is_up(unsigned int eth_port_num) | ||
2466 | { | ||
2467 | unsigned int phy_reg_data1; | ||
2468 | |||
2469 | eth_port_read_smi_reg(eth_port_num, 1, &phy_reg_data1); | ||
2470 | |||
2471 | if (eth_port_autoneg_supported(eth_port_num)) { | ||
2472 | if (phy_reg_data1 & 0x20) /* auto-neg complete */ | ||
2473 | return 1; | ||
2474 | } else if (phy_reg_data1 & 0x4) /* link up */ | ||
2475 | return 1; | ||
2476 | |||
2477 | return 0; | ||
2478 | } | ||
2479 | |||
2480 | /* | 2376 | /* |
2481 | * eth_port_read_smi_reg - Read PHY registers | 2377 | * eth_port_read_smi_reg - Read PHY registers |
2482 | * | 2378 | * |
@@ -2582,250 +2478,21 @@ out: | |||
2582 | } | 2478 | } |
2583 | 2479 | ||
2584 | /* | 2480 | /* |
2585 | * eth_port_send - Send an Ethernet packet | 2481 | * Wrappers for MII support library. |
2586 | * | ||
2587 | * DESCRIPTION: | ||
2588 | * This routine sends a given packet described by the p_pkt_info | ||
2589 | * parameter. It supports transmitting a packet spanned over multiple | ||
2590 | * buffers. The routine updates the 'curr' and 'first' indexes according | ||
2591 | * to the packet segment passed to it. If the packet segment is the | ||
2592 | * first, the 'first' index is updated. In any case, the 'curr' index | ||
2593 | * is updated. If the routine runs into a Tx resource error, it assigns | ||
2594 | * the 'curr' index as 'first'. This way the function can abort the | ||
2595 | * Tx process of multiple descriptors per packet. | ||
2596 | * | ||
2597 | * INPUT: | ||
2598 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
2599 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2600 | * | ||
2601 | * OUTPUT: | ||
2602 | * Tx ring 'curr' and 'first' indexes are updated. | ||
2603 | * | ||
2604 | * RETURN: | ||
2605 | * ETH_QUEUE_FULL in case of Tx resource error. | ||
2606 | * ETH_ERROR in case the routine can not access Tx desc ring. | ||
2607 | * ETH_QUEUE_LAST_RESOURCE if the routine uses the last Tx resource. | ||
2608 | * ETH_OK otherwise. | ||
2609 | * | ||
2610 | */ | ||
2611 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
2612 | /* | ||
2613 | * Modified to include the first descriptor pointer in case of SG | ||
2614 | */ | 2482 | */ |
2615 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | 2483 | static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) |
2616 | struct pkt_info *p_pkt_info) | ||
2617 | { | ||
2618 | int tx_desc_curr, tx_desc_used, tx_first_desc, tx_next_desc; | ||
2619 | struct eth_tx_desc *current_descriptor; | ||
2620 | struct eth_tx_desc *first_descriptor; | ||
2621 | u32 command; | ||
2622 | |||
2623 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2624 | if (mp->tx_resource_err) | ||
2625 | return ETH_QUEUE_FULL; | ||
2626 | |||
2627 | /* | ||
2628 | * The hardware requires that each buffer that is <= 8 bytes | ||
2629 | * in length must be aligned on an 8 byte boundary. | ||
2630 | */ | ||
2631 | if (p_pkt_info->byte_cnt <= 8 && p_pkt_info->buf_ptr & 0x7) { | ||
2632 | printk(KERN_ERR | ||
2633 | "mv643xx_eth port %d: packet size <= 8 problem\n", | ||
2634 | mp->port_num); | ||
2635 | return ETH_ERROR; | ||
2636 | } | ||
2637 | |||
2638 | mp->tx_ring_skbs++; | ||
2639 | BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); | ||
2640 | |||
2641 | /* Get the Tx Desc ring indexes */ | ||
2642 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2643 | tx_desc_used = mp->tx_used_desc_q; | ||
2644 | |||
2645 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2646 | |||
2647 | tx_next_desc = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2648 | |||
2649 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2650 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2651 | current_descriptor->l4i_chk = p_pkt_info->l4i_chk; | ||
2652 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2653 | |||
2654 | command = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC | | ||
2655 | ETH_BUFFER_OWNED_BY_DMA; | ||
2656 | if (command & ETH_TX_FIRST_DESC) { | ||
2657 | tx_first_desc = tx_desc_curr; | ||
2658 | mp->tx_first_desc_q = tx_first_desc; | ||
2659 | first_descriptor = current_descriptor; | ||
2660 | mp->tx_first_command = command; | ||
2661 | } else { | ||
2662 | tx_first_desc = mp->tx_first_desc_q; | ||
2663 | first_descriptor = &mp->p_tx_desc_area[tx_first_desc]; | ||
2664 | BUG_ON(first_descriptor == NULL); | ||
2665 | current_descriptor->cmd_sts = command; | ||
2666 | } | ||
2667 | |||
2668 | if (command & ETH_TX_LAST_DESC) { | ||
2669 | wmb(); | ||
2670 | first_descriptor->cmd_sts = mp->tx_first_command; | ||
2671 | |||
2672 | wmb(); | ||
2673 | ETH_ENABLE_TX_QUEUE(mp->port_num); | ||
2674 | |||
2675 | /* | ||
2676 | * Finish Tx packet. Update first desc in case of Tx resource | ||
2677 | * error */ | ||
2678 | tx_first_desc = tx_next_desc; | ||
2679 | mp->tx_first_desc_q = tx_first_desc; | ||
2680 | } | ||
2681 | |||
2682 | /* Check for ring index overlap in the Tx desc ring */ | ||
2683 | if (tx_next_desc == tx_desc_used) { | ||
2684 | mp->tx_resource_err = 1; | ||
2685 | mp->tx_curr_desc_q = tx_first_desc; | ||
2686 | |||
2687 | return ETH_QUEUE_LAST_RESOURCE; | ||
2688 | } | ||
2689 | |||
2690 | mp->tx_curr_desc_q = tx_next_desc; | ||
2691 | |||
2692 | return ETH_OK; | ||
2693 | } | ||
2694 | #else | ||
2695 | static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp, | ||
2696 | struct pkt_info *p_pkt_info) | ||
2697 | { | 2484 | { |
2698 | int tx_desc_curr; | 2485 | int val; |
2699 | int tx_desc_used; | 2486 | struct mv643xx_private *mp = netdev_priv(dev); |
2700 | struct eth_tx_desc *current_descriptor; | ||
2701 | unsigned int command_status; | ||
2702 | |||
2703 | /* Do not process Tx ring in case of Tx ring resource error */ | ||
2704 | if (mp->tx_resource_err) | ||
2705 | return ETH_QUEUE_FULL; | ||
2706 | |||
2707 | mp->tx_ring_skbs++; | ||
2708 | BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size); | ||
2709 | |||
2710 | /* Get the Tx Desc ring indexes */ | ||
2711 | tx_desc_curr = mp->tx_curr_desc_q; | ||
2712 | tx_desc_used = mp->tx_used_desc_q; | ||
2713 | current_descriptor = &mp->p_tx_desc_area[tx_desc_curr]; | ||
2714 | |||
2715 | command_status = p_pkt_info->cmd_sts | ETH_ZERO_PADDING | ETH_GEN_CRC; | ||
2716 | current_descriptor->buf_ptr = p_pkt_info->buf_ptr; | ||
2717 | current_descriptor->byte_cnt = p_pkt_info->byte_cnt; | ||
2718 | mp->tx_skb[tx_desc_curr] = p_pkt_info->return_info; | ||
2719 | |||
2720 | /* Set last desc with DMA ownership and interrupt enable. */ | ||
2721 | wmb(); | ||
2722 | current_descriptor->cmd_sts = command_status | | ||
2723 | ETH_BUFFER_OWNED_BY_DMA | ETH_TX_ENABLE_INTERRUPT; | ||
2724 | |||
2725 | wmb(); | ||
2726 | ETH_ENABLE_TX_QUEUE(mp->port_num); | ||
2727 | |||
2728 | /* Finish Tx packet. Update first desc in case of Tx resource error */ | ||
2729 | tx_desc_curr = (tx_desc_curr + 1) % mp->tx_ring_size; | ||
2730 | |||
2731 | /* Update the current descriptor */ | ||
2732 | mp->tx_curr_desc_q = tx_desc_curr; | ||
2733 | |||
2734 | /* Check for ring index overlap in the Tx desc ring */ | ||
2735 | if (tx_desc_curr == tx_desc_used) { | ||
2736 | mp->tx_resource_err = 1; | ||
2737 | return ETH_QUEUE_LAST_RESOURCE; | ||
2738 | } | ||
2739 | 2487 | ||
2740 | return ETH_OK; | 2488 | eth_port_read_smi_reg(mp->port_num, location, &val); |
2489 | return val; | ||
2741 | } | 2490 | } |
2742 | #endif | ||
2743 | 2491 | ||
2744 | /* | 2492 | static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) |
2745 | * eth_tx_return_desc - Free all used Tx descriptors | ||
2746 | * | ||
2747 | * DESCRIPTION: | ||
2748 | * This routine returns the transmitted packet information to the caller. | ||
2749 | * It uses the 'first' index to support Tx desc return in case a transmit | ||
2750 | * of a packet spanned over multiple buffer still in process. | ||
2751 | * In case the Tx queue was in "resource error" condition, where there are | ||
2752 | * no available Tx resources, the function resets the resource error flag. | ||
2753 | * | ||
2754 | * INPUT: | ||
2755 | * struct mv643xx_private *mp Ethernet Port Control struct. | ||
2756 | * struct pkt_info *p_pkt_info User packet buffer. | ||
2757 | * | ||
2758 | * OUTPUT: | ||
2759 | * Tx ring 'first' and 'used' indexes are updated. | ||
2760 | * | ||
2761 | * RETURN: | ||
2762 | * ETH_OK on success | ||
2763 | * ETH_ERROR otherwise. | ||
2764 | * | ||
2765 | */ | ||
2766 | static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp, | ||
2767 | struct pkt_info *p_pkt_info) | ||
2768 | { | 2493 | { |
2769 | int tx_desc_used; | 2494 | struct mv643xx_private *mp = netdev_priv(dev); |
2770 | int tx_busy_desc; | 2495 | eth_port_write_smi_reg(mp->port_num, location, val); |
2771 | struct eth_tx_desc *p_tx_desc_used; | ||
2772 | unsigned int command_status; | ||
2773 | unsigned long flags; | ||
2774 | int err = ETH_OK; | ||
2775 | |||
2776 | spin_lock_irqsave(&mp->lock, flags); | ||
2777 | |||
2778 | #ifdef MV643XX_CHECKSUM_OFFLOAD_TX | ||
2779 | tx_busy_desc = mp->tx_first_desc_q; | ||
2780 | #else | ||
2781 | tx_busy_desc = mp->tx_curr_desc_q; | ||
2782 | #endif | ||
2783 | |||
2784 | /* Get the Tx Desc ring indexes */ | ||
2785 | tx_desc_used = mp->tx_used_desc_q; | ||
2786 | |||
2787 | p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used]; | ||
2788 | |||
2789 | /* Sanity check */ | ||
2790 | if (p_tx_desc_used == NULL) { | ||
2791 | err = ETH_ERROR; | ||
2792 | goto out; | ||
2793 | } | ||
2794 | |||
2795 | /* Stop release. About to overlap the current available Tx descriptor */ | ||
2796 | if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) { | ||
2797 | err = ETH_ERROR; | ||
2798 | goto out; | ||
2799 | } | ||
2800 | |||
2801 | command_status = p_tx_desc_used->cmd_sts; | ||
2802 | |||
2803 | /* Still transmitting... */ | ||
2804 | if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) { | ||
2805 | err = ETH_ERROR; | ||
2806 | goto out; | ||
2807 | } | ||
2808 | |||
2809 | /* Pass the packet information to the caller */ | ||
2810 | p_pkt_info->cmd_sts = command_status; | ||
2811 | p_pkt_info->return_info = mp->tx_skb[tx_desc_used]; | ||
2812 | p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr; | ||
2813 | p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt; | ||
2814 | mp->tx_skb[tx_desc_used] = NULL; | ||
2815 | |||
2816 | /* Update the next descriptor to release. */ | ||
2817 | mp->tx_used_desc_q = (tx_desc_used + 1) % mp->tx_ring_size; | ||
2818 | |||
2819 | /* Any Tx return cancels the Tx resource error status */ | ||
2820 | mp->tx_resource_err = 0; | ||
2821 | |||
2822 | BUG_ON(mp->tx_ring_skbs == 0); | ||
2823 | mp->tx_ring_skbs--; | ||
2824 | |||
2825 | out: | ||
2826 | spin_unlock_irqrestore(&mp->lock, flags); | ||
2827 | |||
2828 | return err; | ||
2829 | } | 2496 | } |
2830 | 2497 | ||
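
With mp->mii wired to these wrappers, the ethtool get/set paths can delegate to the MII library. The replacement mv643xx_get_settings() is not shown in this hunk; a minimal sketch of what such a delegation looks like, assuming the driver's mp->lock spinlock serializes SMI access:

	static int mv643xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	{
		struct mv643xx_private *mp = netdev_priv(dev);
		int err;

		spin_lock_irq(&mp->lock);
		err = mii_ethtool_gset(&mp->mii, cmd);
		spin_unlock_irq(&mp->lock);

		/* The PHY may support 1000baseT_Half, but this MAC does not */
		cmd->supported &= ~SUPPORTED_1000baseT_Half;
		cmd->advertising &= ~ADVERTISED_1000baseT_Half;

		return err;
	}
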
2831 | /* | 2498 | /* |
@@ -3017,111 +2684,6 @@ static const struct mv643xx_stats mv643xx_gstrings_stats[] = { | |||
3017 | #define MV643XX_STATS_LEN \ | 2684 | #define MV643XX_STATS_LEN \ |
3018 | sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats) | 2685 | sizeof(mv643xx_gstrings_stats) / sizeof(struct mv643xx_stats) |
3019 | 2686 | ||
3020 | static int | ||
3021 | mv643xx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) | ||
3022 | { | ||
3023 | struct mv643xx_private *mp = netdev->priv; | ||
3024 | int port_num = mp->port_num; | ||
3025 | int autoneg = eth_port_autoneg_supported(port_num); | ||
3026 | int mode_10_bit; | ||
3027 | int auto_duplex; | ||
3028 | int half_duplex = 0; | ||
3029 | int full_duplex = 0; | ||
3030 | int auto_speed; | ||
3031 | int speed_10 = 0; | ||
3032 | int speed_100 = 0; | ||
3033 | int speed_1000 = 0; | ||
3034 | |||
3035 | u32 pcs = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num)); | ||
3036 | u32 psr = mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)); | ||
3037 | |||
3038 | mode_10_bit = psr & MV643XX_ETH_PORT_STATUS_MODE_10_BIT; | ||
3039 | |||
3040 | if (mode_10_bit) { | ||
3041 | ecmd->supported = SUPPORTED_10baseT_Half; | ||
3042 | } else { | ||
3043 | ecmd->supported = (SUPPORTED_10baseT_Half | | ||
3044 | SUPPORTED_10baseT_Full | | ||
3045 | SUPPORTED_100baseT_Half | | ||
3046 | SUPPORTED_100baseT_Full | | ||
3047 | SUPPORTED_1000baseT_Full | | ||
3048 | (autoneg ? SUPPORTED_Autoneg : 0) | | ||
3049 | SUPPORTED_TP); | ||
3050 | |||
3051 | auto_duplex = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX); | ||
3052 | auto_speed = !(pcs & MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII); | ||
3053 | |||
3054 | ecmd->advertising = ADVERTISED_TP; | ||
3055 | |||
3056 | if (autoneg) { | ||
3057 | ecmd->advertising |= ADVERTISED_Autoneg; | ||
3058 | |||
3059 | if (auto_duplex) { | ||
3060 | half_duplex = 1; | ||
3061 | full_duplex = 1; | ||
3062 | } else { | ||
3063 | if (pcs & MV643XX_ETH_SET_FULL_DUPLEX_MODE) | ||
3064 | full_duplex = 1; | ||
3065 | else | ||
3066 | half_duplex = 1; | ||
3067 | } | ||
3068 | |||
3069 | if (auto_speed) { | ||
3070 | speed_10 = 1; | ||
3071 | speed_100 = 1; | ||
3072 | speed_1000 = 1; | ||
3073 | } else { | ||
3074 | if (pcs & MV643XX_ETH_SET_GMII_SPEED_TO_1000) | ||
3075 | speed_1000 = 1; | ||
3076 | else if (pcs & MV643XX_ETH_SET_MII_SPEED_TO_100) | ||
3077 | speed_100 = 1; | ||
3078 | else | ||
3079 | speed_10 = 1; | ||
3080 | } | ||
3081 | |||
3082 | if (speed_10 & half_duplex) | ||
3083 | ecmd->advertising |= ADVERTISED_10baseT_Half; | ||
3084 | if (speed_10 & full_duplex) | ||
3085 | ecmd->advertising |= ADVERTISED_10baseT_Full; | ||
3086 | if (speed_100 & half_duplex) | ||
3087 | ecmd->advertising |= ADVERTISED_100baseT_Half; | ||
3088 | if (speed_100 & full_duplex) | ||
3089 | ecmd->advertising |= ADVERTISED_100baseT_Full; | ||
3090 | if (speed_1000) | ||
3091 | ecmd->advertising |= ADVERTISED_1000baseT_Full; | ||
3092 | } | ||
3093 | } | ||
3094 | |||
3095 | ecmd->port = PORT_TP; | ||
3096 | ecmd->phy_address = ethernet_phy_get(port_num); | ||
3097 | |||
3098 | ecmd->transceiver = XCVR_EXTERNAL; | ||
3099 | |||
3100 | if (netif_carrier_ok(netdev)) { | ||
3101 | if (mode_10_bit) | ||
3102 | ecmd->speed = SPEED_10; | ||
3103 | else { | ||
3104 | if (psr & MV643XX_ETH_PORT_STATUS_GMII_1000) | ||
3105 | ecmd->speed = SPEED_1000; | ||
3106 | else if (psr & MV643XX_ETH_PORT_STATUS_MII_100) | ||
3107 | ecmd->speed = SPEED_100; | ||
3108 | else | ||
3109 | ecmd->speed = SPEED_10; | ||
3110 | } | ||
3111 | |||
3112 | if (psr & MV643XX_ETH_PORT_STATUS_FULL_DUPLEX) | ||
3113 | ecmd->duplex = DUPLEX_FULL; | ||
3114 | else | ||
3115 | ecmd->duplex = DUPLEX_HALF; | ||
3116 | } else { | ||
3117 | ecmd->speed = -1; | ||
3118 | ecmd->duplex = -1; | ||
3119 | } | ||
3120 | |||
3121 | ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE; | ||
3122 | return 0; | ||
3123 | } | ||
3124 | |||
3125 | static void mv643xx_get_drvinfo(struct net_device *netdev, | 2687 | static void mv643xx_get_drvinfo(struct net_device *netdev, |
3126 | struct ethtool_drvinfo *drvinfo) | 2688 | struct ethtool_drvinfo *drvinfo) |
3127 | { | 2689 | { |
@@ -3168,15 +2730,41 @@ static void mv643xx_get_strings(struct net_device *netdev, uint32_t stringset, | |||
3168 | } | 2730 | } |
3169 | } | 2731 | } |
3170 | 2732 | ||
2733 | static u32 mv643xx_eth_get_link(struct net_device *dev) | ||
2734 | { | ||
2735 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2736 | |||
2737 | return mii_link_ok(&mp->mii); | ||
2738 | } | ||
2739 | |||
2740 | static int mv643xx_eth_nway_restart(struct net_device *dev) | ||
2741 | { | ||
2742 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2743 | |||
2744 | return mii_nway_restart(&mp->mii); | ||
2745 | } | ||
2746 | |||
2747 | static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
2748 | { | ||
2749 | struct mv643xx_private *mp = netdev_priv(dev); | ||
2750 | |||
2751 | return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL); | ||
2752 | } | ||
2753 | |||
3171 | static struct ethtool_ops mv643xx_ethtool_ops = { | 2754 | static struct ethtool_ops mv643xx_ethtool_ops = { |
3172 | .get_settings = mv643xx_get_settings, | 2755 | .get_settings = mv643xx_get_settings, |
2756 | .set_settings = mv643xx_set_settings, | ||
3173 | .get_drvinfo = mv643xx_get_drvinfo, | 2757 | .get_drvinfo = mv643xx_get_drvinfo, |
3174 | .get_link = ethtool_op_get_link, | 2758 | .get_link = mv643xx_eth_get_link, |
3175 | .get_sg = ethtool_op_get_sg, | 2759 | .get_sg = ethtool_op_get_sg, |
3176 | .set_sg = ethtool_op_set_sg, | 2760 | .set_sg = ethtool_op_set_sg, |
3177 | .get_strings = mv643xx_get_strings, | 2761 | .get_strings = mv643xx_get_strings, |
3178 | .get_stats_count = mv643xx_get_stats_count, | 2762 | .get_stats_count = mv643xx_get_stats_count, |
3179 | .get_ethtool_stats = mv643xx_get_ethtool_stats, | 2763 | .get_ethtool_stats = mv643xx_get_ethtool_stats, |
2764 | .nway_reset = mv643xx_eth_nway_restart, | ||
3180 | }; | 2768 | }; |
3181 | 2769 | ||
3182 | /************* End ethtool support *************************/ | 2770 | /************* End ethtool support *************************/ |