Diffstat (limited to 'drivers/net/ethernet/renesas')

 drivers/net/ethernet/renesas/sh_eth.c | 383
 drivers/net/ethernet/renesas/sh_eth.h |  77
 2 files changed, 254 insertions(+), 206 deletions(-)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 667169b82526..af0b867a6cf6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -49,6 +49,34 @@
 		NETIF_MSG_RX_ERR| \
 		NETIF_MSG_TX_ERR)
 
+#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
+	defined(CONFIG_CPU_SUBTYPE_SH7763) || \
+	defined(CONFIG_ARCH_R8A7740)
+static void sh_eth_select_mii(struct net_device *ndev)
+{
+	u32 value = 0x0;
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	switch (mdp->phy_interface) {
+	case PHY_INTERFACE_MODE_GMII:
+		value = 0x2;
+		break;
+	case PHY_INTERFACE_MODE_MII:
+		value = 0x1;
+		break;
+	case PHY_INTERFACE_MODE_RMII:
+		value = 0x0;
+		break;
+	default:
+		pr_warn("PHY interface mode was not setup. Set to MII.\n");
+		value = 0x1;
+		break;
+	}
+
+	sh_eth_write(ndev, value, RMII_MII);
+}
+#endif
+
 /* There is CPU dependent code */
 #if defined(CONFIG_CPU_SUBTYPE_SH7724)
 #define SH_ETH_RESET_DEFAULT	1
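Note: the new sh_eth_select_mii() helper maps the phy_interface mode that a
board hands to the driver at probe time onto the EtherC RMII_MII register. A
minimal board-file sketch of where that mode comes from (hypothetical values,
not part of this patch):

	static struct sh_eth_plat_data my_eth_pdata = {
		.phy		= 0x1f,				/* assumed PHY address */
		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
		.phy_interface	= PHY_INTERFACE_MODE_MII,	/* written as 0x1 above */
	};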
@@ -102,6 +130,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 #elif defined(CONFIG_CPU_SUBTYPE_SH7757)
 #define SH_ETH_HAS_BOTH_MODULES	1
 #define SH_ETH_HAS_TSU	1
+static int sh_eth_check_reset(struct net_device *ndev);
+
 static void sh_eth_set_duplex(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -176,23 +206,19 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
 }
 
 static int sh_eth_is_gether(struct sh_eth_private *mdp);
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
-	int cnt = 100;
+	int ret = 0;
 
 	if (sh_eth_is_gether(mdp)) {
 		sh_eth_write(ndev, 0x03, EDSR);
 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
 			     EDMR);
-		while (cnt > 0) {
-			if (!(sh_eth_read(ndev, EDMR) & 0x3))
-				break;
-			mdelay(1);
-			cnt--;
-		}
-		if (cnt < 0)
-			printk(KERN_ERR "Device reset fail\n");
+
+		ret = sh_eth_check_reset(ndev);
+		if (ret)
+			goto out;
 
 		/* Table Init */
 		sh_eth_write(ndev, 0x0, TDLAR);
@@ -210,6 +236,9 @@ static void sh_eth_reset(struct net_device *ndev)
 		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
 			     EDMR);
 	}
+
+out:
+	return ret;
 }
 
 static void sh_eth_set_duplex_giga(struct net_device *ndev)
@@ -282,7 +311,9 @@ static struct sh_eth_cpu_data *sh_eth_get_cpu_data(struct sh_eth_private *mdp)
 
 #elif defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763)
 #define SH_ETH_HAS_TSU	1
+static int sh_eth_check_reset(struct net_device *ndev);
 static void sh_eth_reset_hw_crc(struct net_device *ndev);
+
 static void sh_eth_chip_reset(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -292,35 +323,6 @@ static void sh_eth_chip_reset(struct net_device *ndev)
 	mdelay(1);
 }
 
-static void sh_eth_reset(struct net_device *ndev)
-{
-	int cnt = 100;
-
-	sh_eth_write(ndev, EDSR_ENALL, EDSR);
-	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
-	while (cnt > 0) {
-		if (!(sh_eth_read(ndev, EDMR) & 0x3))
-			break;
-		mdelay(1);
-		cnt--;
-	}
-	if (cnt == 0)
-		printk(KERN_ERR "Device reset fail\n");
-
-	/* Table Init */
-	sh_eth_write(ndev, 0x0, TDLAR);
-	sh_eth_write(ndev, 0x0, TDFAR);
-	sh_eth_write(ndev, 0x0, TDFXR);
-	sh_eth_write(ndev, 0x0, TDFFR);
-	sh_eth_write(ndev, 0x0, RDLAR);
-	sh_eth_write(ndev, 0x0, RDFAR);
-	sh_eth_write(ndev, 0x0, RDFXR);
-	sh_eth_write(ndev, 0x0, RDFFR);
-
-	/* Reset HW CRC register */
-	sh_eth_reset_hw_crc(ndev);
-}
-
 static void sh_eth_set_duplex(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -377,9 +379,41 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 	.tsu		= 1,
 #if defined(CONFIG_CPU_SUBTYPE_SH7734)
 	.hw_crc		= 1,
+	.select_mii	= 1,
 #endif
 };
 
+static int sh_eth_reset(struct net_device *ndev)
+{
+	int ret = 0;
+
+	sh_eth_write(ndev, EDSR_ENALL, EDSR);
+	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
+
+	ret = sh_eth_check_reset(ndev);
+	if (ret)
+		goto out;
+
+	/* Table Init */
+	sh_eth_write(ndev, 0x0, TDLAR);
+	sh_eth_write(ndev, 0x0, TDFAR);
+	sh_eth_write(ndev, 0x0, TDFXR);
+	sh_eth_write(ndev, 0x0, TDFFR);
+	sh_eth_write(ndev, 0x0, RDLAR);
+	sh_eth_write(ndev, 0x0, RDFAR);
+	sh_eth_write(ndev, 0x0, RDFXR);
+	sh_eth_write(ndev, 0x0, RDFFR);
+
+	/* Reset HW CRC register */
+	sh_eth_reset_hw_crc(ndev);
+
+	/* Select MII mode */
+	if (sh_eth_my_cpu_data.select_mii)
+		sh_eth_select_mii(ndev);
+out:
+	return ret;
+}
+
 static void sh_eth_reset_hw_crc(struct net_device *ndev)
 {
 	if (sh_eth_my_cpu_data.hw_crc)
@@ -388,44 +422,29 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
 
 #elif defined(CONFIG_ARCH_R8A7740)
 #define SH_ETH_HAS_TSU	1
+static int sh_eth_check_reset(struct net_device *ndev);
+
 static void sh_eth_chip_reset(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
-	unsigned long mii;
 
 	/* reset device */
 	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
 	mdelay(1);
 
-	switch (mdp->phy_interface) {
-	case PHY_INTERFACE_MODE_GMII:
-		mii = 2;
-		break;
-	case PHY_INTERFACE_MODE_MII:
-		mii = 1;
-		break;
-	case PHY_INTERFACE_MODE_RMII:
-	default:
-		mii = 0;
-		break;
-	}
-	sh_eth_write(ndev, mii, RMII_MII);
+	sh_eth_select_mii(ndev);
 }
 
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
 {
-	int cnt = 100;
+	int ret = 0;
 
 	sh_eth_write(ndev, EDSR_ENALL, EDSR);
 	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
-	while (cnt > 0) {
-		if (!(sh_eth_read(ndev, EDMR) & 0x3))
-			break;
-		mdelay(1);
-		cnt--;
-	}
-	if (cnt == 0)
-		printk(KERN_ERR "Device reset fail\n");
+
+	ret = sh_eth_check_reset(ndev);
+	if (ret)
+		goto out;
 
 	/* Table Init */
 	sh_eth_write(ndev, 0x0, TDLAR);
@@ -436,6 +455,9 @@ static void sh_eth_reset(struct net_device *ndev)
 	sh_eth_write(ndev, 0x0, RDFAR);
 	sh_eth_write(ndev, 0x0, RDFXR);
 	sh_eth_write(ndev, 0x0, RDFFR);
+
+out:
+	return ret;
 }
 
 static void sh_eth_set_duplex(struct net_device *ndev)
@@ -492,6 +514,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
 	.no_trimd	= 1,
 	.no_ade		= 1,
 	.tsu		= 1,
+	.select_mii	= 1,
 };
 
 #elif defined(CONFIG_CPU_SUBTYPE_SH7619)
@@ -543,11 +566,31 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
 
 #if defined(SH_ETH_RESET_DEFAULT)
 /* Chip Reset */
-static void sh_eth_reset(struct net_device *ndev)
+static int sh_eth_reset(struct net_device *ndev)
 {
 	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER, EDMR);
 	mdelay(3);
 	sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER, EDMR);
+
+	return 0;
+}
+#else
+static int sh_eth_check_reset(struct net_device *ndev)
+{
+	int ret = 0;
+	int cnt = 100;
+
+	while (cnt > 0) {
+		if (!(sh_eth_read(ndev, EDMR) & 0x3))
+			break;
+		mdelay(1);
+		cnt--;
+	}
+	if (cnt < 0) {
+		printk(KERN_ERR "Device reset fail\n");
+		ret = -ETIMEDOUT;
+	}
+	return ret;
 }
 #endif
 
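Note: sh_eth_check_reset() is the polling loop that every GETHER-style
sh_eth_reset() above now shares, and its result is propagated instead of only
being printed. The intended call pattern, as used throughout this patch:

	ret = sh_eth_reset(ndev);	/* -ETIMEDOUT if EDMR bits 0x3 never clear */
	if (ret)
		goto out;

(As written, the loop exits with cnt == 0 on timeout, so the "cnt < 0" test
looks unreachable and the -ETIMEDOUT path cannot actually trigger yet.)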
@@ -739,21 +782,23 @@ static void sh_eth_ring_free(struct net_device *ndev)
 
 	/* Free Rx skb ringbuffer */
 	if (mdp->rx_skbuff) {
-		for (i = 0; i < RX_RING_SIZE; i++) {
+		for (i = 0; i < mdp->num_rx_ring; i++) {
 			if (mdp->rx_skbuff[i])
 				dev_kfree_skb(mdp->rx_skbuff[i]);
 		}
 	}
 	kfree(mdp->rx_skbuff);
+	mdp->rx_skbuff = NULL;
 
 	/* Free Tx skb ringbuffer */
 	if (mdp->tx_skbuff) {
-		for (i = 0; i < TX_RING_SIZE; i++) {
+		for (i = 0; i < mdp->num_tx_ring; i++) {
 			if (mdp->tx_skbuff[i])
 				dev_kfree_skb(mdp->tx_skbuff[i]);
 		}
 	}
 	kfree(mdp->tx_skbuff);
+	mdp->tx_skbuff = NULL;
 }
 
 /* format skb and descriptor buffer */
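Note: clearing rx_skbuff/tx_skbuff after kfree() makes sh_eth_ring_free()
idempotent, which matters now that sh_eth_set_ringparam() frees the rings on
a running device before sh_eth_close() may free them again:

	sh_eth_ring_free(ndev);		/* first call frees and NULLs */
	sh_eth_ring_free(ndev);		/* second call is now a harmless no-op */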
@@ -764,8 +809,8 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	struct sk_buff *skb;
 	struct sh_eth_rxdesc *rxdesc = NULL;
 	struct sh_eth_txdesc *txdesc = NULL;
-	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
-	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;
+	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
+	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
 
 	mdp->cur_rx = mdp->cur_tx = 0;
 	mdp->dirty_rx = mdp->dirty_tx = 0;
@@ -773,7 +818,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	memset(mdp->rx_ring, 0, rx_ringsize);
 
 	/* build Rx ring buffer */
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_rx_ring; i++) {
 		/* skb */
 		mdp->rx_skbuff[i] = NULL;
 		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
@@ -799,7 +844,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 		}
 	}
 
-	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);
+	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
 
 	/* Mark the last entry as wrapping the ring. */
 	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);
@@ -807,7 +852,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
 	memset(mdp->tx_ring, 0, tx_ringsize);
 
 	/* build Tx ring buffer */
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_tx_ring; i++) {
 		mdp->tx_skbuff[i] = NULL;
 		txdesc = &mdp->tx_ring[i];
 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
@@ -841,7 +886,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 		mdp->rx_buf_sz += NET_IP_ALIGN;
 
 	/* Allocate RX and TX skb rings */
-	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
+	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * mdp->num_rx_ring,
 				GFP_KERNEL);
 	if (!mdp->rx_skbuff) {
 		dev_err(&ndev->dev, "Cannot allocate Rx skb\n");
@@ -849,7 +894,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 		return ret;
 	}
 
-	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
+	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * mdp->num_tx_ring,
 				GFP_KERNEL);
 	if (!mdp->tx_skbuff) {
 		dev_err(&ndev->dev, "Cannot allocate Tx skb\n");
@@ -858,7 +903,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	}
 
 	/* Allocate all Rx descriptors. */
-	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
+	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
 	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
 			GFP_KERNEL);
 
@@ -872,7 +917,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
 	mdp->dirty_rx = 0;
 
 	/* Allocate all Tx descriptors. */
-	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
+	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
 	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
 			GFP_KERNEL);
 	if (!mdp->tx_ring) {
@@ -890,19 +935,41 @@ desc_ring_free:
 skb_ring_free:
 	/* Free Rx and Tx skb ring buffer */
 	sh_eth_ring_free(ndev);
+	mdp->tx_ring = NULL;
+	mdp->rx_ring = NULL;
 
 	return ret;
 }
 
-static int sh_eth_dev_init(struct net_device *ndev)
+static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
+{
+	int ringsize;
+
+	if (mdp->rx_ring) {
+		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
+				  mdp->rx_desc_dma);
+		mdp->rx_ring = NULL;
+	}
+
+	if (mdp->tx_ring) {
+		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
+		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
+				  mdp->tx_desc_dma);
+		mdp->tx_ring = NULL;
+	}
+}
+
+static int sh_eth_dev_init(struct net_device *ndev, bool start)
 {
 	int ret = 0;
 	struct sh_eth_private *mdp = netdev_priv(ndev);
-	u_int32_t rx_int_var, tx_int_var;
 	u32 val;
 
 	/* Soft Reset */
-	sh_eth_reset(ndev);
+	ret = sh_eth_reset(ndev);
+	if (ret)
+		goto out;
 
 	/* Descriptor format */
 	sh_eth_ring_format(ndev);
@@ -926,9 +993,7 @@ static int sh_eth_dev_init(struct net_device *ndev)
 	/* Frame recv control */
 	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
 
-	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
-	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
-	sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);
+	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
 
 	if (mdp->cd->bculr)
 		sh_eth_write(ndev, 0x800, BCULR);	/* Burst sycle set */
@@ -943,7 +1008,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
 			RFLR);
 
 	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
-	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+	if (start)
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
 
 	/* PAUSE Prohibition */
 	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -958,7 +1024,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
 	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
 
 	/* E-MAC Interrupt Enable register */
-	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+	if (start)
+		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
 
 	/* Set MAC address */
 	update_mac_address(ndev);
@@ -971,11 +1038,14 @@ static int sh_eth_dev_init(struct net_device *ndev)
 	if (mdp->cd->tpauser)
 		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
 
-	/* Setting the Rx mode will start the Rx process. */
-	sh_eth_write(ndev, EDRRR_R, EDRRR);
+	if (start) {
+		/* Setting the Rx mode will start the Rx process. */
+		sh_eth_write(ndev, EDRRR_R, EDRRR);
 
-	netif_start_queue(ndev);
+		netif_start_queue(ndev);
+	}
 
+out:
 	return ret;
 }
 
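Note: the new "start" flag splits hardware initialization from activation.
Both call sites appear later in this patch:

	ret = sh_eth_dev_init(ndev, true);	/* sh_eth_open(): unmask IRQs,
						   start Rx, wake the queue */
	ret = sh_eth_dev_init(ndev, false);	/* sh_eth_set_ringparam():
						   reprogram only; EESIPR/ECSIPR
						   stay masked, EDRRR untouched */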
@@ -988,7 +1058,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 	int entry = 0;
 
 	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
-		entry = mdp->dirty_tx % TX_RING_SIZE;
+		entry = mdp->dirty_tx % mdp->num_tx_ring;
 		txdesc = &mdp->tx_ring[entry];
 		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
 			break;
@@ -1001,7 +1071,7 @@ static int sh_eth_txfree(struct net_device *ndev)
 			freeNum++;
 		}
 		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
-		if (entry >= TX_RING_SIZE - 1)
+		if (entry >= mdp->num_tx_ring - 1)
 			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
 
 		ndev->stats.tx_packets++;
@@ -1011,13 +1081,13 @@
 }
 
 /* Packet receive function */
-static int sh_eth_rx(struct net_device *ndev)
+static int sh_eth_rx(struct net_device *ndev, u32 intr_status)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
 	struct sh_eth_rxdesc *rxdesc;
 
-	int entry = mdp->cur_rx % RX_RING_SIZE;
-	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
+	int entry = mdp->cur_rx % mdp->num_rx_ring;
+	int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
 	struct sk_buff *skb;
 	u16 pkt_len = 0;
 	u32 desc_status;
@@ -1068,13 +1138,13 @@
 			ndev->stats.rx_bytes += pkt_len;
 		}
 		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
-		entry = (++mdp->cur_rx) % RX_RING_SIZE;
+		entry = (++mdp->cur_rx) % mdp->num_rx_ring;
 		rxdesc = &mdp->rx_ring[entry];
 	}
 
 	/* Refill the Rx ring buffers. */
 	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
-		entry = mdp->dirty_rx % RX_RING_SIZE;
+		entry = mdp->dirty_rx % mdp->num_rx_ring;
 		rxdesc = &mdp->rx_ring[entry];
 		/* The size of the buffer is 16 byte boundary. */
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
@@ -1091,7 +1161,7 @@
 			skb_checksum_none_assert(skb);
 			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
 		}
-		if (entry >= RX_RING_SIZE - 1)
+		if (entry >= mdp->num_rx_ring - 1)
 			rxdesc->status |=
 				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
 		else
@@ -1102,9 +1172,11 @@
 	/* Restart Rx engine if stopped. */
 	/* If we don't need to check status, don't. -KDU */
 	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
-		/* fix the values for the next receiving */
-		mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
-					       sh_eth_read(ndev, RDLAR)) >> 4;
+		/* fix the values for the next receiving if RDE is set */
+		if (intr_status & EESR_RDE)
+			mdp->cur_rx = mdp->dirty_rx =
+				(sh_eth_read(ndev, RDFAR) -
+				 sh_eth_read(ndev, RDLAR)) >> 4;
 		sh_eth_write(ndev, EDRRR_R, EDRRR);
 	}
 
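Note: each Rx descriptor is 16 bytes, so the distance between the current
descriptor address (RDFAR) and the ring base (RDLAR) converts to a ring index
by a 4-bit shift:

	index = (sh_eth_read(ndev, RDFAR) - sh_eth_read(ndev, RDLAR)) >> 4;

Resynchronizing cur_rx/dirty_rx from the hardware is only valid after a
receive-descriptor-empty overrun, hence the new EESR_RDE test on the
intr_status value that sh_eth_rx() now receives from the interrupt handler.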
@@ -1273,7 +1345,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
 			EESR_RTSF | /* short frame recv */
 			EESR_PRE  | /* PHY-LSI recv error */
 			EESR_CERF)){ /* recv frame CRC error */
-		sh_eth_rx(ndev);
+		sh_eth_rx(ndev, intr_status);
 	}
 
 	/* Tx Check */
@@ -1291,14 +1363,6 @@ other_irq:
 	return ret;
 }
 
-static void sh_eth_timer(unsigned long data)
-{
-	struct net_device *ndev = (struct net_device *)data;
-	struct sh_eth_private *mdp = netdev_priv(ndev);
-
-	mod_timer(&mdp->timer, jiffies + (10 * HZ));
-}
-
 /* PHY state control function */
 static void sh_eth_adjust_link(struct net_device *ndev)
 {
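Note: sh_eth_timer() did nothing but re-arm itself every 10 seconds, so it is
deleted outright; the init_timer()/del_timer_sync() calls and the timer_list
member disappear in the hunks below with no behavioral change.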
@@ -1497,6 +1561,71 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 	}
 }
 
+static void sh_eth_get_ringparam(struct net_device *ndev,
+				 struct ethtool_ringparam *ring)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+
+	ring->rx_max_pending = RX_RING_MAX;
+	ring->tx_max_pending = TX_RING_MAX;
+	ring->rx_pending = mdp->num_rx_ring;
+	ring->tx_pending = mdp->num_tx_ring;
+}
+
+static int sh_eth_set_ringparam(struct net_device *ndev,
+				struct ethtool_ringparam *ring)
+{
+	struct sh_eth_private *mdp = netdev_priv(ndev);
+	int ret;
+
+	if (ring->tx_pending > TX_RING_MAX ||
+	    ring->rx_pending > RX_RING_MAX ||
+	    ring->tx_pending < TX_RING_MIN ||
+	    ring->rx_pending < RX_RING_MIN)
+		return -EINVAL;
+	if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (netif_running(ndev)) {
+		netif_tx_disable(ndev);
+		/* Disable interrupts by clearing the interrupt mask. */
+		sh_eth_write(ndev, 0x0000, EESIPR);
+		/* Stop the chip's Tx and Rx processes. */
+		sh_eth_write(ndev, 0, EDTRR);
+		sh_eth_write(ndev, 0, EDRRR);
+		synchronize_irq(ndev->irq);
+	}
+
+	/* Free all the skbuffs in the Rx queue. */
+	sh_eth_ring_free(ndev);
+	/* Free DMA buffer */
+	sh_eth_free_dma_buffer(mdp);
+
+	/* Set new parameters */
+	mdp->num_rx_ring = ring->rx_pending;
+	mdp->num_tx_ring = ring->tx_pending;
+
+	ret = sh_eth_ring_init(ndev);
+	if (ret < 0) {
+		dev_err(&ndev->dev, "%s: sh_eth_ring_init failed.\n", __func__);
+		return ret;
+	}
+	ret = sh_eth_dev_init(ndev, false);
+	if (ret < 0) {
+		dev_err(&ndev->dev, "%s: sh_eth_dev_init failed.\n", __func__);
+		return ret;
+	}
+
+	if (netif_running(ndev)) {
+		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+		/* Setting the Rx mode will start the Rx process. */
+		sh_eth_write(ndev, EDRRR_R, EDRRR);
+		netif_wake_queue(ndev);
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops sh_eth_ethtool_ops = {
 	.get_settings	= sh_eth_get_settings,
 	.set_settings	= sh_eth_set_settings,
@@ -1507,6 +1636,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
 	.get_strings	= sh_eth_get_strings,
 	.get_ethtool_stats  = sh_eth_get_ethtool_stats,
 	.get_sset_count     = sh_eth_get_sset_count,
+	.get_ringparam	= sh_eth_get_ringparam,
+	.set_ringparam	= sh_eth_set_ringparam,
 };
 
 /* network device open function */
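Note: with get/set_ringparam wired into sh_eth_ethtool_ops, standard ethtool
can resize the rings at runtime, e.g. (interface name assumed):

	ethtool -g eth0               # reports pre-set max 1024, current 64
	ethtool -G eth0 rx 256 tx 256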
@@ -1537,7 +1668,7 @@ static int sh_eth_open(struct net_device *ndev)
 		goto out_free_irq;
 
 	/* device init */
-	ret = sh_eth_dev_init(ndev);
+	ret = sh_eth_dev_init(ndev, true);
 	if (ret)
 		goto out_free_irq;
 
@@ -1546,11 +1677,6 @@
 	if (ret)
 		goto out_free_irq;
 
-	/* Set the timer to check for link beat. */
-	init_timer(&mdp->timer);
-	mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
-	setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);
-
 	return ret;
 
 out_free_irq:
@@ -1575,11 +1701,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 	/* tx_errors count up */
 	ndev->stats.tx_errors++;
 
-	/* timer off */
-	del_timer_sync(&mdp->timer);
-
 	/* Free all the skbuffs in the Rx queue. */
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_rx_ring; i++) {
 		rxdesc = &mdp->rx_ring[i];
 		rxdesc->status = 0;
 		rxdesc->addr = 0xBADF00D0;
@@ -1587,18 +1710,14 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
 			dev_kfree_skb(mdp->rx_skbuff[i]);
 		mdp->rx_skbuff[i] = NULL;
 	}
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < mdp->num_tx_ring; i++) {
 		if (mdp->tx_skbuff[i])
 			dev_kfree_skb(mdp->tx_skbuff[i]);
 		mdp->tx_skbuff[i] = NULL;
 	}
 
 	/* device init */
-	sh_eth_dev_init(ndev);
-
-	/* timer on */
-	mdp->timer.expires = (jiffies + (24 * HZ)) / 10;/* 2.4 sec. */
-	add_timer(&mdp->timer);
+	sh_eth_dev_init(ndev, true);
 }
 
 /* Packet transmit function */
@@ -1610,7 +1729,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	unsigned long flags;
 
 	spin_lock_irqsave(&mdp->lock, flags);
-	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
+	if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
 		if (!sh_eth_txfree(ndev)) {
 			if (netif_msg_tx_queued(mdp))
 				dev_warn(&ndev->dev, "TxFD exhausted.\n");
@@ -1621,7 +1740,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}
 	spin_unlock_irqrestore(&mdp->lock, flags);
 
-	entry = mdp->cur_tx % TX_RING_SIZE;
+	entry = mdp->cur_tx % mdp->num_tx_ring;
 	mdp->tx_skbuff[entry] = skb;
 	txdesc = &mdp->tx_ring[entry];
 	/* soft swap. */
@@ -1635,7 +1754,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	else
 		txdesc->buffer_length = skb->len;
 
-	if (entry >= TX_RING_SIZE - 1)
+	if (entry >= mdp->num_tx_ring - 1)
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
 	else
 		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
@@ -1652,7 +1771,6 @@ static int sh_eth_close(struct net_device *ndev)
 static int sh_eth_close(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);
-	int ringsize;
 
 	netif_stop_queue(ndev);
 
@@ -1671,18 +1789,11 @@ static int sh_eth_close(struct net_device *ndev)
 
 	free_irq(ndev->irq, ndev);
 
-	del_timer_sync(&mdp->timer);
-
 	/* Free all the skbuffs in the Rx queue. */
 	sh_eth_ring_free(ndev);
 
 	/* free DMA buffer */
-	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
-	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);
-
-	/* free DMA buffer */
-	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
-	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
+	sh_eth_free_dma_buffer(mdp);
 
 	pm_runtime_put_sync(&mdp->pdev->dev);
 
@@ -2273,6 +2384,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 	ether_setup(ndev);
 
 	mdp = netdev_priv(ndev);
+	mdp->num_tx_ring = TX_RING_SIZE;
+	mdp->num_rx_ring = RX_RING_SIZE;
 	mdp->addr = ioremap(res->start, resource_size(res));
 	if (mdp->addr == NULL) {
 		ret = -ENOMEM;
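Note: TX_RING_SIZE/RX_RING_SIZE (64) now only seed the runtime sizes at probe;
everything in the fast path indexes by mdp->num_tx_ring and mdp->num_rx_ring,
which ethtool may change later.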
@@ -2310,8 +2423,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
 
 	/* debug message level */
 	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
-	mdp->post_rx = POST_RX >> (devno << 1);
-	mdp->post_fw = POST_FW >> (devno << 1);
 
 	/* read and set MAC address */
 	read_mac_address(ndev, pd->mac_addr);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 57b8e1fc5d15..bae84fd2e73a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,6 +27,10 @@
 #define TX_TIMEOUT	(5*HZ)
 #define TX_RING_SIZE	64	/* Tx ring size */
 #define RX_RING_SIZE	64	/* Rx ring size */
+#define TX_RING_MIN	64
+#define RX_RING_MIN	64
+#define TX_RING_MAX	1024
+#define RX_RING_MAX	1024
 #define ETHERSMALL		60
 #define PKT_BUF_SZ		1538
 #define SH_ETH_TSU_TIMEOUT_MS	500
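Note: these bounds back the new ethtool support; sh_eth_set_ringparam()
rejects any request outside [TX_RING_MIN, TX_RING_MAX] and
[RX_RING_MIN, RX_RING_MAX], i.e. 64..1024 descriptors per ring.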
@@ -585,71 +589,6 @@ enum RPADIR_BIT {
 /* FDR */
 #define DEFAULT_FDR_INIT	0x00000707
 
-enum phy_offsets {
-	PHY_CTRL = 0, PHY_STAT = 1, PHY_IDT1 = 2, PHY_IDT2 = 3,
-	PHY_ANA = 4, PHY_ANL = 5, PHY_ANE = 6,
-	PHY_16 = 16,
-};
-
-/* PHY_CTRL */
-enum PHY_CTRL_BIT {
-	PHY_C_RESET = 0x8000, PHY_C_LOOPBK = 0x4000, PHY_C_SPEEDSL = 0x2000,
-	PHY_C_ANEGEN = 0x1000, PHY_C_PWRDN = 0x0800, PHY_C_ISO = 0x0400,
-	PHY_C_RANEG = 0x0200, PHY_C_DUPLEX = 0x0100, PHY_C_COLT = 0x0080,
-};
-#define DM9161_PHY_C_ANEGEN 0 /* auto nego special */
-
-/* PHY_STAT */
-enum PHY_STAT_BIT {
-	PHY_S_100T4 = 0x8000, PHY_S_100X_F = 0x4000, PHY_S_100X_H = 0x2000,
-	PHY_S_10T_F = 0x1000, PHY_S_10T_H = 0x0800, PHY_S_ANEGC = 0x0020,
-	PHY_S_RFAULT = 0x0010, PHY_S_ANEGA = 0x0008, PHY_S_LINK = 0x0004,
-	PHY_S_JAB = 0x0002, PHY_S_EXTD = 0x0001,
-};
-
-/* PHY_ANA */
-enum PHY_ANA_BIT {
-	PHY_A_NP = 0x8000, PHY_A_ACK = 0x4000, PHY_A_RF = 0x2000,
-	PHY_A_FCS = 0x0400, PHY_A_T4 = 0x0200, PHY_A_FDX = 0x0100,
-	PHY_A_HDX = 0x0080, PHY_A_10FDX = 0x0040, PHY_A_10HDX = 0x0020,
-	PHY_A_SEL = 0x001e,
-};
-/* PHY_ANL */
-enum PHY_ANL_BIT {
-	PHY_L_NP = 0x8000, PHY_L_ACK = 0x4000, PHY_L_RF = 0x2000,
-	PHY_L_FCS = 0x0400, PHY_L_T4 = 0x0200, PHY_L_FDX = 0x0100,
-	PHY_L_HDX = 0x0080, PHY_L_10FDX = 0x0040, PHY_L_10HDX = 0x0020,
-	PHY_L_SEL = 0x001f,
-};
-
-/* PHY_ANE */
-enum PHY_ANE_BIT {
-	PHY_E_PDF = 0x0010, PHY_E_LPNPA = 0x0008, PHY_E_NPA = 0x0004,
-	PHY_E_PRX = 0x0002, PHY_E_LPANEGA = 0x0001,
-};
-
-/* DM9161 */
-enum PHY_16_BIT {
-	PHY_16_BP4B45 = 0x8000, PHY_16_BPSCR = 0x4000, PHY_16_BPALIGN = 0x2000,
-	PHY_16_BP_ADPOK = 0x1000, PHY_16_Repeatmode = 0x0800,
-	PHY_16_TXselect = 0x0400,
-	PHY_16_Rsvd = 0x0200, PHY_16_RMIIEnable = 0x0100,
-	PHY_16_Force100LNK = 0x0080,
-	PHY_16_APDLED_CTL = 0x0040, PHY_16_COLLED_CTL = 0x0020,
-	PHY_16_RPDCTR_EN = 0x0010,
-	PHY_16_ResetStMch = 0x0008, PHY_16_PreamSupr = 0x0004,
-	PHY_16_Sleepmode = 0x0002,
-	PHY_16_RemoteLoopOut = 0x0001,
-};
-
-#define POST_RX		0x08
-#define POST_FW		0x04
-#define POST0_RX	(POST_RX)
-#define POST0_FW	(POST_FW)
-#define POST1_RX	(POST_RX >> 2)
-#define POST1_FW	(POST_FW >> 2)
-#define POST_ALL	(POST0_RX | POST0_FW | POST1_RX | POST1_FW)
-
 /* ARSTR */
 enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
 
@@ -757,6 +696,7 @@ struct sh_eth_cpu_data {
 	unsigned no_trimd:1;	/* E-DMAC DO NOT have TRIMD */
 	unsigned no_ade:1;	/* E-DMAC DO NOT have ADE bit in EESR */
 	unsigned hw_crc:1;	/* E-DMAC have CSMR */
+	unsigned select_mii:1;	/* EtherC have RMII_MII (MII select register) */
 };
 
 struct sh_eth_private {
@@ -765,13 +705,14 @@ struct sh_eth_private {
 	const u16 *reg_offset;
 	void __iomem *addr;
 	void __iomem *tsu_addr;
+	u32 num_rx_ring;
+	u32 num_tx_ring;
 	dma_addr_t rx_desc_dma;
 	dma_addr_t tx_desc_dma;
 	struct sh_eth_rxdesc *rx_ring;
 	struct sh_eth_txdesc *tx_ring;
 	struct sk_buff **rx_skbuff;
 	struct sk_buff **tx_skbuff;
-	struct timer_list timer;
 	spinlock_t lock;
 	u32 cur_rx, dirty_rx;	/* Producer/consumer ring indices */
 	u32 cur_tx, dirty_tx;
@@ -786,10 +727,6 @@ struct sh_eth_private {
 	int msg_enable;
 	int speed;
 	int duplex;
-	u32 rx_int_var, tx_int_var;	/* interrupt control variables */
-	char post_rx;		/* POST receive */
-	char post_fw;		/* POST forward */
-	struct net_device_stats tsu_stats;	/* TSU forward status */
 	int port;		/* for TSU */
 	int vlan_num_ids;	/* for VLAN tag filter */
 