author	Jeff Garzik <jeff@garzik.org>	2008-09-24 20:40:52 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-09-24 20:40:52 -0400
commit	ae19161e28a7b48d2752eff3ac8eb7703986c313 (patch)
tree	0d174e0ef02b0e21951dd40df6ebbfc320d11732
parent	152cbcf94baec68b45832db5024184906ab798b1 (diff)
parent	042af53c7839282de15cc7fd7ad8ab938d74ab7c (diff)
Merge branch 'for-2.6.28' of git://git.marvell.com/mv643xx_eth into upstream-next
 arch/arm/mach-kirkwood/db88f6281-bp-setup.c  |    2
 arch/arm/mach-kirkwood/rd88f6192-nas-setup.c |    2
 arch/arm/mach-kirkwood/rd88f6281-setup.c     |    2
 arch/arm/mach-loki/lb88rc8480-setup.c        |    2
 arch/arm/mach-mv78xx0/common.c               |    6
 arch/arm/mach-mv78xx0/db78x00-bp-setup.c     |    8
 arch/arm/mach-orion5x/db88f5281-setup.c      |    2
 arch/arm/mach-orion5x/dns323-setup.c         |    2
 arch/arm/mach-orion5x/kurobox_pro-setup.c    |    2
 arch/arm/mach-orion5x/mss2-setup.c           |    2
 arch/arm/mach-orion5x/mv2120-setup.c         |    2
 arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c |    2
 arch/arm/mach-orion5x/rd88f5181l-ge-setup.c  |    2
 arch/arm/mach-orion5x/rd88f5182-setup.c      |    2
 arch/arm/mach-orion5x/ts78xx-setup.c         |    3
 arch/arm/mach-orion5x/tsx09-common.c         |    2
 arch/arm/mach-orion5x/wnr854t-setup.c        |    2
 arch/arm/mach-orion5x/wrt350n-v2-setup.c     |    2
 arch/powerpc/sysdev/mv64x60_dev.c            |    6
 drivers/net/Kconfig                          |    2
 drivers/net/mv643xx_eth.c                    | 1438
 drivers/net/phy/mdio_bus.c                   |   89
 include/linux/mv643xx_eth.h                  |   13
 include/linux/phy.h                          |    2
 24 files changed, 820 insertions(+), 777 deletions(-)
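
The recurring change across the board files below is the mv643xx_eth platform-data convention: instead of a raw phy_addr integer (with -1 meaning "no PHY", plus a separate force_phy_addr flag), board code now wraps the SMI address in MV643XX_ETH_PHY_ADDR() or passes MV643XX_ETH_PHY_NONE, and the driver binds to the PHY through phylib (hence the "select MII" to "select PHYLIB" Kconfig change). A minimal sketch of the new convention, assuming the macro definitions this merge adds to include/linux/mv643xx_eth.h (that hunk is not shown in this excerpt); the "example" board name is hypothetical:

	#include <linux/mv643xx_eth.h>

	/* Port with a real PHY at SMI address 8: probed via phylib. */
	static struct mv643xx_eth_platform_data example_ge00_data = {
		.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
	};

	/* PHY-less port (e.g. wired to a switch): fixed link parameters. */
	static struct mv643xx_eth_platform_data example_ge01_data = {
		.phy_addr	= MV643XX_ETH_PHY_NONE,
		.speed		= SPEED_1000,
		.duplex		= DUPLEX_FULL,
	};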
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index 610fb24d8ae2..cd317795f355 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -25,7 +25,7 @@
 #include "common.h"
 
 static struct mv643xx_eth_platform_data db88f6281_ge00_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static struct mv_sata_platform_data db88f6281_sata_data = {
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index a3012d445971..b1d1a87a6821 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -30,7 +30,7 @@
 #define RD88F6192_GPIO_USB_VBUS	10
 
 static struct mv643xx_eth_platform_data rd88f6192_ge00_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static struct mv_sata_platform_data rd88f6192_sata_data = {
diff --git a/arch/arm/mach-kirkwood/rd88f6281-setup.c b/arch/arm/mach-kirkwood/rd88f6281-setup.c
index d96487a0f18b..b6416615c0b9 100644
--- a/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -69,7 +69,7 @@ static struct platform_device rd88f6281_nand_flash = {
 };
 
 static struct mv643xx_eth_platform_data rd88f6281_ge00_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/arm/mach-loki/lb88rc8480-setup.c b/arch/arm/mach-loki/lb88rc8480-setup.c
index 2cc9ac9b488f..85f9c1296aa0 100644
--- a/arch/arm/mach-loki/lb88rc8480-setup.c
+++ b/arch/arm/mach-loki/lb88rc8480-setup.c
@@ -67,7 +67,7 @@ static struct platform_device lb88rc8480_boot_flash = {
 };
 
 static struct mv643xx_eth_platform_data lb88rc8480_ge0_data = {
-	.phy_addr	= 1,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(1),
 	.mac_addr	= { 0x00, 0x50, 0x43, 0x11, 0x22, 0x33 },
 };
 
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index 953a26c469cb..5842d3bb02b2 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -330,6 +330,7 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 struct mv643xx_eth_shared_platform_data mv78xx0_ge01_shared_data = {
 	.t_clk		= 0,
 	.dram		= &mv78xx0_mbus_dram_info,
+	.shared_smi	= &mv78xx0_ge00_shared,
 };
 
 static struct resource mv78xx0_ge01_shared_resources[] = {
@@ -370,7 +371,6 @@ static struct platform_device mv78xx0_ge01 = {
 void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 {
 	eth_data->shared = &mv78xx0_ge01_shared;
-	eth_data->shared_smi = &mv78xx0_ge00_shared;
 	mv78xx0_ge01.dev.platform_data = eth_data;
 
 	platform_device_register(&mv78xx0_ge01_shared);
@@ -384,6 +384,7 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 struct mv643xx_eth_shared_platform_data mv78xx0_ge10_shared_data = {
 	.t_clk		= 0,
 	.dram		= &mv78xx0_mbus_dram_info,
+	.shared_smi	= &mv78xx0_ge00_shared,
 };
 
 static struct resource mv78xx0_ge10_shared_resources[] = {
@@ -424,7 +425,6 @@ static struct platform_device mv78xx0_ge10 = {
 void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
 {
 	eth_data->shared = &mv78xx0_ge10_shared;
-	eth_data->shared_smi = &mv78xx0_ge00_shared;
 	mv78xx0_ge10.dev.platform_data = eth_data;
 
 	platform_device_register(&mv78xx0_ge10_shared);
@@ -438,6 +438,7 @@ void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
 struct mv643xx_eth_shared_platform_data mv78xx0_ge11_shared_data = {
 	.t_clk		= 0,
 	.dram		= &mv78xx0_mbus_dram_info,
+	.shared_smi	= &mv78xx0_ge00_shared,
 };
 
 static struct resource mv78xx0_ge11_shared_resources[] = {
@@ -478,7 +479,6 @@ static struct platform_device mv78xx0_ge11 = {
 void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
 {
 	eth_data->shared = &mv78xx0_ge11_shared;
-	eth_data->shared_smi = &mv78xx0_ge00_shared;
 	mv78xx0_ge11.dev.platform_data = eth_data;
 
 	platform_device_register(&mv78xx0_ge11_shared);
diff --git a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
index a2d0c9783604..49f434c39eb7 100644
--- a/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
+++ b/arch/arm/mach-mv78xx0/db78x00-bp-setup.c
@@ -19,19 +19,19 @@
 #include "common.h"
 
 static struct mv643xx_eth_platform_data db78x00_ge00_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static struct mv643xx_eth_platform_data db78x00_ge01_data = {
-	.phy_addr	= 9,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(9),
 };
 
 static struct mv643xx_eth_platform_data db78x00_ge10_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 };
 
 static struct mv643xx_eth_platform_data db78x00_ge11_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 };
 
 static struct mv_sata_platform_data db78x00_sata_data = {
diff --git a/arch/arm/mach-orion5x/db88f5281-setup.c b/arch/arm/mach-orion5x/db88f5281-setup.c
index ff13e9060b18..d318bea2af91 100644
--- a/arch/arm/mach-orion5x/db88f5281-setup.c
+++ b/arch/arm/mach-orion5x/db88f5281-setup.c
@@ -285,7 +285,7 @@ subsys_initcall(db88f5281_pci_init);
  * Ethernet
  ****************************************************************************/
 static struct mv643xx_eth_platform_data db88f5281_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/dns323-setup.c b/arch/arm/mach-orion5x/dns323-setup.c
index b38c65ccfb15..3e66098340a5 100644
--- a/arch/arm/mach-orion5x/dns323-setup.c
+++ b/arch/arm/mach-orion5x/dns323-setup.c
@@ -79,7 +79,7 @@ subsys_initcall(dns323_pci_init);
  */
 
 static struct mv643xx_eth_platform_data dns323_eth_data = {
-	.phy_addr = 8,
+	.phy_addr = MV643XX_ETH_PHY_ADDR(8),
 };
 
 /****************************************************************************
diff --git a/arch/arm/mach-orion5x/kurobox_pro-setup.c b/arch/arm/mach-orion5x/kurobox_pro-setup.c
index e321ec331839..610f2a6297f8 100644
--- a/arch/arm/mach-orion5x/kurobox_pro-setup.c
+++ b/arch/arm/mach-orion5x/kurobox_pro-setup.c
@@ -161,7 +161,7 @@ subsys_initcall(kurobox_pro_pci_init);
  ****************************************************************************/
 
 static struct mv643xx_eth_platform_data kurobox_pro_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/mss2-setup.c b/arch/arm/mach-orion5x/mss2-setup.c
index 53ff1893b883..68acca98e638 100644
--- a/arch/arm/mach-orion5x/mss2-setup.c
+++ b/arch/arm/mach-orion5x/mss2-setup.c
@@ -109,7 +109,7 @@ subsys_initcall(mss2_pci_init);
  ****************************************************************************/
 
 static struct mv643xx_eth_platform_data mss2_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/mv2120-setup.c b/arch/arm/mach-orion5x/mv2120-setup.c
index 978d4d599396..97c9ccb2ac60 100644
--- a/arch/arm/mach-orion5x/mv2120-setup.c
+++ b/arch/arm/mach-orion5x/mv2120-setup.c
@@ -39,7 +39,7 @@
  * Ethernet
  ****************************************************************************/
 static struct mv643xx_eth_platform_data mv2120_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static struct mv_sata_platform_data mv2120_sata_data = {
diff --git a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
index e72fe1e065e8..500cdadaf09c 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
@@ -88,7 +88,7 @@ static struct orion5x_mpp_mode rd88f5181l_fxo_mpp_modes[] __initdata = {
 };
 
 static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
index a1fe3257320d..ebde81416499 100644
--- a/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
@@ -89,7 +89,7 @@ static struct orion5x_mpp_mode rd88f5181l_ge_mpp_modes[] __initdata = {
 };
 
 static struct mv643xx_eth_platform_data rd88f5181l_ge_eth_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/arm/mach-orion5x/rd88f5182-setup.c b/arch/arm/mach-orion5x/rd88f5182-setup.c
index 4c3bcd76ac85..a04f9e4b633a 100644
--- a/arch/arm/mach-orion5x/rd88f5182-setup.c
+++ b/arch/arm/mach-orion5x/rd88f5182-setup.c
@@ -221,7 +221,7 @@ subsys_initcall(rd88f5182_pci_init);
  ****************************************************************************/
 
 static struct mv643xx_eth_platform_data rd88f5182_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index ae0a5dccd2a1..1368e9fd1a06 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -103,8 +103,7 @@ static struct platform_device ts78xx_nor_boot_flash = {
  * Ethernet
  ****************************************************************************/
 static struct mv643xx_eth_platform_data ts78xx_eth_data = {
-	.phy_addr	= 0,
-	.force_phy_addr	= 1,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(0),
 };
 
 /*****************************************************************************
diff --git a/arch/arm/mach-orion5x/tsx09-common.c b/arch/arm/mach-orion5x/tsx09-common.c
index 83feac3147a6..19cde24fbfdf 100644
--- a/arch/arm/mach-orion5x/tsx09-common.c
+++ b/arch/arm/mach-orion5x/tsx09-common.c
@@ -48,7 +48,7 @@ void qnap_tsx09_power_off(void)
  ****************************************************************************/
 
 struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
-	.phy_addr	= 8,
+	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
 };
 
 static int __init qnap_tsx09_parse_hex_nibble(char n)
diff --git a/arch/arm/mach-orion5x/wnr854t-setup.c b/arch/arm/mach-orion5x/wnr854t-setup.c
index b6bc43e07eed..7ddc22c2bb54 100644
--- a/arch/arm/mach-orion5x/wnr854t-setup.c
+++ b/arch/arm/mach-orion5x/wnr854t-setup.c
@@ -92,7 +92,7 @@ static struct platform_device wnr854t_nor_flash = {
 };
 
 static struct mv643xx_eth_platform_data wnr854t_eth_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/arm/mach-orion5x/wrt350n-v2-setup.c b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
index b10da17b3fbd..9a4fd5256462 100644
--- a/arch/arm/mach-orion5x/wrt350n-v2-setup.c
+++ b/arch/arm/mach-orion5x/wrt350n-v2-setup.c
@@ -100,7 +100,7 @@ static struct platform_device wrt350n_v2_nor_flash = {
 };
 
 static struct mv643xx_eth_platform_data wrt350n_v2_eth_data = {
-	.phy_addr	= -1,
+	.phy_addr	= MV643XX_ETH_PHY_NONE,
 	.speed		= SPEED_1000,
 	.duplex		= DUPLEX_FULL,
 };
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 32e0ad0ebea8..b6bd775d2e22 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -293,10 +293,8 @@ static int __init mv64x60_eth_device_setup(struct device_node *np, int id,
 		return -ENODEV;
 
 	prop = of_get_property(phy, "reg", NULL);
-	if (prop) {
-		pdata.force_phy_addr = 1;
-		pdata.phy_addr = *prop;
-	}
+	if (prop)
+		pdata.phy_addr = MV643XX_ETH_PHY_ADDR(*prop);
 
 	of_node_put(phy);
 
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 031b95b1f229..42b4eb703c2d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2274,7 +2274,7 @@ config UGETH_TX_ON_DEMAND
 config MV643XX_ETH
 	tristate "Marvell Discovery (643XX) and Orion ethernet support"
 	depends on MV64360 || MV64X60 || (PPC_MULTIPLATFORM && PPC32) || PLAT_ORION
-	select MII
+	select PHYLIB
 	help
 	  This driver supports the gigabit ethernet MACs in the
 	  Marvell Discovery PPC/MIPS chipset family (MV643XX) and
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 0a18b9e96da1..55aa8ba7e0f2 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -48,30 +48,28 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
-#include <linux/mii.h>
+#include <linux/phy.h>
 #include <linux/mv643xx_eth.h>
 #include <asm/io.h>
 #include <asm/types.h>
 #include <asm/system.h>
 
 static char mv643xx_eth_driver_name[] = "mv643xx_eth";
-static char mv643xx_eth_driver_version[] = "1.3";
+static char mv643xx_eth_driver_version[] = "1.4";
 
-#define MV643XX_ETH_CHECKSUM_OFFLOAD_TX
-#define MV643XX_ETH_NAPI
-#define MV643XX_ETH_TX_FAST_REFILL
-
-#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
-#define MAX_DESCS_PER_SKB	(MAX_SKB_FRAGS + 1)
-#else
-#define MAX_DESCS_PER_SKB	1
-#endif
 
 /*
  * Registers shared between all ports.
 */
 #define PHY_ADDR			0x0000
 #define SMI_REG				0x0004
+#define  SMI_BUSY			0x10000000
+#define  SMI_READ_VALID			0x08000000
+#define  SMI_OPCODE_READ		0x04000000
+#define  SMI_OPCODE_WRITE		0x00000000
+#define ERR_INT_CAUSE			0x0080
+#define  ERR_INT_SMI_DONE		0x00000010
+#define ERR_INT_MASK			0x0084
 #define WINDOW_BASE(w)			(0x0200 + ((w) << 3))
 #define WINDOW_SIZE(w)			(0x0204 + ((w) << 3))
 #define WINDOW_REMAP_HIGH(w)		(0x0280 + ((w) << 2))
@@ -104,16 +102,12 @@ static char mv643xx_eth_driver_version[] = "1.3";
 #define TX_BW_MTU(p)			(0x0458 + ((p) << 10))
 #define TX_BW_BURST(p)			(0x045c + ((p) << 10))
 #define INT_CAUSE(p)			(0x0460 + ((p) << 10))
-#define  INT_TX_END_0			0x00080000
 #define  INT_TX_END			0x07f80000
-#define  INT_RX				0x0007fbfc
+#define  INT_RX				0x000003fc
 #define  INT_EXT			0x00000002
 #define INT_CAUSE_EXT(p)		(0x0464 + ((p) << 10))
-#define  INT_EXT_LINK			0x00100000
-#define  INT_EXT_PHY			0x00010000
-#define  INT_EXT_TX_ERROR_0		0x00000100
-#define  INT_EXT_TX_0			0x00000001
-#define  INT_EXT_TX			0x0000ffff
+#define  INT_EXT_LINK_PHY		0x00110000
+#define  INT_EXT_TX			0x000000ff
 #define INT_MASK(p)			(0x0468 + ((p) << 10))
 #define INT_MASK_EXT(p)			(0x046c + ((p) << 10))
 #define TX_FIFO_URGENT_THRESHOLD(p)	(0x0474 + ((p) << 10))
@@ -171,8 +165,8 @@ static char mv643xx_eth_driver_version[] = "1.3";
 #define FORCE_LINK_PASS			(1 << 1)
 #define SERIAL_PORT_ENABLE		(1 << 0)
 
-#define DEFAULT_RX_QUEUE_SIZE		400
-#define DEFAULT_TX_QUEUE_SIZE		800
+#define DEFAULT_RX_QUEUE_SIZE		128
+#define DEFAULT_TX_QUEUE_SIZE		256
 
 
 /*
@@ -249,9 +243,23 @@ struct mv643xx_eth_shared_private {
 	void __iomem *base;
 
 	/*
-	 * Protects access to SMI_REG, which is shared between ports.
+	 * Points at the right SMI instance to use.
+	 */
+	struct mv643xx_eth_shared_private *smi;
+
+	/*
+	 * Provides access to local SMI interface.
+	 */
+	struct mii_bus smi_bus;
+
+	/*
+	 * If we have access to the error interrupt pin (which is
+	 * somewhat misnamed as it not only reflects internal errors
+	 * but also reflects SMI completion), use that to wait for
+	 * SMI access completion instead of polling the SMI busy bit.
 	 */
-	spinlock_t phy_lock;
+	int err_interrupt;
+	wait_queue_head_t smi_busy_wait;
 
 	/*
 	 * Per-port MBUS window access register value.
@@ -263,9 +271,13 @@ struct mv643xx_eth_shared_private {
 	 */
 	unsigned int t_clk;
 	int extended_rx_coal_limit;
-	int tx_bw_control_moved;
+	int tx_bw_control;
 };
 
+#define TX_BW_CONTROL_ABSENT		0
+#define TX_BW_CONTROL_OLD_LAYOUT	1
+#define TX_BW_CONTROL_NEW_LAYOUT	2
+
 
 /* per-port *****************************************************************/
 struct mib_counters {
@@ -314,8 +326,6 @@ struct rx_queue {
 	dma_addr_t rx_desc_dma;
 	int rx_desc_area_size;
 	struct sk_buff **rx_skb;
-
-	struct timer_list rx_oom;
 };
 
 struct tx_queue {
@@ -330,7 +340,12 @@ struct tx_queue {
 	struct tx_desc *tx_desc_area;
 	dma_addr_t tx_desc_dma;
 	int tx_desc_area_size;
-	struct sk_buff **tx_skb;
+
+	struct sk_buff_head tx_skb;
+
+	unsigned long tx_packets;
+	unsigned long tx_bytes;
+	unsigned long tx_dropped;
 };
 
 struct mv643xx_eth_private {
@@ -339,14 +354,21 @@ struct mv643xx_eth_private {
 
 	struct net_device *dev;
 
-	struct mv643xx_eth_shared_private *shared_smi;
-	int phy_addr;
-
-	spinlock_t lock;
+	struct phy_device *phy;
 
+	struct timer_list mib_counters_timer;
+	spinlock_t mib_counters_lock;
 	struct mib_counters mib_counters;
+
 	struct work_struct tx_timeout_task;
-	struct mii_if_info mii;
+
+	struct napi_struct napi;
+	u8 work_link;
+	u8 work_tx;
+	u8 work_tx_end;
+	u8 work_rx;
+	u8 work_rx_refill;
+	u8 work_rx_oom;
 
 	/*
 	 * RX state.
@@ -354,9 +376,8 @@ struct mv643xx_eth_private {
 	int default_rx_ring_size;
 	unsigned long rx_desc_sram_addr;
 	int rx_desc_sram_size;
-	u8 rxq_mask;
-	int rxq_primary;
-	struct napi_struct napi;
+	int rxq_count;
+	struct timer_list rx_oom;
 	struct rx_queue rxq[8];
 
 	/*
@@ -365,12 +386,8 @@ struct mv643xx_eth_private {
 	int default_tx_ring_size;
 	unsigned long tx_desc_sram_addr;
 	int tx_desc_sram_size;
-	u8 txq_mask;
-	int txq_primary;
+	int txq_count;
 	struct tx_queue txq[8];
-#ifdef MV643XX_ETH_TX_FAST_REFILL
-	int tx_clean_threshold;
-#endif
 };
 
 
@@ -440,94 +457,21 @@ static void txq_disable(struct tx_queue *txq)
 	udelay(10);
 }
 
-static void __txq_maybe_wake(struct tx_queue *txq)
+static void txq_maybe_wake(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
 
-	/*
-	 * netif_{stop,wake}_queue() flow control only applies to
-	 * the primary queue.
-	 */
-	BUG_ON(txq->index != mp->txq_primary);
-
-	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
-		netif_wake_queue(mp->dev);
-}
-
-
-/* rx ***********************************************************************/
-static void txq_reclaim(struct tx_queue *txq, int force);
-
-static void rxq_refill(struct rx_queue *rxq)
-{
-	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
-	unsigned long flags;
-
-	spin_lock_irqsave(&mp->lock, flags);
-
-	while (rxq->rx_desc_count < rxq->rx_ring_size) {
-		int skb_size;
-		struct sk_buff *skb;
-		int unaligned;
-		int rx;
-
-		/*
-		 * Reserve 2+14 bytes for an ethernet header (the
-		 * hardware automatically prepends 2 bytes of dummy
-		 * data to each received packet), 16 bytes for up to
-		 * four VLAN tags, and 4 bytes for the trailing FCS
-		 * -- 36 bytes total.
-		 */
-		skb_size = mp->dev->mtu + 36;
-
-		/*
-		 * Make sure that the skb size is a multiple of 8
-		 * bytes, as the lower three bits of the receive
-		 * descriptor's buffer size field are ignored by
-		 * the hardware.
-		 */
-		skb_size = (skb_size + 7) & ~7;
-
-		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
-		if (skb == NULL)
-			break;
-
-		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
-		if (unaligned)
-			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
-
-		rxq->rx_desc_count++;
-		rx = rxq->rx_used_desc;
-		rxq->rx_used_desc = (rx + 1) % rxq->rx_ring_size;
-
-		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
-						skb_size, DMA_FROM_DEVICE);
-		rxq->rx_desc_area[rx].buf_size = skb_size;
-		rxq->rx_skb[rx] = skb;
-		wmb();
-		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
-						RX_ENABLE_INTERRUPT;
-		wmb();
-
-		/*
-		 * The hardware automatically prepends 2 bytes of
-		 * dummy data to each received packet, so that the
-		 * IP header ends up 16-byte aligned.
-		 */
-		skb_reserve(skb, 2);
+	if (netif_tx_queue_stopped(nq)) {
+		__netif_tx_lock(nq, smp_processor_id());
+		if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+			netif_tx_wake_queue(nq);
+		__netif_tx_unlock(nq);
 	}
-
-	if (rxq->rx_desc_count != rxq->rx_ring_size)
-		mod_timer(&rxq->rx_oom, jiffies + (HZ / 10));
-
-	spin_unlock_irqrestore(&mp->lock, flags);
 }
 
-static inline void rxq_refill_timer_wrapper(unsigned long data)
-{
-	rxq_refill((struct rx_queue *)data);
-}
 
+/* rx napi ******************************************************************/
 static int rxq_process(struct rx_queue *rxq, int budget)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
@@ -539,31 +483,31 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		struct rx_desc *rx_desc;
 		unsigned int cmd_sts;
 		struct sk_buff *skb;
-		unsigned long flags;
-
-		spin_lock_irqsave(&mp->lock, flags);
+		u16 byte_cnt;
 
 		rx_desc = &rxq->rx_desc_area[rxq->rx_curr_desc];
 
 		cmd_sts = rx_desc->cmd_sts;
-		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
-			spin_unlock_irqrestore(&mp->lock, flags);
+		if (cmd_sts & BUFFER_OWNED_BY_DMA)
 			break;
-		}
 		rmb();
 
 		skb = rxq->rx_skb[rxq->rx_curr_desc];
 		rxq->rx_skb[rxq->rx_curr_desc] = NULL;
 
-		rxq->rx_curr_desc = (rxq->rx_curr_desc + 1) % rxq->rx_ring_size;
-
-		spin_unlock_irqrestore(&mp->lock, flags);
+		rxq->rx_curr_desc++;
+		if (rxq->rx_curr_desc == rxq->rx_ring_size)
+			rxq->rx_curr_desc = 0;
 
-		dma_unmap_single(NULL, rx_desc->buf_ptr + 2,
+		dma_unmap_single(NULL, rx_desc->buf_ptr,
 				 rx_desc->buf_size, DMA_FROM_DEVICE);
 		rxq->rx_desc_count--;
 		rx++;
 
+		mp->work_rx_refill |= 1 << rxq->index;
+
+		byte_cnt = rx_desc->byte_cnt;
+
 		/*
 		 * Update statistics.
 		 *
@@ -573,7 +517,7 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 		 * byte CRC at the end of the packet (which we do count).
 		 */
 		stats->rx_packets++;
-		stats->rx_bytes += rx_desc->byte_cnt - 2;
+		stats->rx_bytes += byte_cnt - 2;
 
 		/*
 		 * In case we received a packet without first / last bits
@@ -596,72 +540,96 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 			if (cmd_sts & ERROR_SUMMARY)
 				stats->rx_errors++;
 
-			dev_kfree_skb_irq(skb);
+			dev_kfree_skb(skb);
 		} else {
 			/*
 			 * The -4 is for the CRC in the trailer of the
 			 * received packet
 			 */
-			skb_put(skb, rx_desc->byte_cnt - 2 - 4);
+			skb_put(skb, byte_cnt - 2 - 4);
 
-			if (cmd_sts & LAYER_4_CHECKSUM_OK) {
+			if (cmd_sts & LAYER_4_CHECKSUM_OK)
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
-				skb->csum = htons(
-					(cmd_sts & 0x0007fff8) >> 3);
-			}
 			skb->protocol = eth_type_trans(skb, mp->dev);
-#ifdef MV643XX_ETH_NAPI
 			netif_receive_skb(skb);
-#else
-			netif_rx(skb);
-#endif
 		}
 
 		mp->dev->last_rx = jiffies;
 	}
 
-	rxq_refill(rxq);
+	if (rx < budget)
+		mp->work_rx &= ~(1 << rxq->index);
 
 	return rx;
 }
 
-#ifdef MV643XX_ETH_NAPI
-static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
+static int rxq_refill(struct rx_queue *rxq, int budget)
 {
-	struct mv643xx_eth_private *mp;
-	int rx;
-	int i;
+	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
+	int skb_size;
+	int refilled;
 
-	mp = container_of(napi, struct mv643xx_eth_private, napi);
+	/*
+	 * Reserve 2+14 bytes for an ethernet header (the hardware
+	 * automatically prepends 2 bytes of dummy data to each
+	 * received packet), 16 bytes for up to four VLAN tags, and
+	 * 4 bytes for the trailing FCS -- 36 bytes total.
+	 */
+	skb_size = rxq_to_mp(rxq)->dev->mtu + 36;
 
-#ifdef MV643XX_ETH_TX_FAST_REFILL
-	if (++mp->tx_clean_threshold > 5) {
-		mp->tx_clean_threshold = 0;
-		for (i = 0; i < 8; i++)
-			if (mp->txq_mask & (1 << i))
-				txq_reclaim(mp->txq + i, 0);
-
-		if (netif_carrier_ok(mp->dev)) {
-			spin_lock_irq(&mp->lock);
-			__txq_maybe_wake(mp->txq + mp->txq_primary);
-			spin_unlock_irq(&mp->lock);
+	/*
+	 * Make sure that the skb size is a multiple of 8 bytes, as
+	 * the lower three bits of the receive descriptor's buffer
+	 * size field are ignored by the hardware.
+	 */
+	skb_size = (skb_size + 7) & ~7;
+
+	refilled = 0;
+	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
+		struct sk_buff *skb;
+		int unaligned;
+		int rx;
+
+		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
+		if (skb == NULL) {
+			mp->work_rx_oom |= 1 << rxq->index;
+			goto oom;
 		}
-	}
-#endif
 
-	rx = 0;
-	for (i = 7; rx < budget && i >= 0; i--)
-		if (mp->rxq_mask & (1 << i))
-			rx += rxq_process(mp->rxq + i, budget - rx);
+		unaligned = (u32)skb->data & (dma_get_cache_alignment() - 1);
+		if (unaligned)
+			skb_reserve(skb, dma_get_cache_alignment() - unaligned);
 
-	if (rx < budget) {
-		netif_rx_complete(mp->dev, napi);
-		wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
+		refilled++;
+		rxq->rx_desc_count++;
+
+		rx = rxq->rx_used_desc++;
+		if (rxq->rx_used_desc == rxq->rx_ring_size)
+			rxq->rx_used_desc = 0;
+
+		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
+						skb_size, DMA_FROM_DEVICE);
+		rxq->rx_desc_area[rx].buf_size = skb_size;
+		rxq->rx_skb[rx] = skb;
+		wmb();
+		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
+						RX_ENABLE_INTERRUPT;
+		wmb();
+
+		/*
+		 * The hardware automatically prepends 2 bytes of
+		 * dummy data to each received packet, so that the
+		 * IP header ends up 16-byte aligned.
+		 */
+		skb_reserve(skb, 2);
 	}
 
-	return rx;
+	if (refilled < budget)
+		mp->work_rx_refill &= ~(1 << rxq->index);
+
+oom:
+	return refilled;
 }
-#endif
 
 
 /* tx ***********************************************************************/
@@ -684,8 +652,9 @@ static int txq_alloc_desc_index(struct tx_queue *txq)
 
 	BUG_ON(txq->tx_desc_count >= txq->tx_ring_size);
 
-	tx_desc_curr = txq->tx_curr_desc;
-	txq->tx_curr_desc = (tx_desc_curr + 1) % txq->tx_ring_size;
+	tx_desc_curr = txq->tx_curr_desc++;
+	if (txq->tx_curr_desc == txq->tx_ring_size)
+		txq->tx_curr_desc = 0;
 
 	BUG_ON(txq->tx_curr_desc == txq->tx_used_desc);
 
@@ -714,10 +683,8 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
 			desc->cmd_sts = BUFFER_OWNED_BY_DMA |
 					ZERO_PADDING | TX_LAST_DESC |
 					TX_ENABLE_INTERRUPT;
-			txq->tx_skb[tx_index] = skb;
 		} else {
 			desc->cmd_sts = BUFFER_OWNED_BY_DMA;
-			txq->tx_skb[tx_index] = NULL;
 		}
 
 		desc->l4i_chk = 0;
@@ -734,144 +701,222 @@ static inline __be16 sum16_as_be(__sum16 sum)
 	return (__force __be16)sum;
 }
 
-static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int tx_index;
 	struct tx_desc *desc;
 	u32 cmd_sts;
+	u16 l4i_chk;
 	int length;
 
 	cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
-
-	tx_index = txq_alloc_desc_index(txq);
-	desc = &txq->tx_desc_area[tx_index];
-
-	if (nr_frags) {
-		txq_submit_frag_skb(txq, skb);
-
-		length = skb_headlen(skb);
-		txq->tx_skb[tx_index] = NULL;
-	} else {
-		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
-		length = skb->len;
-		txq->tx_skb[tx_index] = skb;
-	}
-
-	desc->byte_cnt = length;
-	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+	l4i_chk = 0;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		int mac_hdr_len;
+		int tag_bytes;
 
 		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
 		       skb->protocol != htons(ETH_P_8021Q));
 
-		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
-			   GEN_IP_V4_CHECKSUM |
-			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
+		if (unlikely(tag_bytes & ~12)) {
+			if (skb_checksum_help(skb) == 0)
+				goto no_csum;
+			kfree_skb(skb);
+			return 1;
+		}
 
-		mac_hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
-		switch (mac_hdr_len - ETH_HLEN) {
-		case 0:
-			break;
-		case 4:
+		if (tag_bytes & 4)
 			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
-			break;
-		case 8:
+		if (tag_bytes & 8)
 			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-			break;
-		case 12:
-			cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
-			cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-			break;
-		default:
-			if (net_ratelimit())
-				dev_printk(KERN_ERR, &txq_to_mp(txq)->dev->dev,
-					"mac header length is %d?!\n", mac_hdr_len);
-			break;
-		}
+
+		cmd_sts |= GEN_TCP_UDP_CHECKSUM |
+			   GEN_IP_V4_CHECKSUM |
+			   ip_hdr(skb)->ihl << TX_IHL_SHIFT;
 
 		switch (ip_hdr(skb)->protocol) {
 		case IPPROTO_UDP:
 			cmd_sts |= UDP_FRAME;
-			desc->l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
+			l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
 			break;
 		case IPPROTO_TCP:
-			desc->l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
+			l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
 			break;
 		default:
 			BUG();
 		}
 	} else {
+no_csum:
 		/* Errata BTS #50, IHL must be 5 if no HW checksum */
 		cmd_sts |= 5 << TX_IHL_SHIFT;
-		desc->l4i_chk = 0;
 	}
 
+	tx_index = txq_alloc_desc_index(txq);
+	desc = &txq->tx_desc_area[tx_index];
+
+	if (nr_frags) {
+		txq_submit_frag_skb(txq, skb);
+		length = skb_headlen(skb);
+	} else {
+		cmd_sts |= ZERO_PADDING | TX_LAST_DESC | TX_ENABLE_INTERRUPT;
+		length = skb->len;
+	}
+
+	desc->l4i_chk = l4i_chk;
+	desc->byte_cnt = length;
+	desc->buf_ptr = dma_map_single(NULL, skb->data, length, DMA_TO_DEVICE);
+
+	__skb_queue_tail(&txq->tx_skb, skb);
+
 	/* ensure all other descriptors are written before first cmd_sts */
 	wmb();
 	desc->cmd_sts = cmd_sts;
 
-	/* clear TX_END interrupt status */
-	wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
-	rdl(mp, INT_CAUSE(mp->port_num));
+	/* clear TX_END status */
+	mp->work_tx_end &= ~(1 << txq->index);
 
 	/* ensure all descriptors are written before poking hardware */
 	wmb();
 	txq_enable(txq);
 
 	txq->tx_desc_count += nr_frags + 1;
+
+	return 0;
 }
 
 static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
+	int queue;
 	struct tx_queue *txq;
-	unsigned long flags;
+	struct netdev_queue *nq;
+
+	queue = skb_get_queue_mapping(skb);
+	txq = mp->txq + queue;
+	nq = netdev_get_tx_queue(dev, queue);
 
 	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
-		stats->tx_dropped++;
+		txq->tx_dropped++;
 		dev_printk(KERN_DEBUG, &dev->dev,
 			   "failed to linearize skb with tiny "
 			   "unaligned fragment\n");
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irqsave(&mp->lock, flags);
-
-	txq = mp->txq + mp->txq_primary;
-
-	if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
-		spin_unlock_irqrestore(&mp->lock, flags);
-		if (txq->index == mp->txq_primary && net_ratelimit())
-			dev_printk(KERN_ERR, &dev->dev,
-				   "primary tx queue full?!\n");
+	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+		if (net_ratelimit())
+			dev_printk(KERN_ERR, &dev->dev, "tx queue full?!\n");
 		kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	txq_submit_skb(txq, skb);
-	stats->tx_bytes += skb->len;
-	stats->tx_packets++;
-	dev->trans_start = jiffies;
-
-	if (txq->index == mp->txq_primary) {
+	if (!txq_submit_skb(txq, skb)) {
 		int entries_left;
 
+		txq->tx_bytes += skb->len;
+		txq->tx_packets++;
+		dev->trans_start = jiffies;
+
 		entries_left = txq->tx_ring_size - txq->tx_desc_count;
-		if (entries_left < MAX_DESCS_PER_SKB)
-			netif_stop_queue(dev);
+		if (entries_left < MAX_SKB_FRAGS + 1)
+			netif_tx_stop_queue(nq);
 	}
 
-	spin_unlock_irqrestore(&mp->lock, flags);
-
 	return NETDEV_TX_OK;
 }
 
 
+/* tx napi ******************************************************************/
+static void txq_kick(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+	u32 hw_desc_ptr;
+	u32 expected_ptr;
+
+	__netif_tx_lock(nq, smp_processor_id());
+
+	if (rdl(mp, TXQ_COMMAND(mp->port_num)) & (1 << txq->index))
+		goto out;
+
+	hw_desc_ptr = rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index));
+	expected_ptr = (u32)txq->tx_desc_dma +
+				txq->tx_curr_desc * sizeof(struct tx_desc);
+
+	if (hw_desc_ptr != expected_ptr)
+		txq_enable(txq);
+
+out:
+	__netif_tx_unlock(nq);
+
+	mp->work_tx_end &= ~(1 << txq->index);
+}
+
+static int txq_reclaim(struct tx_queue *txq, int budget, int force)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
+	int reclaimed;
+
+	__netif_tx_lock(nq, smp_processor_id());
+
+	reclaimed = 0;
+	while (reclaimed < budget && txq->tx_desc_count > 0) {
+		int tx_index;
+		struct tx_desc *desc;
+		u32 cmd_sts;
+		struct sk_buff *skb;
+
+		tx_index = txq->tx_used_desc;
+		desc = &txq->tx_desc_area[tx_index];
+		cmd_sts = desc->cmd_sts;
+
+		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
+			if (!force)
+				break;
+			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
+		}
+
+		txq->tx_used_desc = tx_index + 1;
+		if (txq->tx_used_desc == txq->tx_ring_size)
+			txq->tx_used_desc = 0;
+
+		reclaimed++;
+		txq->tx_desc_count--;
+
+		skb = NULL;
+		if (cmd_sts & TX_LAST_DESC)
+			skb = __skb_dequeue(&txq->tx_skb);
+
+		if (cmd_sts & ERROR_SUMMARY) {
+			dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n");
+			mp->dev->stats.tx_errors++;
+		}
+
+		if (cmd_sts & TX_FIRST_DESC) {
+			dma_unmap_single(NULL, desc->buf_ptr,
+					 desc->byte_cnt, DMA_TO_DEVICE);
+		} else {
+			dma_unmap_page(NULL, desc->buf_ptr,
+				       desc->byte_cnt, DMA_TO_DEVICE);
+		}
+
+		if (skb)
+			dev_kfree_skb(skb);
+	}
+
+	__netif_tx_unlock(nq);
+
+	if (reclaimed < budget)
+		mp->work_tx &= ~(1 << txq->index);
+
+	return reclaimed;
+}
+
+
 /* tx rate control **********************************************************/
 /*
  * Set total maximum TX rate (shared by all TX queues for this port)
@@ -895,14 +940,17 @@ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
 	if (bucket_size > 65535)
 		bucket_size = 65535;
 
-	if (mp->shared->tx_bw_control_moved) {
-		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
-		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
-		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
-	} else {
+	switch (mp->shared->tx_bw_control) {
+	case TX_BW_CONTROL_OLD_LAYOUT:
 		wrl(mp, TX_BW_RATE(mp->port_num), token_rate);
 		wrl(mp, TX_BW_MTU(mp->port_num), mtu);
 		wrl(mp, TX_BW_BURST(mp->port_num), bucket_size);
+		break;
+	case TX_BW_CONTROL_NEW_LAYOUT:
+		wrl(mp, TX_BW_RATE_MOVED(mp->port_num), token_rate);
+		wrl(mp, TX_BW_MTU_MOVED(mp->port_num), mtu);
+		wrl(mp, TX_BW_BURST_MOVED(mp->port_num), bucket_size);
+		break;
 	}
 }
 
@@ -934,14 +982,21 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
 	/*
 	 * Turn on fixed priority mode.
 	 */
-	if (mp->shared->tx_bw_control_moved)
-		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
-	else
+	off = 0;
+	switch (mp->shared->tx_bw_control) {
+	case TX_BW_CONTROL_OLD_LAYOUT:
 		off = TXQ_FIX_PRIO_CONF(mp->port_num);
+		break;
+	case TX_BW_CONTROL_NEW_LAYOUT:
+		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
+		break;
+	}
 
-	val = rdl(mp, off);
-	val |= 1 << txq->index;
-	wrl(mp, off, val);
+	if (off) {
+		val = rdl(mp, off);
+		val |= 1 << txq->index;
+		wrl(mp, off, val);
+	}
 }
 
 static void txq_set_wrr(struct tx_queue *txq, int weight)
@@ -953,95 +1008,147 @@ static void txq_set_wrr(struct tx_queue *txq, int weight)
 	/*
 	 * Turn off fixed priority mode.
 	 */
-	if (mp->shared->tx_bw_control_moved)
-		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
-	else
+	off = 0;
+	switch (mp->shared->tx_bw_control) {
+	case TX_BW_CONTROL_OLD_LAYOUT:
 		off = TXQ_FIX_PRIO_CONF(mp->port_num);
+		break;
+	case TX_BW_CONTROL_NEW_LAYOUT:
+		off = TXQ_FIX_PRIO_CONF_MOVED(mp->port_num);
+		break;
+	}
 
-	val = rdl(mp, off);
-	val &= ~(1 << txq->index);
-	wrl(mp, off, val);
-
-	/*
-	 * Configure WRR weight for this queue.
-	 */
-	off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
-
-	val = rdl(mp, off);
-	val = (val & ~0xff) | (weight & 0xff);
-	wrl(mp, off, val);
+	if (off) {
+		val = rdl(mp, off);
+		val &= ~(1 << txq->index);
+		wrl(mp, off, val);
+
+		/*
+		 * Configure WRR weight for this queue.
+		 */
+		off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
+
+		val = rdl(mp, off);
+		val = (val & ~0xff) | (weight & 0xff);
+		wrl(mp, off, val);
+	}
 }
 
 
 /* mii management interface *************************************************/
-#define SMI_BUSY		0x10000000
-#define SMI_READ_VALID		0x08000000
-#define SMI_OPCODE_READ		0x04000000
-#define SMI_OPCODE_WRITE	0x00000000
-
-static void smi_reg_read(struct mv643xx_eth_private *mp, unsigned int addr,
-			 unsigned int reg, unsigned int *value)
+static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
 {
-	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
-	unsigned long flags;
-	int i;
+	struct mv643xx_eth_shared_private *msp = dev_id;
 
-	/* the SMI register is a shared resource */
-	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
+	if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
+		writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
+		wake_up(&msp->smi_busy_wait);
+		return IRQ_HANDLED;
+	}
 
-	/* wait for the SMI register to become available */
-	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
-		if (i == 1000) {
-			printk("%s: PHY busy timeout\n", mp->dev->name);
-			goto out;
+	return IRQ_NONE;
+}
+
+static int smi_is_done(struct mv643xx_eth_shared_private *msp)
+{
+	return !(readl(msp->base + SMI_REG) & SMI_BUSY);
+}
+
+static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
+{
+	if (msp->err_interrupt == NO_IRQ) {
+		int i;
+
+		for (i = 0; !smi_is_done(msp); i++) {
+			if (i == 10)
+				return -ETIMEDOUT;
+			msleep(10);
 		}
-		udelay(10);
+
+		return 0;
+	}
+
+	if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
+				msecs_to_jiffies(100)))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
+{
+	struct mv643xx_eth_shared_private *msp = bus->priv;
+	void __iomem *smi_reg = msp->base + SMI_REG;
+	int ret;
+
+	if (smi_wait_ready(msp)) {
+		printk("mv643xx_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
 	}
 
 	writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
 
-	/* now wait for the data to be valid */
-	for (i = 0; !(readl(smi_reg) & SMI_READ_VALID); i++) {
-		if (i == 1000) {
-			printk("%s: PHY read timeout\n", mp->dev->name);
-			goto out;
-		}
-		udelay(10);
+	if (smi_wait_ready(msp)) {
+		printk("mv643xx_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
 	}
 
-	*value = readl(smi_reg) & 0xffff;
-out:
-	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
+	ret = readl(smi_reg);
+	if (!(ret & SMI_READ_VALID)) {
+		printk("mv643xx_eth: SMI bus read not valid\n");
+		return -ENODEV;
+	}
+
+	return ret & 0xffff;
 }
 
-static void smi_reg_write(struct mv643xx_eth_private *mp,
-			  unsigned int addr,
-			  unsigned int reg, unsigned int value)
+static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
 {
-	void __iomem *smi_reg = mp->shared_smi->base + SMI_REG;
-	unsigned long flags;
-	int i;
-
-	/* the SMI register is a shared resource */
-	spin_lock_irqsave(&mp->shared_smi->phy_lock, flags);
+	struct mv643xx_eth_shared_private *msp = bus->priv;
+	void __iomem *smi_reg = msp->base + SMI_REG;
 
-	/* wait for the SMI register to become available */
-	for (i = 0; readl(smi_reg) & SMI_BUSY; i++) {
-		if (i == 1000) {
-			printk("%s: PHY busy timeout\n", mp->dev->name);
-			goto out;
-		}
-		udelay(10);
+	if (smi_wait_ready(msp)) {
+		printk("mv643xx_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
 	}
 
 	writel(SMI_OPCODE_WRITE | (reg << 21) |
-		(addr << 16) | (value & 0xffff), smi_reg);
-out:
-	spin_unlock_irqrestore(&mp->shared_smi->phy_lock, flags);
+		(addr << 16) | (val & 0xffff), smi_reg);
+
+	if (smi_wait_ready(msp)) {
+		printk("mv643xx_eth: SMI bus busy timeout\n");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
 }
 
 
-/* mib counters *************************************************************/
+/* statistics ***************************************************************/
+static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	unsigned long tx_packets = 0;
+	unsigned long tx_bytes = 0;
+	unsigned long tx_dropped = 0;
+	int i;
+
+	for (i = 0; i < mp->txq_count; i++) {
+		struct tx_queue *txq = mp->txq + i;
+
+		tx_packets += txq->tx_packets;
+		tx_bytes += txq->tx_bytes;
+		tx_dropped += txq->tx_dropped;
+	}
+
+	stats->tx_packets = tx_packets;
+	stats->tx_bytes = tx_bytes;
+	stats->tx_dropped = tx_dropped;
+
+	return stats;
+}
+
 static inline u32 mib_read(struct mv643xx_eth_private *mp, int offset)
 {
 	return rdl(mp, MIB_COUNTERS(mp->port_num) + offset);
@@ -1059,6 +1166,7 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 {
 	struct mib_counters *p = &mp->mib_counters;
 
+	spin_lock(&mp->mib_counters_lock);
 	p->good_octets_received += mib_read(mp, 0x00);
 	p->good_octets_received += (u64)mib_read(mp, 0x04) << 32;
 	p->bad_octets_received += mib_read(mp, 0x08);
@@ -1091,6 +1199,16 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
 	p->bad_crc_event += mib_read(mp, 0x74);
 	p->collision += mib_read(mp, 0x78);
 	p->late_collision += mib_read(mp, 0x7c);
+	spin_unlock(&mp->mib_counters_lock);
+
+	mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
+}
+
+static void mib_counters_timer_wrapper(unsigned long _mp)
+{
+	struct mv643xx_eth_private *mp = (void *)_mp;
+
+	mib_counters_update(mp);
 }
 
 
@@ -1156,9 +1274,9 @@ static int mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int err;
 
-	spin_lock_irq(&mp->lock);
-	err = mii_ethtool_gset(&mp->mii, cmd);
-	spin_unlock_irq(&mp->lock);
+	err = phy_read_status(mp->phy);
+	if (err == 0)
+		err = phy_ethtool_gset(mp->phy, cmd);
 
 	/*
 	 * The MAC does not support 1000baseT_Half.
@@ -1206,18 +1324,13 @@ static int mv643xx_eth_get_settings_phyless(struct net_device *dev, struct ethto
1206static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1324static int mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1207{ 1325{
1208 struct mv643xx_eth_private *mp = netdev_priv(dev); 1326 struct mv643xx_eth_private *mp = netdev_priv(dev);
1209 int err;
1210 1327
1211 /* 1328 /*
1212 * The MAC does not support 1000baseT_Half. 1329 * The MAC does not support 1000baseT_Half.
1213 */ 1330 */
1214 cmd->advertising &= ~ADVERTISED_1000baseT_Half; 1331 cmd->advertising &= ~ADVERTISED_1000baseT_Half;
1215 1332
1216 spin_lock_irq(&mp->lock); 1333 return phy_ethtool_sset(mp->phy, cmd);
1217 err = mii_ethtool_sset(&mp->mii, cmd);
1218 spin_unlock_irq(&mp->lock);
1219
1220 return err;
1221} 1334}
1222 1335
1223static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd) 1336static int mv643xx_eth_set_settings_phyless(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -1239,7 +1352,7 @@ static int mv643xx_eth_nway_reset(struct net_device *dev)
1239{ 1352{
1240 struct mv643xx_eth_private *mp = netdev_priv(dev); 1353 struct mv643xx_eth_private *mp = netdev_priv(dev);
1241 1354
1242 return mii_nway_restart(&mp->mii); 1355 return genphy_restart_aneg(mp->phy);
1243} 1356}
1244 1357
1245static int mv643xx_eth_nway_reset_phyless(struct net_device *dev) 1358static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
@@ -1249,14 +1362,7 @@ static int mv643xx_eth_nway_reset_phyless(struct net_device *dev)
1249 1362
1250static u32 mv643xx_eth_get_link(struct net_device *dev) 1363static u32 mv643xx_eth_get_link(struct net_device *dev)
1251{ 1364{
1252 struct mv643xx_eth_private *mp = netdev_priv(dev); 1365 return !!netif_carrier_ok(dev);
1253
1254 return mii_link_ok(&mp->mii);
1255}
1256
1257static u32 mv643xx_eth_get_link_phyless(struct net_device *dev)
1258{
1259 return 1;
1260} 1366}
1261 1367
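The ethtool hunks above convert the driver from the generic MII library (mii_ethtool_gset() and friends, which issue raw MDIO reads under the driver's own spinlock) to phylib, which serializes access through the attached struct phy_device. A minimal sketch of the resulting glue, assuming a driver-private struct holding the phy_device pointer obtained via phy_attach():

	#include <linux/phy.h>
	#include <linux/ethtool.h>
	#include <linux/netdevice.h>

	struct my_priv {
		struct phy_device *phy;
	};

	static int my_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	{
		struct my_priv *priv = netdev_priv(dev);
		int err;

		err = phy_read_status(priv->phy);	/* refresh cached link state */
		if (err == 0)
			err = phy_ethtool_gset(priv->phy, cmd);
		return err;
	}

	static u32 my_get_link(struct net_device *dev)
	{
		/* phylib keeps the carrier flag current, so just report it */
		return !!netif_carrier_ok(dev);
	}

Since get_link no longer touches the PHY at all, it can be shared between the PHY and PHY-less variants, which is why the phyless ethtool_ops below switches to the common mv643xx_eth_get_link().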
1262static void mv643xx_eth_get_strings(struct net_device *dev, 1368static void mv643xx_eth_get_strings(struct net_device *dev,
@@ -1277,9 +1383,10 @@ static void mv643xx_eth_get_ethtool_stats(struct net_device *dev,
1277 struct ethtool_stats *stats, 1383 struct ethtool_stats *stats,
1278 uint64_t *data) 1384 uint64_t *data)
1279{ 1385{
1280 struct mv643xx_eth_private *mp = dev->priv; 1386 struct mv643xx_eth_private *mp = netdev_priv(dev);
1281 int i; 1387 int i;
1282 1388
1389 mv643xx_eth_get_stats(dev);
1283 mib_counters_update(mp); 1390 mib_counters_update(mp);
1284 1391
1285 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) { 1392 for (i = 0; i < ARRAY_SIZE(mv643xx_eth_stats); i++) {
@@ -1323,7 +1430,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops_phyless = {
1323 .set_settings = mv643xx_eth_set_settings_phyless, 1430 .set_settings = mv643xx_eth_set_settings_phyless,
1324 .get_drvinfo = mv643xx_eth_get_drvinfo, 1431 .get_drvinfo = mv643xx_eth_get_drvinfo,
1325 .nway_reset = mv643xx_eth_nway_reset_phyless, 1432 .nway_reset = mv643xx_eth_nway_reset_phyless,
1326 .get_link = mv643xx_eth_get_link_phyless, 1433 .get_link = mv643xx_eth_get_link,
1327 .set_sg = ethtool_op_set_sg, 1434 .set_sg = ethtool_op_set_sg,
1328 .get_strings = mv643xx_eth_get_strings, 1435 .get_strings = mv643xx_eth_get_strings,
1329 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1436 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
@@ -1487,7 +1594,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1487 1594
1488 size = rxq->rx_ring_size * sizeof(struct rx_desc); 1595 size = rxq->rx_ring_size * sizeof(struct rx_desc);
1489 1596
1490 if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) { 1597 if (index == 0 && size <= mp->rx_desc_sram_size) {
1491 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr, 1598 rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
1492 mp->rx_desc_sram_size); 1599 mp->rx_desc_sram_size);
1493 rxq->rx_desc_dma = mp->rx_desc_sram_addr; 1600 rxq->rx_desc_dma = mp->rx_desc_sram_addr;
@@ -1515,20 +1622,21 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
1515 1622
1516 rx_desc = (struct rx_desc *)rxq->rx_desc_area; 1623 rx_desc = (struct rx_desc *)rxq->rx_desc_area;
1517 for (i = 0; i < rxq->rx_ring_size; i++) { 1624 for (i = 0; i < rxq->rx_ring_size; i++) {
1518 int nexti = (i + 1) % rxq->rx_ring_size; 1625 int nexti;
1626
1627 nexti = i + 1;
1628 if (nexti == rxq->rx_ring_size)
1629 nexti = 0;
1630
1519 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma + 1631 rx_desc[i].next_desc_ptr = rxq->rx_desc_dma +
1520 nexti * sizeof(struct rx_desc); 1632 nexti * sizeof(struct rx_desc);
1521 } 1633 }
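The descriptor-chaining loop trades the per-iteration modulo for a compare-and-reset, saving an integer division per descriptor; either way the last entry's next pointer wraps back to entry 0, forming the circular ring the DMA engine follows. The pattern in isolation (a generic sketch over the rx_desc layout used here):

	/* Link ring_size descriptors into a circular chain. */
	static void chain_ring(struct rx_desc *desc, dma_addr_t desc_dma,
			       int ring_size)
	{
		int i;

		for (i = 0; i < ring_size; i++) {
			int nexti = i + 1;

			if (nexti == ring_size)	/* last entry wraps to 0 */
				nexti = 0;

			desc[i].next_desc_ptr = desc_dma +
					nexti * sizeof(struct rx_desc);
		}
	}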
1522 1634
1523 init_timer(&rxq->rx_oom);
1524 rxq->rx_oom.data = (unsigned long)rxq;
1525 rxq->rx_oom.function = rxq_refill_timer_wrapper;
1526
1527 return 0; 1635 return 0;
1528 1636
1529 1637
1530out_free: 1638out_free:
1531 if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) 1639 if (index == 0 && size <= mp->rx_desc_sram_size)
1532 iounmap(rxq->rx_desc_area); 1640 iounmap(rxq->rx_desc_area);
1533 else 1641 else
1534 dma_free_coherent(NULL, size, 1642 dma_free_coherent(NULL, size,
@@ -1546,8 +1654,6 @@ static void rxq_deinit(struct rx_queue *rxq)
1546 1654
1547 rxq_disable(rxq); 1655 rxq_disable(rxq);
1548 1656
1549 del_timer_sync(&rxq->rx_oom);
1550
1551 for (i = 0; i < rxq->rx_ring_size; i++) { 1657 for (i = 0; i < rxq->rx_ring_size; i++) {
1552 if (rxq->rx_skb[i]) { 1658 if (rxq->rx_skb[i]) {
1553 dev_kfree_skb(rxq->rx_skb[i]); 1659 dev_kfree_skb(rxq->rx_skb[i]);
@@ -1561,7 +1667,7 @@ static void rxq_deinit(struct rx_queue *rxq)
1561 rxq->rx_desc_count); 1667 rxq->rx_desc_count);
1562 } 1668 }
1563 1669
1564 if (rxq->index == mp->rxq_primary && 1670 if (rxq->index == 0 &&
1565 rxq->rx_desc_area_size <= mp->rx_desc_sram_size) 1671 rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
1566 iounmap(rxq->rx_desc_area); 1672 iounmap(rxq->rx_desc_area);
1567 else 1673 else
@@ -1588,7 +1694,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1588 1694
1589 size = txq->tx_ring_size * sizeof(struct tx_desc); 1695 size = txq->tx_ring_size * sizeof(struct tx_desc);
1590 1696
1591 if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) { 1697 if (index == 0 && size <= mp->tx_desc_sram_size) {
1592 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr, 1698 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
1593 mp->tx_desc_sram_size); 1699 mp->tx_desc_sram_size);
1594 txq->tx_desc_dma = mp->tx_desc_sram_addr; 1700 txq->tx_desc_dma = mp->tx_desc_sram_addr;
@@ -1601,120 +1707,97 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
1601 if (txq->tx_desc_area == NULL) { 1707 if (txq->tx_desc_area == NULL) {
1602 dev_printk(KERN_ERR, &mp->dev->dev, 1708 dev_printk(KERN_ERR, &mp->dev->dev,
1603 "can't allocate tx ring (%d bytes)\n", size); 1709 "can't allocate tx ring (%d bytes)\n", size);
1604 goto out; 1710 return -ENOMEM;
1605 } 1711 }
1606 memset(txq->tx_desc_area, 0, size); 1712 memset(txq->tx_desc_area, 0, size);
1607 1713
1608 txq->tx_desc_area_size = size; 1714 txq->tx_desc_area_size = size;
1609 txq->tx_skb = kmalloc(txq->tx_ring_size * sizeof(*txq->tx_skb),
1610 GFP_KERNEL);
1611 if (txq->tx_skb == NULL) {
1612 dev_printk(KERN_ERR, &mp->dev->dev,
1613 "can't allocate tx skb ring\n");
1614 goto out_free;
1615 }
1616 1715
1617 tx_desc = (struct tx_desc *)txq->tx_desc_area; 1716 tx_desc = (struct tx_desc *)txq->tx_desc_area;
1618 for (i = 0; i < txq->tx_ring_size; i++) { 1717 for (i = 0; i < txq->tx_ring_size; i++) {
1619 struct tx_desc *txd = tx_desc + i; 1718 struct tx_desc *txd = tx_desc + i;
1620 int nexti = (i + 1) % txq->tx_ring_size; 1719 int nexti;
1720
1721 nexti = i + 1;
1722 if (nexti == txq->tx_ring_size)
1723 nexti = 0;
1621 1724
1622 txd->cmd_sts = 0; 1725 txd->cmd_sts = 0;
1623 txd->next_desc_ptr = txq->tx_desc_dma + 1726 txd->next_desc_ptr = txq->tx_desc_dma +
1624 nexti * sizeof(struct tx_desc); 1727 nexti * sizeof(struct tx_desc);
1625 } 1728 }
1626 1729
1627 return 0; 1730 skb_queue_head_init(&txq->tx_skb);
1628
1629 1731
1630out_free: 1732 return 0;
1631 if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
1632 iounmap(txq->tx_desc_area);
1633 else
1634 dma_free_coherent(NULL, size,
1635 txq->tx_desc_area,
1636 txq->tx_desc_dma);
1637
1638out:
1639 return -ENOMEM;
1640} 1733}
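txq_init() also drops the kmalloc'ed tx_skb[] pointer array in favour of a struct sk_buff_head initialized with skb_queue_head_init(). Because descriptors complete strictly in order, in-flight skbs can simply be kept on a FIFO list; a hedged sketch of both halves of that pattern (the reclaim side is not shown in this hunk):

	#include <linux/skbuff.h>

	/* transmit side: remember the skb until DMA has consumed it */
	static void tx_remember_skb(struct sk_buff_head *list, struct sk_buff *skb)
	{
		__skb_queue_tail(list, skb);	/* caller provides locking */
	}

	/* reclaim side: free in FIFO order as descriptors complete */
	static void tx_reclaim_one(struct sk_buff_head *list)
	{
		struct sk_buff *skb = __skb_dequeue(list);

		if (skb != NULL)
			dev_kfree_skb(skb);
	}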
1641 1734
1642static void txq_reclaim(struct tx_queue *txq, int force) 1735static void txq_deinit(struct tx_queue *txq)
1643{ 1736{
1644 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1737 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1645 unsigned long flags;
1646 1738
1647 spin_lock_irqsave(&mp->lock, flags); 1739 txq_disable(txq);
1648 while (txq->tx_desc_count > 0) { 1740 txq_reclaim(txq, txq->tx_ring_size, 1);
1649 int tx_index;
1650 struct tx_desc *desc;
1651 u32 cmd_sts;
1652 struct sk_buff *skb;
1653 dma_addr_t addr;
1654 int count;
1655
1656 tx_index = txq->tx_used_desc;
1657 desc = &txq->tx_desc_area[tx_index];
1658 cmd_sts = desc->cmd_sts;
1659 1741
1660 if (cmd_sts & BUFFER_OWNED_BY_DMA) { 1742 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
1661 if (!force)
1662 break;
1663 desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
1664 }
1665 1743
1666 txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size; 1744 if (txq->index == 0 &&
1667 txq->tx_desc_count--; 1745 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
1746 iounmap(txq->tx_desc_area);
1747 else
1748 dma_free_coherent(NULL, txq->tx_desc_area_size,
1749 txq->tx_desc_area, txq->tx_desc_dma);
1750}
1668 1751
1669 addr = desc->buf_ptr;
1670 count = desc->byte_cnt;
1671 skb = txq->tx_skb[tx_index];
1672 txq->tx_skb[tx_index] = NULL;
1673 1752
1674 if (cmd_sts & ERROR_SUMMARY) { 1753/* netdev ops and related ***************************************************/
1675 dev_printk(KERN_INFO, &mp->dev->dev, "tx error\n"); 1754static int mv643xx_eth_collect_events(struct mv643xx_eth_private *mp)
1676 mp->dev->stats.tx_errors++; 1755{
1677 } 1756 u32 int_cause;
1757 u32 int_cause_ext;
1678 1758
1679 /* 1759 int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
1680 * Drop mp->lock while we free the skb. 1760 (INT_TX_END | INT_RX | INT_EXT);
1681 */ 1761 if (int_cause == 0)
1682 spin_unlock_irqrestore(&mp->lock, flags); 1762 return 0;
1683 1763
1684 if (cmd_sts & TX_FIRST_DESC) 1764 int_cause_ext = 0;
1685 dma_unmap_single(NULL, addr, count, DMA_TO_DEVICE); 1765 if (int_cause & INT_EXT)
1686 else 1766 int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num));
1687 dma_unmap_page(NULL, addr, count, DMA_TO_DEVICE);
1688 1767
1689 if (skb) 1768 int_cause &= INT_TX_END | INT_RX;
1690 dev_kfree_skb_irq(skb); 1769 if (int_cause) {
1770 wrl(mp, INT_CAUSE(mp->port_num), ~int_cause);
1771 mp->work_tx_end |= ((int_cause & INT_TX_END) >> 19) &
1772 ~(rdl(mp, TXQ_COMMAND(mp->port_num)) & 0xff);
1773 mp->work_rx |= (int_cause & INT_RX) >> 2;
1774 }
1691 1775
1692 spin_lock_irqsave(&mp->lock, flags); 1776 int_cause_ext &= INT_EXT_LINK_PHY | INT_EXT_TX;
1777 if (int_cause_ext) {
1778 wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
1779 if (int_cause_ext & INT_EXT_LINK_PHY)
1780 mp->work_link = 1;
1781 mp->work_tx |= int_cause_ext & INT_EXT_TX;
1693 } 1782 }
1694 spin_unlock_irqrestore(&mp->lock, flags); 1783
1784 return 1;
1695} 1785}
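The shifts in mv643xx_eth_collect_events() line the hardware cause bits up with plain queue indices: judging by the constants, the per-queue RX cause bits occupy bits 2..9 of INT_CAUSE and the TX-end bits start at bit 19, so ">> 2" and ">> 19" each produce an eight-bit mask with bit N set for queue N. The poll loop can then treat all four work sources uniformly. As a hedged helper, the operation is just:

	/* Fold one source's cause bits into a queue bitmask; queue0_bit is
	 * the position of queue 0's cause bit (2 for RX, 19 for TX-end,
	 * inferred from the shifts above). */
	static u8 cause_to_queue_mask(u32 cause, u32 source_mask, int queue0_bit)
	{
		return (cause & source_mask) >> queue0_bit;
	}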
1696 1786
1697static void txq_deinit(struct tx_queue *txq) 1787static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
1698{ 1788{
1699 struct mv643xx_eth_private *mp = txq_to_mp(txq); 1789 struct net_device *dev = (struct net_device *)dev_id;
1700 1790 struct mv643xx_eth_private *mp = netdev_priv(dev);
1701 txq_disable(txq);
1702 txq_reclaim(txq, 1);
1703 1791
1704 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc); 1792 if (unlikely(!mv643xx_eth_collect_events(mp)))
1793 return IRQ_NONE;
1705 1794
1706 if (txq->index == mp->txq_primary && 1795 wrl(mp, INT_MASK(mp->port_num), 0);
1707 txq->tx_desc_area_size <= mp->tx_desc_sram_size) 1796 napi_schedule(&mp->napi);
1708 iounmap(txq->tx_desc_area);
1709 else
1710 dma_free_coherent(NULL, txq->tx_desc_area_size,
1711 txq->tx_desc_area, txq->tx_desc_dma);
1712 1797
1713 kfree(txq->tx_skb); 1798 return IRQ_HANDLED;
1714} 1799}
1715 1800
1716
1717/* netdev ops and related ***************************************************/
1718static void handle_link_event(struct mv643xx_eth_private *mp) 1801static void handle_link_event(struct mv643xx_eth_private *mp)
1719{ 1802{
1720 struct net_device *dev = mp->dev; 1803 struct net_device *dev = mp->dev;
@@ -1731,15 +1814,12 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
1731 printk(KERN_INFO "%s: link down\n", dev->name); 1814 printk(KERN_INFO "%s: link down\n", dev->name);
1732 1815
1733 netif_carrier_off(dev); 1816 netif_carrier_off(dev);
1734 netif_stop_queue(dev);
1735 1817
1736 for (i = 0; i < 8; i++) { 1818 for (i = 0; i < mp->txq_count; i++) {
1737 struct tx_queue *txq = mp->txq + i; 1819 struct tx_queue *txq = mp->txq + i;
1738 1820
1739 if (mp->txq_mask & (1 << i)) { 1821 txq_reclaim(txq, txq->tx_ring_size, 1);
1740 txq_reclaim(txq, 1); 1822 txq_reset_hw_ptr(txq);
1741 txq_reset_hw_ptr(txq);
1742 }
1743 } 1823 }
1744 } 1824 }
1745 return; 1825 return;
@@ -1767,119 +1847,93 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
1767 speed, duplex ? "full" : "half", 1847 speed, duplex ? "full" : "half",
1768 fc ? "en" : "dis"); 1848 fc ? "en" : "dis");
1769 1849
1770 if (!netif_carrier_ok(dev)) { 1850 if (!netif_carrier_ok(dev))
1771 netif_carrier_on(dev); 1851 netif_carrier_on(dev);
1772 netif_wake_queue(dev);
1773 }
1774} 1852}
1775 1853
1776static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id) 1854static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
1777{ 1855{
1778 struct net_device *dev = (struct net_device *)dev_id; 1856 struct mv643xx_eth_private *mp;
1779 struct mv643xx_eth_private *mp = netdev_priv(dev); 1857 int work_done;
1780 u32 int_cause;
1781 u32 int_cause_ext;
1782
1783 int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
1784 (INT_TX_END | INT_RX | INT_EXT);
1785 if (int_cause == 0)
1786 return IRQ_NONE;
1787
1788 int_cause_ext = 0;
1789 if (int_cause & INT_EXT) {
1790 int_cause_ext = rdl(mp, INT_CAUSE_EXT(mp->port_num))
1791 & (INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
1792 wrl(mp, INT_CAUSE_EXT(mp->port_num), ~int_cause_ext);
1793 }
1794
1795 if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK))
1796 handle_link_event(mp);
1797 1858
1798 /* 1859 mp = container_of(napi, struct mv643xx_eth_private, napi);
1799 * RxBuffer or RxError set for any of the 8 queues?
1800 */
1801#ifdef MV643XX_ETH_NAPI
1802 if (int_cause & INT_RX) {
1803 wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_RX));
1804 wrl(mp, INT_MASK(mp->port_num), 0x00000000);
1805 rdl(mp, INT_MASK(mp->port_num));
1806 1860
1807 netif_rx_schedule(dev, &mp->napi); 1861 mp->work_rx_refill |= mp->work_rx_oom;
1808 } 1862 mp->work_rx_oom = 0;
1809#else
1810 if (int_cause & INT_RX) {
1811 int i;
1812 1863
1813 for (i = 7; i >= 0; i--) 1864 work_done = 0;
1814 if (mp->rxq_mask & (1 << i)) 1865 while (work_done < budget) {
1815 rxq_process(mp->rxq + i, INT_MAX); 1866 u8 queue_mask;
1816 } 1867 int queue;
1817#endif 1868 int work_tbd;
1818 1869
1819 /* 1870 if (mp->work_link) {
1820 * TxBuffer or TxError set for any of the 8 queues? 1871 mp->work_link = 0;
1821 */ 1872 handle_link_event(mp);
1822 if (int_cause_ext & INT_EXT_TX) { 1873 continue;
1823 int i; 1874 }
1824 1875
1825 for (i = 0; i < 8; i++) 1876 queue_mask = mp->work_tx | mp->work_tx_end |
1826 if (mp->txq_mask & (1 << i)) 1877 mp->work_rx | mp->work_rx_refill;
1827 txq_reclaim(mp->txq + i, 0); 1878 if (!queue_mask) {
1879 if (mv643xx_eth_collect_events(mp))
1880 continue;
1881 break;
1882 }
1828 1883
1829 /* 1884 queue = fls(queue_mask) - 1;
1830 * Enough space again in the primary TX queue for a 1885 queue_mask = 1 << queue;
1831 * full packet? 1886
1832 */ 1887 work_tbd = budget - work_done;
1833 if (netif_carrier_ok(dev)) { 1888 if (work_tbd > 16)
1834 spin_lock(&mp->lock); 1889 work_tbd = 16;
1835 __txq_maybe_wake(mp->txq + mp->txq_primary); 1890
1836 spin_unlock(&mp->lock); 1891 if (mp->work_tx_end & queue_mask) {
1892 txq_kick(mp->txq + queue);
1893 } else if (mp->work_tx & queue_mask) {
1894 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
1895 txq_maybe_wake(mp->txq + queue);
1896 } else if (mp->work_rx & queue_mask) {
1897 work_done += rxq_process(mp->rxq + queue, work_tbd);
1898 } else if (mp->work_rx_refill & queue_mask) {
1899 work_done += rxq_refill(mp->rxq + queue, work_tbd);
1900 } else {
1901 BUG();
1837 } 1902 }
1838 } 1903 }
1839 1904
1840 /* 1905 if (work_done < budget) {
1841 * Any TxEnd interrupts? 1906 if (mp->work_rx_oom)
1842 */ 1907 mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
1843 if (int_cause & INT_TX_END) { 1908 napi_complete(napi);
1844 int i; 1909 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
1845 1910 }
1846 wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));
1847
1848 spin_lock(&mp->lock);
1849 for (i = 0; i < 8; i++) {
1850 struct tx_queue *txq = mp->txq + i;
1851 u32 hw_desc_ptr;
1852 u32 expected_ptr;
1853
1854 if ((int_cause & (INT_TX_END_0 << i)) == 0)
1855 continue;
1856 1911
1857 hw_desc_ptr = 1912 return work_done;
1858 rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i)); 1913}
1859 expected_ptr = (u32)txq->tx_desc_dma +
1860 txq->tx_curr_desc * sizeof(struct tx_desc);
1861 1914
1862 if (hw_desc_ptr != expected_ptr) 1915static inline void oom_timer_wrapper(unsigned long data)
1863 txq_enable(txq); 1916{
1864 } 1917 struct mv643xx_eth_private *mp = (void *)data;
1865 spin_unlock(&mp->lock);
1866 }
1867 1918
1868 return IRQ_HANDLED; 1919 napi_schedule(&mp->napi);
1869} 1920}
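The new poll loop services one source at a time: it ORs the four work bitmasks together, picks the highest-numbered pending queue with fls(), and caps each pass at 16 units of work so a single busy queue cannot monopolize the NAPI budget. When the budget is not exhausted it calls napi_complete() and unmasks the port interrupts, completing the NAPI contract. The fls() selection in isolation (helper name is illustrative):

	#include <linux/bitops.h>

	/* Pick the highest-numbered queue with pending work. fls() returns
	 * the 1-based index of the top set bit, so a mask of 0x05 (queues
	 * 0 and 2 pending) yields 3, i.e. queue 2; an empty mask yields -1. */
	static int pick_queue(u8 queue_mask)
	{
		return fls(queue_mask) - 1;
	}

Servicing higher-numbered queues first is consistent with the fixed-priority TX arbitration programmed in port_start().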
1870 1921
1871static void phy_reset(struct mv643xx_eth_private *mp) 1922static void phy_reset(struct mv643xx_eth_private *mp)
1872{ 1923{
1873 unsigned int data; 1924 int data;
1925
1926 data = phy_read(mp->phy, MII_BMCR);
1927 if (data < 0)
1928 return;
1874 1929
1875 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data);
1876 data |= BMCR_RESET; 1930 data |= BMCR_RESET;
1877 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data); 1931 if (phy_write(mp->phy, MII_BMCR, data) < 0)
1932 return;
1878 1933
1879 do { 1934 do {
1880 udelay(1); 1935 data = phy_read(mp->phy, MII_BMCR);
1881 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data); 1936 } while (data >= 0 && data & BMCR_RESET);
1882 } while (data & BMCR_RESET);
1883} 1937}
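phy_reset() now goes through phylib's phy_read()/phy_write(), which return a negative errno on MDIO failure, so the wait loop also terminates on bus errors rather than only when BMCR_RESET clears. As written it still has no iteration bound; a hypothetical bounded variant, for illustration only:

	#include <linux/mii.h>
	#include <linux/phy.h>
	#include <linux/delay.h>

	static int phy_reset_bounded(struct phy_device *phy)
	{
		int data, i;

		data = phy_read(phy, MII_BMCR);
		if (data < 0)
			return data;

		data = phy_write(phy, MII_BMCR, data | BMCR_RESET);
		if (data < 0)
			return data;

		for (i = 0; i < 1000; i++) {
			data = phy_read(phy, MII_BMCR);
			if (data < 0)
				return data;
			if (!(data & BMCR_RESET))	/* self-clears when done */
				return 0;
			udelay(10);
		}
		return -ETIMEDOUT;
	}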
1884 1938
1885static void port_start(struct mv643xx_eth_private *mp) 1939static void port_start(struct mv643xx_eth_private *mp)
@@ -1890,7 +1944,7 @@ static void port_start(struct mv643xx_eth_private *mp)
1890 /* 1944 /*
1891 * Perform PHY reset, if there is a PHY. 1945 * Perform PHY reset, if there is a PHY.
1892 */ 1946 */
1893 if (mp->phy_addr != -1) { 1947 if (mp->phy != NULL) {
1894 struct ethtool_cmd cmd; 1948 struct ethtool_cmd cmd;
1895 1949
1896 mv643xx_eth_get_settings(mp->dev, &cmd); 1950 mv643xx_eth_get_settings(mp->dev, &cmd);
@@ -1907,7 +1961,7 @@ static void port_start(struct mv643xx_eth_private *mp)
1907 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 1961 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1908 1962
1909 pscr |= DO_NOT_FORCE_LINK_FAIL; 1963 pscr |= DO_NOT_FORCE_LINK_FAIL;
1910 if (mp->phy_addr == -1) 1964 if (mp->phy == NULL)
1911 pscr |= FORCE_LINK_PASS; 1965 pscr |= FORCE_LINK_PASS;
1912 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr); 1966 wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr);
1913 1967
@@ -1917,12 +1971,9 @@ static void port_start(struct mv643xx_eth_private *mp)
1917 * Configure TX path and queues. 1971 * Configure TX path and queues.
1918 */ 1972 */
1919 tx_set_rate(mp, 1000000000, 16777216); 1973 tx_set_rate(mp, 1000000000, 16777216);
1920 for (i = 0; i < 8; i++) { 1974 for (i = 0; i < mp->txq_count; i++) {
1921 struct tx_queue *txq = mp->txq + i; 1975 struct tx_queue *txq = mp->txq + i;
1922 1976
1923 if ((mp->txq_mask & (1 << i)) == 0)
1924 continue;
1925
1926 txq_reset_hw_ptr(txq); 1977 txq_reset_hw_ptr(txq);
1927 txq_set_rate(txq, 1000000000, 16777216); 1978 txq_set_rate(txq, 1000000000, 16777216);
1928 txq_set_fixed_prio_mode(txq); 1979 txq_set_fixed_prio_mode(txq);
@@ -1935,9 +1986,10 @@ static void port_start(struct mv643xx_eth_private *mp)
1935 1986
1936 /* 1987 /*
1937 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast 1988 * Receive all unmatched unicast, TCP, UDP, BPDU and broadcast
1938 * frames to RX queue #0. 1989 * frames to RX queue #0, and include the pseudo-header when
1990 * calculating receive checksums.
1939 */ 1991 */
1940 wrl(mp, PORT_CONFIG(mp->port_num), 0x00000000); 1992 wrl(mp, PORT_CONFIG(mp->port_num), 0x02000000);
1941 1993
1942 /* 1994 /*
1943 * Treat BPDUs as normal multicasts, and disable partition mode. 1995 * Treat BPDUs as normal multicasts, and disable partition mode.
@@ -1947,14 +1999,11 @@ static void port_start(struct mv643xx_eth_private *mp)
1947 /* 1999 /*
1948 * Enable the receive queues. 2000 * Enable the receive queues.
1949 */ 2001 */
1950 for (i = 0; i < 8; i++) { 2002 for (i = 0; i < mp->rxq_count; i++) {
1951 struct rx_queue *rxq = mp->rxq + i; 2003 struct rx_queue *rxq = mp->rxq + i;
1952 int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i); 2004 int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
1953 u32 addr; 2005 u32 addr;
1954 2006
1955 if ((mp->rxq_mask & (1 << i)) == 0)
1956 continue;
1957
1958 addr = (u32)rxq->rx_desc_dma; 2007 addr = (u32)rxq->rx_desc_dma;
1959 addr += rxq->rx_curr_desc * sizeof(struct rx_desc); 2008 addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
1960 wrl(mp, off, addr); 2009 wrl(mp, off, addr);
@@ -2004,8 +2053,7 @@ static int mv643xx_eth_open(struct net_device *dev)
2004 rdl(mp, INT_CAUSE_EXT(mp->port_num)); 2053 rdl(mp, INT_CAUSE_EXT(mp->port_num));
2005 2054
2006 err = request_irq(dev->irq, mv643xx_eth_irq, 2055 err = request_irq(dev->irq, mv643xx_eth_irq,
2007 IRQF_SHARED | IRQF_SAMPLE_RANDOM, 2056 IRQF_SHARED, dev->name, dev);
2008 dev->name, dev);
2009 if (err) { 2057 if (err) {
2010 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n"); 2058 dev_printk(KERN_ERR, &dev->dev, "can't assign irq\n");
2011 return -EAGAIN; 2059 return -EAGAIN;
@@ -2013,58 +2061,49 @@ static int mv643xx_eth_open(struct net_device *dev)
2013 2061
2014 init_mac_tables(mp); 2062 init_mac_tables(mp);
2015 2063
2016 for (i = 0; i < 8; i++) { 2064 napi_enable(&mp->napi);
2017 if ((mp->rxq_mask & (1 << i)) == 0)
2018 continue;
2019 2065
2066 for (i = 0; i < mp->rxq_count; i++) {
2020 err = rxq_init(mp, i); 2067 err = rxq_init(mp, i);
2021 if (err) { 2068 if (err) {
2022 while (--i >= 0) 2069 while (--i >= 0)
2023 if (mp->rxq_mask & (1 << i)) 2070 rxq_deinit(mp->rxq + i);
2024 rxq_deinit(mp->rxq + i);
2025 goto out; 2071 goto out;
2026 } 2072 }
2027 2073
2028 rxq_refill(mp->rxq + i); 2074 rxq_refill(mp->rxq + i, INT_MAX);
2029 } 2075 }
2030 2076
2031 for (i = 0; i < 8; i++) { 2077 if (mp->work_rx_oom) {
2032 if ((mp->txq_mask & (1 << i)) == 0) 2078 mp->rx_oom.expires = jiffies + (HZ / 10);
2033 continue; 2079 add_timer(&mp->rx_oom);
2080 }
2034 2081
2082 for (i = 0; i < mp->txq_count; i++) {
2035 err = txq_init(mp, i); 2083 err = txq_init(mp, i);
2036 if (err) { 2084 if (err) {
2037 while (--i >= 0) 2085 while (--i >= 0)
2038 if (mp->txq_mask & (1 << i)) 2086 txq_deinit(mp->txq + i);
2039 txq_deinit(mp->txq + i);
2040 goto out_free; 2087 goto out_free;
2041 } 2088 }
2042 } 2089 }
2043 2090
2044#ifdef MV643XX_ETH_NAPI
2045 napi_enable(&mp->napi);
2046#endif
2047
2048 netif_carrier_off(dev); 2091 netif_carrier_off(dev);
2049 netif_stop_queue(dev);
2050 2092
2051 port_start(mp); 2093 port_start(mp);
2052 2094
2053 set_rx_coal(mp, 0); 2095 set_rx_coal(mp, 0);
2054 set_tx_coal(mp, 0); 2096 set_tx_coal(mp, 0);
2055 2097
2056 wrl(mp, INT_MASK_EXT(mp->port_num), 2098 wrl(mp, INT_MASK_EXT(mp->port_num), INT_EXT_LINK_PHY | INT_EXT_TX);
2057 INT_EXT_LINK | INT_EXT_PHY | INT_EXT_TX);
2058
2059 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT); 2099 wrl(mp, INT_MASK(mp->port_num), INT_TX_END | INT_RX | INT_EXT);
2060 2100
2061 return 0; 2101 return 0;
2062 2102
2063 2103
2064out_free: 2104out_free:
2065 for (i = 0; i < 8; i++) 2105 for (i = 0; i < mp->rxq_count; i++)
2066 if (mp->rxq_mask & (1 << i)) 2106 rxq_deinit(mp->rxq + i);
2067 rxq_deinit(mp->rxq + i);
2068out: 2107out:
2069 free_irq(dev->irq, dev); 2108 free_irq(dev->irq, dev);
2070 2109
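mv643xx_eth_open() keeps the kernel's standard unwind shape through all this restructuring: each failing step tears down whatever the loop already initialized (the while (--i >= 0) walk) and then jumps to a label that releases the resources acquired earlier. The skeleton of the idiom, with hypothetical helpers:

	static int my_open(struct net_device *dev)
	{
		int i, err;

		err = request_irq(dev->irq, my_irq, IRQF_SHARED, dev->name, dev);
		if (err)
			return err;

		for (i = 0; i < nr_queues; i++) {
			err = queue_init(dev, i);
			if (err) {
				while (--i >= 0)	/* unwind the ones that worked */
					queue_deinit(dev, i);
				goto out_irq;
			}
		}

		return 0;

	out_irq:
		free_irq(dev->irq, dev);
		return err;
	}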
@@ -2076,12 +2115,10 @@ static void port_reset(struct mv643xx_eth_private *mp)
2076 unsigned int data; 2115 unsigned int data;
2077 int i; 2116 int i;
2078 2117
2079 for (i = 0; i < 8; i++) { 2118 for (i = 0; i < mp->rxq_count; i++)
2080 if (mp->rxq_mask & (1 << i)) 2119 rxq_disable(mp->rxq + i);
2081 rxq_disable(mp->rxq + i); 2120 for (i = 0; i < mp->txq_count; i++)
2082 if (mp->txq_mask & (1 << i)) 2121 txq_disable(mp->txq + i);
2083 txq_disable(mp->txq + i);
2084 }
2085 2122
2086 while (1) { 2123 while (1) {
2087 u32 ps = rdl(mp, PORT_STATUS(mp->port_num)); 2124 u32 ps = rdl(mp, PORT_STATUS(mp->port_num));
@@ -2107,23 +2144,24 @@ static int mv643xx_eth_stop(struct net_device *dev)
2107 wrl(mp, INT_MASK(mp->port_num), 0x00000000); 2144 wrl(mp, INT_MASK(mp->port_num), 0x00000000);
2108 rdl(mp, INT_MASK(mp->port_num)); 2145 rdl(mp, INT_MASK(mp->port_num));
2109 2146
2110#ifdef MV643XX_ETH_NAPI 2147 del_timer_sync(&mp->mib_counters_timer);
2148
2111 napi_disable(&mp->napi); 2149 napi_disable(&mp->napi);
2112#endif 2150
2151 del_timer_sync(&mp->rx_oom);
2152
2113 netif_carrier_off(dev); 2153 netif_carrier_off(dev);
2114 netif_stop_queue(dev);
2115 2154
2116 free_irq(dev->irq, dev); 2155 free_irq(dev->irq, dev);
2117 2156
2118 port_reset(mp); 2157 port_reset(mp);
2158 mv643xx_eth_get_stats(dev);
2119 mib_counters_update(mp); 2159 mib_counters_update(mp);
2120 2160
2121 for (i = 0; i < 8; i++) { 2161 for (i = 0; i < mp->rxq_count; i++)
2122 if (mp->rxq_mask & (1 << i)) 2162 rxq_deinit(mp->rxq + i);
2123 rxq_deinit(mp->rxq + i); 2163 for (i = 0; i < mp->txq_count; i++)
2124 if (mp->txq_mask & (1 << i)) 2164 txq_deinit(mp->txq + i);
2125 txq_deinit(mp->txq + i);
2126 }
2127 2165
2128 return 0; 2166 return 0;
2129} 2167}
@@ -2132,8 +2170,8 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2132{ 2170{
2133 struct mv643xx_eth_private *mp = netdev_priv(dev); 2171 struct mv643xx_eth_private *mp = netdev_priv(dev);
2134 2172
2135 if (mp->phy_addr != -1) 2173 if (mp->phy != NULL)
2136 return generic_mii_ioctl(&mp->mii, if_mii(ifr), cmd, NULL); 2174 return phy_mii_ioctl(mp->phy, if_mii(ifr), cmd);
2137 2175
2138 return -EOPNOTSUPP; 2176 return -EOPNOTSUPP;
2139} 2177}
@@ -2173,12 +2211,10 @@ static void tx_timeout_task(struct work_struct *ugly)
2173 2211
2174 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task); 2212 mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
2175 if (netif_running(mp->dev)) { 2213 if (netif_running(mp->dev)) {
2176 netif_stop_queue(mp->dev); 2214 netif_tx_stop_all_queues(mp->dev);
2177
2178 port_reset(mp); 2215 port_reset(mp);
2179 port_start(mp); 2216 port_start(mp);
2180 2217 netif_tx_wake_all_queues(mp->dev);
2181 __txq_maybe_wake(mp->txq + mp->txq_primary);
2182 } 2218 }
2183} 2219}
2184 2220
@@ -2205,22 +2241,6 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
2205} 2241}
2206#endif 2242#endif
2207 2243
2208static int mv643xx_eth_mdio_read(struct net_device *dev, int addr, int reg)
2209{
2210 struct mv643xx_eth_private *mp = netdev_priv(dev);
2211 int val;
2212
2213 smi_reg_read(mp, addr, reg, &val);
2214
2215 return val;
2216}
2217
2218static void mv643xx_eth_mdio_write(struct net_device *dev, int addr, int reg, int val)
2219{
2220 struct mv643xx_eth_private *mp = netdev_priv(dev);
2221 smi_reg_write(mp, addr, reg, val);
2222}
2223
2224 2244
2225/* platform glue ************************************************************/ 2245/* platform glue ************************************************************/
2226static void 2246static void
@@ -2272,14 +2292,20 @@ static void infer_hw_params(struct mv643xx_eth_shared_private *msp)
2272 msp->extended_rx_coal_limit = 0; 2292 msp->extended_rx_coal_limit = 0;
2273 2293
2274 /* 2294 /*
2275 * Check whether the TX rate control registers are in the 2295 * Check whether the MAC supports TX rate control, and if
2276 * old or the new place. 2296 * yes, whether its associated registers are in the old or
2297 * the new place.
2277 */ 2298 */
2278 writel(1, msp->base + TX_BW_MTU_MOVED(0)); 2299 writel(1, msp->base + TX_BW_MTU_MOVED(0));
2279 if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) 2300 if (readl(msp->base + TX_BW_MTU_MOVED(0)) & 1) {
2280 msp->tx_bw_control_moved = 1; 2301 msp->tx_bw_control = TX_BW_CONTROL_NEW_LAYOUT;
2281 else 2302 } else {
2282 msp->tx_bw_control_moved = 0; 2303 writel(7, msp->base + TX_BW_RATE(0));
2304 if (readl(msp->base + TX_BW_RATE(0)) & 7)
2305 msp->tx_bw_control = TX_BW_CONTROL_OLD_LAYOUT;
2306 else
2307 msp->tx_bw_control = TX_BW_CONTROL_ABSENT;
2308 }
2283} 2309}
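infer_hw_params() distinguishes the register layouts by write-and-read-back probing: a 1 written to the new-layout TX_BW_MTU_MOVED slot sticks only if that register exists, and failing that, a 7 written to the old TX_BW_RATE slot distinguishes the old layout from silicon with no rate control at all (unimplemented addresses read back as zero on this family). The generic probe, as a sketch:

	#include <linux/io.h>
	#include <linux/types.h>

	/* Detect an optional register by writing a known value and checking
	 * whether it latches; assumes unimplemented registers read back as
	 * zero, as this driver's probing does. */
	static bool reg_is_implemented(void __iomem *reg, u32 probe_val)
	{
		writel(probe_val, reg);
		return (readl(reg) & probe_val) == probe_val;
	}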
2284 2310
2285static int mv643xx_eth_shared_probe(struct platform_device *pdev) 2311static int mv643xx_eth_shared_probe(struct platform_device *pdev)
@@ -2309,7 +2335,41 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2309 if (msp->base == NULL) 2335 if (msp->base == NULL)
2310 goto out_free; 2336 goto out_free;
2311 2337
2312 spin_lock_init(&msp->phy_lock); 2338 /*
2339 * Set up and register SMI bus.
2340 */
2341 if (pd == NULL || pd->shared_smi == NULL) {
2342 msp->smi_bus.priv = msp;
2343 msp->smi_bus.name = "mv643xx_eth smi";
2344 msp->smi_bus.read = smi_bus_read;
2345	msp->smi_bus.write = smi_bus_write;
2346 snprintf(msp->smi_bus.id, MII_BUS_ID_SIZE, "%d", pdev->id);
2347 msp->smi_bus.dev = &pdev->dev;
2348 msp->smi_bus.phy_mask = 0xffffffff;
2349 if (mdiobus_register(&msp->smi_bus) < 0)
2350 goto out_unmap;
2351 msp->smi = msp;
2352 } else {
2353 msp->smi = platform_get_drvdata(pd->shared_smi);
2354 }
2355
2356 msp->err_interrupt = NO_IRQ;
2357 init_waitqueue_head(&msp->smi_busy_wait);
2358
2359 /*
2360 * Check whether the error interrupt is hooked up.
2361 */
2362 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2363 if (res != NULL) {
2364 int err;
2365
2366 err = request_irq(res->start, mv643xx_eth_err_irq,
2367 IRQF_SHARED, "mv643xx_eth", msp);
2368 if (!err) {
2369 writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
2370 msp->err_interrupt = res->start;
2371 }
2372 }
2313 2373
2314 /* 2374 /*
2315 * (Re-)program MBUS remapping windows if we are asked to. 2375 * (Re-)program MBUS remapping windows if we are asked to.
@@ -2327,6 +2387,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2327 2387
2328 return 0; 2388 return 0;
2329 2389
2390out_unmap:
2391 iounmap(msp->base);
2330out_free: 2392out_free:
2331 kfree(msp); 2393 kfree(msp);
2332out: 2394out:
@@ -2336,7 +2398,12 @@ out:
2336static int mv643xx_eth_shared_remove(struct platform_device *pdev) 2398static int mv643xx_eth_shared_remove(struct platform_device *pdev)
2337{ 2399{
2338 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev); 2400 struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
2401 struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
2339 2402
2403 if (pd == NULL || pd->shared_smi == NULL)
2404 mdiobus_unregister(&msp->smi_bus);
2405 if (msp->err_interrupt != NO_IRQ)
2406 free_irq(msp->err_interrupt, msp);
2340 iounmap(msp->base); 2407 iounmap(msp->base);
2341 kfree(msp); 2408 kfree(msp);
2342 2409
@@ -2382,33 +2449,13 @@ static void set_params(struct mv643xx_eth_private *mp,
2382 else 2449 else
2383 uc_addr_get(mp, dev->dev_addr); 2450 uc_addr_get(mp, dev->dev_addr);
2384 2451
2385 if (pd->phy_addr == -1) {
2386 mp->shared_smi = NULL;
2387 mp->phy_addr = -1;
2388 } else {
2389 mp->shared_smi = mp->shared;
2390 if (pd->shared_smi != NULL)
2391 mp->shared_smi = platform_get_drvdata(pd->shared_smi);
2392
2393 if (pd->force_phy_addr || pd->phy_addr) {
2394 mp->phy_addr = pd->phy_addr & 0x3f;
2395 phy_addr_set(mp, mp->phy_addr);
2396 } else {
2397 mp->phy_addr = phy_addr_get(mp);
2398 }
2399 }
2400
2401 mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE; 2452 mp->default_rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
2402 if (pd->rx_queue_size) 2453 if (pd->rx_queue_size)
2403 mp->default_rx_ring_size = pd->rx_queue_size; 2454 mp->default_rx_ring_size = pd->rx_queue_size;
2404 mp->rx_desc_sram_addr = pd->rx_sram_addr; 2455 mp->rx_desc_sram_addr = pd->rx_sram_addr;
2405 mp->rx_desc_sram_size = pd->rx_sram_size; 2456 mp->rx_desc_sram_size = pd->rx_sram_size;
2406 2457
2407 if (pd->rx_queue_mask) 2458 mp->rxq_count = pd->rx_queue_count ? : 1;
2408 mp->rxq_mask = pd->rx_queue_mask;
2409 else
2410 mp->rxq_mask = 0x01;
2411 mp->rxq_primary = fls(mp->rxq_mask) - 1;
2412 2459
2413 mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE; 2460 mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
2414 if (pd->tx_queue_size) 2461 if (pd->tx_queue_size)
@@ -2416,76 +2463,63 @@ static void set_params(struct mv643xx_eth_private *mp,
2416 mp->tx_desc_sram_addr = pd->tx_sram_addr; 2463 mp->tx_desc_sram_addr = pd->tx_sram_addr;
2417 mp->tx_desc_sram_size = pd->tx_sram_size; 2464 mp->tx_desc_sram_size = pd->tx_sram_size;
2418 2465
2419 if (pd->tx_queue_mask) 2466 mp->txq_count = pd->tx_queue_count ? : 1;
2420 mp->txq_mask = pd->tx_queue_mask;
2421 else
2422 mp->txq_mask = 0x01;
2423 mp->txq_primary = fls(mp->txq_mask) - 1;
2424} 2467}
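The "? : 1" in set_params() is GCC's omitted-middle-operand conditional, widely used in the kernel: a ? : b evaluates a once and yields it if nonzero, else b. In plain C the rxq_count line is equivalent to:

	mp->rxq_count = pd->rx_queue_count ? pd->rx_queue_count : 1;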
2425 2468
2426static int phy_detect(struct mv643xx_eth_private *mp) 2469static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
2470 int phy_addr)
2427{ 2471{
2428 unsigned int data; 2472 struct mii_bus *bus = &mp->shared->smi->smi_bus;
2429 unsigned int data2; 2473 struct phy_device *phydev;
2474 int start;
2475 int num;
2476 int i;
2477
2478 if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
2479 start = phy_addr_get(mp) & 0x1f;
2480 num = 32;
2481 } else {
2482 start = phy_addr & 0x1f;
2483 num = 1;
2484 }
2430 2485
2431 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data); 2486 phydev = NULL;
2432 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data ^ BMCR_ANENABLE); 2487 for (i = 0; i < num; i++) {
2488 int addr = (start + i) & 0x1f;
2433 2489
2434 smi_reg_read(mp, mp->phy_addr, MII_BMCR, &data2); 2490 if (bus->phy_map[addr] == NULL)
2435 if (((data ^ data2) & BMCR_ANENABLE) == 0) 2491 mdiobus_scan(bus, addr);
2436 return -ENODEV;
2437 2492
2438 smi_reg_write(mp, mp->phy_addr, MII_BMCR, data); 2493 if (phydev == NULL) {
2494 phydev = bus->phy_map[addr];
2495 if (phydev != NULL)
2496 phy_addr_set(mp, addr);
2497 }
2498 }
2439 2499
2440 return 0; 2500 return phydev;
2441} 2501}
2442 2502
2443static int phy_init(struct mv643xx_eth_private *mp, 2503static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
2444 struct mv643xx_eth_platform_data *pd)
2445{ 2504{
2446 struct ethtool_cmd cmd; 2505 struct phy_device *phy = mp->phy;
2447 int err;
2448 2506
2449 err = phy_detect(mp);
2450 if (err) {
2451 dev_printk(KERN_INFO, &mp->dev->dev,
2452 "no PHY detected at addr %d\n", mp->phy_addr);
2453 return err;
2454 }
2455 phy_reset(mp); 2507 phy_reset(mp);
2456 2508
2457 mp->mii.phy_id = mp->phy_addr; 2509 phy_attach(mp->dev, phy->dev.bus_id, 0, PHY_INTERFACE_MODE_GMII);
2458 mp->mii.phy_id_mask = 0x3f; 2510
2459 mp->mii.reg_num_mask = 0x1f; 2511 if (speed == 0) {
2460 mp->mii.dev = mp->dev; 2512 phy->autoneg = AUTONEG_ENABLE;
2461 mp->mii.mdio_read = mv643xx_eth_mdio_read; 2513 phy->speed = 0;
2462 mp->mii.mdio_write = mv643xx_eth_mdio_write; 2514 phy->duplex = 0;
2463 2515 phy->advertising = phy->supported | ADVERTISED_Autoneg;
2464 mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
2465
2466 memset(&cmd, 0, sizeof(cmd));
2467
2468 cmd.port = PORT_MII;
2469 cmd.transceiver = XCVR_INTERNAL;
2470 cmd.phy_address = mp->phy_addr;
2471 if (pd->speed == 0) {
2472 cmd.autoneg = AUTONEG_ENABLE;
2473 cmd.speed = SPEED_100;
2474 cmd.advertising = ADVERTISED_10baseT_Half |
2475 ADVERTISED_10baseT_Full |
2476 ADVERTISED_100baseT_Half |
2477 ADVERTISED_100baseT_Full;
2478 if (mp->mii.supports_gmii)
2479 cmd.advertising |= ADVERTISED_1000baseT_Full;
2480 } else { 2516 } else {
2481 cmd.autoneg = AUTONEG_DISABLE; 2517 phy->autoneg = AUTONEG_DISABLE;
2482 cmd.speed = pd->speed; 2518 phy->advertising = 0;
2483 cmd.duplex = pd->duplex; 2519 phy->speed = speed;
2520 phy->duplex = duplex;
2484 } 2521 }
2485 2522 phy_start_aneg(phy);
2486 mv643xx_eth_set_settings(mp->dev, &cmd);
2487
2488 return 0;
2489} 2523}
2490 2524
2491static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex) 2525static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
@@ -2499,7 +2533,7 @@ static void init_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
2499 } 2533 }
2500 2534
2501 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED; 2535 pscr = MAX_RX_PACKET_9700BYTE | SERIAL_PORT_CONTROL_RESERVED;
2502 if (mp->phy_addr == -1) { 2536 if (mp->phy == NULL) {
2503 pscr |= DISABLE_AUTO_NEG_SPEED_GMII; 2537 pscr |= DISABLE_AUTO_NEG_SPEED_GMII;
2504 if (speed == SPEED_1000) 2538 if (speed == SPEED_1000)
2505 pscr |= SET_GMII_SPEED_TO_1000; 2539 pscr |= SET_GMII_SPEED_TO_1000;
@@ -2538,7 +2572,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2538 return -ENODEV; 2572 return -ENODEV;
2539 } 2573 }
2540 2574
2541 dev = alloc_etherdev(sizeof(struct mv643xx_eth_private)); 2575 dev = alloc_etherdev_mq(sizeof(struct mv643xx_eth_private), 8);
2542 if (!dev) 2576 if (!dev)
2543 return -ENOMEM; 2577 return -ENOMEM;
2544 2578
@@ -2549,33 +2583,47 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2549 mp->port_num = pd->port_number; 2583 mp->port_num = pd->port_number;
2550 2584
2551 mp->dev = dev; 2585 mp->dev = dev;
2552#ifdef MV643XX_ETH_NAPI
2553 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 64);
2554#endif
2555 2586
2556 set_params(mp, pd); 2587 set_params(mp, pd);
2588 dev->real_num_tx_queues = mp->txq_count;
2557 2589
2558 spin_lock_init(&mp->lock); 2590 if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
2559 2591 mp->phy = phy_scan(mp, pd->phy_addr);
2560 mib_counters_clear(mp);
2561 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2562
2563 if (mp->phy_addr != -1) {
2564 err = phy_init(mp, pd);
2565 if (err)
2566 goto out;
2567 2592
2593 if (mp->phy != NULL) {
2594 phy_init(mp, pd->speed, pd->duplex);
2568 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops); 2595 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
2569 } else { 2596 } else {
2570 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless); 2597 SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops_phyless);
2571 } 2598 }
2599
2572 init_pscr(mp, pd->speed, pd->duplex); 2600 init_pscr(mp, pd->speed, pd->duplex);
2573 2601
2574 2602
2603 mib_counters_clear(mp);
2604
2605 init_timer(&mp->mib_counters_timer);
2606 mp->mib_counters_timer.data = (unsigned long)mp;
2607 mp->mib_counters_timer.function = mib_counters_timer_wrapper;
2608 mp->mib_counters_timer.expires = jiffies + 30 * HZ;
2609 add_timer(&mp->mib_counters_timer);
2610
2611 spin_lock_init(&mp->mib_counters_lock);
2612
2613 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2614
2615 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128);
2616
2617 init_timer(&mp->rx_oom);
2618 mp->rx_oom.data = (unsigned long)mp;
2619 mp->rx_oom.function = oom_timer_wrapper;
2620
2621
2575 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2622 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
2576 BUG_ON(!res); 2623 BUG_ON(!res);
2577 dev->irq = res->start; 2624 dev->irq = res->start;
2578 2625
2626 dev->get_stats = mv643xx_eth_get_stats;
2579 dev->hard_start_xmit = mv643xx_eth_xmit; 2627 dev->hard_start_xmit = mv643xx_eth_xmit;
2580 dev->open = mv643xx_eth_open; 2628 dev->open = mv643xx_eth_open;
2581 dev->stop = mv643xx_eth_stop; 2629 dev->stop = mv643xx_eth_stop;
@@ -2590,14 +2638,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2590 dev->watchdog_timeo = 2 * HZ; 2638 dev->watchdog_timeo = 2 * HZ;
2591 dev->base_addr = 0; 2639 dev->base_addr = 0;
2592 2640
2593#ifdef MV643XX_ETH_CHECKSUM_OFFLOAD_TX
2594 /*
2595 * Zero copy can only work if we use Discovery II memory. Else, we will
2596 * have to map the buffers to ISA memory which is only 16 MB
2597 */
2598 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM; 2641 dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
2599 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM; 2642 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
2600#endif
2601 2643
2602 SET_NETDEV_DEV(dev, &pdev->dev); 2644 SET_NETDEV_DEV(dev, &pdev->dev);
2603 2645
@@ -2611,16 +2653,6 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2611 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n", 2653 dev_printk(KERN_NOTICE, &dev->dev, "port %d with MAC address %s\n",
2612 mp->port_num, print_mac(mac, dev->dev_addr)); 2654 mp->port_num, print_mac(mac, dev->dev_addr));
2613 2655
2614 if (dev->features & NETIF_F_SG)
2615 dev_printk(KERN_NOTICE, &dev->dev, "scatter/gather enabled\n");
2616
2617 if (dev->features & NETIF_F_IP_CSUM)
2618 dev_printk(KERN_NOTICE, &dev->dev, "tx checksum offload\n");
2619
2620#ifdef MV643XX_ETH_NAPI
2621 dev_printk(KERN_NOTICE, &dev->dev, "napi enabled\n");
2622#endif
2623
2624 if (mp->tx_desc_sram_size > 0) 2656 if (mp->tx_desc_sram_size > 0)
2625 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n"); 2657 dev_printk(KERN_NOTICE, &dev->dev, "configured with sram\n");
2626 2658
@@ -2637,6 +2669,8 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2637 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev); 2669 struct mv643xx_eth_private *mp = platform_get_drvdata(pdev);
2638 2670
2639 unregister_netdev(mp->dev); 2671 unregister_netdev(mp->dev);
2672 if (mp->phy != NULL)
2673 phy_detach(mp->phy);
2640 flush_scheduled_work(); 2674 flush_scheduled_work();
2641 free_netdev(mp->dev); 2675 free_netdev(mp->dev);
2642 2676
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 94e0b7ed76f1..e7508c10887c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -60,49 +60,14 @@ int mdiobus_register(struct mii_bus *bus)
60 bus->reset(bus); 60 bus->reset(bus);
61 61
62 for (i = 0; i < PHY_MAX_ADDR; i++) { 62 for (i = 0; i < PHY_MAX_ADDR; i++) {
63 struct phy_device *phydev; 63 bus->phy_map[i] = NULL;
64 if ((bus->phy_mask & (1 << i)) == 0) {
65 struct phy_device *phydev;
64 66
65 if (bus->phy_mask & (1 << i)) { 67 phydev = mdiobus_scan(bus, i);
66 bus->phy_map[i] = NULL; 68 if (IS_ERR(phydev))
67 continue; 69 err = PTR_ERR(phydev);
68 } 70 }
69
70 phydev = get_phy_device(bus, i);
71
72 if (IS_ERR(phydev))
73 return PTR_ERR(phydev);
74
75 /* There's a PHY at this address
76 * We need to set:
77 * 1) IRQ
78 * 2) bus_id
79 * 3) parent
80 * 4) bus
81 * 5) mii_bus
82 * And, we need to register it */
83 if (phydev) {
84 phydev->irq = bus->irq[i];
85
86 phydev->dev.parent = bus->dev;
87 phydev->dev.bus = &mdio_bus_type;
88 snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, i);
89
90 phydev->bus = bus;
91
92 /* Run all of the fixups for this PHY */
93 phy_scan_fixups(phydev);
94
95 err = device_register(&phydev->dev);
96
97 if (err) {
98 printk(KERN_ERR "phy %d failed to register\n",
99 i);
100 phy_device_free(phydev);
101 phydev = NULL;
102 }
103 }
104
105 bus->phy_map[i] = phydev;
106 } 71 }
107 72
108 pr_info("%s: probed\n", bus->name); 73 pr_info("%s: probed\n", bus->name);
@@ -122,6 +87,48 @@ void mdiobus_unregister(struct mii_bus *bus)
122} 87}
123EXPORT_SYMBOL(mdiobus_unregister); 88EXPORT_SYMBOL(mdiobus_unregister);
124 89
90struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
91{
92 struct phy_device *phydev;
93 int err;
94
95 phydev = get_phy_device(bus, addr);
96 if (IS_ERR(phydev) || phydev == NULL)
97 return phydev;
98
99 /* There's a PHY at this address
100 * We need to set:
101 * 1) IRQ
102 * 2) bus_id
103 * 3) parent
104 * 4) bus
105 * 5) mii_bus
106 * And, we need to register it */
107
108 phydev->irq = bus->irq != NULL ? bus->irq[addr] : PHY_POLL;
109
110 phydev->dev.parent = bus->dev;
111 phydev->dev.bus = &mdio_bus_type;
112 snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, addr);
113
114 phydev->bus = bus;
115
116 /* Run all of the fixups for this PHY */
117 phy_scan_fixups(phydev);
118
119 err = device_register(&phydev->dev);
120 if (err) {
121 printk(KERN_ERR "phy %d failed to register\n", addr);
122 phy_device_free(phydev);
123 phydev = NULL;
124 }
125
126 bus->phy_map[addr] = phydev;
127
128 return phydev;
129}
130EXPORT_SYMBOL(mdiobus_scan);
131
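Exporting mdiobus_scan() lets a driver probe individual MDIO addresses lazily instead of having every address scanned at bus registration. That is exactly how mv643xx_eth uses it above: the bus is registered with phy_mask = 0xffffffff so nothing is probed up front, and phy_scan() then scans only the candidate addresses. A usage sketch against an already-registered bus:

	#include <linux/phy.h>

	/* Lazily probe one MDIO address; returns NULL if nothing answered. */
	static struct phy_device *probe_one_phy(struct mii_bus *bus, int addr)
	{
		if (bus->phy_map[addr] == NULL)
			mdiobus_scan(bus, addr);	/* fills bus->phy_map[addr] */

		return bus->phy_map[addr];
	}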
125/** 132/**
126 * mdio_bus_match - determine if given PHY driver supports the given PHY device 133 * mdio_bus_match - determine if given PHY driver supports the given PHY device
127 * @dev: target PHY device 134 * @dev: target PHY device
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 12078577aef6..cbbbe9bfecad 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -17,9 +17,14 @@
17 17
18struct mv643xx_eth_shared_platform_data { 18struct mv643xx_eth_shared_platform_data {
19 struct mbus_dram_target_info *dram; 19 struct mbus_dram_target_info *dram;
20 struct platform_device *shared_smi;
20 unsigned int t_clk; 21 unsigned int t_clk;
21}; 22};
22 23
24#define MV643XX_ETH_PHY_ADDR_DEFAULT 0
25#define MV643XX_ETH_PHY_ADDR(x) (0x80 | (x))
26#define MV643XX_ETH_PHY_NONE 0xff
27
23struct mv643xx_eth_platform_data { 28struct mv643xx_eth_platform_data {
24 /* 29 /*
25 * Pointer back to our parent instance, and our port number. 30 * Pointer back to our parent instance, and our port number.
@@ -30,8 +35,6 @@ struct mv643xx_eth_platform_data {
30 /* 35 /*
31 * Whether a PHY is present, and if yes, at which address. 36 * Whether a PHY is present, and if yes, at which address.
32 */ 37 */
33 struct platform_device *shared_smi;
34 int force_phy_addr;
35 int phy_addr; 38 int phy_addr;
36 39
37 /* 40 /*
@@ -49,10 +52,10 @@ struct mv643xx_eth_platform_data {
49 int duplex; 52 int duplex;
50 53
51 /* 54 /*
52 * Which RX/TX queues to use. 55 * How many RX/TX queues to use.
53 */ 56 */
54 int rx_queue_mask; 57 int rx_queue_count;
55 int tx_queue_mask; 58 int tx_queue_count;
56 59
57 /* 60 /*
58 * Override default RX/TX queue sizes if nonzero. 61 * Override default RX/TX queue sizes if nonzero.
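With the queue masks replaced by counts, a board file now states how many queues to use directly, and a present PHY address must be wrapped in MV643XX_ETH_PHY_ADDR(): the 0x80 flag is what lets the driver tell an explicit address 0 apart from the all-zero MV643XX_ETH_PHY_ADDR_DEFAULT, which means "probe, starting from the address latched in hardware". A minimal platform-data sketch (other fields, such as the pointer back to the shared device, omitted):

	#include <linux/mv643xx_eth.h>

	static struct mv643xx_eth_platform_data my_board_ge00_data = {
		.phy_addr	= MV643XX_ETH_PHY_ADDR(8),	/* PHY at address 8 */
		.rx_queue_count	= 1,	/* 0 would also mean 1, the default */
		.tx_queue_count	= 1,
	};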
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 7224c4099a28..5f170f5b1a30 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -410,6 +410,8 @@ int phy_start_aneg(struct phy_device *phydev);
410 410
411int mdiobus_register(struct mii_bus *bus); 411int mdiobus_register(struct mii_bus *bus);
412void mdiobus_unregister(struct mii_bus *bus); 412void mdiobus_unregister(struct mii_bus *bus);
413struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
414
413void phy_sanitize_settings(struct phy_device *phydev); 415void phy_sanitize_settings(struct phy_device *phydev);
414int phy_stop_interrupts(struct phy_device *phydev); 416int phy_stop_interrupts(struct phy_device *phydev);
415int phy_enable_interrupts(struct phy_device *phydev); 417int phy_enable_interrupts(struct phy_device *phydev);