author     Jeff Garzik <jeff@garzik.org>    2006-08-29 17:55:59 -0400
committer  Jeff Garzik <jeff@garzik.org>    2006-08-29 17:55:59 -0400
commit     b01e86fee6c821e4e003fd4e9f65453ac478a58e (patch)
tree       21695e10cbe7001d2ccc8c87cee5e7a7865b1025 /drivers/net
parent     e889173c2c67dc288e9b050ab066cfae151b047e (diff)
parent     60d4684068ff1eec78f55b5888d0bd2d4cca1520 (diff)
Merge /spare/repo/linux-2.6 into upstream
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c515.c | 3
-rw-r--r--  drivers/net/82596.c | 9
-rw-r--r--  drivers/net/Kconfig | 41
-rw-r--r--  drivers/net/Makefile | 3
-rw-r--r--  drivers/net/ac3200.c | 3
-rw-r--r--  drivers/net/appletalk/cops.c | 2
-rw-r--r--  drivers/net/at1700.c | 2
-rw-r--r--  drivers/net/cs89x0.c | 3
-rw-r--r--  drivers/net/dm9000.c | 14
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 89
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 32
-rw-r--r--  drivers/net/e1000/e1000_main.c | 2
-rw-r--r--  drivers/net/e2100.c | 4
-rw-r--r--  drivers/net/eepro.c | 3
-rw-r--r--  drivers/net/eexpress.c | 2
-rw-r--r--  drivers/net/es3210.c | 3
-rw-r--r--  drivers/net/eth16i.c | 2
-rw-r--r--  drivers/net/fealnx.c | 2
-rw-r--r--  drivers/net/fs_enet/Makefile | 6
-rw-r--r--  drivers/net/fs_enet/fec.h | 42
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 207
-rw-r--r--  drivers/net/fs_enet/fs_enet-mii.c | 505
-rw-r--r--  drivers/net/fs_enet/fs_enet.h | 40
-rw-r--r--  drivers/net/fs_enet/mac-fcc.c | 32
-rw-r--r--  drivers/net/fs_enet/mac-fec.c | 142
-rw-r--r--  drivers/net/fs_enet/mac-scc.c | 4
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 448
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 243
-rw-r--r--  drivers/net/fs_enet/mii-fixed.c | 91
-rw-r--r--  drivers/net/lance.c | 2
-rw-r--r--  drivers/net/lne390.c | 2
-rw-r--r--  drivers/net/ni52.c | 2
-rw-r--r--  drivers/net/ni65.c | 2
-rw-r--r--  drivers/net/pcmcia/xirc2ps_cs.c | 18
-rw-r--r--  drivers/net/pcnet32.c | 25
-rw-r--r--  drivers/net/phy/Kconfig | 17
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/fixed.c | 358
-rw-r--r--  drivers/net/phy/mdio_bus.c | 1
-rw-r--r--  drivers/net/phy/phy_device.c | 51
-rw-r--r--  drivers/net/s2io.c | 1
-rw-r--r--  drivers/net/seeq8005.c | 2
-rw-r--r--  drivers/net/skge.c | 2
-rw-r--r--  drivers/net/sky2.c | 5
-rw-r--r--  drivers/net/smc911x.c | 1
-rw-r--r--  drivers/net/smc91x.c | 8
-rw-r--r--  drivers/net/smc91x.h | 29
-rw-r--r--  drivers/net/spider_net.c | 12
-rw-r--r--  drivers/net/spider_net.h | 3
-rw-r--r--  drivers/net/spider_net_ethtool.c | 13
-rw-r--r--  drivers/net/sundance.c | 2
-rw-r--r--  drivers/net/tokenring/ibmtr.c | 4
-rw-r--r--  drivers/net/tokenring/smctr.c | 5
-rw-r--r--  drivers/net/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/tulip/xircom_cb.c | 3
-rw-r--r--  drivers/net/ucc_geth.c | 4278
-rw-r--r--  drivers/net/ucc_geth.h | 1339
-rw-r--r--  drivers/net/ucc_geth_phy.c | 801
-rw-r--r--  drivers/net/ucc_geth_phy.h | 217
-rw-r--r--  drivers/net/via-rhine.c | 90
-rw-r--r--  drivers/net/wan/c101.c | 9
-rw-r--r--  drivers/net/wd.c | 4
-rw-r--r--  drivers/net/wireless/hostap/hostap_hw.c | 3
-rw-r--r--  drivers/net/wireless/spectrum_cs.c | 2
64 files changed, 8078 insertions(+), 1215 deletions(-)
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 4532b17e40ea..aedfddf20cb3 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1003,7 +1003,8 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
1003 /* Calculate the next Tx descriptor entry. */ 1003 /* Calculate the next Tx descriptor entry. */
1004 int entry = vp->cur_tx % TX_RING_SIZE; 1004 int entry = vp->cur_tx % TX_RING_SIZE;
1005 struct boom_tx_desc *prev_entry; 1005 struct boom_tx_desc *prev_entry;
1006 unsigned long flags, i; 1006 unsigned long flags;
1007 int i;
1007 1008
1008 if (vp->tx_full) /* No room to transmit with */ 1009 if (vp->tx_full) /* No room to transmit with */
1009 return 1; 1010 return 1;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 7e2ca9571467..257d3bce3993 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -899,7 +899,7 @@ memory_squeeze:
899} 899}
900 900
901 901
902static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) 902static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
903{ 903{
904 struct i596_cmd *ptr; 904 struct i596_cmd *ptr;
905 905
@@ -932,7 +932,8 @@ static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private
932 lp->scb.cmd = I596_NULL; 932 lp->scb.cmd = I596_NULL;
933} 933}
934 934
935static inline void i596_reset(struct net_device *dev, struct i596_private *lp, int ioaddr) 935static void i596_reset(struct net_device *dev, struct i596_private *lp,
936 int ioaddr)
936{ 937{
937 unsigned long flags; 938 unsigned long flags;
938 939
@@ -1578,7 +1579,7 @@ static int debug = -1;
1578module_param(debug, int, 0); 1579module_param(debug, int, 0);
1579MODULE_PARM_DESC(debug, "i82596 debug mask"); 1580MODULE_PARM_DESC(debug, "i82596 debug mask");
1580 1581
1581int init_module(void) 1582int __init init_module(void)
1582{ 1583{
1583 if (debug >= 0) 1584 if (debug >= 0)
1584 i596_debug = debug; 1585 i596_debug = debug;
@@ -1588,7 +1589,7 @@ int init_module(void)
1588 return 0; 1589 return 0;
1589} 1590}
1590 1591
1591void cleanup_module(void) 1592void __exit cleanup_module(void)
1592{ 1593{
1593 unregister_netdev(dev_82596); 1594 unregister_netdev(dev_82596);
1594#ifdef __mc68000__ 1595#ifdef __mc68000__
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 39189903e355..30b3671d833d 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1724,6 +1724,20 @@ config VIA_RHINE_MMIO
1724 1724
1725 If unsure, say Y. 1725 If unsure, say Y.
1726 1726
1727config VIA_RHINE_NAPI
1728 bool "Use Rx Polling (NAPI)"
1729 depends on VIA_RHINE
1730 help
1731 NAPI is a new driver API designed to reduce CPU and interrupt load
1732 when the driver is receiving lots of packets from the card.
1733
1734 If your estimated Rx load is 10kpps or more, or if the card will be
1735 deployed on potentially unfriendly networks (e.g. in a firewall),
1736 then say Y here.
1737
1738 See <file:Documentation/networking/NAPI_HOWTO.txt> for more
1739 information.
1740
1727config LAN_SAA9730 1741config LAN_SAA9730
1728 bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)" 1742 bool "Philips SAA9730 Ethernet support (EXPERIMENTAL)"
1729 depends on NET_PCI && EXPERIMENTAL && MIPS 1743 depends on NET_PCI && EXPERIMENTAL && MIPS
@@ -2219,6 +2233,33 @@ config GFAR_NAPI
2219 bool "NAPI Support" 2233 bool "NAPI Support"
2220 depends on GIANFAR 2234 depends on GIANFAR
2221 2235
2236config UCC_GETH
2237 tristate "Freescale QE UCC GETH"
2238 depends on QUICC_ENGINE && UCC_FAST
2239 help
2240 This driver supports the Gigabit Ethernet mode of QE UCC.
2241 QE can be found on MPC836x CPUs.
2242
2243config UGETH_NAPI
2244 bool "NAPI Support"
2245 depends on UCC_GETH
2246
2247config UGETH_MAGIC_PACKET
2248 bool "Magic Packet detection support"
2249 depends on UCC_GETH
2250
2251config UGETH_FILTERING
2252 bool "Mac address filtering support"
2253 depends on UCC_GETH
2254
2255config UGETH_TX_ON_DEMOND
2256 bool "Transmit on Demond support"
2257 depends on UCC_GETH
2258
2259config UGETH_HAS_GIGA
2260 bool
2261 depends on UCC_GETH && MPC836x
2262
2222config MV643XX_ETH 2263config MV643XX_ETH
2223 tristate "MV-643XX Ethernet support" 2264 tristate "MV-643XX Ethernet support"
2224 depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM 2265 depends on MOMENCO_OCELOT_C || MOMENCO_JAGUAR_ATX || MV64360 || MOMENCO_OCELOT_3 || PPC_MULTIPLATFORM
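[Editor's note, not part of the commit: the VIA_RHINE_NAPI, GFAR_NAPI and UGETH_NAPI options added in this Kconfig hunk all enable the same receive-polling scheme. The following is a minimal sketch of the 2.6-era NAPI pattern such an option switches a driver to; foo_poll(), foo_rx(), foo_rx_irq() and the device-specific interrupt masking are hypothetical placeholders, not code from this tree.]

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Hardware-specific RX processing: handle up to 'limit' frames and
 * return how many were actually processed (details omitted here). */
static int foo_rx(struct net_device *dev, int limit)
{
	return 0;
}

/* dev->poll callback: RX work runs in softirq context instead of
 * entirely inside the interrupt handler. */
static int foo_poll(struct net_device *dev, int *budget)
{
	int work_to_do = min(*budget, dev->quota);
	int work_done = foo_rx(dev, work_to_do);

	*budget -= work_done;
	dev->quota -= work_done;

	if (work_done < work_to_do) {
		netif_rx_complete(dev);		/* done: leave the poll list */
		/* re-enable RX interrupts in hardware here */
		return 0;
	}
	return 1;				/* more work: stay on the poll list */
}

/* From the interrupt handler: mask RX interrupts and schedule polling. */
static void foo_rx_irq(struct net_device *dev)
{
	if (netif_rx_schedule_prep(dev)) {
		/* mask RX interrupts in hardware here */
		__netif_rx_schedule(dev);
	}
}

/* At probe time the driver registers the poll handler and its weight:
 *	dev->poll   = foo_poll;
 *	dev->weight = 64;
 */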
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c91e95126f78..8427bf9dec9d 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -18,6 +18,9 @@ gianfar_driver-objs := gianfar.o \
18 gianfar_mii.o \ 18 gianfar_mii.o \
19 gianfar_sysfs.o 19 gianfar_sysfs.o
20 20
21obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
22ucc_geth_driver-objs := ucc_geth.o ucc_geth_phy.o
23
21# 24#
22# link order important here 25# link order important here
23# 26#
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index 7952dc6d77e3..0fbbcb75af69 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -370,8 +370,7 @@ MODULE_PARM_DESC(mem, "Memory base address(es)");
370MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver"); 370MODULE_DESCRIPTION("Ansel AC3200 EISA ethernet driver");
371MODULE_LICENSE("GPL"); 371MODULE_LICENSE("GPL");
372 372
373int 373int __init init_module(void)
374init_module(void)
375{ 374{
376 struct net_device *dev; 375 struct net_device *dev;
377 int this_dev, found = 0; 376 int this_dev, found = 0;
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1d01ac0000e4..ae7f828344d9 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -1030,7 +1030,7 @@ module_param(io, int, 0);
1030module_param(irq, int, 0); 1030module_param(irq, int, 0);
1031module_param(board_type, int, 0); 1031module_param(board_type, int, 0);
1032 1032
1033int init_module(void) 1033int __init init_module(void)
1034{ 1034{
1035 if (io == 0) 1035 if (io == 0)
1036 printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", 1036 printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 5d7929c79bce..4ca061c2d5b2 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -901,7 +901,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
901MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number"); 901MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
902MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)"); 902MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
903 903
904int init_module(void) 904int __init init_module(void)
905{ 905{
906 if (io == 0) 906 if (io == 0)
907 printk("at1700: You should not use auto-probing with insmod!\n"); 907 printk("at1700: You should not use auto-probing with insmod!\n");
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 47eecce35fa4..2dcca79b1f6a 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1905,8 +1905,7 @@ MODULE_LICENSE("GPL");
1905 1905
1906*/ 1906*/
1907 1907
1908int 1908int __init init_module(void)
1909init_module(void)
1910{ 1909{
1911 struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); 1910 struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
1912 struct net_local *lp; 1911 struct net_local *lp;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 1b758b707134..3d76fa144c4f 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -339,6 +339,17 @@ static void dm9000_timeout(struct net_device *dev)
339 spin_unlock_irqrestore(&db->lock,flags); 339 spin_unlock_irqrestore(&db->lock,flags);
340} 340}
341 341
342#ifdef CONFIG_NET_POLL_CONTROLLER
343/*
344 *Used by netconsole
345 */
346static void dm9000_poll_controller(struct net_device *dev)
347{
348 disable_irq(dev->irq);
349 dm9000_interrupt(dev->irq,dev,NULL);
350 enable_irq(dev->irq);
351}
352#endif
342 353
343/* dm9000_release_board 354/* dm9000_release_board
344 * 355 *
@@ -538,6 +549,9 @@ dm9000_probe(struct platform_device *pdev)
538 ndev->stop = &dm9000_stop; 549 ndev->stop = &dm9000_stop;
539 ndev->get_stats = &dm9000_get_stats; 550 ndev->get_stats = &dm9000_get_stats;
540 ndev->set_multicast_list = &dm9000_hash_table; 551 ndev->set_multicast_list = &dm9000_hash_table;
552#ifdef CONFIG_NET_POLL_CONTROLLER
553 ndev->poll_controller = &dm9000_poll_controller;
554#endif
541 555
542#ifdef DM9000_PROGRAM_EEPROM 556#ifdef DM9000_PROGRAM_EEPROM
543 program_eeprom(db); 557 program_eeprom(db);
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 583518ae49ce..b3b919116e0f 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -105,6 +105,33 @@ static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
105 uint16_t duplex); 105 uint16_t duplex);
106static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); 106static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
107 107
108static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw,
109 uint32_t segment);
110static int32_t e1000_get_software_flag(struct e1000_hw *hw);
111static int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
112static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
113static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
114static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
115 uint16_t words, uint16_t *data);
116static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
117 uint8_t* data);
118static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
119 uint16_t *data);
120static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
121 uint16_t *data);
122static void e1000_release_software_flag(struct e1000_hw *hw);
123static void e1000_release_software_semaphore(struct e1000_hw *hw);
124static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw,
125 uint32_t no_snoop);
126static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw,
127 uint32_t index, uint8_t byte);
128static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
129 uint16_t words, uint16_t *data);
130static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
131 uint8_t data);
132static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr,
133 uint16_t data);
134
108/* IGP cable length table */ 135/* IGP cable length table */
109static const 136static const
110uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = 137uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
@@ -3233,7 +3260,7 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3233 return data; 3260 return data;
3234} 3261}
3235 3262
3236int32_t 3263static int32_t
3237e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) 3264e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
3238{ 3265{
3239 uint32_t swfw_sync = 0; 3266 uint32_t swfw_sync = 0;
@@ -3277,7 +3304,7 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
3277 return E1000_SUCCESS; 3304 return E1000_SUCCESS;
3278} 3305}
3279 3306
3280void 3307static void
3281e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) 3308e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
3282{ 3309{
3283 uint32_t swfw_sync; 3310 uint32_t swfw_sync;
@@ -3575,7 +3602,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw,
3575 return E1000_SUCCESS; 3602 return E1000_SUCCESS;
3576} 3603}
3577 3604
3578int32_t 3605static int32_t
3579e1000_read_kmrn_reg(struct e1000_hw *hw, 3606e1000_read_kmrn_reg(struct e1000_hw *hw,
3580 uint32_t reg_addr, 3607 uint32_t reg_addr,
3581 uint16_t *data) 3608 uint16_t *data)
@@ -3608,7 +3635,7 @@ e1000_read_kmrn_reg(struct e1000_hw *hw,
3608 return E1000_SUCCESS; 3635 return E1000_SUCCESS;
3609} 3636}
3610 3637
3611int32_t 3638static int32_t
3612e1000_write_kmrn_reg(struct e1000_hw *hw, 3639e1000_write_kmrn_reg(struct e1000_hw *hw,
3613 uint32_t reg_addr, 3640 uint32_t reg_addr,
3614 uint16_t data) 3641 uint16_t data)
@@ -3839,7 +3866,7 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
3839* 3866*
3840* hw - struct containing variables accessed by shared code 3867* hw - struct containing variables accessed by shared code
3841******************************************************************************/ 3868******************************************************************************/
3842int32_t 3869static int32_t
3843e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) 3870e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
3844{ 3871{
3845 int32_t ret_val; 3872 int32_t ret_val;
@@ -4086,7 +4113,7 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
4086* hw - Struct containing variables accessed by shared code 4113* hw - Struct containing variables accessed by shared code
4087* phy_info - PHY information structure 4114* phy_info - PHY information structure
4088******************************************************************************/ 4115******************************************************************************/
4089int32_t 4116static int32_t
4090e1000_phy_ife_get_info(struct e1000_hw *hw, 4117e1000_phy_ife_get_info(struct e1000_hw *hw,
4091 struct e1000_phy_info *phy_info) 4118 struct e1000_phy_info *phy_info)
4092{ 4119{
@@ -5643,6 +5670,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
5643 * for the first 15 multicast addresses, and hashes the rest into the 5670 * for the first 15 multicast addresses, and hashes the rest into the
5644 * multicast table. 5671 * multicast table.
5645 *****************************************************************************/ 5672 *****************************************************************************/
5673#if 0
5646void 5674void
5647e1000_mc_addr_list_update(struct e1000_hw *hw, 5675e1000_mc_addr_list_update(struct e1000_hw *hw,
5648 uint8_t *mc_addr_list, 5676 uint8_t *mc_addr_list,
@@ -5719,6 +5747,7 @@ e1000_mc_addr_list_update(struct e1000_hw *hw,
5719 } 5747 }
5720 DEBUGOUT("MC Update Complete\n"); 5748 DEBUGOUT("MC Update Complete\n");
5721} 5749}
5750#endif /* 0 */
5722 5751
5723/****************************************************************************** 5752/******************************************************************************
5724 * Hashes an address to determine its location in the multicast table 5753 * Hashes an address to determine its location in the multicast table
@@ -6587,6 +6616,7 @@ e1000_get_bus_info(struct e1000_hw *hw)
6587 * hw - Struct containing variables accessed by shared code 6616 * hw - Struct containing variables accessed by shared code
6588 * offset - offset to read from 6617 * offset - offset to read from
6589 *****************************************************************************/ 6618 *****************************************************************************/
6619#if 0
6590uint32_t 6620uint32_t
6591e1000_read_reg_io(struct e1000_hw *hw, 6621e1000_read_reg_io(struct e1000_hw *hw,
6592 uint32_t offset) 6622 uint32_t offset)
@@ -6597,6 +6627,7 @@ e1000_read_reg_io(struct e1000_hw *hw,
6597 e1000_io_write(hw, io_addr, offset); 6627 e1000_io_write(hw, io_addr, offset);
6598 return e1000_io_read(hw, io_data); 6628 return e1000_io_read(hw, io_data);
6599} 6629}
6630#endif /* 0 */
6600 6631
6601/****************************************************************************** 6632/******************************************************************************
6602 * Writes a value to one of the devices registers using port I/O (as opposed to 6633 * Writes a value to one of the devices registers using port I/O (as opposed to
@@ -7909,6 +7940,7 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
7909 * returns: - none. 7940 * returns: - none.
7910 * 7941 *
7911 ***************************************************************************/ 7942 ***************************************************************************/
7943#if 0
7912void 7944void
7913e1000_enable_pciex_master(struct e1000_hw *hw) 7945e1000_enable_pciex_master(struct e1000_hw *hw)
7914{ 7946{
@@ -7923,6 +7955,7 @@ e1000_enable_pciex_master(struct e1000_hw *hw)
7923 ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE; 7955 ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
7924 E1000_WRITE_REG(hw, CTRL, ctrl); 7956 E1000_WRITE_REG(hw, CTRL, ctrl);
7925} 7957}
7958#endif /* 0 */
7926 7959
7927/******************************************************************************* 7960/*******************************************************************************
7928 * 7961 *
@@ -8148,7 +8181,7 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
8148 * E1000_SUCCESS at any other case. 8181 * E1000_SUCCESS at any other case.
8149 * 8182 *
8150 ***************************************************************************/ 8183 ***************************************************************************/
8151int32_t 8184static int32_t
8152e1000_get_software_semaphore(struct e1000_hw *hw) 8185e1000_get_software_semaphore(struct e1000_hw *hw)
8153{ 8186{
8154 int32_t timeout = hw->eeprom.word_size + 1; 8187 int32_t timeout = hw->eeprom.word_size + 1;
@@ -8183,7 +8216,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
8183 * hw: Struct containing variables accessed by shared code 8216 * hw: Struct containing variables accessed by shared code
8184 * 8217 *
8185 ***************************************************************************/ 8218 ***************************************************************************/
8186void 8219static void
8187e1000_release_software_semaphore(struct e1000_hw *hw) 8220e1000_release_software_semaphore(struct e1000_hw *hw)
8188{ 8221{
8189 uint32_t swsm; 8222 uint32_t swsm;
@@ -8265,7 +8298,7 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
8265 * returns: E1000_SUCCESS 8298 * returns: E1000_SUCCESS
8266 * 8299 *
8267 *****************************************************************************/ 8300 *****************************************************************************/
8268int32_t 8301static int32_t
8269e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) 8302e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
8270{ 8303{
8271 uint32_t gcr_reg = 0; 8304 uint32_t gcr_reg = 0;
@@ -8306,7 +8339,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
8306 * hw: Struct containing variables accessed by shared code 8339 * hw: Struct containing variables accessed by shared code
8307 * 8340 *
8308 ***************************************************************************/ 8341 ***************************************************************************/
8309int32_t 8342static int32_t
8310e1000_get_software_flag(struct e1000_hw *hw) 8343e1000_get_software_flag(struct e1000_hw *hw)
8311{ 8344{
8312 int32_t timeout = PHY_CFG_TIMEOUT; 8345 int32_t timeout = PHY_CFG_TIMEOUT;
@@ -8345,7 +8378,7 @@ e1000_get_software_flag(struct e1000_hw *hw)
8345 * hw: Struct containing variables accessed by shared code 8378 * hw: Struct containing variables accessed by shared code
8346 * 8379 *
8347 ***************************************************************************/ 8380 ***************************************************************************/
8348void 8381static void
8349e1000_release_software_flag(struct e1000_hw *hw) 8382e1000_release_software_flag(struct e1000_hw *hw)
8350{ 8383{
8351 uint32_t extcnf_ctrl; 8384 uint32_t extcnf_ctrl;
@@ -8369,6 +8402,7 @@ e1000_release_software_flag(struct e1000_hw *hw)
8369 * hw: Struct containing variables accessed by shared code 8402 * hw: Struct containing variables accessed by shared code
8370 * 8403 *
8371 ***************************************************************************/ 8404 ***************************************************************************/
8405#if 0
8372int32_t 8406int32_t
8373e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw) 8407e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
8374{ 8408{
@@ -8388,6 +8422,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
8388 8422
8389 return ret_val; 8423 return ret_val;
8390} 8424}
8425#endif /* 0 */
8391 8426
8392/*************************************************************************** 8427/***************************************************************************
8393 * 8428 *
@@ -8397,6 +8432,7 @@ e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
8397 * hw: Struct containing variables accessed by shared code 8432 * hw: Struct containing variables accessed by shared code
8398 * 8433 *
8399 ***************************************************************************/ 8434 ***************************************************************************/
8435#if 0
8400int32_t 8436int32_t
8401e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw) 8437e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
8402{ 8438{
@@ -8416,6 +8452,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
8416 8452
8417 return ret_val; 8453 return ret_val;
8418} 8454}
8455#endif /* 0 */
8419 8456
8420/****************************************************************************** 8457/******************************************************************************
8421 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access 8458 * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
@@ -8426,7 +8463,7 @@ e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
8426 * data - word read from the EEPROM 8463 * data - word read from the EEPROM
8427 * words - number of words to read 8464 * words - number of words to read
8428 *****************************************************************************/ 8465 *****************************************************************************/
8429int32_t 8466static int32_t
8430e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, 8467e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8431 uint16_t *data) 8468 uint16_t *data)
8432{ 8469{
@@ -8482,7 +8519,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8482 * words - number of words to write 8519 * words - number of words to write
8483 * data - words to write to the EEPROM 8520 * data - words to write to the EEPROM
8484 *****************************************************************************/ 8521 *****************************************************************************/
8485int32_t 8522static int32_t
8486e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, 8523e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8487 uint16_t *data) 8524 uint16_t *data)
8488{ 8525{
@@ -8529,7 +8566,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8529 * 8566 *
8530 * hw - The pointer to the hw structure 8567 * hw - The pointer to the hw structure
8531 ****************************************************************************/ 8568 ****************************************************************************/
8532int32_t 8569static int32_t
8533e1000_ich8_cycle_init(struct e1000_hw *hw) 8570e1000_ich8_cycle_init(struct e1000_hw *hw)
8534{ 8571{
8535 union ich8_hws_flash_status hsfsts; 8572 union ich8_hws_flash_status hsfsts;
@@ -8596,7 +8633,7 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
8596 * 8633 *
8597 * hw - The pointer to the hw structure 8634 * hw - The pointer to the hw structure
8598 ****************************************************************************/ 8635 ****************************************************************************/
8599int32_t 8636static int32_t
8600e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) 8637e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
8601{ 8638{
8602 union ich8_hws_flash_ctrl hsflctl; 8639 union ich8_hws_flash_ctrl hsflctl;
@@ -8631,7 +8668,7 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
8631 * size - Size of data to read, 1=byte 2=word 8668 * size - Size of data to read, 1=byte 2=word
8632 * data - Pointer to the word to store the value read. 8669 * data - Pointer to the word to store the value read.
8633 *****************************************************************************/ 8670 *****************************************************************************/
8634int32_t 8671static int32_t
8635e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, 8672e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
8636 uint32_t size, uint16_t* data) 8673 uint32_t size, uint16_t* data)
8637{ 8674{
@@ -8710,7 +8747,7 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
8710 * size - Size of data to read, 1=byte 2=word 8747 * size - Size of data to read, 1=byte 2=word
8711 * data - The byte(s) to write to the NVM. 8748 * data - The byte(s) to write to the NVM.
8712 *****************************************************************************/ 8749 *****************************************************************************/
8713int32_t 8750static int32_t
8714e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, 8751e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
8715 uint16_t data) 8752 uint16_t data)
8716{ 8753{
@@ -8785,7 +8822,7 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
8785 * index - The index of the byte to read. 8822 * index - The index of the byte to read.
8786 * data - Pointer to a byte to store the value read. 8823 * data - Pointer to a byte to store the value read.
8787 *****************************************************************************/ 8824 *****************************************************************************/
8788int32_t 8825static int32_t
8789e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) 8826e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
8790{ 8827{
8791 int32_t status = E1000_SUCCESS; 8828 int32_t status = E1000_SUCCESS;
@@ -8808,7 +8845,7 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
8808 * index - The index of the byte to write. 8845 * index - The index of the byte to write.
8809 * byte - The byte to write to the NVM. 8846 * byte - The byte to write to the NVM.
8810 *****************************************************************************/ 8847 *****************************************************************************/
8811int32_t 8848static int32_t
8812e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) 8849e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
8813{ 8850{
8814 int32_t error = E1000_SUCCESS; 8851 int32_t error = E1000_SUCCESS;
@@ -8839,7 +8876,7 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
8839 * index - The index of the byte to read. 8876 * index - The index of the byte to read.
8840 * data - The byte to write to the NVM. 8877 * data - The byte to write to the NVM.
8841 *****************************************************************************/ 8878 *****************************************************************************/
8842int32_t 8879static int32_t
8843e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) 8880e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
8844{ 8881{
8845 int32_t status = E1000_SUCCESS; 8882 int32_t status = E1000_SUCCESS;
@@ -8857,7 +8894,7 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
8857 * index - The starting byte index of the word to read. 8894 * index - The starting byte index of the word to read.
8858 * data - Pointer to a word to store the value read. 8895 * data - Pointer to a word to store the value read.
8859 *****************************************************************************/ 8896 *****************************************************************************/
8860int32_t 8897static int32_t
8861e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) 8898e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
8862{ 8899{
8863 int32_t status = E1000_SUCCESS; 8900 int32_t status = E1000_SUCCESS;
@@ -8872,6 +8909,7 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
8872 * index - The starting byte index of the word to read. 8909 * index - The starting byte index of the word to read.
8873 * data - The word to write to the NVM. 8910 * data - The word to write to the NVM.
8874 *****************************************************************************/ 8911 *****************************************************************************/
8912#if 0
8875int32_t 8913int32_t
8876e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data) 8914e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8877{ 8915{
@@ -8879,6 +8917,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8879 status = e1000_write_ich8_data(hw, index, 2, data); 8917 status = e1000_write_ich8_data(hw, index, 2, data);
8880 return status; 8918 return status;
8881} 8919}
8920#endif /* 0 */
8882 8921
8883/****************************************************************************** 8922/******************************************************************************
8884 * Erases the bank specified. Each bank is a 4k block. Segments are 0 based. 8923 * Erases the bank specified. Each bank is a 4k block. Segments are 0 based.
@@ -8887,7 +8926,7 @@ e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
8887 * hw - pointer to e1000_hw structure 8926 * hw - pointer to e1000_hw structure
8888 * segment - 0 for first segment, 1 for second segment, etc. 8927 * segment - 0 for first segment, 1 for second segment, etc.
8889 *****************************************************************************/ 8928 *****************************************************************************/
8890int32_t 8929static int32_t
8891e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment) 8930e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
8892{ 8931{
8893 union ich8_hws_flash_status hsfsts; 8932 union ich8_hws_flash_status hsfsts;
@@ -8984,6 +9023,7 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
8984 * hw: Struct containing variables accessed by shared code 9023 * hw: Struct containing variables accessed by shared code
8985 * 9024 *
8986 *****************************************************************************/ 9025 *****************************************************************************/
9026#if 0
8987int32_t 9027int32_t
8988e1000_duplex_reversal(struct e1000_hw *hw) 9028e1000_duplex_reversal(struct e1000_hw *hw)
8989{ 9029{
@@ -9012,8 +9052,9 @@ e1000_duplex_reversal(struct e1000_hw *hw)
9012 9052
9013 return ret_val; 9053 return ret_val;
9014} 9054}
9055#endif /* 0 */
9015 9056
9016int32_t 9057static int32_t
9017e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, 9058e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
9018 uint32_t cnf_base_addr, uint32_t cnf_size) 9059 uint32_t cnf_base_addr, uint32_t cnf_size)
9019{ 9060{
@@ -9047,7 +9088,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
9047} 9088}
9048 9089
9049 9090
9050int32_t 9091static int32_t
9051e1000_init_lcd_from_nvm(struct e1000_hw *hw) 9092e1000_init_lcd_from_nvm(struct e1000_hw *hw)
9052{ 9093{
9053 uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; 9094 uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index f9341e3276b3..375b95518c31 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -323,13 +323,8 @@ int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t dat
323int32_t e1000_phy_hw_reset(struct e1000_hw *hw); 323int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
324int32_t e1000_phy_reset(struct e1000_hw *hw); 324int32_t e1000_phy_reset(struct e1000_hw *hw);
325void e1000_phy_powerdown_workaround(struct e1000_hw *hw); 325void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
326int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
327int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
328int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
329int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 326int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
330int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 327int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
331int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
332int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
333 328
334/* EEPROM Functions */ 329/* EEPROM Functions */
335int32_t e1000_init_eeprom_params(struct e1000_hw *hw); 330int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
@@ -400,13 +395,8 @@ int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
400int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 395int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
401int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num); 396int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
402int32_t e1000_read_mac_addr(struct e1000_hw * hw); 397int32_t e1000_read_mac_addr(struct e1000_hw * hw);
403int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
404void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
405void e1000_release_software_flag(struct e1000_hw *hw);
406int32_t e1000_get_software_flag(struct e1000_hw *hw);
407 398
408/* Filters (multicast, vlan, receive) */ 399/* Filters (multicast, vlan, receive) */
409void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
410uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); 400uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
411void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); 401void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
412void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); 402void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
@@ -431,31 +421,9 @@ void e1000_pci_clear_mwi(struct e1000_hw *hw);
431void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); 421void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
432void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value); 422void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
433/* Port I/O is only supported on 82544 and newer */ 423/* Port I/O is only supported on 82544 and newer */
434uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
435uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
436void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); 424void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
437void e1000_enable_pciex_master(struct e1000_hw *hw);
438int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 425int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
439int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
440void e1000_release_software_semaphore(struct e1000_hw *hw);
441int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); 426int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
442int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
443
444int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
445 uint8_t *data);
446int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
447 uint8_t byte);
448int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
449 uint8_t byte);
450int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
451 uint16_t *data);
452int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
453 uint32_t size, uint16_t *data);
454int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
455 uint16_t words, uint16_t *data);
456int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
457 uint16_t words, uint16_t *data);
458int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
459 427
460 428
461#define E1000_READ_REG_IO(a, reg) \ 429#define E1000_READ_REG_IO(a, reg) \
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 627f224d78bc..726f43d55937 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -4386,11 +4386,13 @@ e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4386 pci_write_config_word(adapter->pdev, reg, *value); 4386 pci_write_config_word(adapter->pdev, reg, *value);
4387} 4387}
4388 4388
4389#if 0
4389uint32_t 4390uint32_t
4390e1000_io_read(struct e1000_hw *hw, unsigned long port) 4391e1000_io_read(struct e1000_hw *hw, unsigned long port)
4391{ 4392{
4392 return inl(port); 4393 return inl(port);
4393} 4394}
4395#endif /* 0 */
4394 4396
4395void 4397void
4396e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) 4398e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index e5c5cd2a2712..e4e733a380e3 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -425,8 +425,8 @@ MODULE_LICENSE("GPL");
425 425
426/* This is set up so that only a single autoprobe takes place per call. 426/* This is set up so that only a single autoprobe takes place per call.
427ISA device autoprobes on a running machine are not recommended. */ 427ISA device autoprobes on a running machine are not recommended. */
428int 428
429init_module(void) 429int __init init_module(void)
430{ 430{
431 struct net_device *dev; 431 struct net_device *dev;
432 int this_dev, found = 0; 432 int this_dev, found = 0;
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 20d31430c74f..8dc61d65dd23 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1807,8 +1807,7 @@ MODULE_PARM_DESC(irq, "EtherExpress Pro/10 IRQ number(s)");
1807MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)"); 1807MODULE_PARM_DESC(mem, "EtherExpress Pro/10 Rx buffer size(es) in kB (3-29)");
1808MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)"); 1808MODULE_PARM_DESC(autodetect, "EtherExpress Pro/10 force board(s) detection (0-1)");
1809 1809
1810int 1810int __init init_module(void)
1811init_module(void)
1812{ 1811{
1813 struct net_device *dev; 1812 struct net_device *dev;
1814 int i; 1813 int i;
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 33291bcf6d4c..0701c1d810ca 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1698,7 +1698,7 @@ MODULE_LICENSE("GPL");
1698 * are specified, we verify and then use them. If no parameters are given, we 1698 * are specified, we verify and then use them. If no parameters are given, we
1699 * autoprobe for one card only. 1699 * autoprobe for one card only.
1700 */ 1700 */
1701int init_module(void) 1701int __init init_module(void)
1702{ 1702{
1703 struct net_device *dev; 1703 struct net_device *dev;
1704 int this_dev, found = 0; 1704 int this_dev, found = 0;
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 6b0ab1eac3fb..fd7b32a24ea4 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -421,8 +421,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
421MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver"); 421MODULE_DESCRIPTION("Racal-Interlan ES3210 EISA ethernet driver");
422MODULE_LICENSE("GPL"); 422MODULE_LICENSE("GPL");
423 423
424int 424int __init init_module(void)
425init_module(void)
426{ 425{
427 struct net_device *dev; 426 struct net_device *dev;
428 int this_dev, found = 0; 427 int this_dev, found = 0;
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 4bf76f86d8e9..ca42efa9143c 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1434,7 +1434,7 @@ MODULE_PARM_DESC(mediatype, "eth16i media type of interface(s) (bnc,tp,dix,auto,
1434module_param(debug, int, 0); 1434module_param(debug, int, 0);
1435MODULE_PARM_DESC(debug, "eth16i debug level (0-6)"); 1435MODULE_PARM_DESC(debug, "eth16i debug level (0-6)");
1436 1436
1437int init_module(void) 1437int __init init_module(void)
1438{ 1438{
1439 int this_dev, found = 0; 1439 int this_dev, found = 0;
1440 struct net_device *dev; 1440 struct net_device *dev;
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 97d34fee8c1f..567e27413cfd 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -92,7 +92,7 @@ static int full_duplex[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
92#include <asm/uaccess.h> 92#include <asm/uaccess.h>
93 93
94/* These identify the driver base version and may not be removed. */ 94/* These identify the driver base version and may not be removed. */
95static char version[] __devinitdata = 95static char version[] =
96KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n"; 96KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE "\n";
97 97
98 98
diff --git a/drivers/net/fs_enet/Makefile b/drivers/net/fs_enet/Makefile
index d6dd3f2fb43e..02d4dc18ba69 100644
--- a/drivers/net/fs_enet/Makefile
+++ b/drivers/net/fs_enet/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_FS_ENET) += fs_enet.o 5obj-$(CONFIG_FS_ENET) += fs_enet.o
6 6
7obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o 7obj-$(CONFIG_8xx) += mac-fec.o mac-scc.o mii-fec.o
8obj-$(CONFIG_8260) += mac-fcc.o 8obj-$(CONFIG_CPM2) += mac-fcc.o mii-bitbang.o
9 9
10fs_enet-objs := fs_enet-main.o fs_enet-mii.o mii-bitbang.o mii-fixed.o 10fs_enet-objs := fs_enet-main.o
diff --git a/drivers/net/fs_enet/fec.h b/drivers/net/fs_enet/fec.h
new file mode 100644
index 000000000000..e980527e2b99
--- /dev/null
+++ b/drivers/net/fs_enet/fec.h
@@ -0,0 +1,42 @@
1#ifndef FS_ENET_FEC_H
2#define FS_ENET_FEC_H
3
4/* CRC polynomium used by the FEC for the multicast group filtering */
5#define FEC_CRC_POLY 0x04C11DB7
6
7#define FEC_MAX_MULTICAST_ADDRS 64
8
9/* Interrupt events/masks.
10*/
11#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
12#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
13#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
14#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
15#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
16#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
17#define FEC_ENET_RXF 0x02000000U /* Full frame received */
18#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
19#define FEC_ENET_MII 0x00800000U /* MII interrupt */
20#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
21
22#define FEC_ECNTRL_PINMUX 0x00000004
23#define FEC_ECNTRL_ETHER_EN 0x00000002
24#define FEC_ECNTRL_RESET 0x00000001
25
26#define FEC_RCNTRL_BC_REJ 0x00000010
27#define FEC_RCNTRL_PROM 0x00000008
28#define FEC_RCNTRL_MII_MODE 0x00000004
29#define FEC_RCNTRL_DRT 0x00000002
30#define FEC_RCNTRL_LOOP 0x00000001
31
32#define FEC_TCNTRL_FDEN 0x00000004
33#define FEC_TCNTRL_HBC 0x00000002
34#define FEC_TCNTRL_GTS 0x00000001
35
36
37
38/*
39 * Delay to wait for FEC reset command to complete (in us)
40 */
41#define FEC_RESET_DELAY 50
42#endif
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index f6abff5846b3..df62506a1787 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -37,6 +37,7 @@
37#include <linux/bitops.h> 37#include <linux/bitops.h>
38#include <linux/fs.h> 38#include <linux/fs.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/phy.h>
40 41
41#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
42#include <asm/pgtable.h> 43#include <asm/pgtable.h>
@@ -682,35 +683,6 @@ static void fs_free_irq(struct net_device *dev, int irq)
682 (*fep->ops->post_free_irq)(dev, irq); 683 (*fep->ops->post_free_irq)(dev, irq);
683} 684}
684 685
685/**********************************************************************************/
686
687/* This interrupt occurs when the PHY detects a link change. */
688static irqreturn_t
689fs_mii_link_interrupt(int irq, void *dev_id, struct pt_regs *regs)
690{
691 struct net_device *dev = dev_id;
692 struct fs_enet_private *fep;
693 const struct fs_platform_info *fpi;
694
695 fep = netdev_priv(dev);
696 fpi = fep->fpi;
697
698 /*
699 * Acknowledge the interrupt if possible. If we have not
700 * found the PHY yet we can't process or acknowledge the
701 * interrupt now. Instead we ignore this interrupt for now,
702 * which we can do since it is edge triggered. It will be
703 * acknowledged later by fs_enet_open().
704 */
705 if (!fep->phy)
706 return IRQ_NONE;
707
708 fs_mii_ack_int(dev);
709 fs_mii_link_status_change_check(dev, 0);
710
711 return IRQ_HANDLED;
712}
713
714static void fs_timeout(struct net_device *dev) 686static void fs_timeout(struct net_device *dev)
715{ 687{
716 struct fs_enet_private *fep = netdev_priv(dev); 688 struct fs_enet_private *fep = netdev_priv(dev);
@@ -722,10 +694,13 @@ static void fs_timeout(struct net_device *dev)
722 spin_lock_irqsave(&fep->lock, flags); 694 spin_lock_irqsave(&fep->lock, flags);
723 695
724 if (dev->flags & IFF_UP) { 696 if (dev->flags & IFF_UP) {
697 phy_stop(fep->phydev);
725 (*fep->ops->stop)(dev); 698 (*fep->ops->stop)(dev);
726 (*fep->ops->restart)(dev); 699 (*fep->ops->restart)(dev);
700 phy_start(fep->phydev);
727 } 701 }
728 702
703 phy_start(fep->phydev);
729 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY); 704 wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
730 spin_unlock_irqrestore(&fep->lock, flags); 705 spin_unlock_irqrestore(&fep->lock, flags);
731 706
@@ -733,35 +708,112 @@ static void fs_timeout(struct net_device *dev)
733 netif_wake_queue(dev); 708 netif_wake_queue(dev);
734} 709}
735 710
711/*-----------------------------------------------------------------------------
712 * generic link-change handler - should be sufficient for most cases
713 *-----------------------------------------------------------------------------*/
714static void generic_adjust_link(struct net_device *dev)
715{
716 struct fs_enet_private *fep = netdev_priv(dev);
717 struct phy_device *phydev = fep->phydev;
718 int new_state = 0;
719
720 if (phydev->link) {
721
722 /* adjust to duplex mode */
723 if (phydev->duplex != fep->oldduplex){
724 new_state = 1;
725 fep->oldduplex = phydev->duplex;
726 }
727
728 if (phydev->speed != fep->oldspeed) {
729 new_state = 1;
730 fep->oldspeed = phydev->speed;
731 }
732
733 if (!fep->oldlink) {
734 new_state = 1;
735 fep->oldlink = 1;
736 netif_schedule(dev);
737 netif_carrier_on(dev);
738 netif_start_queue(dev);
739 }
740
741 if (new_state)
742 fep->ops->restart(dev);
743
744 } else if (fep->oldlink) {
745 new_state = 1;
746 fep->oldlink = 0;
747 fep->oldspeed = 0;
748 fep->oldduplex = -1;
749 netif_carrier_off(dev);
750 netif_stop_queue(dev);
751 }
752
753 if (new_state && netif_msg_link(fep))
754 phy_print_status(phydev);
755}
756
757
758static void fs_adjust_link(struct net_device *dev)
759{
760 struct fs_enet_private *fep = netdev_priv(dev);
761 unsigned long flags;
762
763 spin_lock_irqsave(&fep->lock, flags);
764
765 if(fep->ops->adjust_link)
766 fep->ops->adjust_link(dev);
767 else
768 generic_adjust_link(dev);
769
770 spin_unlock_irqrestore(&fep->lock, flags);
771}
772
773static int fs_init_phy(struct net_device *dev)
774{
775 struct fs_enet_private *fep = netdev_priv(dev);
776 struct phy_device *phydev;
777
778 fep->oldlink = 0;
779 fep->oldspeed = 0;
780 fep->oldduplex = -1;
781 if(fep->fpi->bus_id)
782 phydev = phy_connect(dev, fep->fpi->bus_id, &fs_adjust_link, 0);
783 else {
784 printk("No phy bus ID specified in BSP code\n");
785 return -EINVAL;
786 }
787 if (IS_ERR(phydev)) {
788 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
789 return PTR_ERR(phydev);
790 }
791
792 fep->phydev = phydev;
793
794 return 0;
795}
796
797
736static int fs_enet_open(struct net_device *dev) 798static int fs_enet_open(struct net_device *dev)
737{ 799{
738 struct fs_enet_private *fep = netdev_priv(dev); 800 struct fs_enet_private *fep = netdev_priv(dev);
739 const struct fs_platform_info *fpi = fep->fpi;
740 int r; 801 int r;
802 int err;
741 803
742 /* Install our interrupt handler. */ 804 /* Install our interrupt handler. */
743 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt); 805 r = fs_request_irq(dev, fep->interrupt, "fs_enet-mac", fs_enet_interrupt);
744 if (r != 0) { 806 if (r != 0) {
745 printk(KERN_ERR DRV_MODULE_NAME 807 printk(KERN_ERR DRV_MODULE_NAME
746 ": %s Could not allocate FEC IRQ!", dev->name); 808 ": %s Could not allocate FS_ENET IRQ!", dev->name);
747 return -EINVAL; 809 return -EINVAL;
748 } 810 }
749 811
750 /* Install our phy interrupt handler */ 812 err = fs_init_phy(dev);
751 if (fpi->phy_irq != -1) { 813 if(err)
752 814 return err;
753 r = fs_request_irq(dev, fpi->phy_irq, "fs_enet-phy", fs_mii_link_interrupt);
754 if (r != 0) {
755 printk(KERN_ERR DRV_MODULE_NAME
756 ": %s Could not allocate PHY IRQ!", dev->name);
757 fs_free_irq(dev, fep->interrupt);
758 return -EINVAL;
759 }
760 }
761 815
762 fs_mii_startup(dev); 816 phy_start(fep->phydev);
763 netif_carrier_off(dev);
764 fs_mii_link_status_change_check(dev, 1);
765 817
766 return 0; 818 return 0;
767} 819}
@@ -769,20 +821,19 @@ static int fs_enet_open(struct net_device *dev)
769static int fs_enet_close(struct net_device *dev) 821static int fs_enet_close(struct net_device *dev)
770{ 822{
771 struct fs_enet_private *fep = netdev_priv(dev); 823 struct fs_enet_private *fep = netdev_priv(dev);
772 const struct fs_platform_info *fpi = fep->fpi;
773 unsigned long flags; 824 unsigned long flags;
774 825
775 netif_stop_queue(dev); 826 netif_stop_queue(dev);
776 netif_carrier_off(dev); 827 netif_carrier_off(dev);
777 fs_mii_shutdown(dev); 828 phy_stop(fep->phydev);
778 829
779 spin_lock_irqsave(&fep->lock, flags); 830 spin_lock_irqsave(&fep->lock, flags);
780 (*fep->ops->stop)(dev); 831 (*fep->ops->stop)(dev);
781 spin_unlock_irqrestore(&fep->lock, flags); 832 spin_unlock_irqrestore(&fep->lock, flags);
782 833
783 /* release any irqs */ 834 /* release any irqs */
784 if (fpi->phy_irq != -1) 835 phy_disconnect(fep->phydev);
785 fs_free_irq(dev, fpi->phy_irq); 836 fep->phydev = NULL;
786 fs_free_irq(dev, fep->interrupt); 837 fs_free_irq(dev, fep->interrupt);
787 838
788 return 0; 839 return 0;
@@ -830,33 +881,19 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
830static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 881static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
831{ 882{
832 struct fs_enet_private *fep = netdev_priv(dev); 883 struct fs_enet_private *fep = netdev_priv(dev);
833 unsigned long flags; 884 return phy_ethtool_gset(fep->phydev, cmd);
834 int rc;
835
836 spin_lock_irqsave(&fep->lock, flags);
837 rc = mii_ethtool_gset(&fep->mii_if, cmd);
838 spin_unlock_irqrestore(&fep->lock, flags);
839
840 return rc;
841} 885}
842 886
843static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 887static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
844{ 888{
845 struct fs_enet_private *fep = netdev_priv(dev); 889 struct fs_enet_private *fep = netdev_priv(dev);
846 unsigned long flags; 890 phy_ethtool_sset(fep->phydev, cmd);
847 int rc; 891 return 0;
848
849 spin_lock_irqsave(&fep->lock, flags);
850 rc = mii_ethtool_sset(&fep->mii_if, cmd);
851 spin_unlock_irqrestore(&fep->lock, flags);
852
853 return rc;
854} 892}
855 893
856static int fs_nway_reset(struct net_device *dev) 894static int fs_nway_reset(struct net_device *dev)
857{ 895{
858 struct fs_enet_private *fep = netdev_priv(dev); 896 return 0;
859 return mii_nway_restart(&fep->mii_if);
860} 897}
861 898
862static u32 fs_get_msglevel(struct net_device *dev) 899static u32 fs_get_msglevel(struct net_device *dev)
@@ -898,7 +935,7 @@ static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
898 return -EINVAL; 935 return -EINVAL;
899 936
900 spin_lock_irqsave(&fep->lock, flags); 937 spin_lock_irqsave(&fep->lock, flags);
901 rc = generic_mii_ioctl(&fep->mii_if, mii, cmd, NULL); 938 rc = phy_mii_ioctl(fep->phydev, mii, cmd);
902 spin_unlock_irqrestore(&fep->lock, flags); 939 spin_unlock_irqrestore(&fep->lock, flags);
903 return rc; 940 return rc;
904} 941}
@@ -1030,12 +1067,6 @@ static struct net_device *fs_init_instance(struct device *dev,
1030 } 1067 }
1031 registered = 1; 1068 registered = 1;
1032 1069
1033 err = fs_mii_connect(ndev);
1034 if (err != 0) {
1035 printk(KERN_ERR DRV_MODULE_NAME
1036 ": %s fs_mii_connect failed.\n", ndev->name);
1037 goto err;
1038 }
1039 1070
1040 return ndev; 1071 return ndev;
1041 1072
@@ -1073,8 +1104,6 @@ static int fs_cleanup_instance(struct net_device *ndev)
1073 1104
1074 fpi = fep->fpi; 1105 fpi = fep->fpi;
1075 1106
1076 fs_mii_disconnect(ndev);
1077
1078 unregister_netdev(ndev); 1107 unregister_netdev(ndev);
1079 1108
1080 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t), 1109 dma_free_coherent(fep->dev, (fpi->tx_ring + fpi->rx_ring) * sizeof(cbd_t),
@@ -1196,17 +1225,39 @@ static int __init fs_init(void)
1196 r = setup_immap(); 1225 r = setup_immap();
1197 if (r != 0) 1226 if (r != 0)
1198 return r; 1227 return r;
1199 r = driver_register(&fs_enet_fec_driver); 1228
1229#ifdef CONFIG_FS_ENET_HAS_FCC
1230 /* let's insert mii stuff */
1231 r = fs_enet_mdio_bb_init();
1232
1233 if (r != 0) {
1234 printk(KERN_ERR DRV_MODULE_NAME
1235 "BB PHY init failed.\n");
1236 return r;
1237 }
1238 r = driver_register(&fs_enet_fcc_driver);
1200 if (r != 0) 1239 if (r != 0)
1201 goto err; 1240 goto err;
1241#endif
1202 1242
1203 r = driver_register(&fs_enet_fcc_driver); 1243#ifdef CONFIG_FS_ENET_HAS_FEC
1244 r = fs_enet_mdio_fec_init();
1245 if (r != 0) {
1246 printk(KERN_ERR DRV_MODULE_NAME
1247 "FEC PHY init failed.\n");
1248 return r;
1249 }
1250
1251 r = driver_register(&fs_enet_fec_driver);
1204 if (r != 0) 1252 if (r != 0)
1205 goto err; 1253 goto err;
1254#endif
1206 1255
1256#ifdef CONFIG_FS_ENET_HAS_SCC
1207 r = driver_register(&fs_enet_scc_driver); 1257 r = driver_register(&fs_enet_scc_driver);
1208 if (r != 0) 1258 if (r != 0)
1209 goto err; 1259 goto err;
1260#endif
1210 1261
1211 return 0; 1262 return 0;
1212err: 1263err:
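
The reworked fs_init() above registers each MDIO bus driver before the MAC driver that needs it, one pair per CONFIG_FS_ENET_HAS_* option. A minimal sketch of that ordering, using hypothetical example_mdio_init()/example_mac_register() helpers rather than the real driver entry points:

	static int __init example_init(void)
	{
		int r;

		/* the MDIO bus must exist before the MAC driver probes,
		 * so the PHY can be found when the interface is opened */
		r = example_mdio_init();
		if (r != 0)
			return r;

		r = example_mac_register();
		if (r != 0)
			example_mdio_exit();	/* unwind on failure */

		return r;
	}
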
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
deleted file mode 100644
index b7e6e21725cb..000000000000
--- a/drivers/net/fs_enet/fs_enet-mii.c
+++ /dev/null
@@ -1,505 +0,0 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * Heavily based on original FEC driver by Dan Malek <dan@embeddededge.com>
11 * and modifications by Joakim Tjernlund <joakim.tjernlund@lumentis.se>
12 *
13 * This file is licensed under the terms of the GNU General Public License
14 * version 2. This program is licensed "as is" without any warranty of any
15 * kind, whether express or implied.
16 */
17
18
19#include <linux/module.h>
20#include <linux/types.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/ptrace.h>
25#include <linux/errno.h>
26#include <linux/ioport.h>
27#include <linux/slab.h>
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/delay.h>
32#include <linux/netdevice.h>
33#include <linux/etherdevice.h>
34#include <linux/skbuff.h>
35#include <linux/spinlock.h>
36#include <linux/mii.h>
37#include <linux/ethtool.h>
38#include <linux/bitops.h>
39
40#include <asm/pgtable.h>
41#include <asm/irq.h>
42#include <asm/uaccess.h>
43
44#include "fs_enet.h"
45
46/*************************************************/
47
48/*
49 * Generic PHY support.
50 * Should work for all PHYs, but link change is detected by polling
51 */
52
53static void generic_timer_callback(unsigned long data)
54{
55 struct net_device *dev = (struct net_device *)data;
56 struct fs_enet_private *fep = netdev_priv(dev);
57
58 fep->phy_timer_list.expires = jiffies + HZ / 2;
59
60 add_timer(&fep->phy_timer_list);
61
62 fs_mii_link_status_change_check(dev, 0);
63}
64
65static void generic_startup(struct net_device *dev)
66{
67 struct fs_enet_private *fep = netdev_priv(dev);
68
69 fep->phy_timer_list.expires = jiffies + HZ / 2; /* every 500ms */
70 fep->phy_timer_list.data = (unsigned long)dev;
71 fep->phy_timer_list.function = generic_timer_callback;
72 add_timer(&fep->phy_timer_list);
73}
74
75static void generic_shutdown(struct net_device *dev)
76{
77 struct fs_enet_private *fep = netdev_priv(dev);
78
79 del_timer_sync(&fep->phy_timer_list);
80}
81
82/* ------------------------------------------------------------------------- */
83/* The Davicom DM9161 is used on the NETTA board */
84
85/* register definitions */
86
87#define MII_DM9161_ANAR 4 /* Aux. Config Register */
88#define MII_DM9161_ACR 16 /* Aux. Config Register */
89#define MII_DM9161_ACSR 17 /* Aux. Config/Status Register */
90#define MII_DM9161_10TCSR 18 /* 10BaseT Config/Status Reg. */
91#define MII_DM9161_INTR 21 /* Interrupt Register */
92#define MII_DM9161_RECR 22 /* Receive Error Counter Reg. */
93#define MII_DM9161_DISCR 23 /* Disconnect Counter Register */
94
95static void dm9161_startup(struct net_device *dev)
96{
97 struct fs_enet_private *fep = netdev_priv(dev);
98
99 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0000);
100 /* Start autonegotiation */
101 fs_mii_write(dev, fep->mii_if.phy_id, MII_BMCR, 0x1200);
102
103 set_current_state(TASK_UNINTERRUPTIBLE);
104 schedule_timeout(HZ*8);
105}
106
107static void dm9161_ack_int(struct net_device *dev)
108{
109 struct fs_enet_private *fep = netdev_priv(dev);
110
111 fs_mii_read(dev, fep->mii_if.phy_id, MII_DM9161_INTR);
112}
113
114static void dm9161_shutdown(struct net_device *dev)
115{
116 struct fs_enet_private *fep = netdev_priv(dev);
117
118 fs_mii_write(dev, fep->mii_if.phy_id, MII_DM9161_INTR, 0x0f00);
119}
120
121/**********************************************************************************/
122
123static const struct phy_info phy_info[] = {
124 {
125 .id = 0x00181b88,
126 .name = "DM9161",
127 .startup = dm9161_startup,
128 .ack_int = dm9161_ack_int,
129 .shutdown = dm9161_shutdown,
130 }, {
131 .id = 0,
132 .name = "GENERIC",
133 .startup = generic_startup,
134 .shutdown = generic_shutdown,
135 },
136};
137
138/**********************************************************************************/
139
140static int phy_id_detect(struct net_device *dev)
141{
142 struct fs_enet_private *fep = netdev_priv(dev);
143 const struct fs_platform_info *fpi = fep->fpi;
144 struct fs_enet_mii_bus *bus = fep->mii_bus;
145 int i, r, start, end, phytype, physubtype;
146 const struct phy_info *phy;
147 int phy_hwid, phy_id;
148
149 phy_hwid = -1;
150 fep->phy = NULL;
151
152 /* auto-detect? */
153 if (fpi->phy_addr == -1) {
154 start = 1;
155 end = 32;
156 } else { /* direct */
157 start = fpi->phy_addr;
158 end = start + 1;
159 }
160
161 for (phy_id = start; phy_id < end; phy_id++) {
162 /* skip already used phy addresses on this bus */
163 if (bus->usage_map & (1 << phy_id))
164 continue;
165 r = fs_mii_read(dev, phy_id, MII_PHYSID1);
166 if (r == -1 || (phytype = (r & 0xffff)) == 0xffff)
167 continue;
168 r = fs_mii_read(dev, phy_id, MII_PHYSID2);
169 if (r == -1 || (physubtype = (r & 0xffff)) == 0xffff)
170 continue;
171 phy_hwid = (phytype << 16) | physubtype;
172 if (phy_hwid != -1)
173 break;
174 }
175
176 if (phy_hwid == -1) {
177 printk(KERN_ERR DRV_MODULE_NAME
178 ": %s No PHY detected! range=0x%02x-0x%02x\n",
179 dev->name, start, end);
180 return -1;
181 }
182
183 for (i = 0, phy = phy_info; i < ARRAY_SIZE(phy_info); i++, phy++)
184 if (phy->id == (phy_hwid >> 4) || phy->id == 0)
185 break;
186
187 if (i >= ARRAY_SIZE(phy_info)) {
188 printk(KERN_ERR DRV_MODULE_NAME
189 ": %s PHY id 0x%08x is not supported!\n",
190 dev->name, phy_hwid);
191 return -1;
192 }
193
194 fep->phy = phy;
195
196 /* mark this address as used */
197 bus->usage_map |= (1 << phy_id);
198
199 printk(KERN_INFO DRV_MODULE_NAME
200 ": %s Phy @ 0x%x, type %s (0x%08x)%s\n",
201 dev->name, phy_id, fep->phy->name, phy_hwid,
202 fpi->phy_addr == -1 ? " (auto-detected)" : "");
203
204 return phy_id;
205}
206
207void fs_mii_startup(struct net_device *dev)
208{
209 struct fs_enet_private *fep = netdev_priv(dev);
210
211 if (fep->phy->startup)
212 (*fep->phy->startup) (dev);
213}
214
215void fs_mii_shutdown(struct net_device *dev)
216{
217 struct fs_enet_private *fep = netdev_priv(dev);
218
219 if (fep->phy->shutdown)
220 (*fep->phy->shutdown) (dev);
221}
222
223void fs_mii_ack_int(struct net_device *dev)
224{
225 struct fs_enet_private *fep = netdev_priv(dev);
226
227 if (fep->phy->ack_int)
228 (*fep->phy->ack_int) (dev);
229}
230
231#define MII_LINK 0x0001
232#define MII_HALF 0x0002
233#define MII_FULL 0x0004
234#define MII_BASE4 0x0008
235#define MII_10M 0x0010
236#define MII_100M 0x0020
237#define MII_1G 0x0040
238#define MII_10G 0x0080
239
240/* return full mii info at one gulp, with a usable form */
241static unsigned int mii_full_status(struct mii_if_info *mii)
242{
243 unsigned int status;
244 int bmsr, adv, lpa, neg;
245 struct fs_enet_private* fep = netdev_priv(mii->dev);
246
247 /* first, a dummy read, needed to latch some MII phys */
248 (void)mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
249 bmsr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR);
250
251 /* no link */
252 if ((bmsr & BMSR_LSTATUS) == 0)
253 return 0;
254
255 status = MII_LINK;
256
257 /* Lets look what ANEG says if it's supported - otherwize we shall
258 take the right values from the platform info*/
259 if(!mii->force_media) {
260 /* autoneg not completed; don't bother */
261 if ((bmsr & BMSR_ANEGCOMPLETE) == 0)
262 return 0;
263
264 adv = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_ADVERTISE);
265 lpa = (*mii->mdio_read)(mii->dev, mii->phy_id, MII_LPA);
266
267 neg = lpa & adv;
268 } else {
269 neg = fep->fpi->bus_info->lpa;
270 }
271
272 if (neg & LPA_100FULL)
273 status |= MII_FULL | MII_100M;
274 else if (neg & LPA_100BASE4)
275 status |= MII_FULL | MII_BASE4 | MII_100M;
276 else if (neg & LPA_100HALF)
277 status |= MII_HALF | MII_100M;
278 else if (neg & LPA_10FULL)
279 status |= MII_FULL | MII_10M;
280 else
281 status |= MII_HALF | MII_10M;
282
283 return status;
284}
285
286void fs_mii_link_status_change_check(struct net_device *dev, int init_media)
287{
288 struct fs_enet_private *fep = netdev_priv(dev);
289 struct mii_if_info *mii = &fep->mii_if;
290 unsigned int mii_status;
291 int ok_to_print, link, duplex, speed;
292 unsigned long flags;
293
294 ok_to_print = netif_msg_link(fep);
295
296 mii_status = mii_full_status(mii);
297
298 if (!init_media && mii_status == fep->last_mii_status)
299 return;
300
301 fep->last_mii_status = mii_status;
302
303 link = !!(mii_status & MII_LINK);
304 duplex = !!(mii_status & MII_FULL);
305 speed = (mii_status & MII_100M) ? 100 : 10;
306
307 if (link == 0) {
308 netif_carrier_off(mii->dev);
309 netif_stop_queue(dev);
310 if (!init_media) {
311 spin_lock_irqsave(&fep->lock, flags);
312 (*fep->ops->stop)(dev);
313 spin_unlock_irqrestore(&fep->lock, flags);
314 }
315
316 if (ok_to_print)
317 printk(KERN_INFO "%s: link down\n", mii->dev->name);
318
319 } else {
320
321 mii->full_duplex = duplex;
322
323 netif_carrier_on(mii->dev);
324
325 spin_lock_irqsave(&fep->lock, flags);
326 fep->duplex = duplex;
327 fep->speed = speed;
328 (*fep->ops->restart)(dev);
329 spin_unlock_irqrestore(&fep->lock, flags);
330
331 netif_start_queue(dev);
332
333 if (ok_to_print)
334 printk(KERN_INFO "%s: link up, %dMbps, %s-duplex\n",
335 dev->name, speed, duplex ? "full" : "half");
336 }
337}
338
339/**********************************************************************************/
340
341int fs_mii_read(struct net_device *dev, int phy_id, int location)
342{
343 struct fs_enet_private *fep = netdev_priv(dev);
344 struct fs_enet_mii_bus *bus = fep->mii_bus;
345
346 unsigned long flags;
347 int ret;
348
349 spin_lock_irqsave(&bus->mii_lock, flags);
350 ret = (*bus->mii_read)(bus, phy_id, location);
351 spin_unlock_irqrestore(&bus->mii_lock, flags);
352
353 return ret;
354}
355
356void fs_mii_write(struct net_device *dev, int phy_id, int location, int value)
357{
358 struct fs_enet_private *fep = netdev_priv(dev);
359 struct fs_enet_mii_bus *bus = fep->mii_bus;
360 unsigned long flags;
361
362 spin_lock_irqsave(&bus->mii_lock, flags);
363 (*bus->mii_write)(bus, phy_id, location, value);
364 spin_unlock_irqrestore(&bus->mii_lock, flags);
365}
366
367/*****************************************************************************/
368
369/* list of all registered mii buses */
370static LIST_HEAD(fs_mii_bus_list);
371
372static struct fs_enet_mii_bus *lookup_bus(int method, int id)
373{
374 struct list_head *ptr;
375 struct fs_enet_mii_bus *bus;
376
377 list_for_each(ptr, &fs_mii_bus_list) {
378 bus = list_entry(ptr, struct fs_enet_mii_bus, list);
379 if (bus->bus_info->method == method &&
380 bus->bus_info->id == id)
381 return bus;
382 }
383 return NULL;
384}
385
386static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
387{
388 struct fs_enet_mii_bus *bus;
389 int ret = 0;
390
391 bus = kmalloc(sizeof(*bus), GFP_KERNEL);
392 if (bus == NULL) {
393 ret = -ENOMEM;
394 goto err;
395 }
396 memset(bus, 0, sizeof(*bus));
397 spin_lock_init(&bus->mii_lock);
398 bus->bus_info = bi;
399 bus->refs = 0;
400 bus->usage_map = 0;
401
402 /* perform initialization */
403 switch (bi->method) {
404
405 case fsmii_fixed:
406 ret = fs_mii_fixed_init(bus);
407 if (ret != 0)
408 goto err;
409 break;
410
411 case fsmii_bitbang:
412 ret = fs_mii_bitbang_init(bus);
413 if (ret != 0)
414 goto err;
415 break;
416#ifdef CONFIG_FS_ENET_HAS_FEC
417 case fsmii_fec:
418 ret = fs_mii_fec_init(bus);
419 if (ret != 0)
420 goto err;
421 break;
422#endif
423 default:
424 ret = -EINVAL;
425 goto err;
426 }
427
428 list_add(&bus->list, &fs_mii_bus_list);
429
430 return bus;
431
432err:
433 kfree(bus);
434 return ERR_PTR(ret);
435}
436
437static void destroy_bus(struct fs_enet_mii_bus *bus)
438{
439 /* remove from bus list */
440 list_del(&bus->list);
441
442 /* nothing more needed */
443 kfree(bus);
444}
445
446int fs_mii_connect(struct net_device *dev)
447{
448 struct fs_enet_private *fep = netdev_priv(dev);
449 const struct fs_platform_info *fpi = fep->fpi;
450 struct fs_enet_mii_bus *bus = NULL;
451
452 /* check method validity */
453 switch (fpi->bus_info->method) {
454 case fsmii_fixed:
455 case fsmii_bitbang:
456 break;
457#ifdef CONFIG_FS_ENET_HAS_FEC
458 case fsmii_fec:
459 break;
460#endif
461 default:
462 printk(KERN_ERR DRV_MODULE_NAME
463 ": %s Unknown MII bus method (%d)!\n",
464 dev->name, fpi->bus_info->method);
465 return -EINVAL;
466 }
467
468 bus = lookup_bus(fpi->bus_info->method, fpi->bus_info->id);
469
470 /* if not found create new bus */
471 if (bus == NULL) {
472 bus = create_bus(fpi->bus_info);
473 if (IS_ERR(bus)) {
474 printk(KERN_ERR DRV_MODULE_NAME
475 ": %s MII bus creation failure!\n", dev->name);
476 return PTR_ERR(bus);
477 }
478 }
479
480 bus->refs++;
481
482 fep->mii_bus = bus;
483
484 fep->mii_if.dev = dev;
485 fep->mii_if.phy_id_mask = 0x1f;
486 fep->mii_if.reg_num_mask = 0x1f;
487 fep->mii_if.mdio_read = fs_mii_read;
488 fep->mii_if.mdio_write = fs_mii_write;
489 fep->mii_if.force_media = fpi->bus_info->disable_aneg;
490 fep->mii_if.phy_id = phy_id_detect(dev);
491
492 return 0;
493}
494
495void fs_mii_disconnect(struct net_device *dev)
496{
497 struct fs_enet_private *fep = netdev_priv(dev);
498 struct fs_enet_mii_bus *bus = NULL;
499
500 bus = fep->mii_bus;
501 fep->mii_bus = NULL;
502
503 if (--bus->refs <= 0)
504 destroy_bus(bus);
505}
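
The deleted mii_full_status() above derived speed and duplex from the intersection of the local advertisement and the link partner ability words. A standalone sketch of that decoding (the 100BASE4 case is omitted for brevity), assuming the two register values have already been read from the PHY:

	#include <linux/mii.h>

	static void decode_aneg(u16 adv, u16 lpa, int *speed, int *full_duplex)
	{
		u16 neg = adv & lpa;	/* highest common mode wins */

		if (neg & LPA_100FULL) {
			*speed = 100; *full_duplex = 1;
		} else if (neg & LPA_100HALF) {
			*speed = 100; *full_duplex = 0;
		} else if (neg & LPA_10FULL) {
			*speed = 10;  *full_duplex = 1;
		} else {
			*speed = 10;  *full_duplex = 0;
		}
	}
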
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index e7ec96c964a9..95022c005f75 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -5,6 +5,7 @@
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/list.h> 7#include <linux/list.h>
8#include <linux/phy.h>
8 9
9#include <linux/fs_enet_pd.h> 10#include <linux/fs_enet_pd.h>
10 11
@@ -12,12 +13,30 @@
12 13
13#ifdef CONFIG_CPM1 14#ifdef CONFIG_CPM1
14#include <asm/commproc.h> 15#include <asm/commproc.h>
16
17struct fec_info {
18 fec_t* fecp;
19 u32 mii_speed;
20};
15#endif 21#endif
16 22
17#ifdef CONFIG_CPM2 23#ifdef CONFIG_CPM2
18#include <asm/cpm2.h> 24#include <asm/cpm2.h>
19#endif 25#endif
20 26
27/* This is used to operate with pins.
28 Note that the actual port size may
29 be different; cpm(s) handle it OK */
30struct bb_info {
31 u8 mdio_dat_msk;
32 u8 mdio_dir_msk;
33 u8 *mdio_dir;
34 u8 *mdio_dat;
35 u8 mdc_msk;
36 u8 *mdc_dat;
37 int delay;
38};
39
21/* hw driver ops */ 40/* hw driver ops */
22struct fs_ops { 41struct fs_ops {
23 int (*setup_data)(struct net_device *dev); 42 int (*setup_data)(struct net_device *dev);
@@ -25,6 +44,7 @@ struct fs_ops {
25 void (*free_bd)(struct net_device *dev); 44 void (*free_bd)(struct net_device *dev);
26 void (*cleanup_data)(struct net_device *dev); 45 void (*cleanup_data)(struct net_device *dev);
27 void (*set_multicast_list)(struct net_device *dev); 46 void (*set_multicast_list)(struct net_device *dev);
47 void (*adjust_link)(struct net_device *dev);
28 void (*restart)(struct net_device *dev); 48 void (*restart)(struct net_device *dev);
29 void (*stop)(struct net_device *dev); 49 void (*stop)(struct net_device *dev);
30 void (*pre_request_irq)(struct net_device *dev, int irq); 50 void (*pre_request_irq)(struct net_device *dev, int irq);
@@ -100,10 +120,6 @@ struct fs_enet_mii_bus {
100 }; 120 };
101}; 121};
102 122
103int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus);
104int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
105int fs_mii_fec_init(struct fs_enet_mii_bus *bus);
106
107struct fs_enet_private { 123struct fs_enet_private {
108 struct device *dev; /* pointer back to the device (must be initialized first) */ 124 struct device *dev; /* pointer back to the device (must be initialized first) */
109 spinlock_t lock; /* during all ops except TX pckt processing */ 125 spinlock_t lock; /* during all ops except TX pckt processing */
@@ -130,7 +146,8 @@ struct fs_enet_private {
130 struct fs_enet_mii_bus *mii_bus; 146 struct fs_enet_mii_bus *mii_bus;
131 int interrupt; 147 int interrupt;
132 148
133 int duplex, speed; /* current settings */ 149 struct phy_device *phydev;
150 int oldduplex, oldspeed, oldlink; /* current settings */
134 151
135 /* event masks */ 152 /* event masks */
136 u32 ev_napi_rx; /* mask of NAPI rx events */ 153 u32 ev_napi_rx; /* mask of NAPI rx events */
@@ -168,15 +185,9 @@ struct fs_enet_private {
168}; 185};
169 186
170/***************************************************************************/ 187/***************************************************************************/
171 188int fs_enet_mdio_bb_init(void);
172int fs_mii_read(struct net_device *dev, int phy_id, int location); 189int fs_mii_fixed_init(struct fs_enet_mii_bus *bus);
173void fs_mii_write(struct net_device *dev, int phy_id, int location, int value); 190int fs_enet_mdio_fec_init(void);
174
175void fs_mii_startup(struct net_device *dev);
176void fs_mii_shutdown(struct net_device *dev);
177void fs_mii_ack_int(struct net_device *dev);
178
179void fs_mii_link_status_change_check(struct net_device *dev, int init_media);
180 191
181void fs_init_bds(struct net_device *dev); 192void fs_init_bds(struct net_device *dev);
182void fs_cleanup_bds(struct net_device *dev); 193void fs_cleanup_bds(struct net_device *dev);
@@ -194,7 +205,6 @@ int fs_enet_platform_init(void);
194void fs_enet_platform_cleanup(void); 205void fs_enet_platform_cleanup(void);
195 206
196/***************************************************************************/ 207/***************************************************************************/
197
198/* buffer descriptor access macros */ 208/* buffer descriptor access macros */
199 209
200/* access macros */ 210/* access macros */
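
fs_enet_private now carries a phy_device pointer plus cached oldlink/oldspeed/oldduplex values, and fs_ops gains an adjust_link hook. A rough sketch of the callback shape phylib expects for those fields; restart_mac() is a stand-in for the driver's restart op, not a real symbol:

	static void example_adjust_link(struct net_device *dev)
	{
		struct fs_enet_private *fep = netdev_priv(dev);
		struct phy_device *phydev = fep->phydev;
		int changed = 0;

		if (phydev->link) {
			if (phydev->speed != fep->oldspeed ||
			    phydev->duplex != fep->oldduplex) {
				fep->oldspeed = phydev->speed;
				fep->oldduplex = phydev->duplex;
				changed = 1;
			}
			if (!fep->oldlink) {
				fep->oldlink = 1;
				changed = 1;
			}
		} else if (fep->oldlink) {
			fep->oldlink = 0;
			fep->oldspeed = 0;
			fep->oldduplex = -1;
			changed = 1;
		}

		if (changed)
			restart_mac(dev);	/* hypothetical: reprogram speed/duplex */
	}
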
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 64e20982c1fe..1ff2597b8495 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -34,6 +34,7 @@
34#include <linux/bitops.h> 34#include <linux/bitops.h>
35#include <linux/fs.h> 35#include <linux/fs.h>
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/phy.h>
37 38
38#include <asm/immap_cpm2.h> 39#include <asm/immap_cpm2.h>
39#include <asm/mpc8260.h> 40#include <asm/mpc8260.h>
@@ -122,22 +123,32 @@ static int do_pd_setup(struct fs_enet_private *fep)
122 123
123 /* Attach the memory for the FCC Parameter RAM */ 124 /* Attach the memory for the FCC Parameter RAM */
124 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); 125 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
125 fep->fcc.ep = (void *)r->start; 126 fep->fcc.ep = (void *)ioremap(r->start, r->end - r->start + 1);
126
127 if (fep->fcc.ep == NULL) 127 if (fep->fcc.ep == NULL)
128 return -EINVAL; 128 return -EINVAL;
129 129
130 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs"); 130 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
131 fep->fcc.fccp = (void *)r->start; 131 fep->fcc.fccp = (void *)ioremap(r->start, r->end - r->start + 1);
132
133 if (fep->fcc.fccp == NULL) 132 if (fep->fcc.fccp == NULL)
134 return -EINVAL; 133 return -EINVAL;
135 134
136 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c; 135 if (fep->fpi->fcc_regs_c) {
136
137 fep->fcc.fcccp = (void *)fep->fpi->fcc_regs_c;
138 } else {
139 r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
140 "fcc_regs_c");
141 fep->fcc.fcccp = (void *)ioremap(r->start,
142 r->end - r->start + 1);
143 }
137 144
138 if (fep->fcc.fcccp == NULL) 145 if (fep->fcc.fcccp == NULL)
139 return -EINVAL; 146 return -EINVAL;
140 147
148 fep->fcc.mem = (void *)fep->fpi->mem_offset;
149 if (fep->fcc.mem == NULL)
150 return -EINVAL;
151
141 return 0; 152 return 0;
142} 153}
143 154
@@ -155,8 +166,6 @@ static int setup_data(struct net_device *dev)
155 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */ 166 if ((unsigned int)fep->fcc.idx >= 3) /* max 3 FCCs */
156 return -EINVAL; 167 return -EINVAL;
157 168
158 fep->fcc.mem = (void *)fpi->mem_offset;
159
160 if (do_pd_setup(fep) != 0) 169 if (do_pd_setup(fep) != 0)
161 return -EINVAL; 170 return -EINVAL;
162 171
@@ -394,7 +403,7 @@ static void restart(struct net_device *dev)
394 403
395 /* adjust to speed (for RMII mode) */ 404 /* adjust to speed (for RMII mode) */
396 if (fpi->use_rmii) { 405 if (fpi->use_rmii) {
397 if (fep->speed == 100) 406 if (fep->phydev->speed == 100)
398 C8(fcccp, fcc_gfemr, 0x20); 407 C8(fcccp, fcc_gfemr, 0x20);
399 else 408 else
400 S8(fcccp, fcc_gfemr, 0x20); 409 S8(fcccp, fcc_gfemr, 0x20);
@@ -420,7 +429,7 @@ static void restart(struct net_device *dev)
420 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII); 429 S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
421 430
422 /* adjust to duplex mode */ 431 /* adjust to duplex mode */
423 if (fep->duplex) 432 if (fep->phydev->duplex)
424 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); 433 S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
425 else 434 else
426 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB); 435 C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
@@ -486,7 +495,10 @@ static void rx_bd_done(struct net_device *dev)
486 495
487static void tx_kickstart(struct net_device *dev) 496static void tx_kickstart(struct net_device *dev)
488{ 497{
489 /* nothing */ 498 struct fs_enet_private *fep = netdev_priv(dev);
499 fcc_t *fccp = fep->fcc.fccp;
500
501 S32(fccp, fcc_ftodr, 0x80);
490} 502}
491 503
492static u32 get_int_events(struct net_device *dev) 504static u32 get_int_events(struct net_device *dev)
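
do_pd_setup() above switches from treating the platform resource start as a CPU pointer to ioremap()ing each named register block. The same pattern, factored into a small helper (the resource name is whatever the board code registered, e.g. "fcc_regs"):

	static void __iomem *map_named_regs(struct platform_device *pdev,
					    const char *name)
	{
		struct resource *r;

		r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
		if (r == NULL)
			return NULL;

		/* map the physical register window into kernel virtual space */
		return ioremap(r->start, r->end - r->start + 1);
	}
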
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index e09547077529..c2c5fd419bd0 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -46,6 +46,7 @@
46#endif 46#endif
47 47
48#include "fs_enet.h" 48#include "fs_enet.h"
49#include "fec.h"
49 50
50/*************************************************/ 51/*************************************************/
51 52
@@ -75,50 +76,8 @@
75/* clear bits */ 76/* clear bits */
76#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v)) 77#define FC(_fecp, _reg, _v) FW(_fecp, _reg, FR(_fecp, _reg) & ~(_v))
77 78
78
79/* CRC polynomium used by the FEC for the multicast group filtering */
80#define FEC_CRC_POLY 0x04C11DB7
81
82#define FEC_MAX_MULTICAST_ADDRS 64
83
84/* Interrupt events/masks.
85*/
86#define FEC_ENET_HBERR 0x80000000U /* Heartbeat error */
87#define FEC_ENET_BABR 0x40000000U /* Babbling receiver */
88#define FEC_ENET_BABT 0x20000000U /* Babbling transmitter */
89#define FEC_ENET_GRA 0x10000000U /* Graceful stop complete */
90#define FEC_ENET_TXF 0x08000000U /* Full frame transmitted */
91#define FEC_ENET_TXB 0x04000000U /* A buffer was transmitted */
92#define FEC_ENET_RXF 0x02000000U /* Full frame received */
93#define FEC_ENET_RXB 0x01000000U /* A buffer was received */
94#define FEC_ENET_MII 0x00800000U /* MII interrupt */
95#define FEC_ENET_EBERR 0x00400000U /* SDMA bus error */
96
97#define FEC_ECNTRL_PINMUX 0x00000004
98#define FEC_ECNTRL_ETHER_EN 0x00000002
99#define FEC_ECNTRL_RESET 0x00000001
100
101#define FEC_RCNTRL_BC_REJ 0x00000010
102#define FEC_RCNTRL_PROM 0x00000008
103#define FEC_RCNTRL_MII_MODE 0x00000004
104#define FEC_RCNTRL_DRT 0x00000002
105#define FEC_RCNTRL_LOOP 0x00000001
106
107#define FEC_TCNTRL_FDEN 0x00000004
108#define FEC_TCNTRL_HBC 0x00000002
109#define FEC_TCNTRL_GTS 0x00000001
110
111
112/* Make MII read/write commands for the FEC.
113*/
114#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
115#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
116#define mk_mii_end 0
117
118#define FEC_MII_LOOPS 10000
119
120/* 79/*
121 * Delay to wait for FEC reset command to complete (in us) 80 * Delay to wait for FEC reset command to complete (in us)
122 */ 81 */
123#define FEC_RESET_DELAY 50 82#define FEC_RESET_DELAY 50
124 83
@@ -303,13 +262,15 @@ static void restart(struct net_device *dev)
303 int r; 262 int r;
304 u32 addrhi, addrlo; 263 u32 addrhi, addrlo;
305 264
265 struct mii_bus* mii = fep->phydev->bus;
266 struct fec_info* fec_inf = mii->priv;
267
306 r = whack_reset(fep->fec.fecp); 268 r = whack_reset(fep->fec.fecp);
307 if (r != 0) 269 if (r != 0)
308 printk(KERN_ERR DRV_MODULE_NAME 270 printk(KERN_ERR DRV_MODULE_NAME
309 ": %s FEC Reset FAILED!\n", dev->name); 271 ": %s FEC Reset FAILED!\n", dev->name);
310
311 /* 272 /*
312 * Set station address. 273 * Set station address.
313 */ 274 */
314 addrhi = ((u32) dev->dev_addr[0] << 24) | 275 addrhi = ((u32) dev->dev_addr[0] << 24) |
315 ((u32) dev->dev_addr[1] << 16) | 276 ((u32) dev->dev_addr[1] << 16) |
@@ -350,12 +311,12 @@ static void restart(struct net_device *dev)
350 FW(fecp, fun_code, 0x78000000); 311 FW(fecp, fun_code, 0x78000000);
351 312
352 /* 313 /*
353 * Set MII speed. 314 * Set MII speed.
354 */ 315 */
355 FW(fecp, mii_speed, fep->mii_bus->fec.mii_speed); 316 FW(fecp, mii_speed, fec_inf->mii_speed);
356 317
357 /* 318 /*
358 * Clear any outstanding interrupt. 319 * Clear any outstanding interrupt.
359 */ 320 */
360 FW(fecp, ievent, 0xffc0); 321 FW(fecp, ievent, 0xffc0);
361 FW(fecp, ivec, (fep->interrupt / 2) << 29); 322 FW(fecp, ivec, (fep->interrupt / 2) << 29);
@@ -390,11 +351,12 @@ static void restart(struct net_device *dev)
390 } 351 }
391#endif 352#endif
392 353
354
393 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 355 FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
394 /* 356 /*
395 * adjust to duplex mode 357 * adjust to duplex mode
396 */ 358 */
397 if (fep->duplex) { 359 if (fep->phydev->duplex) {
398 FC(fecp, r_cntrl, FEC_RCNTRL_DRT); 360 FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
399 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */ 361 FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
400 } else { 362 } else {
@@ -418,9 +380,11 @@ static void restart(struct net_device *dev)
418static void stop(struct net_device *dev) 380static void stop(struct net_device *dev)
419{ 381{
420 struct fs_enet_private *fep = netdev_priv(dev); 382 struct fs_enet_private *fep = netdev_priv(dev);
383 const struct fs_platform_info *fpi = fep->fpi;
421 fec_t *fecp = fep->fec.fecp; 384 fec_t *fecp = fep->fec.fecp;
422 struct fs_enet_mii_bus *bus = fep->mii_bus; 385
423 const struct fs_mii_bus_info *bi = bus->bus_info; 386 struct fec_info* feci= fep->phydev->bus->priv;
387
424 int i; 388 int i;
425 389
426 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0) 390 if ((FR(fecp, ecntrl) & FEC_ECNTRL_ETHER_EN) == 0)
@@ -444,11 +408,11 @@ static void stop(struct net_device *dev)
444 fs_cleanup_bds(dev); 408 fs_cleanup_bds(dev);
445 409
446 /* shut down FEC1? that's where the mii bus is */ 410 /* shut down FEC1? that's where the mii bus is */
447 if (fep->fec.idx == 0 && bus->refs > 1 && bi->method == fsmii_fec) { 411 if (fpi->has_phy) {
448 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */ 412 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
449 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN); 413 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
450 FW(fecp, ievent, FEC_ENET_MII); 414 FW(fecp, ievent, FEC_ENET_MII);
451 FW(fecp, mii_speed, bus->fec.mii_speed); 415 FW(fecp, mii_speed, feci->mii_speed);
452 } 416 }
453} 417}
454 418
@@ -583,73 +547,3 @@ const struct fs_ops fs_fec_ops = {
583 .free_bd = free_bd, 547 .free_bd = free_bd,
584}; 548};
585 549
586/***********************************************************************/
587
588static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
589{
590 fec_t *fecp = bus->fec.fecp;
591 int i, ret = -1;
592
593 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
594 BUG();
595
596 /* Add PHY address to register command. */
597 FW(fecp, mii_data, (phy_id << 23) | mk_mii_read(location));
598
599 for (i = 0; i < FEC_MII_LOOPS; i++)
600 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
601 break;
602
603 if (i < FEC_MII_LOOPS) {
604 FW(fecp, ievent, FEC_ENET_MII);
605 ret = FR(fecp, mii_data) & 0xffff;
606 }
607
608 return ret;
609}
610
611static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int value)
612{
613 fec_t *fecp = bus->fec.fecp;
614 int i;
615
616 /* this must never happen */
617 if ((FR(fecp, r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
618 BUG();
619
620 /* Add PHY address to register command. */
621 FW(fecp, mii_data, (phy_id << 23) | mk_mii_write(location, value));
622
623 for (i = 0; i < FEC_MII_LOOPS; i++)
624 if ((FR(fecp, ievent) & FEC_ENET_MII) != 0)
625 break;
626
627 if (i < FEC_MII_LOOPS)
628 FW(fecp, ievent, FEC_ENET_MII);
629}
630
631int fs_mii_fec_init(struct fs_enet_mii_bus *bus)
632{
633 bd_t *bd = (bd_t *)__res;
634 const struct fs_mii_bus_info *bi = bus->bus_info;
635 fec_t *fecp;
636
637 if (bi->id != 0)
638 return -1;
639
640 bus->fec.fecp = &((immap_t *)fs_enet_immap)->im_cpm.cp_fec;
641 bus->fec.mii_speed = ((((bd->bi_intfreq + 4999999) / 2500000) / 2)
642 & 0x3F) << 1;
643
644 fecp = bus->fec.fecp;
645
646 FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
647 FS(fecp, ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
648 FW(fecp, ievent, FEC_ENET_MII);
649 FW(fecp, mii_speed, bus->fec.mii_speed);
650
651 bus->mii_read = mii_read;
652 bus->mii_write = mii_write;
653
654 return 0;
655}
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index eaa24fab645f..95ec5872c507 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -369,7 +369,7 @@ static void restart(struct net_device *dev)
369 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22); 369 W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
370 370
371 /* Set full duplex mode if needed */ 371 /* Set full duplex mode if needed */
372 if (fep->duplex) 372 if (fep->phydev->duplex)
373 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); 373 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
374 374
375 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 375 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
@@ -500,6 +500,8 @@ static void tx_restart(struct net_device *dev)
500 scc_cr_cmd(fep, CPM_CR_RESTART_TX); 500 scc_cr_cmd(fep, CPM_CR_RESTART_TX);
501} 501}
502 502
503
504
503/*************************************************************************/ 505/*************************************************************************/
504 506
505const struct fs_ops fs_scc_ops = { 507const struct fs_ops fs_scc_ops = {
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index 48f9cf83ab6f..0b9b8b5c847c 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -33,6 +33,7 @@
33#include <linux/mii.h> 33#include <linux/mii.h>
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/bitops.h> 35#include <linux/bitops.h>
36#include <linux/platform_device.h>
36 37
37#include <asm/pgtable.h> 38#include <asm/pgtable.h>
38#include <asm/irq.h> 39#include <asm/irq.h>
@@ -40,129 +41,25 @@
40 41
41#include "fs_enet.h" 42#include "fs_enet.h"
42 43
43#ifdef CONFIG_8xx 44static int bitbang_prep_bit(u8 **datp, u8 *mskp,
44static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit) 45 struct fs_mii_bit *mii_bit)
45{ 46{
46 immap_t *im = (immap_t *)fs_enet_immap; 47 void *dat;
47 void *dir, *dat, *ppar;
48 int adv; 48 int adv;
49 u8 msk; 49 u8 msk;
50 50
51 switch (port) { 51 dat = (void*) mii_bit->offset;
52 case fsiop_porta:
53 dir = &im->im_ioport.iop_padir;
54 dat = &im->im_ioport.iop_padat;
55 ppar = &im->im_ioport.iop_papar;
56 break;
57
58 case fsiop_portb:
59 dir = &im->im_cpm.cp_pbdir;
60 dat = &im->im_cpm.cp_pbdat;
61 ppar = &im->im_cpm.cp_pbpar;
62 break;
63
64 case fsiop_portc:
65 dir = &im->im_ioport.iop_pcdir;
66 dat = &im->im_ioport.iop_pcdat;
67 ppar = &im->im_ioport.iop_pcpar;
68 break;
69
70 case fsiop_portd:
71 dir = &im->im_ioport.iop_pddir;
72 dat = &im->im_ioport.iop_pddat;
73 ppar = &im->im_ioport.iop_pdpar;
74 break;
75
76 case fsiop_porte:
77 dir = &im->im_cpm.cp_pedir;
78 dat = &im->im_cpm.cp_pedat;
79 ppar = &im->im_cpm.cp_pepar;
80 break;
81
82 default:
83 printk(KERN_ERR DRV_MODULE_NAME
84 "Illegal port value %d!\n", port);
85 return -EINVAL;
86 }
87
88 adv = bit >> 3;
89 dir = (char *)dir + adv;
90 dat = (char *)dat + adv;
91 ppar = (char *)ppar + adv;
92
93 msk = 1 << (7 - (bit & 7));
94 if ((in_8(ppar) & msk) != 0) {
95 printk(KERN_ERR DRV_MODULE_NAME
96 "pin %d on port %d is not general purpose!\n", bit, port);
97 return -EINVAL;
98 }
99
100 *dirp = dir;
101 *datp = dat;
102 *mskp = msk;
103
104 return 0;
105}
106#endif
107
108#ifdef CONFIG_8260
109static int bitbang_prep_bit(u8 **dirp, u8 **datp, u8 *mskp, int port, int bit)
110{
111 iop_cpm2_t *io = &((cpm2_map_t *)fs_enet_immap)->im_ioport;
112 void *dir, *dat, *ppar;
113 int adv;
114 u8 msk;
115
116 switch (port) {
117 case fsiop_porta:
118 dir = &io->iop_pdira;
119 dat = &io->iop_pdata;
120 ppar = &io->iop_ppara;
121 break;
122
123 case fsiop_portb:
124 dir = &io->iop_pdirb;
125 dat = &io->iop_pdatb;
126 ppar = &io->iop_pparb;
127 break;
128
129 case fsiop_portc:
130 dir = &io->iop_pdirc;
131 dat = &io->iop_pdatc;
132 ppar = &io->iop_pparc;
133 break;
134
135 case fsiop_portd:
136 dir = &io->iop_pdird;
137 dat = &io->iop_pdatd;
138 ppar = &io->iop_ppard;
139 break;
140
141 default:
142 printk(KERN_ERR DRV_MODULE_NAME
143 "Illegal port value %d!\n", port);
144 return -EINVAL;
145 }
146 52
147 adv = bit >> 3; 53 adv = mii_bit->bit >> 3;
148 dir = (char *)dir + adv;
149 dat = (char *)dat + adv; 54 dat = (char *)dat + adv;
150 ppar = (char *)ppar + adv;
151 55
152 msk = 1 << (7 - (bit & 7)); 56 msk = 1 << (7 - (mii_bit->bit & 7));
153 if ((in_8(ppar) & msk) != 0) {
154 printk(KERN_ERR DRV_MODULE_NAME
155 "pin %d on port %d is not general purpose!\n", bit, port);
156 return -EINVAL;
157 }
158 57
159 *dirp = dir;
160 *datp = dat; 58 *datp = dat;
161 *mskp = msk; 59 *mskp = msk;
162 60
163 return 0; 61 return 0;
164} 62}
165#endif
166 63
167static inline void bb_set(u8 *p, u8 m) 64static inline void bb_set(u8 *p, u8 m)
168{ 65{
@@ -179,44 +76,44 @@ static inline int bb_read(u8 *p, u8 m)
179 return (in_8(p) & m) != 0; 76 return (in_8(p) & m) != 0;
180} 77}
181 78
182static inline void mdio_active(struct fs_enet_mii_bus *bus) 79static inline void mdio_active(struct bb_info *bitbang)
183{ 80{
184 bb_set(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); 81 bb_set(bitbang->mdio_dir, bitbang->mdio_dir_msk);
185} 82}
186 83
187static inline void mdio_tristate(struct fs_enet_mii_bus *bus) 84static inline void mdio_tristate(struct bb_info *bitbang )
188{ 85{
189 bb_clr(bus->bitbang.mdio_dir, bus->bitbang.mdio_msk); 86 bb_clr(bitbang->mdio_dir, bitbang->mdio_dir_msk);
190} 87}
191 88
192static inline int mdio_read(struct fs_enet_mii_bus *bus) 89static inline int mdio_read(struct bb_info *bitbang )
193{ 90{
194 return bb_read(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); 91 return bb_read(bitbang->mdio_dat, bitbang->mdio_dat_msk);
195} 92}
196 93
197static inline void mdio(struct fs_enet_mii_bus *bus, int what) 94static inline void mdio(struct bb_info *bitbang , int what)
198{ 95{
199 if (what) 96 if (what)
200 bb_set(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); 97 bb_set(bitbang->mdio_dat, bitbang->mdio_dat_msk);
201 else 98 else
202 bb_clr(bus->bitbang.mdio_dat, bus->bitbang.mdio_msk); 99 bb_clr(bitbang->mdio_dat, bitbang->mdio_dat_msk);
203} 100}
204 101
205static inline void mdc(struct fs_enet_mii_bus *bus, int what) 102static inline void mdc(struct bb_info *bitbang , int what)
206{ 103{
207 if (what) 104 if (what)
208 bb_set(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); 105 bb_set(bitbang->mdc_dat, bitbang->mdc_msk);
209 else 106 else
210 bb_clr(bus->bitbang.mdc_dat, bus->bitbang.mdc_msk); 107 bb_clr(bitbang->mdc_dat, bitbang->mdc_msk);
211} 108}
212 109
213static inline void mii_delay(struct fs_enet_mii_bus *bus) 110static inline void mii_delay(struct bb_info *bitbang )
214{ 111{
215 udelay(bus->bus_info->i.bitbang.delay); 112 udelay(bitbang->delay);
216} 113}
217 114
218/* Utility to send the preamble, address, and register (common to read and write). */ 115/* Utility to send the preamble, address, and register (common to read and write). */
219static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg) 116static void bitbang_pre(struct bb_info *bitbang , int read, u8 addr, u8 reg)
220{ 117{
221 int j; 118 int j;
222 119
@@ -228,177 +125,284 @@ static void bitbang_pre(struct fs_enet_mii_bus *bus, int read, u8 addr, u8 reg)
228 * but it is safer and will be much more robust. 125 * but it is safer and will be much more robust.
229 */ 126 */
230 127
231 mdio_active(bus); 128 mdio_active(bitbang);
232 mdio(bus, 1); 129 mdio(bitbang, 1);
233 for (j = 0; j < 32; j++) { 130 for (j = 0; j < 32; j++) {
234 mdc(bus, 0); 131 mdc(bitbang, 0);
235 mii_delay(bus); 132 mii_delay(bitbang);
236 mdc(bus, 1); 133 mdc(bitbang, 1);
237 mii_delay(bus); 134 mii_delay(bitbang);
238 } 135 }
239 136
240 /* send the start bit (01) and the read opcode (10) or write (10) */ 137 /* send the start bit (01) and the read opcode (10) or write (10) */
241 mdc(bus, 0); 138 mdc(bitbang, 0);
242 mdio(bus, 0); 139 mdio(bitbang, 0);
243 mii_delay(bus); 140 mii_delay(bitbang);
244 mdc(bus, 1); 141 mdc(bitbang, 1);
245 mii_delay(bus); 142 mii_delay(bitbang);
246 mdc(bus, 0); 143 mdc(bitbang, 0);
247 mdio(bus, 1); 144 mdio(bitbang, 1);
248 mii_delay(bus); 145 mii_delay(bitbang);
249 mdc(bus, 1); 146 mdc(bitbang, 1);
250 mii_delay(bus); 147 mii_delay(bitbang);
251 mdc(bus, 0); 148 mdc(bitbang, 0);
252 mdio(bus, read); 149 mdio(bitbang, read);
253 mii_delay(bus); 150 mii_delay(bitbang);
254 mdc(bus, 1); 151 mdc(bitbang, 1);
255 mii_delay(bus); 152 mii_delay(bitbang);
256 mdc(bus, 0); 153 mdc(bitbang, 0);
257 mdio(bus, !read); 154 mdio(bitbang, !read);
258 mii_delay(bus); 155 mii_delay(bitbang);
259 mdc(bus, 1); 156 mdc(bitbang, 1);
260 mii_delay(bus); 157 mii_delay(bitbang);
261 158
262 /* send the PHY address */ 159 /* send the PHY address */
263 for (j = 0; j < 5; j++) { 160 for (j = 0; j < 5; j++) {
264 mdc(bus, 0); 161 mdc(bitbang, 0);
265 mdio(bus, (addr & 0x10) != 0); 162 mdio(bitbang, (addr & 0x10) != 0);
266 mii_delay(bus); 163 mii_delay(bitbang);
267 mdc(bus, 1); 164 mdc(bitbang, 1);
268 mii_delay(bus); 165 mii_delay(bitbang);
269 addr <<= 1; 166 addr <<= 1;
270 } 167 }
271 168
272 /* send the register address */ 169 /* send the register address */
273 for (j = 0; j < 5; j++) { 170 for (j = 0; j < 5; j++) {
274 mdc(bus, 0); 171 mdc(bitbang, 0);
275 mdio(bus, (reg & 0x10) != 0); 172 mdio(bitbang, (reg & 0x10) != 0);
276 mii_delay(bus); 173 mii_delay(bitbang);
277 mdc(bus, 1); 174 mdc(bitbang, 1);
278 mii_delay(bus); 175 mii_delay(bitbang);
279 reg <<= 1; 176 reg <<= 1;
280 } 177 }
281} 178}
282 179
283static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location) 180static int fs_enet_mii_bb_read(struct mii_bus *bus , int phy_id, int location)
284{ 181{
285 u16 rdreg; 182 u16 rdreg;
286 int ret, j; 183 int ret, j;
287 u8 addr = phy_id & 0xff; 184 u8 addr = phy_id & 0xff;
288 u8 reg = location & 0xff; 185 u8 reg = location & 0xff;
186 struct bb_info* bitbang = bus->priv;
289 187
290 bitbang_pre(bus, 1, addr, reg); 188 bitbang_pre(bitbang, 1, addr, reg);
291 189
292 /* tri-state our MDIO I/O pin so we can read */ 190 /* tri-state our MDIO I/O pin so we can read */
293 mdc(bus, 0); 191 mdc(bitbang, 0);
294 mdio_tristate(bus); 192 mdio_tristate(bitbang);
295 mii_delay(bus); 193 mii_delay(bitbang);
296 mdc(bus, 1); 194 mdc(bitbang, 1);
297 mii_delay(bus); 195 mii_delay(bitbang);
298 196
299 /* check the turnaround bit: the PHY should be driving it to zero */ 197 /* check the turnaround bit: the PHY should be driving it to zero */
300 if (mdio_read(bus) != 0) { 198 if (mdio_read(bitbang) != 0) {
301 /* PHY didn't drive TA low */ 199 /* PHY didn't drive TA low */
302 for (j = 0; j < 32; j++) { 200 for (j = 0; j < 32; j++) {
303 mdc(bus, 0); 201 mdc(bitbang, 0);
304 mii_delay(bus); 202 mii_delay(bitbang);
305 mdc(bus, 1); 203 mdc(bitbang, 1);
306 mii_delay(bus); 204 mii_delay(bitbang);
307 } 205 }
308 ret = -1; 206 ret = -1;
309 goto out; 207 goto out;
310 } 208 }
311 209
312 mdc(bus, 0); 210 mdc(bitbang, 0);
313 mii_delay(bus); 211 mii_delay(bitbang);
314 212
315 /* read 16 bits of register data, MSB first */ 213 /* read 16 bits of register data, MSB first */
316 rdreg = 0; 214 rdreg = 0;
317 for (j = 0; j < 16; j++) { 215 for (j = 0; j < 16; j++) {
318 mdc(bus, 1); 216 mdc(bitbang, 1);
319 mii_delay(bus); 217 mii_delay(bitbang);
320 rdreg <<= 1; 218 rdreg <<= 1;
321 rdreg |= mdio_read(bus); 219 rdreg |= mdio_read(bitbang);
322 mdc(bus, 0); 220 mdc(bitbang, 0);
323 mii_delay(bus); 221 mii_delay(bitbang);
324 } 222 }
325 223
326 mdc(bus, 1); 224 mdc(bitbang, 1);
327 mii_delay(bus); 225 mii_delay(bitbang);
328 mdc(bus, 0); 226 mdc(bitbang, 0);
329 mii_delay(bus); 227 mii_delay(bitbang);
330 mdc(bus, 1); 228 mdc(bitbang, 1);
331 mii_delay(bus); 229 mii_delay(bitbang);
332 230
333 ret = rdreg; 231 ret = rdreg;
334out: 232out:
335 return ret; 233 return ret;
336} 234}
337 235
338static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val) 236static int fs_enet_mii_bb_write(struct mii_bus *bus, int phy_id, int location, u16 val)
339{ 237{
340 int j; 238 int j;
239 struct bb_info* bitbang = bus->priv;
240
341 u8 addr = phy_id & 0xff; 241 u8 addr = phy_id & 0xff;
342 u8 reg = location & 0xff; 242 u8 reg = location & 0xff;
343 u16 value = val & 0xffff; 243 u16 value = val & 0xffff;
344 244
345 bitbang_pre(bus, 0, addr, reg); 245 bitbang_pre(bitbang, 0, addr, reg);
346 246
347 /* send the turnaround (10) */ 247 /* send the turnaround (10) */
348 mdc(bus, 0); 248 mdc(bitbang, 0);
349 mdio(bus, 1); 249 mdio(bitbang, 1);
350 mii_delay(bus); 250 mii_delay(bitbang);
351 mdc(bus, 1); 251 mdc(bitbang, 1);
352 mii_delay(bus); 252 mii_delay(bitbang);
353 mdc(bus, 0); 253 mdc(bitbang, 0);
354 mdio(bus, 0); 254 mdio(bitbang, 0);
355 mii_delay(bus); 255 mii_delay(bitbang);
356 mdc(bus, 1); 256 mdc(bitbang, 1);
357 mii_delay(bus); 257 mii_delay(bitbang);
358 258
359 /* write 16 bits of register data, MSB first */ 259 /* write 16 bits of register data, MSB first */
360 for (j = 0; j < 16; j++) { 260 for (j = 0; j < 16; j++) {
361 mdc(bus, 0); 261 mdc(bitbang, 0);
362 mdio(bus, (value & 0x8000) != 0); 262 mdio(bitbang, (value & 0x8000) != 0);
363 mii_delay(bus); 263 mii_delay(bitbang);
364 mdc(bus, 1); 264 mdc(bitbang, 1);
365 mii_delay(bus); 265 mii_delay(bitbang);
366 value <<= 1; 266 value <<= 1;
367 } 267 }
368 268
369 /* 269 /*
370 * Tri-state the MDIO line. 270 * Tri-state the MDIO line.
371 */ 271 */
372 mdio_tristate(bus); 272 mdio_tristate(bitbang);
373 mdc(bus, 0); 273 mdc(bitbang, 0);
374 mii_delay(bus); 274 mii_delay(bitbang);
375 mdc(bus, 1); 275 mdc(bitbang, 1);
376 mii_delay(bus); 276 mii_delay(bitbang);
277 return 0;
377} 278}
378 279
379int fs_mii_bitbang_init(struct fs_enet_mii_bus *bus) 280static int fs_enet_mii_bb_reset(struct mii_bus *bus)
281{
282 /*nothing here - dunno how to reset it*/
283 return 0;
284}
285
286static int fs_mii_bitbang_init(struct bb_info *bitbang, struct fs_mii_bb_platform_info* fmpi)
380{ 287{
381 const struct fs_mii_bus_info *bi = bus->bus_info;
382 int r; 288 int r;
383 289
384 r = bitbang_prep_bit(&bus->bitbang.mdio_dir, 290 bitbang->delay = fmpi->delay;
385 &bus->bitbang.mdio_dat, 291
386 &bus->bitbang.mdio_msk, 292 r = bitbang_prep_bit(&bitbang->mdio_dir,
387 bi->i.bitbang.mdio_port, 293 &bitbang->mdio_dir_msk,
388 bi->i.bitbang.mdio_bit); 294 &fmpi->mdio_dir);
389 if (r != 0) 295 if (r != 0)
390 return r; 296 return r;
391 297
392 r = bitbang_prep_bit(&bus->bitbang.mdc_dir, 298 r = bitbang_prep_bit(&bitbang->mdio_dat,
393 &bus->bitbang.mdc_dat, 299 &bitbang->mdio_dat_msk,
394 &bus->bitbang.mdc_msk, 300 &fmpi->mdio_dat);
395 bi->i.bitbang.mdc_port,
396 bi->i.bitbang.mdc_bit);
397 if (r != 0) 301 if (r != 0)
398 return r; 302 return r;
399 303
400 bus->mii_read = mii_read; 304 r = bitbang_prep_bit(&bitbang->mdc_dat,
401 bus->mii_write = mii_write; 305 &bitbang->mdc_msk,
306 &fmpi->mdc_dat);
307 if (r != 0)
308 return r;
402 309
403 return 0; 310 return 0;
404} 311}
312
313
314static int __devinit fs_enet_mdio_probe(struct device *dev)
315{
316 struct platform_device *pdev = to_platform_device(dev);
317 struct fs_mii_bb_platform_info *pdata;
318 struct mii_bus *new_bus;
319 struct bb_info *bitbang;
320 int err = 0;
321
322 if (NULL == dev)
323 return -EINVAL;
324
325 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
326
327 if (NULL == new_bus)
328 return -ENOMEM;
329
330 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
331
332 if (NULL == bitbang)
333 return -ENOMEM;
334
335 new_bus->name = "BB MII Bus",
336 new_bus->read = &fs_enet_mii_bb_read,
337 new_bus->write = &fs_enet_mii_bb_write,
338 new_bus->reset = &fs_enet_mii_bb_reset,
339 new_bus->id = pdev->id;
340
341 new_bus->phy_mask = ~0x9;
342 pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
343
344 if (NULL == pdata) {
345 printk(KERN_ERR "gfar mdio %d: Missing platform data!\n", pdev->id);
346 return -ENODEV;
347 }
348
349 /*set up workspace*/
350 fs_mii_bitbang_init(bitbang, pdata);
351
352 new_bus->priv = bitbang;
353
354 new_bus->irq = pdata->irq;
355
356 new_bus->dev = dev;
357 dev_set_drvdata(dev, new_bus);
358
359 err = mdiobus_register(new_bus);
360
361 if (0 != err) {
362 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
363 new_bus->name);
364 goto bus_register_fail;
365 }
366
367 return 0;
368
369bus_register_fail:
370 kfree(bitbang);
371 kfree(new_bus);
372
373 return err;
374}
375
376
377static int fs_enet_mdio_remove(struct device *dev)
378{
379 struct mii_bus *bus = dev_get_drvdata(dev);
380
381 mdiobus_unregister(bus);
382
383 dev_set_drvdata(dev, NULL);
384
385 iounmap((void *) (&bus->priv));
386 bus->priv = NULL;
387 kfree(bus);
388
389 return 0;
390}
391
392static struct device_driver fs_enet_bb_mdio_driver = {
393 .name = "fsl-bb-mdio",
394 .bus = &platform_bus_type,
395 .probe = fs_enet_mdio_probe,
396 .remove = fs_enet_mdio_remove,
397};
398
399int fs_enet_mdio_bb_init(void)
400{
401 return driver_register(&fs_enet_bb_mdio_driver);
402}
403
404void fs_enet_mdio_bb_exit(void)
405{
406 driver_unregister(&fs_enet_bb_mdio_driver);
407}
408
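
bitbang_pre() above clocks out the fixed part of an MDIO frame bit by bit: 32 preamble ones, a start pattern (01), the opcode (10 for read, 01 for write), then the 5-bit PHY and register addresses, MSB first. A compact sketch of the same header, assuming a hypothetical clock_bit() that drives MDIO and pulses MDC once per bit:

	static void mdio_send_header(struct bb_info *bitbang, int read,
				     u8 addr, u8 reg)
	{
		u32 hdr;
		int i;

		for (i = 0; i < 32; i++)
			clock_bit(bitbang, 1);		/* preamble */

		/* start(01) | opcode | addr[4:0] | reg[4:0] = 14 bits */
		hdr = (0x1 << 12) | ((read ? 0x2 : 0x1) << 10) |
		      ((addr & 0x1f) << 5) | (reg & 0x1f);

		for (i = 13; i >= 0; i--)
			clock_bit(bitbang, (hdr >> i) & 1);	/* MSB first */
	}
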
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
new file mode 100644
index 000000000000..1328e10caa35
--- /dev/null
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -0,0 +1,243 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/config.h>
17#include <linux/module.h>
18#include <linux/types.h>
19#include <linux/kernel.h>
20#include <linux/sched.h>
21#include <linux/string.h>
22#include <linux/ptrace.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/delay.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/mii.h>
35#include <linux/ethtool.h>
36#include <linux/bitops.h>
37#include <linux/platform_device.h>
38
39#include <asm/pgtable.h>
40#include <asm/irq.h>
41#include <asm/uaccess.h>
42
43#include "fs_enet.h"
44#include "fec.h"
45
46/* Make MII read/write commands for the FEC.
47*/
48#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
49#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | (VAL & 0xffff))
50#define mk_mii_end 0
51
52#define FEC_MII_LOOPS 10000
53
54static int match_has_phy (struct device *dev, void* data)
55{
56 struct platform_device* pdev = container_of(dev, struct platform_device, dev);
57 struct fs_platform_info* fpi;
58 if(strcmp(pdev->name, (char*)data))
59 {
60 return 0;
61 }
62
63 fpi = pdev->dev.platform_data;
64 if((fpi)&&(fpi->has_phy))
65 return 1;
66 return 0;
67}
68
69static int fs_mii_fec_init(struct fec_info* fec, struct fs_mii_fec_platform_info *fmpi)
70{
71 struct resource *r;
72 fec_t *fecp;
73 char* name = "fsl-cpm-fec";
74
75 /* we need fec in order to be useful */
76 struct platform_device *fec_pdev =
77 container_of(bus_find_device(&platform_bus_type, NULL, name, match_has_phy),
78 struct platform_device, dev);
79
80 if(fec_pdev == NULL) {
81 printk(KERN_ERR"Unable to find PHY for %s", name);
82 return -ENODEV;
83 }
84
85 r = platform_get_resource_byname(fec_pdev, IORESOURCE_MEM, "regs");
86
87 fec->fecp = fecp = (fec_t*)ioremap(r->start,sizeof(fec_t));
88 fec->mii_speed = fmpi->mii_speed;
89
90 setbits32(&fecp->fec_r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
91 setbits32(&fecp->fec_ecntrl, FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN);
92 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
93 out_be32(&fecp->fec_mii_speed, fec->mii_speed);
94
95 return 0;
96}
97
98static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
99{
100 struct fec_info* fec = bus->priv;
101 fec_t *fecp = fec->fecp;
102 int i, ret = -1;
103
104 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
105 BUG();
106
107 /* Add PHY address to register command. */
108 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_read(location));
109
110 for (i = 0; i < FEC_MII_LOOPS; i++)
111 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
112 break;
113
114 if (i < FEC_MII_LOOPS) {
115 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
116 ret = in_be32(&fecp->fec_mii_data) & 0xffff;
117 }
118
119 return ret;
120
121}
122
123static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
124{
125 struct fec_info* fec = bus->priv;
126 fec_t *fecp = fec->fecp;
127 int i;
128
129 /* this must never happen */
130 if ((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0)
131 BUG();
132
133 /* Add PHY address to register command. */
134 out_be32(&fecp->fec_mii_data, (phy_id << 23) | mk_mii_write(location, val));
135
136 for (i = 0; i < FEC_MII_LOOPS; i++)
137 if ((in_be32(&fecp->fec_ievent) & FEC_ENET_MII) != 0)
138 break;
139
140 if (i < FEC_MII_LOOPS)
141 out_be32(&fecp->fec_ievent, FEC_ENET_MII);
142
143 return 0;
144
145}
146
147static int fs_enet_fec_mii_reset(struct mii_bus *bus)
148{
149 /* nothing here - for now */
150 return 0;
151}
152
153static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
154{
155 struct platform_device *pdev = to_platform_device(dev);
156 struct fs_mii_fec_platform_info *pdata;
157 struct mii_bus *new_bus;
158 struct fec_info *fec;
159 int err = 0;
160 if (NULL == dev)
161 return -EINVAL;
162 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
163
164 if (NULL == new_bus)
165 return -ENOMEM;
166
167 fec = kzalloc(sizeof(struct fec_info), GFP_KERNEL);
168
169 if (NULL == fec)
170 return -ENOMEM;
171
172 new_bus->name = "FEC MII Bus",
173 new_bus->read = &fs_enet_fec_mii_read,
174 new_bus->write = &fs_enet_fec_mii_write,
175 new_bus->reset = &fs_enet_fec_mii_reset,
176 new_bus->id = pdev->id;
177
178 pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
179
180 if (NULL == pdata) {
181 printk(KERN_ERR "fs_enet FEC mdio %d: Missing platform data!\n", pdev->id);
182 return -ENODEV;
183 }
184
185 /*set up workspace*/
186
187 fs_mii_fec_init(fec, pdata);
188 new_bus->priv = fec;
189
190 new_bus->irq = pdata->irq;
191
192 new_bus->dev = dev;
193 dev_set_drvdata(dev, new_bus);
194
195 err = mdiobus_register(new_bus);
196
197 if (0 != err) {
198 printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
199 new_bus->name);
200 goto bus_register_fail;
201 }
202
203 return 0;
204
205bus_register_fail:
206 kfree(new_bus);
207
208 return err;
209}
210
211
212static int fs_enet_fec_mdio_remove(struct device *dev)
213{
214 struct mii_bus *bus = dev_get_drvdata(dev);
215
216 mdiobus_unregister(bus);
217
218 dev_set_drvdata(dev, NULL);
219 kfree(bus->priv);
220
221 bus->priv = NULL;
222 kfree(bus);
223
224 return 0;
225}
226
227static struct device_driver fs_enet_fec_mdio_driver = {
228 .name = "fsl-cpm-fec-mdio",
229 .bus = &platform_bus_type,
230 .probe = fs_enet_fec_mdio_probe,
231 .remove = fs_enet_fec_mdio_remove,
232};
233
234int fs_enet_mdio_fec_init(void)
235{
236 return driver_register(&fs_enet_fec_mdio_driver);
237}
238
239void fs_enet_mdio_fec_exit(void)
240{
241 driver_unregister(&fs_enet_fec_mdio_driver);
242}
243
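
fs_enet_fec_mii_read() above is a polled transaction: write a command word to the FEC's mii_data register, then spin on the MII event bit for at most FEC_MII_LOOPS iterations. Stripped of the register details, the shape is as follows (write_cmd()/mii_done()/read_data() are hypothetical accessors):

	static int polled_mdio_read(void *hw, int phy_id, int reg)
	{
		int i;

		write_cmd(hw, phy_id, reg);		/* start the MII frame */

		for (i = 0; i < FEC_MII_LOOPS; i++)	/* bounded busy-wait */
			if (mii_done(hw))
				return read_data(hw) & 0xffff;

		return -1;				/* timed out */
	}
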
diff --git a/drivers/net/fs_enet/mii-fixed.c b/drivers/net/fs_enet/mii-fixed.c
deleted file mode 100644
index ae4a9c3bb393..000000000000
--- a/drivers/net/fs_enet/mii-fixed.c
+++ /dev/null
@@ -1,91 +0,0 @@
1/*
2 * Combined Ethernet driver for Motorola MPC8xx and MPC82xx.
3 *
4 * Copyright (c) 2003 Intracom S.A.
5 * by Pantelis Antoniou <panto@intracom.gr>
6 *
7 * 2005 (c) MontaVista Software, Inc.
8 * Vitaly Bordug <vbordug@ru.mvista.com>
9 *
10 * This file is licensed under the terms of the GNU General Public License
11 * version 2. This program is licensed "as is" without any warranty of any
12 * kind, whether express or implied.
13 */
14
15
16#include <linux/module.h>
17#include <linux/types.h>
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/string.h>
21#include <linux/ptrace.h>
22#include <linux/errno.h>
23#include <linux/ioport.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mii.h>
34#include <linux/ethtool.h>
35#include <linux/bitops.h>
36
37#include <asm/pgtable.h>
38#include <asm/irq.h>
39#include <asm/uaccess.h>
40
41#include "fs_enet.h"
42
43static const u16 mii_regs[7] = {
44 0x3100,
45 0x786d,
46 0x0fff,
47 0x0fff,
48 0x01e1,
49 0x45e1,
50 0x0003,
51};
52
53static int mii_read(struct fs_enet_mii_bus *bus, int phy_id, int location)
54{
55 int ret = 0;
56
57 if ((unsigned int)location >= ARRAY_SIZE(mii_regs))
58 return -1;
59
60 if (location != 5)
61 ret = mii_regs[location];
62 else
63 ret = bus->fixed.lpa;
64
65 return ret;
66}
67
68static void mii_write(struct fs_enet_mii_bus *bus, int phy_id, int location, int val)
69{
70 /* do nothing */
71}
72
73int fs_mii_fixed_init(struct fs_enet_mii_bus *bus)
74{
75 const struct fs_mii_bus_info *bi = bus->bus_info;
76
77 bus->fixed.lpa = 0x45e1; /* default 100Mb, full duplex */
78
79 /* if speed is fixed at 10Mb, remove 100Mb modes */
80 if (bi->i.fixed.speed == 10)
81 bus->fixed.lpa &= ~LPA_100;
82
83 /* if duplex is half, remove full duplex modes */
84 if (bi->i.fixed.duplex == 0)
85 bus->fixed.lpa &= ~LPA_DUPLEX;
86
87 bus->mii_read = mii_read;
88 bus->mii_write = mii_write;
89
90 return 0;
91}
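
The deleted mii-fixed.c emulated a PHY with canned register values, trimming the LPA word so the generic MII code saw the configured fixed speed and duplex as a negotiation result. The essence of that adjustment, mirroring the deleted table:

	#include <linux/mii.h>

	static u16 fixed_link_lpa(int speed, int full_duplex)
	{
		u16 lpa = 0x45e1;		/* default: 100 Mb/s, full duplex */

		if (speed == 10)
			lpa &= ~LPA_100;	/* drop the 100 Mb/s modes */
		if (!full_duplex)
			lpa &= ~LPA_DUPLEX;	/* drop the full-duplex modes */

		return lpa;
	}
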
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index c1c3452c90ca..5b4dbfe5fb77 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -326,7 +326,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
326MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); 326MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
327MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); 327MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");
328 328
329int init_module(void) 329int __init init_module(void)
330{ 330{
331 struct net_device *dev; 331 struct net_device *dev;
332 int this_dev, found = 0; 332 int this_dev, found = 0;
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 646e89fc3562..c0ec7f6abcb2 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -406,7 +406,7 @@ MODULE_PARM_DESC(mem, "memory base address(es)");
406MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); 406MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver");
407MODULE_LICENSE("GPL"); 407MODULE_LICENSE("GPL");
408 408
409int init_module(void) 409int __init init_module(void)
410{ 410{
411 struct net_device *dev; 411 struct net_device *dev;
412 int this_dev, found = 0; 412 int this_dev, found = 0;
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index fa854c8fde75..4d52ecf8af56 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -1323,7 +1323,7 @@ MODULE_PARM_DESC(irq, "NI5210 IRQ number,required");
1323MODULE_PARM_DESC(memstart, "NI5210 memory base address,required"); 1323MODULE_PARM_DESC(memstart, "NI5210 memory base address,required");
1324MODULE_PARM_DESC(memend, "NI5210 memory end address,required"); 1324MODULE_PARM_DESC(memend, "NI5210 memory end address,required");
1325 1325
1326int init_module(void) 1326int __init init_module(void)
1327{ 1327{
1328 if(io <= 0x0 || !memend || !memstart || irq < 2) { 1328 if(io <= 0x0 || !memend || !memstart || irq < 2) {
1329 printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); 1329 printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n");
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index bb42ff218484..810cc572f5f7 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -1253,7 +1253,7 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)");
1253MODULE_PARM_DESC(io, "ni6510 I/O base address"); 1253MODULE_PARM_DESC(io, "ni6510 I/O base address");
1254MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); 1254MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
1255 1255
1256int init_module(void) 1256int __init init_module(void)
1257{ 1257{
1258 dev_ni65 = ni65_probe(-1); 1258 dev_ni65 = ni65_probe(-1);
1259 return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0; 1259 return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 9bae77ce1314..4122bb46f5ff 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -345,6 +345,7 @@ typedef struct local_info_t {
345 void __iomem *dingo_ccr; /* only used for CEM56 cards */ 345 void __iomem *dingo_ccr; /* only used for CEM56 cards */
346 unsigned last_ptr_value; /* last packets transmitted value */ 346 unsigned last_ptr_value; /* last packets transmitted value */
347 const char *manf_str; 347 const char *manf_str;
348 struct work_struct tx_timeout_task;
348} local_info_t; 349} local_info_t;
349 350
350/**************** 351/****************
@@ -352,6 +353,7 @@ typedef struct local_info_t {
352 */ 353 */
353static int do_start_xmit(struct sk_buff *skb, struct net_device *dev); 354static int do_start_xmit(struct sk_buff *skb, struct net_device *dev);
354static void do_tx_timeout(struct net_device *dev); 355static void do_tx_timeout(struct net_device *dev);
356static void xirc2ps_tx_timeout_task(void *data);
355static struct net_device_stats *do_get_stats(struct net_device *dev); 357static struct net_device_stats *do_get_stats(struct net_device *dev);
356static void set_addresses(struct net_device *dev); 358static void set_addresses(struct net_device *dev);
357static void set_multicast_list(struct net_device *dev); 359static void set_multicast_list(struct net_device *dev);
@@ -589,6 +591,7 @@ xirc2ps_probe(struct pcmcia_device *link)
589#ifdef HAVE_TX_TIMEOUT 591#ifdef HAVE_TX_TIMEOUT
590 dev->tx_timeout = do_tx_timeout; 592 dev->tx_timeout = do_tx_timeout;
591 dev->watchdog_timeo = TX_TIMEOUT; 593 dev->watchdog_timeo = TX_TIMEOUT;
594 INIT_WORK(&local->tx_timeout_task, xirc2ps_tx_timeout_task, dev);
592#endif 595#endif
593 596
594 return xirc2ps_config(link); 597 return xirc2ps_config(link);
@@ -1341,17 +1344,24 @@ xirc2ps_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1341/*====================================================================*/ 1344/*====================================================================*/
1342 1345
1343static void 1346static void
1344do_tx_timeout(struct net_device *dev) 1347xirc2ps_tx_timeout_task(void *data)
1345{ 1348{
1346 local_info_t *lp = netdev_priv(dev); 1349 struct net_device *dev = data;
1347 printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
1348 lp->stats.tx_errors++;
1349 /* reset the card */ 1350 /* reset the card */
1350 do_reset(dev,1); 1351 do_reset(dev,1);
1351 dev->trans_start = jiffies; 1352 dev->trans_start = jiffies;
1352 netif_wake_queue(dev); 1353 netif_wake_queue(dev);
1353} 1354}
1354 1355
1356static void
1357do_tx_timeout(struct net_device *dev)
1358{
1359 local_info_t *lp = netdev_priv(dev);
1360 lp->stats.tx_errors++;
1361 printk(KERN_NOTICE "%s: transmit timed out\n", dev->name);
1362 schedule_work(&lp->tx_timeout_task);
1363}
1364
1355static int 1365static int
1356do_start_xmit(struct sk_buff *skb, struct net_device *dev) 1366do_start_xmit(struct sk_buff *skb, struct net_device *dev)
1357{ 1367{
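The xirc2ps change moves the actual card reset out of the tx_timeout watchdog path into process context via a work queue. A generic sketch of the same deferral pattern, using the three-argument INIT_WORK()/schedule_work() form visible in the hunk above; the my_* names are made up for illustration:

#include <linux/workqueue.h>
#include <linux/netdevice.h>

struct my_priv {
	struct work_struct reset_task;
};

static void my_reset_task(void *data)		/* runs in process context */
{
	struct net_device *dev = data;

	/* slow hardware reset goes here, then restart the queue */
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}

static void my_tx_timeout(struct net_device *dev)	/* watchdog context */
{
	struct my_priv *lp = netdev_priv(dev);

	schedule_work(&lp->reset_task);		/* defer the heavy work */
}

/* at probe time: INIT_WORK(&lp->reset_task, my_reset_task, dev); */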
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 4daafe303358..d50bcb89dd28 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -202,6 +202,8 @@ static int homepna[MAX_UNITS];
202#define CSR15 15 202#define CSR15 15
203#define PCNET32_MC_FILTER 8 203#define PCNET32_MC_FILTER 8
204 204
205#define PCNET32_79C970A 0x2621
206
205/* The PCNET32 Rx and Tx ring descriptors. */ 207/* The PCNET32 Rx and Tx ring descriptors. */
206struct pcnet32_rx_head { 208struct pcnet32_rx_head {
207 u32 base; 209 u32 base;
@@ -289,6 +291,7 @@ struct pcnet32_private {
289 291
290 /* each bit indicates an available PHY */ 292 /* each bit indicates an available PHY */
291 u32 phymask; 293 u32 phymask;
294 unsigned short chip_version; /* which variant this is */
292}; 295};
293 296
294static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); 297static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
@@ -724,9 +727,11 @@ static u32 pcnet32_get_link(struct net_device *dev)
724 spin_lock_irqsave(&lp->lock, flags); 727 spin_lock_irqsave(&lp->lock, flags);
725 if (lp->mii) { 728 if (lp->mii) {
726 r = mii_link_ok(&lp->mii_if); 729 r = mii_link_ok(&lp->mii_if);
727 } else { 730 } else if (lp->chip_version >= PCNET32_79C970A) {
728 ulong ioaddr = dev->base_addr; /* card base I/O address */ 731 ulong ioaddr = dev->base_addr; /* card base I/O address */
729 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); 732 r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
733 } else { /* can not detect link on really old chips */
734 r = 1;
730 } 735 }
731 spin_unlock_irqrestore(&lp->lock, flags); 736 spin_unlock_irqrestore(&lp->lock, flags);
732 737
@@ -1091,6 +1096,10 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
1091 ulong ioaddr = dev->base_addr; 1096 ulong ioaddr = dev->base_addr;
1092 int ticks; 1097 int ticks;
1093 1098
1099 /* really old chips have to be stopped. */
1100 if (lp->chip_version < PCNET32_79C970A)
1101 return 0;
1102
1094 /* set SUSPEND (SPND) - CSR5 bit 0 */ 1103 /* set SUSPEND (SPND) - CSR5 bit 0 */
1095 csr5 = a->read_csr(ioaddr, CSR5); 1104 csr5 = a->read_csr(ioaddr, CSR5);
1096 a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND); 1105 a->write_csr(ioaddr, CSR5, csr5 | CSR5_SUSPEND);
@@ -1529,6 +1538,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
1529 lp->mii_if.reg_num_mask = 0x1f; 1538 lp->mii_if.reg_num_mask = 0x1f;
1530 lp->dxsuflo = dxsuflo; 1539 lp->dxsuflo = dxsuflo;
1531 lp->mii = mii; 1540 lp->mii = mii;
1541 lp->chip_version = chip_version;
1532 lp->msg_enable = pcnet32_debug; 1542 lp->msg_enable = pcnet32_debug;
1533 if ((cards_found >= MAX_UNITS) 1543 if ((cards_found >= MAX_UNITS)
1534 || (options[cards_found] > sizeof(options_mapping))) 1544 || (options[cards_found] > sizeof(options_mapping)))
@@ -1839,10 +1849,7 @@ static int pcnet32_open(struct net_device *dev)
1839 val |= 2; 1849 val |= 2;
1840 } else if (lp->options & PCNET32_PORT_ASEL) { 1850 } else if (lp->options & PCNET32_PORT_ASEL) {
1841 /* workaround of xSeries250, turn on for 79C975 only */ 1851 /* workaround of xSeries250, turn on for 79C975 only */
1842 i = ((lp->a.read_csr(ioaddr, 88) | 1852 if (lp->chip_version == 0x2627)
1843 (lp->a.
1844 read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff;
1845 if (i == 0x2627)
1846 val |= 3; 1853 val |= 3;
1847 } 1854 }
1848 lp->a.write_bcr(ioaddr, 9, val); 1855 lp->a.write_bcr(ioaddr, 9, val);
@@ -1986,9 +1993,11 @@ static int pcnet32_open(struct net_device *dev)
1986 1993
1987 netif_start_queue(dev); 1994 netif_start_queue(dev);
1988 1995
1989 /* Print the link status and start the watchdog */ 1996 if (lp->chip_version >= PCNET32_79C970A) {
1990 pcnet32_check_media(dev, 1); 1997 /* Print the link status and start the watchdog */
1991 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); 1998 pcnet32_check_media(dev, 1);
1999 mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT);
2000 }
1992 2001
1993 i = 0; 2002 i = 0;
1994 while (i++ < 100) 2003 while (i++ < 100)
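All the pcnet32 hunks follow one pattern: read the chip revision once at probe time (replacing the removed open-coded CSR88/89 read), cache it in the private struct, and gate newer-silicon features on it. A condensed, illustrative helper built on the cached field and the PCNET32_79C970A constant defined above:

/* Illustrative: parts older than the 79C970A have no SPND bit and no
 * reliable link status, so the suspend and link paths bail out early. */
static inline int pcnet32_has_link_status(struct pcnet32_private *lp)
{
	return lp->chip_version >= PCNET32_79C970A;
}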
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 2ba6d3a40e2e..b79ec0d7480f 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -56,5 +56,22 @@ config SMSC_PHY
56 ---help--- 56 ---help---
57 Currently supports the LAN83C185 PHY 57 Currently supports the LAN83C185 PHY
58 58
59config FIXED_PHY
60 tristate "Drivers for PHY emulation on fixed speed/link"
61 depends on PHYLIB
62 ---help---
63	  Adds a driver to the PHY layer to cover boards that do not have any PHY bound,
64	  but need the ability to manipulate the speed/link in software. The relevant MII
65	  speed/duplex parameters can then be handled in a user-specified function.
66 Currently tested with mpc866ads.
67
68config FIXED_MII_10_FDX
69 bool "Emulation for 10M Fdx fixed PHY behavior"
70 depends on FIXED_PHY
71
72config FIXED_MII_100_FDX
73 bool "Emulation for 100M Fdx fixed PHY behavior"
74 depends on FIXED_PHY
75
59endmenu 76endmenu
60 77
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index a00e61942525..320f8323123f 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_LXT_PHY) += lxt.o
10obj-$(CONFIG_QSEMI_PHY) += qsemi.o 10obj-$(CONFIG_QSEMI_PHY) += qsemi.o
11obj-$(CONFIG_SMSC_PHY) += smsc.o 11obj-$(CONFIG_SMSC_PHY) += smsc.o
12obj-$(CONFIG_VITESSE_PHY) += vitesse.o 12obj-$(CONFIG_VITESSE_PHY) += vitesse.o
13obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
new file mode 100644
index 000000000000..341036df4710
--- /dev/null
+++ b/drivers/net/phy/fixed.c
@@ -0,0 +1,358 @@
1/*
2 * drivers/net/phy/fixed.c
3 *
4 * Driver for fixed PHYs, where the transceiver operates in a single fixed mode.
5 *
6 * Author: Vitaly Bordug
7 *
8 * Copyright (c) 2006 MontaVista Software, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 */
16#include <linux/config.h>
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/string.h>
20#include <linux/errno.h>
21#include <linux/unistd.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/init.h>
25#include <linux/delay.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/spinlock.h>
30#include <linux/mm.h>
31#include <linux/module.h>
32#include <linux/mii.h>
33#include <linux/ethtool.h>
34#include <linux/phy.h>
35
36#include <asm/io.h>
37#include <asm/irq.h>
38#include <asm/uaccess.h>
39
40#define MII_REGS_NUM 7
41
42/*
43 The idea is to emulate normal PHY behavior by responding with
44 pre-defined values to MII BMCR/BMSR reads, so that the read_status hook can
45 pick up all the needed info.
46*/
47
48struct fixed_phy_status {
49 u8 link;
50 u16 speed;
51 u8 duplex;
52};
53
54/*-----------------------------------------------------------------------------
55 * Private information holder for mii_bus
56 *-----------------------------------------------------------------------------*/
57struct fixed_info {
58 u16 *regs;
59 u8 regs_num;
60 struct fixed_phy_status phy_status;
61 struct phy_device *phydev; /* pointer to the container */
62 /* link & speed cb */
63 int(*link_update)(struct net_device*, struct fixed_phy_status*);
64
65};
66
67/*-----------------------------------------------------------------------------
68 * If something unusual needs to be done with the link/speed,
69 * the network driver can assign a function to implement it.
70 * May be useful for PHYs that need to be software-driven.
71 *-----------------------------------------------------------------------------*/
72int fixed_mdio_set_link_update(struct phy_device* phydev,
73 int(*link_update)(struct net_device*, struct fixed_phy_status*))
74{
75 struct fixed_info *fixed;
76
77 if(link_update == NULL)
78 return -EINVAL;
79
80 if(phydev) {
81 if(phydev->bus) {
82 fixed = phydev->bus->priv;
83 fixed->link_update = link_update;
84 return 0;
85 }
86 }
87 return -EINVAL;
88}
89EXPORT_SYMBOL(fixed_mdio_set_link_update);
90
91/*-----------------------------------------------------------------------------
92 * This is used for updating internal mii regs from the status
93 *-----------------------------------------------------------------------------*/
94static int fixed_mdio_update_regs(struct fixed_info *fixed)
95{
96 u16 *regs = fixed->regs;
97 u16 bmsr = 0;
98 u16 bmcr = 0;
99
100 if(!regs) {
101 printk(KERN_ERR "%s: regs not set up\n", __FUNCTION__);
102 return -EINVAL;
103 }
104
105 if(fixed->phy_status.link)
106 bmsr |= BMSR_LSTATUS;
107
108 if(fixed->phy_status.duplex) {
109 bmcr |= BMCR_FULLDPLX;
110
111 switch ( fixed->phy_status.speed ) {
112 case 100:
113 bmsr |= BMSR_100FULL;
114 bmcr |= BMCR_SPEED100;
115 break;
116
117 case 10:
118 bmsr |= BMSR_10FULL;
119 break;
120 }
121 } else {
122 switch ( fixed->phy_status.speed ) {
123 case 100:
124 bmsr |= BMSR_100HALF;
125 bmcr |= BMCR_SPEED100;
126 break;
127
128 case 10:
129 bmsr |= BMSR_10HALF;
130 break;
131 }
132 }
133
134 regs[MII_BMCR] = bmcr;
135 regs[MII_BMSR] = bmsr | 0x800; /*we are always capable of 10 hdx*/
136
137 return 0;
138}
139
140static int fixed_mii_read(struct mii_bus *bus, int phy_id, int location)
141{
142 struct fixed_info *fixed = bus->priv;
143
144 /* if user has registered link update callback, use it */
145 if(fixed->phydev)
146 if(fixed->phydev->attached_dev) {
147 if(fixed->link_update) {
148 fixed->link_update(fixed->phydev->attached_dev,
149 &fixed->phy_status);
150 fixed_mdio_update_regs(fixed);
151 }
152 }
153
154 if ((unsigned int)location >= fixed->regs_num)
155 return -1;
156 return fixed->regs[location];
157}
158
159static int fixed_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
160{
161 /* do nothing for now*/
162 return 0;
163}
164
165static int fixed_mii_reset(struct mii_bus *bus)
166{
167 /*nothing here - no way/need to reset it*/
168 return 0;
169}
170
171static int fixed_config_aneg(struct phy_device *phydev)
172{
173 /* :TODO:03/13/2006 09:45:37 PM::
174 The full autoneg functionality can be emulated,
175 but no need to have anything here for now
176 */
177 return 0;
178}
179
180/*-----------------------------------------------------------------------------
181 * the manual bind will do the magic - with phy_id_mask == 0
182 * match will never return true...
183 *-----------------------------------------------------------------------------*/
184static struct phy_driver fixed_mdio_driver = {
185 .name = "Fixed PHY",
186 .features = PHY_BASIC_FEATURES,
187 .config_aneg = fixed_config_aneg,
188 .read_status = genphy_read_status,
189 .driver = { .owner = THIS_MODULE,},
190};
191
192/*-----------------------------------------------------------------------------
193 * This func is used to create all the necessary stuff, bind
194 * the fixed phy driver and register it all on the mdio_bus_type.
195 * speed is either 10 or 100, duplex is boolean.
196 * number is used to create multiple fixed PHYs, so that several devices can
197 * utilize them simultaneously.
198 *-----------------------------------------------------------------------------*/
199static int fixed_mdio_register_device(int number, int speed, int duplex)
200{
201 struct mii_bus *new_bus;
202 struct fixed_info *fixed;
203 struct phy_device *phydev;
204 int err = 0;
205
206 struct device* dev = kzalloc(sizeof(struct device), GFP_KERNEL);
207
208 if (NULL == dev)
209 return -ENOMEM;
210
211 new_bus = kzalloc(sizeof(struct mii_bus), GFP_KERNEL);
212
213 if (NULL == new_bus) {
214 kfree(dev);
215 return -ENOMEM;
216 }
217 fixed = kzalloc(sizeof(struct fixed_info), GFP_KERNEL);
218
219 if (NULL == fixed) {
220 kfree(dev);
221 kfree(new_bus);
222 return -ENOMEM;
223 }
224
225 fixed->regs = kzalloc(MII_REGS_NUM*sizeof(int), GFP_KERNEL);
226 fixed->regs_num = MII_REGS_NUM;
227 fixed->phy_status.speed = speed;
228 fixed->phy_status.duplex = duplex;
229 fixed->phy_status.link = 1;
230
231 new_bus->name = "Fixed MII Bus";
232 new_bus->read = &fixed_mii_read;
233 new_bus->write = &fixed_mii_write;
234 new_bus->reset = &fixed_mii_reset;
235
236 /*set up workspace*/
237 fixed_mdio_update_regs(fixed);
238 new_bus->priv = fixed;
239
240 new_bus->dev = dev;
241 dev_set_drvdata(dev, new_bus);
242
243 /* create phy_device and register it on the mdio bus */
244 phydev = phy_device_create(new_bus, 0, 0);
245
246 /*
247 Put the phydev pointer into the fixed_info so that the bus read/write code
248 can access, for instance, the attached netdev. It only has to do so when a
249 user-specified link_update callback is in use...
250 */
251 fixed->phydev = phydev;
252
253 if(NULL == phydev) {
254 err = -ENOMEM;
255 goto device_create_fail;
256 }
257
258 phydev->irq = -1;
259 phydev->dev.bus = &mdio_bus_type;
260
261 if(number)
262 snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
263 "fixed_%d@%d:%d", number, speed, duplex);
264 else
265 snprintf(phydev->dev.bus_id, BUS_ID_SIZE,
266 "fixed@%d:%d", speed, duplex);
267 phydev->bus = new_bus;
268
269 err = device_register(&phydev->dev);
270 if(err) {
271 printk(KERN_ERR "Phy %s failed to register\n",
272 phydev->dev.bus_id);
273 goto bus_register_fail;
274 }
275
276 /*
277 the mdio bus normally matches drivers by phy_id... Rather than fake
278 that, we bind the driver here by hand;
279 it will be the same for all the fixed PHYs anyway.
280 */
281 down_write(&phydev->dev.bus->subsys.rwsem);
282
283 phydev->dev.driver = &fixed_mdio_driver.driver;
284
285 err = phydev->dev.driver->probe(&phydev->dev);
286 if(err < 0) {
287 printk(KERN_ERR "Phy %s: problems with fixed driver\n",phydev->dev.bus_id);
288 up_write(&phydev->dev.bus->subsys.rwsem);
289 goto probe_fail;
290 }
291
292 device_bind_driver(&phydev->dev);
293 up_write(&phydev->dev.bus->subsys.rwsem);
294
295 return 0;
296
297probe_fail:
298 device_unregister(&phydev->dev);
299bus_register_fail:
300 kfree(phydev);
301device_create_fail:
302 kfree(dev);
303 kfree(new_bus);
304 kfree(fixed);
305
306 return err;
307}
308
309
310MODULE_DESCRIPTION("Fixed PHY device & driver for PAL");
311MODULE_AUTHOR("Vitaly Bordug");
312MODULE_LICENSE("GPL");
313
314static int __init fixed_init(void)
315{
316 int ret;
317 int duplex = 0;
318
319 /* register on the bus... Not expected to be matched with anything there... */
320 phy_driver_register(&fixed_mdio_driver);
321
322 /* So let the fun begin...
323 We will create several mdio devices here, and will bind the upper
324 driver to them.
325
326 Then the external software can lookup the phy bus by searching
327 fixed@speed:duplex, e.g. fixed@100:1, to be connected to the
328 virtual 100M Fdx phy.
329
330 If several virtual PHYs are required, the bus_id will be of the form
331 fixed_<num>@<speed>:<duplex>, which even makes it possible to define a
332 driver-specific link control callback, if for instance the PHY is completely
333 SW-driven.
334
335 */
336
337#ifdef CONFIG_FIXED_MII_DUPLEX
338 duplex = 1;
339#endif
340
341#ifdef CONFIG_FIXED_MII_100_FDX
342 fixed_mdio_register_device(0, 100, 1);
343#endif
344
345#ifdef CONFIG_FIXED_MII_10_FDX
346 fixed_mdio_register_device(0, 10, 1);
347#endif
348 return 0;
349}
350
351static void __exit fixed_exit(void)
352{
353 phy_driver_unregister(&fixed_mdio_driver);
354 /* :WARNING:02/18/2006 04:32:40 AM:: Cleanup all the created stuff */
355}
356
357module_init(fixed_init);
358module_exit(fixed_exit);
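As the comments in fixed.c explain, a MAC driver that needs software control over the emulated link can install a callback through fixed_mdio_set_link_update(); that function and struct fixed_phy_status are both defined above, while the callback body below is only an example:

/* Example callback: decide link/speed/duplex in software. */
static int my_fixed_link_update(struct net_device *dev,
				struct fixed_phy_status *status)
{
	status->link = 1;	/* e.g. poll a board-specific GPIO here */
	status->speed = 100;
	status->duplex = 1;
	return 0;
}

/* after attaching to the "fixed@100:1" phy_device: */
/* fixed_mdio_set_link_update(phydev, my_fixed_link_update); */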
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 1dde390c164d..cf6660c93ffa 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -159,6 +159,7 @@ struct bus_type mdio_bus_type = {
159 .suspend = mdio_bus_suspend, 159 .suspend = mdio_bus_suspend,
160 .resume = mdio_bus_resume, 160 .resume = mdio_bus_resume,
161}; 161};
162EXPORT_SYMBOL(mdio_bus_type);
162 163
163int __init mdio_bus_init(void) 164int __init mdio_bus_init(void)
164{ 165{
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 1bc1e032c5d6..2d1ecfdc80db 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -45,6 +45,35 @@ static struct phy_driver genphy_driver;
45extern int mdio_bus_init(void); 45extern int mdio_bus_init(void);
46extern void mdio_bus_exit(void); 46extern void mdio_bus_exit(void);
47 47
48struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
49{
50 struct phy_device *dev;
51 /* We allocate the device, and initialize the
52 * default values */
53 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
54
55 if (NULL == dev)
56 return ERR_PTR(-ENOMEM);
57
58 dev->speed = 0;
59 dev->duplex = -1;
60 dev->pause = dev->asym_pause = 0;
61 dev->link = 1;
62
63 dev->autoneg = AUTONEG_ENABLE;
64
65 dev->addr = addr;
66 dev->phy_id = phy_id;
67 dev->bus = bus;
68
69 dev->state = PHY_DOWN;
70
71 spin_lock_init(&dev->lock);
72
73 return dev;
74}
75EXPORT_SYMBOL(phy_device_create);
76
48/* get_phy_device 77/* get_phy_device
49 * 78 *
50 * description: Reads the ID registers of the PHY at addr on the 79 * description: Reads the ID registers of the PHY at addr on the
@@ -78,27 +107,7 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
78 if (0xffffffff == phy_id) 107 if (0xffffffff == phy_id)
79 return NULL; 108 return NULL;
80 109
81 /* Otherwise, we allocate the device, and initialize the 110 dev = phy_device_create(bus, addr, phy_id);
82 * default values */
83 dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
84
85 if (NULL == dev)
86 return ERR_PTR(-ENOMEM);
87
88 dev->speed = 0;
89 dev->duplex = -1;
90 dev->pause = dev->asym_pause = 0;
91 dev->link = 1;
92
93 dev->autoneg = AUTONEG_ENABLE;
94
95 dev->addr = addr;
96 dev->phy_id = phy_id;
97 dev->bus = bus;
98
99 dev->state = PHY_DOWN;
100
101 spin_lock_init(&dev->lock);
102 111
103 return dev; 112 return dev;
104} 113}
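phy_device_create() is factored out and exported mainly so fixed.c above can build a phy_device that never touches real MII registers. A sketch of the minimal call sequence, following the usage in fixed_mdio_register_device():

struct phy_device *phydev;

phydev = phy_device_create(bus, 0 /* addr */, 0 /* phy_id */);
if (phydev == NULL)
	return -ENOMEM;

phydev->irq = -1;			/* no interrupt line */
phydev->dev.bus = &mdio_bus_type;	/* exported in the mdio_bus.c hunk above */
/* ... set dev.bus_id, then device_register(&phydev->dev) ... */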
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 132ed32bce1a..e72e0e099060 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -71,6 +71,7 @@
71#include <asm/uaccess.h> 71#include <asm/uaccess.h>
72#include <asm/io.h> 72#include <asm/io.h>
73#include <asm/div64.h> 73#include <asm/div64.h>
74#include <asm/irq.h>
74 75
75/* local include */ 76/* local include */
76#include "s2io.h" 77#include "s2io.h"
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index efd0f235020f..01392bca0223 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -742,7 +742,7 @@ module_param(irq, int, 0);
742MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address"); 742MODULE_PARM_DESC(io, "SEEQ 8005 I/O base address");
743MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number"); 743MODULE_PARM_DESC(irq, "SEEQ 8005 IRQ number");
744 744
745int init_module(void) 745int __init init_module(void)
746{ 746{
747 dev_seeq = seeq8005_probe(-1); 747 dev_seeq = seeq8005_probe(-1);
748 if (IS_ERR(dev_seeq)) 748 if (IS_ERR(dev_seeq))
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 7de9a07b2ac2..ad878dfddef4 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2211,6 +2211,7 @@ static int skge_up(struct net_device *dev)
2211 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); 2211 skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F);
2212 skge_led(skge, LED_MODE_ON); 2212 skge_led(skge, LED_MODE_ON);
2213 2213
2214 netif_poll_enable(dev);
2214 return 0; 2215 return 0;
2215 2216
2216 free_rx_ring: 2217 free_rx_ring:
@@ -2279,6 +2280,7 @@ static int skge_down(struct net_device *dev)
2279 2280
2280 skge_led(skge, LED_MODE_OFF); 2281 skge_led(skge, LED_MODE_OFF);
2281 2282
2283 netif_poll_disable(dev);
2282 skge_tx_clean(skge); 2284 skge_tx_clean(skge);
2283 skge_rx_clean(skge); 2285 skge_rx_clean(skge);
2284 2286
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index de91609ca112..933e87f1cc68 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -233,6 +233,8 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
233 if (hw->ports > 1) 233 if (hw->ports > 1)
234 reg1 |= PCI_Y2_PHY2_COMA; 234 reg1 |= PCI_Y2_PHY2_COMA;
235 } 235 }
236 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
237 udelay(100);
236 238
237 if (hw->chip_id == CHIP_ID_YUKON_EC_U) { 239 if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
238 sky2_pci_write32(hw, PCI_DEV_REG3, 0); 240 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
@@ -242,9 +244,6 @@ static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
242 sky2_pci_write32(hw, PCI_DEV_REG5, 0); 244 sky2_pci_write32(hw, PCI_DEV_REG5, 0);
243 } 245 }
244 246
245 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
246 udelay(100);
247
248 break; 247 break;
249 248
250 case PCI_D3hot: 249 case PCI_D3hot:
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index d37bd860b336..0b15290df278 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1092,6 +1092,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs
1092 /* Spurious interrupt check */ 1092 /* Spurious interrupt check */
1093 if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) != 1093 if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
1094 (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) { 1094 (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
1095 spin_unlock_irqrestore(&lp->lock, flags);
1095 return IRQ_NONE; 1096 return IRQ_NONE;
1096 } 1097 }
1097 1098
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 3d8dcb6c8758..cf62373b808b 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -321,12 +321,12 @@ static void smc_reset(struct net_device *dev)
321 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 321 DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
322 322
323 /* Disable all interrupts, block TX tasklet */ 323 /* Disable all interrupts, block TX tasklet */
324 spin_lock(&lp->lock); 324 spin_lock_irq(&lp->lock);
325 SMC_SELECT_BANK(2); 325 SMC_SELECT_BANK(2);
326 SMC_SET_INT_MASK(0); 326 SMC_SET_INT_MASK(0);
327 pending_skb = lp->pending_tx_skb; 327 pending_skb = lp->pending_tx_skb;
328 lp->pending_tx_skb = NULL; 328 lp->pending_tx_skb = NULL;
329 spin_unlock(&lp->lock); 329 spin_unlock_irq(&lp->lock);
330 330
331 /* free any pending tx skb */ 331 /* free any pending tx skb */
332 if (pending_skb) { 332 if (pending_skb) {
@@ -448,12 +448,12 @@ static void smc_shutdown(struct net_device *dev)
448 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 448 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
449 449
450 /* no more interrupts for me */ 450 /* no more interrupts for me */
451 spin_lock(&lp->lock); 451 spin_lock_irq(&lp->lock);
452 SMC_SELECT_BANK(2); 452 SMC_SELECT_BANK(2);
453 SMC_SET_INT_MASK(0); 453 SMC_SET_INT_MASK(0);
454 pending_skb = lp->pending_tx_skb; 454 pending_skb = lp->pending_tx_skb;
455 lp->pending_tx_skb = NULL; 455 lp->pending_tx_skb = NULL;
456 spin_unlock(&lp->lock); 456 spin_unlock_irq(&lp->lock);
457 if (pending_skb) 457 if (pending_skb)
458 dev_kfree_skb(pending_skb); 458 dev_kfree_skb(pending_skb);
459 459
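The smc91x change applies the standard rule that a lock also taken from the interrupt handler must be acquired with interrupts disabled in process context; otherwise the IRQ can arrive on the same CPU while the lock is held and deadlock. A generic sketch of the pattern (not smc91x-specific):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(my_lock);

static irqreturn_t my_irq(int irq, void *dev_id, struct pt_regs *regs)
{
	spin_lock(&my_lock);		/* interrupts are already off here */
	/* ... touch state shared with process context ... */
	spin_unlock(&my_lock);
	return IRQ_HANDLED;
}

static void my_shutdown_path(void)
{
	spin_lock_irq(&my_lock);	/* must mask IRQs, or my_irq() could
					 * spin forever on the lock we hold */
	/* ... touch the shared state ... */
	spin_unlock_irq(&my_lock);
}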
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 4ec4b4d23ae5..7aa7fbac8224 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -136,14 +136,9 @@
136#define SMC_CAN_USE_32BIT 0 136#define SMC_CAN_USE_32BIT 0
137#define SMC_IO_SHIFT 0 137#define SMC_IO_SHIFT 0
138#define SMC_NOWAIT 1 138#define SMC_NOWAIT 1
139#define SMC_USE_PXA_DMA 1
140 139
141#define SMC_inb(a, r) readb((a) + (r))
142#define SMC_inw(a, r) readw((a) + (r)) 140#define SMC_inw(a, r) readw((a) + (r))
143#define SMC_inl(a, r) readl((a) + (r))
144#define SMC_outb(v, a, r) writeb(v, (a) + (r))
145#define SMC_outw(v, a, r) writew(v, (a) + (r)) 141#define SMC_outw(v, a, r) writew(v, (a) + (r))
146#define SMC_outl(v, a, r) writel(v, (a) + (r))
147#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 142#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
148#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 143#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
149 144
@@ -189,16 +184,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
189#define SMC_IO_SHIFT 0 184#define SMC_IO_SHIFT 0
190#define SMC_NOWAIT 1 185#define SMC_NOWAIT 1
191 186
192#define SMC_inb(a, r) readb((a) + (r))
193#define SMC_outb(v, a, r) writeb(v, (a) + (r))
194#define SMC_inw(a, r) readw((a) + (r)) 187#define SMC_inw(a, r) readw((a) + (r))
195#define SMC_outw(v, a, r) writew(v, (a) + (r)) 188#define SMC_outw(v, a, r) writew(v, (a) + (r))
196#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) 189#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
197#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) 190#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
198#define SMC_inl(a, r) readl((a) + (r))
199#define SMC_outl(v, a, r) writel(v, (a) + (r))
200#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
201#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
202 191
203#include <asm/mach-types.h> 192#include <asm/mach-types.h>
204#include <asm/arch/cpu.h> 193#include <asm/arch/cpu.h>
@@ -372,6 +361,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
372 361
373#define SMC_IRQ_FLAGS (0) 362#define SMC_IRQ_FLAGS (0)
374 363
364#elif defined(CONFIG_ARCH_VERSATILE)
365
366#define SMC_CAN_USE_8BIT 1
367#define SMC_CAN_USE_16BIT 1
368#define SMC_CAN_USE_32BIT 1
369#define SMC_NOWAIT 1
370
371#define SMC_inb(a, r) readb((a) + (r))
372#define SMC_inw(a, r) readw((a) + (r))
373#define SMC_inl(a, r) readl((a) + (r))
374#define SMC_outb(v, a, r) writeb(v, (a) + (r))
375#define SMC_outw(v, a, r) writew(v, (a) + (r))
376#define SMC_outl(v, a, r) writel(v, (a) + (r))
377#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
378#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
379
380#define SMC_IRQ_FLAGS (0)
381
375#else 382#else
376 383
377#define SMC_CAN_USE_8BIT 1 384#define SMC_CAN_USE_8BIT 1
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 647f62e9707d..88907218457a 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1611,13 +1611,12 @@ spider_net_open(struct net_device *netdev)
1611 int result; 1611 int result;
1612 1612
1613 result = -ENOMEM; 1613 result = -ENOMEM;
1614 if (spider_net_init_chain(card, &card->tx_chain, 1614 if (spider_net_init_chain(card, &card->tx_chain, card->descr,
1615 card->descr, 1615 PCI_DMA_TODEVICE, card->tx_desc))
1616 PCI_DMA_TODEVICE, tx_descriptors))
1617 goto alloc_tx_failed; 1616 goto alloc_tx_failed;
1618 if (spider_net_init_chain(card, &card->rx_chain, 1617 if (spider_net_init_chain(card, &card->rx_chain,
1619 card->descr + tx_descriptors, 1618 card->descr + card->rx_desc,
1620 PCI_DMA_FROMDEVICE, rx_descriptors)) 1619 PCI_DMA_FROMDEVICE, card->rx_desc))
1621 goto alloc_rx_failed; 1620 goto alloc_rx_failed;
1622 1621
1623 /* allocate rx skbs */ 1622 /* allocate rx skbs */
@@ -2005,6 +2004,9 @@ spider_net_setup_netdev(struct spider_net_card *card)
2005 2004
2006 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; 2005 card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
2007 2006
2007 card->tx_desc = tx_descriptors;
2008 card->rx_desc = rx_descriptors;
2009
2008 spider_net_setup_netdev_ops(netdev); 2010 spider_net_setup_netdev_ops(netdev);
2009 2011
2010 netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX; 2012 netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index f6dcf180ae3d..30407cdf0892 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -440,6 +440,9 @@ struct spider_net_card {
440 /* for ethtool */ 440 /* for ethtool */
441 int msg_enable; 441 int msg_enable;
442 442
443 int rx_desc;
444 int tx_desc;
445
443 struct spider_net_descr descr[0]; 446 struct spider_net_descr descr[0];
444}; 447};
445 448
diff --git a/drivers/net/spider_net_ethtool.c b/drivers/net/spider_net_ethtool.c
index a5bb0b7633af..02209222b8c9 100644
--- a/drivers/net/spider_net_ethtool.c
+++ b/drivers/net/spider_net_ethtool.c
@@ -130,6 +130,18 @@ spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
130 return 0; 130 return 0;
131} 131}
132 132
133static void
134spider_net_ethtool_get_ringparam(struct net_device *netdev,
135 struct ethtool_ringparam *ering)
136{
137 struct spider_net_card *card = netdev->priv;
138
139 ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
140 ering->tx_pending = card->tx_desc;
141 ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
142 ering->rx_pending = card->rx_desc;
143}
144
133struct ethtool_ops spider_net_ethtool_ops = { 145struct ethtool_ops spider_net_ethtool_ops = {
134 .get_settings = spider_net_ethtool_get_settings, 146 .get_settings = spider_net_ethtool_get_settings,
135 .get_drvinfo = spider_net_ethtool_get_drvinfo, 147 .get_drvinfo = spider_net_ethtool_get_drvinfo,
@@ -141,5 +153,6 @@ struct ethtool_ops spider_net_ethtool_ops = {
141 .set_rx_csum = spider_net_ethtool_set_rx_csum, 153 .set_rx_csum = spider_net_ethtool_set_rx_csum,
142 .get_tx_csum = spider_net_ethtool_get_tx_csum, 154 .get_tx_csum = spider_net_ethtool_get_tx_csum,
143 .set_tx_csum = spider_net_ethtool_set_tx_csum, 155 .set_tx_csum = spider_net_ethtool_set_tx_csum,
156 .get_ringparam = spider_net_ethtool_get_ringparam,
144}; 157};
145 158
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index ac17377b3e9f..698568e751da 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -107,7 +107,7 @@ static char *media[MAX_UNITS];
107#endif 107#endif
108 108
109/* These identify the driver base version and may not be removed. */ 109/* These identify the driver base version and may not be removed. */
110static char version[] __devinitdata = 110static char version[] =
111KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n" 111KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"
112KERN_INFO " http://www.scyld.com/network/sundance.html\n"; 112KERN_INFO " http://www.scyld.com/network/sundance.html\n";
113 113
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 9f491563944e..4470025ff7f8 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -140,7 +140,7 @@ in the event that chatty debug messages are desired - jjs 12/30/98 */
140 140
141/* version and credits */ 141/* version and credits */
142#ifndef PCMCIA 142#ifndef PCMCIA
143static char version[] __initdata = 143static char version[] __devinitdata =
144 "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n" 144 "\nibmtr.c: v1.3.57 8/ 7/94 Peter De Schrijver and Mark Swanson\n"
145 " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n" 145 " v2.1.125 10/20/98 Paul Norton <pnorton@ieee.org>\n"
146 " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n" 146 " v2.2.0 12/30/98 Joel Sloan <jjs@c-me.com>\n"
@@ -216,7 +216,7 @@ static int __devinitdata turbo_irq[IBMTR_MAX_ADAPTERS] = {0};
216static int __devinitdata turbo_searched = 0; 216static int __devinitdata turbo_searched = 0;
217 217
218#ifndef PCMCIA 218#ifndef PCMCIA
219static __u32 ibmtr_mem_base __initdata = 0xd0000; 219static __u32 ibmtr_mem_base __devinitdata = 0xd0000;
220#endif 220#endif
221 221
222static void __devinit PrtChanID(char *pcid, short stride) 222static void __devinit PrtChanID(char *pcid, short stride)
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index cd2e0251e2bc..85a7f797d343 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -5666,7 +5666,7 @@ module_param_array(io, int, NULL, 0);
5666module_param_array(irq, int, NULL, 0); 5666module_param_array(irq, int, NULL, 0);
5667module_param(ringspeed, int, 0); 5667module_param(ringspeed, int, 0);
5668 5668
5669static struct net_device *setup_card(int n) 5669static struct net_device * __init setup_card(int n)
5670{ 5670{
5671 struct net_device *dev = alloc_trdev(sizeof(struct net_local)); 5671 struct net_device *dev = alloc_trdev(sizeof(struct net_local));
5672 int err; 5672 int err;
@@ -5696,9 +5696,8 @@ out:
5696 free_netdev(dev); 5696 free_netdev(dev);
5697 return ERR_PTR(err); 5697 return ERR_PTR(err);
5698} 5698}
5699
5700 5699
5701int init_module(void) 5700int __init init_module(void)
5702{ 5701{
5703 int i, found = 0; 5702 int i, found = 0;
5704 struct net_device *dev; 5703 struct net_device *dev;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 7f414815cc62..eba9083da146 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -138,7 +138,7 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
138#include <asm/irq.h> 138#include <asm/irq.h>
139 139
140/* These identify the driver base version and may not be removed. */ 140/* These identify the driver base version and may not be removed. */
141static char version[] __devinitdata = 141static char version[] =
142KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" 142KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
143KERN_INFO " http://www.scyld.com/network/drivers.html\n"; 143KERN_INFO " http://www.scyld.com/network/drivers.html\n";
144 144
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index f874e4f6ccf6..cf43390d2c80 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1264,8 +1264,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
1264 1264
1265static int __init xircom_init(void) 1265static int __init xircom_init(void)
1266{ 1266{
1267 pci_register_driver(&xircom_ops); 1267 return pci_register_driver(&xircom_ops);
1268 return 0;
1269} 1268}
1270 1269
1271static void __exit xircom_exit(void) 1270static void __exit xircom_exit(void)
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
new file mode 100644
index 000000000000..47f49ef72bdc
--- /dev/null
+++ b/drivers/net/ucc_geth.c
@@ -0,0 +1,4278 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * QE UCC Gigabit Ethernet Driver
8 *
9 * Changelog:
10 * Jul 6, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/errno.h>
21#include <linux/slab.h>
22#include <linux/stddef.h>
23#include <linux/interrupt.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/skbuff.h>
27#include <linux/spinlock.h>
28#include <linux/mm.h>
29#include <linux/ethtool.h>
30#include <linux/delay.h>
31#include <linux/dma-mapping.h>
32#include <linux/fsl_devices.h>
33#include <linux/ethtool.h>
34#include <linux/platform_device.h>
35#include <linux/mii.h>
36
37#include <asm/uaccess.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/immap_qe.h>
41#include <asm/qe.h>
42#include <asm/ucc.h>
43#include <asm/ucc_fast.h>
44
45#include "ucc_geth.h"
46#include "ucc_geth_phy.h"
47
48#undef DEBUG
49
50#define DRV_DESC "QE UCC Gigabit Ethernet Controller version:June 20, 2006"
51#define DRV_NAME "ucc_geth"
52
53#define ugeth_printk(level, format, arg...) \
54 printk(level format "\n", ## arg)
55
56#define ugeth_dbg(format, arg...) \
57 ugeth_printk(KERN_DEBUG , format , ## arg)
58#define ugeth_err(format, arg...) \
59 ugeth_printk(KERN_ERR , format , ## arg)
60#define ugeth_info(format, arg...) \
61 ugeth_printk(KERN_INFO , format , ## arg)
62#define ugeth_warn(format, arg...) \
63 ugeth_printk(KERN_WARNING , format , ## arg)
64
65#ifdef UGETH_VERBOSE_DEBUG
66#define ugeth_vdbg ugeth_dbg
67#else
68#define ugeth_vdbg(fmt, args...) do { } while (0)
69#endif /* UGETH_VERBOSE_DEBUG */
70
71static DEFINE_SPINLOCK(ugeth_lock);
72
73static ucc_geth_info_t ugeth_primary_info = {
74 .uf_info = {
75 .bd_mem_part = MEM_PART_SYSTEM,
76 .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
77 .max_rx_buf_length = 1536,
78/* FIXME: should be changed in run time for 1G and 100M */
79#ifdef CONFIG_UGETH_HAS_GIGA
80 .urfs = UCC_GETH_URFS_GIGA_INIT,
81 .urfet = UCC_GETH_URFET_GIGA_INIT,
82 .urfset = UCC_GETH_URFSET_GIGA_INIT,
83 .utfs = UCC_GETH_UTFS_GIGA_INIT,
84 .utfet = UCC_GETH_UTFET_GIGA_INIT,
85 .utftt = UCC_GETH_UTFTT_GIGA_INIT,
86#else
87 .urfs = UCC_GETH_URFS_INIT,
88 .urfet = UCC_GETH_URFET_INIT,
89 .urfset = UCC_GETH_URFSET_INIT,
90 .utfs = UCC_GETH_UTFS_INIT,
91 .utfet = UCC_GETH_UTFET_INIT,
92 .utftt = UCC_GETH_UTFTT_INIT,
93#endif
94 .ufpt = 256,
95 .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
96 .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
97 .tenc = UCC_FAST_TX_ENCODING_NRZ,
98 .renc = UCC_FAST_RX_ENCODING_NRZ,
99 .tcrc = UCC_FAST_16_BIT_CRC,
100 .synl = UCC_FAST_SYNC_LEN_NOT_USED,
101 },
102 .numQueuesTx = 1,
103 .numQueuesRx = 1,
104 .extendedFilteringChainPointer = ((uint32_t) NULL),
105 .typeorlen = 3072 /*1536 */ ,
106 .nonBackToBackIfgPart1 = 0x40,
107 .nonBackToBackIfgPart2 = 0x60,
108 .miminumInterFrameGapEnforcement = 0x50,
109 .backToBackInterFrameGap = 0x60,
110 .mblinterval = 128,
111 .nortsrbytetime = 5,
112 .fracsiz = 1,
113 .strictpriorityq = 0xff,
114 .altBebTruncation = 0xa,
115 .excessDefer = 1,
116 .maxRetransmission = 0xf,
117 .collisionWindow = 0x37,
118 .receiveFlowControl = 1,
119 .maxGroupAddrInHash = 4,
120 .maxIndAddrInHash = 4,
121 .prel = 7,
122 .maxFrameLength = 1518,
123 .minFrameLength = 64,
124 .maxD1Length = 1520,
125 .maxD2Length = 1520,
126 .vlantype = 0x8100,
127 .ecamptr = ((uint32_t) NULL),
128 .eventRegMask = UCCE_OTHER,
129 .pausePeriod = 0xf000,
130 .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
131 .bdRingLenTx = {
132 TX_BD_RING_LEN,
133 TX_BD_RING_LEN,
134 TX_BD_RING_LEN,
135 TX_BD_RING_LEN,
136 TX_BD_RING_LEN,
137 TX_BD_RING_LEN,
138 TX_BD_RING_LEN,
139 TX_BD_RING_LEN},
140
141 .bdRingLenRx = {
142 RX_BD_RING_LEN,
143 RX_BD_RING_LEN,
144 RX_BD_RING_LEN,
145 RX_BD_RING_LEN,
146 RX_BD_RING_LEN,
147 RX_BD_RING_LEN,
148 RX_BD_RING_LEN,
149 RX_BD_RING_LEN},
150
151 .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
152 .largestexternallookupkeysize =
153 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
154 .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_NONE,
155 .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
156 .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
157 .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
158 .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
159 .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
160 .numThreadsTx = UCC_GETH_NUM_OF_THREADS_4,
161 .numThreadsRx = UCC_GETH_NUM_OF_THREADS_4,
162 .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
163 .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
164};
165
166static ucc_geth_info_t ugeth_info[8];
167
168#ifdef DEBUG
169static void mem_disp(u8 *addr, int size)
170{
171 u8 *i;
172 int size16Aling = (size >> 4) << 4;
173 int size4Aling = (size >> 2) << 2;
174 int notAlign = 0;
175 if (size % 16)
176 notAlign = 1;
177
178 for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
179 printk("0x%08x: %08x %08x %08x %08x\r\n",
180 (u32) i,
181 *((u32 *) (i)),
182 *((u32 *) (i + 4)),
183 *((u32 *) (i + 8)), *((u32 *) (i + 12)));
184 if (notAlign == 1)
185 printk("0x%08x: ", (u32) i);
186 for (; (u32) i < (u32) addr + size4Aling; i += 4)
187 printk("%08x ", *((u32 *) (i)));
188 for (; (u32) i < (u32) addr + size; i++)
189 printk("%02x", *((u8 *) (i)));
190 if (notAlign == 1)
191 printk("\r\n");
192}
193#endif /* DEBUG */
194
195#ifdef CONFIG_UGETH_FILTERING
196static void enqueue(struct list_head *node, struct list_head *lh)
197{
198 unsigned long flags;
199
200 spin_lock_irqsave(ugeth_lock, flags);
201 list_add_tail(node, lh);
202 spin_unlock_irqrestore(ugeth_lock, flags);
203}
204#endif /* CONFIG_UGETH_FILTERING */
205
206static struct list_head *dequeue(struct list_head *lh)
207{
208 unsigned long flags;
209
210 spin_lock_irqsave(ugeth_lock, flags);
211 if (!list_empty(lh)) {
212 struct list_head *node = lh->next;
213 list_del(node);
214 spin_unlock_irqrestore(ugeth_lock, flags);
215 return node;
216 } else {
217 spin_unlock_irqrestore(ugeth_lock, flags);
218 return NULL;
219 }
220}
221
222static int get_interface_details(enet_interface_e enet_interface,
223 enet_speed_e *speed,
224 int *r10m,
225 int *rmm,
226 int *rpm,
227 int *tbi, int *limited_to_full_duplex)
228{
229 /* Analyze enet_interface according to Interface Mode
230 Configuration table */
231 switch (enet_interface) {
232 case ENET_10_MII:
233 *speed = ENET_SPEED_10BT;
234 break;
235 case ENET_10_RMII:
236 *speed = ENET_SPEED_10BT;
237 *r10m = 1;
238 *rmm = 1;
239 break;
240 case ENET_10_RGMII:
241 *speed = ENET_SPEED_10BT;
242 *rpm = 1;
243 *r10m = 1;
244 *limited_to_full_duplex = 1;
245 break;
246 case ENET_100_MII:
247 *speed = ENET_SPEED_100BT;
248 break;
249 case ENET_100_RMII:
250 *speed = ENET_SPEED_100BT;
251 *rmm = 1;
252 break;
253 case ENET_100_RGMII:
254 *speed = ENET_SPEED_100BT;
255 *rpm = 1;
256 *limited_to_full_duplex = 1;
257 break;
258 case ENET_1000_GMII:
259 *speed = ENET_SPEED_1000BT;
260 *limited_to_full_duplex = 1;
261 break;
262 case ENET_1000_RGMII:
263 *speed = ENET_SPEED_1000BT;
264 *rpm = 1;
265 *limited_to_full_duplex = 1;
266 break;
267 case ENET_1000_TBI:
268 *speed = ENET_SPEED_1000BT;
269 *tbi = 1;
270 *limited_to_full_duplex = 1;
271 break;
272 case ENET_1000_RTBI:
273 *speed = ENET_SPEED_1000BT;
274 *rpm = 1;
275 *tbi = 1;
276 *limited_to_full_duplex = 1;
277 break;
278 default:
279 return -EINVAL;
280 break;
281 }
282
283 return 0;
284}
285
286static struct sk_buff *get_new_skb(ucc_geth_private_t *ugeth, u8 *bd)
287{
288 struct sk_buff *skb = NULL;
289
290 skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
291 UCC_GETH_RX_DATA_BUF_ALIGNMENT);
292
293 if (skb == NULL)
294 return NULL;
295
296 /* We need the data buffer to be aligned properly. We will reserve
297 * as many bytes as needed to align the data properly
298 */
299 skb_reserve(skb,
300 UCC_GETH_RX_DATA_BUF_ALIGNMENT -
301 (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
302 1)));
303
304 skb->dev = ugeth->dev;
305
306 BD_BUFFER_SET(bd,
307 dma_map_single(NULL,
308 skb->data,
309 ugeth->ug_info->uf_info.max_rx_buf_length +
310 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
311 DMA_FROM_DEVICE));
312
313 BD_STATUS_AND_LENGTH_SET(bd,
314 (R_E | R_I |
315 (BD_STATUS_AND_LENGTH(bd) & R_W)));
316
317 return skb;
318}
319
320static int rx_bd_buffer_set(ucc_geth_private_t *ugeth, u8 rxQ)
321{
322 u8 *bd;
323 u32 bd_status;
324 struct sk_buff *skb;
325 int i;
326
327 bd = ugeth->p_rx_bd_ring[rxQ];
328 i = 0;
329
330 do {
331 bd_status = BD_STATUS_AND_LENGTH(bd);
332 skb = get_new_skb(ugeth, bd);
333
334 if (!skb) /* If we cannot allocate a data buffer,
335 abort. Cleanup will be done elsewhere */
336 return -ENOMEM;
337
338 ugeth->rx_skbuff[rxQ][i] = skb;
339
340 /* advance the BD pointer */
341 bd += UCC_GETH_SIZE_OF_BD;
342 i++;
343 } while (!(bd_status & R_W));
344
345 return 0;
346}
347
348static int fill_init_enet_entries(ucc_geth_private_t *ugeth,
349 volatile u32 *p_start,
350 u8 num_entries,
351 u32 thread_size,
352 u32 thread_alignment,
353 qe_risc_allocation_e risc,
354 int skip_page_for_first_entry)
355{
356 u32 init_enet_offset;
357 u8 i;
358 int snum;
359
360 for (i = 0; i < num_entries; i++) {
361 if ((snum = qe_get_snum()) < 0) {
362 ugeth_err("fill_init_enet_entries: Can not get SNUM.");
363 return snum;
364 }
365 if ((i == 0) && skip_page_for_first_entry)
366 /* First entry of Rx does not have page */
367 init_enet_offset = 0;
368 else {
369 init_enet_offset =
370 qe_muram_alloc(thread_size, thread_alignment);
371 if (IS_MURAM_ERR(init_enet_offset)) {
372 ugeth_err
373 ("fill_init_enet_entries: Can not allocate DPRAM memory.");
374 qe_put_snum((u8) snum);
375 return -ENOMEM;
376 }
377 }
378 *(p_start++) =
379 ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
380 | risc;
381 }
382
383 return 0;
384}
385
386static int return_init_enet_entries(ucc_geth_private_t *ugeth,
387 volatile u32 *p_start,
388 u8 num_entries,
389 qe_risc_allocation_e risc,
390 int skip_page_for_first_entry)
391{
392 u32 init_enet_offset;
393 u8 i;
394 int snum;
395
396 for (i = 0; i < num_entries; i++) {
397 /* Check that this entry was actually valid --
398 needed in case failed in allocations */
399 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
400 snum =
401 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
402 ENET_INIT_PARAM_SNUM_SHIFT;
403 qe_put_snum((u8) snum);
404 if (!((i == 0) && skip_page_for_first_entry)) {
405 /* First entry of Rx does not have page */
406 init_enet_offset =
407 (in_be32(p_start) &
408 ENET_INIT_PARAM_PTR_MASK);
409 qe_muram_free(init_enet_offset);
410 }
411 *(p_start++) = 0; /* Just for cosmetics */
412 }
413 }
414
415 return 0;
416}
417
418#ifdef DEBUG
419static int dump_init_enet_entries(ucc_geth_private_t *ugeth,
420 volatile u32 *p_start,
421 u8 num_entries,
422 u32 thread_size,
423 qe_risc_allocation_e risc,
424 int skip_page_for_first_entry)
425{
426 u32 init_enet_offset;
427 u8 i;
428 int snum;
429
430 for (i = 0; i < num_entries; i++) {
431 /* Check that this entry was actually valid --
432 needed in case failed in allocations */
433 if ((*p_start & ENET_INIT_PARAM_RISC_MASK) == risc) {
434 snum =
435 (u32) (*p_start & ENET_INIT_PARAM_SNUM_MASK) >>
436 ENET_INIT_PARAM_SNUM_SHIFT;
437 qe_put_snum((u8) snum);
438 if (!((i == 0) && skip_page_for_first_entry)) {
439 /* First entry of Rx does not have page */
440 init_enet_offset =
441 (in_be32(p_start) &
442 ENET_INIT_PARAM_PTR_MASK);
443 ugeth_info("Init enet entry %d:", i);
444 ugeth_info("Base address: 0x%08x",
445 (u32)
446 qe_muram_addr(init_enet_offset));
447 mem_disp(qe_muram_addr(init_enet_offset),
448 thread_size);
449 }
450 p_start++;
451 }
452 }
453
454 return 0;
455}
456#endif
457
458#ifdef CONFIG_UGETH_FILTERING
459static enet_addr_container_t *get_enet_addr_container(void)
460{
461 enet_addr_container_t *enet_addr_cont;
462
463 /* allocate memory */
464 enet_addr_cont = kmalloc(sizeof(enet_addr_container_t), GFP_KERNEL);
465 if (!enet_addr_cont) {
466 ugeth_err("%s: No memory for enet_addr_container_t object.",
467 __FUNCTION__);
468 return NULL;
469 }
470
471 return enet_addr_cont;
472}
473#endif /* CONFIG_UGETH_FILTERING */
474
475static void put_enet_addr_container(enet_addr_container_t *enet_addr_cont)
476{
477 kfree(enet_addr_cont);
478}
479
480#ifdef CONFIG_UGETH_FILTERING
481static int hw_add_addr_in_paddr(ucc_geth_private_t *ugeth,
482 enet_addr_t *p_enet_addr, u8 paddr_num)
483{
484 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
485
486 if (!(paddr_num < NUM_OF_PADDRS)) {
487 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
488 return -EINVAL;
489 }
490
491 p_82xx_addr_filt =
492 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
493 addressfiltering;
494
495 /* Ethernet frames are defined in Little Endian mode, */
496 /* therefore to insert the address we reverse the bytes. */
497 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h,
498 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
499 (u16) (*p_enet_addr)[4]));
500 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m,
501 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
502 (u16) (*p_enet_addr)[2]));
503 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l,
504 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
505 (u16) (*p_enet_addr)[0]));
506
507 return 0;
508}
509#endif /* CONFIG_UGETH_FILTERING */
510
511static int hw_clear_addr_in_paddr(ucc_geth_private_t *ugeth, u8 paddr_num)
512{
513 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
514
515 if (!(paddr_num < NUM_OF_PADDRS)) {
516 ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__);
517 return -EINVAL;
518 }
519
520 p_82xx_addr_filt =
521 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
522 addressfiltering;
523
524 /* Writing address ff.ff.ff.ff.ff.ff disables address
525 recognition for this register */
526 out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
527 out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
528 out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);
529
530 return 0;
531}
532
533static void hw_add_addr_in_hash(ucc_geth_private_t *ugeth,
534 enet_addr_t *p_enet_addr)
535{
536 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
537 u32 cecr_subblock;
538
539 p_82xx_addr_filt =
540 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
541 addressfiltering;
542
543 cecr_subblock =
544 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
545
546 /* Ethernet frames are defined in Little Endian mode;
547 therefore, to insert */
548 /* the address into the hash (Big Endian mode), we reverse the bytes. */
549 out_be16(&p_82xx_addr_filt->taddr.h,
550 (u16) (((u16) (((u16) ((*p_enet_addr)[5])) << 8)) |
551 (u16) (*p_enet_addr)[4]));
552 out_be16(&p_82xx_addr_filt->taddr.m,
553 (u16) (((u16) (((u16) ((*p_enet_addr)[3])) << 8)) |
554 (u16) (*p_enet_addr)[2]));
555 out_be16(&p_82xx_addr_filt->taddr.l,
556 (u16) (((u16) (((u16) ((*p_enet_addr)[1])) << 8)) |
557 (u16) (*p_enet_addr)[0]));
558
559 qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
560 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
561}
562
563#ifdef CONFIG_UGETH_MAGIC_PACKET
564static void magic_packet_detection_enable(ucc_geth_private_t *ugeth)
565{
566 ucc_fast_private_t *uccf;
567 ucc_geth_t *ug_regs;
568 u32 maccfg2, uccm;
569
570 uccf = ugeth->uccf;
571 ug_regs = ugeth->ug_regs;
572
573 /* Enable interrupts for magic packet detection */
574 uccm = in_be32(uccf->p_uccm);
575 uccm |= UCCE_MPD;
576 out_be32(uccf->p_uccm, uccm);
577
578 /* Enable magic packet detection */
579 maccfg2 = in_be32(&ug_regs->maccfg2);
580 maccfg2 |= MACCFG2_MPE;
581 out_be32(&ug_regs->maccfg2, maccfg2);
582}
583
584static void magic_packet_detection_disable(ucc_geth_private_t *ugeth)
585{
586 ucc_fast_private_t *uccf;
587 ucc_geth_t *ug_regs;
588 u32 maccfg2, uccm;
589
590 uccf = ugeth->uccf;
591 ug_regs = ugeth->ug_regs;
592
593 /* Disable interrupts for magic packet detection */
594 uccm = in_be32(uccf->p_uccm);
595 uccm &= ~UCCE_MPD;
596 out_be32(uccf->p_uccm, uccm);
597
598 /* Disable magic packet detection */
599 maccfg2 = in_be32(&ug_regs->maccfg2);
600 maccfg2 &= ~MACCFG2_MPE;
601 out_be32(&ug_regs->maccfg2, maccfg2);
602}
603#endif /* MAGIC_PACKET */
604
605static inline int compare_addr(enet_addr_t *addr1, enet_addr_t *addr2)
606{
607 return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
608}
609
610#ifdef DEBUG
611static void get_statistics(ucc_geth_private_t *ugeth,
612 ucc_geth_tx_firmware_statistics_t *
613 tx_firmware_statistics,
614 ucc_geth_rx_firmware_statistics_t *
615 rx_firmware_statistics,
616 ucc_geth_hardware_statistics_t *hardware_statistics)
617{
618 ucc_fast_t *uf_regs;
619 ucc_geth_t *ug_regs;
620 ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
621 ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
622
623 ug_regs = ugeth->ug_regs;
624 uf_regs = (ucc_fast_t *) ug_regs;
625 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
626 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
627
628 /* Tx firmware only if user handed pointer and driver actually
629 gathers Tx firmware statistics */
630 if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
631 tx_firmware_statistics->sicoltx =
632 in_be32(&p_tx_fw_statistics_pram->sicoltx);
633 tx_firmware_statistics->mulcoltx =
634 in_be32(&p_tx_fw_statistics_pram->mulcoltx);
635 tx_firmware_statistics->latecoltxfr =
636 in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
637 tx_firmware_statistics->frabortduecol =
638 in_be32(&p_tx_fw_statistics_pram->frabortduecol);
639 tx_firmware_statistics->frlostinmactxer =
640 in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
641 tx_firmware_statistics->carriersenseertx =
642 in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
643 tx_firmware_statistics->frtxok =
644 in_be32(&p_tx_fw_statistics_pram->frtxok);
645 tx_firmware_statistics->txfrexcessivedefer =
646 in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
647 tx_firmware_statistics->txpkts256 =
648 in_be32(&p_tx_fw_statistics_pram->txpkts256);
649 tx_firmware_statistics->txpkts512 =
650 in_be32(&p_tx_fw_statistics_pram->txpkts512);
651 tx_firmware_statistics->txpkts1024 =
652 in_be32(&p_tx_fw_statistics_pram->txpkts1024);
653 tx_firmware_statistics->txpktsjumbo =
654 in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
655 }
656
657	/* Copy Rx firmware statistics only if the caller passed a pointer
658	 * and the driver actually gathers them */
659 if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
660 int i;
661 rx_firmware_statistics->frrxfcser =
662 in_be32(&p_rx_fw_statistics_pram->frrxfcser);
663 rx_firmware_statistics->fraligner =
664 in_be32(&p_rx_fw_statistics_pram->fraligner);
665 rx_firmware_statistics->inrangelenrxer =
666 in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
667 rx_firmware_statistics->outrangelenrxer =
668 in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
669 rx_firmware_statistics->frtoolong =
670 in_be32(&p_rx_fw_statistics_pram->frtoolong);
671 rx_firmware_statistics->runt =
672 in_be32(&p_rx_fw_statistics_pram->runt);
673 rx_firmware_statistics->verylongevent =
674 in_be32(&p_rx_fw_statistics_pram->verylongevent);
675 rx_firmware_statistics->symbolerror =
676 in_be32(&p_rx_fw_statistics_pram->symbolerror);
677 rx_firmware_statistics->dropbsy =
678 in_be32(&p_rx_fw_statistics_pram->dropbsy);
679 for (i = 0; i < 0x8; i++)
680 rx_firmware_statistics->res0[i] =
681 p_rx_fw_statistics_pram->res0[i];
682 rx_firmware_statistics->mismatchdrop =
683 in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
684 rx_firmware_statistics->underpkts =
685 in_be32(&p_rx_fw_statistics_pram->underpkts);
686 rx_firmware_statistics->pkts256 =
687 in_be32(&p_rx_fw_statistics_pram->pkts256);
688 rx_firmware_statistics->pkts512 =
689 in_be32(&p_rx_fw_statistics_pram->pkts512);
690 rx_firmware_statistics->pkts1024 =
691 in_be32(&p_rx_fw_statistics_pram->pkts1024);
692 rx_firmware_statistics->pktsjumbo =
693 in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
694 rx_firmware_statistics->frlossinmacer =
695 in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
696 rx_firmware_statistics->pausefr =
697 in_be32(&p_rx_fw_statistics_pram->pausefr);
698 for (i = 0; i < 0x4; i++)
699 rx_firmware_statistics->res1[i] =
700 p_rx_fw_statistics_pram->res1[i];
701 rx_firmware_statistics->removevlan =
702 in_be32(&p_rx_fw_statistics_pram->removevlan);
703 rx_firmware_statistics->replacevlan =
704 in_be32(&p_rx_fw_statistics_pram->replacevlan);
705 rx_firmware_statistics->insertvlan =
706 in_be32(&p_rx_fw_statistics_pram->insertvlan);
707 }
708
709	/* Copy hardware statistics only if the caller passed a pointer
710	   and hardware statistics gathering is enabled */
711 if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) {
712 hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
713 hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
714 hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
715 hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
716 hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
717 hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
718 hardware_statistics->txok = in_be32(&ug_regs->txok);
719 hardware_statistics->txcf = in_be16(&ug_regs->txcf);
720 hardware_statistics->tmca = in_be32(&ug_regs->tmca);
721 hardware_statistics->tbca = in_be32(&ug_regs->tbca);
722 hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
723 hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
724 hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
725 hardware_statistics->rmca = in_be32(&ug_regs->rmca);
726 hardware_statistics->rbca = in_be32(&ug_regs->rbca);
727 }
728}
729
730static void dump_bds(ucc_geth_private_t *ugeth)
731{
732 int i;
733 int length;
734
735 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
736 if (ugeth->p_tx_bd_ring[i]) {
737 length =
738 (ugeth->ug_info->bdRingLenTx[i] *
739 UCC_GETH_SIZE_OF_BD);
740 ugeth_info("TX BDs[%d]", i);
741 mem_disp(ugeth->p_tx_bd_ring[i], length);
742 }
743 }
744 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
745 if (ugeth->p_rx_bd_ring[i]) {
746 length =
747 (ugeth->ug_info->bdRingLenRx[i] *
748 UCC_GETH_SIZE_OF_BD);
749 ugeth_info("RX BDs[%d]", i);
750 mem_disp(ugeth->p_rx_bd_ring[i], length);
751 }
752 }
753}
754
755static void dump_regs(ucc_geth_private_t *ugeth)
756{
757 int i;
758
759 ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
760 ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);
761
762 ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
763 (u32) & ugeth->ug_regs->maccfg1,
764 in_be32(&ugeth->ug_regs->maccfg1));
765 ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
766 (u32) & ugeth->ug_regs->maccfg2,
767 in_be32(&ugeth->ug_regs->maccfg2));
768 ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
769 (u32) & ugeth->ug_regs->ipgifg,
770 in_be32(&ugeth->ug_regs->ipgifg));
771 ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
772 (u32) & ugeth->ug_regs->hafdup,
773 in_be32(&ugeth->ug_regs->hafdup));
774 ugeth_info("miimcfg : addr - 0x%08x, val - 0x%08x",
775 (u32) & ugeth->ug_regs->miimng.miimcfg,
776 in_be32(&ugeth->ug_regs->miimng.miimcfg));
777 ugeth_info("miimcom : addr - 0x%08x, val - 0x%08x",
778 (u32) & ugeth->ug_regs->miimng.miimcom,
779 in_be32(&ugeth->ug_regs->miimng.miimcom));
780 ugeth_info("miimadd : addr - 0x%08x, val - 0x%08x",
781 (u32) & ugeth->ug_regs->miimng.miimadd,
782 in_be32(&ugeth->ug_regs->miimng.miimadd));
783 ugeth_info("miimcon : addr - 0x%08x, val - 0x%08x",
784 (u32) & ugeth->ug_regs->miimng.miimcon,
785 in_be32(&ugeth->ug_regs->miimng.miimcon));
786 ugeth_info("miimstat : addr - 0x%08x, val - 0x%08x",
787 (u32) & ugeth->ug_regs->miimng.miimstat,
788 in_be32(&ugeth->ug_regs->miimng.miimstat));
789 ugeth_info("miimmind : addr - 0x%08x, val - 0x%08x",
790 (u32) & ugeth->ug_regs->miimng.miimind,
791 in_be32(&ugeth->ug_regs->miimng.miimind));
792 ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
793 (u32) & ugeth->ug_regs->ifctl,
794 in_be32(&ugeth->ug_regs->ifctl));
795 ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
796 (u32) & ugeth->ug_regs->ifstat,
797 in_be32(&ugeth->ug_regs->ifstat));
798 ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
799 (u32) & ugeth->ug_regs->macstnaddr1,
800 in_be32(&ugeth->ug_regs->macstnaddr1));
801 ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
802 (u32) & ugeth->ug_regs->macstnaddr2,
803 in_be32(&ugeth->ug_regs->macstnaddr2));
804 ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
805 (u32) & ugeth->ug_regs->uempr,
806 in_be32(&ugeth->ug_regs->uempr));
807 ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
808 (u32) & ugeth->ug_regs->utbipar,
809 in_be32(&ugeth->ug_regs->utbipar));
810 ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
811 (u32) & ugeth->ug_regs->uescr,
812 in_be16(&ugeth->ug_regs->uescr));
813 ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
814 (u32) & ugeth->ug_regs->tx64,
815 in_be32(&ugeth->ug_regs->tx64));
816 ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
817 (u32) & ugeth->ug_regs->tx127,
818 in_be32(&ugeth->ug_regs->tx127));
819 ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
820 (u32) & ugeth->ug_regs->tx255,
821 in_be32(&ugeth->ug_regs->tx255));
822 ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
823 (u32) & ugeth->ug_regs->rx64,
824 in_be32(&ugeth->ug_regs->rx64));
825 ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
826 (u32) & ugeth->ug_regs->rx127,
827 in_be32(&ugeth->ug_regs->rx127));
828 ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
829 (u32) & ugeth->ug_regs->rx255,
830 in_be32(&ugeth->ug_regs->rx255));
831 ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
832 (u32) & ugeth->ug_regs->txok,
833 in_be32(&ugeth->ug_regs->txok));
834 ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
835 (u32) & ugeth->ug_regs->txcf,
836 in_be16(&ugeth->ug_regs->txcf));
837 ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
838 (u32) & ugeth->ug_regs->tmca,
839 in_be32(&ugeth->ug_regs->tmca));
840 ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
841 (u32) & ugeth->ug_regs->tbca,
842 in_be32(&ugeth->ug_regs->tbca));
843 ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
844 (u32) & ugeth->ug_regs->rxfok,
845 in_be32(&ugeth->ug_regs->rxfok));
846 ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
847 (u32) & ugeth->ug_regs->rxbok,
848 in_be32(&ugeth->ug_regs->rxbok));
849 ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
850 (u32) & ugeth->ug_regs->rbyt,
851 in_be32(&ugeth->ug_regs->rbyt));
852 ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
853 (u32) & ugeth->ug_regs->rmca,
854 in_be32(&ugeth->ug_regs->rmca));
855 ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
856 (u32) & ugeth->ug_regs->rbca,
857 in_be32(&ugeth->ug_regs->rbca));
858 ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
859 (u32) & ugeth->ug_regs->scar,
860 in_be32(&ugeth->ug_regs->scar));
861 ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
862 (u32) & ugeth->ug_regs->scam,
863 in_be32(&ugeth->ug_regs->scam));
864
865 if (ugeth->p_thread_data_tx) {
866 int numThreadsTxNumerical;
867 switch (ugeth->ug_info->numThreadsTx) {
868 case UCC_GETH_NUM_OF_THREADS_1:
869 numThreadsTxNumerical = 1;
870 break;
871 case UCC_GETH_NUM_OF_THREADS_2:
872 numThreadsTxNumerical = 2;
873 break;
874 case UCC_GETH_NUM_OF_THREADS_4:
875 numThreadsTxNumerical = 4;
876 break;
877 case UCC_GETH_NUM_OF_THREADS_6:
878 numThreadsTxNumerical = 6;
879 break;
880 case UCC_GETH_NUM_OF_THREADS_8:
881 numThreadsTxNumerical = 8;
882 break;
883 default:
884 numThreadsTxNumerical = 0;
885 break;
886 }
887
888 ugeth_info("Thread data TXs:");
889 ugeth_info("Base address: 0x%08x",
890 (u32) ugeth->p_thread_data_tx);
891 for (i = 0; i < numThreadsTxNumerical; i++) {
892 ugeth_info("Thread data TX[%d]:", i);
893 ugeth_info("Base address: 0x%08x",
894 (u32) & ugeth->p_thread_data_tx[i]);
895 mem_disp((u8 *) & ugeth->p_thread_data_tx[i],
896 sizeof(ucc_geth_thread_data_tx_t));
897 }
898 }
899 if (ugeth->p_thread_data_rx) {
900 int numThreadsRxNumerical;
901 switch (ugeth->ug_info->numThreadsRx) {
902 case UCC_GETH_NUM_OF_THREADS_1:
903 numThreadsRxNumerical = 1;
904 break;
905 case UCC_GETH_NUM_OF_THREADS_2:
906 numThreadsRxNumerical = 2;
907 break;
908 case UCC_GETH_NUM_OF_THREADS_4:
909 numThreadsRxNumerical = 4;
910 break;
911 case UCC_GETH_NUM_OF_THREADS_6:
912 numThreadsRxNumerical = 6;
913 break;
914 case UCC_GETH_NUM_OF_THREADS_8:
915 numThreadsRxNumerical = 8;
916 break;
917 default:
918 numThreadsRxNumerical = 0;
919 break;
920 }
921
922 ugeth_info("Thread data RX:");
923 ugeth_info("Base address: 0x%08x",
924 (u32) ugeth->p_thread_data_rx);
925 for (i = 0; i < numThreadsRxNumerical; i++) {
926 ugeth_info("Thread data RX[%d]:", i);
927 ugeth_info("Base address: 0x%08x",
928 (u32) & ugeth->p_thread_data_rx[i]);
929 mem_disp((u8 *) & ugeth->p_thread_data_rx[i],
930 sizeof(ucc_geth_thread_data_rx_t));
931 }
932 }
933 if (ugeth->p_exf_glbl_param) {
934 ugeth_info("EXF global param:");
935 ugeth_info("Base address: 0x%08x",
936 (u32) ugeth->p_exf_glbl_param);
937 mem_disp((u8 *) ugeth->p_exf_glbl_param,
938 sizeof(*ugeth->p_exf_glbl_param));
939 }
940 if (ugeth->p_tx_glbl_pram) {
941 ugeth_info("TX global param:");
942 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
943 ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
944 (u32) & ugeth->p_tx_glbl_pram->temoder,
945 in_be16(&ugeth->p_tx_glbl_pram->temoder));
946 ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
947 (u32) & ugeth->p_tx_glbl_pram->sqptr,
948 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
949 ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
950 (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer,
951 in_be32(&ugeth->p_tx_glbl_pram->
952 schedulerbasepointer));
953 ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
954 (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr,
955 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
956 ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
957 (u32) & ugeth->p_tx_glbl_pram->tstate,
958 in_be32(&ugeth->p_tx_glbl_pram->tstate));
959 ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x",
960 (u32) & ugeth->p_tx_glbl_pram->iphoffset[0],
961 ugeth->p_tx_glbl_pram->iphoffset[0]);
962 ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x",
963 (u32) & ugeth->p_tx_glbl_pram->iphoffset[1],
964 ugeth->p_tx_glbl_pram->iphoffset[1]);
965 ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x",
966 (u32) & ugeth->p_tx_glbl_pram->iphoffset[2],
967 ugeth->p_tx_glbl_pram->iphoffset[2]);
968 ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x",
969 (u32) & ugeth->p_tx_glbl_pram->iphoffset[3],
970 ugeth->p_tx_glbl_pram->iphoffset[3]);
971 ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x",
972 (u32) & ugeth->p_tx_glbl_pram->iphoffset[4],
973 ugeth->p_tx_glbl_pram->iphoffset[4]);
974 ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x",
975 (u32) & ugeth->p_tx_glbl_pram->iphoffset[5],
976 ugeth->p_tx_glbl_pram->iphoffset[5]);
977 ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x",
978 (u32) & ugeth->p_tx_glbl_pram->iphoffset[6],
979 ugeth->p_tx_glbl_pram->iphoffset[6]);
980 ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x",
981 (u32) & ugeth->p_tx_glbl_pram->iphoffset[7],
982 ugeth->p_tx_glbl_pram->iphoffset[7]);
983 ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x",
984 (u32) & ugeth->p_tx_glbl_pram->vtagtable[0],
985 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
986 ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x",
987 (u32) & ugeth->p_tx_glbl_pram->vtagtable[1],
988 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
989 ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x",
990 (u32) & ugeth->p_tx_glbl_pram->vtagtable[2],
991 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
992 ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x",
993 (u32) & ugeth->p_tx_glbl_pram->vtagtable[3],
994 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
995 ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x",
996 (u32) & ugeth->p_tx_glbl_pram->vtagtable[4],
997 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
998 ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x",
999 (u32) & ugeth->p_tx_glbl_pram->vtagtable[5],
1000 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
1001 ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x",
1002 (u32) & ugeth->p_tx_glbl_pram->vtagtable[6],
1003 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
1004 ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x",
1005 (u32) & ugeth->p_tx_glbl_pram->vtagtable[7],
1006 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
1007 ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
1008 (u32) & ugeth->p_tx_glbl_pram->tqptr,
1009 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
1010 }
1011 if (ugeth->p_rx_glbl_pram) {
1012 ugeth_info("RX global param:");
1013 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
1014 ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
1015 (u32) & ugeth->p_rx_glbl_pram->remoder,
1016 in_be32(&ugeth->p_rx_glbl_pram->remoder));
1017 ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
1018 (u32) & ugeth->p_rx_glbl_pram->rqptr,
1019 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
1020 ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
1021 (u32) & ugeth->p_rx_glbl_pram->typeorlen,
1022 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
1023 ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
1024 (u32) & ugeth->p_rx_glbl_pram->rxgstpack,
1025 ugeth->p_rx_glbl_pram->rxgstpack);
1026 ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
1027 (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr,
1028 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
1029 ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
1030 (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr,
1031 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
1032 ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
1033 (u32) & ugeth->p_rx_glbl_pram->rstate,
1034 ugeth->p_rx_glbl_pram->rstate);
1035 ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
1036 (u32) & ugeth->p_rx_glbl_pram->mrblr,
1037 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
1038 ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
1039 (u32) & ugeth->p_rx_glbl_pram->rbdqptr,
1040 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
1041 ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
1042 (u32) & ugeth->p_rx_glbl_pram->mflr,
1043 in_be16(&ugeth->p_rx_glbl_pram->mflr));
1044 ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
1045 (u32) & ugeth->p_rx_glbl_pram->minflr,
1046 in_be16(&ugeth->p_rx_glbl_pram->minflr));
1047 ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
1048 (u32) & ugeth->p_rx_glbl_pram->maxd1,
1049 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
1050 ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
1051 (u32) & ugeth->p_rx_glbl_pram->maxd2,
1052 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
1053 ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
1054 (u32) & ugeth->p_rx_glbl_pram->ecamptr,
1055 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
1056 ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
1057 (u32) & ugeth->p_rx_glbl_pram->l2qt,
1058 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
1059 ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x",
1060 (u32) & ugeth->p_rx_glbl_pram->l3qt[0],
1061 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
1062 ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x",
1063 (u32) & ugeth->p_rx_glbl_pram->l3qt[1],
1064 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
1065 ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x",
1066 (u32) & ugeth->p_rx_glbl_pram->l3qt[2],
1067 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
1068 ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x",
1069 (u32) & ugeth->p_rx_glbl_pram->l3qt[3],
1070 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
1071 ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x",
1072 (u32) & ugeth->p_rx_glbl_pram->l3qt[4],
1073 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
1074 ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x",
1075 (u32) & ugeth->p_rx_glbl_pram->l3qt[5],
1076 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
1077 ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x",
1078 (u32) & ugeth->p_rx_glbl_pram->l3qt[6],
1079 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
1080 ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x",
1081 (u32) & ugeth->p_rx_glbl_pram->l3qt[7],
1082 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
1083 ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
1084 (u32) & ugeth->p_rx_glbl_pram->vlantype,
1085 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
1086 ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
1087 (u32) & ugeth->p_rx_glbl_pram->vlantci,
1088 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
1089 for (i = 0; i < 64; i++)
1090 ugeth_info
1091 ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
1092 i,
1093 (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i],
1094 ugeth->p_rx_glbl_pram->addressfiltering[i]);
1095 ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
1096 (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam,
1097 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
1098 }
1099 if (ugeth->p_send_q_mem_reg) {
1100 ugeth_info("Send Q memory registers:");
1101 ugeth_info("Base address: 0x%08x",
1102 (u32) ugeth->p_send_q_mem_reg);
1103 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
1104 ugeth_info("SQQD[%d]:", i);
1105 ugeth_info("Base address: 0x%08x",
1106 (u32) & ugeth->p_send_q_mem_reg->sqqd[i]);
1107 mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i],
1108 sizeof(ucc_geth_send_queue_qd_t));
1109 }
1110 }
1111 if (ugeth->p_scheduler) {
1112 ugeth_info("Scheduler:");
1113 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
1114 mem_disp((u8 *) ugeth->p_scheduler,
1115 sizeof(*ugeth->p_scheduler));
1116 }
1117 if (ugeth->p_tx_fw_statistics_pram) {
1118 ugeth_info("TX FW statistics pram:");
1119 ugeth_info("Base address: 0x%08x",
1120 (u32) ugeth->p_tx_fw_statistics_pram);
1121 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
1122 sizeof(*ugeth->p_tx_fw_statistics_pram));
1123 }
1124 if (ugeth->p_rx_fw_statistics_pram) {
1125 ugeth_info("RX FW statistics pram:");
1126 ugeth_info("Base address: 0x%08x",
1127 (u32) ugeth->p_rx_fw_statistics_pram);
1128 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
1129 sizeof(*ugeth->p_rx_fw_statistics_pram));
1130 }
1131 if (ugeth->p_rx_irq_coalescing_tbl) {
1132 ugeth_info("RX IRQ coalescing tables:");
1133 ugeth_info("Base address: 0x%08x",
1134 (u32) ugeth->p_rx_irq_coalescing_tbl);
1135 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1136 ugeth_info("RX IRQ coalescing table entry[%d]:", i);
1137 ugeth_info("Base address: 0x%08x",
1138 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1139 coalescingentry[i]);
1140 ugeth_info
1141 ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
1142 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1143 coalescingentry[i].interruptcoalescingmaxvalue,
1144 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1145 coalescingentry[i].
1146 interruptcoalescingmaxvalue));
1147 ugeth_info
1148 ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
1149 (u32) & ugeth->p_rx_irq_coalescing_tbl->
1150 coalescingentry[i].interruptcoalescingcounter,
1151 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
1152 coalescingentry[i].
1153 interruptcoalescingcounter));
1154 }
1155 }
1156 if (ugeth->p_rx_bd_qs_tbl) {
1157 ugeth_info("RX BD QS tables:");
1158 ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
1159 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1160 ugeth_info("RX BD QS table[%d]:", i);
1161 ugeth_info("Base address: 0x%08x",
1162 (u32) & ugeth->p_rx_bd_qs_tbl[i]);
1163 ugeth_info
1164 ("bdbaseptr : addr - 0x%08x, val - 0x%08x",
1165 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
1166 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
1167 ugeth_info
1168 ("bdptr : addr - 0x%08x, val - 0x%08x",
1169 (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr,
1170 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
1171 ugeth_info
1172 ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
1173 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
1174 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
1175 externalbdbaseptr));
1176 ugeth_info
1177 ("externalbdptr : addr - 0x%08x, val - 0x%08x",
1178 (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
1179 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
1180 ugeth_info("ucode RX Prefetched BDs:");
1181 ugeth_info("Base address: 0x%08x",
1182 (u32)
1183 qe_muram_addr(in_be32
1184 (&ugeth->p_rx_bd_qs_tbl[i].
1185 bdbaseptr)));
1186 mem_disp((u8 *)
1187 qe_muram_addr(in_be32
1188 (&ugeth->p_rx_bd_qs_tbl[i].
1189 bdbaseptr)),
1190 sizeof(ucc_geth_rx_prefetched_bds_t));
1191 }
1192 }
1193 if (ugeth->p_init_enet_param_shadow) {
1194 int size;
1195 ugeth_info("Init enet param shadow:");
1196 ugeth_info("Base address: 0x%08x",
1197 (u32) ugeth->p_init_enet_param_shadow);
1198 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
1199 sizeof(*ugeth->p_init_enet_param_shadow));
1200
1201 size = sizeof(ucc_geth_thread_rx_pram_t);
1202 if (ugeth->ug_info->rxExtendedFiltering) {
1203 size +=
1204 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
1205 if (ugeth->ug_info->largestexternallookupkeysize ==
1206 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
1207 size +=
1208 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
1209 if (ugeth->ug_info->largestexternallookupkeysize ==
1210 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
1211 size +=
1212 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
1213 }
1214
1215 dump_init_enet_entries(ugeth,
1216 &(ugeth->p_init_enet_param_shadow->
1217 txthread[0]),
1218 ENET_INIT_PARAM_MAX_ENTRIES_TX,
1219 sizeof(ucc_geth_thread_tx_pram_t),
1220 ugeth->ug_info->riscTx, 0);
1221 dump_init_enet_entries(ugeth,
1222 &(ugeth->p_init_enet_param_shadow->
1223 rxthread[0]),
1224 ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
1225 ugeth->ug_info->riscRx, 1);
1226 }
1227}
1228#endif /* DEBUG */
1229
1230static void init_default_reg_vals(volatile u32 *upsmr_register,
1231 volatile u32 *maccfg1_register,
1232 volatile u32 *maccfg2_register)
1233{
1234 out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
1235 out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
1236 out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
1237}
1238
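/*
 * init_half_duplex_params - validate the half-duplex parameters and
 * pack them into the HAFDUP register; returns -EINVAL if any value
 * exceeds its maximum.
 */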
1239static int init_half_duplex_params(int alt_beb,
1240 int back_pressure_no_backoff,
1241 int no_backoff,
1242 int excess_defer,
1243 u8 alt_beb_truncation,
1244 u8 max_retransmissions,
1245 u8 collision_window,
1246 volatile u32 *hafdup_register)
1247{
1248 u32 value = 0;
1249
1250 if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
1251 (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
1252 (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
1253 return -EINVAL;
1254
1255 value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);
1256
1257 if (alt_beb)
1258 value |= HALFDUP_ALT_BEB;
1259 if (back_pressure_no_backoff)
1260 value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
1261 if (no_backoff)
1262 value |= HALFDUP_NO_BACKOFF;
1263 if (excess_defer)
1264 value |= HALFDUP_EXCESSIVE_DEFER;
1265
1266 value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);
1267
1268 value |= collision_window;
1269
1270 out_be32(hafdup_register, value);
1271 return 0;
1272}
1273
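/*
 * init_inter_frame_gap_params - validate the inter-frame gap values
 * and pack them into the IPGIFG register; part 1 of the
 * non-back-to-back IPG must not exceed part 2.
 */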
1274static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
1275 u8 non_btb_ipg,
1276 u8 min_ifg,
1277 u8 btb_ipg,
1278 volatile u32 *ipgifg_register)
1279{
1280 u32 value = 0;
1281
1282 /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
1283 IPG part 2 */
1284 if (non_btb_cs_ipg > non_btb_ipg)
1285 return -EINVAL;
1286
1287 if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
1288 (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
1289 /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
1290 (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
1291 return -EINVAL;
1292
1293 value |=
1294 ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
1295 IPGIFG_NBTB_CS_IPG_MASK);
1296 value |=
1297 ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
1298 IPGIFG_NBTB_IPG_MASK);
1299 value |=
1300 ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
1301 IPGIFG_MIN_IFG_MASK);
1302 value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);
1303
1304 out_be32(ipgifg_register, value);
1305 return 0;
1306}
1307
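/*
 * init_flow_control_params - program the pause period and extension
 * field into UEMPR, the automatic flow-control mode into UPSMR, and
 * the Rx/Tx flow-control enable bits into MACCFG1.
 */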
1308static int init_flow_control_params(u32 automatic_flow_control_mode,
1309 int rx_flow_control_enable,
1310 int tx_flow_control_enable,
1311 u16 pause_period,
1312 u16 extension_field,
1313 volatile u32 *upsmr_register,
1314 volatile u32 *uempr_register,
1315 volatile u32 *maccfg1_register)
1316{
1317 u32 value = 0;
1318
1319 /* Set UEMPR register */
1320 value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
1321 value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
1322 out_be32(uempr_register, value);
1323
1324 /* Set UPSMR register */
1325 value = in_be32(upsmr_register);
1326 value |= automatic_flow_control_mode;
1327 out_be32(upsmr_register, value);
1328
1329 value = in_be32(maccfg1_register);
1330 if (rx_flow_control_enable)
1331 value |= MACCFG1_FLOW_RX;
1332 if (tx_flow_control_enable)
1333 value |= MACCFG1_FLOW_TX;
1334 out_be32(maccfg1_register, value);
1335
1336 return 0;
1337}
1338
1339static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
1340 int auto_zero_hardware_statistics,
1341 volatile u32 *upsmr_register,
1342 volatile u16 *uescr_register)
1343{
1344 u32 upsmr_value = 0;
1345 u16 uescr_value = 0;
1346 /* Enable hardware statistics gathering if requested */
1347 if (enable_hardware_statistics) {
1348 upsmr_value = in_be32(upsmr_register);
1349 upsmr_value |= UPSMR_HSE;
1350 out_be32(upsmr_register, upsmr_value);
1351 }
1352
1353 /* Clear hardware statistics counters */
1354 uescr_value = in_be16(uescr_register);
1355 uescr_value |= UESCR_CLRCNT;
1356 /* Automatically zero hardware statistics counters on read,
1357 if requested */
1358 if (auto_zero_hardware_statistics)
1359 uescr_value |= UESCR_AUTOZ;
1360 out_be16(uescr_register, uescr_value);
1361
1362 return 0;
1363}
1364
1365static int init_firmware_statistics_gathering_mode(int
1366 enable_tx_firmware_statistics,
1367 int enable_rx_firmware_statistics,
1368 volatile u32 *tx_rmon_base_ptr,
1369 u32 tx_firmware_statistics_structure_address,
1370 volatile u32 *rx_rmon_base_ptr,
1371 u32 rx_firmware_statistics_structure_address,
1372 volatile u16 *temoder_register,
1373 volatile u32 *remoder_register)
1374{
1375	/* Note: this function does not check whether the pointer
1376	   parameters it receives are NULL */
1377 u16 temoder_value;
1378 u32 remoder_value;
1379
1380 if (enable_tx_firmware_statistics) {
1381 out_be32(tx_rmon_base_ptr,
1382 tx_firmware_statistics_structure_address);
1383 temoder_value = in_be16(temoder_register);
1384 temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE;
1385 out_be16(temoder_register, temoder_value);
1386 }
1387
1388 if (enable_rx_firmware_statistics) {
1389 out_be32(rx_rmon_base_ptr,
1390 rx_firmware_statistics_structure_address);
1391 remoder_value = in_be32(remoder_register);
1392 remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE;
1393 out_be32(remoder_register, remoder_value);
1394 }
1395
1396 return 0;
1397}
1398
1399static int init_mac_station_addr_regs(u8 address_byte_0,
1400 u8 address_byte_1,
1401 u8 address_byte_2,
1402 u8 address_byte_3,
1403 u8 address_byte_4,
1404 u8 address_byte_5,
1405 volatile u32 *macstnaddr1_register,
1406 volatile u32 *macstnaddr2_register)
1407{
1408 u32 value = 0;
1409
1410 /* Example: for a station address of 0x12345678ABCD, */
1411 /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */
1412
1413 /* MACSTNADDR1 Register: */
1414
1415 /* 0 7 8 15 */
1416 /* station address byte 5 station address byte 4 */
1417 /* 16 23 24 31 */
1418 /* station address byte 3 station address byte 2 */
1419 value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
1420 value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
1421 value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
1422 value |= (u32) ((address_byte_5 << 24) & 0xFF000000);
1423
1424 out_be32(macstnaddr1_register, value);
1425
1426 /* MACSTNADDR2 Register: */
1427
1428 /* 0 7 8 15 */
1429 /* station address byte 1 station address byte 0 */
1430 /* 16 23 24 31 */
1431 /* reserved reserved */
1432 value = 0;
1433 value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
1434 value |= (u32) ((address_byte_1 << 24) & 0xFF000000);
1435
1436 out_be32(macstnaddr2_register, value);
1437
1438 return 0;
1439}
1440
1441static int init_mac_duplex_mode(int full_duplex,
1442 int limited_to_full_duplex,
1443 volatile u32 *maccfg2_register)
1444{
1445 u32 value = 0;
1446
1447 /* some interfaces must work in full duplex mode */
1448 if ((full_duplex == 0) && (limited_to_full_duplex == 1))
1449 return -EINVAL;
1450
1451 value = in_be32(maccfg2_register);
1452
1453 if (full_duplex)
1454 value |= MACCFG2_FDX;
1455 else
1456 value &= ~MACCFG2_FDX;
1457
1458 out_be32(maccfg2_register, value);
1459 return 0;
1460}
1461
1462static int init_check_frame_length_mode(int length_check,
1463 volatile u32 *maccfg2_register)
1464{
1465 u32 value = 0;
1466
1467 value = in_be32(maccfg2_register);
1468
1469 if (length_check)
1470 value |= MACCFG2_LC;
1471 else
1472 value &= ~MACCFG2_LC;
1473
1474 out_be32(maccfg2_register, value);
1475 return 0;
1476}
1477
1478static int init_preamble_length(u8 preamble_length,
1479 volatile u32 *maccfg2_register)
1480{
1481 u32 value = 0;
1482
1483 if ((preamble_length < 3) || (preamble_length > 7))
1484 return -EINVAL;
1485
1486 value = in_be32(maccfg2_register);
1487 value &= ~MACCFG2_PREL_MASK;
1488 value |= (preamble_length << MACCFG2_PREL_SHIFT);
1489 out_be32(maccfg2_register, value);
1490 return 0;
1491}
1492
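/*
 * init_mii_management_configuration - optionally reset the MII
 * management block, program the management clock divider (and
 * preamble suppression), then wait for the MII bus to go idle.
 */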
1493static int init_mii_management_configuration(int reset_mgmt,
1494 int preamble_supress,
1495 volatile u32 *miimcfg_register,
1496 volatile u32 *miimind_register)
1497{
1498	int timeout = PHY_INIT_TIMEOUT;	/* must be signed: checked for <= 0 below */
1499 u32 value = 0;
1500
1501 value = in_be32(miimcfg_register);
1502 if (reset_mgmt) {
1503 value |= MIIMCFG_RESET_MANAGEMENT;
1504 out_be32(miimcfg_register, value);
1505 }
1506
1507 value = 0;
1508
1509 if (preamble_supress)
1510 value |= MIIMCFG_NO_PREAMBLE;
1511
1512 value |= UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT;
1513 out_be32(miimcfg_register, value);
1514
1515 /* Wait until the bus is free */
1516 while ((in_be32(miimind_register) & MIIMIND_BUSY) && timeout--)
1517 cpu_relax();
1518
1519 if (timeout <= 0) {
1520 ugeth_err("%s: The MII Bus is stuck!", __FUNCTION__);
1521 return -ETIMEDOUT;
1522 }
1523
1524 return 0;
1525}
1526
1527static int init_rx_parameters(int reject_broadcast,
1528 int receive_short_frames,
1529 int promiscuous, volatile u32 *upsmr_register)
1530{
1531 u32 value = 0;
1532
1533 value = in_be32(upsmr_register);
1534
1535 if (reject_broadcast)
1536 value |= UPSMR_BRO;
1537 else
1538 value &= ~UPSMR_BRO;
1539
1540 if (receive_short_frames)
1541 value |= UPSMR_RSH;
1542 else
1543 value &= ~UPSMR_RSH;
1544
1545 if (promiscuous)
1546 value |= UPSMR_PRO;
1547 else
1548 value &= ~UPSMR_PRO;
1549
1550 out_be32(upsmr_register, value);
1551
1552 return 0;
1553}
1554
1555static int init_max_rx_buff_len(u16 max_rx_buf_len,
1556 volatile u16 *mrblr_register)
1557{
1558 /* max_rx_buf_len value must be a multiple of 128 */
1559 if ((max_rx_buf_len == 0)
1560 || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT))
1561 return -EINVAL;
1562
1563 out_be16(mrblr_register, max_rx_buf_len);
1564 return 0;
1565}
1566
1567static int init_min_frame_len(u16 min_frame_length,
1568 volatile u16 *minflr_register,
1569 volatile u16 *mrblr_register)
1570{
1571 u16 mrblr_value = 0;
1572
1573 mrblr_value = in_be16(mrblr_register);
1574 if (min_frame_length >= (mrblr_value - 4))
1575 return -EINVAL;
1576
1577 out_be16(minflr_register, min_frame_length);
1578 return 0;
1579}
1580
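/*
 * adjust_enet_interface - program MACCFG2, UPSMR and UTBIPAR to match
 * the configured PHY interface mode and speed, disable TBI
 * autonegotiation when running in TBI mode, and apply the duplex,
 * length-check and preamble-length settings.
 */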
1581static int adjust_enet_interface(ucc_geth_private_t *ugeth)
1582{
1583 ucc_geth_info_t *ug_info;
1584 ucc_geth_t *ug_regs;
1585 ucc_fast_t *uf_regs;
1586 enet_speed_e speed;
1587 int ret_val, rpm = 0, tbi = 0, r10m = 0, rmm =
1588 0, limited_to_full_duplex = 0;
1589 u32 upsmr, maccfg2, utbipar, tbiBaseAddress;
1590 u16 value;
1591
1592 ugeth_vdbg("%s: IN", __FUNCTION__);
1593
1594 ug_info = ugeth->ug_info;
1595 ug_regs = ugeth->ug_regs;
1596 uf_regs = ugeth->uccf->uf_regs;
1597
1598 /* Analyze enet_interface according to Interface Mode Configuration
1599 table */
1600 ret_val =
1601 get_interface_details(ug_info->enet_interface, &speed, &r10m, &rmm,
1602 &rpm, &tbi, &limited_to_full_duplex);
1603 if (ret_val != 0) {
1604 ugeth_err
1605 ("%s: half duplex not supported in requested configuration.",
1606 __FUNCTION__);
1607 return ret_val;
1608 }
1609
1610 /* Set MACCFG2 */
1611 maccfg2 = in_be32(&ug_regs->maccfg2);
1612 maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
1613 if ((speed == ENET_SPEED_10BT) || (speed == ENET_SPEED_100BT))
1614 maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
1615 else if (speed == ENET_SPEED_1000BT)
1616 maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
1617 maccfg2 |= ug_info->padAndCrc;
1618 out_be32(&ug_regs->maccfg2, maccfg2);
1619
1620 /* Set UPSMR */
1621 upsmr = in_be32(&uf_regs->upsmr);
1622 upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM);
1623 if (rpm)
1624 upsmr |= UPSMR_RPM;
1625 if (r10m)
1626 upsmr |= UPSMR_R10M;
1627 if (tbi)
1628 upsmr |= UPSMR_TBIM;
1629 if (rmm)
1630 upsmr |= UPSMR_RMM;
1631 out_be32(&uf_regs->upsmr, upsmr);
1632
1633 /* Set UTBIPAR */
1634 utbipar = in_be32(&ug_regs->utbipar);
1635 utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;
1636 if (tbi)
1637 utbipar |=
1638 (ug_info->phy_address +
1639 ugeth->ug_info->uf_info.
1640 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1641 else
1642 utbipar |=
1643 (0x10 +
1644 ugeth->ug_info->uf_info.
1645 ucc_num) << UTBIPAR_PHY_ADDRESS_SHIFT;
1646 out_be32(&ug_regs->utbipar, utbipar);
1647
1648 /* Disable autonegotiation in tbi mode, because by default it
1649 comes up in autonegotiation mode. */
1650 /* Note that this depends on proper setting in utbipar register. */
1651 if (tbi) {
1652 tbiBaseAddress = in_be32(&ug_regs->utbipar);
1653 tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
1654 tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
1655 value =
1656 ugeth->mii_info->mdio_read(ugeth->dev, (u8) tbiBaseAddress,
1657 ENET_TBI_MII_CR);
1658 value &= ~0x1000; /* Turn off autonegotiation */
1659 ugeth->mii_info->mdio_write(ugeth->dev, (u8) tbiBaseAddress,
1660 ENET_TBI_MII_CR, value);
1661 }
1662
1663 ret_val = init_mac_duplex_mode(1,
1664 limited_to_full_duplex,
1665 &ug_regs->maccfg2);
1666 if (ret_val != 0) {
1667 ugeth_err
1668 ("%s: half duplex not supported in requested configuration.",
1669 __FUNCTION__);
1670 return ret_val;
1671 }
1672
1673 init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
1674
1675 ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2);
1676 if (ret_val != 0) {
1677 ugeth_err
1678 ("%s: Preamble length must be between 3 and 7 inclusive.",
1679 __FUNCTION__);
1680 return ret_val;
1681 }
1682
1683 return 0;
1684}
1685
1686/* Called every time the controller might need to be made
1687 * aware of new link state. The PHY code conveys this
1688 * information through variables in the ugeth structure, and this
1689 * function converts those variables into the appropriate
1690 * register values, and can bring down the device if needed.
1691 */
1692static void adjust_link(struct net_device *dev)
1693{
1694 ucc_geth_private_t *ugeth = netdev_priv(dev);
1695 ucc_geth_t *ug_regs;
1696 u32 tempval;
1697 struct ugeth_mii_info *mii_info = ugeth->mii_info;
1698
1699 ug_regs = ugeth->ug_regs;
1700
1701 if (mii_info->link) {
1702 /* Now we make sure that we can be in full duplex mode.
1703 * If not, we operate in half-duplex mode. */
1704 if (mii_info->duplex != ugeth->oldduplex) {
1705 if (!(mii_info->duplex)) {
1706 tempval = in_be32(&ug_regs->maccfg2);
1707 tempval &= ~(MACCFG2_FDX);
1708 out_be32(&ug_regs->maccfg2, tempval);
1709
1710 ugeth_info("%s: Half Duplex", dev->name);
1711 } else {
1712 tempval = in_be32(&ug_regs->maccfg2);
1713 tempval |= MACCFG2_FDX;
1714 out_be32(&ug_regs->maccfg2, tempval);
1715
1716 ugeth_info("%s: Full Duplex", dev->name);
1717 }
1718
1719 ugeth->oldduplex = mii_info->duplex;
1720 }
1721
1722 if (mii_info->speed != ugeth->oldspeed) {
1723 switch (mii_info->speed) {
1724 case 1000:
1725#ifdef CONFIG_MPC836x
1726/* FIXME: this block works around a 100 Mbps bug;
1727   remove it once the bug is fixed. */
1728 if (ugeth->ug_info->enet_interface ==
1729 ENET_1000_GMII)
1730 /* Run the commands which initialize the PHY */
1731 {
1732 tempval =
1733 (u32) mii_info->mdio_read(ugeth->
1734 dev, mii_info->mii_id, 0x1b);
1735 tempval |= 0x000f;
1736 mii_info->mdio_write(ugeth->dev,
1737 mii_info->mii_id, 0x1b,
1738 (u16) tempval);
1739 tempval =
1740 (u32) mii_info->mdio_read(ugeth->
1741 dev, mii_info->mii_id,
1742 MII_BMCR);
1743 mii_info->mdio_write(ugeth->dev,
1744 mii_info->mii_id, MII_BMCR,
1745 (u16) (tempval | BMCR_RESET));
1746 } else if (ugeth->ug_info->enet_interface ==
1747 ENET_1000_RGMII)
1748 /* Run the commands which initialize the PHY */
1749 {
1750 tempval =
1751 (u32) mii_info->mdio_read(ugeth->
1752 dev, mii_info->mii_id, 0x1b);
1753 tempval = (tempval & ~0x000f) | 0x000b;
1754 mii_info->mdio_write(ugeth->dev,
1755 mii_info->mii_id, 0x1b,
1756 (u16) tempval);
1757 tempval =
1758 (u32) mii_info->mdio_read(ugeth->
1759 dev, mii_info->mii_id,
1760 MII_BMCR);
1761 mii_info->mdio_write(ugeth->dev,
1762 mii_info->mii_id, MII_BMCR,
1763 (u16) (tempval | BMCR_RESET));
1764 }
1765 msleep(4000);
1766#endif /* CONFIG_MPC836x */
1767 adjust_enet_interface(ugeth);
1768 break;
1769 case 100:
1770 case 10:
1771#ifdef CONFIG_MPC836x
1772/* FIXME: this block works around a 100 Mbps bug;
1773   remove these lines once the bug is fixed. */
1774 ugeth->ug_info->enet_interface = ENET_100_RGMII;
1775 tempval =
1776 (u32) mii_info->mdio_read(ugeth->dev,
1777 mii_info->mii_id,
1778 0x1b);
1779 tempval = (tempval & ~0x000f) | 0x000b;
1780 mii_info->mdio_write(ugeth->dev,
1781 mii_info->mii_id, 0x1b,
1782 (u16) tempval);
1783 tempval =
1784 (u32) mii_info->mdio_read(ugeth->dev,
1785 mii_info->mii_id,
1786 MII_BMCR);
1787 mii_info->mdio_write(ugeth->dev,
1788 mii_info->mii_id, MII_BMCR,
1789 (u16) (tempval |
1790 BMCR_RESET));
1791 msleep(4000);
1792#endif /* CONFIG_MPC836x */
1793 adjust_enet_interface(ugeth);
1794 break;
1795 default:
1796 ugeth_warn
1797 ("%s: Ack! Speed (%d) is not 10/100/1000!",
1798 dev->name, mii_info->speed);
1799 break;
1800 }
1801
1802 ugeth_info("%s: Speed %dBT", dev->name,
1803 mii_info->speed);
1804
1805 ugeth->oldspeed = mii_info->speed;
1806 }
1807
1808 if (!ugeth->oldlink) {
1809 ugeth_info("%s: Link is up", dev->name);
1810 ugeth->oldlink = 1;
1811 netif_carrier_on(dev);
1812 netif_schedule(dev);
1813 }
1814 } else {
1815 if (ugeth->oldlink) {
1816 ugeth_info("%s: Link is down", dev->name);
1817 ugeth->oldlink = 0;
1818 ugeth->oldspeed = 0;
1819 ugeth->oldduplex = -1;
1820 netif_carrier_off(dev);
1821 }
1822 }
1823}
1824
1825/* Configure the PHY for dev.
1826 * Returns 0 on success, -1 on failure.
1827 */
1828static int init_phy(struct net_device *dev)
1829{
1830 ucc_geth_private_t *ugeth = netdev_priv(dev);
1831 struct phy_info *curphy;
1832 ucc_mii_mng_t *mii_regs;
1833 struct ugeth_mii_info *mii_info;
1834 int err;
1835
1836 mii_regs = &ugeth->ug_regs->miimng;
1837
1838 ugeth->oldlink = 0;
1839 ugeth->oldspeed = 0;
1840 ugeth->oldduplex = -1;
1841
1842 mii_info = kmalloc(sizeof(struct ugeth_mii_info), GFP_KERNEL);
1843
1844 if (NULL == mii_info) {
1845 ugeth_err("%s: Could not allocate mii_info", dev->name);
1846 return -ENOMEM;
1847 }
1848
1849 mii_info->mii_regs = mii_regs;
1850 mii_info->speed = SPEED_1000;
1851 mii_info->duplex = DUPLEX_FULL;
1852 mii_info->pause = 0;
1853 mii_info->link = 0;
1854
1855 mii_info->advertising = (ADVERTISED_10baseT_Half |
1856 ADVERTISED_10baseT_Full |
1857 ADVERTISED_100baseT_Half |
1858 ADVERTISED_100baseT_Full |
1859 ADVERTISED_1000baseT_Full);
1860 mii_info->autoneg = 1;
1861
1862 mii_info->mii_id = ugeth->ug_info->phy_address;
1863
1864 mii_info->dev = dev;
1865
1866 mii_info->mdio_read = &read_phy_reg;
1867 mii_info->mdio_write = &write_phy_reg;
1868
1869 ugeth->mii_info = mii_info;
1870
1871 spin_lock_irq(&ugeth->lock);
1872
1873	/* Set this UCC to be the master of the MII management */
1874 ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
1875
1876 if (init_mii_management_configuration(1,
1877 ugeth->ug_info->
1878 miiPreambleSupress,
1879 &mii_regs->miimcfg,
1880 &mii_regs->miimind)) {
1881 ugeth_err("%s: The MII Bus is stuck!", dev->name);
1882 err = -1;
1883 goto bus_fail;
1884 }
1885
1886 spin_unlock_irq(&ugeth->lock);
1887
1888 /* get info for this PHY */
1889 curphy = get_phy_info(ugeth->mii_info);
1890
1891 if (curphy == NULL) {
1892 ugeth_err("%s: No PHY found", dev->name);
1893 err = -1;
1894 goto no_phy;
1895 }
1896
1897 mii_info->phyinfo = curphy;
1898
1899 /* Run the commands which initialize the PHY */
1900 if (curphy->init) {
1901 err = curphy->init(ugeth->mii_info);
1902 if (err)
1903 goto phy_init_fail;
1904 }
1905
1906 return 0;
1907
1908 phy_init_fail:
1909 no_phy:
1910 bus_fail:
1911 kfree(mii_info);
1912
1913 return err;
1914}
1915
1916#ifdef CONFIG_UGETH_TX_ON_DEMOND
1917static int ugeth_transmit_on_demand(ucc_geth_private_t *ugeth)
1918{
1919 ucc_fast_transmit_on_demand(ugeth->uccf);
1920
1921 return 0;
1922}
1923#endif
1924
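/*
 * ugeth_graceful_stop_tx - mask and clear the GRACEFUL STOP TX event,
 * issue the QE GRACEFUL_STOP_TX host command and busy-wait until the
 * GRA event is raised before marking the transmitter as stopped.
 */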
1925static int ugeth_graceful_stop_tx(ucc_geth_private_t *ugeth)
1926{
1927 ucc_fast_private_t *uccf;
1928 u32 cecr_subblock;
1929 u32 temp;
1930
1931 uccf = ugeth->uccf;
1932
1933 /* Mask GRACEFUL STOP TX interrupt bit and clear it */
1934 temp = in_be32(uccf->p_uccm);
1935 temp &= ~UCCE_GRA;
1936 out_be32(uccf->p_uccm, temp);
1937 out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */
1938
1939 /* Issue host command */
1940 cecr_subblock =
1941 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1942 qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
1943 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1944
1945 /* Wait for command to complete */
1946 do {
1947 temp = in_be32(uccf->p_ucce);
1948 } while (!(temp & UCCE_GRA));
1949
1950 uccf->stopped_tx = 1;
1951
1952 return 0;
1953}
1954
1955static int ugeth_graceful_stop_rx(ucc_geth_private_t * ugeth)
1956{
1957 ucc_fast_private_t *uccf;
1958 u32 cecr_subblock;
1959 u8 temp;
1960
1961 uccf = ugeth->uccf;
1962
1963 /* Clear acknowledge bit */
1964 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1965 temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
1966 ugeth->p_rx_glbl_pram->rxgstpack = temp;
1967
1968 /* Keep issuing command and checking acknowledge bit until
1969 it is asserted, according to spec */
1970 do {
1971 /* Issue host command */
1972 cecr_subblock =
1973 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1974 ucc_num);
1975 qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
1976 (u8) QE_CR_PROTOCOL_ETHERNET, 0);
1977
1978 temp = ugeth->p_rx_glbl_pram->rxgstpack;
1979 } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX));
1980
1981 uccf->stopped_rx = 1;
1982
1983 return 0;
1984}
1985
1986static int ugeth_restart_tx(ucc_geth_private_t *ugeth)
1987{
1988 ucc_fast_private_t *uccf;
1989 u32 cecr_subblock;
1990
1991 uccf = ugeth->uccf;
1992
1993 cecr_subblock =
1994 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1995 qe_issue_cmd(QE_RESTART_TX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
1996 0);
1997 uccf->stopped_tx = 0;
1998
1999 return 0;
2000}
2001
2002static int ugeth_restart_rx(ucc_geth_private_t *ugeth)
2003{
2004 ucc_fast_private_t *uccf;
2005 u32 cecr_subblock;
2006
2007 uccf = ugeth->uccf;
2008
2009 cecr_subblock =
2010 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
2011 qe_issue_cmd(QE_RESTART_RX, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
2012 0);
2013 uccf->stopped_rx = 0;
2014
2015 return 0;
2016}
2017
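/*
 * ugeth_enable - restart any direction that was gracefully stopped and
 * then enable the underlying UCC fast controller for the requested
 * direction(s).
 */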
2018static int ugeth_enable(ucc_geth_private_t *ugeth, comm_dir_e mode)
2019{
2020 ucc_fast_private_t *uccf;
2021 int enabled_tx, enabled_rx;
2022
2023 uccf = ugeth->uccf;
2024
2025 /* check if the UCC number is in range. */
2026 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2027 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2028 return -EINVAL;
2029 }
2030
2031 enabled_tx = uccf->enabled_tx;
2032 enabled_rx = uccf->enabled_rx;
2033
2034 /* Get Tx and Rx going again, in case this channel was actively
2035 disabled. */
2036 if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx)
2037 ugeth_restart_tx(ugeth);
2038 if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx)
2039 ugeth_restart_rx(ugeth);
2040
2041 ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */
2042
2043 return 0;
2044
2045}
2046
2047static int ugeth_disable(ucc_geth_private_t * ugeth, comm_dir_e mode)
2048{
2049 ucc_fast_private_t *uccf;
2050
2051 uccf = ugeth->uccf;
2052
2053 /* check if the UCC number is in range. */
2054 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
2055 ugeth_err("%s: ucc_num out of range.", __FUNCTION__);
2056 return -EINVAL;
2057 }
2058
2059 /* Stop any transmissions */
2060 if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx)
2061 ugeth_graceful_stop_tx(ugeth);
2062
2063 /* Stop any receptions */
2064 if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx)
2065 ugeth_graceful_stop_rx(ugeth);
2066
2067 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
2068
2069 return 0;
2070}
2071
2072static void ugeth_dump_regs(ucc_geth_private_t *ugeth)
2073{
2074#ifdef DEBUG
2075 ucc_fast_dump_regs(ugeth->uccf);
2076 dump_regs(ugeth);
2077 dump_bds(ugeth);
2078#endif
2079}
2080
2081#ifdef CONFIG_UGETH_FILTERING
2082static int ugeth_ext_filtering_serialize_tad(ucc_geth_tad_params_t *
2083 p_UccGethTadParams,
2084 qe_fltr_tad_t *qe_fltr_tad)
2085{
2086 u16 temp;
2087
2088 /* Zero serialized TAD */
2089 memset(qe_fltr_tad, 0, QE_FLTR_TAD_SIZE);
2090
2091 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_V; /* Must have this */
2092 if (p_UccGethTadParams->rx_non_dynamic_extended_features_mode ||
2093 (p_UccGethTadParams->vtag_op != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2094 || (p_UccGethTadParams->vnontag_op !=
2095 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP)
2096 )
2097 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_EF;
2098 if (p_UccGethTadParams->reject_frame)
2099 qe_fltr_tad->serialized[0] |= UCC_GETH_TAD_REJ;
2100 temp =
2101 (u16) (((u16) p_UccGethTadParams->
2102 vtag_op) << UCC_GETH_TAD_VTAG_OP_SHIFT);
2103 qe_fltr_tad->serialized[0] |= (u8) (temp >> 8); /* upper bits */
2104
2105 qe_fltr_tad->serialized[1] |= (u8) (temp & 0x00ff); /* lower bits */
2106 if (p_UccGethTadParams->vnontag_op ==
2107 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT)
2108 qe_fltr_tad->serialized[1] |= UCC_GETH_TAD_V_NON_VTAG_OP;
2109 qe_fltr_tad->serialized[1] |=
2110 p_UccGethTadParams->rqos << UCC_GETH_TAD_RQOS_SHIFT;
2111
2112 qe_fltr_tad->serialized[2] |=
2113 p_UccGethTadParams->vpri << UCC_GETH_TAD_V_PRIORITY_SHIFT;
2114 /* upper bits */
2115 qe_fltr_tad->serialized[2] |= (u8) (p_UccGethTadParams->vid >> 8);
2116 /* lower bits */
2117 qe_fltr_tad->serialized[3] |= (u8) (p_UccGethTadParams->vid & 0x00ff);
2118
2119 return 0;
2120}
2121
2122static enet_addr_container_t
2123 *ugeth_82xx_filtering_get_match_addr_in_hash(ucc_geth_private_t *ugeth,
2124 enet_addr_t *p_enet_addr)
2125{
2126 enet_addr_container_t *enet_addr_cont;
2127 struct list_head *p_lh;
2128 u16 i, num;
2129 int32_t j;
2130 u8 *p_counter;
2131
2132 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2133 p_lh = &ugeth->group_hash_q;
2134 p_counter = &(ugeth->numGroupAddrInHash);
2135 } else {
2136 p_lh = &ugeth->ind_hash_q;
2137 p_counter = &(ugeth->numIndAddrInHash);
2138 }
2139
2140 if (!p_lh)
2141 return NULL;
2142
2143 num = *p_counter;
2144
2145 for (i = 0; i < num; i++) {
2146 enet_addr_cont =
2147 (enet_addr_container_t *)
2148 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2149 for (j = ENET_NUM_OCTETS_PER_ADDRESS - 1; j >= 0; j--) {
2150 if ((*p_enet_addr)[j] != (enet_addr_cont->address)[j])
2151 break;
2152 if (j == 0)
2153 return enet_addr_cont; /* Found */
2154 }
2155 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2156 }
2157 return NULL;
2158}
2159
2160static int ugeth_82xx_filtering_add_addr_in_hash(ucc_geth_private_t *ugeth,
2161 enet_addr_t *p_enet_addr)
2162{
2163 ucc_geth_enet_address_recognition_location_e location;
2164 enet_addr_container_t *enet_addr_cont;
2165 struct list_head *p_lh;
2166 u8 i;
2167 u32 limit;
2168 u8 *p_counter;
2169
2170 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2171 p_lh = &ugeth->group_hash_q;
2172 limit = ugeth->ug_info->maxGroupAddrInHash;
2173 location =
2174 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH;
2175 p_counter = &(ugeth->numGroupAddrInHash);
2176 } else {
2177 p_lh = &ugeth->ind_hash_q;
2178 limit = ugeth->ug_info->maxIndAddrInHash;
2179 location =
2180 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH;
2181 p_counter = &(ugeth->numIndAddrInHash);
2182 }
2183
2184 if ((enet_addr_cont =
2185 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr))) {
2186 list_add(p_lh, &enet_addr_cont->node); /* Put it back */
2187 return 0;
2188 }
2189 if ((!p_lh) || (!(*p_counter < limit)))
2190 return -EBUSY;
2191 if (!(enet_addr_cont = get_enet_addr_container()))
2192 return -ENOMEM;
2193 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2194 (enet_addr_cont->address)[i] = (*p_enet_addr)[i];
2195 enet_addr_cont->location = location;
2196 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2197 ++(*p_counter);
2198
2199 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2200
2201 return 0;
2202}
2203
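/*
 * ugeth_82xx_filtering_clear_addr_in_hash - remove one address from
 * the hash filter: free its container, clear the hash registers and
 * re-add every remaining queued address, with traffic disabled around
 * the update.
 */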
2204static int ugeth_82xx_filtering_clear_addr_in_hash(ucc_geth_private_t *ugeth,
2205 enet_addr_t *p_enet_addr)
2206{
2207 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2208 enet_addr_container_t *enet_addr_cont;
2209 ucc_fast_private_t *uccf;
2210 comm_dir_e comm_dir;
2211 u16 i, num;
2212 struct list_head *p_lh;
2213 u32 *addr_h, *addr_l;
2214 u8 *p_counter;
2215
2216 uccf = ugeth->uccf;
2217
2218 p_82xx_addr_filt =
2219 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2220 addressfiltering;
2221
2222 if (!
2223 (enet_addr_cont =
2224 ugeth_82xx_filtering_get_match_addr_in_hash(ugeth, p_enet_addr)))
2225 return -ENOENT;
2226
2227 /* It's been found and removed from the CQ. */
2228 /* Now destroy its container */
2229 put_enet_addr_container(enet_addr_cont);
2230
2231 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR) {
2232 addr_h = &(p_82xx_addr_filt->gaddr_h);
2233 addr_l = &(p_82xx_addr_filt->gaddr_l);
2234 p_lh = &ugeth->group_hash_q;
2235 p_counter = &(ugeth->numGroupAddrInHash);
2236 } else {
2237 addr_h = &(p_82xx_addr_filt->iaddr_h);
2238 addr_l = &(p_82xx_addr_filt->iaddr_l);
2239 p_lh = &ugeth->ind_hash_q;
2240 p_counter = &(ugeth->numIndAddrInHash);
2241 }
2242
2243 comm_dir = 0;
2244 if (uccf->enabled_tx)
2245 comm_dir |= COMM_DIR_TX;
2246 if (uccf->enabled_rx)
2247 comm_dir |= COMM_DIR_RX;
2248 if (comm_dir)
2249 ugeth_disable(ugeth, comm_dir);
2250
2251 /* Clear the hash table. */
2252 out_be32(addr_h, 0x00000000);
2253 out_be32(addr_l, 0x00000000);
2254
2255 /* Add all remaining CQ elements back into hash */
2256 num = --(*p_counter);
2257 for (i = 0; i < num; i++) {
2258 enet_addr_cont =
2259 (enet_addr_container_t *)
2260 ENET_ADDR_CONT_ENTRY(dequeue(p_lh));
2261 hw_add_addr_in_hash(ugeth, &(enet_addr_cont->address));
2262 enqueue(p_lh, &enet_addr_cont->node); /* Put it back */
2263 }
2264
2265 if (comm_dir)
2266 ugeth_enable(ugeth, comm_dir);
2267
2268 return 0;
2269}
2270#endif /* CONFIG_UGETH_FILTERING */
2271
2272static int ugeth_82xx_filtering_clear_all_addr_in_hash(ucc_geth_private_t *
2273 ugeth,
2274 enet_addr_type_e
2275 enet_addr_type)
2276{
2277 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2278 ucc_fast_private_t *uccf;
2279 comm_dir_e comm_dir;
2280 struct list_head *p_lh;
2281 u16 i, num;
2282 u32 *addr_h, *addr_l;
2283 u8 *p_counter;
2284
2285 uccf = ugeth->uccf;
2286
2287 p_82xx_addr_filt =
2288 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->p_rx_glbl_pram->
2289 addressfiltering;
2290
2291 if (enet_addr_type == ENET_ADDR_TYPE_GROUP) {
2292 addr_h = &(p_82xx_addr_filt->gaddr_h);
2293 addr_l = &(p_82xx_addr_filt->gaddr_l);
2294 p_lh = &ugeth->group_hash_q;
2295 p_counter = &(ugeth->numGroupAddrInHash);
2296 } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) {
2297 addr_h = &(p_82xx_addr_filt->iaddr_h);
2298 addr_l = &(p_82xx_addr_filt->iaddr_l);
2299 p_lh = &ugeth->ind_hash_q;
2300 p_counter = &(ugeth->numIndAddrInHash);
2301 } else
2302 return -EINVAL;
2303
2304 comm_dir = 0;
2305 if (uccf->enabled_tx)
2306 comm_dir |= COMM_DIR_TX;
2307 if (uccf->enabled_rx)
2308 comm_dir |= COMM_DIR_RX;
2309 if (comm_dir)
2310 ugeth_disable(ugeth, comm_dir);
2311
2312 /* Clear the hash table. */
2313 out_be32(addr_h, 0x00000000);
2314 out_be32(addr_l, 0x00000000);
2315
2316 if (!p_lh)
2317 return 0;
2318
2319 num = *p_counter;
2320
2321 /* Delete all remaining CQ elements */
2322 for (i = 0; i < num; i++)
2323 put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh)));
2324
2325 *p_counter = 0;
2326
2327 if (comm_dir)
2328 ugeth_enable(ugeth, comm_dir);
2329
2330 return 0;
2331}
2332
2333#ifdef CONFIG_UGETH_FILTERING
2334static int ugeth_82xx_filtering_add_addr_in_paddr(ucc_geth_private_t *ugeth,
2335 enet_addr_t *p_enet_addr,
2336 u8 paddr_num)
2337{
2338 int i;
2339
2340 if ((*p_enet_addr)[0] & ENET_GROUP_ADDR)
2341 ugeth_warn
2342 ("%s: multicast address added to paddr will have no "
2343 "effect - is this what you wanted?",
2344 __FUNCTION__);
2345
2346 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */
2347 /* store address in our database */
2348 for (i = 0; i < ENET_NUM_OCTETS_PER_ADDRESS; i++)
2349 ugeth->paddr[paddr_num][i] = (*p_enet_addr)[i];
2350 /* put in hardware */
2351 return hw_add_addr_in_paddr(ugeth, p_enet_addr, paddr_num);
2352}
2353#endif /* CONFIG_UGETH_FILTERING */
2354
2355static int ugeth_82xx_filtering_clear_addr_in_paddr(ucc_geth_private_t *ugeth,
2356 u8 paddr_num)
2357{
2358 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
2359 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
2360}
2361
2362static void ucc_geth_memclean(ucc_geth_private_t *ugeth)
2363{
2364 u16 i, j;
2365 u8 *bd;
2366
2367 if (!ugeth)
2368 return;
2369
2370 if (ugeth->uccf)
2371 ucc_fast_free(ugeth->uccf);
2372
2373 if (ugeth->p_thread_data_tx) {
2374 qe_muram_free(ugeth->thread_dat_tx_offset);
2375 ugeth->p_thread_data_tx = NULL;
2376 }
2377 if (ugeth->p_thread_data_rx) {
2378 qe_muram_free(ugeth->thread_dat_rx_offset);
2379 ugeth->p_thread_data_rx = NULL;
2380 }
2381 if (ugeth->p_exf_glbl_param) {
2382 qe_muram_free(ugeth->exf_glbl_param_offset);
2383 ugeth->p_exf_glbl_param = NULL;
2384 }
2385 if (ugeth->p_rx_glbl_pram) {
2386 qe_muram_free(ugeth->rx_glbl_pram_offset);
2387 ugeth->p_rx_glbl_pram = NULL;
2388 }
2389 if (ugeth->p_tx_glbl_pram) {
2390 qe_muram_free(ugeth->tx_glbl_pram_offset);
2391 ugeth->p_tx_glbl_pram = NULL;
2392 }
2393 if (ugeth->p_send_q_mem_reg) {
2394 qe_muram_free(ugeth->send_q_mem_reg_offset);
2395 ugeth->p_send_q_mem_reg = NULL;
2396 }
2397 if (ugeth->p_scheduler) {
2398 qe_muram_free(ugeth->scheduler_offset);
2399 ugeth->p_scheduler = NULL;
2400 }
2401 if (ugeth->p_tx_fw_statistics_pram) {
2402 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
2403 ugeth->p_tx_fw_statistics_pram = NULL;
2404 }
2405 if (ugeth->p_rx_fw_statistics_pram) {
2406 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
2407 ugeth->p_rx_fw_statistics_pram = NULL;
2408 }
2409 if (ugeth->p_rx_irq_coalescing_tbl) {
2410 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
2411 ugeth->p_rx_irq_coalescing_tbl = NULL;
2412 }
2413 if (ugeth->p_rx_bd_qs_tbl) {
2414 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
2415 ugeth->p_rx_bd_qs_tbl = NULL;
2416 }
2417 if (ugeth->p_init_enet_param_shadow) {
2418 return_init_enet_entries(ugeth,
2419 &(ugeth->p_init_enet_param_shadow->
2420 rxthread[0]),
2421 ENET_INIT_PARAM_MAX_ENTRIES_RX,
2422 ugeth->ug_info->riscRx, 1);
2423 return_init_enet_entries(ugeth,
2424 &(ugeth->p_init_enet_param_shadow->
2425 txthread[0]),
2426 ENET_INIT_PARAM_MAX_ENTRIES_TX,
2427 ugeth->ug_info->riscTx, 0);
2428 kfree(ugeth->p_init_enet_param_shadow);
2429 ugeth->p_init_enet_param_shadow = NULL;
2430 }
2431 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
2432 bd = ugeth->p_tx_bd_ring[i];
2433 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
2434 if (ugeth->tx_skbuff[i][j]) {
2435 dma_unmap_single(NULL,
2436 BD_BUFFER_ARG(bd),
2437 (BD_STATUS_AND_LENGTH(bd) &
2438 BD_LENGTH_MASK),
2439 DMA_TO_DEVICE);
2440 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
2441 ugeth->tx_skbuff[i][j] = NULL;
2442 }
2443 }
2444
2445 kfree(ugeth->tx_skbuff[i]);
2446
2447 if (ugeth->p_tx_bd_ring[i]) {
2448 if (ugeth->ug_info->uf_info.bd_mem_part ==
2449 MEM_PART_SYSTEM)
2450 kfree((void *)ugeth->tx_bd_ring_offset[i]);
2451 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2452 MEM_PART_MURAM)
2453 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
2454 ugeth->p_tx_bd_ring[i] = NULL;
2455 }
2456 }
2457 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
2458 if (ugeth->p_rx_bd_ring[i]) {
2459 /* Return existing data buffers in ring */
2460 bd = ugeth->p_rx_bd_ring[i];
2461 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
2462 if (ugeth->rx_skbuff[i][j]) {
2463 dma_unmap_single(NULL, BD_BUFFER(bd),
2464 ugeth->ug_info->
2465 uf_info.
2466 max_rx_buf_length +
2467 UCC_GETH_RX_DATA_BUF_ALIGNMENT,
2468 DMA_FROM_DEVICE);
2469
2470 dev_kfree_skb_any(ugeth->
2471 rx_skbuff[i][j]);
2472 ugeth->rx_skbuff[i][j] = NULL;
2473 }
2474 bd += UCC_GETH_SIZE_OF_BD;
2475 }
2476
2477 kfree(ugeth->rx_skbuff[i]);
2478
2479 if (ugeth->ug_info->uf_info.bd_mem_part ==
2480 MEM_PART_SYSTEM)
2481 kfree((void *)ugeth->rx_bd_ring_offset[i]);
2482 else if (ugeth->ug_info->uf_info.bd_mem_part ==
2483 MEM_PART_MURAM)
2484 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
2485 ugeth->p_rx_bd_ring[i] = NULL;
2486 }
2487 }
2488 while (!list_empty(&ugeth->group_hash_q))
2489 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2490 (dequeue(&ugeth->group_hash_q)));
2491 while (!list_empty(&ugeth->ind_hash_q))
2492 put_enet_addr_container(ENET_ADDR_CONT_ENTRY
2493 (dequeue(&ugeth->ind_hash_q)));
2494
2495}
2496
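/*
 * ucc_geth_set_multi - .set_multicast_list handler.
 * IFF_PROMISC sets UPSMR_PRO so the MAC accepts everything; IFF_ALLMULTI
 * opens the group hash filter completely (all ones); otherwise the hash is
 * cleared and rebuilt from dev->mc_list, reversing the byte order of each
 * address before asking the CPM to hash it via hw_add_addr_in_hash().
 */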
2497static void ucc_geth_set_multi(struct net_device *dev)
2498{
2499 ucc_geth_private_t *ugeth;
2500 struct dev_mc_list *dmi;
2501 ucc_fast_t *uf_regs;
2502 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2503 enet_addr_t tempaddr;
2504 u8 *mcptr, *tdptr;
2505 int i, j;
2506
2507 ugeth = netdev_priv(dev);
2508
2509 uf_regs = ugeth->uccf->uf_regs;
2510
2511 if (dev->flags & IFF_PROMISC) {
2512
2513 /* Log any net taps. */
 2514 		printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
2515 uf_regs->upsmr |= UPSMR_PRO;
2516
2517 } else {
2518
2519 uf_regs->upsmr &= ~UPSMR_PRO;
2520
2521 p_82xx_addr_filt =
2522 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
2523 p_rx_glbl_pram->addressfiltering;
2524
2525 if (dev->flags & IFF_ALLMULTI) {
2526 /* Catch all multicast addresses, so set the
2527 * filter to all 1's.
2528 */
2529 out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff);
2530 out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff);
2531 } else {
2532 /* Clear filter and add the addresses in the list.
2533 */
2534 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2535 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2536
2537 dmi = dev->mc_list;
2538
2539 for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
2540
2541 /* Only support group multicast for now.
2542 */
2543 if (!(dmi->dmi_addr[0] & 1))
2544 continue;
2545
2546 /* The address in dmi_addr is LSB first,
2547 * and taddr is MSB first. We have to
2548 * copy bytes MSB first from dmi_addr.
2549 */
2550 mcptr = (u8 *) dmi->dmi_addr + 5;
2551 tdptr = (u8 *) & tempaddr;
2552 for (j = 0; j < 6; j++)
2553 *tdptr++ = *mcptr--;
2554
2555 /* Ask CPM to run CRC and set bit in
2556 * filter mask.
2557 */
2558 hw_add_addr_in_hash(ugeth, &tempaddr);
2559
2560 }
2561 }
2562 }
2563}
2564
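/*
 * ucc_geth_stop - bring the controller down.
 * Disables both directions at the UCC level, reports link-down to the
 * stack, masks and clears all UCC events, turns off Rx/Tx in MACCFG1,
 * quiesces the PHY interrupt or polling timer, releases the IRQ and then
 * frees all resources through ucc_geth_memclean().
 */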
2565static void ucc_geth_stop(ucc_geth_private_t *ugeth)
2566{
2567 ucc_geth_t *ug_regs = ugeth->ug_regs;
2568 u32 tempval;
2569
2570 ugeth_vdbg("%s: IN", __FUNCTION__);
2571
2572 /* Disable the controller */
2573 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2574
2575 /* Tell the kernel the link is down */
2576 ugeth->mii_info->link = 0;
2577 adjust_link(ugeth->dev);
2578
2579 /* Mask all interrupts */
2580 out_be32(ugeth->uccf->p_ucce, 0x00000000);
2581
2582 /* Clear all interrupts */
2583 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2584
2585 /* Disable Rx and Tx */
2586 tempval = in_be32(&ug_regs->maccfg1);
2587 tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
2588 out_be32(&ug_regs->maccfg1, tempval);
2589
2590 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2591 /* Clear any pending interrupts */
2592 mii_clear_phy_interrupt(ugeth->mii_info);
2593
2594 /* Disable PHY Interrupts */
2595 mii_configure_phy_interrupt(ugeth->mii_info,
2596 MII_INTERRUPT_DISABLED);
2597 }
2598
2599 free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev);
2600
2601 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
2602 free_irq(ugeth->ug_info->phy_interrupt, ugeth->dev);
2603 } else {
2604 del_timer_sync(&ugeth->phy_info_timer);
2605 }
2606
2607 ucc_geth_memclean(ugeth);
2608}
2609
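/*
 * ucc_geth_startup - allocate and program everything the UCC needs.
 * Validates ug_info, initializes the fast UCC, allocates the Tx/Rx BD
 * rings and their skb arrays, carves the global Tx/Rx parameter RAM,
 * scheduler, statistics, interrupt-coalescing and BD-queue tables out of
 * MURAM, programs the 82xx-style (or extended) address filtering, and
 * finally issues the QE_INIT_TX_RX command from a shadow InitEnet
 * parameter structure.  On any failure it cleans up via
 * ucc_geth_memclean() and returns a negative errno.
 */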
2610static int ucc_geth_startup(ucc_geth_private_t *ugeth)
2611{
2612 ucc_geth_82xx_address_filtering_pram_t *p_82xx_addr_filt;
2613 ucc_geth_init_pram_t *p_init_enet_pram;
2614 ucc_fast_private_t *uccf;
2615 ucc_geth_info_t *ug_info;
2616 ucc_fast_info_t *uf_info;
2617 ucc_fast_t *uf_regs;
2618 ucc_geth_t *ug_regs;
2619 int ret_val = -EINVAL;
2620 u32 remoder = UCC_GETH_REMODER_INIT;
2621 u32 init_enet_pram_offset, cecr_subblock, command, maccfg1;
2622 u32 ifstat, i, j, size, l2qt, l3qt, length;
2623 u16 temoder = UCC_GETH_TEMODER_INIT;
2624 u16 test;
2625 u8 function_code = 0;
2626 u8 *bd, *endOfRing;
2627 u8 numThreadsRxNumerical, numThreadsTxNumerical;
2628
2629 ugeth_vdbg("%s: IN", __FUNCTION__);
2630
2631 ug_info = ugeth->ug_info;
2632 uf_info = &ug_info->uf_info;
2633
2634 if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) ||
2635 (uf_info->bd_mem_part == MEM_PART_MURAM))) {
2636 ugeth_err("%s: Bad memory partition value.", __FUNCTION__);
2637 return -EINVAL;
2638 }
2639
2640 /* Rx BD lengths */
2641 for (i = 0; i < ug_info->numQueuesRx; i++) {
2642 if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) ||
2643 (ug_info->bdRingLenRx[i] %
2644 UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) {
2645 ugeth_err
2646 ("%s: Rx BD ring length must be multiple of 4,"
2647 " no smaller than 8.", __FUNCTION__);
2648 return -EINVAL;
2649 }
2650 }
2651
2652 /* Tx BD lengths */
2653 for (i = 0; i < ug_info->numQueuesTx; i++) {
2654 if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) {
2655 ugeth_err
2656 ("%s: Tx BD ring length must be no smaller than 2.",
2657 __FUNCTION__);
2658 return -EINVAL;
2659 }
2660 }
2661
2662 /* mrblr */
2663 if ((uf_info->max_rx_buf_length == 0) ||
2664 (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) {
2665 ugeth_err
2666 ("%s: max_rx_buf_length must be non-zero multiple of 128.",
2667 __FUNCTION__);
2668 return -EINVAL;
2669 }
2670
2671 /* num Tx queues */
2672 if (ug_info->numQueuesTx > NUM_TX_QUEUES) {
2673 ugeth_err("%s: number of tx queues too large.", __FUNCTION__);
2674 return -EINVAL;
2675 }
2676
2677 /* num Rx queues */
2678 if (ug_info->numQueuesRx > NUM_RX_QUEUES) {
2679 ugeth_err("%s: number of rx queues too large.", __FUNCTION__);
2680 return -EINVAL;
2681 }
2682
2683 /* l2qt */
2684 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) {
2685 if (ug_info->l2qt[i] >= ug_info->numQueuesRx) {
2686 ugeth_err
2687 ("%s: VLAN priority table entry must not be"
2688 " larger than number of Rx queues.",
2689 __FUNCTION__);
2690 return -EINVAL;
2691 }
2692 }
2693
2694 /* l3qt */
2695 for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) {
2696 if (ug_info->l3qt[i] >= ug_info->numQueuesRx) {
2697 ugeth_err
2698 ("%s: IP priority table entry must not be"
2699 " larger than number of Rx queues.",
2700 __FUNCTION__);
2701 return -EINVAL;
2702 }
2703 }
2704
2705 if (ug_info->cam && !ug_info->ecamptr) {
2706 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.",
2707 __FUNCTION__);
2708 return -EINVAL;
2709 }
2710
2711 if ((ug_info->numStationAddresses !=
2712 UCC_GETH_NUM_OF_STATION_ADDRESSES_1)
2713 && ug_info->rxExtendedFiltering) {
2714 ugeth_err("%s: Number of station addresses greater than 1 "
2715 "not allowed in extended parsing mode.",
2716 __FUNCTION__);
2717 return -EINVAL;
2718 }
2719
2720 /* Generate uccm_mask for receive */
2721 uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */
2722 for (i = 0; i < ug_info->numQueuesRx; i++)
2723 uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i);
2724
2725 for (i = 0; i < ug_info->numQueuesTx; i++)
2726 uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i);
2727 /* Initialize the general fast UCC block. */
2728 if (ucc_fast_init(uf_info, &uccf)) {
2729 ugeth_err("%s: Failed to init uccf.", __FUNCTION__);
2730 ucc_geth_memclean(ugeth);
2731 return -ENOMEM;
2732 }
2733 ugeth->uccf = uccf;
2734
2735 switch (ug_info->numThreadsRx) {
2736 case UCC_GETH_NUM_OF_THREADS_1:
2737 numThreadsRxNumerical = 1;
2738 break;
2739 case UCC_GETH_NUM_OF_THREADS_2:
2740 numThreadsRxNumerical = 2;
2741 break;
2742 case UCC_GETH_NUM_OF_THREADS_4:
2743 numThreadsRxNumerical = 4;
2744 break;
2745 case UCC_GETH_NUM_OF_THREADS_6:
2746 numThreadsRxNumerical = 6;
2747 break;
2748 case UCC_GETH_NUM_OF_THREADS_8:
2749 numThreadsRxNumerical = 8;
2750 break;
2751 default:
2752 ugeth_err("%s: Bad number of Rx threads value.", __FUNCTION__);
2753 ucc_geth_memclean(ugeth);
2754 return -EINVAL;
2755 break;
2756 }
2757
2758 switch (ug_info->numThreadsTx) {
2759 case UCC_GETH_NUM_OF_THREADS_1:
2760 numThreadsTxNumerical = 1;
2761 break;
2762 case UCC_GETH_NUM_OF_THREADS_2:
2763 numThreadsTxNumerical = 2;
2764 break;
2765 case UCC_GETH_NUM_OF_THREADS_4:
2766 numThreadsTxNumerical = 4;
2767 break;
2768 case UCC_GETH_NUM_OF_THREADS_6:
2769 numThreadsTxNumerical = 6;
2770 break;
2771 case UCC_GETH_NUM_OF_THREADS_8:
2772 numThreadsTxNumerical = 8;
2773 break;
2774 default:
2775 ugeth_err("%s: Bad number of Tx threads value.", __FUNCTION__);
2776 ucc_geth_memclean(ugeth);
2777 return -EINVAL;
2778 break;
2779 }
2780
2781 /* Calculate rx_extended_features */
2782 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2783 ug_info->ipAddressAlignment ||
2784 (ug_info->numStationAddresses !=
2785 UCC_GETH_NUM_OF_STATION_ADDRESSES_1);
2786
2787 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2788 (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP)
2789 || (ug_info->vlanOperationNonTagged !=
2790 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP);
2791
2792 uf_regs = uccf->uf_regs;
2793 ug_regs = (ucc_geth_t *) (uccf->uf_regs);
2794 ugeth->ug_regs = ug_regs;
2795
2796 init_default_reg_vals(&uf_regs->upsmr,
2797 &ug_regs->maccfg1, &ug_regs->maccfg2);
2798
2799 /* Set UPSMR */
2800 /* For more details see the hardware spec. */
2801 init_rx_parameters(ug_info->bro,
2802 ug_info->rsh, ug_info->pro, &uf_regs->upsmr);
2803
2804 /* We're going to ignore other registers for now, */
2805 /* except as needed to get up and running */
2806
2807 /* Set MACCFG1 */
2808 /* For more details see the hardware spec. */
2809 init_flow_control_params(ug_info->aufc,
2810 ug_info->receiveFlowControl,
2811 1,
2812 ug_info->pausePeriod,
2813 ug_info->extensionField,
2814 &uf_regs->upsmr,
2815 &ug_regs->uempr, &ug_regs->maccfg1);
2816
2817 maccfg1 = in_be32(&ug_regs->maccfg1);
2818 maccfg1 |= MACCFG1_ENABLE_RX;
2819 maccfg1 |= MACCFG1_ENABLE_TX;
2820 out_be32(&ug_regs->maccfg1, maccfg1);
2821
2822 /* Set IPGIFG */
2823 /* For more details see the hardware spec. */
2824 ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1,
2825 ug_info->nonBackToBackIfgPart2,
2826 ug_info->
2827 miminumInterFrameGapEnforcement,
2828 ug_info->backToBackInterFrameGap,
2829 &ug_regs->ipgifg);
2830 if (ret_val != 0) {
2831 ugeth_err("%s: IPGIFG initialization parameter too large.",
2832 __FUNCTION__);
2833 ucc_geth_memclean(ugeth);
2834 return ret_val;
2835 }
2836
2837 /* Set HAFDUP */
2838 /* For more details see the hardware spec. */
2839 ret_val = init_half_duplex_params(ug_info->altBeb,
2840 ug_info->backPressureNoBackoff,
2841 ug_info->noBackoff,
2842 ug_info->excessDefer,
2843 ug_info->altBebTruncation,
2844 ug_info->maxRetransmission,
2845 ug_info->collisionWindow,
2846 &ug_regs->hafdup);
2847 if (ret_val != 0) {
2848 ugeth_err("%s: Half Duplex initialization parameter too large.",
2849 __FUNCTION__);
2850 ucc_geth_memclean(ugeth);
2851 return ret_val;
2852 }
2853
2854 /* Set IFSTAT */
2855 /* For more details see the hardware spec. */
2856 /* Read only - resets upon read */
2857 ifstat = in_be32(&ug_regs->ifstat);
2858
2859 /* Clear UEMPR */
2860 /* For more details see the hardware spec. */
2861 out_be32(&ug_regs->uempr, 0);
2862
2863 /* Set UESCR */
2864 /* For more details see the hardware spec. */
2865 init_hw_statistics_gathering_mode((ug_info->statisticsMode &
2866 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE),
2867 0, &uf_regs->upsmr, &ug_regs->uescr);
2868
2869 /* Allocate Tx bds */
2870 for (j = 0; j < ug_info->numQueuesTx; j++) {
2871 /* Allocate in multiple of
2872 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT,
2873 according to spec */
2874 length = ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD)
2875 / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2876 * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2877 if ((ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD) %
2878 UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT)
2879 length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
2880 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2881 u32 align = 4;
2882 if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4)
2883 align = UCC_GETH_TX_BD_RING_ALIGNMENT;
2884 ugeth->tx_bd_ring_offset[j] =
2885 (u32) (kmalloc((u32) (length + align),
2886 GFP_KERNEL));
2887 if (ugeth->tx_bd_ring_offset[j] != 0)
2888 ugeth->p_tx_bd_ring[j] =
2889 (void*)((ugeth->tx_bd_ring_offset[j] +
2890 align) & ~(align - 1));
2891 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2892 ugeth->tx_bd_ring_offset[j] =
2893 qe_muram_alloc(length,
2894 UCC_GETH_TX_BD_RING_ALIGNMENT);
2895 if (!IS_MURAM_ERR(ugeth->tx_bd_ring_offset[j]))
2896 ugeth->p_tx_bd_ring[j] =
2897 (u8 *) qe_muram_addr(ugeth->
2898 tx_bd_ring_offset[j]);
2899 }
2900 if (!ugeth->p_tx_bd_ring[j]) {
2901 ugeth_err
2902 ("%s: Can not allocate memory for Tx bd rings.",
2903 __FUNCTION__);
2904 ucc_geth_memclean(ugeth);
2905 return -ENOMEM;
2906 }
2907 /* Zero unused end of bd ring, according to spec */
2908 memset(ugeth->p_tx_bd_ring[j] +
2909 ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD, 0,
2910 length - ug_info->bdRingLenTx[j] * UCC_GETH_SIZE_OF_BD);
2911 }
2912
2913 /* Allocate Rx bds */
2914 for (j = 0; j < ug_info->numQueuesRx; j++) {
2915 length = ug_info->bdRingLenRx[j] * UCC_GETH_SIZE_OF_BD;
2916 if (uf_info->bd_mem_part == MEM_PART_SYSTEM) {
2917 u32 align = 4;
2918 if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4)
2919 align = UCC_GETH_RX_BD_RING_ALIGNMENT;
2920 ugeth->rx_bd_ring_offset[j] =
2921 (u32) (kmalloc((u32) (length + align), GFP_KERNEL));
2922 if (ugeth->rx_bd_ring_offset[j] != 0)
2923 ugeth->p_rx_bd_ring[j] =
2924 (void*)((ugeth->rx_bd_ring_offset[j] +
2925 align) & ~(align - 1));
2926 } else if (uf_info->bd_mem_part == MEM_PART_MURAM) {
2927 ugeth->rx_bd_ring_offset[j] =
2928 qe_muram_alloc(length,
2929 UCC_GETH_RX_BD_RING_ALIGNMENT);
2930 if (!IS_MURAM_ERR(ugeth->rx_bd_ring_offset[j]))
2931 ugeth->p_rx_bd_ring[j] =
2932 (u8 *) qe_muram_addr(ugeth->
2933 rx_bd_ring_offset[j]);
2934 }
2935 if (!ugeth->p_rx_bd_ring[j]) {
2936 ugeth_err
2937 ("%s: Can not allocate memory for Rx bd rings.",
2938 __FUNCTION__);
2939 ucc_geth_memclean(ugeth);
2940 return -ENOMEM;
2941 }
2942 }
2943
2944 /* Init Tx bds */
2945 for (j = 0; j < ug_info->numQueuesTx; j++) {
2946 /* Setup the skbuff rings */
2947 ugeth->tx_skbuff[j] =
2948 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2949 ugeth->ug_info->bdRingLenTx[j],
2950 GFP_KERNEL);
2951
2952 if (ugeth->tx_skbuff[j] == NULL) {
2953 ugeth_err("%s: Could not allocate tx_skbuff",
2954 __FUNCTION__);
2955 ucc_geth_memclean(ugeth);
2956 return -ENOMEM;
2957 }
2958
2959 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2960 ugeth->tx_skbuff[j][i] = NULL;
2961
2962 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2963 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2964 for (i = 0; i < ug_info->bdRingLenTx[j]; i++) {
2965 BD_BUFFER_CLEAR(bd);
2966 BD_STATUS_AND_LENGTH_SET(bd, 0);
2967 bd += UCC_GETH_SIZE_OF_BD;
2968 }
2969 bd -= UCC_GETH_SIZE_OF_BD;
2970 BD_STATUS_AND_LENGTH_SET(bd, T_W);/* for last BD set Wrap bit */
2971 }
2972
2973 /* Init Rx bds */
2974 for (j = 0; j < ug_info->numQueuesRx; j++) {
2975 /* Setup the skbuff rings */
2976 ugeth->rx_skbuff[j] =
2977 (struct sk_buff **)kmalloc(sizeof(struct sk_buff *) *
2978 ugeth->ug_info->bdRingLenRx[j],
2979 GFP_KERNEL);
2980
2981 if (ugeth->rx_skbuff[j] == NULL) {
2982 ugeth_err("%s: Could not allocate rx_skbuff",
2983 __FUNCTION__);
2984 ucc_geth_memclean(ugeth);
2985 return -ENOMEM;
2986 }
2987
2988 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2989 ugeth->rx_skbuff[j][i] = NULL;
2990
2991 ugeth->skb_currx[j] = 0;
2992 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
2993 for (i = 0; i < ug_info->bdRingLenRx[j]; i++) {
2994 BD_STATUS_AND_LENGTH_SET(bd, R_I);
2995 BD_BUFFER_CLEAR(bd);
2996 bd += UCC_GETH_SIZE_OF_BD;
2997 }
2998 bd -= UCC_GETH_SIZE_OF_BD;
2999 BD_STATUS_AND_LENGTH_SET(bd, R_W);/* for last BD set Wrap bit */
3000 }
3001
3002 /*
3003 * Global PRAM
3004 */
3005 /* Tx global PRAM */
3006 /* Allocate global tx parameter RAM page */
3007 ugeth->tx_glbl_pram_offset =
3008 qe_muram_alloc(sizeof(ucc_geth_tx_global_pram_t),
3009 UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT);
3010 if (IS_MURAM_ERR(ugeth->tx_glbl_pram_offset)) {
3011 ugeth_err
3012 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.",
3013 __FUNCTION__);
3014 ucc_geth_memclean(ugeth);
3015 return -ENOMEM;
3016 }
3017 ugeth->p_tx_glbl_pram =
3018 (ucc_geth_tx_global_pram_t *) qe_muram_addr(ugeth->
3019 tx_glbl_pram_offset);
3020 /* Zero out p_tx_glbl_pram */
3021 memset(ugeth->p_tx_glbl_pram, 0, sizeof(ucc_geth_tx_global_pram_t));
3022
3023 /* Fill global PRAM */
3024
3025 /* TQPTR */
3026 /* Size varies with number of Tx threads */
3027 ugeth->thread_dat_tx_offset =
3028 qe_muram_alloc(numThreadsTxNumerical *
3029 sizeof(ucc_geth_thread_data_tx_t) +
3030 32 * (numThreadsTxNumerical == 1),
3031 UCC_GETH_THREAD_DATA_ALIGNMENT);
3032 if (IS_MURAM_ERR(ugeth->thread_dat_tx_offset)) {
3033 ugeth_err
3034 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.",
3035 __FUNCTION__);
3036 ucc_geth_memclean(ugeth);
3037 return -ENOMEM;
3038 }
3039
3040 ugeth->p_thread_data_tx =
3041 (ucc_geth_thread_data_tx_t *) qe_muram_addr(ugeth->
3042 thread_dat_tx_offset);
3043 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
3044
3045 /* vtagtable */
3046 for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++)
3047 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
3048 ug_info->vtagtable[i]);
3049
3050 /* iphoffset */
3051 for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++)
3052 ugeth->p_tx_glbl_pram->iphoffset[i] = ug_info->iphoffset[i];
3053
3054 /* SQPTR */
3055 /* Size varies with number of Tx queues */
3056 ugeth->send_q_mem_reg_offset =
3057 qe_muram_alloc(ug_info->numQueuesTx *
3058 sizeof(ucc_geth_send_queue_qd_t),
3059 UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
3060 if (IS_MURAM_ERR(ugeth->send_q_mem_reg_offset)) {
3061 ugeth_err
3062 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.",
3063 __FUNCTION__);
3064 ucc_geth_memclean(ugeth);
3065 return -ENOMEM;
3066 }
3067
3068 ugeth->p_send_q_mem_reg =
3069 (ucc_geth_send_queue_mem_region_t *) qe_muram_addr(ugeth->
3070 send_q_mem_reg_offset);
3071 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
3072
3073 /* Setup the table */
3074 /* Assume BD rings are already established */
3075 for (i = 0; i < ug_info->numQueuesTx; i++) {
3076 endOfRing =
3077 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
3078 1) * UCC_GETH_SIZE_OF_BD;
3079 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3080 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3081 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
3082 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3083 last_bd_completed_address,
3084 (u32) virt_to_phys(endOfRing));
3085 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3086 MEM_PART_MURAM) {
3087 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
3088 (u32) immrbar_virt_to_phys(ugeth->
3089 p_tx_bd_ring[i]));
3090 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
3091 last_bd_completed_address,
3092 (u32) immrbar_virt_to_phys(endOfRing));
3093 }
3094 }
3095
3096 /* schedulerbasepointer */
3097
3098 if (ug_info->numQueuesTx > 1) {
3099 /* scheduler exists only if more than 1 tx queue */
3100 ugeth->scheduler_offset =
3101 qe_muram_alloc(sizeof(ucc_geth_scheduler_t),
3102 UCC_GETH_SCHEDULER_ALIGNMENT);
3103 if (IS_MURAM_ERR(ugeth->scheduler_offset)) {
3104 ugeth_err
3105 ("%s: Can not allocate DPRAM memory for p_scheduler.",
3106 __FUNCTION__);
3107 ucc_geth_memclean(ugeth);
3108 return -ENOMEM;
3109 }
3110
3111 ugeth->p_scheduler =
3112 (ucc_geth_scheduler_t *) qe_muram_addr(ugeth->
3113 scheduler_offset);
3114 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
3115 ugeth->scheduler_offset);
3116 /* Zero out p_scheduler */
3117 memset(ugeth->p_scheduler, 0, sizeof(ucc_geth_scheduler_t));
3118
3119 /* Set values in scheduler */
3120 out_be32(&ugeth->p_scheduler->mblinterval,
3121 ug_info->mblinterval);
3122 out_be16(&ugeth->p_scheduler->nortsrbytetime,
3123 ug_info->nortsrbytetime);
3124 ugeth->p_scheduler->fracsiz = ug_info->fracsiz;
3125 ugeth->p_scheduler->strictpriorityq = ug_info->strictpriorityq;
3126 ugeth->p_scheduler->txasap = ug_info->txasap;
3127 ugeth->p_scheduler->extrabw = ug_info->extrabw;
3128 for (i = 0; i < NUM_TX_QUEUES; i++)
3129 ugeth->p_scheduler->weightfactor[i] =
3130 ug_info->weightfactor[i];
3131
3132 /* Set pointers to cpucount registers in scheduler */
3133 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
3134 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
3135 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
3136 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
3137 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
3138 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
3139 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
3140 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
3141 }
3142
3143 /* schedulerbasepointer */
3144 /* TxRMON_PTR (statistics) */
3145 if (ug_info->
3146 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
3147 ugeth->tx_fw_statistics_pram_offset =
3148 qe_muram_alloc(sizeof
3149 (ucc_geth_tx_firmware_statistics_pram_t),
3150 UCC_GETH_TX_STATISTICS_ALIGNMENT);
3151 if (IS_MURAM_ERR(ugeth->tx_fw_statistics_pram_offset)) {
3152 ugeth_err
3153 ("%s: Can not allocate DPRAM memory for"
3154 " p_tx_fw_statistics_pram.", __FUNCTION__);
3155 ucc_geth_memclean(ugeth);
3156 return -ENOMEM;
3157 }
3158 ugeth->p_tx_fw_statistics_pram =
3159 (ucc_geth_tx_firmware_statistics_pram_t *)
3160 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
3161 /* Zero out p_tx_fw_statistics_pram */
3162 memset(ugeth->p_tx_fw_statistics_pram,
3163 0, sizeof(ucc_geth_tx_firmware_statistics_pram_t));
3164 }
3165
3166 /* temoder */
3167 /* Already has speed set */
3168
3169 if (ug_info->numQueuesTx > 1)
3170 temoder |= TEMODER_SCHEDULER_ENABLE;
3171 if (ug_info->ipCheckSumGenerate)
3172 temoder |= TEMODER_IP_CHECKSUM_GENERATE;
3173 temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT);
3174 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
3175
3176 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
3177
3178 /* Function code register value to be used later */
3179 function_code = QE_BMR_BYTE_ORDER_BO_MOT | UCC_FAST_FUNCTION_CODE_GBL;
3180 /* Required for QE */
3181
3182 /* function code register */
3183 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
3184
3185 /* Rx global PRAM */
3186 /* Allocate global rx parameter RAM page */
3187 ugeth->rx_glbl_pram_offset =
3188 qe_muram_alloc(sizeof(ucc_geth_rx_global_pram_t),
3189 UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT);
3190 if (IS_MURAM_ERR(ugeth->rx_glbl_pram_offset)) {
3191 ugeth_err
3192 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.",
3193 __FUNCTION__);
3194 ucc_geth_memclean(ugeth);
3195 return -ENOMEM;
3196 }
3197 ugeth->p_rx_glbl_pram =
3198 (ucc_geth_rx_global_pram_t *) qe_muram_addr(ugeth->
3199 rx_glbl_pram_offset);
3200 /* Zero out p_rx_glbl_pram */
3201 memset(ugeth->p_rx_glbl_pram, 0, sizeof(ucc_geth_rx_global_pram_t));
3202
3203 /* Fill global PRAM */
3204
3205 /* RQPTR */
3206 /* Size varies with number of Rx threads */
3207 ugeth->thread_dat_rx_offset =
3208 qe_muram_alloc(numThreadsRxNumerical *
3209 sizeof(ucc_geth_thread_data_rx_t),
3210 UCC_GETH_THREAD_DATA_ALIGNMENT);
3211 if (IS_MURAM_ERR(ugeth->thread_dat_rx_offset)) {
3212 ugeth_err
3213 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.",
3214 __FUNCTION__);
3215 ucc_geth_memclean(ugeth);
3216 return -ENOMEM;
3217 }
3218
3219 ugeth->p_thread_data_rx =
3220 (ucc_geth_thread_data_rx_t *) qe_muram_addr(ugeth->
3221 thread_dat_rx_offset);
3222 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
3223
3224 /* typeorlen */
3225 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
3226
3227 /* rxrmonbaseptr (statistics) */
3228 if (ug_info->
3229 statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
3230 ugeth->rx_fw_statistics_pram_offset =
3231 qe_muram_alloc(sizeof
3232 (ucc_geth_rx_firmware_statistics_pram_t),
3233 UCC_GETH_RX_STATISTICS_ALIGNMENT);
3234 if (IS_MURAM_ERR(ugeth->rx_fw_statistics_pram_offset)) {
3235 ugeth_err
3236 ("%s: Can not allocate DPRAM memory for"
3237 " p_rx_fw_statistics_pram.", __FUNCTION__);
3238 ucc_geth_memclean(ugeth);
3239 return -ENOMEM;
3240 }
3241 ugeth->p_rx_fw_statistics_pram =
3242 (ucc_geth_rx_firmware_statistics_pram_t *)
3243 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
3244 /* Zero out p_rx_fw_statistics_pram */
3245 memset(ugeth->p_rx_fw_statistics_pram, 0,
3246 sizeof(ucc_geth_rx_firmware_statistics_pram_t));
3247 }
3248
3249 /* intCoalescingPtr */
3250
3251 /* Size varies with number of Rx queues */
3252 ugeth->rx_irq_coalescing_tbl_offset =
3253 qe_muram_alloc(ug_info->numQueuesRx *
3254 sizeof(ucc_geth_rx_interrupt_coalescing_entry_t),
3255 UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT);
3256 if (IS_MURAM_ERR(ugeth->rx_irq_coalescing_tbl_offset)) {
3257 ugeth_err
3258 ("%s: Can not allocate DPRAM memory for"
3259 " p_rx_irq_coalescing_tbl.", __FUNCTION__);
3260 ucc_geth_memclean(ugeth);
3261 return -ENOMEM;
3262 }
3263
3264 ugeth->p_rx_irq_coalescing_tbl =
3265 (ucc_geth_rx_interrupt_coalescing_table_t *)
3266 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
3267 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
3268 ugeth->rx_irq_coalescing_tbl_offset);
3269
3270 /* Fill interrupt coalescing table */
3271 for (i = 0; i < ug_info->numQueuesRx; i++) {
3272 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3273 interruptcoalescingmaxvalue,
3274 ug_info->interruptcoalescingmaxvalue[i]);
3275 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
3276 interruptcoalescingcounter,
3277 ug_info->interruptcoalescingmaxvalue[i]);
3278 }
3279
3280 /* MRBLR */
3281 init_max_rx_buff_len(uf_info->max_rx_buf_length,
3282 &ugeth->p_rx_glbl_pram->mrblr);
3283 /* MFLR */
3284 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
3285 /* MINFLR */
3286 init_min_frame_len(ug_info->minFrameLength,
3287 &ugeth->p_rx_glbl_pram->minflr,
3288 &ugeth->p_rx_glbl_pram->mrblr);
3289 /* MAXD1 */
3290 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
3291 /* MAXD2 */
3292 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
3293
3294 /* l2qt */
3295 l2qt = 0;
3296 for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++)
3297 l2qt |= (ug_info->l2qt[i] << (28 - 4 * i));
3298 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
3299
3300 /* l3qt */
3301 for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) {
3302 l3qt = 0;
3303 for (i = 0; i < 8; i++)
3304 l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i));
3305 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j], l3qt);
3306 }
3307
3308 /* vlantype */
3309 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
3310
3311 /* vlantci */
3312 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
3313
3314 /* ecamptr */
3315 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
3316
3317 /* RBDQPTR */
3318 /* Size varies with number of Rx queues */
3319 ugeth->rx_bd_qs_tbl_offset =
3320 qe_muram_alloc(ug_info->numQueuesRx *
3321 (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3322 sizeof(ucc_geth_rx_prefetched_bds_t)),
3323 UCC_GETH_RX_BD_QUEUES_ALIGNMENT);
3324 if (IS_MURAM_ERR(ugeth->rx_bd_qs_tbl_offset)) {
3325 ugeth_err
3326 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.",
3327 __FUNCTION__);
3328 ucc_geth_memclean(ugeth);
3329 return -ENOMEM;
3330 }
3331
3332 ugeth->p_rx_bd_qs_tbl =
3333 (ucc_geth_rx_bd_queues_entry_t *) qe_muram_addr(ugeth->
3334 rx_bd_qs_tbl_offset);
3335 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
3336 /* Zero out p_rx_bd_qs_tbl */
3337 memset(ugeth->p_rx_bd_qs_tbl,
3338 0,
3339 ug_info->numQueuesRx * (sizeof(ucc_geth_rx_bd_queues_entry_t) +
3340 sizeof(ucc_geth_rx_prefetched_bds_t)));
3341
3342 /* Setup the table */
3343 /* Assume BD rings are already established */
3344 for (i = 0; i < ug_info->numQueuesRx; i++) {
3345 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
3346 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3347 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
3348 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
3349 MEM_PART_MURAM) {
3350 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
3351 (u32) immrbar_virt_to_phys(ugeth->
3352 p_rx_bd_ring[i]));
3353 }
3354 /* rest of fields handled by QE */
3355 }
3356
3357 /* remoder */
3358 /* Already has speed set */
3359
3360 if (ugeth->rx_extended_features)
3361 remoder |= REMODER_RX_EXTENDED_FEATURES;
3362 if (ug_info->rxExtendedFiltering)
3363 remoder |= REMODER_RX_EXTENDED_FILTERING;
3364 if (ug_info->dynamicMaxFrameLength)
3365 remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH;
3366 if (ug_info->dynamicMinFrameLength)
3367 remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH;
3368 remoder |=
3369 ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT;
3370 remoder |=
3371 ug_info->
3372 vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT;
3373 remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT;
3374 remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT);
3375 if (ug_info->ipCheckSumCheck)
3376 remoder |= REMODER_IP_CHECKSUM_CHECK;
3377 if (ug_info->ipAddressAlignment)
3378 remoder |= REMODER_IP_ADDRESS_ALIGNMENT;
3379 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
3380
3381 /* Note that this function must be called */
3382 /* ONLY AFTER p_tx_fw_statistics_pram */
 3383 	/* and p_rx_fw_statistics_pram are allocated! */
3384 init_firmware_statistics_gathering_mode((ug_info->
3385 statisticsMode &
3386 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX),
3387 (ug_info->statisticsMode &
3388 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX),
3389 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
3390 ugeth->tx_fw_statistics_pram_offset,
3391 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
3392 ugeth->rx_fw_statistics_pram_offset,
3393 &ugeth->p_tx_glbl_pram->temoder,
3394 &ugeth->p_rx_glbl_pram->remoder);
3395
3396 /* function code register */
3397 ugeth->p_rx_glbl_pram->rstate = function_code;
3398
3399 /* initialize extended filtering */
3400 if (ug_info->rxExtendedFiltering) {
3401 if (!ug_info->extendedFilteringChainPointer) {
3402 ugeth_err("%s: Null Extended Filtering Chain Pointer.",
3403 __FUNCTION__);
3404 ucc_geth_memclean(ugeth);
3405 return -EINVAL;
3406 }
3407
3408 /* Allocate memory for extended filtering Mode Global
3409 Parameters */
3410 ugeth->exf_glbl_param_offset =
3411 qe_muram_alloc(sizeof(ucc_geth_exf_global_pram_t),
3412 UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT);
3413 if (IS_MURAM_ERR(ugeth->exf_glbl_param_offset)) {
3414 ugeth_err
3415 ("%s: Can not allocate DPRAM memory for"
3416 " p_exf_glbl_param.", __FUNCTION__);
3417 ucc_geth_memclean(ugeth);
3418 return -ENOMEM;
3419 }
3420
3421 ugeth->p_exf_glbl_param =
3422 (ucc_geth_exf_global_pram_t *) qe_muram_addr(ugeth->
3423 exf_glbl_param_offset);
3424 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
3425 ugeth->exf_glbl_param_offset);
3426 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
3427 (u32) ug_info->extendedFilteringChainPointer);
3428
3429 } else { /* initialize 82xx style address filtering */
3430
3431 /* Init individual address recognition registers to disabled */
3432
3433 for (j = 0; j < NUM_OF_PADDRS; j++)
3434 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
3435
3436 /* Create CQs for hash tables */
3437 if (ug_info->maxGroupAddrInHash > 0) {
3438 INIT_LIST_HEAD(&ugeth->group_hash_q);
3439 }
3440 if (ug_info->maxIndAddrInHash > 0) {
3441 INIT_LIST_HEAD(&ugeth->ind_hash_q);
3442 }
3443 p_82xx_addr_filt =
3444 (ucc_geth_82xx_address_filtering_pram_t *) ugeth->
3445 p_rx_glbl_pram->addressfiltering;
3446
3447 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3448 ENET_ADDR_TYPE_GROUP);
3449 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
3450 ENET_ADDR_TYPE_INDIVIDUAL);
3451 }
3452
3453 /*
3454 * Initialize UCC at QE level
3455 */
3456
3457 command = QE_INIT_TX_RX;
3458
3459 /* Allocate shadow InitEnet command parameter structure.
3460 * This is needed because after the InitEnet command is executed,
3461 * the structure in DPRAM is released, because DPRAM is a premium
3462 * resource.
3463 * This shadow structure keeps a copy of what was done so that the
3464 * allocated resources can be released when the channel is freed.
3465 */
3466 if (!(ugeth->p_init_enet_param_shadow =
3467 (ucc_geth_init_pram_t *) kmalloc(sizeof(ucc_geth_init_pram_t),
3468 GFP_KERNEL))) {
3469 ugeth_err
3470 ("%s: Can not allocate memory for"
3471 " p_UccInitEnetParamShadows.", __FUNCTION__);
3472 ucc_geth_memclean(ugeth);
3473 return -ENOMEM;
3474 }
3475 /* Zero out *p_init_enet_param_shadow */
3476 memset((char *)ugeth->p_init_enet_param_shadow,
3477 0, sizeof(ucc_geth_init_pram_t));
3478
3479 /* Fill shadow InitEnet command parameter structure */
3480
3481 ugeth->p_init_enet_param_shadow->resinit1 =
3482 ENET_INIT_PARAM_MAGIC_RES_INIT1;
3483 ugeth->p_init_enet_param_shadow->resinit2 =
3484 ENET_INIT_PARAM_MAGIC_RES_INIT2;
3485 ugeth->p_init_enet_param_shadow->resinit3 =
3486 ENET_INIT_PARAM_MAGIC_RES_INIT3;
3487 ugeth->p_init_enet_param_shadow->resinit4 =
3488 ENET_INIT_PARAM_MAGIC_RES_INIT4;
3489 ugeth->p_init_enet_param_shadow->resinit5 =
3490 ENET_INIT_PARAM_MAGIC_RES_INIT5;
3491 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3492 ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT;
3493 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3494 ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT;
3495
3496 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
3497 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
3498 if ((ug_info->largestexternallookupkeysize !=
3499 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE)
3500 && (ug_info->largestexternallookupkeysize !=
3501 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3502 && (ug_info->largestexternallookupkeysize !=
3503 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) {
3504 ugeth_err("%s: Invalid largest External Lookup Key Size.",
3505 __FUNCTION__);
3506 ucc_geth_memclean(ugeth);
3507 return -EINVAL;
3508 }
3509 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
3510 ug_info->largestexternallookupkeysize;
3511 size = sizeof(ucc_geth_thread_rx_pram_t);
3512 if (ug_info->rxExtendedFiltering) {
3513 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
3514 if (ug_info->largestexternallookupkeysize ==
3515 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
3516 size +=
3517 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
3518 if (ug_info->largestexternallookupkeysize ==
3519 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
3520 size +=
3521 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3522 }
3523
3524 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
3525 p_init_enet_param_shadow->rxthread[0]),
3526 (u8) (numThreadsRxNumerical + 1)
3527 /* Rx needs one extra for terminator */
3528 , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT,
3529 ug_info->riscRx, 1)) != 0) {
3530 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3531 __FUNCTION__);
3532 ucc_geth_memclean(ugeth);
3533 return ret_val;
3534 }
3535
3536 ugeth->p_init_enet_param_shadow->txglobal =
3537 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
3538 if ((ret_val =
3539 fill_init_enet_entries(ugeth,
3540 &(ugeth->p_init_enet_param_shadow->
3541 txthread[0]), numThreadsTxNumerical,
3542 sizeof(ucc_geth_thread_tx_pram_t),
3543 UCC_GETH_THREAD_TX_PRAM_ALIGNMENT,
3544 ug_info->riscTx, 0)) != 0) {
3545 ugeth_err("%s: Can not fill p_init_enet_param_shadow.",
3546 __FUNCTION__);
3547 ucc_geth_memclean(ugeth);
3548 return ret_val;
3549 }
3550
3551 /* Load Rx bds with buffers */
3552 for (i = 0; i < ug_info->numQueuesRx; i++) {
3553 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3554 ugeth_err("%s: Can not fill Rx bds with buffers.",
3555 __FUNCTION__);
3556 ucc_geth_memclean(ugeth);
3557 return ret_val;
3558 }
3559 }
3560
3561 /* Allocate InitEnet command parameter structure */
3562 init_enet_pram_offset = qe_muram_alloc(sizeof(ucc_geth_init_pram_t), 4);
3563 if (IS_MURAM_ERR(init_enet_pram_offset)) {
3564 ugeth_err
3565 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.",
3566 __FUNCTION__);
3567 ucc_geth_memclean(ugeth);
3568 return -ENOMEM;
3569 }
3570 p_init_enet_pram =
3571 (ucc_geth_init_pram_t *) qe_muram_addr(init_enet_pram_offset);
3572
3573 /* Copy shadow InitEnet command parameter structure into PRAM */
3574 p_init_enet_pram->resinit1 = ugeth->p_init_enet_param_shadow->resinit1;
3575 p_init_enet_pram->resinit2 = ugeth->p_init_enet_param_shadow->resinit2;
3576 p_init_enet_pram->resinit3 = ugeth->p_init_enet_param_shadow->resinit3;
3577 p_init_enet_pram->resinit4 = ugeth->p_init_enet_param_shadow->resinit4;
3578 out_be16(&p_init_enet_pram->resinit5,
3579 ugeth->p_init_enet_param_shadow->resinit5);
3580 p_init_enet_pram->largestexternallookupkeysize =
3581 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize;
3582 out_be32(&p_init_enet_pram->rgftgfrxglobal,
3583 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3584 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++)
3585 out_be32(&p_init_enet_pram->rxthread[i],
3586 ugeth->p_init_enet_param_shadow->rxthread[i]);
3587 out_be32(&p_init_enet_pram->txglobal,
3588 ugeth->p_init_enet_param_shadow->txglobal);
3589 for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++)
3590 out_be32(&p_init_enet_pram->txthread[i],
3591 ugeth->p_init_enet_param_shadow->txthread[i]);
3592
3593 /* Issue QE command */
3594 cecr_subblock =
3595 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3596 qe_issue_cmd(command, cecr_subblock, (u8) QE_CR_PROTOCOL_ETHERNET,
3597 init_enet_pram_offset);
3598
3599 /* Free InitEnet command parameter */
3600 qe_muram_free(init_enet_pram_offset);
3601
3602 return 0;
3603}
3604
3605/* returns a net_device_stats structure pointer */
3606static struct net_device_stats *ucc_geth_get_stats(struct net_device *dev)
3607{
3608 ucc_geth_private_t *ugeth = netdev_priv(dev);
3609
3610 return &(ugeth->stats);
3611}
3612
3613/* ucc_geth_timeout gets called when a packet has not been
3614 * transmitted after a set amount of time.
3615 * For now, assume that clearing out all the structures, and
3616 * starting over will fix the problem. */
3617static void ucc_geth_timeout(struct net_device *dev)
3618{
3619 ucc_geth_private_t *ugeth = netdev_priv(dev);
3620
3621 ugeth_vdbg("%s: IN", __FUNCTION__);
3622
3623 ugeth->stats.tx_errors++;
3624
3625 ugeth_dump_regs(ugeth);
3626
3627 if (dev->flags & IFF_UP) {
3628 ucc_geth_stop(ugeth);
3629 ucc_geth_startup(ugeth);
3630 }
3631
3632 netif_schedule(dev);
3633}
3634
3635/* This is called by the kernel when a frame is ready for transmission. */
3636/* It is pointed to by the dev->hard_start_xmit function pointer */
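/* The skb is queued on Tx queue 0 only; its pointer is remembered in
 * tx_skbuff[] so ucc_geth_tx() can free it once the QE has cleared the
 * T_R (ready) bit.  The queue is stopped when the ring wraps around to
 * the first not-yet-confirmed BD. */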
3637static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3638{
3639 ucc_geth_private_t *ugeth = netdev_priv(dev);
3640 u8 *bd; /* BD pointer */
3641 u32 bd_status;
3642 u8 txQ = 0;
3643
3644 ugeth_vdbg("%s: IN", __FUNCTION__);
3645
3646 spin_lock_irq(&ugeth->lock);
3647
3648 ugeth->stats.tx_bytes += skb->len;
3649
3650 /* Start from the next BD that should be filled */
3651 bd = ugeth->txBd[txQ];
3652 bd_status = BD_STATUS_AND_LENGTH(bd);
3653 /* Save the skb pointer so we can free it later */
3654 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3655
3656 /* Update the current skb pointer (wrapping if this was the last) */
3657 ugeth->skb_curtx[txQ] =
3658 (ugeth->skb_curtx[txQ] +
3659 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3660
3661 /* set up the buffer descriptor */
3662 BD_BUFFER_SET(bd,
3663 dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE));
3664
3665 //printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data);
3666
3667 bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len;
3668
3669 BD_STATUS_AND_LENGTH_SET(bd, bd_status);
3670
3671 dev->trans_start = jiffies;
3672
3673 /* Move to next BD in the ring */
3674 if (!(bd_status & T_W))
3675 ugeth->txBd[txQ] = bd + UCC_GETH_SIZE_OF_BD;
3676 else
3677 ugeth->txBd[txQ] = ugeth->p_tx_bd_ring[txQ];
3678
3679 /* If the next BD still needs to be cleaned up, then the bds
3680 are full. We need to tell the kernel to stop sending us stuff. */
3681 if (bd == ugeth->confBd[txQ]) {
3682 if (!netif_queue_stopped(dev))
3683 netif_stop_queue(dev);
3684 }
3685
3686 if (ugeth->p_scheduler) {
3687 ugeth->cpucount[txQ]++;
3688 /* Indicate to QE that there are more Tx bds ready for
3689 transmission */
3690 /* This is done by writing a running counter of the bd
3691 count to the scheduler PRAM. */
3692 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3693 }
3694
3695 spin_unlock_irq(&ugeth->lock);
3696
3697 return 0;
3698}
3699
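/*
 * ucc_geth_rx - harvest received frames from one Rx BD ring.
 * Walks the ring until an empty BD (R_E set) or rx_work_limit is reached.
 * Frames with errors or missing R_F/R_L bits are dropped; good frames are
 * pushed up the stack, and each consumed BD is immediately re-armed with a
 * fresh skb from get_new_skb().  Returns the number of frames delivered.
 */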
3700static int ucc_geth_rx(ucc_geth_private_t *ugeth, u8 rxQ, int rx_work_limit)
3701{
3702 struct sk_buff *skb;
3703 u8 *bd;
3704 u16 length, howmany = 0;
3705 u32 bd_status;
3706 u8 *bdBuffer;
3707
3708 ugeth_vdbg("%s: IN", __FUNCTION__);
3709
3710 spin_lock(&ugeth->lock);
3711 /* collect received buffers */
3712 bd = ugeth->rxBd[rxQ];
3713
3714 bd_status = BD_STATUS_AND_LENGTH(bd);
3715
3716 /* while there are received buffers and BD is full (~R_E) */
3717 while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) {
3718 bdBuffer = (u8 *) BD_BUFFER(bd);
3719 length = (u16) ((bd_status & BD_LENGTH_MASK) - 4);
3720 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3721
3722 /* determine whether buffer is first, last, first and last
3723 (single buffer frame) or middle (not first and not last) */
3724 if (!skb ||
3725 (!(bd_status & (R_F | R_L))) ||
3726 (bd_status & R_ERRORS_FATAL)) {
3727 ugeth_vdbg("%s, %d: ERROR!!! skb - 0x%08x",
3728 __FUNCTION__, __LINE__, (u32) skb);
3729 if (skb)
3730 dev_kfree_skb_any(skb);
3731
3732 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3733 ugeth->stats.rx_dropped++;
3734 } else {
3735 ugeth->stats.rx_packets++;
3736 howmany++;
3737
3738 /* Prep the skb for the packet */
3739 skb_put(skb, length);
3740
3741 /* Tell the skb what kind of packet this is */
3742 skb->protocol = eth_type_trans(skb, ugeth->dev);
3743
3744 ugeth->stats.rx_bytes += length;
3745 /* Send the packet up the stack */
3746#ifdef CONFIG_UGETH_NAPI
3747 netif_receive_skb(skb);
3748#else
3749 netif_rx(skb);
3750#endif /* CONFIG_UGETH_NAPI */
3751 }
3752
3753 ugeth->dev->last_rx = jiffies;
3754
3755 skb = get_new_skb(ugeth, bd);
3756 if (!skb) {
3757 ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__);
3758 spin_unlock(&ugeth->lock);
3759 ugeth->stats.rx_dropped++;
3760 break;
3761 }
3762
3763 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3764
3765 /* update to point at the next skb */
3766 ugeth->skb_currx[rxQ] =
3767 (ugeth->skb_currx[rxQ] +
3768 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3769
3770 if (bd_status & R_W)
3771 bd = ugeth->p_rx_bd_ring[rxQ];
3772 else
3773 bd += UCC_GETH_SIZE_OF_BD;
3774
3775 bd_status = BD_STATUS_AND_LENGTH(bd);
3776 }
3777
3778 ugeth->rxBd[rxQ] = bd;
3779 spin_unlock(&ugeth->lock);
3780 return howmany;
3781}
3782
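/*
 * ucc_geth_tx - reclaim completed Tx BDs for one queue.
 * Walks confBd while the T_R bit is clear, freeing the associated skbs,
 * advancing skb_dirtytx and waking the queue if it had been stopped
 * because the ring was full.
 */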
3783static int ucc_geth_tx(struct net_device *dev, u8 txQ)
3784{
3785 /* Start from the next BD that should be filled */
3786 ucc_geth_private_t *ugeth = netdev_priv(dev);
3787 u8 *bd; /* BD pointer */
3788 u32 bd_status;
3789
3790 bd = ugeth->confBd[txQ];
3791 bd_status = BD_STATUS_AND_LENGTH(bd);
3792
3793 /* Normal processing. */
3794 while ((bd_status & T_R) == 0) {
3795 /* BD contains already transmitted buffer. */
3796 /* Handle the transmitted buffer and release */
3797 /* the BD to be used with the current frame */
3798
3799 if ((bd = ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
3800 break;
3801
3802 ugeth->stats.tx_packets++;
3803
3804 /* Free the sk buffer associated with this TxBD */
3805 dev_kfree_skb_irq(ugeth->
3806 tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
3807 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3808 ugeth->skb_dirtytx[txQ] =
3809 (ugeth->skb_dirtytx[txQ] +
3810 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3811
3812 /* We freed a buffer, so now we can restart transmission */
3813 if (netif_queue_stopped(dev))
3814 netif_wake_queue(dev);
3815
3816 /* Advance the confirmation BD pointer */
3817 if (!(bd_status & T_W))
3818 ugeth->confBd[txQ] += UCC_GETH_SIZE_OF_BD;
3819 else
3820 ugeth->confBd[txQ] = ugeth->p_tx_bd_ring[txQ];
3821 }
3822 return 0;
3823}
3824
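/*
 * NAPI poll callback using the dev->poll/dev->quota interface of this
 * kernel generation: the remaining budget is passed in and updated through
 * *budget, and netif_rx_complete() is called once the Rx ring has been
 * drained within the allowed work limit.
 */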
3825#ifdef CONFIG_UGETH_NAPI
3826static int ucc_geth_poll(struct net_device *dev, int *budget)
3827{
3828 ucc_geth_private_t *ugeth = netdev_priv(dev);
3829 int howmany;
3830 int rx_work_limit = *budget;
3831 u8 rxQ = 0;
3832
3833 if (rx_work_limit > dev->quota)
3834 rx_work_limit = dev->quota;
3835
3836 howmany = ucc_geth_rx(ugeth, rxQ, rx_work_limit);
3837
3838 dev->quota -= howmany;
3839 rx_work_limit -= howmany;
3840 *budget -= howmany;
3841
3842 if (rx_work_limit >= 0)
3843 netif_rx_complete(dev);
3844
3845 return (rx_work_limit < 0) ? 1 : 0;
3846}
3847#endif /* CONFIG_UGETH_NAPI */
3848
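/*
 * ucc_geth_irq_handler - main UCC interrupt handler.
 * Reads and acknowledges the enabled UCCE events, then runs the Rx routine
 * for every queue whose RXBF bit is set and the Tx reclaim for every queue
 * whose TXBF bit is set; BSY and "other" events are only counted as
 * rx_errors.  Loops until no enabled events remain pending.
 */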
3849static irqreturn_t ucc_geth_irq_handler(int irq, void *info,
3850 struct pt_regs *regs)
3851{
3852 struct net_device *dev = (struct net_device *)info;
3853 ucc_geth_private_t *ugeth = netdev_priv(dev);
3854 ucc_fast_private_t *uccf;
3855 ucc_geth_info_t *ug_info;
3856 register u32 ucce = 0;
3857 register u32 bit_mask = UCCE_RXBF_SINGLE_MASK;
3858 register u32 tx_mask = UCCE_TXBF_SINGLE_MASK;
3859 register u8 i;
3860
3861 ugeth_vdbg("%s: IN", __FUNCTION__);
3862
3863 if (!ugeth)
3864 return IRQ_NONE;
3865
3866 uccf = ugeth->uccf;
3867 ug_info = ugeth->ug_info;
3868
3869 do {
3870 ucce |= (u32) (in_be32(uccf->p_ucce) & in_be32(uccf->p_uccm));
3871
3872 /* clear event bits for next time */
3873 /* Side effect here is to mask ucce variable
3874 for future processing below. */
3875 out_be32(uccf->p_ucce, ucce); /* Clear with ones,
3876 but only bits in UCCM */
3877
3878 /* We ignore Tx interrupts because Tx confirmation is
3879 done inside Tx routine */
3880
3881 for (i = 0; i < ug_info->numQueuesRx; i++) {
3882 if (ucce & bit_mask)
3883 ucc_geth_rx(ugeth, i,
3884 (int)ugeth->ug_info->
3885 bdRingLenRx[i]);
3886 ucce &= ~bit_mask;
3887 bit_mask <<= 1;
3888 }
3889
3890 for (i = 0; i < ug_info->numQueuesTx; i++) {
3891 if (ucce & tx_mask)
3892 ucc_geth_tx(dev, i);
3893 ucce &= ~tx_mask;
3894 tx_mask <<= 1;
3895 }
3896
3897 /* Exceptions */
3898 if (ucce & UCCE_BSY) {
3899 ugeth_vdbg("Got BUSY irq!!!!");
3900 ugeth->stats.rx_errors++;
3901 ucce &= ~UCCE_BSY;
3902 }
3903 if (ucce & UCCE_OTHER) {
3904 ugeth_vdbg("Got frame with error (ucce - 0x%08x)!!!!",
3905 ucce);
3906 ugeth->stats.rx_errors++;
3907 ucce &= ~ucce;
3908 }
3909 }
3910 while (ucce);
3911
3912 return IRQ_HANDLED;
3913}
3914
3915static irqreturn_t phy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
3916{
3917 struct net_device *dev = (struct net_device *)dev_id;
3918 ucc_geth_private_t *ugeth = netdev_priv(dev);
3919
3920 ugeth_vdbg("%s: IN", __FUNCTION__);
3921
3922 /* Clear the interrupt */
3923 mii_clear_phy_interrupt(ugeth->mii_info);
3924
3925 /* Disable PHY interrupts */
3926 mii_configure_phy_interrupt(ugeth->mii_info, MII_INTERRUPT_DISABLED);
3927
3928 /* Schedule the phy change */
3929 schedule_work(&ugeth->tq);
3930
3931 return IRQ_HANDLED;
3932}
3933
3934/* Scheduled by the phy_interrupt/timer to handle PHY changes */
3935static void ugeth_phy_change(void *data)
3936{
3937 struct net_device *dev = (struct net_device *)data;
3938 ucc_geth_private_t *ugeth = netdev_priv(dev);
3939 ucc_geth_t *ug_regs;
3940 int result = 0;
3941
3942 ugeth_vdbg("%s: IN", __FUNCTION__);
3943
3944 ug_regs = ugeth->ug_regs;
3945
3946 /* Delay to give the PHY a chance to change the
3947 * register state */
3948 msleep(1);
3949
3950 /* Update the link, speed, duplex */
3951 result = ugeth->mii_info->phyinfo->read_status(ugeth->mii_info);
3952
3953 /* Adjust the known status as long as the link
3954 * isn't still coming up */
3955 if ((0 == result) || (ugeth->mii_info->link == 0))
3956 adjust_link(dev);
3957
3958 /* Reenable interrupts, if needed */
3959 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR)
3960 mii_configure_phy_interrupt(ugeth->mii_info,
3961 MII_INTERRUPT_ENABLED);
3962}
3963
3964/* Called every so often on systems that don't interrupt
3965 * the core for PHY changes */
3966static void ugeth_phy_timer(unsigned long data)
3967{
3968 struct net_device *dev = (struct net_device *)data;
3969 ucc_geth_private_t *ugeth = netdev_priv(dev);
3970
3971 schedule_work(&ugeth->tq);
3972
3973 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
3974}
3975
3976/* Keep trying aneg for some time
 3977 * If, after UGETH_AN_TIMEOUT seconds, it has not
3978 * finished, we switch to forced.
3979 * Either way, once the process has completed, we either
3980 * request the interrupt, or switch the timer over to
3981 * using ugeth_phy_timer to check status */
3982static void ugeth_phy_startup_timer(unsigned long data)
3983{
3984 struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
3985 ucc_geth_private_t *ugeth = netdev_priv(mii_info->dev);
3986 static int secondary = UGETH_AN_TIMEOUT;
3987 int result;
3988
3989 /* Configure the Auto-negotiation */
3990 result = mii_info->phyinfo->config_aneg(mii_info);
3991
3992 /* If autonegotiation failed to start, and
3993 * we haven't timed out, reset the timer, and return */
3994 if (result && secondary--) {
3995 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
3996 return;
3997 } else if (result) {
3998 /* Couldn't start autonegotiation.
3999 * Try switching to forced */
4000 mii_info->autoneg = 0;
4001 result = mii_info->phyinfo->config_aneg(mii_info);
4002
4003 /* Forcing failed! Give up */
4004 if (result) {
4005 ugeth_err("%s: Forcing failed!", mii_info->dev->name);
4006 return;
4007 }
4008 }
4009
4010 /* Kill the timer so it can be restarted */
4011 del_timer_sync(&ugeth->phy_info_timer);
4012
4013 /* Grab the PHY interrupt, if necessary/possible */
4014 if (ugeth->ug_info->board_flags & FSL_UGETH_BRD_HAS_PHY_INTR) {
4015 if (request_irq(ugeth->ug_info->phy_interrupt,
4016 phy_interrupt,
4017 SA_SHIRQ, "phy_interrupt", mii_info->dev) < 0) {
4018 ugeth_err("%s: Can't get IRQ %d (PHY)",
4019 mii_info->dev->name,
4020 ugeth->ug_info->phy_interrupt);
4021 } else {
4022 mii_configure_phy_interrupt(ugeth->mii_info,
4023 MII_INTERRUPT_ENABLED);
4024 return;
4025 }
4026 }
4027
4028 /* Start the timer again, this time in order to
4029 * handle a change in status */
4030 init_timer(&ugeth->phy_info_timer);
4031 ugeth->phy_info_timer.function = &ugeth_phy_timer;
4032 ugeth->phy_info_timer.data = (unsigned long)mii_info->dev;
4033 mod_timer(&ugeth->phy_info_timer, jiffies + PHY_CHANGE_TIME * HZ);
4034}
4035
4036/* Called when something needs to use the ethernet device */
4037/* Returns 0 for success. */
4038static int ucc_geth_open(struct net_device *dev)
4039{
4040 ucc_geth_private_t *ugeth = netdev_priv(dev);
4041 int err;
4042
4043 ugeth_vdbg("%s: IN", __FUNCTION__);
4044
4045 /* Test station address */
4046 if (dev->dev_addr[0] & ENET_GROUP_ADDR) {
4047 ugeth_err("%s: Multicast address used for station address"
4048 " - is this what you wanted?", __FUNCTION__);
4049 return -EINVAL;
4050 }
4051
4052 err = ucc_geth_startup(ugeth);
4053 if (err) {
4054 ugeth_err("%s: Cannot configure net device, aborting.",
4055 dev->name);
4056 return err;
4057 }
4058
4059 err = adjust_enet_interface(ugeth);
4060 if (err) {
4061 ugeth_err("%s: Cannot configure net device, aborting.",
4062 dev->name);
4063 return err;
4064 }
4065
4066 /* Set MACSTNADDR1, MACSTNADDR2 */
4067 /* For more details see the hardware spec. */
4068 init_mac_station_addr_regs(dev->dev_addr[0],
4069 dev->dev_addr[1],
4070 dev->dev_addr[2],
4071 dev->dev_addr[3],
4072 dev->dev_addr[4],
4073 dev->dev_addr[5],
4074 &ugeth->ug_regs->macstnaddr1,
4075 &ugeth->ug_regs->macstnaddr2);
4076
4077 err = init_phy(dev);
4078 if (err) {
 4079 		ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name);
4080 return err;
4081 }
4082#ifndef CONFIG_UGETH_NAPI
4083 err =
4084 request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, 0,
4085 "UCC Geth", dev);
4086 if (err) {
4087 ugeth_err("%s: Cannot get IRQ for net device, aborting.",
4088 dev->name);
4089 ucc_geth_stop(ugeth);
4090 return err;
4091 }
4092#endif /* CONFIG_UGETH_NAPI */
4093
4094 /* Set up the PHY change work queue */
4095 INIT_WORK(&ugeth->tq, ugeth_phy_change, dev);
4096
4097 init_timer(&ugeth->phy_info_timer);
4098 ugeth->phy_info_timer.function = &ugeth_phy_startup_timer;
4099 ugeth->phy_info_timer.data = (unsigned long)ugeth->mii_info;
4100 mod_timer(&ugeth->phy_info_timer, jiffies + HZ);
4101
4102 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
4103 if (err) {
4104 ugeth_err("%s: Cannot enable net device, aborting.", dev->name);
4105 ucc_geth_stop(ugeth);
4106 return err;
4107 }
4108
4109 netif_start_queue(dev);
4110
4111 return err;
4112}
4113
4114/* Stops the kernel queue, and halts the controller */
4115static int ucc_geth_close(struct net_device *dev)
4116{
4117 ucc_geth_private_t *ugeth = netdev_priv(dev);
4118
4119 ugeth_vdbg("%s: IN", __FUNCTION__);
4120
4121 ucc_geth_stop(ugeth);
4122
4123 /* Shutdown the PHY */
4124 if (ugeth->mii_info->phyinfo->close)
4125 ugeth->mii_info->phyinfo->close(ugeth->mii_info);
4126
4127 kfree(ugeth->mii_info);
4128
4129 netif_stop_queue(dev);
4130
4131 return 0;
4132}
4133
4134struct ethtool_ops ucc_geth_ethtool_ops = {
4135 .get_settings = NULL,
4136 .get_drvinfo = NULL,
4137 .get_regs_len = NULL,
4138 .get_regs = NULL,
4139 .get_link = NULL,
4140 .get_coalesce = NULL,
4141 .set_coalesce = NULL,
4142 .get_ringparam = NULL,
4143 .set_ringparam = NULL,
4144 .get_strings = NULL,
4145 .get_stats_count = NULL,
4146 .get_ethtool_stats = NULL,
4147};
4148
4149static int ucc_geth_probe(struct device *device)
4150{
4151 struct platform_device *pdev = to_platform_device(device);
4152 struct ucc_geth_platform_data *ugeth_pdata;
4153 struct net_device *dev = NULL;
4154 struct ucc_geth_private *ugeth = NULL;
4155 struct ucc_geth_info *ug_info;
4156 int err;
4157 static int mii_mng_configured = 0;
4158
4159 ugeth_vdbg("%s: IN", __FUNCTION__);
4160
4161 ugeth_pdata = (struct ucc_geth_platform_data *)pdev->dev.platform_data;
4162
4163	ug_info = &ugeth_info[pdev->id];
4164	if (ug_info == NULL) {
4165		ugeth_err("%s: [%d] Missing additional data!", __FUNCTION__,
4166			  pdev->id);
4167		return -ENODEV;
4168	}
4169
4170	ug_info->uf_info.ucc_num = pdev->id;
4171	ug_info->uf_info.rx_clock = ugeth_pdata->rx_clock;
4172	ug_info->uf_info.tx_clock = ugeth_pdata->tx_clock;
4173	ug_info->uf_info.regs = ugeth_pdata->phy_reg_addr;
4174	ug_info->uf_info.irq = platform_get_irq(pdev, 0);
4175	ug_info->phy_address = ugeth_pdata->phy_id;
4176	ug_info->enet_interface = ugeth_pdata->phy_interface;
4177	ug_info->board_flags = ugeth_pdata->board_flags;
4178	ug_info->phy_interrupt = ugeth_pdata->phy_interrupt;
4179
4180	printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n",
4181		ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
4182		ug_info->uf_info.irq);
4183
4184 if (!mii_mng_configured) {
4185 ucc_set_qe_mux_mii_mng(ug_info->uf_info.ucc_num);
4186 mii_mng_configured = 1;
4187 }
4188
4189 /* Create an ethernet device instance */
4190 dev = alloc_etherdev(sizeof(*ugeth));
4191
4192 if (dev == NULL)
4193 return -ENOMEM;
4194
4195 ugeth = netdev_priv(dev);
4196 spin_lock_init(&ugeth->lock);
4197
4198 dev_set_drvdata(device, dev);
4199
4200	/* Set the dev->base_addr to the UCC register region */
4201 dev->base_addr = (unsigned long)(ug_info->uf_info.regs);
4202
4203 SET_MODULE_OWNER(dev);
4204 SET_NETDEV_DEV(dev, device);
4205
4206 /* Fill in the dev structure */
4207 dev->open = ucc_geth_open;
4208 dev->hard_start_xmit = ucc_geth_start_xmit;
4209 dev->tx_timeout = ucc_geth_timeout;
4210 dev->watchdog_timeo = TX_TIMEOUT;
4211#ifdef CONFIG_UGETH_NAPI
4212 dev->poll = ucc_geth_poll;
4213 dev->weight = UCC_GETH_DEV_WEIGHT;
4214#endif /* CONFIG_UGETH_NAPI */
4215 dev->stop = ucc_geth_close;
4216 dev->get_stats = ucc_geth_get_stats;
4217// dev->change_mtu = ucc_geth_change_mtu;
4218 dev->mtu = 1500;
4219 dev->set_multicast_list = ucc_geth_set_multi;
4220 dev->ethtool_ops = &ucc_geth_ethtool_ops;
4221
4222 err = register_netdev(dev);
4223 if (err) {
4224 ugeth_err("%s: Cannot register net device, aborting.",
4225 dev->name);
4226 free_netdev(dev);
4227 return err;
4228 }
4229
4230 ugeth->ug_info = ug_info;
4231 ugeth->dev = dev;
4232 memcpy(dev->dev_addr, ugeth_pdata->mac_addr, 6);
4233
4234 return 0;
4235}
4236
4237static int ucc_geth_remove(struct device *device)
4238{
4239 struct net_device *dev = dev_get_drvdata(device);
4240 struct ucc_geth_private *ugeth = netdev_priv(dev);
4241
4242 dev_set_drvdata(device, NULL);
4243 ucc_geth_memclean(ugeth);
4244 free_netdev(dev);
4245
4246 return 0;
4247}
4248
4249/* Structure for a device driver */
4250static struct device_driver ucc_geth_driver = {
4251 .name = DRV_NAME,
4252 .bus = &platform_bus_type,
4253 .probe = ucc_geth_probe,
4254 .remove = ucc_geth_remove,
4255};
4256
4257static int __init ucc_geth_init(void)
4258{
4259 int i;
4260 printk(KERN_INFO "ucc_geth: " DRV_DESC "\n");
4261 for (i = 0; i < 8; i++)
4262 memcpy(&(ugeth_info[i]), &ugeth_primary_info,
4263 sizeof(ugeth_primary_info));
4264
4265 return driver_register(&ucc_geth_driver);
4266}
4267
4268static void __exit ucc_geth_exit(void)
4269{
4270 driver_unregister(&ucc_geth_driver);
4271}
4272
4273module_init(ucc_geth_init);
4274module_exit(ucc_geth_exit);
4275
4276MODULE_AUTHOR("Freescale Semiconductor, Inc");
4277MODULE_DESCRIPTION(DRV_DESC);
4278MODULE_LICENSE("GPL");
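
For context, ucc_geth_probe() above expects board code to register a platform device carrying struct ucc_geth_platform_data (declared in <linux/fsl_devices.h>, which ucc_geth.h includes). The sketch below is hypothetical board-setup code, not part of this patch: the field names are the ones the probe dereferences, the device name "ucc_geth" is assumed to match DRV_NAME, mac_addr is assumed to be a 6-byte array, and all values are placeholders.

/* Hypothetical board-level registration sketch (not part of this patch). */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>

static struct ucc_geth_platform_data mpc83xx_ugeth0_pdata = {
	.rx_clock	= 0,	/* board-specific QE clock source */
	.tx_clock	= 0,	/* board-specific QE clock source */
	.phy_reg_addr	= 0,	/* UCC register block offset, board-specific */
	.phy_id		= 0,	/* MDIO address of the attached PHY */
	.phy_interface	= 0,	/* interface type enum value, board-specific */
	.board_flags	= 0,	/* e.g. FSL_UGETH_BRD_HAS_PHY_INTR */
	.phy_interrupt	= 0,	/* PHY IRQ line, if board_flags requests it */
	.mac_addr	= { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 }, /* placeholder */
};

static struct platform_device mpc83xx_ugeth0_dev = {
	.name			= "ucc_geth",	/* assumed equal to DRV_NAME */
	.id			= 0,		/* selects ugeth_info[0] in the probe */
	.dev.platform_data	= &mpc83xx_ugeth0_pdata,
};

static int __init mpc83xx_ugeth0_register(void)
{
	return platform_device_register(&mpc83xx_ugeth0_dev);
}
arch_initcall(mpc83xx_ugeth0_register);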
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
new file mode 100644
index 000000000000..005965f5dd9b
--- /dev/null
+++ b/drivers/net/ucc_geth.h
@@ -0,0 +1,1339 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * Internal header file for UCC Gigabit Ethernet unit routines.
8 *
9 * Changelog:
10 * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18#ifndef __UCC_GETH_H__
19#define __UCC_GETH_H__
20
21#include <linux/kernel.h>
22#include <linux/list.h>
23#include <linux/fsl_devices.h>
24
25#include <asm/immap_qe.h>
26#include <asm/qe.h>
27
28#include <asm/ucc.h>
29#include <asm/ucc_fast.h>
30
31#define NUM_TX_QUEUES 8
32#define NUM_RX_QUEUES 8
33#define NUM_BDS_IN_PREFETCHED_BDS 4
34#define TX_IP_OFFSET_ENTRY_MAX 8
35#define NUM_OF_PADDRS 4
36#define ENET_INIT_PARAM_MAX_ENTRIES_RX 9
37#define ENET_INIT_PARAM_MAX_ENTRIES_TX 8
38
39typedef struct ucc_mii_mng {
40 u32 miimcfg; /* MII management configuration reg */
41 u32 miimcom; /* MII management command reg */
42 u32 miimadd; /* MII management address reg */
43 u32 miimcon; /* MII management control reg */
44 u32 miimstat; /* MII management status reg */
45 u32 miimind; /* MII management indication reg */
46} __attribute__ ((packed)) ucc_mii_mng_t;
47
48typedef struct ucc_geth {
49 ucc_fast_t uccf;
50
51 u32 maccfg1; /* mac configuration reg. 1 */
52 u32 maccfg2; /* mac configuration reg. 2 */
53 u32 ipgifg; /* interframe gap reg. */
54 u32 hafdup; /* half-duplex reg. */
55 u8 res1[0x10];
56 ucc_mii_mng_t miimng; /* MII management structure */
57 u32 ifctl; /* interface control reg */
58	u32 ifstat;		/* interface status reg */
59 u32 macstnaddr1; /* mac station address part 1 reg */
60 u32 macstnaddr2; /* mac station address part 2 reg */
61 u8 res2[0x8];
62 u32 uempr; /* UCC Ethernet Mac parameter reg */
63 u32 utbipar; /* UCC tbi address reg */
64 u16 uescr; /* UCC Ethernet statistics control reg */
65 u8 res3[0x180 - 0x15A];
66 u32 tx64; /* Total number of frames (including bad
67 frames) transmitted that were exactly of the
68				   minimal length (64 for untagged, 68 for
69				   tagged, or with length exactly equal to the
70				   parameter MINLength) */
71 u32 tx127; /* Total number of frames (including bad
72 frames) transmitted that were between
73 MINLength (Including FCS length==4) and 127
74 octets */
75 u32 tx255; /* Total number of frames (including bad
76 frames) transmitted that were between 128
77 (Including FCS length==4) and 255 octets */
78 u32 rx64; /* Total number of frames received including
79 bad frames that were exactly of the mninimal
80 length (64 bytes) */
81 u32 rx127; /* Total number of frames (including bad
82 frames) received that were between MINLength
83 (Including FCS length==4) and 127 octets */
84 u32 rx255; /* Total number of frames (including bad
85 frames) received that were between 128
86 (Including FCS length==4) and 255 octets */
87 u32 txok; /* Total number of octets residing in frames
88				   that were involved in successful
89 transmission */
90 u16 txcf; /* Total number of PAUSE control frames
91 transmitted by this MAC */
92 u8 res4[0x2];
93 u32 tmca; /* Total number of frames that were transmitted
94				   successfully with the group address bit set
95				   that are not broadcast frames */
96	u32 tbca;		/* Total number of frames transmitted
97				   successfully that had destination address
98 field equal to the broadcast address */
99 u32 rxfok; /* Total number of frames received OK */
100 u32 rxbok; /* Total number of octets received OK */
101 u32 rbyt; /* Total number of octets received including
102 octets in bad frames. Must be implemented in
103 HW because it includes octets in frames that
104 never even reach the UCC */
105 u32 rmca; /* Total number of frames that were received
106				   successfully with the group address bit set
107				   that are not broadcast frames */
108	u32 rbca;		/* Total number of frames received successfully
109 that had destination address equal to the
110 broadcast address */
111 u32 scar; /* Statistics carry register */
112	u32 scam;		/* Statistics carry mask register */
113 u8 res5[0x200 - 0x1c4];
114} __attribute__ ((packed)) ucc_geth_t;
115
116/* UCC GETH TEMODER Register */
117#define TEMODER_TX_RMON_STATISTICS_ENABLE 0x0100 /* enable Tx statistics
118 */
119#define TEMODER_SCHEDULER_ENABLE 0x2000 /* enable scheduler */
120#define TEMODER_IP_CHECKSUM_GENERATE 0x0400 /* generate IPv4
121 checksums */
122#define TEMODER_PERFORMANCE_OPTIMIZATION_MODE1 0x0200 /* enable performance
123 optimization
124 enhancement (mode1) */
125#define TEMODER_RMON_STATISTICS 0x0100 /* enable tx statistics
126 */
127#define TEMODER_NUM_OF_QUEUES_SHIFT (15-15) /* Number of queues <<
128 shift */
129
130/* UCC GETH REMODER Register */
131#define REMODER_RX_RMON_STATISTICS_ENABLE 0x00001000 /* enable Rx
132 statistics */
133#define REMODER_RX_EXTENDED_FEATURES 0x80000000 /* enable
134 extended
135 features */
136#define REMODER_VLAN_OPERATION_TAGGED_SHIFT (31-9 ) /* vlan operation
137 tagged << shift */
138#define REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT (31-10) /* vlan operation non
139 tagged << shift */
140#define REMODER_RX_QOS_MODE_SHIFT (31-15) /* rx QoS mode << shift
141 */
142#define REMODER_RMON_STATISTICS 0x00001000 /* enable rx
143 statistics */
144#define REMODER_RX_EXTENDED_FILTERING 0x00000800 /* extended
145 filtering
146 vs.
147 mpc82xx-like
148 filtering */
149#define REMODER_NUM_OF_QUEUES_SHIFT (31-23) /* Number of queues <<
150 shift */
151#define REMODER_DYNAMIC_MAX_FRAME_LENGTH 0x00000008 /* enable
152 dynamic max
153 frame length
154 */
155#define REMODER_DYNAMIC_MIN_FRAME_LENGTH 0x00000004 /* enable
156 dynamic min
157 frame length
158 */
159#define REMODER_IP_CHECKSUM_CHECK 0x00000002 /* check IPv4
160 checksums */
161#define REMODER_IP_ADDRESS_ALIGNMENT 0x00000001 /* align ip
162 address to
163 4-byte
164 boundary */
165
166/* UCC GETH Event Register */
167#define UCCE_MPD 0x80000000 /* Magic packet
168 detection */
169#define UCCE_SCAR 0x40000000
170#define UCCE_GRA 0x20000000 /* Tx graceful
171 stop
172 complete */
173#define UCCE_CBPR 0x10000000
174#define UCCE_BSY 0x08000000
175#define UCCE_RXC 0x04000000
176#define UCCE_TXC 0x02000000
177#define UCCE_TXE 0x01000000
178#define UCCE_TXB7 0x00800000
179#define UCCE_TXB6 0x00400000
180#define UCCE_TXB5 0x00200000
181#define UCCE_TXB4 0x00100000
182#define UCCE_TXB3 0x00080000
183#define UCCE_TXB2 0x00040000
184#define UCCE_TXB1 0x00020000
185#define UCCE_TXB0 0x00010000
186#define UCCE_RXB7 0x00008000
187#define UCCE_RXB6 0x00004000
188#define UCCE_RXB5 0x00002000
189#define UCCE_RXB4 0x00001000
190#define UCCE_RXB3 0x00000800
191#define UCCE_RXB2 0x00000400
192#define UCCE_RXB1 0x00000200
193#define UCCE_RXB0 0x00000100
194#define UCCE_RXF7 0x00000080
195#define UCCE_RXF6 0x00000040
196#define UCCE_RXF5 0x00000020
197#define UCCE_RXF4 0x00000010
198#define UCCE_RXF3 0x00000008
199#define UCCE_RXF2 0x00000004
200#define UCCE_RXF1 0x00000002
201#define UCCE_RXF0 0x00000001
202
203#define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0)
204#define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0)
205
206#define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\
207 UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0)
208#define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\
209 UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0)
210#define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\
211 UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0)
212#define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\
213 UCCE_RXC | UCCE_TXC | UCCE_TXE)
214
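
The composite UCCE_TXB/UCCE_RXF/UCCE_OTHER masks above are what an interrupt handler would use to classify events. A minimal sketch, assuming the caller has already read the UCCE event register and the enabled-event mask; the helper name and the pr_debug output are illustrative only, not the driver's ISR.

/* Illustrative classification of UCC event bits; not the driver's ISR. */
static void ugeth_classify_events(u32 ucce, u32 uccm)
{
	u32 events = ucce & uccm;	/* only events that were enabled */

	if (events & UCCE_TXB)		/* one or more Tx BDs completed */
		pr_debug("ucc_geth: Tx buffer(s) done\n");
	if (events & UCCE_RXF)		/* one or more Rx frames received */
		pr_debug("ucc_geth: Rx frame(s) pending\n");
	if (events & UCCE_OTHER)	/* busy, graceful stop, errors, ... */
		pr_debug("ucc_geth: exception event\n");
}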
215/* UCC GETH UPSMR (Protocol Specific Mode Register) */
216#define UPSMR_ECM 0x04000000 /* Enable CAM
217 Miss or
218 Enable
219 Filtering
220 Miss */
221#define UPSMR_HSE 0x02000000 /* Hardware
222 Statistics
223 Enable */
224#define UPSMR_PRO 0x00400000 /* Promiscuous*/
225#define UPSMR_CAP 0x00200000 /* CAM polarity
226 */
227#define UPSMR_RSH 0x00100000 /* Receive
228 Short Frames
229 */
230#define UPSMR_RPM 0x00080000 /* Reduced Pin
231 Mode
232 interfaces */
233#define UPSMR_R10M 0x00040000 /* RGMII/RMII
234 10 Mode */
235#define UPSMR_RLPB 0x00020000 /* RMII
236 Loopback
237 Mode */
238#define UPSMR_TBIM 0x00010000 /* Ten-bit
239 Interface
240 Mode */
241#define UPSMR_RMM 0x00001000 /* RMII/RGMII
242 Mode */
243#define UPSMR_CAM 0x00000400 /* CAM Address
244 Matching */
245#define UPSMR_BRO 0x00000200 /* Broadcast
246 Address */
247#define UPSMR_RES1 0x00002000 /* Reserved
248							   field - must
249 be 1 */
250
251/* UCC GETH MACCFG1 (MAC Configuration 1 Register) */
252#define MACCFG1_FLOW_RX 0x00000020 /* Flow Control
253 Rx */
254#define MACCFG1_FLOW_TX 0x00000010 /* Flow Control
255 Tx */
256#define MACCFG1_ENABLE_SYNCHED_RX 0x00000008 /* Rx Enable
257 synchronized
258 to Rx stream
259 */
260#define MACCFG1_ENABLE_RX 0x00000004 /* Enable Rx */
261#define MACCFG1_ENABLE_SYNCHED_TX 0x00000002 /* Tx Enable
262 synchronized
263 to Tx stream
264 */
265#define MACCFG1_ENABLE_TX 0x00000001 /* Enable Tx */
266
267/* UCC GETH MACCFG2 (MAC Configuration 2 Register) */
268#define MACCFG2_PREL_SHIFT (31 - 19) /* Preamble
269 Length <<
270 shift */
271#define MACCFG2_PREL_MASK 0x0000f000 /* Preamble
272 Length mask */
273#define MACCFG2_SRP 0x00000080 /* Soft Receive
274 Preamble */
275#define MACCFG2_STP 0x00000040 /* Soft
276 Transmit
277 Preamble */
278#define MACCFG2_RESERVED_1 0x00000020 /* Reserved -
279 must be set
280 to 1 */
281#define MACCFG2_LC 0x00000010 /* Length Check
282 */
283#define MACCFG2_MPE 0x00000008 /* Magic packet
284 detect */
285#define MACCFG2_FDX 0x00000001 /* Full Duplex */
286#define MACCFG2_FDX_MASK 0x00000001 /* Full Duplex
287 mask */
288#define MACCFG2_PAD_CRC 0x00000004
289#define MACCFG2_CRC_EN 0x00000002
290#define MACCFG2_PAD_AND_CRC_MODE_NONE 0x00000000 /* Neither
291 Padding
292 short frames
293 nor CRC */
294#define MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY 0x00000002 /* Append CRC
295 only */
296#define MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC 0x00000004
297#define MACCFG2_INTERFACE_MODE_NIBBLE 0x00000100 /* nibble mode
298 (MII/RMII/RGMII
299 10/100bps) */
300#define MACCFG2_INTERFACE_MODE_BYTE 0x00000200 /* byte mode
301 (GMII/TBI/RTB/RGMII
302 1000bps ) */
303#define MACCFG2_INTERFACE_MODE_MASK 0x00000300 /* mask
304 covering all
305 relevant
306 bits */
307
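
Choosing between MACCFG2_INTERFACE_MODE_NIBBLE and _BYTE is a per-link-speed decision; this is the choice adjust_enet_interface(), called from ucc_geth_open() earlier in this patch, has to make. A rough sketch under the assumption that the register is accessed with in_be32()/out_be32() and that enet_speed_e, defined further down in this header, describes the negotiated speed; the helper name is illustrative.

/* Illustrative speed-to-interface-mode selection; error handling omitted. */
static void ugeth_set_mac_if_mode(ucc_geth_t *ug_regs, enet_speed_e speed)
{
	u32 maccfg2 = in_be32(&ug_regs->maccfg2);

	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;
	if (speed == ENET_SPEED_1000BT)
		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;	  /* GMII/TBI/RGMII 1000 */
	else
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; /* MII/RMII/RGMII 10/100 */

	out_be32(&ug_regs->maccfg2, maccfg2);
}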
308/* UCC GETH IPGIFG (Inter-frame Gap / Inter-Frame Gap Register) */
309#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT (31 - 7) /* Non
310 back-to-back
311 inter frame
312 gap part 1.
313 << shift */
314#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT (31 - 15) /* Non
315 back-to-back
316 inter frame
317 gap part 2.
318 << shift */
319#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT	(31 - 23)	/* Minimum IFG
320 Enforcement
321 << shift */
322#define IPGIFG_BACK_TO_BACK_IFG_SHIFT (31 - 31) /* back-to-back
323 inter frame
324 gap << shift
325 */
326#define IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX 127 /* Non back-to-back
327 inter frame gap part
328 1. max val */
329#define IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX 127 /* Non back-to-back
330 inter frame gap part
331 2. max val */
332#define IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX	255	/* Minimum IFG
333 Enforcement max val */
334#define IPGIFG_BACK_TO_BACK_IFG_MAX 127 /* back-to-back inter
335 frame gap max val */
336#define IPGIFG_NBTB_CS_IPG_MASK 0x7F000000
337#define IPGIFG_NBTB_IPG_MASK 0x007F0000
338#define IPGIFG_MIN_IFG_MASK 0x0000FF00
339#define IPGIFG_BTB_IPG_MASK 0x0000007F
340
341/* UCC GETH HAFDUP (Half Duplex Register) */
342#define HALFDUP_ALT_BEB_TRUNCATION_SHIFT (31 - 11) /* Alternate
343 Binary
344 Exponential
345 Backoff
346 Truncation
347 << shift */
348#define HALFDUP_ALT_BEB_TRUNCATION_MAX 0xf /* Alternate Binary
349 Exponential Backoff
350 Truncation max val */
351#define HALFDUP_ALT_BEB 0x00080000 /* Alternate
352 Binary
353 Exponential
354 Backoff */
355#define HALFDUP_BACK_PRESSURE_NO_BACKOFF 0x00040000 /* Back
356 pressure no
357 backoff */
358#define HALFDUP_NO_BACKOFF 0x00020000 /* No Backoff */
359#define HALFDUP_EXCESSIVE_DEFER 0x00010000 /* Excessive
360 Defer */
361#define HALFDUP_MAX_RETRANSMISSION_SHIFT (31 - 19) /* Maximum
362 Retransmission
363 << shift */
364#define HALFDUP_MAX_RETRANSMISSION_MAX 0xf /* Maximum
365 Retransmission max
366 val */
367#define HALFDUP_COLLISION_WINDOW_SHIFT (31 - 31) /* Collision
368 Window <<
369 shift */
370#define HALFDUP_COLLISION_WINDOW_MAX 0x3f /* Collision Window max
371 val */
372#define HALFDUP_ALT_BEB_TR_MASK 0x00F00000
373#define HALFDUP_RETRANS_MASK 0x0000F000
374#define HALFDUP_COL_WINDOW_MASK 0x0000003F
375
376/* UCC GETH UCCS (Ethernet Status Register) */
377#define UCCS_BPR 0x02 /* Back pressure (in
378 half duplex mode) */
379#define UCCS_PAU 0x02 /* Pause state (in full
380 duplex mode) */
381#define UCCS_MPD 0x01 /* Magic Packet
382 Detected */
383
384/* UCC GETH MIIMCFG (MII Management Configuration Register) */
385#define MIIMCFG_RESET_MANAGEMENT 0x80000000 /* Reset
386 management */
387#define MIIMCFG_NO_PREAMBLE 0x00000010 /* Preamble
388 suppress */
389#define MIIMCFG_CLOCK_DIVIDE_SHIFT (31 - 31) /* clock divide
390 << shift */
391#define MIIMCFG_CLOCK_DIVIDE_MAX 0xf /* clock divide max val
392 */
393#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_2 0x00000000 /* divide by 2 */
394#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_4 0x00000001 /* divide by 4 */
395#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_6 0x00000002 /* divide by 6 */
396#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_8 0x00000003 /* divide by 8 */
397#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_10 0x00000004 /* divide by 10
398 */
399#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_14 0x00000005 /* divide by 14
400 */
401#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_16 0x00000008 /* divide by 16
402 */
403#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_20 0x00000006 /* divide by 20
404 */
405#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_28 0x00000007 /* divide by 28
406 */
407#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_32 0x00000009 /* divide by 32
408 */
409#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_48 0x0000000a /* divide by 48
410 */
411#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_64 0x0000000b /* divide by 64
412 */
413#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_80 0x0000000c /* divide by 80
414 */
415#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112 0x0000000d /* divide by
416 112 */
417#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_160 0x0000000e /* divide by
418 160 */
419#define MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_224 0x0000000f /* divide by
420 224 */
421
422/* UCC GETH MIIMCOM (MII Management Command Register) */
423#define MIIMCOM_SCAN_CYCLE 0x00000002 /* Scan cycle */
424#define MIIMCOM_READ_CYCLE 0x00000001 /* Read cycle */
425
426/* UCC GETH MIIMADD (MII Management Address Register) */
427#define MIIMADD_PHY_ADDRESS_SHIFT (31 - 23) /* PHY Address
428 << shift */
429#define MIIMADD_PHY_REGISTER_SHIFT (31 - 31) /* PHY Register
430 << shift */
431
432/* UCC GETH MIIMCON (MII Management Control Register) */
433#define MIIMCON_PHY_CONTROL_SHIFT (31 - 31) /* PHY Control
434 << shift */
435#define MIIMCON_PHY_STATUS_SHIFT (31 - 31) /* PHY Status
436 << shift */
437
438/* UCC GETH MIIMIND (MII Management Indicator Register) */
439#define MIIMIND_NOT_VALID 0x00000004 /* Not valid */
440#define MIIMIND_SCAN 0x00000002 /* Scan in
441 progress */
442#define MIIMIND_BUSY 0x00000001
443
444/* UCC GETH IFSTAT (Interface Status Register) */
445#define IFSTAT_EXCESS_DEFER 0x00000200 /* Excessive
446 transmission
447 defer */
448
449/* UCC GETH MACSTNADDR1 (Station Address Part 1 Register) */
450#define MACSTNADDR1_OCTET_6_SHIFT (31 - 7) /* Station
451 address 6th
452 octet <<
453 shift */
454#define MACSTNADDR1_OCTET_5_SHIFT (31 - 15) /* Station
455 address 5th
456 octet <<
457 shift */
458#define MACSTNADDR1_OCTET_4_SHIFT (31 - 23) /* Station
459 address 4th
460 octet <<
461 shift */
462#define MACSTNADDR1_OCTET_3_SHIFT (31 - 31) /* Station
463 address 3rd
464 octet <<
465 shift */
466
467/* UCC GETH MACSTNADDR2 (Station Address Part 2 Register) */
468#define MACSTNADDR2_OCTET_2_SHIFT (31 - 7) /* Station
469 address 2nd
470 octet <<
471 shift */
472#define MACSTNADDR2_OCTET_1_SHIFT (31 - 15) /* Station
473 address 1st
474 octet <<
475 shift */
476
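
The two station address registers together hold the six MAC octets; init_mac_station_addr_regs(), called from ucc_geth_open() above, writes them. The sketch below is one plausible packing consistent with the shift names (it assumes "octet 6" means the last byte of dev_addr); the driver's actual argument-to-octet mapping is authoritative and may differ.

/* Illustrative packing of a MAC address into MACSTNADDR1/MACSTNADDR2. */
static void pack_station_addr(const u8 mac[6], u32 *stnaddr1, u32 *stnaddr2)
{
	*stnaddr1 = ((u32)mac[5] << MACSTNADDR1_OCTET_6_SHIFT) |
		    ((u32)mac[4] << MACSTNADDR1_OCTET_5_SHIFT) |
		    ((u32)mac[3] << MACSTNADDR1_OCTET_4_SHIFT) |
		    ((u32)mac[2] << MACSTNADDR1_OCTET_3_SHIFT);
	*stnaddr2 = ((u32)mac[1] << MACSTNADDR2_OCTET_2_SHIFT) |
		    ((u32)mac[0] << MACSTNADDR2_OCTET_1_SHIFT);
}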
477/* UCC GETH UEMPR (Ethernet Mac Parameter Register) */
478#define UEMPR_PAUSE_TIME_VALUE_SHIFT (31 - 15) /* Pause time
479 value <<
480 shift */
481#define UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT (31 - 31) /* Extended
482 pause time
483 value <<
484 shift */
485
486/* UCC GETH UTBIPAR (Ten Bit Interface Physical Address Register) */
487#define UTBIPAR_PHY_ADDRESS_SHIFT (31 - 31) /* Phy address
488 << shift */
489#define UTBIPAR_PHY_ADDRESS_MASK 0x0000001f /* Phy address
490 mask */
491
492/* UCC GETH UESCR (Ethernet Statistics Control Register) */
493#define UESCR_AUTOZ 0x8000 /* Automatically zero
494 addressed
495 statistical counter
496 values */
497#define UESCR_CLRCNT 0x4000 /* Clear all statistics
498 counters */
499#define UESCR_MAXCOV_SHIFT (15 - 7) /* Max
500 Coalescing
501 Value <<
502 shift */
503#define UESCR_SCOV_SHIFT (15 - 15) /* Status
504 Coalescing
505 Value <<
506 shift */
507
508/* UCC GETH UDSR (Data Synchronization Register) */
509#define UDSR_MAGIC 0x067E
510
511typedef struct ucc_geth_thread_data_tx {
512 u8 res0[104];
513} __attribute__ ((packed)) ucc_geth_thread_data_tx_t;
514
515typedef struct ucc_geth_thread_data_rx {
516 u8 res0[40];
517} __attribute__ ((packed)) ucc_geth_thread_data_rx_t;
518
519/* Send Queue Queue-Descriptor */
520typedef struct ucc_geth_send_queue_qd {
521 u32 bd_ring_base; /* pointer to BD ring base address */
522 u8 res0[0x8];
523 u32 last_bd_completed_address;/* initialize to last entry in BD ring */
524 u8 res1[0x30];
525} __attribute__ ((packed)) ucc_geth_send_queue_qd_t;
526
527typedef struct ucc_geth_send_queue_mem_region {
528 ucc_geth_send_queue_qd_t sqqd[NUM_TX_QUEUES];
529} __attribute__ ((packed)) ucc_geth_send_queue_mem_region_t;
530
531typedef struct ucc_geth_thread_tx_pram {
532 u8 res0[64];
533} __attribute__ ((packed)) ucc_geth_thread_tx_pram_t;
534
535typedef struct ucc_geth_thread_rx_pram {
536 u8 res0[128];
537} __attribute__ ((packed)) ucc_geth_thread_rx_pram_t;
538
539#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING 64
540#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8 64
541#define THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16 96
542
543typedef struct ucc_geth_scheduler {
544 u16 cpucount0; /* CPU packet counter */
545 u16 cpucount1; /* CPU packet counter */
546 u16 cecount0; /* QE packet counter */
547 u16 cecount1; /* QE packet counter */
548 u16 cpucount2; /* CPU packet counter */
549 u16 cpucount3; /* CPU packet counter */
550 u16 cecount2; /* QE packet counter */
551 u16 cecount3; /* QE packet counter */
552 u16 cpucount4; /* CPU packet counter */
553 u16 cpucount5; /* CPU packet counter */
554 u16 cecount4; /* QE packet counter */
555 u16 cecount5; /* QE packet counter */
556 u16 cpucount6; /* CPU packet counter */
557 u16 cpucount7; /* CPU packet counter */
558 u16 cecount6; /* QE packet counter */
559 u16 cecount7; /* QE packet counter */
560 u32 weightstatus[NUM_TX_QUEUES]; /* accumulated weight factor */
561 u32 rtsrshadow; /* temporary variable handled by QE */
562 u32 time; /* temporary variable handled by QE */
563 u32 ttl; /* temporary variable handled by QE */
564 u32 mblinterval; /* max burst length interval */
565 u16 nortsrbytetime; /* normalized value of byte time in tsr units */
566 u8 fracsiz; /* radix 2 log value of denom. of
567 NorTSRByteTime */
568 u8 res0[1];
569 u8 strictpriorityq; /* Strict Priority Mask register */
570 u8 txasap; /* Transmit ASAP register */
571 u8 extrabw; /* Extra BandWidth register */
572 u8 oldwfqmask; /* temporary variable handled by QE */
573 u8 weightfactor[NUM_TX_QUEUES];
574 /**< weight factor for queues */
575 u32 minw; /* temporary variable handled by QE */
576 u8 res1[0x70 - 0x64];
577} __attribute__ ((packed)) ucc_geth_scheduler_t;
578
579typedef struct ucc_geth_tx_firmware_statistics_pram {
580 u32 sicoltx; /* single collision */
581 u32 mulcoltx; /* multiple collision */
582 u32 latecoltxfr; /* late collision */
583 u32 frabortduecol; /* frames aborted due to transmit collision */
584 u32 frlostinmactxer; /* frames lost due to internal MAC error
585 transmission that are not counted on any
586 other counter */
587 u32 carriersenseertx; /* carrier sense error */
588 u32 frtxok; /* frames transmitted OK */
589	u32 txfrexcessivedefer;	/* frames with deferral time greater than
590 specified threshold */
591 u32 txpkts256; /* total packets (including bad) between 256
592 and 511 octets */
593 u32 txpkts512; /* total packets (including bad) between 512
594 and 1023 octets */
595 u32 txpkts1024; /* total packets (including bad) between 1024
596 and 1518 octets */
597 u32 txpktsjumbo; /* total packets (including bad) between 1024
598 and MAXLength octets */
599} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_pram_t;
600
601typedef struct ucc_geth_rx_firmware_statistics_pram {
602 u32 frrxfcser; /* frames with crc error */
603 u32 fraligner; /* frames with alignment error */
604 u32 inrangelenrxer; /* in range length error */
605 u32 outrangelenrxer; /* out of range length error */
606 u32 frtoolong; /* frame too long */
607 u32 runt; /* runt */
608 u32 verylongevent; /* very long event */
609 u32 symbolerror; /* symbol error */
610 u32 dropbsy; /* drop because of BD not ready */
611 u8 res0[0x8];
612 u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
613 or type mismatch) */
614 u32 underpkts; /* total frames less than 64 octets */
615 u32 pkts256; /* total frames (including bad) between 256 and
616 511 octets */
617 u32 pkts512; /* total frames (including bad) between 512 and
618 1023 octets */
619 u32 pkts1024; /* total frames (including bad) between 1024
620 and 1518 octets */
621 u32 pktsjumbo; /* total frames (including bad) between 1024
622 and MAXLength octets */
623 u32 frlossinmacer; /* frames lost because of internal MAC error
624 that is not counted in any other counter */
625 u32 pausefr; /* pause frames */
626 u8 res1[0x4];
627 u32 removevlan; /* total frames that had their VLAN tag removed
628 */
629 u32 replacevlan; /* total frames that had their VLAN tag
630 replaced */
631 u32 insertvlan; /* total frames that had their VLAN tag
632 inserted */
633} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_pram_t;
634
635typedef struct ucc_geth_rx_interrupt_coalescing_entry {
636 u32 interruptcoalescingmaxvalue; /* interrupt coalescing max
637 value */
638 u32 interruptcoalescingcounter; /* interrupt coalescing counter,
639 initialize to
640 interruptcoalescingmaxvalue */
641} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_entry_t;
642
643typedef struct ucc_geth_rx_interrupt_coalescing_table {
644 ucc_geth_rx_interrupt_coalescing_entry_t coalescingentry[NUM_RX_QUEUES];
645 /**< interrupt coalescing entry */
646} __attribute__ ((packed)) ucc_geth_rx_interrupt_coalescing_table_t;
647
648typedef struct ucc_geth_rx_prefetched_bds {
649 qe_bd_t bd[NUM_BDS_IN_PREFETCHED_BDS]; /* prefetched bd */
650} __attribute__ ((packed)) ucc_geth_rx_prefetched_bds_t;
651
652typedef struct ucc_geth_rx_bd_queues_entry {
653 u32 bdbaseptr; /* BD base pointer */
654 u32 bdptr; /* BD pointer */
655 u32 externalbdbaseptr; /* external BD base pointer */
656 u32 externalbdptr; /* external BD pointer */
657} __attribute__ ((packed)) ucc_geth_rx_bd_queues_entry_t;
658
659typedef struct ucc_geth_tx_global_pram {
660 u16 temoder;
661 u8 res0[0x38 - 0x02];
662 u32 sqptr; /* a base pointer to send queue memory region */
663 u32 schedulerbasepointer; /* a base pointer to scheduler memory
664 region */
665 u32 txrmonbaseptr; /* base pointer to Tx RMON statistics counter */
666 u32 tstate; /* tx internal state. High byte contains
667 function code */
668 u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
669 u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
670 u32 tqptr; /* a base pointer to the Tx Queues Memory
671 Region */
672 u8 res2[0x80 - 0x74];
673} __attribute__ ((packed)) ucc_geth_tx_global_pram_t;
674
675/* structure representing Extended Filtering Global Parameters in PRAM */
676typedef struct ucc_geth_exf_global_pram {
677 u32 l2pcdptr; /* individual address filter, high */
678 u8 res0[0x10 - 0x04];
679} __attribute__ ((packed)) ucc_geth_exf_global_pram_t;
680
681typedef struct ucc_geth_rx_global_pram {
682 u32 remoder; /* ethernet mode reg. */
683 u32 rqptr; /* base pointer to the Rx Queues Memory Region*/
684 u32 res0[0x1];
685 u8 res1[0x20 - 0xC];
686 u16 typeorlen; /* cutoff point less than which, type/len field
687 is considered length */
688 u8 res2[0x1];
689 u8 rxgstpack; /* acknowledgement on GRACEFUL STOP RX command*/
690 u32 rxrmonbaseptr; /* base pointer to Rx RMON statistics counter */
691 u8 res3[0x30 - 0x28];
692 u32 intcoalescingptr; /* Interrupt coalescing table pointer */
693 u8 res4[0x36 - 0x34];
694 u8 rstate; /* rx internal state. High byte contains
695 function code */
696 u8 res5[0x46 - 0x37];
697 u16 mrblr; /* max receive buffer length reg. */
698 u32 rbdqptr; /* base pointer to RxBD parameter table
699 description */
700 u16 mflr; /* max frame length reg. */
701 u16 minflr; /* min frame length reg. */
702 u16 maxd1; /* max dma1 length reg. */
703 u16 maxd2; /* max dma2 length reg. */
704 u32 ecamptr; /* external CAM address */
705 u32 l2qt; /* VLAN priority mapping table. */
706 u32 l3qt[0x8]; /* IP priority mapping table. */
707 u16 vlantype; /* vlan type */
708 u16 vlantci; /* default vlan tci */
709 u8 addressfiltering[64]; /* address filtering data structure */
710 u32 exfGlobalParam; /* base address for extended filtering global
711 parameters */
712 u8 res6[0x100 - 0xC4]; /* Initialize to zero */
713} __attribute__ ((packed)) ucc_geth_rx_global_pram_t;
714
715#define GRACEFUL_STOP_ACKNOWLEDGE_RX 0x01
716
717/* structure representing InitEnet command */
718typedef struct ucc_geth_init_pram {
719 u8 resinit1;
720 u8 resinit2;
721 u8 resinit3;
722 u8 resinit4;
723 u16 resinit5;
724 u8 res1[0x1];
725 u8 largestexternallookupkeysize;
726 u32 rgftgfrxglobal;
727 u32 rxthread[ENET_INIT_PARAM_MAX_ENTRIES_RX]; /* rx threads */
728 u8 res2[0x38 - 0x30];
729 u32 txglobal; /* tx global */
730 u32 txthread[ENET_INIT_PARAM_MAX_ENTRIES_TX]; /* tx threads */
731 u8 res3[0x1];
732} __attribute__ ((packed)) ucc_geth_init_pram_t;
733
734#define ENET_INIT_PARAM_RGF_SHIFT (32 - 4)
735#define ENET_INIT_PARAM_TGF_SHIFT (32 - 8)
736
737#define ENET_INIT_PARAM_RISC_MASK 0x0000003f
738#define ENET_INIT_PARAM_PTR_MASK 0x00ffffc0
739#define ENET_INIT_PARAM_SNUM_MASK 0xff000000
740#define ENET_INIT_PARAM_SNUM_SHIFT 24
741
742#define ENET_INIT_PARAM_MAGIC_RES_INIT1 0x06
743#define ENET_INIT_PARAM_MAGIC_RES_INIT2 0x30
744#define ENET_INIT_PARAM_MAGIC_RES_INIT3 0xff
745#define ENET_INIT_PARAM_MAGIC_RES_INIT4 0x00
746#define ENET_INIT_PARAM_MAGIC_RES_INIT5 0x0400
747
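
Each rxthread[]/txthread[] entry of ucc_geth_init_pram packs a parameter RAM pointer, a serial number (snum) and a RISC allocation into one 32-bit word using the masks above. A minimal sketch of that composition, assuming the pointer value already fits within ENET_INIT_PARAM_PTR_MASK; the helper name is illustrative.

/* Illustrative composition of one InitEnet command entry. */
static u32 make_init_enet_entry(u32 pram_offset, u8 snum, u32 risc)
{
	return (pram_offset & ENET_INIT_PARAM_PTR_MASK) |
	       (((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) &
		ENET_INIT_PARAM_SNUM_MASK) |
	       (risc & ENET_INIT_PARAM_RISC_MASK);
}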
748/* structure representing 82xx Address Filtering Enet Address in PRAM */
749typedef struct ucc_geth_82xx_enet_address {
750 u8 res1[0x2];
751 u16 h; /* address (MSB) */
752 u16 m; /* address */
753 u16 l; /* address (LSB) */
754} __attribute__ ((packed)) ucc_geth_82xx_enet_address_t;
755
756/* structure representing 82xx Address Filtering PRAM */
757typedef struct ucc_geth_82xx_address_filtering_pram {
758 u32 iaddr_h; /* individual address filter, high */
759 u32 iaddr_l; /* individual address filter, low */
760 u32 gaddr_h; /* group address filter, high */
761 u32 gaddr_l; /* group address filter, low */
762 ucc_geth_82xx_enet_address_t taddr;
763 ucc_geth_82xx_enet_address_t paddr[NUM_OF_PADDRS];
764 u8 res0[0x40 - 0x38];
765} __attribute__ ((packed)) ucc_geth_82xx_address_filtering_pram_t;
766
767/* GETH Tx firmware statistics structure, used when calling
768 UCC_GETH_GetStatistics. */
769typedef struct ucc_geth_tx_firmware_statistics {
770 u32 sicoltx; /* single collision */
771 u32 mulcoltx; /* multiple collision */
772 u32 latecoltxfr; /* late collision */
773 u32 frabortduecol; /* frames aborted due to transmit collision */
774 u32 frlostinmactxer; /* frames lost due to internal MAC error
775 transmission that are not counted on any
776 other counter */
777 u32 carriersenseertx; /* carrier sense error */
778 u32 frtxok; /* frames transmitted OK */
779	u32 txfrexcessivedefer;	/* frames with deferral time greater than
780 specified threshold */
781 u32 txpkts256; /* total packets (including bad) between 256
782 and 511 octets */
783 u32 txpkts512; /* total packets (including bad) between 512
784 and 1023 octets */
785 u32 txpkts1024; /* total packets (including bad) between 1024
786 and 1518 octets */
787 u32 txpktsjumbo; /* total packets (including bad) between 1024
788 and MAXLength octets */
789} __attribute__ ((packed)) ucc_geth_tx_firmware_statistics_t;
790
791/* GETH Rx firmware statistics structure, used when calling
792 UCC_GETH_GetStatistics. */
793typedef struct ucc_geth_rx_firmware_statistics {
794 u32 frrxfcser; /* frames with crc error */
795 u32 fraligner; /* frames with alignment error */
796 u32 inrangelenrxer; /* in range length error */
797 u32 outrangelenrxer; /* out of range length error */
798 u32 frtoolong; /* frame too long */
799 u32 runt; /* runt */
800 u32 verylongevent; /* very long event */
801 u32 symbolerror; /* symbol error */
802 u32 dropbsy; /* drop because of BD not ready */
803 u8 res0[0x8];
804 u32 mismatchdrop; /* drop because of MAC filtering (e.g. address
805 or type mismatch) */
806 u32 underpkts; /* total frames less than 64 octets */
807 u32 pkts256; /* total frames (including bad) between 256 and
808 511 octets */
809 u32 pkts512; /* total frames (including bad) between 512 and
810 1023 octets */
811 u32 pkts1024; /* total frames (including bad) between 1024
812 and 1518 octets */
813 u32 pktsjumbo; /* total frames (including bad) between 1024
814 and MAXLength octets */
815 u32 frlossinmacer; /* frames lost because of internal MAC error
816 that is not counted in any other counter */
817 u32 pausefr; /* pause frames */
818 u8 res1[0x4];
819 u32 removevlan; /* total frames that had their VLAN tag removed
820 */
821 u32 replacevlan; /* total frames that had their VLAN tag
822 replaced */
823 u32 insertvlan; /* total frames that had their VLAN tag
824 inserted */
825} __attribute__ ((packed)) ucc_geth_rx_firmware_statistics_t;
826
827/* GETH hardware statistics structure, used when calling
828 UCC_GETH_GetStatistics. */
829typedef struct ucc_geth_hardware_statistics {
830 u32 tx64; /* Total number of frames (including bad
831 frames) transmitted that were exactly of the
832				   minimal length (64 for untagged, 68 for
833				   tagged, or with length exactly equal to the
834				   parameter MINLength) */
835 u32 tx127; /* Total number of frames (including bad
836 frames) transmitted that were between
837 MINLength (Including FCS length==4) and 127
838 octets */
839 u32 tx255; /* Total number of frames (including bad
840 frames) transmitted that were between 128
841 (Including FCS length==4) and 255 octets */
842 u32 rx64; /* Total number of frames received including
843				   bad frames that were exactly of the minimal
844 length (64 bytes) */
845 u32 rx127; /* Total number of frames (including bad
846 frames) received that were between MINLength
847 (Including FCS length==4) and 127 octets */
848 u32 rx255; /* Total number of frames (including bad
849 frames) received that were between 128
850 (Including FCS length==4) and 255 octets */
851 u32 txok; /* Total number of octets residing in frames
852				   that were involved in successful
853 transmission */
854 u16 txcf; /* Total number of PAUSE control frames
855 transmitted by this MAC */
856 u32 tmca; /* Total number of frames that were transmitted
857				   successfully with the group address bit set
858				   that are not broadcast frames */
859	u32 tbca;		/* Total number of frames transmitted
860				   successfully that had destination address
861 field equal to the broadcast address */
862 u32 rxfok; /* Total number of frames received OK */
863 u32 rxbok; /* Total number of octets received OK */
864 u32 rbyt; /* Total number of octets received including
865 octets in bad frames. Must be implemented in
866 HW because it includes octets in frames that
867 never even reach the UCC */
868 u32 rmca; /* Total number of frames that were received
869				   successfully with the group address bit set
870				   that are not broadcast frames */
871	u32 rbca;		/* Total number of frames received successfully
872 that had destination address equal to the
873 broadcast address */
874} __attribute__ ((packed)) ucc_geth_hardware_statistics_t;
875
876/* UCC GETH Tx errors returned via TxConf callback */
877#define TX_ERRORS_DEF 0x0200
878#define TX_ERRORS_EXDEF 0x0100
879#define TX_ERRORS_LC 0x0080
880#define TX_ERRORS_RL 0x0040
881#define TX_ERRORS_RC_MASK 0x003C
882#define TX_ERRORS_RC_SHIFT 2
883#define TX_ERRORS_UN 0x0002
884#define TX_ERRORS_CSL 0x0001
885
886/* UCC GETH Rx errors returned via RxStore callback */
887#define RX_ERRORS_CMR 0x0200
888#define RX_ERRORS_M 0x0100
889#define RX_ERRORS_BC 0x0080
890#define RX_ERRORS_MC 0x0040
891
892/* Transmit BD. These are in addition to values defined in uccf. */
893#define T_VID 0x003c0000 /* insert VLAN id index mask. */
894#define T_DEF (((u32) TX_ERRORS_DEF ) << 16)
895#define T_EXDEF (((u32) TX_ERRORS_EXDEF ) << 16)
896#define T_LC (((u32) TX_ERRORS_LC ) << 16)
897#define T_RL (((u32) TX_ERRORS_RL ) << 16)
898#define T_RC_MASK (((u32) TX_ERRORS_RC_MASK ) << 16)
899#define T_UN (((u32) TX_ERRORS_UN ) << 16)
900#define T_CSL (((u32) TX_ERRORS_CSL ) << 16)
901#define T_ERRORS_REPORT (T_DEF | T_EXDEF | T_LC | T_RL | T_RC_MASK \
902 | T_UN | T_CSL) /* transmit errors to report */
903
904/* Receive BD. These are in addition to values defined in uccf. */
905#define R_LG 0x00200000 /* Frame length violation. */
906#define R_NO 0x00100000 /* Non-octet aligned frame. */
907#define R_SH 0x00080000 /* Short frame. */
908#define R_CR 0x00040000 /* CRC error. */
909#define R_OV 0x00020000 /* Overrun. */
910#define R_IPCH 0x00010000 /* IP checksum check failed. */
911#define R_CMR (((u32) RX_ERRORS_CMR ) << 16)
912#define R_M (((u32) RX_ERRORS_M ) << 16)
913#define R_BC (((u32) RX_ERRORS_BC ) << 16)
914#define R_MC (((u32) RX_ERRORS_MC ) << 16)
915#define R_ERRORS_REPORT (R_CMR | R_M | R_BC | R_MC) /* receive errors to
916 report */
917#define R_ERRORS_FATAL (R_LG | R_NO | R_SH | R_CR | \
918 R_OV | R_IPCH) /* receive errors to discard */
919
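
R_ERRORS_FATAL names the receive errors that force a frame to be discarded, while R_ERRORS_REPORT names the ones that are only counted. A small sketch of how an Rx completion path could apply the split, assuming bd_status is the 32-bit status word of a completed Rx BD; the counters chosen here (from struct net_device_stats in <linux/netdevice.h>) are merely illustrative.

/* Illustrative classification of a completed Rx BD status word. */
static int rx_frame_usable(u32 bd_status, struct net_device_stats *stats)
{
	if (bd_status & R_ERRORS_FATAL) {	/* CRC, overrun, runt, ... */
		stats->rx_errors++;
		return 0;			/* drop the frame */
	}
	if (bd_status & R_ERRORS_REPORT)	/* e.g. CAM miss, multicast */
		stats->rx_frame_errors++;	/* count it, keep the frame */
	return 1;
}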
920/* Alignments */
921#define UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT 256
922#define UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT 128
923#define UCC_GETH_THREAD_RX_PRAM_ALIGNMENT 128
924#define UCC_GETH_THREAD_TX_PRAM_ALIGNMENT 64
925#define UCC_GETH_THREAD_DATA_ALIGNMENT 256 /* spec gives values
926 based on num of
927 threads, but always
928 using the maximum is
929 easier */
930#define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32
931#define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */
932#define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */
933#define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */
934#define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 4 /* This is a
935 guess */
936#define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */
937#define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */
938#define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This
939 is a
940 guess
941 */
942#define UCC_GETH_RX_BD_RING_ALIGNMENT 32
943#define UCC_GETH_TX_BD_RING_ALIGNMENT 32
944#define UCC_GETH_MRBLR_ALIGNMENT 128
945#define UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT 4
946#define UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT 32
947#define UCC_GETH_RX_DATA_BUF_ALIGNMENT 64
948
949#define UCC_GETH_TAD_EF 0x80
950#define UCC_GETH_TAD_V 0x40
951#define UCC_GETH_TAD_REJ 0x20
952#define UCC_GETH_TAD_VTAG_OP_RIGHT_SHIFT 2
953#define UCC_GETH_TAD_VTAG_OP_SHIFT 6
954#define UCC_GETH_TAD_V_NON_VTAG_OP 0x20
955#define UCC_GETH_TAD_RQOS_SHIFT 0
956#define UCC_GETH_TAD_V_PRIORITY_SHIFT 5
957#define UCC_GETH_TAD_CFI 0x10
958
959#define UCC_GETH_VLAN_PRIORITY_MAX 8
960#define UCC_GETH_IP_PRIORITY_MAX 64
961#define UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX 8
962#define UCC_GETH_RX_BD_RING_SIZE_MIN 8
963#define UCC_GETH_TX_BD_RING_SIZE_MIN 2
964
965#define UCC_GETH_SIZE_OF_BD QE_SIZEOF_BD
966
967/* Driver definitions */
968#define TX_BD_RING_LEN 0x10
969#define RX_BD_RING_LEN 0x10
970#define UCC_GETH_DEV_WEIGHT TX_BD_RING_LEN
971
972#define TX_RING_MOD_MASK(size) (size-1)
973#define RX_RING_MOD_MASK(size) (size-1)
974
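
TX_RING_MOD_MASK()/RX_RING_MOD_MASK() implement wrap-around only for power-of-two ring lengths (both ring lengths above are 0x10). A one-liner showing the intended use; the helper name is illustrative.

/* Illustrative ring-index advance; valid only for power-of-two ring sizes. */
static inline u16 next_tx_index(u16 cur)
{
	return (cur + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
}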
975#define ENET_NUM_OCTETS_PER_ADDRESS 6
976#define ENET_GROUP_ADDR 0x01 /* Group address mask
977 for ethernet
978 addresses */
979
980#define TX_TIMEOUT (1*HZ)
981#define SKB_ALLOC_TIMEOUT 100000
982#define PHY_INIT_TIMEOUT 100000
983#define PHY_CHANGE_TIME 2
984
985/* Fast Ethernet (10/100 Mbps) */
986#define UCC_GETH_URFS_INIT 512 /* Rx virtual FIFO size
987 */
988#define UCC_GETH_URFET_INIT 256 /* 1/2 urfs */
989#define UCC_GETH_URFSET_INIT 384 /* 3/4 urfs */
990#define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size
991 */
992#define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */
993#define UCC_GETH_UTFTT_INIT 128
994/* Gigabit Ethernet (1000 Mbps) */
995#define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual
996 FIFO size */
997#define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */
998#define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */
999#define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual
1000 FIFO size */
1001#define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */
1002#define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */
1003
1004#define UCC_GETH_REMODER_INIT 0 /* bits that must be
1005 set */
1006#define UCC_GETH_TEMODER_INIT	0xC000	/* bits that must be set */
1007#define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value
1008 for this
1009 register */
1010#define UCC_GETH_MACCFG1_INIT 0
1011#define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1)
1012#define UCC_GETH_MIIMCFG_MNGMNT_CLC_DIV_INIT \
1013 (MIIMCFG_MANAGEMENT_CLOCK_DIVIDE_BY_112)
1014
1015/* Ethernet speed */
1016typedef enum enet_speed {
1017 ENET_SPEED_10BT, /* 10 Base T */
1018 ENET_SPEED_100BT, /* 100 Base T */
1019 ENET_SPEED_1000BT /* 1000 Base T */
1020} enet_speed_e;
1021
1022/* Ethernet Address Type. */
1023typedef enum enet_addr_type {
1024 ENET_ADDR_TYPE_INDIVIDUAL,
1025 ENET_ADDR_TYPE_GROUP,
1026 ENET_ADDR_TYPE_BROADCAST
1027} enet_addr_type_e;
1028
1029/* TBI / MII Set Register */
1030typedef enum enet_tbi_mii_reg {
1031 ENET_TBI_MII_CR = 0x00, /* Control (CR ) */
1032 ENET_TBI_MII_SR = 0x01, /* Status (SR ) */
1033 ENET_TBI_MII_ANA = 0x04, /* AN advertisement (ANA ) */
1034 ENET_TBI_MII_ANLPBPA = 0x05, /* AN link partner base page ability
1035 (ANLPBPA) */
1036 ENET_TBI_MII_ANEX = 0x06, /* AN expansion (ANEX ) */
1037 ENET_TBI_MII_ANNPT = 0x07, /* AN next page transmit (ANNPT ) */
1038 ENET_TBI_MII_ANLPANP = 0x08, /* AN link partner ability next page
1039 (ANLPANP) */
1040 ENET_TBI_MII_EXST = 0x0F, /* Extended status (EXST ) */
1041 ENET_TBI_MII_JD = 0x10, /* Jitter diagnostics (JD ) */
1042 ENET_TBI_MII_TBICON = 0x11 /* TBI control (TBICON ) */
1043} enet_tbi_mii_reg_e;
1044
1045/* UCC GETH 82xx Ethernet Address Recognition Location */
1046typedef enum ucc_geth_enet_address_recognition_location {
1047 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_STATION_ADDRESS,/* station
1048 address */
1049 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_FIRST, /* additional
1050 station
1051 address
1052 paddr1 */
1053 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR2, /* additional
1054 station
1055 address
1056 paddr2 */
1057 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR3, /* additional
1058 station
1059 address
1060 paddr3 */
1061 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_PADDR_LAST, /* additional
1062 station
1063 address
1064 paddr4 */
1065 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_GROUP_HASH, /* group hash */
1066 UCC_GETH_ENET_ADDRESS_RECOGNITION_LOCATION_INDIVIDUAL_HASH /* individual
1067 hash */
1068} ucc_geth_enet_address_recognition_location_e;
1069
1070/* UCC GETH vlan operation tagged */
1071typedef enum ucc_geth_vlan_operation_tagged {
1072 UCC_GETH_VLAN_OPERATION_TAGGED_NOP = 0x0, /* Tagged - nop */
1073 UCC_GETH_VLAN_OPERATION_TAGGED_REPLACE_VID_PORTION_OF_Q_TAG
1074 = 0x1, /* Tagged - replace vid portion of q tag */
1075 UCC_GETH_VLAN_OPERATION_TAGGED_IF_VID0_REPLACE_VID_WITH_DEFAULT_VALUE
1076 = 0x2, /* Tagged - if vid0 replace vid with default value */
1077 UCC_GETH_VLAN_OPERATION_TAGGED_EXTRACT_Q_TAG_FROM_FRAME
1078 = 0x3 /* Tagged - extract q tag from frame */
1079} ucc_geth_vlan_operation_tagged_e;
1080
1081/* UCC GETH vlan operation non-tagged */
1082typedef enum ucc_geth_vlan_operation_non_tagged {
1083 UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP = 0x0, /* Non tagged - nop */
1084 UCC_GETH_VLAN_OPERATION_NON_TAGGED_Q_TAG_INSERT = 0x1 /* Non tagged -
1085 q tag insert
1086 */
1087} ucc_geth_vlan_operation_non_tagged_e;
1088
1089/* UCC GETH Rx Quality of Service Mode */
1090typedef enum ucc_geth_qos_mode {
1091 UCC_GETH_QOS_MODE_DEFAULT = 0x0, /* default queue */
1092 UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L2_CRITERIA = 0x1, /* queue
1093 determined
1094 by L2
1095 criteria */
1096 UCC_GETH_QOS_MODE_QUEUE_NUM_FROM_L3_CRITERIA = 0x2 /* queue
1097 determined
1098 by L3
1099 criteria */
1100} ucc_geth_qos_mode_e;
1101
1102/* UCC GETH Statistics Gathering Mode - These are bit flags, 'or' them together
1103 for combined functionality */
1104typedef enum ucc_geth_statistics_gathering_mode {
1105 UCC_GETH_STATISTICS_GATHERING_MODE_NONE = 0x00000000, /* No
1106 statistics
1107 gathering */
1108 UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE = 0x00000001,/* Enable
1109 hardware
1110 statistics
1111 gathering
1112 */
1113 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX = 0x00000004,/*Enable
1114 firmware
1115 tx
1116 statistics
1117 gathering
1118 */
1119 UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX = 0x00000008/* Enable
1120 firmware
1121 rx
1122 statistics
1123 gathering
1124 */
1125} ucc_geth_statistics_gathering_mode_e;
1126
1127/* UCC GETH Pad and CRC Mode - Note, Padding without CRC is not possible */
1128typedef enum ucc_geth_maccfg2_pad_and_crc_mode {
1129 UCC_GETH_PAD_AND_CRC_MODE_NONE
1130 = MACCFG2_PAD_AND_CRC_MODE_NONE, /* Neither Padding
1131 short frames
1132 nor CRC */
1133 UCC_GETH_PAD_AND_CRC_MODE_CRC_ONLY
1134 = MACCFG2_PAD_AND_CRC_MODE_CRC_ONLY, /* Append
1135 CRC only */
1136 UCC_GETH_PAD_AND_CRC_MODE_PAD_AND_CRC =
1137 MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC
1138} ucc_geth_maccfg2_pad_and_crc_mode_e;
1139
1140/* UCC GETH upsmr Flow Control Mode */
1141typedef enum ucc_geth_flow_control_mode {
1142 UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE = 0x00000000, /* No automatic
1143 flow control
1144 */
1145 UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_PAUSE_WHEN_EMERGENCY
1146 = 0x00004000 /* Send pause frame when RxFIFO reaches its
1147 emergency threshold */
1148} ucc_geth_flow_control_mode_e;
1149
1150/* UCC GETH number of threads */
1151typedef enum ucc_geth_num_of_threads {
1152 UCC_GETH_NUM_OF_THREADS_1 = 0x1, /* 1 */
1153 UCC_GETH_NUM_OF_THREADS_2 = 0x2, /* 2 */
1154 UCC_GETH_NUM_OF_THREADS_4 = 0x0, /* 4 */
1155 UCC_GETH_NUM_OF_THREADS_6 = 0x3, /* 6 */
1156 UCC_GETH_NUM_OF_THREADS_8 = 0x4 /* 8 */
1157} ucc_geth_num_of_threads_e;
1158
1159/* UCC GETH number of station addresses */
1160typedef enum ucc_geth_num_of_station_addresses {
1161 UCC_GETH_NUM_OF_STATION_ADDRESSES_1, /* 1 */
1162 UCC_GETH_NUM_OF_STATION_ADDRESSES_5 /* 5 */
1163} ucc_geth_num_of_station_addresses_e;
1164
1165typedef u8 enet_addr_t[ENET_NUM_OCTETS_PER_ADDRESS];
1166
1167/* UCC GETH 82xx Ethernet Address Container */
1168typedef struct enet_addr_container {
1169 enet_addr_t address; /* ethernet address */
1170 ucc_geth_enet_address_recognition_location_e location; /* location in
1171 82xx address
1172 recognition
1173 hardware */
1174 struct list_head node;
1175} enet_addr_container_t;
1176
1177#define ENET_ADDR_CONT_ENTRY(ptr) list_entry(ptr, enet_addr_container_t, node)
1178
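
enet_addr_container_t entries are chained on the group_hash_q/ind_hash_q lists of ucc_geth_private below, and ENET_ADDR_CONT_ENTRY() recovers the container from its list node. A minimal, illustrative walk over such a list; the helper name and the debug output are assumptions, not driver code.

/* Illustrative traversal of an address-container list. */
static void dump_addr_containers(struct list_head *q)
{
	struct list_head *pos;

	list_for_each(pos, q) {
		enet_addr_container_t *cont = ENET_ADDR_CONT_ENTRY(pos);

		pr_debug("addr %02x:%02x:%02x:%02x:%02x:%02x, location %d\n",
			 cont->address[0], cont->address[1], cont->address[2],
			 cont->address[3], cont->address[4], cont->address[5],
			 cont->location);
	}
}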
1179/* UCC GETH Termination Action Descriptor (TAD) structure. */
1180typedef struct ucc_geth_tad_params {
1181 int rx_non_dynamic_extended_features_mode;
1182 int reject_frame;
1183 ucc_geth_vlan_operation_tagged_e vtag_op;
1184 ucc_geth_vlan_operation_non_tagged_e vnontag_op;
1185 ucc_geth_qos_mode_e rqos;
1186 u8 vpri;
1187 u16 vid;
1188} ucc_geth_tad_params_t;
1189
1190/* GETH protocol initialization structure */
1191typedef struct ucc_geth_info {
1192 ucc_fast_info_t uf_info;
1193 u8 numQueuesTx;
1194 u8 numQueuesRx;
1195 int ipCheckSumCheck;
1196 int ipCheckSumGenerate;
1197 int rxExtendedFiltering;
1198 u32 extendedFilteringChainPointer;
1199 u16 typeorlen;
1200 int dynamicMaxFrameLength;
1201 int dynamicMinFrameLength;
1202 u8 nonBackToBackIfgPart1;
1203 u8 nonBackToBackIfgPart2;
1204 u8 miminumInterFrameGapEnforcement;
1205 u8 backToBackInterFrameGap;
1206 int ipAddressAlignment;
1207 int lengthCheckRx;
1208 u32 mblinterval;
1209 u16 nortsrbytetime;
1210 u8 fracsiz;
1211 u8 strictpriorityq;
1212 u8 txasap;
1213 u8 extrabw;
1214 int miiPreambleSupress;
1215 u8 altBebTruncation;
1216 int altBeb;
1217 int backPressureNoBackoff;
1218 int noBackoff;
1219 int excessDefer;
1220 u8 maxRetransmission;
1221 u8 collisionWindow;
1222 int pro;
1223 int cap;
1224 int rsh;
1225 int rlpb;
1226 int cam;
1227 int bro;
1228 int ecm;
1229 int receiveFlowControl;
1230 u8 maxGroupAddrInHash;
1231 u8 maxIndAddrInHash;
1232 u8 prel;
1233 u16 maxFrameLength;
1234 u16 minFrameLength;
1235 u16 maxD1Length;
1236 u16 maxD2Length;
1237 u16 vlantype;
1238 u16 vlantci;
1239 u32 ecamptr;
1240 u32 eventRegMask;
1241 u16 pausePeriod;
1242 u16 extensionField;
1243 u8 phy_address;
1244 u32 board_flags;
1245 u32 phy_interrupt;
1246 u8 weightfactor[NUM_TX_QUEUES];
1247 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
1248 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
1249 u8 l3qt[UCC_GETH_IP_PRIORITY_MAX];
1250 u32 vtagtable[UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX];
1251 u8 iphoffset[TX_IP_OFFSET_ENTRY_MAX];
1252 u16 bdRingLenTx[NUM_TX_QUEUES];
1253 u16 bdRingLenRx[NUM_RX_QUEUES];
1254 enet_interface_e enet_interface;
1255 ucc_geth_num_of_station_addresses_e numStationAddresses;
1256 qe_fltr_largest_external_tbl_lookup_key_size_e
1257 largestexternallookupkeysize;
1258 ucc_geth_statistics_gathering_mode_e statisticsMode;
1259 ucc_geth_vlan_operation_tagged_e vlanOperationTagged;
1260 ucc_geth_vlan_operation_non_tagged_e vlanOperationNonTagged;
1261 ucc_geth_qos_mode_e rxQoSMode;
1262 ucc_geth_flow_control_mode_e aufc;
1263 ucc_geth_maccfg2_pad_and_crc_mode_e padAndCrc;
1264 ucc_geth_num_of_threads_e numThreadsTx;
1265 ucc_geth_num_of_threads_e numThreadsRx;
1266 qe_risc_allocation_e riscTx;
1267 qe_risc_allocation_e riscRx;
1268} ucc_geth_info_t;
1269
1270/* structure representing UCC GETH */
1271typedef struct ucc_geth_private {
1272 ucc_geth_info_t *ug_info;
1273 ucc_fast_private_t *uccf;
1274 struct net_device *dev;
1275 struct net_device_stats stats; /* linux network statistics */
1276 ucc_geth_t *ug_regs;
1277 ucc_geth_init_pram_t *p_init_enet_param_shadow;
1278 ucc_geth_exf_global_pram_t *p_exf_glbl_param;
1279 u32 exf_glbl_param_offset;
1280 ucc_geth_rx_global_pram_t *p_rx_glbl_pram;
1281 u32 rx_glbl_pram_offset;
1282 ucc_geth_tx_global_pram_t *p_tx_glbl_pram;
1283 u32 tx_glbl_pram_offset;
1284 ucc_geth_send_queue_mem_region_t *p_send_q_mem_reg;
1285 u32 send_q_mem_reg_offset;
1286 ucc_geth_thread_data_tx_t *p_thread_data_tx;
1287 u32 thread_dat_tx_offset;
1288 ucc_geth_thread_data_rx_t *p_thread_data_rx;
1289 u32 thread_dat_rx_offset;
1290 ucc_geth_scheduler_t *p_scheduler;
1291 u32 scheduler_offset;
1292 ucc_geth_tx_firmware_statistics_pram_t *p_tx_fw_statistics_pram;
1293 u32 tx_fw_statistics_pram_offset;
1294 ucc_geth_rx_firmware_statistics_pram_t *p_rx_fw_statistics_pram;
1295 u32 rx_fw_statistics_pram_offset;
1296 ucc_geth_rx_interrupt_coalescing_table_t *p_rx_irq_coalescing_tbl;
1297 u32 rx_irq_coalescing_tbl_offset;
1298 ucc_geth_rx_bd_queues_entry_t *p_rx_bd_qs_tbl;
1299 u32 rx_bd_qs_tbl_offset;
1300 u8 *p_tx_bd_ring[NUM_TX_QUEUES];
1301 u32 tx_bd_ring_offset[NUM_TX_QUEUES];
1302 u8 *p_rx_bd_ring[NUM_RX_QUEUES];
1303 u32 rx_bd_ring_offset[NUM_RX_QUEUES];
1304 u8 *confBd[NUM_TX_QUEUES];
1305 u8 *txBd[NUM_TX_QUEUES];
1306 u8 *rxBd[NUM_RX_QUEUES];
1307 int badFrame[NUM_RX_QUEUES];
1308 u16 cpucount[NUM_TX_QUEUES];
1309 volatile u16 *p_cpucount[NUM_TX_QUEUES];
1310 int indAddrRegUsed[NUM_OF_PADDRS];
1311 enet_addr_t paddr[NUM_OF_PADDRS];
1312 u8 numGroupAddrInHash;
1313 u8 numIndAddrInHash;
1314 u8 numIndAddrInReg;
1315 int rx_extended_features;
1316 int rx_non_dynamic_extended_features;
1317 struct list_head conf_skbs;
1318 struct list_head group_hash_q;
1319 struct list_head ind_hash_q;
1320 u32 saved_uccm;
1321 spinlock_t lock;
1322 /* pointers to arrays of skbuffs for tx and rx */
1323 struct sk_buff **tx_skbuff[NUM_TX_QUEUES];
1324 struct sk_buff **rx_skbuff[NUM_RX_QUEUES];
1325	/* indices pointing to the next free skb in skb arrays */
1326 u16 skb_curtx[NUM_TX_QUEUES];
1327 u16 skb_currx[NUM_RX_QUEUES];
1328 /* index of the first skb which hasn't been transmitted yet. */
1329 u16 skb_dirtytx[NUM_TX_QUEUES];
1330
1331 struct work_struct tq;
1332 struct timer_list phy_info_timer;
1333 struct ugeth_mii_info *mii_info;
1334 int oldspeed;
1335 int oldduplex;
1336 int oldlink;
1337} ucc_geth_private_t;
1338
1339#endif /* __UCC_GETH_H__ */
diff --git a/drivers/net/ucc_geth_phy.c b/drivers/net/ucc_geth_phy.c
new file mode 100644
index 000000000000..f91028c5386d
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.c
@@ -0,0 +1,801 @@
1/*
2 * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * UCC GETH Driver -- PHY handling
8 *
9 * Changelog:
10 * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19
20#include <linux/config.h>
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/string.h>
24#include <linux/errno.h>
25#include <linux/slab.h>
26#include <linux/interrupt.h>
27#include <linux/init.h>
28#include <linux/delay.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
33#include <linux/mm.h>
34#include <linux/module.h>
35#include <linux/version.h>
36#include <linux/crc32.h>
37#include <linux/mii.h>
38#include <linux/ethtool.h>
39
40#include <asm/io.h>
41#include <asm/irq.h>
42#include <asm/uaccess.h>
43
44#include "ucc_geth.h"
45#include "ucc_geth_phy.h"
46#include <platforms/83xx/mpc8360e_pb.h>
47
48#define ugphy_printk(level, format, arg...) \
49 printk(level format "\n", ## arg)
50
51#define ugphy_dbg(format, arg...) \
52 ugphy_printk(KERN_DEBUG, format , ## arg)
53#define ugphy_err(format, arg...) \
54 ugphy_printk(KERN_ERR, format , ## arg)
55#define ugphy_info(format, arg...) \
56 ugphy_printk(KERN_INFO, format , ## arg)
57#define ugphy_warn(format, arg...) \
58 ugphy_printk(KERN_WARNING, format , ## arg)
59
60#ifdef UGETH_VERBOSE_DEBUG
61#define ugphy_vdbg ugphy_dbg
62#else
63#define ugphy_vdbg(fmt, args...) do { } while (0)
64#endif /* UGETH_VERBOSE_DEBUG */
65
66static void config_genmii_advert(struct ugeth_mii_info *mii_info);
67static void genmii_setup_forced(struct ugeth_mii_info *mii_info);
68static void genmii_restart_aneg(struct ugeth_mii_info *mii_info);
69static int gbit_config_aneg(struct ugeth_mii_info *mii_info);
70static int genmii_config_aneg(struct ugeth_mii_info *mii_info);
71static int genmii_update_link(struct ugeth_mii_info *mii_info);
72static int genmii_read_status(struct ugeth_mii_info *mii_info);
73u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum);
74void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val);
75
76static u8 *bcsr_regs = NULL;
77
78/* Write value to register regnum of the PHY for this device, */
79/* waiting until the write is done before returning. All PHY */
80/* configuration has to be done through the TSEC1 MIIM regs */
81void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value)
82{
83 ucc_geth_private_t *ugeth = netdev_priv(dev);
84 ucc_mii_mng_t *mii_regs;
85 enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
86 u32 tmp_reg;
87
88 ugphy_vdbg("%s: IN", __FUNCTION__);
89
90 spin_lock_irq(&ugeth->lock);
91
92 mii_regs = ugeth->mii_info->mii_regs;
93
94 /* Set this UCC to be the master of the MII management */
95 ucc_set_qe_mux_mii_mng(ugeth->ug_info->uf_info.ucc_num);
96
97 /* Stop the MII management read cycle */
98 out_be32(&mii_regs->miimcom, 0);
99 /* Setting up the MII Management Address Register */
100 tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
101 out_be32(&mii_regs->miimadd, tmp_reg);
102
103 /* Setting up the MII Management Control Register with the value */
104 out_be32(&mii_regs->miimcon, (u32) value);
105
106 /* Wait till MII management write is complete */
107 while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
108 cpu_relax();
109
110 spin_unlock_irq(&ugeth->lock);
111
112 udelay(10000);
113}
114
115/* Reads from register regnum in the PHY for device dev, */
116/* returning the value. Clears miimcom first. All PHY */
117/* configuration has to be done through the TSEC1 MIIM regs */
118int read_phy_reg(struct net_device *dev, int mii_id, int regnum)
119{
120 ucc_geth_private_t *ugeth = netdev_priv(dev);
121 ucc_mii_mng_t *mii_regs;
122 enet_tbi_mii_reg_e mii_reg = (enet_tbi_mii_reg_e) regnum;
123 u32 tmp_reg;
124 u16 value;
125
126 ugphy_vdbg("%s: IN", __FUNCTION__);
127
128 spin_lock_irq(&ugeth->lock);
129
130 mii_regs = ugeth->mii_info->mii_regs;
131
132 /* Setting up the MII Management Address Register */
133 tmp_reg = ((u32) mii_id << MIIMADD_PHY_ADDRESS_SHIFT) | mii_reg;
134 out_be32(&mii_regs->miimadd, tmp_reg);
135
136 /* Perform an MII management read cycle */
137 out_be32(&mii_regs->miimcom, MIIMCOM_READ_CYCLE);
138
139 /* Wait till MII management read is complete */
140 while ((in_be32(&mii_regs->miimind)) & MIIMIND_BUSY)
141 cpu_relax();
142
143 udelay(10000);
144
145 /* Read MII management status */
146 value = (u16) in_be32(&mii_regs->miimstat);
147 out_be32(&mii_regs->miimcom, 0);
148 if (value == 0xffff)
149 ugphy_warn("read wrong value: mii_id %d, mii_reg %d, base %08x",
150 mii_id, mii_reg, (u32) & (mii_regs->miimcfg));
151
152 spin_unlock_irq(&ugeth->lock);
153
154 return (value);
155}
156
157void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info)
158{
159 ugphy_vdbg("%s: IN", __FUNCTION__);
160
161 if (mii_info->phyinfo->ack_interrupt)
162 mii_info->phyinfo->ack_interrupt(mii_info);
163}
164
165void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
166 u32 interrupts)
167{
168 ugphy_vdbg("%s: IN", __FUNCTION__);
169
170 mii_info->interrupts = interrupts;
171 if (mii_info->phyinfo->config_intr)
172 mii_info->phyinfo->config_intr(mii_info);
173}
174
175/* Writes MII_ADVERTISE with the appropriate values, after
176 * sanitizing advertise to make sure only supported features
177 * are advertised
178 */
179static void config_genmii_advert(struct ugeth_mii_info *mii_info)
180{
181 u32 advertise;
182 u16 adv;
183
184 ugphy_vdbg("%s: IN", __FUNCTION__);
185
186 /* Only allow advertising what this PHY supports */
187 mii_info->advertising &= mii_info->phyinfo->features;
188 advertise = mii_info->advertising;
189
190 /* Setup standard advertisement */
191 adv = phy_read(mii_info, MII_ADVERTISE);
192 adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
193 if (advertise & ADVERTISED_10baseT_Half)
194 adv |= ADVERTISE_10HALF;
195 if (advertise & ADVERTISED_10baseT_Full)
196 adv |= ADVERTISE_10FULL;
197 if (advertise & ADVERTISED_100baseT_Half)
198 adv |= ADVERTISE_100HALF;
199 if (advertise & ADVERTISED_100baseT_Full)
200 adv |= ADVERTISE_100FULL;
201 phy_write(mii_info, MII_ADVERTISE, adv);
202}
203
204static void genmii_setup_forced(struct ugeth_mii_info *mii_info)
205{
206 u16 ctrl;
207 u32 features = mii_info->phyinfo->features;
208
209 ugphy_vdbg("%s: IN", __FUNCTION__);
210
211 ctrl = phy_read(mii_info, MII_BMCR);
212
213 ctrl &=
214 ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
215 ctrl |= BMCR_RESET;
216
217 switch (mii_info->speed) {
218 case SPEED_1000:
219 if (features & (SUPPORTED_1000baseT_Half
220 | SUPPORTED_1000baseT_Full)) {
221 ctrl |= BMCR_SPEED1000;
222 break;
223 }
224 mii_info->speed = SPEED_100;
225 case SPEED_100:
226 if (features & (SUPPORTED_100baseT_Half
227 | SUPPORTED_100baseT_Full)) {
228 ctrl |= BMCR_SPEED100;
229 break;
230 }
231 mii_info->speed = SPEED_10;
232 case SPEED_10:
233 if (features & (SUPPORTED_10baseT_Half
234 | SUPPORTED_10baseT_Full))
235 break;
236 default: /* Unsupported speed! */
237 ugphy_err("%s: Bad speed!", mii_info->dev->name);
238 break;
239 }
240
241 phy_write(mii_info, MII_BMCR, ctrl);
242}
243
244/* Enable and Restart Autonegotiation */
245static void genmii_restart_aneg(struct ugeth_mii_info *mii_info)
246{
247 u16 ctl;
248
249 ugphy_vdbg("%s: IN", __FUNCTION__);
250
251 ctl = phy_read(mii_info, MII_BMCR);
252 ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
253 phy_write(mii_info, MII_BMCR, ctl);
254}
255
256static int gbit_config_aneg(struct ugeth_mii_info *mii_info)
257{
258 u16 adv;
259 u32 advertise;
260
261 ugphy_vdbg("%s: IN", __FUNCTION__);
262
263 if (mii_info->autoneg) {
264 /* Configure the ADVERTISE register */
265 config_genmii_advert(mii_info);
266 advertise = mii_info->advertising;
267
268 adv = phy_read(mii_info, MII_1000BASETCONTROL);
269 adv &= ~(MII_1000BASETCONTROL_FULLDUPLEXCAP |
270 MII_1000BASETCONTROL_HALFDUPLEXCAP);
271 if (advertise & SUPPORTED_1000baseT_Half)
272 adv |= MII_1000BASETCONTROL_HALFDUPLEXCAP;
273 if (advertise & SUPPORTED_1000baseT_Full)
274 adv |= MII_1000BASETCONTROL_FULLDUPLEXCAP;
275 phy_write(mii_info, MII_1000BASETCONTROL, adv);
276
277 /* Start/Restart aneg */
278 genmii_restart_aneg(mii_info);
279 } else
280 genmii_setup_forced(mii_info);
281
282 return 0;
283}
284
285static int genmii_config_aneg(struct ugeth_mii_info *mii_info)
286{
287 ugphy_vdbg("%s: IN", __FUNCTION__);
288
289 if (mii_info->autoneg) {
290 config_genmii_advert(mii_info);
291 genmii_restart_aneg(mii_info);
292 } else
293 genmii_setup_forced(mii_info);
294
295 return 0;
296}
297
298static int genmii_update_link(struct ugeth_mii_info *mii_info)
299{
300 u16 status;
301
302 ugphy_vdbg("%s: IN", __FUNCTION__);
303
304 /* Do a fake read */
305 phy_read(mii_info, MII_BMSR);
306
307 /* Read link and autonegotiation status */
308 status = phy_read(mii_info, MII_BMSR);
309 if ((status & BMSR_LSTATUS) == 0)
310 mii_info->link = 0;
311 else
312 mii_info->link = 1;
313
314 /* If we are autonegotiating, and not done,
315 * return an error */
316 if (mii_info->autoneg && !(status & BMSR_ANEGCOMPLETE))
317 return -EAGAIN;
318
319 return 0;
320}
321
322static int genmii_read_status(struct ugeth_mii_info *mii_info)
323{
324 u16 status;
325 int err;
326
327 ugphy_vdbg("%s: IN", __FUNCTION__);
328
329 /* Update the link, but return if there
330 * was an error */
331 err = genmii_update_link(mii_info);
332 if (err)
333 return err;
334
335 if (mii_info->autoneg) {
336 status = phy_read(mii_info, MII_LPA);
337
338 if (status & (LPA_10FULL | LPA_100FULL))
339 mii_info->duplex = DUPLEX_FULL;
340 else
341 mii_info->duplex = DUPLEX_HALF;
342 if (status & (LPA_100FULL | LPA_100HALF))
343 mii_info->speed = SPEED_100;
344 else
345 mii_info->speed = SPEED_10;
346 mii_info->pause = 0;
347 }
348 /* On non-aneg, we assume what we put in BMCR is the speed,
349 * though magic-aneg shouldn't prevent this case from occurring
350 */
351
352 return 0;
353}
354
355static int marvell_init(struct ugeth_mii_info *mii_info)
356{
357 ugphy_vdbg("%s: IN", __FUNCTION__);
358
359 phy_write(mii_info, 0x14, 0x0cd2);
360 phy_write(mii_info, MII_BMCR,
361 phy_read(mii_info, MII_BMCR) | BMCR_RESET);
362 msleep(4000);
363
364 return 0;
365}
366
367static int marvell_config_aneg(struct ugeth_mii_info *mii_info)
368{
369 ugphy_vdbg("%s: IN", __FUNCTION__);
370
371 /* The Marvell PHY has an erratum which requires
372 * that certain registers get written in order
373 * to restart autonegotiation */
374 phy_write(mii_info, MII_BMCR, BMCR_RESET);
375
376 phy_write(mii_info, 0x1d, 0x1f);
377 phy_write(mii_info, 0x1e, 0x200c);
378 phy_write(mii_info, 0x1d, 0x5);
379 phy_write(mii_info, 0x1e, 0);
380 phy_write(mii_info, 0x1e, 0x100);
381
382 gbit_config_aneg(mii_info);
383
384 return 0;
385}
386
387static int marvell_read_status(struct ugeth_mii_info *mii_info)
388{
389 u16 status;
390 int err;
391
392 ugphy_vdbg("%s: IN", __FUNCTION__);
393
394 /* Update the link, but return if there
395 * was an error */
396 err = genmii_update_link(mii_info);
397 if (err)
398 return err;
399
400 /* If the link is up, read the speed and duplex */
401 /* If we aren't autonegotiating, assume speeds
402 * are as set */
403 if (mii_info->autoneg && mii_info->link) {
404 int speed;
405 status = phy_read(mii_info, MII_M1011_PHY_SPEC_STATUS);
406
407 /* Get the duplex */
408 if (status & MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX)
409 mii_info->duplex = DUPLEX_FULL;
410 else
411 mii_info->duplex = DUPLEX_HALF;
412
413 /* Get the speed */
414 speed = status & MII_M1011_PHY_SPEC_STATUS_SPD_MASK;
415 switch (speed) {
416 case MII_M1011_PHY_SPEC_STATUS_1000:
417 mii_info->speed = SPEED_1000;
418 break;
419 case MII_M1011_PHY_SPEC_STATUS_100:
420 mii_info->speed = SPEED_100;
421 break;
422 default:
423 mii_info->speed = SPEED_10;
424 break;
425 }
426 mii_info->pause = 0;
427 }
428
429 return 0;
430}
431
432static int marvell_ack_interrupt(struct ugeth_mii_info *mii_info)
433{
434 ugphy_vdbg("%s: IN", __FUNCTION__);
435
436 /* Clear the interrupts by reading the reg */
437 phy_read(mii_info, MII_M1011_IEVENT);
438
439 return 0;
440}
441
442static int marvell_config_intr(struct ugeth_mii_info *mii_info)
443{
444 ugphy_vdbg("%s: IN", __FUNCTION__);
445
446 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
447 phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
448 else
449 phy_write(mii_info, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
450
451 return 0;
452}
453
454static int cis820x_init(struct ugeth_mii_info *mii_info)
455{
456 ugphy_vdbg("%s: IN", __FUNCTION__);
457
458 phy_write(mii_info, MII_CIS8201_AUX_CONSTAT,
459 MII_CIS8201_AUXCONSTAT_INIT);
460 phy_write(mii_info, MII_CIS8201_EXT_CON1, MII_CIS8201_EXTCON1_INIT);
461
462 return 0;
463}
464
465static int cis820x_read_status(struct ugeth_mii_info *mii_info)
466{
467 u16 status;
468 int err;
469
470 ugphy_vdbg("%s: IN", __FUNCTION__);
471
472 /* Update the link, but return if there
473 * was an error */
474 err = genmii_update_link(mii_info);
475 if (err)
476 return err;
477
478 /* If the link is up, read the speed and duplex */
479 /* If we aren't autonegotiating, assume speeds
480 * are as set */
481 if (mii_info->autoneg && mii_info->link) {
482 int speed;
483
484 status = phy_read(mii_info, MII_CIS8201_AUX_CONSTAT);
485 if (status & MII_CIS8201_AUXCONSTAT_DUPLEX)
486 mii_info->duplex = DUPLEX_FULL;
487 else
488 mii_info->duplex = DUPLEX_HALF;
489
490 speed = status & MII_CIS8201_AUXCONSTAT_SPEED;
491
492 switch (speed) {
493 case MII_CIS8201_AUXCONSTAT_GBIT:
494 mii_info->speed = SPEED_1000;
495 break;
496 case MII_CIS8201_AUXCONSTAT_100:
497 mii_info->speed = SPEED_100;
498 break;
499 default:
500 mii_info->speed = SPEED_10;
501 break;
502 }
503 }
504
505 return 0;
506}
507
508static int cis820x_ack_interrupt(struct ugeth_mii_info *mii_info)
509{
510 ugphy_vdbg("%s: IN", __FUNCTION__);
511
512 phy_read(mii_info, MII_CIS8201_ISTAT);
513
514 return 0;
515}
516
517static int cis820x_config_intr(struct ugeth_mii_info *mii_info)
518{
519 ugphy_vdbg("%s: IN", __FUNCTION__);
520
521 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
522 phy_write(mii_info, MII_CIS8201_IMASK, MII_CIS8201_IMASK_MASK);
523 else
524 phy_write(mii_info, MII_CIS8201_IMASK, 0);
525
526 return 0;
527}
528
529#define DM9161_DELAY 10
530
531static int dm9161_read_status(struct ugeth_mii_info *mii_info)
532{
533 u16 status;
534 int err;
535
536 ugphy_vdbg("%s: IN", __FUNCTION__);
537
538 /* Update the link, but return if there
539 * was an error */
540 err = genmii_update_link(mii_info);
541 if (err)
542 return err;
543
544 /* If the link is up, read the speed and duplex */
545 /* If we aren't autonegotiating, assume speeds
546 * are as set */
547 if (mii_info->autoneg && mii_info->link) {
548 status = phy_read(mii_info, MII_DM9161_SCSR);
549 if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_100H))
550 mii_info->speed = SPEED_100;
551 else
552 mii_info->speed = SPEED_10;
553
554 if (status & (MII_DM9161_SCSR_100F | MII_DM9161_SCSR_10F))
555 mii_info->duplex = DUPLEX_FULL;
556 else
557 mii_info->duplex = DUPLEX_HALF;
558 }
559
560 return 0;
561}
562
563static int dm9161_config_aneg(struct ugeth_mii_info *mii_info)
564{
565 struct dm9161_private *priv = mii_info->priv;
566
567 ugphy_vdbg("%s: IN", __FUNCTION__);
568
569 if (0 == priv->resetdone)
570 return -EAGAIN;
571
572 return 0;
573}
574
575static void dm9161_timer(unsigned long data)
576{
577 struct ugeth_mii_info *mii_info = (struct ugeth_mii_info *)data;
578 struct dm9161_private *priv = mii_info->priv;
579 u16 status = phy_read(mii_info, MII_BMSR);
580
581 ugphy_vdbg("%s: IN", __FUNCTION__);
582
583 if (status & BMSR_ANEGCOMPLETE) {
584 priv->resetdone = 1;
585 } else
586 mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
587}
588
589static int dm9161_init(struct ugeth_mii_info *mii_info)
590{
591 struct dm9161_private *priv;
592
593 ugphy_vdbg("%s: IN", __FUNCTION__);
594
595 /* Allocate the private data structure */
596 priv = kmalloc(sizeof(struct dm9161_private), GFP_KERNEL);
597
598 if (NULL == priv)
599 return -ENOMEM;
600
601 mii_info->priv = priv;
602
603 /* Reset is not done yet */
604 priv->resetdone = 0;
605
606 phy_write(mii_info, MII_BMCR,
607 phy_read(mii_info, MII_BMCR) | BMCR_RESET);
608
609 phy_write(mii_info, MII_BMCR,
610 phy_read(mii_info, MII_BMCR) & ~BMCR_ISOLATE);
611
612 config_genmii_advert(mii_info);
613 /* Start/Restart aneg */
614 genmii_config_aneg(mii_info);
615
616 /* Start a timer for DM9161_DELAY seconds to wait
617 * for the PHY to be ready */
618 init_timer(&priv->timer);
619 priv->timer.function = &dm9161_timer;
620 priv->timer.data = (unsigned long)mii_info;
621 mod_timer(&priv->timer, jiffies + DM9161_DELAY * HZ);
622
623 return 0;
624}
625
626static void dm9161_close(struct ugeth_mii_info *mii_info)
627{
628 struct dm9161_private *priv = mii_info->priv;
629
630 ugphy_vdbg("%s: IN", __FUNCTION__);
631
632 del_timer_sync(&priv->timer);
633 kfree(priv);
634}
635
636static int dm9161_ack_interrupt(struct ugeth_mii_info *mii_info)
637{
638/* FIXME: These lines work around a bug in the mpc8325.
639Remove them from here when it's fixed */
640 if (bcsr_regs == NULL)
641 bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
642 bcsr_regs[14] |= 0x40;
643 ugphy_vdbg("%s: IN", __FUNCTION__);
644
645 /* Clear the interrupts by reading the reg */
646 phy_read(mii_info, MII_DM9161_INTR);
647
648
649 return 0;
650}
651
652static int dm9161_config_intr(struct ugeth_mii_info *mii_info)
653{
654/* FIXME: These lines work around a bug in the mpc8325.
655Remove them from here when it's fixed */
656 if (bcsr_regs == NULL) {
657 bcsr_regs = (u8 *) ioremap(BCSR_PHYS_ADDR, BCSR_SIZE);
658 bcsr_regs[14] &= ~0x40;
659 }
660 ugphy_vdbg("%s: IN", __FUNCTION__);
661
662 if (mii_info->interrupts == MII_INTERRUPT_ENABLED)
663 phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_INIT);
664 else
665 phy_write(mii_info, MII_DM9161_INTR, MII_DM9161_INTR_STOP);
666
667 return 0;
668}
669
670/* Cicada 820x */
671static struct phy_info phy_info_cis820x = {
672 .phy_id = 0x000fc440,
673 .name = "Cicada Cis8204",
674 .phy_id_mask = 0x000fffc0,
675 .features = MII_GBIT_FEATURES,
676 .init = &cis820x_init,
677 .config_aneg = &gbit_config_aneg,
678 .read_status = &cis820x_read_status,
679 .ack_interrupt = &cis820x_ack_interrupt,
680 .config_intr = &cis820x_config_intr,
681};
682
683static struct phy_info phy_info_dm9161 = {
684 .phy_id = 0x0181b880,
685 .phy_id_mask = 0x0ffffff0,
686 .name = "Davicom DM9161E",
687 .init = dm9161_init,
688 .config_aneg = dm9161_config_aneg,
689 .read_status = dm9161_read_status,
690 .close = dm9161_close,
691};
692
693static struct phy_info phy_info_dm9161a = {
694 .phy_id = 0x0181b8a0,
695 .phy_id_mask = 0x0ffffff0,
696 .name = "Davicom DM9161A",
697 .features = MII_BASIC_FEATURES,
698 .init = dm9161_init,
699 .config_aneg = dm9161_config_aneg,
700 .read_status = dm9161_read_status,
701 .ack_interrupt = dm9161_ack_interrupt,
702 .config_intr = dm9161_config_intr,
703 .close = dm9161_close,
704};
705
706static struct phy_info phy_info_marvell = {
707 .phy_id = 0x01410c00,
708 .phy_id_mask = 0xffffff00,
709 .name = "Marvell 88E11x1",
710 .features = MII_GBIT_FEATURES,
711 .init = &marvell_init,
712 .config_aneg = &marvell_config_aneg,
713 .read_status = &marvell_read_status,
714 .ack_interrupt = &marvell_ack_interrupt,
715 .config_intr = &marvell_config_intr,
716};
717
718static struct phy_info phy_info_genmii = {
719 .phy_id = 0x00000000,
720 .phy_id_mask = 0x00000000,
721 .name = "Generic MII",
722 .features = MII_BASIC_FEATURES,
723 .config_aneg = genmii_config_aneg,
724 .read_status = genmii_read_status,
725};
726
727static struct phy_info *phy_info[] = {
728 &phy_info_cis820x,
729 &phy_info_marvell,
730 &phy_info_dm9161,
731 &phy_info_dm9161a,
732 &phy_info_genmii,
733 NULL
734};
735
736u16 phy_read(struct ugeth_mii_info *mii_info, u16 regnum)
737{
738 u16 retval;
739 unsigned long flags;
740
741 ugphy_vdbg("%s: IN", __FUNCTION__);
742
743 spin_lock_irqsave(&mii_info->mdio_lock, flags);
744 retval = mii_info->mdio_read(mii_info->dev, mii_info->mii_id, regnum);
745 spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
746
747 return retval;
748}
749
750void phy_write(struct ugeth_mii_info *mii_info, u16 regnum, u16 val)
751{
752 unsigned long flags;
753
754 ugphy_vdbg("%s: IN", __FUNCTION__);
755
756 spin_lock_irqsave(&mii_info->mdio_lock, flags);
757 mii_info->mdio_write(mii_info->dev, mii_info->mii_id, regnum, val);
758 spin_unlock_irqrestore(&mii_info->mdio_lock, flags);
759}
760
761/* Use the PHY ID registers to determine what type of PHY is attached
762 * to device dev. Returns a struct phy_info structure describing that PHY.
763 */
764struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info)
765{
766 u16 phy_reg;
767 u32 phy_ID;
768 int i;
769 struct phy_info *theInfo = NULL;
770 struct net_device *dev = mii_info->dev;
771
772 ugphy_vdbg("%s: IN", __FUNCTION__);
773
774 /* Grab the bits from PHYIR1, and put them in the upper half */
775 phy_reg = phy_read(mii_info, MII_PHYSID1);
776 phy_ID = (phy_reg & 0xffff) << 16;
777
778 /* Grab the bits from PHYIR2, and put them in the lower half */
779 phy_reg = phy_read(mii_info, MII_PHYSID2);
780 phy_ID |= (phy_reg & 0xffff);
781
782 /* loop through all the known PHY types, and find one that */
783 /* matches the ID we read from the PHY. */
784 for (i = 0; phy_info[i]; i++)
785 if (phy_info[i]->phy_id == (phy_ID & phy_info[i]->phy_id_mask)){
786 theInfo = phy_info[i];
787 break;
788 }
789
790 /* This shouldn't happen, as we have generic PHY support */
791 if (theInfo == NULL) {
792 ugphy_info("%s: PHY id %x is not supported!", dev->name,
793 phy_ID);
794 return NULL;
795 } else {
796 ugphy_info("%s: PHY is %s (%x)", dev->name, theInfo->name,
797 phy_ID);
798 }
799
800 return theInfo;
801}
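get_phy_info() above matches the PHY ID against the phy_info[] table and hands back the management callbacks; the MAC driver then drives the PHY entirely through those callbacks. A sketch of the typical call sequence, assuming illustrative example_* helpers and error handling that are not part of this patch:

#include <linux/errno.h>

#include "ucc_geth.h"
#include "ucc_geth_phy.h"

/* Probe once at open time: identify the PHY, run its init hook if any,
 * then start autonegotiation (or forced mode) via config_aneg(). */
static int example_phy_start(struct ugeth_mii_info *mii_info)
{
	struct phy_info *phyinfo = get_phy_info(mii_info);
	int err;

	if (phyinfo == NULL)
		return -ENODEV;
	mii_info->phyinfo = phyinfo;

	if (phyinfo->init) {
		err = phyinfo->init(mii_info);
		if (err)
			return err;
	}
	return phyinfo->config_aneg(mii_info);
}

/* Poll periodically: read_status() refreshes mii_info->link,
 * ->speed and ->duplex for the MAC to act on. */
static int example_phy_poll(struct ugeth_mii_info *mii_info)
{
	return mii_info->phyinfo->read_status(mii_info);
}

The phy_info_timer field in ucc_geth_private above suggests the periodic part is driven from a timer in the main driver.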
diff --git a/drivers/net/ucc_geth_phy.h b/drivers/net/ucc_geth_phy.h
new file mode 100644
index 000000000000..2f98b8f1bb0a
--- /dev/null
+++ b/drivers/net/ucc_geth_phy.h
@@ -0,0 +1,217 @@
1/*
2 * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
3 *
4 * Author: Shlomi Gridish <gridish@freescale.com>
5 *
6 * Description:
7 * UCC GETH Driver -- PHY handling
8 *
9 * Changelog:
10 * Jun 28, 2006 Li Yang <LeoLi@freescale.com>
11 * - Rearrange code and style fixes
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 *
18 */
19#ifndef __UCC_GETH_PHY_H__
20#define __UCC_GETH_PHY_H__
21
22#define MII_end ((u32)-2)
23#define MII_read ((u32)-1)
24
25#define MIIMIND_BUSY 0x00000001
26#define MIIMIND_NOTVALID 0x00000004
27
28#define UGETH_AN_TIMEOUT 2000
29
30/* 1000BT control (Marvell & BCM54xx at least) */
31#define MII_1000BASETCONTROL 0x09
32#define MII_1000BASETCONTROL_FULLDUPLEXCAP 0x0200
33#define MII_1000BASETCONTROL_HALFDUPLEXCAP 0x0100
34
35/* Cicada Extended Control Register 1 */
36#define MII_CIS8201_EXT_CON1 0x17
37#define MII_CIS8201_EXTCON1_INIT 0x0000
38
39/* Cicada Interrupt Mask Register */
40#define MII_CIS8201_IMASK 0x19
41#define MII_CIS8201_IMASK_IEN 0x8000
42#define MII_CIS8201_IMASK_SPEED 0x4000
43#define MII_CIS8201_IMASK_LINK 0x2000
44#define MII_CIS8201_IMASK_DUPLEX 0x1000
45#define MII_CIS8201_IMASK_MASK 0xf000
46
47/* Cicada Interrupt Status Register */
48#define MII_CIS8201_ISTAT 0x1a
49#define MII_CIS8201_ISTAT_STATUS 0x8000
50#define MII_CIS8201_ISTAT_SPEED 0x4000
51#define MII_CIS8201_ISTAT_LINK 0x2000
52#define MII_CIS8201_ISTAT_DUPLEX 0x1000
53
54/* Cicada Auxiliary Control/Status Register */
55#define MII_CIS8201_AUX_CONSTAT 0x1c
56#define MII_CIS8201_AUXCONSTAT_INIT 0x0004
57#define MII_CIS8201_AUXCONSTAT_DUPLEX 0x0020
58#define MII_CIS8201_AUXCONSTAT_SPEED 0x0018
59#define MII_CIS8201_AUXCONSTAT_GBIT 0x0010
60#define MII_CIS8201_AUXCONSTAT_100 0x0008
61
62/* 88E1011 PHY Status Register */
63#define MII_M1011_PHY_SPEC_STATUS 0x11
64#define MII_M1011_PHY_SPEC_STATUS_1000 0x8000
65#define MII_M1011_PHY_SPEC_STATUS_100 0x4000
66#define MII_M1011_PHY_SPEC_STATUS_SPD_MASK 0xc000
67#define MII_M1011_PHY_SPEC_STATUS_FULLDUPLEX 0x2000
68#define MII_M1011_PHY_SPEC_STATUS_RESOLVED 0x0800
69#define MII_M1011_PHY_SPEC_STATUS_LINK 0x0400
70
71#define MII_M1011_IEVENT 0x13
72#define MII_M1011_IEVENT_CLEAR 0x0000
73
74#define MII_M1011_IMASK 0x12
75#define MII_M1011_IMASK_INIT 0x6400
76#define MII_M1011_IMASK_CLEAR 0x0000
77
78#define MII_DM9161_SCR 0x10
79#define MII_DM9161_SCR_INIT 0x0610
80
81/* DM9161 Specified Configuration and Status Register */
82#define MII_DM9161_SCSR 0x11
83#define MII_DM9161_SCSR_100F 0x8000
84#define MII_DM9161_SCSR_100H 0x4000
85#define MII_DM9161_SCSR_10F 0x2000
86#define MII_DM9161_SCSR_10H 0x1000
87
88/* DM9161 Interrupt Register */
89#define MII_DM9161_INTR 0x15
90#define MII_DM9161_INTR_PEND 0x8000
91#define MII_DM9161_INTR_DPLX_MASK 0x0800
92#define MII_DM9161_INTR_SPD_MASK 0x0400
93#define MII_DM9161_INTR_LINK_MASK 0x0200
94#define MII_DM9161_INTR_MASK 0x0100
95#define MII_DM9161_INTR_DPLX_CHANGE 0x0010
96#define MII_DM9161_INTR_SPD_CHANGE 0x0008
97#define MII_DM9161_INTR_LINK_CHANGE 0x0004
98#define MII_DM9161_INTR_INIT 0x0000
99#define MII_DM9161_INTR_STOP \
100(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
101 | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
102
103/* DM9161 10BT Configuration/Status */
104#define MII_DM9161_10BTCSR 0x12
105#define MII_DM9161_10BTCSR_INIT 0x7800
106
107#define MII_BASIC_FEATURES (SUPPORTED_10baseT_Half | \
108 SUPPORTED_10baseT_Full | \
109 SUPPORTED_100baseT_Half | \
110 SUPPORTED_100baseT_Full | \
111 SUPPORTED_Autoneg | \
112 SUPPORTED_TP | \
113 SUPPORTED_MII)
114
115#define MII_GBIT_FEATURES (MII_BASIC_FEATURES | \
116 SUPPORTED_1000baseT_Half | \
117 SUPPORTED_1000baseT_Full)
118
119#define MII_READ_COMMAND 0x00000001
120
121#define MII_INTERRUPT_DISABLED 0x0
122#define MII_INTERRUPT_ENABLED 0x1
123/* Taken from mii_if_info and sungem_phy.h */
124struct ugeth_mii_info {
125 /* Information about the PHY type */
126 /* And management functions */
127 struct phy_info *phyinfo;
128
129 ucc_mii_mng_t *mii_regs;
130
131 /* forced speed & duplex (no autoneg)
132 * partner speed & duplex & pause (autoneg)
133 */
134 int speed;
135 int duplex;
136 int pause;
137
138 /* The most recently read link state */
139 int link;
140
141 /* Enabled Interrupts */
142 u32 interrupts;
143
144 u32 advertising;
145 int autoneg;
146 int mii_id;
147
148 /* private data pointer */
149 /* For use by PHYs to maintain extra state */
150 void *priv;
151
152 /* Provided by host chip */
153 struct net_device *dev;
154
155 /* A lock to ensure that only one thing can read/write
156 * the MDIO bus at a time */
157 spinlock_t mdio_lock;
158
159 /* Provided by ethernet driver */
160 int (*mdio_read) (struct net_device * dev, int mii_id, int reg);
161 void (*mdio_write) (struct net_device * dev, int mii_id, int reg,
162 int val);
163};
164
165/* struct phy_info: a structure which defines attributes for a PHY
166 *
167 * phy_id will contain a number which identifies the PHY. During
168 * startup, the driver will poll the PHY to find out what its
169 * UID (as defined by registers 2 and 3) is. The 32-bit value
170 * read from the PHY is ANDed with phy_id_mask to discard any
171 * bits which may change between revisions and are unimportant
172 * to functionality.
173 *
174 * The callbacks below each take a ugeth_mii_info structure.
175 * Each PHY must declare config_aneg and read_status.
176 */
177struct phy_info {
178 u32 phy_id;
179 char *name;
180 unsigned int phy_id_mask;
181 u32 features;
182
183 /* Called to initialize the PHY */
184 int (*init) (struct ugeth_mii_info * mii_info);
185
186 /* Called to suspend the PHY for power */
187 int (*suspend) (struct ugeth_mii_info * mii_info);
188
189 /* Reconfigures autonegotiation (or disables it) */
190 int (*config_aneg) (struct ugeth_mii_info * mii_info);
191
192 /* Determines the negotiated speed and duplex */
193 int (*read_status) (struct ugeth_mii_info * mii_info);
194
195 /* Clears any pending interrupts */
196 int (*ack_interrupt) (struct ugeth_mii_info * mii_info);
197
198 /* Enables or disables interrupts */
199 int (*config_intr) (struct ugeth_mii_info * mii_info);
200
201 /* Clears up any memory if needed */
202 void (*close) (struct ugeth_mii_info * mii_info);
203};
204
205struct phy_info *get_phy_info(struct ugeth_mii_info *mii_info);
206void write_phy_reg(struct net_device *dev, int mii_id, int regnum, int value);
207int read_phy_reg(struct net_device *dev, int mii_id, int regnum);
208void mii_clear_phy_interrupt(struct ugeth_mii_info *mii_info);
209void mii_configure_phy_interrupt(struct ugeth_mii_info *mii_info,
210 u32 interrupts);
211
212struct dm9161_private {
213 struct timer_list timer;
214 int resetdone;
215};
216
217#endif /* __UCC_GETH_PHY_H__ */
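The phy_id/phy_id_mask pair documented above is matched against the composite of the PHYSID1 and PHYSID2 registers, as get_phy_info() does in ucc_geth_phy.c. A small sketch of that comparison; the helper name is illustrative only:

#include "ucc_geth.h"
#include "ucc_geth_phy.h"

/* PHYSID1 supplies the upper 16 bits of the composite ID, PHYSID2 the
 * lower 16; the composite is masked before comparing against phy_id. */
static int example_phy_id_matches(u16 physid1, u16 physid2,
				  const struct phy_info *info)
{
	u32 phy_ID = ((u32)physid1 << 16) | physid2;

	return info->phy_id == (phy_ID & info->phy_id_mask);
}

With the Davicom DM9161E entry, for example (phy_id 0x0181b880, mask 0x0ffffff0), any revision reading 0x0181b88x matches.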
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index d3d0ec970318..ae971080e2e4 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
30*/ 30*/
31 31
32#define DRV_NAME "via-rhine" 32#define DRV_NAME "via-rhine"
33#define DRV_VERSION "1.4.0" 33#define DRV_VERSION "1.4.1"
34#define DRV_RELDATE "June-27-2006" 34#define DRV_RELDATE "July-24-2006"
35 35
36 36
37/* A few user-configurable values. 37/* A few user-configurable values.
@@ -44,6 +44,10 @@ static int max_interrupt_work = 20;
44 Setting to > 1518 effectively disables this feature. */ 44 Setting to > 1518 effectively disables this feature. */
45static int rx_copybreak; 45static int rx_copybreak;
46 46
47/* Work-around for broken BIOSes: they are unable to get the chip back out of
48 power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
49static int avoid_D3;
50
47/* 51/*
48 * In case you are looking for 'options[]' or 'full_duplex[]', they 52 * In case you are looking for 'options[]' or 'full_duplex[]', they
49 * are gone. Use ethtool(8) instead. 53 * are gone. Use ethtool(8) instead.
@@ -63,7 +67,11 @@ static const int multicast_filter_limit = 32;
63 There are no ill effects from too-large receive rings. */ 67 There are no ill effects from too-large receive rings. */
64#define TX_RING_SIZE 16 68#define TX_RING_SIZE 16
65#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */ 69#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
70#ifdef CONFIG_VIA_RHINE_NAPI
71#define RX_RING_SIZE 64
72#else
66#define RX_RING_SIZE 16 73#define RX_RING_SIZE 16
74#endif
67 75
68 76
69/* Operational parameters that usually are not changed. */ 77/* Operational parameters that usually are not changed. */
@@ -116,9 +124,11 @@ MODULE_LICENSE("GPL");
116module_param(max_interrupt_work, int, 0); 124module_param(max_interrupt_work, int, 0);
117module_param(debug, int, 0); 125module_param(debug, int, 0);
118module_param(rx_copybreak, int, 0); 126module_param(rx_copybreak, int, 0);
127module_param(avoid_D3, bool, 0);
119MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt"); 128MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
120MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); 129MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
121MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); 130MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
131MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
122 132
123/* 133/*
124 Theory of Operation 134 Theory of Operation
@@ -396,7 +406,7 @@ static void rhine_tx_timeout(struct net_device *dev);
396static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); 406static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
397static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); 407static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
398static void rhine_tx(struct net_device *dev); 408static void rhine_tx(struct net_device *dev);
399static void rhine_rx(struct net_device *dev); 409static int rhine_rx(struct net_device *dev, int limit);
400static void rhine_error(struct net_device *dev, int intr_status); 410static void rhine_error(struct net_device *dev, int intr_status);
401static void rhine_set_rx_mode(struct net_device *dev); 411static void rhine_set_rx_mode(struct net_device *dev);
402static struct net_device_stats *rhine_get_stats(struct net_device *dev); 412static struct net_device_stats *rhine_get_stats(struct net_device *dev);
@@ -564,6 +574,32 @@ static void rhine_poll(struct net_device *dev)
564} 574}
565#endif 575#endif
566 576
577#ifdef CONFIG_VIA_RHINE_NAPI
578static int rhine_napipoll(struct net_device *dev, int *budget)
579{
580 struct rhine_private *rp = netdev_priv(dev);
581 void __iomem *ioaddr = rp->base;
582 int done, limit = min(dev->quota, *budget);
583
584 done = rhine_rx(dev, limit);
585 *budget -= done;
586 dev->quota -= done;
587
588 if (done < limit) {
589 netif_rx_complete(dev);
590
591 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
592 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
593 IntrTxDone | IntrTxError | IntrTxUnderrun |
594 IntrPCIErr | IntrStatsMax | IntrLinkChange,
595 ioaddr + IntrEnable);
596 return 0;
597 }
598 else
599 return 1;
600}
601#endif
602
567static void rhine_hw_init(struct net_device *dev, long pioaddr) 603static void rhine_hw_init(struct net_device *dev, long pioaddr)
568{ 604{
569 struct rhine_private *rp = netdev_priv(dev); 605 struct rhine_private *rp = netdev_priv(dev);
@@ -744,6 +780,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
744#ifdef CONFIG_NET_POLL_CONTROLLER 780#ifdef CONFIG_NET_POLL_CONTROLLER
745 dev->poll_controller = rhine_poll; 781 dev->poll_controller = rhine_poll;
746#endif 782#endif
783#ifdef CONFIG_VIA_RHINE_NAPI
784 dev->poll = rhine_napipoll;
785 dev->weight = 64;
786#endif
747 if (rp->quirks & rqRhineI) 787 if (rp->quirks & rqRhineI)
748 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 788 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
749 789
@@ -789,6 +829,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
789 } 829 }
790 } 830 }
791 rp->mii_if.phy_id = phy_id; 831 rp->mii_if.phy_id = phy_id;
832 if (debug > 1 && avoid_D3)
833 printk(KERN_INFO "%s: No D3 power state at shutdown.\n",
834 dev->name);
792 835
793 return 0; 836 return 0;
794 837
@@ -1014,6 +1057,8 @@ static void init_registers(struct net_device *dev)
1014 1057
1015 rhine_set_rx_mode(dev); 1058 rhine_set_rx_mode(dev);
1016 1059
1060 netif_poll_enable(dev);
1061
1017 /* Enable interrupts by setting the interrupt mask. */ 1062 /* Enable interrupts by setting the interrupt mask. */
1018 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow | 1063 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1019 IntrRxDropped | IntrRxNoBuf | IntrTxAborted | 1064 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
@@ -1268,8 +1313,18 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *
1268 dev->name, intr_status); 1313 dev->name, intr_status);
1269 1314
1270 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped | 1315 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1271 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) 1316 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
1272 rhine_rx(dev); 1317#ifdef CONFIG_VIA_RHINE_NAPI
1318 iowrite16(IntrTxAborted |
1319 IntrTxDone | IntrTxError | IntrTxUnderrun |
1320 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1321 ioaddr + IntrEnable);
1322
1323 netif_rx_schedule(dev);
1324#else
1325 rhine_rx(dev, RX_RING_SIZE);
1326#endif
1327 }
1273 1328
1274 if (intr_status & (IntrTxErrSummary | IntrTxDone)) { 1329 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1275 if (intr_status & IntrTxErrSummary) { 1330 if (intr_status & IntrTxErrSummary) {
@@ -1367,13 +1422,12 @@ static void rhine_tx(struct net_device *dev)
1367 spin_unlock(&rp->lock); 1422 spin_unlock(&rp->lock);
1368} 1423}
1369 1424
1370/* This routine is logically part of the interrupt handler, but isolated 1425/* Process up to limit frames from receive ring */
1371 for clarity and better register allocation. */ 1426static int rhine_rx(struct net_device *dev, int limit)
1372static void rhine_rx(struct net_device *dev)
1373{ 1427{
1374 struct rhine_private *rp = netdev_priv(dev); 1428 struct rhine_private *rp = netdev_priv(dev);
1429 int count;
1375 int entry = rp->cur_rx % RX_RING_SIZE; 1430 int entry = rp->cur_rx % RX_RING_SIZE;
1376 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1377 1431
1378 if (debug > 4) { 1432 if (debug > 4) {
1379 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n", 1433 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
@@ -1382,16 +1436,18 @@ static void rhine_rx(struct net_device *dev)
1382 } 1436 }
1383 1437
1384 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1438 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1385 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) { 1439 for (count = 0; count < limit; ++count) {
1386 struct rx_desc *desc = rp->rx_head_desc; 1440 struct rx_desc *desc = rp->rx_head_desc;
1387 u32 desc_status = le32_to_cpu(desc->rx_status); 1441 u32 desc_status = le32_to_cpu(desc->rx_status);
1388 int data_size = desc_status >> 16; 1442 int data_size = desc_status >> 16;
1389 1443
1444 if (desc_status & DescOwn)
1445 break;
1446
1390 if (debug > 4) 1447 if (debug > 4)
1391 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n", 1448 printk(KERN_DEBUG "rhine_rx() status is %8.8x.\n",
1392 desc_status); 1449 desc_status);
1393 if (--boguscnt < 0) 1450
1394 break;
1395 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { 1451 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1396 if ((desc_status & RxWholePkt) != RxWholePkt) { 1452 if ((desc_status & RxWholePkt) != RxWholePkt) {
1397 printk(KERN_WARNING "%s: Oversized Ethernet " 1453 printk(KERN_WARNING "%s: Oversized Ethernet "
@@ -1460,7 +1516,11 @@ static void rhine_rx(struct net_device *dev)
1460 PCI_DMA_FROMDEVICE); 1516 PCI_DMA_FROMDEVICE);
1461 } 1517 }
1462 skb->protocol = eth_type_trans(skb, dev); 1518 skb->protocol = eth_type_trans(skb, dev);
1519#ifdef CONFIG_VIA_RHINE_NAPI
1520 netif_receive_skb(skb);
1521#else
1463 netif_rx(skb); 1522 netif_rx(skb);
1523#endif
1464 dev->last_rx = jiffies; 1524 dev->last_rx = jiffies;
1465 rp->stats.rx_bytes += pkt_len; 1525 rp->stats.rx_bytes += pkt_len;
1466 rp->stats.rx_packets++; 1526 rp->stats.rx_packets++;
@@ -1487,6 +1547,8 @@ static void rhine_rx(struct net_device *dev)
1487 } 1547 }
1488 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn); 1548 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1489 } 1549 }
1550
1551 return count;
1490} 1552}
1491 1553
1492/* 1554/*
@@ -1776,6 +1838,7 @@ static int rhine_close(struct net_device *dev)
1776 spin_lock_irq(&rp->lock); 1838 spin_lock_irq(&rp->lock);
1777 1839
1778 netif_stop_queue(dev); 1840 netif_stop_queue(dev);
1841 netif_poll_disable(dev);
1779 1842
1780 if (debug > 1) 1843 if (debug > 1)
1781 printk(KERN_DEBUG "%s: Shutting down ethercard, " 1844 printk(KERN_DEBUG "%s: Shutting down ethercard, "
@@ -1857,7 +1920,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
1857 } 1920 }
1858 1921
1859 /* Hit power state D3 (sleep) */ 1922 /* Hit power state D3 (sleep) */
1860 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW); 1923 if (!avoid_D3)
1924 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1861 1925
1862 /* TODO: Check use of pci_enable_wake() */ 1926 /* TODO: Check use of pci_enable_wake() */
1863 1927
diff --git a/drivers/net/wan/c101.c b/drivers/net/wan/c101.c
index 435e91ec4620..6b63b350cd52 100644
--- a/drivers/net/wan/c101.c
+++ b/drivers/net/wan/c101.c
@@ -118,7 +118,7 @@ static inline void openwin(card_t *card, u8 page)
118 118
119static inline void set_carrier(port_t *port) 119static inline void set_carrier(port_t *port)
120{ 120{
121 if (!sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD) 121 if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
122 netif_carrier_on(port_to_dev(port)); 122 netif_carrier_on(port_to_dev(port));
123 else 123 else
124 netif_carrier_off(port_to_dev(port)); 124 netif_carrier_off(port_to_dev(port));
@@ -127,10 +127,10 @@ static inline void set_carrier(port_t *port)
127 127
128static void sca_msci_intr(port_t *port) 128static void sca_msci_intr(port_t *port)
129{ 129{
130 u8 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI ST1 status */ 130 u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */
131 131
132 /* Reset MSCI TX underrun status bit */ 132 /* Reset MSCI TX underrun and CDCD (ignored) status bit */
133 sca_out(stat & ST1_UDRN, MSCI0_OFFSET + ST1, port); 133 sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);
134 134
135 if (stat & ST1_UDRN) { 135 if (stat & ST1_UDRN) {
136 struct net_device_stats *stats = hdlc_stats(port_to_dev(port)); 136 struct net_device_stats *stats = hdlc_stats(port_to_dev(port));
@@ -138,6 +138,7 @@ static void sca_msci_intr(port_t *port)
138 stats->tx_fifo_errors++; 138 stats->tx_fifo_errors++;
139 } 139 }
140 140
141 stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
141 /* Reset MSCI CDCD status bit - uses ch#2 DCD input */ 142 /* Reset MSCI CDCD status bit - uses ch#2 DCD input */
142 sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port); 143 sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);
143 144
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index 7caa8dc88a58..b1ba1872f315 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -500,8 +500,8 @@ MODULE_LICENSE("GPL");
500 500
501/* This is set up so that only a single autoprobe takes place per call. 501/* This is set up so that only a single autoprobe takes place per call.
502ISA device autoprobes on a running machine are not recommended. */ 502ISA device autoprobes on a running machine are not recommended. */
503int 503
504init_module(void) 504int __init init_module(void)
505{ 505{
506 struct net_device *dev; 506 struct net_device *dev;
507 int this_dev, found = 0; 507 int this_dev, found = 0;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index dafaa5ff5aa6..d500012fdc7a 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1042,6 +1042,9 @@ static int prism2_reset_port(struct net_device *dev)
1042 dev->name, local->fragm_threshold); 1042 dev->name, local->fragm_threshold);
1043 } 1043 }
1044 1044
1045 /* Some firmwares lose antenna selection settings on reset */
1046 (void) hostap_set_antsel(local);
1047
1045 return res; 1048 return res;
1046} 1049}
1047 1050
diff --git a/drivers/net/wireless/spectrum_cs.c b/drivers/net/wireless/spectrum_cs.c
index 7f78b7801fb3..bcc7038130f6 100644
--- a/drivers/net/wireless/spectrum_cs.c
+++ b/drivers/net/wireless/spectrum_cs.c
@@ -242,7 +242,7 @@ spectrum_reset(struct pcmcia_device *link, int idle)
242 u_int save_cor; 242 u_int save_cor;
243 243
244 /* Doing it if hardware is gone is guaranteed crash */ 244 /* Doing it if hardware is gone is guaranteed crash */
245 if (pcmcia_dev_present(link)) 245 if (!pcmcia_dev_present(link))
246 return -ENODEV; 246 return -ENODEV;
247 247
248 /* Save original COR value */ 248 /* Save original COR value */