author		Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-06-14 15:56:50 -0400
committer	Jeff Kirsher <jeffrey.t.kirsher@intel.com>	2011-08-12 06:41:10 -0400
commit		b544dbac41218fd015ac79455cfc1e57736e9b0c (patch)
tree		6881af397456d0237dbb123ccb585a1a8086c166 /drivers/net/ethernet
parent		de69a4f240a1d43bc6a587c836c5ce1c66e36f23 (diff)
davinci*/tlan/cpmac: Move the Texas Instruments (TI) drivers
Move the Texas Instruments drivers to drivers/net/ethernet/ti/ and
make the necessary Kconfig and Makefile changes.
CC: Sriram <srk@ti.com>
CC: Vinay Hegde <vinay.hegde@ti.com>
CC: Cyril Chemparathy <cyril@ti.com>
CC: Samuel Chessman <chessman@tux.org>
CC: <torben.mathiasen@compaq.com>
CC: Eugene Konev <ejka@imfi.kspu.ru>
CC: Florian Fainelli <florian@openwrt.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/Kconfig		   1
-rw-r--r--	drivers/net/ethernet/Makefile		   1
-rw-r--r--	drivers/net/ethernet/ti/Kconfig		  76
-rw-r--r--	drivers/net/ethernet/ti/Makefile	   9
-rw-r--r--	drivers/net/ethernet/ti/cpmac.c		1305
-rw-r--r--	drivers/net/ethernet/ti/davinci_cpdma.c	 970
-rw-r--r--	drivers/net/ethernet/ti/davinci_cpdma.h	 109
-rw-r--r--	drivers/net/ethernet/ti/davinci_emac.c	2047
-rw-r--r--	drivers/net/ethernet/ti/davinci_mdio.c	 475
-rw-r--r--	drivers/net/ethernet/ti/tlan.c		3258
-rw-r--r--	drivers/net/ethernet/ti/tlan.h		 546
11 files changed, 8797 insertions, 0 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index f53a4bc53ddb..3983e702b97a 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -63,6 +63,7 @@ source "drivers/net/ethernet/smsc/Kconfig"
 source "drivers/net/ethernet/stmicro/Kconfig"
 source "drivers/net/ethernet/sun/Kconfig"
 source "drivers/net/ethernet/tehuti/Kconfig"
+source "drivers/net/ethernet/ti/Kconfig"
 source "drivers/net/ethernet/toshiba/Kconfig"
 source "drivers/net/ethernet/tundra/Kconfig"
 source "drivers/net/ethernet/via/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 4491d8491adb..873d27591466 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -43,6 +43,7 @@ obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
 obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
 obj-$(CONFIG_NET_VENDOR_SUN) += sun/
 obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
+obj-$(CONFIG_NET_VENDOR_TI) += ti/
 obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
 obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
 obj-$(CONFIG_NET_VENDOR_VIA) += via/
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
new file mode 100644
index 000000000000..1284319ba7e0
--- /dev/null
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -0,0 +1,76 @@
+#
+# TI device configuration
+#
+
+config NET_VENDOR_TI
+	bool "Texas Instruments (TI) devices"
+	depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3))
+	---help---
+	  If you have a network (Ethernet) card belonging to this class, say Y
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all
+	  the questions about TI devices. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+if NET_VENDOR_TI
+
+config TI_DAVINCI_EMAC
+	tristate "TI DaVinci EMAC Support"
+	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	select TI_DAVINCI_MDIO
+	select TI_DAVINCI_CPDMA
+	select PHYLIB
+	---help---
+	  This driver supports TI's DaVinci Ethernet.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_emac. This is recommended.
+
+config TI_DAVINCI_MDIO
+	tristate "TI DaVinci MDIO Support"
+	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	select PHYLIB
+	---help---
+	  This driver supports TI's DaVinci MDIO module.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_mdio. This is recommended.
+
+config TI_DAVINCI_CPDMA
+	tristate "TI DaVinci CPDMA Support"
+	depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 )
+	---help---
+	  This driver supports TI's DaVinci CPDMA DMA engine.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called davinci_cpdma. This is recommended.
+
+config TLAN
+	tristate "TI ThunderLAN support"
+	depends on (PCI || EISA)
+	---help---
+	  If you have a PCI Ethernet network card based on the ThunderLAN chip
+	  which is supported by this driver, say Y and read the
+	  Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  Devices currently supported by this driver are Compaq Netelligent,
+	  Compaq NetFlex and Olicom cards. Please read the file
+	  <file:Documentation/networking/tlan.txt> for more details.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called tlan.
+
+	  Please email feedback to <torben.mathiasen@compaq.com>.
+
+config CPMAC
+	tristate "TI AR7 CPMAC Ethernet support (EXPERIMENTAL)"
+	depends on EXPERIMENTAL && AR7
+	select PHYLIB
+	---help---
+	  TI AR7 CPMAC Ethernet support
+
+endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
new file mode 100644
index 000000000000..aedb3af74e5a
--- /dev/null
+++ b/drivers/net/ethernet/ti/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the TI network device drivers.
+#
+
+obj-$(CONFIG_TLAN) += tlan.o
+obj-$(CONFIG_CPMAC) += cpmac.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
new file mode 100644
index 000000000000..e0638cb4b07c
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -0,0 +1,1305 @@
1 | /* | ||
2 | * Copyright (C) 2006, 2007 Eugene Konev | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/moduleparam.h> | ||
23 | |||
24 | #include <linux/sched.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/errno.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <linux/delay.h> | ||
30 | |||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/if_vlan.h> | ||
33 | #include <linux/etherdevice.h> | ||
34 | #include <linux/ethtool.h> | ||
35 | #include <linux/skbuff.h> | ||
36 | #include <linux/mii.h> | ||
37 | #include <linux/phy.h> | ||
38 | #include <linux/phy_fixed.h> | ||
39 | #include <linux/platform_device.h> | ||
40 | #include <linux/dma-mapping.h> | ||
41 | #include <linux/clk.h> | ||
42 | #include <linux/gpio.h> | ||
43 | #include <linux/atomic.h> | ||
44 | |||
45 | MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); | ||
46 | MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); | ||
47 | MODULE_LICENSE("GPL"); | ||
48 | MODULE_ALIAS("platform:cpmac"); | ||
49 | |||
50 | static int debug_level = 8; | ||
51 | static int dumb_switch; | ||
52 | |||
53 | /* Next 2 are only used in cpmac_probe, so it's pointless to change them */ | ||
54 | module_param(debug_level, int, 0444); | ||
55 | module_param(dumb_switch, int, 0444); | ||
56 | |||
57 | MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); | ||
58 | MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); | ||
59 | |||
60 | #define CPMAC_VERSION "0.5.2" | ||
61 | /* frame size + 802.1q tag + FCS size */ | ||
62 | #define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) | ||
63 | #define CPMAC_QUEUES 8 | ||
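For orientation, CPMAC_SKB_SIZE above expands to a fixed 1522 bytes given the usual header constants (ETH_FRAME_LEN 1514, ETH_FCS_LEN 4, VLAN_HLEN 4); a standalone check, not part of the patch:

    #include <stdio.h>

    int main(void)
    {
        /* ETH_FRAME_LEN, ETH_FCS_LEN and VLAN_HLEN, as defined in the
         * kernel's if_ether.h / if_vlan.h headers */
        int frame = 1514, fcs = 4, vlan = 4;

        printf("CPMAC_SKB_SIZE = %d\n", frame + fcs + vlan); /* 1522 */
        return 0;
    }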
64 | |||
65 | /* Ethernet registers */ | ||
66 | #define CPMAC_TX_CONTROL 0x0004 | ||
67 | #define CPMAC_TX_TEARDOWN 0x0008 | ||
68 | #define CPMAC_RX_CONTROL 0x0014 | ||
69 | #define CPMAC_RX_TEARDOWN 0x0018 | ||
70 | #define CPMAC_MBP 0x0100 | ||
71 | # define MBP_RXPASSCRC 0x40000000 | ||
72 | # define MBP_RXQOS 0x20000000 | ||
73 | # define MBP_RXNOCHAIN 0x10000000 | ||
74 | # define MBP_RXCMF 0x01000000 | ||
75 | # define MBP_RXSHORT 0x00800000 | ||
76 | # define MBP_RXCEF 0x00400000 | ||
77 | # define MBP_RXPROMISC 0x00200000 | ||
78 | # define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) | ||
79 | # define MBP_RXBCAST 0x00002000 | ||
80 | # define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) | ||
81 | # define MBP_RXMCAST 0x00000020 | ||
82 | # define MBP_MCASTCHAN(channel) ((channel) & 0x7) | ||
83 | #define CPMAC_UNICAST_ENABLE 0x0104 | ||
84 | #define CPMAC_UNICAST_CLEAR 0x0108 | ||
85 | #define CPMAC_MAX_LENGTH 0x010c | ||
86 | #define CPMAC_BUFFER_OFFSET 0x0110 | ||
87 | #define CPMAC_MAC_CONTROL 0x0160 | ||
88 | # define MAC_TXPTYPE 0x00000200 | ||
89 | # define MAC_TXPACE 0x00000040 | ||
90 | # define MAC_MII 0x00000020 | ||
91 | # define MAC_TXFLOW 0x00000010 | ||
92 | # define MAC_RXFLOW 0x00000008 | ||
93 | # define MAC_MTEST 0x00000004 | ||
94 | # define MAC_LOOPBACK 0x00000002 | ||
95 | # define MAC_FDX 0x00000001 | ||
96 | #define CPMAC_MAC_STATUS 0x0164 | ||
97 | # define MAC_STATUS_QOS 0x00000004 | ||
98 | # define MAC_STATUS_RXFLOW 0x00000002 | ||
99 | # define MAC_STATUS_TXFLOW 0x00000001 | ||
100 | #define CPMAC_TX_INT_ENABLE 0x0178 | ||
101 | #define CPMAC_TX_INT_CLEAR 0x017c | ||
102 | #define CPMAC_MAC_INT_VECTOR 0x0180 | ||
103 | # define MAC_INT_STATUS 0x00080000 | ||
104 | # define MAC_INT_HOST 0x00040000 | ||
105 | # define MAC_INT_RX 0x00020000 | ||
106 | # define MAC_INT_TX 0x00010000 | ||
107 | #define CPMAC_MAC_EOI_VECTOR 0x0184 | ||
108 | #define CPMAC_RX_INT_ENABLE 0x0198 | ||
109 | #define CPMAC_RX_INT_CLEAR 0x019c | ||
110 | #define CPMAC_MAC_INT_ENABLE 0x01a8 | ||
111 | #define CPMAC_MAC_INT_CLEAR 0x01ac | ||
112 | #define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) | ||
113 | #define CPMAC_MAC_ADDR_MID 0x01d0 | ||
114 | #define CPMAC_MAC_ADDR_HI 0x01d4 | ||
115 | #define CPMAC_MAC_HASH_LO 0x01d8 | ||
116 | #define CPMAC_MAC_HASH_HI 0x01dc | ||
117 | #define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4) | ||
118 | #define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4) | ||
119 | #define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4) | ||
120 | #define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4) | ||
121 | #define CPMAC_REG_END 0x0680 | ||
122 | /* | ||
123 | * Rx/Tx statistics | ||
124 | * TODO: use some of them to fill stats in cpmac_stats() | ||
125 | */ | ||
126 | #define CPMAC_STATS_RX_GOOD 0x0200 | ||
127 | #define CPMAC_STATS_RX_BCAST 0x0204 | ||
128 | #define CPMAC_STATS_RX_MCAST 0x0208 | ||
129 | #define CPMAC_STATS_RX_PAUSE 0x020c | ||
130 | #define CPMAC_STATS_RX_CRC 0x0210 | ||
131 | #define CPMAC_STATS_RX_ALIGN 0x0214 | ||
132 | #define CPMAC_STATS_RX_OVER 0x0218 | ||
133 | #define CPMAC_STATS_RX_JABBER 0x021c | ||
134 | #define CPMAC_STATS_RX_UNDER 0x0220 | ||
135 | #define CPMAC_STATS_RX_FRAG 0x0224 | ||
136 | #define CPMAC_STATS_RX_FILTER 0x0228 | ||
137 | #define CPMAC_STATS_RX_QOSFILTER 0x022c | ||
138 | #define CPMAC_STATS_RX_OCTETS 0x0230 | ||
139 | |||
140 | #define CPMAC_STATS_TX_GOOD 0x0234 | ||
141 | #define CPMAC_STATS_TX_BCAST 0x0238 | ||
142 | #define CPMAC_STATS_TX_MCAST 0x023c | ||
143 | #define CPMAC_STATS_TX_PAUSE 0x0240 | ||
144 | #define CPMAC_STATS_TX_DEFER 0x0244 | ||
145 | #define CPMAC_STATS_TX_COLLISION 0x0248 | ||
146 | #define CPMAC_STATS_TX_SINGLECOLL 0x024c | ||
147 | #define CPMAC_STATS_TX_MULTICOLL 0x0250 | ||
148 | #define CPMAC_STATS_TX_EXCESSCOLL 0x0254 | ||
149 | #define CPMAC_STATS_TX_LATECOLL 0x0258 | ||
150 | #define CPMAC_STATS_TX_UNDERRUN 0x025c | ||
151 | #define CPMAC_STATS_TX_CARRIERSENSE 0x0260 | ||
152 | #define CPMAC_STATS_TX_OCTETS 0x0264 | ||
153 | |||
154 | #define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg))) | ||
155 | #define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \ | ||
156 | (reg))) | ||
157 | |||
158 | /* MDIO bus */ | ||
159 | #define CPMAC_MDIO_VERSION 0x0000 | ||
160 | #define CPMAC_MDIO_CONTROL 0x0004 | ||
161 | # define MDIOC_IDLE 0x80000000 | ||
162 | # define MDIOC_ENABLE 0x40000000 | ||
163 | # define MDIOC_PREAMBLE 0x00100000 | ||
164 | # define MDIOC_FAULT 0x00080000 | ||
165 | # define MDIOC_FAULTDETECT 0x00040000 | ||
166 | # define MDIOC_INTTEST 0x00020000 | ||
167 | # define MDIOC_CLKDIV(div) ((div) & 0xff) | ||
168 | #define CPMAC_MDIO_ALIVE 0x0008 | ||
169 | #define CPMAC_MDIO_LINK 0x000c | ||
170 | #define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8) | ||
171 | # define MDIO_BUSY 0x80000000 | ||
172 | # define MDIO_WRITE 0x40000000 | ||
173 | # define MDIO_REG(reg) (((reg) & 0x1f) << 21) | ||
174 | # define MDIO_PHY(phy) (((phy) & 0x1f) << 16) | ||
175 | # define MDIO_DATA(data) ((data) & 0xffff) | ||
176 | #define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8) | ||
177 | # define PHYSEL_LINKSEL 0x00000040 | ||
178 | # define PHYSEL_LINKINT 0x00000020 | ||
179 | |||
180 | struct cpmac_desc { | ||
181 | u32 hw_next; | ||
182 | u32 hw_data; | ||
183 | u16 buflen; | ||
184 | u16 bufflags; | ||
185 | u16 datalen; | ||
186 | u16 dataflags; | ||
187 | #define CPMAC_SOP 0x8000 | ||
188 | #define CPMAC_EOP 0x4000 | ||
189 | #define CPMAC_OWN 0x2000 | ||
190 | #define CPMAC_EOQ 0x1000 | ||
191 | struct sk_buff *skb; | ||
192 | struct cpmac_desc *next; | ||
193 | struct cpmac_desc *prev; | ||
194 | dma_addr_t mapping; | ||
195 | dma_addr_t data_mapping; | ||
196 | }; | ||
197 | |||
198 | struct cpmac_priv { | ||
199 | spinlock_t lock; | ||
200 | spinlock_t rx_lock; | ||
201 | struct cpmac_desc *rx_head; | ||
202 | int ring_size; | ||
203 | struct cpmac_desc *desc_ring; | ||
204 | dma_addr_t dma_ring; | ||
205 | void __iomem *regs; | ||
206 | struct mii_bus *mii_bus; | ||
207 | struct phy_device *phy; | ||
208 | char phy_name[MII_BUS_ID_SIZE + 3]; | ||
209 | int oldlink, oldspeed, oldduplex; | ||
210 | u32 msg_enable; | ||
211 | struct net_device *dev; | ||
212 | struct work_struct reset_work; | ||
213 | struct platform_device *pdev; | ||
214 | struct napi_struct napi; | ||
215 | atomic_t reset_pending; | ||
216 | }; | ||
217 | |||
218 | static irqreturn_t cpmac_irq(int, void *); | ||
219 | static void cpmac_hw_start(struct net_device *dev); | ||
220 | static void cpmac_hw_stop(struct net_device *dev); | ||
221 | static int cpmac_stop(struct net_device *dev); | ||
222 | static int cpmac_open(struct net_device *dev); | ||
223 | |||
224 | static void cpmac_dump_regs(struct net_device *dev) | ||
225 | { | ||
226 | int i; | ||
227 | struct cpmac_priv *priv = netdev_priv(dev); | ||
228 | for (i = 0; i < CPMAC_REG_END; i += 4) { | ||
229 | if (i % 16 == 0) { | ||
230 | if (i) | ||
231 | pr_cont("\n"); | ||
232 | printk(KERN_DEBUG "%s: reg[%p]:", dev->name, | ||
233 | priv->regs + i); | ||
234 | } | ||
235 | printk(" %08x", cpmac_read(priv->regs, i)); | ||
236 | } | ||
237 | printk("\n"); | ||
238 | } | ||
239 | |||
240 | static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) | ||
241 | { | ||
242 | int i; | ||
243 | printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc); | ||
244 | for (i = 0; i < sizeof(*desc) / 4; i++) | ||
245 | printk(" %08x", ((u32 *)desc)[i]); | ||
246 | printk("\n"); | ||
247 | } | ||
248 | |||
249 | static void cpmac_dump_all_desc(struct net_device *dev) | ||
250 | { | ||
251 | struct cpmac_priv *priv = netdev_priv(dev); | ||
252 | struct cpmac_desc *dump = priv->rx_head; | ||
253 | do { | ||
254 | cpmac_dump_desc(dev, dump); | ||
255 | dump = dump->next; | ||
256 | } while (dump != priv->rx_head); | ||
257 | } | ||
258 | |||
259 | static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) | ||
260 | { | ||
261 | int i; | ||
262 | printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len); | ||
263 | for (i = 0; i < skb->len; i++) { | ||
264 | if (i % 16 == 0) { | ||
265 | if (i) | ||
266 | pr_cont("\n"); | ||
267 | printk(KERN_DEBUG "%s: data[%p]:", dev->name, | ||
268 | skb->data + i); | ||
269 | } | ||
270 | printk(" %02x", ((u8 *)skb->data)[i]); | ||
271 | } | ||
272 | printk("\n"); | ||
273 | } | ||
274 | |||
275 | static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) | ||
276 | { | ||
277 | u32 val; | ||
278 | |||
279 | while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) | ||
280 | cpu_relax(); | ||
281 | cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | | ||
282 | MDIO_PHY(phy_id)); | ||
283 | while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) | ||
284 | cpu_relax(); | ||
285 | return MDIO_DATA(val); | ||
286 | } | ||
287 | |||
288 | static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, | ||
289 | int reg, u16 val) | ||
290 | { | ||
291 | while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) | ||
292 | cpu_relax(); | ||
293 | cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | | ||
294 | MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); | ||
295 | return 0; | ||
296 | } | ||
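The two helpers above implement a plain clause-22 MDIO transaction: pack the PHY and register addresses into one command word, set BUSY (plus WRITE and the data, for writes), then poll until the controller clears BUSY. A standalone sketch of the word packing, using a hypothetical PHY address 0x1f and register 1 (the BMSR):

    #include <stdio.h>

    #define MDIO_BUSY     0x80000000u
    #define MDIO_REG(reg) (((unsigned)(reg) & 0x1f) << 21)
    #define MDIO_PHY(phy) (((unsigned)(phy) & 0x1f) << 16)

    int main(void)
    {
        unsigned int cmd = MDIO_BUSY | MDIO_REG(1) | MDIO_PHY(0x1f);

        /* 0x80000000 | 0x00200000 | 0x001f0000 */
        printf("read command word: 0x%08x\n", cmd); /* 0x803f0000 */
        return 0;
    }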
297 | |||
298 | static int cpmac_mdio_reset(struct mii_bus *bus) | ||
299 | { | ||
300 | struct clk *cpmac_clk; | ||
301 | |||
302 | cpmac_clk = clk_get(&bus->dev, "cpmac"); | ||
303 | if (IS_ERR(cpmac_clk)) { | ||
304 | printk(KERN_ERR "unable to get cpmac clock\n"); | ||
305 | return -1; | ||
306 | } | ||
307 | ar7_device_reset(AR7_RESET_BIT_MDIO); | ||
308 | cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | | ||
309 | MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1)); | ||
310 | return 0; | ||
311 | } | ||
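cpmac_mdio_reset() programs the divider for an MDIO clock near 2.2 MHz; because the hardware divides the peripheral clock by CLKDIV + 1, the driver computes rate / 2200000 - 1. Worked through with a hypothetical 125 MHz peripheral clock (not a value taken from this patch):

    #include <stdio.h>

    int main(void)
    {
        unsigned long rate = 125000000;           /* stand-in for clk_get_rate() */
        unsigned int clkdiv = rate / 2200000 - 1; /* 55 */

        printf("CLKDIV=%u -> MDIO clock %lu Hz\n",
               clkdiv, rate / (clkdiv + 1));      /* ~2.23 MHz */
        return 0;
    }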
312 | |||
313 | static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; | ||
314 | |||
315 | static struct mii_bus *cpmac_mii; | ||
316 | |||
317 | static int cpmac_config(struct net_device *dev, struct ifmap *map) | ||
318 | { | ||
319 | if (dev->flags & IFF_UP) | ||
320 | return -EBUSY; | ||
321 | |||
322 | /* Don't allow changing the I/O address */ | ||
323 | if (map->base_addr != dev->base_addr) | ||
324 | return -EOPNOTSUPP; | ||
325 | |||
326 | /* ignore other fields */ | ||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | static void cpmac_set_multicast_list(struct net_device *dev) | ||
331 | { | ||
332 | struct netdev_hw_addr *ha; | ||
333 | u8 tmp; | ||
334 | u32 mbp, bit, hash[2] = { 0, }; | ||
335 | struct cpmac_priv *priv = netdev_priv(dev); | ||
336 | |||
337 | mbp = cpmac_read(priv->regs, CPMAC_MBP); | ||
338 | if (dev->flags & IFF_PROMISC) { | ||
339 | cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | | ||
340 | MBP_RXPROMISC); | ||
341 | } else { | ||
342 | cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); | ||
343 | if (dev->flags & IFF_ALLMULTI) { | ||
344 | /* enable all multicast mode */ | ||
345 | cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); | ||
346 | cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); | ||
347 | } else { | ||
348 | /* | ||
349 | * cpmac uses some strange mac address hashing | ||
350 | * (not crc32) | ||
351 | */ | ||
352 | netdev_for_each_mc_addr(ha, dev) { | ||
353 | bit = 0; | ||
354 | tmp = ha->addr[0]; | ||
355 | bit ^= (tmp >> 2) ^ (tmp << 4); | ||
356 | tmp = ha->addr[1]; | ||
357 | bit ^= (tmp >> 4) ^ (tmp << 2); | ||
358 | tmp = ha->addr[2]; | ||
359 | bit ^= (tmp >> 6) ^ tmp; | ||
360 | tmp = ha->addr[3]; | ||
361 | bit ^= (tmp >> 2) ^ (tmp << 4); | ||
362 | tmp = ha->addr[4]; | ||
363 | bit ^= (tmp >> 4) ^ (tmp << 2); | ||
364 | tmp = ha->addr[5]; | ||
365 | bit ^= (tmp >> 6) ^ tmp; | ||
366 | bit &= 0x3f; | ||
367 | hash[bit / 32] |= 1 << (bit % 32); | ||
368 | } | ||
369 | |||
370 | cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); | ||
371 | cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); | ||
372 | } | ||
373 | } | ||
374 | } | ||
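As the comment in cpmac_set_multicast_list() says, the hash is not crc32: each address byte contributes two shifted copies of itself, xor-folded into a 6-bit index that picks one bit of the HASH_LO/HASH_HI pair. A standalone restatement of the same shifts, fed an example group address (the IPv4 all-hosts MAC, 01:00:5e:00:00:01):

    #include <stdio.h>

    /* same per-byte shifts as the loop above */
    static unsigned int cpmac_hash(const unsigned char *a)
    {
        unsigned int bit = 0;

        bit ^= (a[0] >> 2) ^ (a[0] << 4);
        bit ^= (a[1] >> 4) ^ (a[1] << 2);
        bit ^= (a[2] >> 6) ^ a[2];
        bit ^= (a[3] >> 2) ^ (a[3] << 4);
        bit ^= (a[4] >> 4) ^ (a[4] << 2);
        bit ^= (a[5] >> 6) ^ a[5];
        return bit & 0x3f;
    }

    int main(void)
    {
        /* IPv4 all-hosts multicast group, 224.0.0.1 */
        unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
        unsigned int bit = cpmac_hash(mac);

        printf("bit %u -> HASH_%s, bit %u\n",
               bit, bit < 32 ? "LO" : "HI", bit % 32);
        return 0;
    }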
375 | |||
376 | static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv, | ||
377 | struct cpmac_desc *desc) | ||
378 | { | ||
379 | struct sk_buff *skb, *result = NULL; | ||
380 | |||
381 | if (unlikely(netif_msg_hw(priv))) | ||
382 | cpmac_dump_desc(priv->dev, desc); | ||
383 | cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); | ||
384 | if (unlikely(!desc->datalen)) { | ||
385 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
386 | printk(KERN_WARNING "%s: rx: spurious interrupt\n", | ||
387 | priv->dev->name); | ||
388 | return NULL; | ||
389 | } | ||
390 | |||
391 | skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE); | ||
392 | if (likely(skb)) { | ||
393 | skb_put(desc->skb, desc->datalen); | ||
394 | desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); | ||
395 | skb_checksum_none_assert(desc->skb); | ||
396 | priv->dev->stats.rx_packets++; | ||
397 | priv->dev->stats.rx_bytes += desc->datalen; | ||
398 | result = desc->skb; | ||
399 | dma_unmap_single(&priv->dev->dev, desc->data_mapping, | ||
400 | CPMAC_SKB_SIZE, DMA_FROM_DEVICE); | ||
401 | desc->skb = skb; | ||
402 | desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, | ||
403 | CPMAC_SKB_SIZE, | ||
404 | DMA_FROM_DEVICE); | ||
405 | desc->hw_data = (u32)desc->data_mapping; | ||
406 | if (unlikely(netif_msg_pktdata(priv))) { | ||
407 | printk(KERN_DEBUG "%s: received packet:\n", | ||
408 | priv->dev->name); | ||
409 | cpmac_dump_skb(priv->dev, result); | ||
410 | } | ||
411 | } else { | ||
412 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
413 | printk(KERN_WARNING | ||
414 | "%s: low on skbs, dropping packet\n", | ||
415 | priv->dev->name); | ||
416 | priv->dev->stats.rx_dropped++; | ||
417 | } | ||
418 | |||
419 | desc->buflen = CPMAC_SKB_SIZE; | ||
420 | desc->dataflags = CPMAC_OWN; | ||
421 | |||
422 | return result; | ||
423 | } | ||
424 | |||
425 | static int cpmac_poll(struct napi_struct *napi, int budget) | ||
426 | { | ||
427 | struct sk_buff *skb; | ||
428 | struct cpmac_desc *desc, *restart; | ||
429 | struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); | ||
430 | int received = 0, processed = 0; | ||
431 | |||
432 | spin_lock(&priv->rx_lock); | ||
433 | if (unlikely(!priv->rx_head)) { | ||
434 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
435 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", | ||
436 | priv->dev->name); | ||
437 | spin_unlock(&priv->rx_lock); | ||
438 | napi_complete(napi); | ||
439 | return 0; | ||
440 | } | ||
441 | |||
442 | desc = priv->rx_head; | ||
443 | restart = NULL; | ||
444 | while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { | ||
445 | processed++; | ||
446 | |||
447 | if ((desc->dataflags & CPMAC_EOQ) != 0) { | ||
448 | /* The last update to eoq->hw_next didn't happen | ||
449 | * soon enough, and the receiver stopped here. | ||
450 | * Remember this descriptor so we can restart | ||
451 | * the receiver after freeing some space. | ||
452 | */ | ||
453 | if (unlikely(restart)) { | ||
454 | if (netif_msg_rx_err(priv)) | ||
455 | printk(KERN_ERR "%s: poll found a" | ||
456 | " duplicate EOQ: %p and %p\n", | ||
457 | priv->dev->name, restart, desc); | ||
458 | goto fatal_error; | ||
459 | } | ||
460 | |||
461 | restart = desc->next; | ||
462 | } | ||
463 | |||
464 | skb = cpmac_rx_one(priv, desc); | ||
465 | if (likely(skb)) { | ||
466 | netif_receive_skb(skb); | ||
467 | received++; | ||
468 | } | ||
469 | desc = desc->next; | ||
470 | } | ||
471 | |||
472 | if (desc != priv->rx_head) { | ||
473 | /* We freed some buffers, but not the whole ring, | ||
474 | * add what we did free to the rx list */ | ||
475 | desc->prev->hw_next = (u32)0; | ||
476 | priv->rx_head->prev->hw_next = priv->rx_head->mapping; | ||
477 | } | ||
478 | |||
479 | /* Optimization: If we did not actually process an EOQ (perhaps because | ||
480 | * of quota limits), check to see if the tail of the queue has EOQ set. | ||
481 | * We should immediately restart in that case so that the receiver can | ||
482 | * restart and run in parallel with more packet processing. | ||
483 | * This lets us handle slightly larger bursts before running | ||
484 | * out of ring space (assuming dev->weight < ring_size) */ | ||
485 | |||
486 | if (!restart && | ||
487 | (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) | ||
488 | == CPMAC_EOQ && | ||
489 | (priv->rx_head->dataflags & CPMAC_OWN) != 0) { | ||
490 | /* reset EOQ so the poll loop (above) doesn't try to | ||
491 | * restart this when it eventually gets to this descriptor. | ||
492 | */ | ||
493 | priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; | ||
494 | restart = priv->rx_head; | ||
495 | } | ||
496 | |||
497 | if (restart) { | ||
498 | priv->dev->stats.rx_errors++; | ||
499 | priv->dev->stats.rx_fifo_errors++; | ||
500 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
501 | printk(KERN_WARNING "%s: rx dma ring overrun\n", | ||
502 | priv->dev->name); | ||
503 | |||
504 | if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { | ||
505 | if (netif_msg_drv(priv)) | ||
506 | printk(KERN_ERR "%s: cpmac_poll is trying to " | ||
507 | "restart rx from a descriptor that's " | ||
508 | "not free: %p\n", | ||
509 | priv->dev->name, restart); | ||
510 | goto fatal_error; | ||
511 | } | ||
512 | |||
513 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); | ||
514 | } | ||
515 | |||
516 | priv->rx_head = desc; | ||
517 | spin_unlock(&priv->rx_lock); | ||
518 | if (unlikely(netif_msg_rx_status(priv))) | ||
519 | printk(KERN_DEBUG "%s: poll processed %d packets\n", | ||
520 | priv->dev->name, received); | ||
521 | if (processed == 0) { | ||
522 | /* we ran out of packets to read, | ||
523 | * revert to interrupt-driven mode */ | ||
524 | napi_complete(napi); | ||
525 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); | ||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | return 1; | ||
530 | |||
531 | fatal_error: | ||
532 | /* Something went horribly wrong. | ||
533 | * Reset hardware to try to recover rather than wedging. */ | ||
534 | |||
535 | if (netif_msg_drv(priv)) { | ||
536 | printk(KERN_ERR "%s: cpmac_poll is confused. " | ||
537 | "Resetting hardware\n", priv->dev->name); | ||
538 | cpmac_dump_all_desc(priv->dev); | ||
539 | printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", | ||
540 | priv->dev->name, | ||
541 | cpmac_read(priv->regs, CPMAC_RX_PTR(0)), | ||
542 | cpmac_read(priv->regs, CPMAC_RX_ACK(0))); | ||
543 | } | ||
544 | |||
545 | spin_unlock(&priv->rx_lock); | ||
546 | napi_complete(napi); | ||
547 | netif_tx_stop_all_queues(priv->dev); | ||
548 | napi_disable(&priv->napi); | ||
549 | |||
550 | atomic_inc(&priv->reset_pending); | ||
551 | cpmac_hw_stop(priv->dev); | ||
552 | if (!schedule_work(&priv->reset_work)) | ||
553 | atomic_dec(&priv->reset_pending); | ||
554 | return 0; | ||
555 | |||
556 | } | ||
557 | |||
558 | static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
559 | { | ||
560 | int queue, len; | ||
561 | struct cpmac_desc *desc; | ||
562 | struct cpmac_priv *priv = netdev_priv(dev); | ||
563 | |||
564 | if (unlikely(atomic_read(&priv->reset_pending))) | ||
565 | return NETDEV_TX_BUSY; | ||
566 | |||
567 | if (unlikely(skb_padto(skb, ETH_ZLEN))) | ||
568 | return NETDEV_TX_OK; | ||
569 | |||
570 | len = max(skb->len, ETH_ZLEN); | ||
571 | queue = skb_get_queue_mapping(skb); | ||
572 | netif_stop_subqueue(dev, queue); | ||
573 | |||
574 | desc = &priv->desc_ring[queue]; | ||
575 | if (unlikely(desc->dataflags & CPMAC_OWN)) { | ||
576 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
577 | printk(KERN_WARNING "%s: tx dma ring full\n", | ||
578 | dev->name); | ||
579 | return NETDEV_TX_BUSY; | ||
580 | } | ||
581 | |||
582 | spin_lock(&priv->lock); | ||
583 | spin_unlock(&priv->lock); | ||
584 | desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; | ||
585 | desc->skb = skb; | ||
586 | desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, | ||
587 | DMA_TO_DEVICE); | ||
588 | desc->hw_data = (u32)desc->data_mapping; | ||
589 | desc->datalen = len; | ||
590 | desc->buflen = len; | ||
591 | if (unlikely(netif_msg_tx_queued(priv))) | ||
592 | printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb, | ||
593 | skb->len); | ||
594 | if (unlikely(netif_msg_hw(priv))) | ||
595 | cpmac_dump_desc(dev, desc); | ||
596 | if (unlikely(netif_msg_pktdata(priv))) | ||
597 | cpmac_dump_skb(dev, skb); | ||
598 | cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); | ||
599 | |||
600 | return NETDEV_TX_OK; | ||
601 | } | ||
602 | |||
603 | static void cpmac_end_xmit(struct net_device *dev, int queue) | ||
604 | { | ||
605 | struct cpmac_desc *desc; | ||
606 | struct cpmac_priv *priv = netdev_priv(dev); | ||
607 | |||
608 | desc = &priv->desc_ring[queue]; | ||
609 | cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); | ||
610 | if (likely(desc->skb)) { | ||
611 | spin_lock(&priv->lock); | ||
612 | dev->stats.tx_packets++; | ||
613 | dev->stats.tx_bytes += desc->skb->len; | ||
614 | spin_unlock(&priv->lock); | ||
615 | dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, | ||
616 | DMA_TO_DEVICE); | ||
617 | |||
618 | if (unlikely(netif_msg_tx_done(priv))) | ||
619 | printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name, | ||
620 | desc->skb, desc->skb->len); | ||
621 | |||
622 | dev_kfree_skb_irq(desc->skb); | ||
623 | desc->skb = NULL; | ||
624 | if (__netif_subqueue_stopped(dev, queue)) | ||
625 | netif_wake_subqueue(dev, queue); | ||
626 | } else { | ||
627 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
628 | printk(KERN_WARNING | ||
629 | "%s: end_xmit: spurious interrupt\n", dev->name); | ||
630 | if (__netif_subqueue_stopped(dev, queue)) | ||
631 | netif_wake_subqueue(dev, queue); | ||
632 | } | ||
633 | } | ||
634 | |||
635 | static void cpmac_hw_stop(struct net_device *dev) | ||
636 | { | ||
637 | int i; | ||
638 | struct cpmac_priv *priv = netdev_priv(dev); | ||
639 | struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; | ||
640 | |||
641 | ar7_device_reset(pdata->reset_bit); | ||
642 | cpmac_write(priv->regs, CPMAC_RX_CONTROL, | ||
643 | cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); | ||
644 | cpmac_write(priv->regs, CPMAC_TX_CONTROL, | ||
645 | cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); | ||
646 | for (i = 0; i < 8; i++) { | ||
647 | cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); | ||
648 | cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); | ||
649 | } | ||
650 | cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); | ||
651 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); | ||
652 | cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); | ||
653 | cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); | ||
654 | cpmac_write(priv->regs, CPMAC_MAC_CONTROL, | ||
655 | cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); | ||
656 | } | ||
657 | |||
658 | static void cpmac_hw_start(struct net_device *dev) | ||
659 | { | ||
660 | int i; | ||
661 | struct cpmac_priv *priv = netdev_priv(dev); | ||
662 | struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; | ||
663 | |||
664 | ar7_device_reset(pdata->reset_bit); | ||
665 | for (i = 0; i < 8; i++) { | ||
666 | cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); | ||
667 | cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); | ||
668 | } | ||
669 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); | ||
670 | |||
671 | cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | | ||
672 | MBP_RXMCAST); | ||
673 | cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); | ||
674 | for (i = 0; i < 8; i++) | ||
675 | cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); | ||
676 | cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); | ||
677 | cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | | ||
678 | (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) | | ||
679 | (dev->dev_addr[3] << 24)); | ||
680 | cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); | ||
681 | cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); | ||
682 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); | ||
683 | cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); | ||
684 | cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); | ||
685 | cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1); | ||
686 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); | ||
687 | cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); | ||
688 | cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); | ||
689 | |||
690 | cpmac_write(priv->regs, CPMAC_RX_CONTROL, | ||
691 | cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); | ||
692 | cpmac_write(priv->regs, CPMAC_TX_CONTROL, | ||
693 | cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); | ||
694 | cpmac_write(priv->regs, CPMAC_MAC_CONTROL, | ||
695 | cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | | ||
696 | MAC_FDX); | ||
697 | } | ||
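cpmac_hw_start() splits the station address unevenly across three registers: byte 5 is written to every per-channel ADDR_LO register, byte 4 to ADDR_MID, and bytes 0-3 are packed little-end-first into ADDR_HI. A standalone sketch using a made-up, locally administered address:

    #include <stdio.h>

    int main(void)
    {
        /* made-up locally administered address */
        unsigned char a[6] = { 0x02, 0x12, 0x34, 0x56, 0x78, 0x9a };
        unsigned int lo  = a[5];   /* one ADDR_LO register per channel */
        unsigned int mid = a[4];
        unsigned int hi  = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24);

        printf("LO=0x%02x MID=0x%02x HI=0x%08x\n", lo, mid, hi);
        /* LO=0x9a MID=0x78 HI=0x56341202 */
        return 0;
    }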
698 | |||
699 | static void cpmac_clear_rx(struct net_device *dev) | ||
700 | { | ||
701 | struct cpmac_priv *priv = netdev_priv(dev); | ||
702 | struct cpmac_desc *desc; | ||
703 | int i; | ||
704 | if (unlikely(!priv->rx_head)) | ||
705 | return; | ||
706 | desc = priv->rx_head; | ||
707 | for (i = 0; i < priv->ring_size; i++) { | ||
708 | if ((desc->dataflags & CPMAC_OWN) == 0) { | ||
709 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
710 | printk(KERN_WARNING "%s: packet dropped\n", | ||
711 | dev->name); | ||
712 | if (unlikely(netif_msg_hw(priv))) | ||
713 | cpmac_dump_desc(dev, desc); | ||
714 | desc->dataflags = CPMAC_OWN; | ||
715 | dev->stats.rx_dropped++; | ||
716 | } | ||
717 | desc->hw_next = desc->next->mapping; | ||
718 | desc = desc->next; | ||
719 | } | ||
720 | priv->rx_head->prev->hw_next = 0; | ||
721 | } | ||
722 | |||
723 | static void cpmac_clear_tx(struct net_device *dev) | ||
724 | { | ||
725 | struct cpmac_priv *priv = netdev_priv(dev); | ||
726 | int i; | ||
727 | if (unlikely(!priv->desc_ring)) | ||
728 | return; | ||
729 | for (i = 0; i < CPMAC_QUEUES; i++) { | ||
730 | priv->desc_ring[i].dataflags = 0; | ||
731 | if (priv->desc_ring[i].skb) { | ||
732 | dev_kfree_skb_any(priv->desc_ring[i].skb); | ||
733 | priv->desc_ring[i].skb = NULL; | ||
734 | } | ||
735 | } | ||
736 | } | ||
737 | |||
738 | static void cpmac_hw_error(struct work_struct *work) | ||
739 | { | ||
740 | struct cpmac_priv *priv = | ||
741 | container_of(work, struct cpmac_priv, reset_work); | ||
742 | |||
743 | spin_lock(&priv->rx_lock); | ||
744 | cpmac_clear_rx(priv->dev); | ||
745 | spin_unlock(&priv->rx_lock); | ||
746 | cpmac_clear_tx(priv->dev); | ||
747 | cpmac_hw_start(priv->dev); | ||
748 | barrier(); | ||
749 | atomic_dec(&priv->reset_pending); | ||
750 | |||
751 | netif_tx_wake_all_queues(priv->dev); | ||
752 | cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); | ||
753 | } | ||
754 | |||
755 | static void cpmac_check_status(struct net_device *dev) | ||
756 | { | ||
757 | struct cpmac_priv *priv = netdev_priv(dev); | ||
758 | |||
759 | u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); | ||
760 | int rx_channel = (macstatus >> 8) & 7; | ||
761 | int rx_code = (macstatus >> 12) & 15; | ||
762 | int tx_channel = (macstatus >> 16) & 7; | ||
763 | int tx_code = (macstatus >> 20) & 15; | ||
764 | |||
765 | if (rx_code || tx_code) { | ||
766 | if (netif_msg_drv(priv) && net_ratelimit()) { | ||
767 | /* Can't find any documentation on what these | ||
768 | * error codes actually are. So just log them and hope.. | ||
769 | */ | ||
770 | if (rx_code) | ||
771 | printk(KERN_WARNING "%s: host error %d on rx " | ||
772 | "channel %d (macstatus %08x), resetting\n", | ||
773 | dev->name, rx_code, rx_channel, macstatus); | ||
774 | if (tx_code) | ||
775 | printk(KERN_WARNING "%s: host error %d on tx " | ||
776 | "channel %d (macstatus %08x), resetting\n", | ||
777 | dev->name, tx_code, tx_channel, macstatus); | ||
778 | } | ||
779 | |||
780 | netif_tx_stop_all_queues(dev); | ||
781 | cpmac_hw_stop(dev); | ||
782 | if (schedule_work(&priv->reset_work)) | ||
783 | atomic_inc(&priv->reset_pending); | ||
784 | if (unlikely(netif_msg_hw(priv))) | ||
785 | cpmac_dump_regs(dev); | ||
786 | } | ||
787 | cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); | ||
788 | } | ||
789 | |||
790 | static irqreturn_t cpmac_irq(int irq, void *dev_id) | ||
791 | { | ||
792 | struct net_device *dev = dev_id; | ||
793 | struct cpmac_priv *priv; | ||
794 | int queue; | ||
795 | u32 status; | ||
796 | |||
797 | priv = netdev_priv(dev); | ||
798 | |||
799 | status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); | ||
800 | |||
801 | if (unlikely(netif_msg_intr(priv))) | ||
802 | printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name, | ||
803 | status); | ||
804 | |||
805 | if (status & MAC_INT_TX) | ||
806 | cpmac_end_xmit(dev, (status & 7)); | ||
807 | |||
808 | if (status & MAC_INT_RX) { | ||
809 | queue = (status >> 8) & 7; | ||
810 | if (napi_schedule_prep(&priv->napi)) { | ||
811 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); | ||
812 | __napi_schedule(&priv->napi); | ||
813 | } | ||
814 | } | ||
815 | |||
816 | cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); | ||
817 | |||
818 | if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) | ||
819 | cpmac_check_status(dev); | ||
820 | |||
821 | return IRQ_HANDLED; | ||
822 | } | ||
823 | |||
824 | static void cpmac_tx_timeout(struct net_device *dev) | ||
825 | { | ||
826 | struct cpmac_priv *priv = netdev_priv(dev); | ||
827 | |||
828 | spin_lock(&priv->lock); | ||
829 | dev->stats.tx_errors++; | ||
830 | spin_unlock(&priv->lock); | ||
831 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
832 | printk(KERN_WARNING "%s: transmit timeout\n", dev->name); | ||
833 | |||
834 | atomic_inc(&priv->reset_pending); | ||
835 | barrier(); | ||
836 | cpmac_clear_tx(dev); | ||
837 | barrier(); | ||
838 | atomic_dec(&priv->reset_pending); | ||
839 | |||
840 | netif_tx_wake_all_queues(priv->dev); | ||
841 | } | ||
842 | |||
843 | static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
844 | { | ||
845 | struct cpmac_priv *priv = netdev_priv(dev); | ||
846 | if (!(netif_running(dev))) | ||
847 | return -EINVAL; | ||
848 | if (!priv->phy) | ||
849 | return -EINVAL; | ||
850 | |||
851 | return phy_mii_ioctl(priv->phy, ifr, cmd); | ||
852 | } | ||
853 | |||
854 | static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
855 | { | ||
856 | struct cpmac_priv *priv = netdev_priv(dev); | ||
857 | |||
858 | if (priv->phy) | ||
859 | return phy_ethtool_gset(priv->phy, cmd); | ||
860 | |||
861 | return -EINVAL; | ||
862 | } | ||
863 | |||
864 | static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
865 | { | ||
866 | struct cpmac_priv *priv = netdev_priv(dev); | ||
867 | |||
868 | if (!capable(CAP_NET_ADMIN)) | ||
869 | return -EPERM; | ||
870 | |||
871 | if (priv->phy) | ||
872 | return phy_ethtool_sset(priv->phy, cmd); | ||
873 | |||
874 | return -EINVAL; | ||
875 | } | ||
876 | |||
877 | static void cpmac_get_ringparam(struct net_device *dev, | ||
878 | struct ethtool_ringparam *ring) | ||
879 | { | ||
880 | struct cpmac_priv *priv = netdev_priv(dev); | ||
881 | |||
882 | ring->rx_max_pending = 1024; | ||
883 | ring->rx_mini_max_pending = 1; | ||
884 | ring->rx_jumbo_max_pending = 1; | ||
885 | ring->tx_max_pending = 1; | ||
886 | |||
887 | ring->rx_pending = priv->ring_size; | ||
888 | ring->rx_mini_pending = 1; | ||
889 | ring->rx_jumbo_pending = 1; | ||
890 | ring->tx_pending = 1; | ||
891 | } | ||
892 | |||
893 | static int cpmac_set_ringparam(struct net_device *dev, | ||
894 | struct ethtool_ringparam *ring) | ||
895 | { | ||
896 | struct cpmac_priv *priv = netdev_priv(dev); | ||
897 | |||
898 | if (netif_running(dev)) | ||
899 | return -EBUSY; | ||
900 | priv->ring_size = ring->rx_pending; | ||
901 | return 0; | ||
902 | } | ||
903 | |||
904 | static void cpmac_get_drvinfo(struct net_device *dev, | ||
905 | struct ethtool_drvinfo *info) | ||
906 | { | ||
907 | strcpy(info->driver, "cpmac"); | ||
908 | strcpy(info->version, CPMAC_VERSION); | ||
909 | info->fw_version[0] = '\0'; | ||
910 | sprintf(info->bus_info, "%s", "cpmac"); | ||
911 | info->regdump_len = 0; | ||
912 | } | ||
913 | |||
914 | static const struct ethtool_ops cpmac_ethtool_ops = { | ||
915 | .get_settings = cpmac_get_settings, | ||
916 | .set_settings = cpmac_set_settings, | ||
917 | .get_drvinfo = cpmac_get_drvinfo, | ||
918 | .get_link = ethtool_op_get_link, | ||
919 | .get_ringparam = cpmac_get_ringparam, | ||
920 | .set_ringparam = cpmac_set_ringparam, | ||
921 | }; | ||
922 | |||
923 | static void cpmac_adjust_link(struct net_device *dev) | ||
924 | { | ||
925 | struct cpmac_priv *priv = netdev_priv(dev); | ||
926 | int new_state = 0; | ||
927 | |||
928 | spin_lock(&priv->lock); | ||
929 | if (priv->phy->link) { | ||
930 | netif_tx_start_all_queues(dev); | ||
931 | if (priv->phy->duplex != priv->oldduplex) { | ||
932 | new_state = 1; | ||
933 | priv->oldduplex = priv->phy->duplex; | ||
934 | } | ||
935 | |||
936 | if (priv->phy->speed != priv->oldspeed) { | ||
937 | new_state = 1; | ||
938 | priv->oldspeed = priv->phy->speed; | ||
939 | } | ||
940 | |||
941 | if (!priv->oldlink) { | ||
942 | new_state = 1; | ||
943 | priv->oldlink = 1; | ||
944 | } | ||
945 | } else if (priv->oldlink) { | ||
946 | new_state = 1; | ||
947 | priv->oldlink = 0; | ||
948 | priv->oldspeed = 0; | ||
949 | priv->oldduplex = -1; | ||
950 | } | ||
951 | |||
952 | if (new_state && netif_msg_link(priv) && net_ratelimit()) | ||
953 | phy_print_status(priv->phy); | ||
954 | |||
955 | spin_unlock(&priv->lock); | ||
956 | } | ||
957 | |||
958 | static int cpmac_open(struct net_device *dev) | ||
959 | { | ||
960 | int i, size, res; | ||
961 | struct cpmac_priv *priv = netdev_priv(dev); | ||
962 | struct resource *mem; | ||
963 | struct cpmac_desc *desc; | ||
964 | struct sk_buff *skb; | ||
965 | |||
966 | mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); | ||
967 | if (!request_mem_region(mem->start, resource_size(mem), dev->name)) { | ||
968 | if (netif_msg_drv(priv)) | ||
969 | printk(KERN_ERR "%s: failed to request registers\n", | ||
970 | dev->name); | ||
971 | res = -ENXIO; | ||
972 | goto fail_reserve; | ||
973 | } | ||
974 | |||
975 | priv->regs = ioremap(mem->start, resource_size(mem)); | ||
976 | if (!priv->regs) { | ||
977 | if (netif_msg_drv(priv)) | ||
978 | printk(KERN_ERR "%s: failed to remap registers\n", | ||
979 | dev->name); | ||
980 | res = -ENXIO; | ||
981 | goto fail_remap; | ||
982 | } | ||
983 | |||
984 | size = priv->ring_size + CPMAC_QUEUES; | ||
985 | priv->desc_ring = dma_alloc_coherent(&dev->dev, | ||
986 | sizeof(struct cpmac_desc) * size, | ||
987 | &priv->dma_ring, | ||
988 | GFP_KERNEL); | ||
989 | if (!priv->desc_ring) { | ||
990 | res = -ENOMEM; | ||
991 | goto fail_alloc; | ||
992 | } | ||
993 | |||
994 | for (i = 0; i < size; i++) | ||
995 | priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; | ||
996 | |||
997 | priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; | ||
998 | for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { | ||
999 | skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE); | ||
1000 | if (unlikely(!skb)) { | ||
1001 | res = -ENOMEM; | ||
1002 | goto fail_desc; | ||
1003 | } | ||
1004 | desc->skb = skb; | ||
1005 | desc->data_mapping = dma_map_single(&dev->dev, skb->data, | ||
1006 | CPMAC_SKB_SIZE, | ||
1007 | DMA_FROM_DEVICE); | ||
1008 | desc->hw_data = (u32)desc->data_mapping; | ||
1009 | desc->buflen = CPMAC_SKB_SIZE; | ||
1010 | desc->dataflags = CPMAC_OWN; | ||
1011 | desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; | ||
1012 | desc->next->prev = desc; | ||
1013 | desc->hw_next = (u32)desc->next->mapping; | ||
1014 | } | ||
1015 | |||
1016 | priv->rx_head->prev->hw_next = (u32)0; | ||
1017 | |||
1018 | res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev); | ||
1019 | if (res) { | ||
1020 | if (netif_msg_drv(priv)) | ||
1021 | printk(KERN_ERR "%s: failed to obtain irq\n", | ||
1022 | dev->name); | ||
1023 | goto fail_irq; | ||
1024 | } | ||
1025 | |||
1026 | atomic_set(&priv->reset_pending, 0); | ||
1027 | INIT_WORK(&priv->reset_work, cpmac_hw_error); | ||
1028 | cpmac_hw_start(dev); | ||
1029 | |||
1030 | napi_enable(&priv->napi); | ||
1031 | priv->phy->state = PHY_CHANGELINK; | ||
1032 | phy_start(priv->phy); | ||
1033 | |||
1034 | return 0; | ||
1035 | |||
1036 | fail_irq: | ||
1037 | fail_desc: | ||
1038 | for (i = 0; i < priv->ring_size; i++) { | ||
1039 | if (priv->rx_head[i].skb) { | ||
1040 | dma_unmap_single(&dev->dev, | ||
1041 | priv->rx_head[i].data_mapping, | ||
1042 | CPMAC_SKB_SIZE, | ||
1043 | DMA_FROM_DEVICE); | ||
1044 | kfree_skb(priv->rx_head[i].skb); | ||
1045 | } | ||
1046 | } | ||
1047 | fail_alloc: | ||
1048 | kfree(priv->desc_ring); | ||
1049 | iounmap(priv->regs); | ||
1050 | |||
1051 | fail_remap: | ||
1052 | release_mem_region(mem->start, resource_size(mem)); | ||
1053 | |||
1054 | fail_reserve: | ||
1055 | return res; | ||
1056 | } | ||
1057 | |||
1058 | static int cpmac_stop(struct net_device *dev) | ||
1059 | { | ||
1060 | int i; | ||
1061 | struct cpmac_priv *priv = netdev_priv(dev); | ||
1062 | struct resource *mem; | ||
1063 | |||
1064 | netif_tx_stop_all_queues(dev); | ||
1065 | |||
1066 | cancel_work_sync(&priv->reset_work); | ||
1067 | napi_disable(&priv->napi); | ||
1068 | phy_stop(priv->phy); | ||
1069 | |||
1070 | cpmac_hw_stop(dev); | ||
1071 | |||
1072 | for (i = 0; i < 8; i++) | ||
1073 | cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); | ||
1074 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); | ||
1075 | cpmac_write(priv->regs, CPMAC_MBP, 0); | ||
1076 | |||
1077 | free_irq(dev->irq, dev); | ||
1078 | iounmap(priv->regs); | ||
1079 | mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); | ||
1080 | release_mem_region(mem->start, resource_size(mem)); | ||
1081 | priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; | ||
1082 | for (i = 0; i < priv->ring_size; i++) { | ||
1083 | if (priv->rx_head[i].skb) { | ||
1084 | dma_unmap_single(&dev->dev, | ||
1085 | priv->rx_head[i].data_mapping, | ||
1086 | CPMAC_SKB_SIZE, | ||
1087 | DMA_FROM_DEVICE); | ||
1088 | kfree_skb(priv->rx_head[i].skb); | ||
1089 | } | ||
1090 | } | ||
1091 | |||
1092 | dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * | ||
1093 | (CPMAC_QUEUES + priv->ring_size), | ||
1094 | priv->desc_ring, priv->dma_ring); | ||
1095 | return 0; | ||
1096 | } | ||
1097 | |||
1098 | static const struct net_device_ops cpmac_netdev_ops = { | ||
1099 | .ndo_open = cpmac_open, | ||
1100 | .ndo_stop = cpmac_stop, | ||
1101 | .ndo_start_xmit = cpmac_start_xmit, | ||
1102 | .ndo_tx_timeout = cpmac_tx_timeout, | ||
1103 | .ndo_set_multicast_list = cpmac_set_multicast_list, | ||
1104 | .ndo_do_ioctl = cpmac_ioctl, | ||
1105 | .ndo_set_config = cpmac_config, | ||
1106 | .ndo_change_mtu = eth_change_mtu, | ||
1107 | .ndo_validate_addr = eth_validate_addr, | ||
1108 | .ndo_set_mac_address = eth_mac_addr, | ||
1109 | }; | ||
1110 | |||
1111 | static int external_switch; | ||
1112 | |||
1113 | static int __devinit cpmac_probe(struct platform_device *pdev) | ||
1114 | { | ||
1115 | int rc, phy_id; | ||
1116 | char mdio_bus_id[MII_BUS_ID_SIZE]; | ||
1117 | struct resource *mem; | ||
1118 | struct cpmac_priv *priv; | ||
1119 | struct net_device *dev; | ||
1120 | struct plat_cpmac_data *pdata; | ||
1121 | |||
1122 | pdata = pdev->dev.platform_data; | ||
1123 | |||
1124 | if (external_switch || dumb_switch) { | ||
1125 | strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */ | ||
1126 | phy_id = pdev->id; | ||
1127 | } else { | ||
1128 | for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { | ||
1129 | if (!(pdata->phy_mask & (1 << phy_id))) | ||
1130 | continue; | ||
1131 | if (!cpmac_mii->phy_map[phy_id]) | ||
1132 | continue; | ||
1133 | strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE); | ||
1134 | break; | ||
1135 | } | ||
1136 | } | ||
1137 | |||
1138 | if (phy_id == PHY_MAX_ADDR) { | ||
1139 | dev_err(&pdev->dev, "no PHY present, falling back " | ||
1140 | "to switch on MDIO bus 0\n"); | ||
1141 | strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */ | ||
1142 | phy_id = pdev->id; | ||
1143 | } | ||
1144 | |||
1145 | dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); | ||
1146 | |||
1147 | if (!dev) { | ||
1148 | printk(KERN_ERR "cpmac: Unable to allocate net_device\n"); | ||
1149 | return -ENOMEM; | ||
1150 | } | ||
1151 | |||
1152 | platform_set_drvdata(pdev, dev); | ||
1153 | priv = netdev_priv(dev); | ||
1154 | |||
1155 | priv->pdev = pdev; | ||
1156 | mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | ||
1157 | if (!mem) { | ||
1158 | rc = -ENODEV; | ||
1159 | goto fail; | ||
1160 | } | ||
1161 | |||
1162 | dev->irq = platform_get_irq_byname(pdev, "irq"); | ||
1163 | |||
1164 | dev->netdev_ops = &cpmac_netdev_ops; | ||
1165 | dev->ethtool_ops = &cpmac_ethtool_ops; | ||
1166 | |||
1167 | netif_napi_add(dev, &priv->napi, cpmac_poll, 64); | ||
1168 | |||
1169 | spin_lock_init(&priv->lock); | ||
1170 | spin_lock_init(&priv->rx_lock); | ||
1171 | priv->dev = dev; | ||
1172 | priv->ring_size = 64; | ||
1173 | priv->msg_enable = netif_msg_init(debug_level, 0xff); | ||
1174 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr)); | ||
1175 | |||
1176 | snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, | ||
1177 | mdio_bus_id, phy_id); | ||
1178 | |||
1179 | priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link, 0, | ||
1180 | PHY_INTERFACE_MODE_MII); | ||
1181 | |||
1182 | if (IS_ERR(priv->phy)) { | ||
1183 | if (netif_msg_drv(priv)) | ||
1184 | printk(KERN_ERR "%s: Could not attach to PHY\n", | ||
1185 | dev->name); | ||
1186 | rc = PTR_ERR(priv->phy); | ||
1187 | goto fail; | ||
1188 | } | ||
1189 | |||
1190 | rc = register_netdev(dev); | ||
1191 | if (rc) { | ||
1192 | printk(KERN_ERR "cpmac: error %i registering device %s\n", rc, | ||
1193 | dev->name); | ||
1194 | goto fail; | ||
1195 | } | ||
1196 | |||
1197 | if (netif_msg_probe(priv)) { | ||
1198 | printk(KERN_INFO | ||
1199 | "cpmac: device %s (regs: %p, irq: %d, phy: %s, " | ||
1200 | "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq, | ||
1201 | priv->phy_name, dev->dev_addr); | ||
1202 | } | ||
1203 | return 0; | ||
1204 | |||
1205 | fail: | ||
1206 | free_netdev(dev); | ||
1207 | return rc; | ||
1208 | } | ||
1209 | |||
1210 | static int __devexit cpmac_remove(struct platform_device *pdev) | ||
1211 | { | ||
1212 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1213 | unregister_netdev(dev); | ||
1214 | free_netdev(dev); | ||
1215 | return 0; | ||
1216 | } | ||
1217 | |||
1218 | static struct platform_driver cpmac_driver = { | ||
1219 | .driver.name = "cpmac", | ||
1220 | .driver.owner = THIS_MODULE, | ||
1221 | .probe = cpmac_probe, | ||
1222 | .remove = __devexit_p(cpmac_remove), | ||
1223 | }; | ||
1224 | |||
1225 | int __devinit cpmac_init(void) | ||
1226 | { | ||
1227 | u32 mask; | ||
1228 | int i, res; | ||
1229 | |||
1230 | cpmac_mii = mdiobus_alloc(); | ||
1231 | if (cpmac_mii == NULL) | ||
1232 | return -ENOMEM; | ||
1233 | |||
1234 | cpmac_mii->name = "cpmac-mii"; | ||
1235 | cpmac_mii->read = cpmac_mdio_read; | ||
1236 | cpmac_mii->write = cpmac_mdio_write; | ||
1237 | cpmac_mii->reset = cpmac_mdio_reset; | ||
1238 | cpmac_mii->irq = mii_irqs; | ||
1239 | |||
1240 | cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); | ||
1241 | |||
1242 | if (!cpmac_mii->priv) { | ||
1243 | printk(KERN_ERR "Can't ioremap mdio registers\n"); | ||
1244 | res = -ENXIO; | ||
1245 | goto fail_alloc; | ||
1246 | } | ||
1247 | |||
1248 | #warning FIXME: unhardcode gpio&reset bits | ||
1249 | ar7_gpio_disable(26); | ||
1250 | ar7_gpio_disable(27); | ||
1251 | ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); | ||
1252 | ar7_device_reset(AR7_RESET_BIT_CPMAC_HI); | ||
1253 | ar7_device_reset(AR7_RESET_BIT_EPHY); | ||
1254 | |||
1255 | cpmac_mii->reset(cpmac_mii); | ||
1256 | |||
1257 | for (i = 0; i < 300; i++) { | ||
1258 | mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE); | ||
1259 | if (mask) | ||
1260 | break; | ||
1261 | else | ||
1262 | msleep(10); | ||
1263 | } | ||
1264 | |||
1265 | mask &= 0x7fffffff; | ||
1266 | if (mask & (mask - 1)) { | ||
1267 | external_switch = 1; | ||
1268 | mask = 0; | ||
1269 | } | ||
1270 | |||
1271 | cpmac_mii->phy_mask = ~(mask | 0x80000000); | ||
1272 | snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1"); | ||
1273 | |||
1274 | res = mdiobus_register(cpmac_mii); | ||
1275 | if (res) | ||
1276 | goto fail_mii; | ||
1277 | |||
1278 | res = platform_driver_register(&cpmac_driver); | ||
1279 | if (res) | ||
1280 | goto fail_cpmac; | ||
1281 | |||
1282 | return 0; | ||
1283 | |||
1284 | fail_cpmac: | ||
1285 | mdiobus_unregister(cpmac_mii); | ||
1286 | |||
1287 | fail_mii: | ||
1288 | iounmap(cpmac_mii->priv); | ||
1289 | |||
1290 | fail_alloc: | ||
1291 | mdiobus_free(cpmac_mii); | ||
1292 | |||
1293 | return res; | ||
1294 | } | ||
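The external-switch detection in cpmac_init() leans on the x & (x - 1) idiom, which clears the lowest set bit of x: a nonzero result means at least two PHYs answered in the ALIVE mask, which the driver takes as evidence of an external switch. Illustrated standalone:

    #include <stdio.h>

    int main(void)
    {
        unsigned int one_phy  = 0x00000020; /* only PHY 5 alive */
        unsigned int two_phys = 0x00000021; /* PHYs 0 and 5 alive */

        printf("one: 0x%08x\n", one_phy & (one_phy - 1));   /* 0 -> single PHY */
        printf("two: 0x%08x\n", two_phys & (two_phys - 1)); /* nonzero -> switch */
        return 0;
    }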
1295 | |||
1296 | void __devexit cpmac_exit(void) | ||
1297 | { | ||
1298 | platform_driver_unregister(&cpmac_driver); | ||
1299 | mdiobus_unregister(cpmac_mii); | ||
1300 | iounmap(cpmac_mii->priv); | ||
1301 | mdiobus_free(cpmac_mii); | ||
1302 | } | ||
1303 | |||
1304 | module_init(cpmac_init); | ||
1305 | module_exit(cpmac_exit); | ||
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
new file mode 100644
index 000000000000..dca9d3369cdd
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -0,0 +1,970 @@
1 | /* | ||
2 | * Texas Instruments CPDMA Driver | ||
3 | * | ||
4 | * Copyright (C) 2010 Texas Instruments | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation version 2. | ||
9 | * | ||
10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
11 | * kind, whether express or implied; without even the implied warranty | ||
12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/dma-mapping.h> | ||
21 | #include <linux/io.h> | ||
22 | |||
23 | #include "davinci_cpdma.h" | ||
24 | |||
25 | /* DMA Registers */ | ||
26 | #define CPDMA_TXIDVER 0x00 | ||
27 | #define CPDMA_TXCONTROL 0x04 | ||
28 | #define CPDMA_TXTEARDOWN 0x08 | ||
29 | #define CPDMA_RXIDVER 0x10 | ||
30 | #define CPDMA_RXCONTROL 0x14 | ||
31 | #define CPDMA_SOFTRESET 0x1c | ||
32 | #define CPDMA_RXTEARDOWN 0x18 | ||
33 | #define CPDMA_TXINTSTATRAW 0x80 | ||
34 | #define CPDMA_TXINTSTATMASKED 0x84 | ||
35 | #define CPDMA_TXINTMASKSET 0x88 | ||
36 | #define CPDMA_TXINTMASKCLEAR 0x8c | ||
37 | #define CPDMA_MACINVECTOR 0x90 | ||
38 | #define CPDMA_MACEOIVECTOR 0x94 | ||
39 | #define CPDMA_RXINTSTATRAW 0xa0 | ||
40 | #define CPDMA_RXINTSTATMASKED 0xa4 | ||
41 | #define CPDMA_RXINTMASKSET 0xa8 | ||
42 | #define CPDMA_RXINTMASKCLEAR 0xac | ||
43 | #define CPDMA_DMAINTSTATRAW 0xb0 | ||
44 | #define CPDMA_DMAINTSTATMASKED 0xb4 | ||
45 | #define CPDMA_DMAINTMASKSET 0xb8 | ||
46 | #define CPDMA_DMAINTMASKCLEAR 0xbc | ||
47 | #define CPDMA_DMAINT_HOSTERR BIT(1) | ||
48 | |||
49 | /* the following exist only if has_ext_regs is set */ | ||
50 | #define CPDMA_DMACONTROL 0x20 | ||
51 | #define CPDMA_DMASTATUS 0x24 | ||
52 | #define CPDMA_RXBUFFOFS 0x28 | ||
53 | #define CPDMA_EM_CONTROL 0x2c | ||
54 | |||
55 | /* Descriptor mode bits */ | ||
56 | #define CPDMA_DESC_SOP BIT(31) | ||
57 | #define CPDMA_DESC_EOP BIT(30) | ||
58 | #define CPDMA_DESC_OWNER BIT(29) | ||
59 | #define CPDMA_DESC_EOQ BIT(28) | ||
60 | #define CPDMA_DESC_TD_COMPLETE BIT(27) | ||
61 | #define CPDMA_DESC_PASS_CRC BIT(26) | ||
62 | |||
63 | #define CPDMA_TEARDOWN_VALUE 0xfffffffc | ||
64 | |||
65 | struct cpdma_desc { | ||
66 | /* hardware fields */ | ||
67 | u32 hw_next; | ||
68 | u32 hw_buffer; | ||
69 | u32 hw_len; | ||
70 | u32 hw_mode; | ||
71 | /* software fields */ | ||
72 | void *sw_token; | ||
73 | u32 sw_buffer; | ||
74 | u32 sw_len; | ||
75 | }; | ||
76 | |||
77 | struct cpdma_desc_pool { | ||
78 | u32 phys; | ||
79 | u32 hw_addr; | ||
80 | void __iomem *iomap; /* ioremap map */ | ||
81 | void *cpumap; /* dma_alloc map */ | ||
82 | int desc_size, mem_size; | ||
83 | int num_desc, used_desc; | ||
84 | unsigned long *bitmap; | ||
85 | struct device *dev; | ||
86 | spinlock_t lock; | ||
87 | }; | ||
88 | |||
89 | enum cpdma_state { | ||
90 | CPDMA_STATE_IDLE, | ||
91 | CPDMA_STATE_ACTIVE, | ||
92 | CPDMA_STATE_TEARDOWN, | ||
93 | }; | ||
94 | |||
95 | static const char *cpdma_state_str[] = { "idle", "active", "teardown" }; | ||
96 | |||
97 | struct cpdma_ctlr { | ||
98 | enum cpdma_state state; | ||
99 | struct cpdma_params params; | ||
100 | struct device *dev; | ||
101 | struct cpdma_desc_pool *pool; | ||
102 | spinlock_t lock; | ||
103 | struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS]; | ||
104 | }; | ||
105 | |||
106 | struct cpdma_chan { | ||
107 | enum cpdma_state state; | ||
108 | struct cpdma_ctlr *ctlr; | ||
109 | int chan_num; | ||
110 | spinlock_t lock; | ||
111 | struct cpdma_desc __iomem *head, *tail; | ||
112 | int count; | ||
113 | void __iomem *hdp, *cp, *rxfree; | ||
114 | u32 mask; | ||
115 | cpdma_handler_fn handler; | ||
116 | enum dma_data_direction dir; | ||
117 | struct cpdma_chan_stats stats; | ||
118 | /* offsets into dmaregs */ | ||
119 | int int_set, int_clear, td; | ||
120 | }; | ||
121 | |||
122 | /* The following make access to common cpdma_ctlr params more readable */ | ||
123 | #define dmaregs params.dmaregs | ||
124 | #define num_chan params.num_chan | ||
125 | |||
126 | /* various accessors */ | ||
127 | #define dma_reg_read(ctlr, ofs) __raw_readl((ctlr)->dmaregs + (ofs)) | ||
128 | #define chan_read(chan, fld) __raw_readl((chan)->fld) | ||
129 | #define desc_read(desc, fld) __raw_readl(&(desc)->fld) | ||
130 | #define dma_reg_write(ctlr, ofs, v) __raw_writel(v, (ctlr)->dmaregs + (ofs)) | ||
131 | #define chan_write(chan, fld, v) __raw_writel(v, (chan)->fld) | ||
132 | #define desc_write(desc, fld, v) __raw_writel((u32)(v), &(desc)->fld) | ||
133 | |||
134 | /* | ||
135 | * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci | ||
136 | * emac) have dedicated on-chip memory for these descriptors. Some other | ||
137 | * devices (e.g. cpsw switches) use plain old memory. Descriptor pools | ||
138 | * abstract out these details | ||
139 | */ | ||
140 | static struct cpdma_desc_pool * | ||
141 | cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr, | ||
142 | int size, int align) | ||
143 | { | ||
144 | int bitmap_size; | ||
145 | struct cpdma_desc_pool *pool; | ||
146 | |||
147 | pool = kzalloc(sizeof(*pool), GFP_KERNEL); | ||
148 | if (!pool) | ||
149 | return NULL; | ||
150 | |||
151 | spin_lock_init(&pool->lock); | ||
152 | |||
153 | pool->dev = dev; | ||
154 | pool->mem_size = size; | ||
155 | pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align); | ||
156 | pool->num_desc = size / pool->desc_size; | ||
157 | |||
158 | bitmap_size = BITS_TO_LONGS(pool->num_desc) * sizeof(long); /* round up */ | ||
159 | pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
160 | if (!pool->bitmap) | ||
161 | goto fail; | ||
162 | |||
163 | if (phys) { | ||
164 | pool->phys = phys; | ||
165 | pool->iomap = ioremap(phys, size); | ||
166 | pool->hw_addr = hw_addr; | ||
167 | } else { | ||
168 | pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys, | ||
169 | GFP_KERNEL); | ||
170 | pool->iomap = pool->cpumap; | ||
171 | pool->hw_addr = pool->phys; | ||
172 | } | ||
173 | |||
174 | if (pool->iomap) | ||
175 | return pool; | ||
176 | |||
177 | fail: | ||
178 | kfree(pool->bitmap); | ||
179 | kfree(pool); | ||
180 | return NULL; | ||
181 | } | ||
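To make the two pool flavors in the comment above concrete, here is a minimal sketch (not part of the patch) of the cpdma_params fields that select between them; pdev and ram_res are hypothetical platform handles:

	/* Sketch only: choosing the descriptor pool flavor. */
	struct cpdma_params dma_params = {
		.dev		= &pdev->dev,
		.desc_mem_size	= SZ_8K,
		.desc_align	= 16,
		/* davinci emac style: point at dedicated on-chip RAM and
		 * the pool ioremap()s it ... */
		.desc_mem_phys	= ram_res->start,
		.desc_hw_addr	= ram_res->start,	/* as the DMA engine sees it */
		/* ... cpsw style: leave desc_mem_phys as 0 instead and the
		 * pool falls back to dma_alloc_coherent() memory. */
	};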
182 | |||
183 | static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool) | ||
184 | { | ||
185 | unsigned long flags; | ||
186 | |||
187 | if (!pool) | ||
188 | return; | ||
189 | |||
190 | spin_lock_irqsave(&pool->lock, flags); | ||
191 | WARN_ON(pool->used_desc); | ||
192 | kfree(pool->bitmap); | ||
193 | if (pool->cpumap) { | ||
194 | dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap, | ||
195 | pool->phys); | ||
196 | } else { | ||
197 | iounmap(pool->iomap); | ||
198 | } | ||
199 | spin_unlock_irqrestore(&pool->lock, flags); | ||
200 | kfree(pool); | ||
201 | } | ||
202 | |||
203 | static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool, | ||
204 | struct cpdma_desc __iomem *desc) | ||
205 | { | ||
206 | if (!desc) | ||
207 | return 0; | ||
208 | return pool->hw_addr + (__force dma_addr_t)desc - | ||
209 | (__force dma_addr_t)pool->iomap; | ||
210 | } | ||
211 | |||
212 | static inline struct cpdma_desc __iomem * | ||
213 | desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma) | ||
214 | { | ||
215 | return dma ? pool->iomap + dma - pool->hw_addr : NULL; | ||
216 | } | ||
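An illustration of the translation these two helpers perform (addresses invented for the example):

	/* Illustration only: suppose pool->iomap == 0xfec00000 (the CPU's
	 * view of the descriptor RAM) and pool->hw_addr == 0x01e20000 (the
	 * DMA engine's view). A descriptor at iomap + 0x40 then has
	 * desc_phys() == 0x01e20040, and desc_from_phys(pool, 0x01e20040)
	 * maps back to iomap + 0x40. */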
217 | |||
218 | static struct cpdma_desc __iomem * | ||
219 | cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc) | ||
220 | { | ||
221 | unsigned long flags; | ||
222 | int index; | ||
223 | struct cpdma_desc __iomem *desc = NULL; | ||
224 | |||
225 | spin_lock_irqsave(&pool->lock, flags); | ||
226 | |||
227 | index = bitmap_find_next_zero_area(pool->bitmap, pool->num_desc, 0, | ||
228 | num_desc, 0); | ||
229 | if (index < pool->num_desc) { | ||
230 | bitmap_set(pool->bitmap, index, num_desc); | ||
231 | desc = pool->iomap + pool->desc_size * index; | ||
232 | pool->used_desc++; | ||
233 | } | ||
234 | |||
235 | spin_unlock_irqrestore(&pool->lock, flags); | ||
236 | return desc; | ||
237 | } | ||
238 | |||
239 | static void cpdma_desc_free(struct cpdma_desc_pool *pool, | ||
240 | struct cpdma_desc __iomem *desc, int num_desc) | ||
241 | { | ||
242 | unsigned long flags, index; | ||
243 | |||
244 | index = ((unsigned long)desc - (unsigned long)pool->iomap) / | ||
245 | pool->desc_size; | ||
246 | spin_lock_irqsave(&pool->lock, flags); | ||
247 | bitmap_clear(pool->bitmap, index, num_desc); | ||
248 | pool->used_desc--; | ||
249 | spin_unlock_irqrestore(&pool->lock, flags); | ||
250 | } | ||
251 | |||
252 | struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params) | ||
253 | { | ||
254 | struct cpdma_ctlr *ctlr; | ||
255 | |||
256 | ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL); | ||
257 | if (!ctlr) | ||
258 | return NULL; | ||
259 | |||
260 | ctlr->state = CPDMA_STATE_IDLE; | ||
261 | ctlr->params = *params; | ||
262 | ctlr->dev = params->dev; | ||
263 | spin_lock_init(&ctlr->lock); | ||
264 | |||
265 | ctlr->pool = cpdma_desc_pool_create(ctlr->dev, | ||
266 | ctlr->params.desc_mem_phys, | ||
267 | ctlr->params.desc_hw_addr, | ||
268 | ctlr->params.desc_mem_size, | ||
269 | ctlr->params.desc_align); | ||
270 | if (!ctlr->pool) { | ||
271 | kfree(ctlr); | ||
272 | return NULL; | ||
273 | } | ||
274 | |||
275 | if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS)) | ||
276 | ctlr->num_chan = CPDMA_MAX_CHANNELS; | ||
277 | return ctlr; | ||
278 | } | ||
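A typical bring-up, sketched under the assumption that dma_params was filled in as in the earlier example:

	/* Sketch only: controller lifecycle as this API intends it. */
	static int dma_init_sketch(struct cpdma_params *dma_params)
	{
		struct cpdma_ctlr *dma = cpdma_ctlr_create(dma_params);

		if (!dma)
			return -ENOMEM;
		/* tx/rx channels would be created here via cpdma_chan_create() */
		if (cpdma_ctlr_start(dma)) {
			cpdma_ctlr_destroy(dma);
			return -EBUSY;
		}
		return 0;
	}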
279 | |||
280 | int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) | ||
281 | { | ||
282 | unsigned long flags; | ||
283 | int i; | ||
284 | |||
285 | spin_lock_irqsave(&ctlr->lock, flags); | ||
286 | if (ctlr->state != CPDMA_STATE_IDLE) { | ||
287 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
288 | return -EBUSY; | ||
289 | } | ||
290 | |||
291 | if (ctlr->params.has_soft_reset) { | ||
292 | unsigned long timeout = jiffies + HZ/10; | ||
293 | |||
294 | dma_reg_write(ctlr, CPDMA_SOFTRESET, 1); | ||
295 | while (time_before(jiffies, timeout)) { | ||
296 | if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0) | ||
297 | break; | ||
298 | } | ||
299 | WARN_ON(!time_before(jiffies, timeout)); | ||
300 | } | ||
301 | |||
302 | for (i = 0; i < ctlr->num_chan; i++) { | ||
303 | __raw_writel(0, ctlr->params.txhdp + 4 * i); | ||
304 | __raw_writel(0, ctlr->params.rxhdp + 4 * i); | ||
305 | __raw_writel(0, ctlr->params.txcp + 4 * i); | ||
306 | __raw_writel(0, ctlr->params.rxcp + 4 * i); | ||
307 | } | ||
308 | |||
309 | dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); | ||
310 | dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); | ||
311 | |||
312 | dma_reg_write(ctlr, CPDMA_TXCONTROL, 1); | ||
313 | dma_reg_write(ctlr, CPDMA_RXCONTROL, 1); | ||
314 | |||
315 | ctlr->state = CPDMA_STATE_ACTIVE; | ||
316 | |||
317 | for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { | ||
318 | if (ctlr->channels[i]) | ||
319 | cpdma_chan_start(ctlr->channels[i]); | ||
320 | } | ||
321 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) | ||
326 | { | ||
327 | unsigned long flags; | ||
328 | int i; | ||
329 | |||
330 | spin_lock_irqsave(&ctlr->lock, flags); | ||
331 | if (ctlr->state != CPDMA_STATE_ACTIVE) { | ||
332 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
333 | return -EINVAL; | ||
334 | } | ||
335 | |||
336 | ctlr->state = CPDMA_STATE_TEARDOWN; | ||
337 | |||
338 | for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { | ||
339 | if (ctlr->channels[i]) | ||
340 | cpdma_chan_stop(ctlr->channels[i]); | ||
341 | } | ||
342 | |||
343 | dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff); | ||
344 | dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff); | ||
345 | |||
346 | dma_reg_write(ctlr, CPDMA_TXCONTROL, 0); | ||
347 | dma_reg_write(ctlr, CPDMA_RXCONTROL, 0); | ||
348 | |||
349 | ctlr->state = CPDMA_STATE_IDLE; | ||
350 | |||
351 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
352 | return 0; | ||
353 | } | ||
354 | |||
355 | int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) | ||
356 | { | ||
357 | struct device *dev = ctlr->dev; | ||
358 | unsigned long flags; | ||
359 | int i; | ||
360 | |||
361 | spin_lock_irqsave(&ctlr->lock, flags); | ||
362 | |||
363 | dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]); | ||
364 | |||
365 | dev_info(dev, "CPDMA: txidver: %x", | ||
366 | dma_reg_read(ctlr, CPDMA_TXIDVER)); | ||
367 | dev_info(dev, "CPDMA: txcontrol: %x", | ||
368 | dma_reg_read(ctlr, CPDMA_TXCONTROL)); | ||
369 | dev_info(dev, "CPDMA: txteardown: %x", | ||
370 | dma_reg_read(ctlr, CPDMA_TXTEARDOWN)); | ||
371 | dev_info(dev, "CPDMA: rxidver: %x", | ||
372 | dma_reg_read(ctlr, CPDMA_RXIDVER)); | ||
373 | dev_info(dev, "CPDMA: rxcontrol: %x", | ||
374 | dma_reg_read(ctlr, CPDMA_RXCONTROL)); | ||
375 | dev_info(dev, "CPDMA: softreset: %x", | ||
376 | dma_reg_read(ctlr, CPDMA_SOFTRESET)); | ||
377 | dev_info(dev, "CPDMA: rxteardown: %x", | ||
378 | dma_reg_read(ctlr, CPDMA_RXTEARDOWN)); | ||
379 | dev_info(dev, "CPDMA: txintstatraw: %x", | ||
380 | dma_reg_read(ctlr, CPDMA_TXINTSTATRAW)); | ||
381 | dev_info(dev, "CPDMA: txintstatmasked: %x", | ||
382 | dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED)); | ||
383 | dev_info(dev, "CPDMA: txintmaskset: %x", | ||
384 | dma_reg_read(ctlr, CPDMA_TXINTMASKSET)); | ||
385 | dev_info(dev, "CPDMA: txintmaskclear: %x", | ||
386 | dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR)); | ||
387 | dev_info(dev, "CPDMA: macinvector: %x", | ||
388 | dma_reg_read(ctlr, CPDMA_MACINVECTOR)); | ||
389 | dev_info(dev, "CPDMA: maceoivector: %x", | ||
390 | dma_reg_read(ctlr, CPDMA_MACEOIVECTOR)); | ||
391 | dev_info(dev, "CPDMA: rxintstatraw: %x", | ||
392 | dma_reg_read(ctlr, CPDMA_RXINTSTATRAW)); | ||
393 | dev_info(dev, "CPDMA: rxintstatmasked: %x", | ||
394 | dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED)); | ||
395 | dev_info(dev, "CPDMA: rxintmaskset: %x", | ||
396 | dma_reg_read(ctlr, CPDMA_RXINTMASKSET)); | ||
397 | dev_info(dev, "CPDMA: rxintmaskclear: %x", | ||
398 | dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR)); | ||
399 | dev_info(dev, "CPDMA: dmaintstatraw: %x", | ||
400 | dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW)); | ||
401 | dev_info(dev, "CPDMA: dmaintstatmasked: %x", | ||
402 | dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED)); | ||
403 | dev_info(dev, "CPDMA: dmaintmaskset: %x", | ||
404 | dma_reg_read(ctlr, CPDMA_DMAINTMASKSET)); | ||
405 | dev_info(dev, "CPDMA: dmaintmaskclear: %x", | ||
406 | dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR)); | ||
407 | |||
408 | if (ctlr->params.has_ext_regs) { /* these regs exist only with has_ext_regs */ | ||
409 | dev_info(dev, "CPDMA: dmacontrol: %x", | ||
410 | dma_reg_read(ctlr, CPDMA_DMACONTROL)); | ||
411 | dev_info(dev, "CPDMA: dmastatus: %x", | ||
412 | dma_reg_read(ctlr, CPDMA_DMASTATUS)); | ||
413 | dev_info(dev, "CPDMA: rxbuffofs: %x", | ||
414 | dma_reg_read(ctlr, CPDMA_RXBUFFOFS)); | ||
415 | } | ||
416 | |||
417 | for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) | ||
418 | if (ctlr->channels[i]) | ||
419 | cpdma_chan_dump(ctlr->channels[i]); | ||
420 | |||
421 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
422 | return 0; | ||
423 | } | ||
424 | |||
425 | int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) | ||
426 | { | ||
427 | unsigned long flags; | ||
428 | int ret = 0, i; | ||
429 | |||
430 | if (!ctlr) | ||
431 | return -EINVAL; | ||
432 | |||
433 | spin_lock_irqsave(&ctlr->lock, flags); | ||
434 | if (ctlr->state != CPDMA_STATE_IDLE) | ||
435 | cpdma_ctlr_stop(ctlr); | ||
436 | |||
437 | for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { | ||
438 | if (ctlr->channels[i]) | ||
439 | cpdma_chan_destroy(ctlr->channels[i]); | ||
440 | } | ||
441 | |||
442 | cpdma_desc_pool_destroy(ctlr->pool); | ||
443 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
444 | kfree(ctlr); | ||
445 | return ret; | ||
446 | } | ||
447 | |||
448 | int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) | ||
449 | { | ||
450 | unsigned long flags; | ||
451 | int i, reg; | ||
452 | |||
453 | spin_lock_irqsave(&ctlr->lock, flags); | ||
454 | if (ctlr->state != CPDMA_STATE_ACTIVE) { | ||
455 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
456 | return -EINVAL; | ||
457 | } | ||
458 | |||
459 | reg = enable ? CPDMA_DMAINTMASKSET : CPDMA_DMAINTMASKCLEAR; | ||
460 | dma_reg_write(ctlr, reg, CPDMA_DMAINT_HOSTERR); | ||
461 | |||
462 | for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) { | ||
463 | if (ctlr->channels[i]) | ||
464 | cpdma_chan_int_ctrl(ctlr->channels[i], enable); | ||
465 | } | ||
466 | |||
467 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr) | ||
472 | { | ||
473 | dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, 0); | ||
474 | } | ||
475 | |||
476 | struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, | ||
477 | cpdma_handler_fn handler) | ||
478 | { | ||
479 | struct cpdma_chan *chan; | ||
480 | int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4; | ||
481 | unsigned long flags; | ||
482 | |||
483 | if (__chan_linear(chan_num) >= ctlr->num_chan) | ||
484 | return NULL; | ||
485 | |||
486 | ret = -ENOMEM; | ||
487 | chan = kzalloc(sizeof(*chan), GFP_KERNEL); | ||
488 | if (!chan) | ||
489 | goto err_chan_alloc; | ||
490 | |||
491 | spin_lock_irqsave(&ctlr->lock, flags); | ||
492 | ret = -EBUSY; | ||
493 | if (ctlr->channels[chan_num]) | ||
494 | goto err_chan_busy; | ||
495 | |||
496 | chan->ctlr = ctlr; | ||
497 | chan->state = CPDMA_STATE_IDLE; | ||
498 | chan->chan_num = chan_num; | ||
499 | chan->handler = handler; | ||
500 | |||
501 | if (is_rx_chan(chan)) { | ||
502 | chan->hdp = ctlr->params.rxhdp + offset; | ||
503 | chan->cp = ctlr->params.rxcp + offset; | ||
504 | chan->rxfree = ctlr->params.rxfree + offset; | ||
505 | chan->int_set = CPDMA_RXINTMASKSET; | ||
506 | chan->int_clear = CPDMA_RXINTMASKCLEAR; | ||
507 | chan->td = CPDMA_RXTEARDOWN; | ||
508 | chan->dir = DMA_FROM_DEVICE; | ||
509 | } else { | ||
510 | chan->hdp = ctlr->params.txhdp + offset; | ||
511 | chan->cp = ctlr->params.txcp + offset; | ||
512 | chan->int_set = CPDMA_TXINTMASKSET; | ||
513 | chan->int_clear = CPDMA_TXINTMASKCLEAR; | ||
514 | chan->td = CPDMA_TXTEARDOWN; | ||
515 | chan->dir = DMA_TO_DEVICE; | ||
516 | } | ||
517 | chan->mask = BIT(chan_linear(chan)); | ||
518 | |||
519 | spin_lock_init(&chan->lock); | ||
520 | |||
521 | ctlr->channels[chan_num] = chan; | ||
522 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
523 | return chan; | ||
524 | |||
525 | err_chan_busy: | ||
526 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
527 | kfree(chan); | ||
528 | err_chan_alloc: | ||
529 | return ERR_PTR(ret); | ||
530 | } | ||
531 | |||
532 | int cpdma_chan_destroy(struct cpdma_chan *chan) | ||
533 | { | ||
534 | struct cpdma_ctlr *ctlr; | ||
535 | unsigned long flags; | ||
536 | |||
537 | if (!chan) | ||
538 | return -EINVAL; | ||
539 | ctlr = chan->ctlr; /* dereference chan only after the NULL check */ | ||
540 | spin_lock_irqsave(&ctlr->lock, flags); | ||
541 | if (chan->state != CPDMA_STATE_IDLE) | ||
542 | cpdma_chan_stop(chan); | ||
543 | ctlr->channels[chan->chan_num] = NULL; | ||
544 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
545 | kfree(chan); | ||
546 | return 0; | ||
547 | } | ||
548 | |||
549 | int cpdma_chan_get_stats(struct cpdma_chan *chan, | ||
550 | struct cpdma_chan_stats *stats) | ||
551 | { | ||
552 | unsigned long flags; | ||
553 | if (!chan) | ||
554 | return -EINVAL; | ||
555 | spin_lock_irqsave(&chan->lock, flags); | ||
556 | memcpy(stats, &chan->stats, sizeof(*stats)); | ||
557 | spin_unlock_irqrestore(&chan->lock, flags); | ||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | int cpdma_chan_dump(struct cpdma_chan *chan) | ||
562 | { | ||
563 | unsigned long flags; | ||
564 | struct device *dev = chan->ctlr->dev; | ||
565 | |||
566 | spin_lock_irqsave(&chan->lock, flags); | ||
567 | |||
568 | dev_info(dev, "channel %d (%s %d) state %s", | ||
569 | chan->chan_num, is_rx_chan(chan) ? "rx" : "tx", | ||
570 | chan_linear(chan), cpdma_state_str[chan->state]); | ||
571 | dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp)); | ||
572 | dev_info(dev, "\tcp: %x\n", chan_read(chan, cp)); | ||
573 | if (chan->rxfree) { | ||
574 | dev_info(dev, "\trxfree: %x\n", | ||
575 | chan_read(chan, rxfree)); | ||
576 | } | ||
577 | |||
578 | dev_info(dev, "\tstats head_enqueue: %d\n", | ||
579 | chan->stats.head_enqueue); | ||
580 | dev_info(dev, "\tstats tail_enqueue: %d\n", | ||
581 | chan->stats.tail_enqueue); | ||
582 | dev_info(dev, "\tstats pad_enqueue: %d\n", | ||
583 | chan->stats.pad_enqueue); | ||
584 | dev_info(dev, "\tstats misqueued: %d\n", | ||
585 | chan->stats.misqueued); | ||
586 | dev_info(dev, "\tstats desc_alloc_fail: %d\n", | ||
587 | chan->stats.desc_alloc_fail); | ||
588 | dev_info(dev, "\tstats pad_alloc_fail: %d\n", | ||
589 | chan->stats.pad_alloc_fail); | ||
590 | dev_info(dev, "\tstats runt_receive_buff: %d\n", | ||
591 | chan->stats.runt_receive_buff); | ||
592 | dev_info(dev, "\tstats runt_transmit_buff: %d\n", | ||
593 | chan->stats.runt_transmit_buff); | ||
594 | dev_info(dev, "\tstats empty_dequeue: %d\n", | ||
595 | chan->stats.empty_dequeue); | ||
596 | dev_info(dev, "\tstats busy_dequeue: %d\n", | ||
597 | chan->stats.busy_dequeue); | ||
598 | dev_info(dev, "\tstats good_dequeue: %d\n", | ||
599 | chan->stats.good_dequeue); | ||
600 | dev_info(dev, "\tstats requeue: %d\n", | ||
601 | chan->stats.requeue); | ||
602 | dev_info(dev, "\tstats teardown_dequeue: %d\n", | ||
603 | chan->stats.teardown_dequeue); | ||
604 | |||
605 | spin_unlock_irqrestore(&chan->lock, flags); | ||
606 | return 0; | ||
607 | } | ||
608 | |||
609 | static void __cpdma_chan_submit(struct cpdma_chan *chan, | ||
610 | struct cpdma_desc __iomem *desc) | ||
611 | { | ||
612 | struct cpdma_ctlr *ctlr = chan->ctlr; | ||
613 | struct cpdma_desc __iomem *prev = chan->tail; | ||
614 | struct cpdma_desc_pool *pool = ctlr->pool; | ||
615 | dma_addr_t desc_dma; | ||
616 | u32 mode; | ||
617 | |||
618 | desc_dma = desc_phys(pool, desc); | ||
619 | |||
620 | /* simple case - idle channel */ | ||
621 | if (!chan->head) { | ||
622 | chan->stats.head_enqueue++; | ||
623 | chan->head = desc; | ||
624 | chan->tail = desc; | ||
625 | if (chan->state == CPDMA_STATE_ACTIVE) | ||
626 | chan_write(chan, hdp, desc_dma); | ||
627 | return; | ||
628 | } | ||
629 | |||
630 | /* first chain the descriptor at the tail of the list */ | ||
631 | desc_write(prev, hw_next, desc_dma); | ||
632 | chan->tail = desc; | ||
633 | chan->stats.tail_enqueue++; | ||
634 | |||
635 | /* next check if EOQ has been triggered already */ | ||
636 | mode = desc_read(prev, hw_mode); | ||
637 | if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) && | ||
638 | (chan->state == CPDMA_STATE_ACTIVE)) { | ||
639 | desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ); | ||
640 | chan_write(chan, hdp, desc_dma); | ||
641 | chan->stats.misqueued++; | ||
642 | } | ||
643 | } | ||
644 | |||
645 | int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, | ||
646 | int len, gfp_t gfp_mask) | ||
647 | { | ||
648 | struct cpdma_ctlr *ctlr = chan->ctlr; | ||
649 | struct cpdma_desc __iomem *desc; | ||
650 | dma_addr_t buffer; | ||
651 | unsigned long flags; | ||
652 | u32 mode; | ||
653 | int ret = 0; | ||
654 | |||
655 | spin_lock_irqsave(&chan->lock, flags); | ||
656 | |||
657 | if (chan->state == CPDMA_STATE_TEARDOWN) { | ||
658 | ret = -EINVAL; | ||
659 | goto unlock_ret; | ||
660 | } | ||
661 | |||
662 | desc = cpdma_desc_alloc(ctlr->pool, 1); | ||
663 | if (!desc) { | ||
664 | chan->stats.desc_alloc_fail++; | ||
665 | ret = -ENOMEM; | ||
666 | goto unlock_ret; | ||
667 | } | ||
668 | |||
669 | if (len < ctlr->params.min_packet_size) { | ||
670 | len = ctlr->params.min_packet_size; | ||
671 | chan->stats.runt_transmit_buff++; | ||
672 | } | ||
673 | |||
674 | buffer = dma_map_single(ctlr->dev, data, len, chan->dir); | ||
675 | mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP; | ||
676 | |||
677 | desc_write(desc, hw_next, 0); | ||
678 | desc_write(desc, hw_buffer, buffer); | ||
679 | desc_write(desc, hw_len, len); | ||
680 | desc_write(desc, hw_mode, mode | len); | ||
681 | desc_write(desc, sw_token, token); | ||
682 | desc_write(desc, sw_buffer, buffer); | ||
683 | desc_write(desc, sw_len, len); | ||
684 | |||
685 | __cpdma_chan_submit(chan, desc); | ||
686 | |||
687 | if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree) | ||
688 | chan_write(chan, rxfree, 1); | ||
689 | |||
690 | chan->count++; | ||
691 | |||
692 | unlock_ret: | ||
693 | spin_unlock_irqrestore(&chan->lock, flags); | ||
694 | return ret; | ||
695 | } | ||
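As a usage illustration (not part of the patch), a network driver's transmit path might hand an skb to a channel roughly like this; struct my_priv and priv->txchan are hypothetical:

	/* Sketch only: queueing an skb for transmission on a cpdma channel. */
	static netdev_tx_t start_xmit_sketch(struct sk_buff *skb,
					     struct net_device *ndev)
	{
		struct my_priv *priv = netdev_priv(ndev);
		int ret;

		ret = cpdma_chan_submit(priv->txchan, skb, skb->data,
					skb->len, GFP_KERNEL);
		if (ret) {
			/* out of descriptors: skb not consumed, retry later */
			netif_stop_queue(ndev);
			return NETDEV_TX_BUSY;
		}
		return NETDEV_TX_OK;
	}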
696 | |||
697 | static void __cpdma_chan_free(struct cpdma_chan *chan, | ||
698 | struct cpdma_desc __iomem *desc, | ||
699 | int outlen, int status) | ||
700 | { | ||
701 | struct cpdma_ctlr *ctlr = chan->ctlr; | ||
702 | struct cpdma_desc_pool *pool = ctlr->pool; | ||
703 | dma_addr_t buff_dma; | ||
704 | int origlen; | ||
705 | void *token; | ||
706 | |||
707 | token = (void *)desc_read(desc, sw_token); | ||
708 | buff_dma = desc_read(desc, sw_buffer); | ||
709 | origlen = desc_read(desc, sw_len); | ||
710 | |||
711 | dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir); | ||
712 | cpdma_desc_free(pool, desc, 1); | ||
713 | (*chan->handler)(token, outlen, status); | ||
714 | } | ||
715 | |||
716 | static int __cpdma_chan_process(struct cpdma_chan *chan) | ||
717 | { | ||
718 | struct cpdma_ctlr *ctlr = chan->ctlr; | ||
719 | struct cpdma_desc __iomem *desc; | ||
720 | int status, outlen; | ||
721 | struct cpdma_desc_pool *pool = ctlr->pool; | ||
722 | dma_addr_t desc_dma; | ||
723 | unsigned long flags; | ||
724 | |||
725 | spin_lock_irqsave(&chan->lock, flags); | ||
726 | |||
727 | desc = chan->head; | ||
728 | if (!desc) { | ||
729 | chan->stats.empty_dequeue++; | ||
730 | status = -ENOENT; | ||
731 | goto unlock_ret; | ||
732 | } | ||
733 | desc_dma = desc_phys(pool, desc); | ||
734 | |||
735 | status = __raw_readl(&desc->hw_mode); | ||
736 | outlen = status & 0x7ff; | ||
737 | if (status & CPDMA_DESC_OWNER) { | ||
738 | chan->stats.busy_dequeue++; | ||
739 | status = -EBUSY; | ||
740 | goto unlock_ret; | ||
741 | } | ||
742 | status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE); | ||
743 | |||
744 | chan->head = desc_from_phys(pool, desc_read(desc, hw_next)); | ||
745 | chan_write(chan, cp, desc_dma); | ||
746 | chan->count--; | ||
747 | chan->stats.good_dequeue++; | ||
748 | |||
749 | if (status & CPDMA_DESC_EOQ) { | ||
750 | chan->stats.requeue++; | ||
751 | chan_write(chan, hdp, desc_phys(pool, chan->head)); | ||
752 | } | ||
753 | |||
754 | spin_unlock_irqrestore(&chan->lock, flags); | ||
755 | |||
756 | __cpdma_chan_free(chan, desc, outlen, status); | ||
757 | return status; | ||
758 | |||
759 | unlock_ret: | ||
760 | spin_unlock_irqrestore(&chan->lock, flags); | ||
761 | return status; | ||
762 | } | ||
763 | |||
764 | int cpdma_chan_process(struct cpdma_chan *chan, int quota) | ||
765 | { | ||
766 | int used = 0, ret = 0; | ||
767 | |||
768 | if (chan->state != CPDMA_STATE_ACTIVE) | ||
769 | return -EINVAL; | ||
770 | |||
771 | while (used < quota) { | ||
772 | ret = __cpdma_chan_process(chan); | ||
773 | if (ret < 0) | ||
774 | break; | ||
775 | used++; | ||
776 | } | ||
777 | return used; | ||
778 | } | ||
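On the completion side, cpdma_chan_process() is meant to be driven from NAPI with the poll budget as the quota; a hedged sketch (struct my_priv is hypothetical):

	/* Sketch only: draining a channel from a NAPI poll handler. */
	static int napi_poll_sketch(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int done = cpdma_chan_process(priv->rxchan, budget);

		if (done < 0)
			done = 0;	/* channel not active */
		if (done < budget) {
			napi_complete(napi);
			cpdma_chan_int_ctrl(priv->rxchan, true);	/* unmask */
			cpdma_ctlr_eoi(priv->dma);			/* re-arm */
		}
		return done;
	}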
779 | |||
780 | int cpdma_chan_start(struct cpdma_chan *chan) | ||
781 | { | ||
782 | struct cpdma_ctlr *ctlr = chan->ctlr; | ||
783 | struct cpdma_desc_pool *pool = ctlr->pool; | ||
784 | unsigned long flags; | ||
785 | |||
786 | spin_lock_irqsave(&chan->lock, flags); | ||
787 | if (chan->state != CPDMA_STATE_IDLE) { | ||
788 | spin_unlock_irqrestore(&chan->lock, flags); | ||
789 | return -EBUSY; | ||
790 | } | ||
791 | if (ctlr->state != CPDMA_STATE_ACTIVE) { | ||
792 | spin_unlock_irqrestore(&chan->lock, flags); | ||
793 | return -EINVAL; | ||
794 | } | ||
795 | dma_reg_write(ctlr, chan->int_set, chan->mask); | ||
796 | chan->state = CPDMA_STATE_ACTIVE; | ||
797 | if (chan->head) { | ||
798 | chan_write(chan, hdp, desc_phys(pool, chan->head)); | ||
799 | if (chan->rxfree) | ||
800 | chan_write(chan, rxfree, chan->count); | ||
801 | } | ||
802 | |||
803 | spin_unlock_irqrestore(&chan->lock, flags); | ||
804 | return 0; | ||
805 | } | ||
806 | |||
807 | int cpdma_chan_stop(struct cpdma_chan *chan) | ||
808 | { | ||
809 | struct cpdma_ctlr *ctlr = chan->ctlr; | ||
810 | struct cpdma_desc_pool *pool = ctlr->pool; | ||
811 | unsigned long flags; | ||
812 | int ret; | ||
813 | unsigned long timeout; | ||
814 | |||
815 | spin_lock_irqsave(&chan->lock, flags); | ||
816 | if (chan->state != CPDMA_STATE_ACTIVE) { | ||
817 | spin_unlock_irqrestore(&chan->lock, flags); | ||
818 | return -EINVAL; | ||
819 | } | ||
820 | |||
821 | chan->state = CPDMA_STATE_TEARDOWN; | ||
822 | dma_reg_write(ctlr, chan->int_clear, chan->mask); | ||
823 | |||
824 | /* trigger teardown */ | ||
825 | dma_reg_write(ctlr, chan->td, chan->chan_num); | ||
826 | |||
827 | /* wait for teardown complete */ | ||
828 | timeout = jiffies + HZ/10; /* 100 msec */ | ||
829 | while (time_before(jiffies, timeout)) { | ||
830 | u32 cp = chan_read(chan, cp); | ||
831 | if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE) | ||
832 | break; | ||
833 | cpu_relax(); | ||
834 | } | ||
835 | WARN_ON(!time_before(jiffies, timeout)); | ||
836 | chan_write(chan, cp, CPDMA_TEARDOWN_VALUE); | ||
837 | |||
838 | /* handle completed packets */ | ||
839 | do { | ||
840 | ret = __cpdma_chan_process(chan); | ||
841 | if (ret < 0) | ||
842 | break; | ||
843 | } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0); | ||
844 | |||
845 | /* remaining packets haven't been tx/rx'ed, clean them up */ | ||
846 | while (chan->head) { | ||
847 | struct cpdma_desc __iomem *desc = chan->head; | ||
848 | dma_addr_t next_dma; | ||
849 | |||
850 | next_dma = desc_read(desc, hw_next); | ||
851 | chan->head = desc_from_phys(pool, next_dma); | ||
852 | chan->stats.teardown_dequeue++; | ||
853 | |||
854 | /* issue callback without locks held */ | ||
855 | spin_unlock_irqrestore(&chan->lock, flags); | ||
856 | __cpdma_chan_free(chan, desc, 0, -ENOSYS); | ||
857 | spin_lock_irqsave(&chan->lock, flags); | ||
858 | } | ||
859 | |||
860 | chan->state = CPDMA_STATE_IDLE; | ||
861 | spin_unlock_irqrestore(&chan->lock, flags); | ||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable) | ||
866 | { | ||
867 | unsigned long flags; | ||
868 | |||
869 | spin_lock_irqsave(&chan->lock, flags); | ||
870 | if (chan->state != CPDMA_STATE_ACTIVE) { | ||
871 | spin_unlock_irqrestore(&chan->lock, flags); | ||
872 | return -EINVAL; | ||
873 | } | ||
874 | |||
875 | dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear, | ||
876 | chan->mask); | ||
877 | spin_unlock_irqrestore(&chan->lock, flags); | ||
878 | |||
879 | return 0; | ||
880 | } | ||
881 | |||
882 | struct cpdma_control_info { | ||
883 | u32 reg; | ||
884 | u32 shift, mask; | ||
885 | int access; | ||
886 | #define ACCESS_RO BIT(0) | ||
887 | #define ACCESS_WO BIT(1) | ||
888 | #define ACCESS_RW (ACCESS_RO | ACCESS_WO) | ||
889 | }; | ||
890 | |||
891 | static struct cpdma_control_info controls[] = { | ||
892 | [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO}, | ||
893 | [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW}, | ||
894 | [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW}, | ||
895 | [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW}, | ||
896 | [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW}, | ||
897 | [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO}, | ||
898 | [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW}, | ||
899 | [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW}, | ||
900 | [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW}, | ||
901 | [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW}, | ||
902 | [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW}, | ||
903 | }; | ||
904 | |||
905 | int cpdma_control_get(struct cpdma_ctlr *ctlr, int control) | ||
906 | { | ||
907 | unsigned long flags; | ||
908 | struct cpdma_control_info *info; | ||
909 | int ret; | ||
910 | |||
911 | spin_lock_irqsave(&ctlr->lock, flags); | ||
912 | |||
913 | ret = -ENOTSUPP; | ||
914 | if (!ctlr->params.has_ext_regs) | ||
915 | goto unlock_ret; | ||
916 | |||
917 | ret = -EINVAL; | ||
918 | if (ctlr->state != CPDMA_STATE_ACTIVE) | ||
919 | goto unlock_ret; | ||
920 | |||
921 | ret = -ENOENT; | ||
922 | if (control < 0 || control >= ARRAY_SIZE(controls)) | ||
923 | goto unlock_ret; | ||
924 | info = &controls[control]; /* index only after the bounds check */ | ||
925 | ret = -EPERM; | ||
926 | if ((info->access & ACCESS_RO) != ACCESS_RO) | ||
927 | goto unlock_ret; | ||
928 | |||
929 | ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask; | ||
930 | |||
931 | unlock_ret: | ||
932 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
933 | return ret; | ||
934 | } | ||
935 | |||
936 | int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value) | ||
937 | { | ||
938 | unsigned long flags; | ||
939 | struct cpdma_control_info *info; | ||
940 | int ret; | ||
941 | u32 val; | ||
942 | |||
943 | spin_lock_irqsave(&ctlr->lock, flags); | ||
944 | |||
945 | ret = -ENOTSUPP; | ||
946 | if (!ctlr->params.has_ext_regs) | ||
947 | goto unlock_ret; | ||
948 | |||
949 | ret = -EINVAL; | ||
950 | if (ctlr->state != CPDMA_STATE_ACTIVE) | ||
951 | goto unlock_ret; | ||
952 | |||
953 | ret = -ENOENT; | ||
954 | if (control < 0 || control >= ARRAY_SIZE(controls)) | ||
955 | goto unlock_ret; | ||
956 | info = &controls[control]; /* index only after the bounds check */ | ||
957 | ret = -EPERM; | ||
958 | if ((info->access & ACCESS_WO) != ACCESS_WO) | ||
959 | goto unlock_ret; | ||
960 | |||
961 | val = dma_reg_read(ctlr, info->reg); | ||
962 | val &= ~(info->mask << info->shift); | ||
963 | val |= (value & info->mask) << info->shift; | ||
964 | dma_reg_write(ctlr, info->reg, val); | ||
965 | ret = 0; | ||
966 | |||
967 | unlock_ret: | ||
968 | spin_unlock_irqrestore(&ctlr->lock, flags); | ||
969 | return ret; | ||
970 | } | ||
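For example (illustrative only; valid while the controller is active and has_ext_regs is set):

	/* Sketch only: tuning two of the knobs from the table above. */
	static void control_sketch(struct cpdma_ctlr *ctlr)
	{
		cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, 1);	/* fixed tx prio */
		cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);	/* no rx offset */

		if (cpdma_control_get(ctlr, CPDMA_STAT_IDLE))
			dev_info(ctlr->dev, "CPDMA: dma engine idle\n");
	}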
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h new file mode 100644 index 000000000000..afa19a0c0d81 --- /dev/null +++ b/drivers/net/ethernet/ti/davinci_cpdma.h | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Texas Instruments CPDMA Driver | ||
3 | * | ||
4 | * Copyright (C) 2010 Texas Instruments | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License as | ||
8 | * published by the Free Software Foundation version 2. | ||
9 | * | ||
10 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
11 | * kind, whether express or implied; without even the implied warranty | ||
12 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | */ | ||
15 | #ifndef __DAVINCI_CPDMA_H__ | ||
16 | #define __DAVINCI_CPDMA_H__ | ||
17 | |||
18 | #define CPDMA_MAX_CHANNELS BITS_PER_LONG | ||
19 | |||
20 | #define tx_chan_num(chan) (chan) | ||
21 | #define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS) | ||
22 | #define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS) | ||
23 | #define is_tx_chan(chan) (!is_rx_chan(chan)) | ||
24 | #define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1)) | ||
25 | #define chan_linear(chan) __chan_linear((chan)->chan_num) | ||
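A worked illustration of this numbering scheme (assuming BITS_PER_LONG == 32, hence CPDMA_MAX_CHANNELS == 32):

	/*
	 * Illustration only, with CPDMA_MAX_CHANNELS == 32:
	 *   tx_chan_num(3)    -> 3    tx channels occupy slots 0..31
	 *   rx_chan_num(3)    -> 35   rx channels occupy slots 32..63
	 *   __chan_linear(35) -> 3    both slots name hardware channel 3
	 */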
26 | |||
27 | struct cpdma_params { | ||
28 | struct device *dev; | ||
29 | void __iomem *dmaregs; | ||
30 | void __iomem *txhdp, *rxhdp, *txcp, *rxcp; | ||
31 | void __iomem *rxthresh, *rxfree; | ||
32 | int num_chan; | ||
33 | bool has_soft_reset; | ||
34 | int min_packet_size; | ||
35 | u32 desc_mem_phys; | ||
36 | u32 desc_hw_addr; | ||
37 | int desc_mem_size; | ||
38 | int desc_align; | ||
39 | |||
40 | /* | ||
41 | * Some instances of embedded cpdma controllers have extra control and | ||
42 | * status registers. The following flag enables access to these | ||
43 | * "extended" registers. | ||
44 | */ | ||
45 | bool has_ext_regs; | ||
46 | }; | ||
47 | |||
48 | struct cpdma_chan_stats { | ||
49 | u32 head_enqueue; | ||
50 | u32 tail_enqueue; | ||
51 | u32 pad_enqueue; | ||
52 | u32 misqueued; | ||
53 | u32 desc_alloc_fail; | ||
54 | u32 pad_alloc_fail; | ||
55 | u32 runt_receive_buff; | ||
56 | u32 runt_transmit_buff; | ||
57 | u32 empty_dequeue; | ||
58 | u32 busy_dequeue; | ||
59 | u32 good_dequeue; | ||
60 | u32 requeue; | ||
61 | u32 teardown_dequeue; | ||
62 | }; | ||
63 | |||
64 | struct cpdma_ctlr; | ||
65 | struct cpdma_chan; | ||
66 | |||
67 | typedef void (*cpdma_handler_fn)(void *token, int len, int status); | ||
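For illustration, a completion handler matching this typedef might look like the following sketch (not in the patch); it assumes the token passed to cpdma_chan_submit() was an skb:

	/* Sketch only: a TX completion handler matching cpdma_handler_fn. */
	static void tx_done_sketch(void *token, int len, int status)
	{
		struct sk_buff *skb = token;	/* whatever submit() was given */

		dev_kfree_skb_any(skb);
	}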
68 | |||
69 | struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params); | ||
70 | int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr); | ||
71 | int cpdma_ctlr_start(struct cpdma_ctlr *ctlr); | ||
72 | int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr); | ||
73 | int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr); | ||
74 | |||
75 | struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num, | ||
76 | cpdma_handler_fn handler); | ||
77 | int cpdma_chan_destroy(struct cpdma_chan *chan); | ||
78 | int cpdma_chan_start(struct cpdma_chan *chan); | ||
79 | int cpdma_chan_stop(struct cpdma_chan *chan); | ||
80 | int cpdma_chan_dump(struct cpdma_chan *chan); | ||
81 | |||
82 | int cpdma_chan_get_stats(struct cpdma_chan *chan, | ||
83 | struct cpdma_chan_stats *stats); | ||
84 | int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data, | ||
85 | int len, gfp_t gfp_mask); | ||
86 | int cpdma_chan_process(struct cpdma_chan *chan, int quota); | ||
87 | |||
88 | int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable); | ||
89 | void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr); | ||
90 | int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable); | ||
91 | |||
92 | enum cpdma_control { | ||
93 | CPDMA_CMD_IDLE, /* write-only */ | ||
94 | CPDMA_COPY_ERROR_FRAMES, /* read-write */ | ||
95 | CPDMA_RX_OFF_LEN_UPDATE, /* read-write */ | ||
96 | CPDMA_RX_OWNERSHIP_FLIP, /* read-write */ | ||
97 | CPDMA_TX_PRIO_FIXED, /* read-write */ | ||
98 | CPDMA_STAT_IDLE, /* read-only */ | ||
99 | CPDMA_STAT_TX_ERR_CHAN, /* read-only */ | ||
100 | CPDMA_STAT_TX_ERR_CODE, /* read-only */ | ||
101 | CPDMA_STAT_RX_ERR_CHAN, /* read-only */ | ||
102 | CPDMA_STAT_RX_ERR_CODE, /* read-only */ | ||
103 | CPDMA_RX_BUFFER_OFFSET, /* read-write */ | ||
104 | }; | ||
105 | |||
106 | int cpdma_control_get(struct cpdma_ctlr *ctlr, int control); | ||
107 | int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value); | ||
108 | |||
109 | #endif | ||
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c new file mode 100644 index 000000000000..3f451e4d8361 --- /dev/null +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -0,0 +1,2047 @@ | |||
1 | /* | ||
2 | * DaVinci Ethernet Medium Access Controller | ||
3 | * | ||
4 | * DaVinci EMAC is based upon CPPI 3.0 TI DMA engine | ||
5 | * | ||
6 | * Copyright (C) 2009 Texas Instruments. | ||
7 | * | ||
8 | * --------------------------------------------------------------------------- | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
23 | * --------------------------------------------------------------------------- | ||
24 | * History: | ||
25 | * 0-5 A number of folks worked on this driver in bits and pieces but the major | ||
26 | * contribution came from Suraj Iyer and Anant Gole | ||
27 | * 6.0 Anant Gole - rewrote the driver as per Linux conventions | ||
28 | * 6.1 Chaithrika U S - added support for Gigabit and RMII features, | ||
29 | * PHY layer usage | ||
30 | */ | ||
31 | |||
32 | #include <linux/module.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/sched.h> | ||
35 | #include <linux/string.h> | ||
36 | #include <linux/timer.h> | ||
37 | #include <linux/errno.h> | ||
38 | #include <linux/in.h> | ||
39 | #include <linux/ioport.h> | ||
40 | #include <linux/slab.h> | ||
41 | #include <linux/mm.h> | ||
42 | #include <linux/interrupt.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <linux/netdevice.h> | ||
45 | #include <linux/etherdevice.h> | ||
46 | #include <linux/skbuff.h> | ||
47 | #include <linux/ethtool.h> | ||
48 | #include <linux/highmem.h> | ||
49 | #include <linux/proc_fs.h> | ||
50 | #include <linux/ctype.h> | ||
51 | #include <linux/spinlock.h> | ||
52 | #include <linux/dma-mapping.h> | ||
53 | #include <linux/clk.h> | ||
54 | #include <linux/platform_device.h> | ||
55 | #include <linux/semaphore.h> | ||
56 | #include <linux/phy.h> | ||
57 | #include <linux/bitops.h> | ||
58 | #include <linux/io.h> | ||
59 | #include <linux/uaccess.h> | ||
60 | #include <linux/davinci_emac.h> | ||
61 | |||
62 | #include <asm/irq.h> | ||
63 | #include <asm/page.h> | ||
64 | |||
65 | #include "davinci_cpdma.h" | ||
66 | |||
67 | static int debug_level; | ||
68 | module_param(debug_level, int, 0); | ||
69 | MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)"); | ||
70 | |||
71 | /* Netif debug messages possible */ | ||
72 | #define DAVINCI_EMAC_DEBUG (NETIF_MSG_DRV | \ | ||
73 | NETIF_MSG_PROBE | \ | ||
74 | NETIF_MSG_LINK | \ | ||
75 | NETIF_MSG_TIMER | \ | ||
76 | NETIF_MSG_IFDOWN | \ | ||
77 | NETIF_MSG_IFUP | \ | ||
78 | NETIF_MSG_RX_ERR | \ | ||
79 | NETIF_MSG_TX_ERR | \ | ||
80 | NETIF_MSG_TX_QUEUED | \ | ||
81 | NETIF_MSG_INTR | \ | ||
82 | NETIF_MSG_TX_DONE | \ | ||
83 | NETIF_MSG_RX_STATUS | \ | ||
84 | NETIF_MSG_PKTDATA | \ | ||
85 | NETIF_MSG_HW | \ | ||
86 | NETIF_MSG_WOL) | ||
87 | |||
88 | /* version info */ | ||
89 | #define EMAC_MAJOR_VERSION 6 | ||
90 | #define EMAC_MINOR_VERSION 1 | ||
91 | #define EMAC_MODULE_VERSION "6.1" | ||
92 | MODULE_VERSION(EMAC_MODULE_VERSION); | ||
93 | static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1"; | ||
94 | |||
95 | /* Configuration items */ | ||
96 | #define EMAC_DEF_PASS_CRC (0) /* Do not pass CRC up to frames */ | ||
97 | #define EMAC_DEF_QOS_EN (0) /* EMAC proprietary QoS disabled */ | ||
98 | #define EMAC_DEF_NO_BUFF_CHAIN (0) /* No buffer chain */ | ||
99 | #define EMAC_DEF_MACCTRL_FRAME_EN (0) /* Discard Maccontrol frames */ | ||
100 | #define EMAC_DEF_SHORT_FRAME_EN (0) /* Discard short frames */ | ||
101 | #define EMAC_DEF_ERROR_FRAME_EN (0) /* Discard error frames */ | ||
102 | #define EMAC_DEF_PROM_EN (0) /* Promiscuous disabled */ | ||
103 | #define EMAC_DEF_PROM_CH (0) /* Promiscuous channel is 0 */ | ||
104 | #define EMAC_DEF_BCAST_EN (1) /* Broadcast enabled */ | ||
105 | #define EMAC_DEF_BCAST_CH (0) /* Broadcast channel is 0 */ | ||
106 | #define EMAC_DEF_MCAST_EN (1) /* Multicast enabled */ | ||
107 | #define EMAC_DEF_MCAST_CH (0) /* Multicast channel is 0 */ | ||
108 | |||
109 | #define EMAC_DEF_TXPRIO_FIXED (1) /* TX Priority is fixed */ | ||
110 | #define EMAC_DEF_TXPACING_EN (0) /* TX pacing NOT supported*/ | ||
111 | |||
112 | #define EMAC_DEF_BUFFER_OFFSET (0) /* Buffer offset to DMA (future) */ | ||
113 | #define EMAC_DEF_MIN_ETHPKTSIZE (60) /* Minimum ethernet pkt size */ | ||
114 | #define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4) | ||
115 | #define EMAC_DEF_TX_CH (0) /* Default 0th channel */ | ||
116 | #define EMAC_DEF_RX_CH (0) /* Default 0th channel */ | ||
117 | #define EMAC_DEF_RX_NUM_DESC (128) | ||
118 | #define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */ | ||
119 | #define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */ | ||
120 | #define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */ | ||
121 | |||
122 | /* Buffer descriptor parameters */ | ||
123 | #define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */ | ||
124 | #define EMAC_DEF_RX_MAX_SERVICE (64) /* should = netdev->weight */ | ||
125 | |||
126 | /* EMAC register related defines */ | ||
127 | #define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF) | ||
128 | #define EMAC_NUM_MULTICAST_BITS (64) | ||
129 | #define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1) | ||
130 | #define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1) | ||
131 | #define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2) | ||
132 | #define EMAC_RX_UNICAST_CLEAR_ALL (0xFF) | ||
133 | #define EMAC_INT_MASK_CLEAR (0xFF) | ||
134 | |||
135 | /* RX MBP register bit positions */ | ||
136 | #define EMAC_RXMBP_PASSCRC_MASK BIT(30) | ||
137 | #define EMAC_RXMBP_QOSEN_MASK BIT(29) | ||
138 | #define EMAC_RXMBP_NOCHAIN_MASK BIT(28) | ||
139 | #define EMAC_RXMBP_CMFEN_MASK BIT(24) | ||
140 | #define EMAC_RXMBP_CSFEN_MASK BIT(23) | ||
141 | #define EMAC_RXMBP_CEFEN_MASK BIT(22) | ||
142 | #define EMAC_RXMBP_CAFEN_MASK BIT(21) | ||
143 | #define EMAC_RXMBP_PROMCH_SHIFT (16) | ||
144 | #define EMAC_RXMBP_PROMCH_MASK (0x7 << 16) | ||
145 | #define EMAC_RXMBP_BROADEN_MASK BIT(13) | ||
146 | #define EMAC_RXMBP_BROADCH_SHIFT (8) | ||
147 | #define EMAC_RXMBP_BROADCH_MASK (0x7 << 8) | ||
148 | #define EMAC_RXMBP_MULTIEN_MASK BIT(5) | ||
149 | #define EMAC_RXMBP_MULTICH_SHIFT (0) | ||
150 | #define EMAC_RXMBP_MULTICH_MASK (0x7) | ||
151 | #define EMAC_RXMBP_CHMASK (0x7) | ||
152 | |||
153 | /* EMAC register definitions/bit maps used */ | ||
154 | # define EMAC_MBP_RXPROMISC (0x00200000) | ||
155 | # define EMAC_MBP_PROMISCCH(ch) (((ch) & 0x7) << 16) | ||
156 | # define EMAC_MBP_RXBCAST (0x00002000) | ||
157 | # define EMAC_MBP_BCASTCHAN(ch) (((ch) & 0x7) << 8) | ||
158 | # define EMAC_MBP_RXMCAST (0x00000020) | ||
159 | # define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7) | ||
160 | |||
161 | /* EMAC mac_control register */ | ||
162 | #define EMAC_MACCONTROL_TXPTYPE BIT(9) | ||
163 | #define EMAC_MACCONTROL_TXPACEEN BIT(6) | ||
164 | #define EMAC_MACCONTROL_GMIIEN BIT(5) | ||
165 | #define EMAC_MACCONTROL_GIGABITEN BIT(7) | ||
166 | #define EMAC_MACCONTROL_FULLDUPLEXEN BIT(0) | ||
167 | #define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15) | ||
168 | |||
169 | /* GIGABIT MODE related bits */ | ||
170 | #define EMAC_DM646X_MACCONTORL_GIG BIT(7) | ||
171 | #define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17) | ||
172 | |||
173 | /* EMAC mac_status register */ | ||
174 | #define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000) | ||
175 | #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20) | ||
176 | #define EMAC_MACSTATUS_TXERRCH_MASK (0x7) | ||
177 | #define EMAC_MACSTATUS_TXERRCH_SHIFT (16) | ||
178 | #define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000) | ||
179 | #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12) | ||
180 | #define EMAC_MACSTATUS_RXERRCH_MASK (0x7) | ||
181 | #define EMAC_MACSTATUS_RXERRCH_SHIFT (8) | ||
182 | |||
183 | /* EMAC RX register masks */ | ||
184 | #define EMAC_RX_MAX_LEN_MASK (0xFFFF) | ||
185 | #define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF) | ||
186 | |||
187 | /* MAC_IN_VECTOR (0x180) register bit fields */ | ||
188 | #define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT BIT(17) | ||
189 | #define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16) | ||
190 | #define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC BIT(8) | ||
191 | #define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC BIT(0) | ||
192 | |||
193 | /* NOTE: For DM646x the IN_VECTOR has changed */ | ||
194 | #define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH) | ||
195 | #define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH) | ||
196 | #define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26) | ||
197 | #define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27) | ||
198 | |||
199 | /* CPPI bit positions */ | ||
200 | #define EMAC_CPPI_SOP_BIT BIT(31) | ||
201 | #define EMAC_CPPI_EOP_BIT BIT(30) | ||
202 | #define EMAC_CPPI_OWNERSHIP_BIT BIT(29) | ||
203 | #define EMAC_CPPI_EOQ_BIT BIT(28) | ||
204 | #define EMAC_CPPI_TEARDOWN_COMPLETE_BIT BIT(27) | ||
205 | #define EMAC_CPPI_PASS_CRC_BIT BIT(26) | ||
206 | #define EMAC_RX_BD_BUF_SIZE (0xFFFF) | ||
207 | #define EMAC_BD_LENGTH_FOR_CACHE (16) /* only CPPI bytes */ | ||
208 | #define EMAC_RX_BD_PKT_LENGTH_MASK (0xFFFF) | ||
209 | |||
210 | /* Max hardware defines */ | ||
211 | #define EMAC_MAX_TXRX_CHANNELS (8) /* Max hardware channels */ | ||
212 | #define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */ | ||
213 | |||
214 | /* EMAC Peripheral Device Register Memory Layout structure */ | ||
215 | #define EMAC_MACINVECTOR 0x90 | ||
216 | |||
217 | #define EMAC_DM646X_MACEOIVECTOR 0x94 | ||
218 | |||
219 | #define EMAC_MACINTSTATRAW 0xB0 | ||
220 | #define EMAC_MACINTSTATMASKED 0xB4 | ||
221 | #define EMAC_MACINTMASKSET 0xB8 | ||
222 | #define EMAC_MACINTMASKCLEAR 0xBC | ||
223 | |||
224 | #define EMAC_RXMBPENABLE 0x100 | ||
225 | #define EMAC_RXUNICASTSET 0x104 | ||
226 | #define EMAC_RXUNICASTCLEAR 0x108 | ||
227 | #define EMAC_RXMAXLEN 0x10C | ||
228 | #define EMAC_RXBUFFEROFFSET 0x110 | ||
229 | #define EMAC_RXFILTERLOWTHRESH 0x114 | ||
230 | |||
231 | #define EMAC_MACCONTROL 0x160 | ||
232 | #define EMAC_MACSTATUS 0x164 | ||
233 | #define EMAC_EMCONTROL 0x168 | ||
234 | #define EMAC_FIFOCONTROL 0x16C | ||
235 | #define EMAC_MACCONFIG 0x170 | ||
236 | #define EMAC_SOFTRESET 0x174 | ||
237 | #define EMAC_MACSRCADDRLO 0x1D0 | ||
238 | #define EMAC_MACSRCADDRHI 0x1D4 | ||
239 | #define EMAC_MACHASH1 0x1D8 | ||
240 | #define EMAC_MACHASH2 0x1DC | ||
241 | #define EMAC_MACADDRLO 0x500 | ||
242 | #define EMAC_MACADDRHI 0x504 | ||
243 | #define EMAC_MACINDEX 0x508 | ||
244 | |||
245 | /* EMAC statistics registers */ | ||
246 | #define EMAC_RXGOODFRAMES 0x200 | ||
247 | #define EMAC_RXBCASTFRAMES 0x204 | ||
248 | #define EMAC_RXMCASTFRAMES 0x208 | ||
249 | #define EMAC_RXPAUSEFRAMES 0x20C | ||
250 | #define EMAC_RXCRCERRORS 0x210 | ||
251 | #define EMAC_RXALIGNCODEERRORS 0x214 | ||
252 | #define EMAC_RXOVERSIZED 0x218 | ||
253 | #define EMAC_RXJABBER 0x21C | ||
254 | #define EMAC_RXUNDERSIZED 0x220 | ||
255 | #define EMAC_RXFRAGMENTS 0x224 | ||
256 | #define EMAC_RXFILTERED 0x228 | ||
257 | #define EMAC_RXQOSFILTERED 0x22C | ||
258 | #define EMAC_RXOCTETS 0x230 | ||
259 | #define EMAC_TXGOODFRAMES 0x234 | ||
260 | #define EMAC_TXBCASTFRAMES 0x238 | ||
261 | #define EMAC_TXMCASTFRAMES 0x23C | ||
262 | #define EMAC_TXPAUSEFRAMES 0x240 | ||
263 | #define EMAC_TXDEFERRED 0x244 | ||
264 | #define EMAC_TXCOLLISION 0x248 | ||
265 | #define EMAC_TXSINGLECOLL 0x24C | ||
266 | #define EMAC_TXMULTICOLL 0x250 | ||
267 | #define EMAC_TXEXCESSIVECOLL 0x254 | ||
268 | #define EMAC_TXLATECOLL 0x258 | ||
269 | #define EMAC_TXUNDERRUN 0x25C | ||
270 | #define EMAC_TXCARRIERSENSE 0x260 | ||
271 | #define EMAC_TXOCTETS 0x264 | ||
272 | #define EMAC_NETOCTETS 0x280 | ||
273 | #define EMAC_RXSOFOVERRUNS 0x284 | ||
274 | #define EMAC_RXMOFOVERRUNS 0x288 | ||
275 | #define EMAC_RXDMAOVERRUNS 0x28C | ||
276 | |||
277 | /* EMAC DM644x control registers */ | ||
278 | #define EMAC_CTRL_EWCTL (0x4) | ||
279 | #define EMAC_CTRL_EWINTTCNT (0x8) | ||
280 | |||
281 | /* EMAC DM644x control module masks */ | ||
282 | #define EMAC_DM644X_EWINTCNT_MASK 0x1FFFF | ||
283 | #define EMAC_DM644X_INTMIN_INTVL 0x1 | ||
284 | #define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK) | ||
285 | |||
286 | /* EMAC DM646X control module registers */ | ||
287 | #define EMAC_DM646X_CMINTCTRL 0x0C | ||
288 | #define EMAC_DM646X_CMRXINTEN 0x14 | ||
289 | #define EMAC_DM646X_CMTXINTEN 0x18 | ||
290 | #define EMAC_DM646X_CMRXINTMAX 0x70 | ||
291 | #define EMAC_DM646X_CMTXINTMAX 0x74 | ||
292 | |||
293 | /* EMAC DM646X control module masks */ | ||
294 | #define EMAC_DM646X_INTPACEEN (0x3 << 16) | ||
295 | #define EMAC_DM646X_INTPRESCALE_MASK (0x7FF << 0) | ||
296 | #define EMAC_DM646X_CMINTMAX_CNT 63 | ||
297 | #define EMAC_DM646X_CMINTMIN_CNT 2 | ||
298 | #define EMAC_DM646X_CMINTMAX_INTVL (1000 / EMAC_DM646X_CMINTMIN_CNT) | ||
299 | #define EMAC_DM646X_CMINTMIN_INTVL ((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1) | ||
300 | |||
301 | |||
302 | /* EMAC EOI codes for C0 */ | ||
303 | #define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01) | ||
304 | #define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02) | ||
305 | |||
306 | /* EMAC Stats Clear Mask */ | ||
307 | #define EMAC_STATS_CLR_MASK (0xFFFFFFFF) | ||
308 | |||
309 | /* emac_priv: EMAC private data structure | ||
310 | * | ||
311 | * EMAC adapter private data structure | ||
312 | */ | ||
313 | struct emac_priv { | ||
314 | u32 msg_enable; | ||
315 | struct net_device *ndev; | ||
316 | struct platform_device *pdev; | ||
317 | struct napi_struct napi; | ||
318 | char mac_addr[6]; | ||
319 | void __iomem *remap_addr; | ||
320 | u32 emac_base_phys; | ||
321 | void __iomem *emac_base; | ||
322 | void __iomem *ctrl_base; | ||
323 | struct cpdma_ctlr *dma; | ||
324 | struct cpdma_chan *txchan; | ||
325 | struct cpdma_chan *rxchan; | ||
326 | u32 link; /* 1=link on, 0=link off */ | ||
327 | u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */ | ||
328 | u32 duplex; /* Link duplex: 0=Half, 1=Full */ | ||
329 | u32 rx_buf_size; | ||
330 | u32 isr_count; | ||
331 | u32 coal_intvl; | ||
332 | u32 bus_freq_mhz; | ||
333 | u8 rmii_en; | ||
334 | u8 version; | ||
335 | u32 mac_hash1; | ||
336 | u32 mac_hash2; | ||
337 | u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; | ||
338 | u32 rx_addr_type; | ||
339 | const char *phy_id; | ||
340 | struct phy_device *phydev; | ||
341 | spinlock_t lock; | ||
342 | /*platform specific members*/ | ||
343 | void (*int_enable) (void); | ||
344 | void (*int_disable) (void); | ||
345 | }; | ||
346 | |||
347 | /* clock frequency for EMAC */ | ||
348 | static struct clk *emac_clk; | ||
349 | static unsigned long emac_bus_frequency; | ||
350 | |||
351 | /* EMAC TX Host Error description strings */ | ||
352 | static char *emac_txhost_errcodes[16] = { | ||
353 | "No error", "SOP error", "Ownership bit not set in SOP buffer", | ||
354 | "Zero Next Buffer Descriptor Pointer Without EOP", | ||
355 | "Zero Buffer Pointer", "Zero Buffer Length", "Packet Length Error", | ||
356 | "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", | ||
357 | "Reserved", "Reserved", "Reserved", "Reserved" | ||
358 | }; | ||
359 | |||
360 | /* EMAC RX Host Error description strings */ | ||
361 | static char *emac_rxhost_errcodes[16] = { | ||
362 | "No error", "Reserved", "Ownership bit not set in input buffer", | ||
363 | "Reserved", "Zero Buffer Pointer", "Reserved", "Reserved", | ||
364 | "Reserved", "Reserved", "Reserved", "Reserved", "Reserved", | ||
365 | "Reserved", "Reserved", "Reserved", "Reserved" | ||
366 | }; | ||
367 | |||
368 | /* Helper macros */ | ||
369 | #define emac_read(reg) ioread32(priv->emac_base + (reg)) | ||
370 | #define emac_write(reg, val) iowrite32(val, priv->emac_base + (reg)) | ||
371 | |||
372 | #define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg))) | ||
373 | #define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg))) | ||
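As a usage note (sketch only): these macros assume a local variable named priv is in scope, so a soft-reset helper built on them might read as below, assuming the SOFTRESET bit is self-clearing as on this EMAC IP:

	/* Sketch only: soft reset via the accessor macros above. */
	static void emac_reset_sketch(struct emac_priv *priv)
	{
		emac_write(EMAC_SOFTRESET, 1);
		while (emac_read(EMAC_SOFTRESET))
			cpu_relax();
	}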
374 | |||
375 | /** | ||
376 | * emac_dump_regs: Dump important EMAC registers to debug terminal | ||
377 | * @priv: The DaVinci EMAC private adapter structure | ||
378 | * | ||
379 | * Dumps the important EMAC configuration and statistics registers | ||
380 | * | ||
381 | */ | ||
382 | static void emac_dump_regs(struct emac_priv *priv) | ||
383 | { | ||
384 | struct device *emac_dev = &priv->ndev->dev; | ||
385 | |||
386 | /* Print important registers in EMAC */ | ||
387 | dev_info(emac_dev, "EMAC Basic registers\n"); | ||
388 | if (priv->version == EMAC_VERSION_1) { | ||
389 | dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n", | ||
390 | emac_ctrl_read(EMAC_CTRL_EWCTL), | ||
391 | emac_ctrl_read(EMAC_CTRL_EWINTTCNT)); | ||
392 | } | ||
393 | dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n", | ||
394 | emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL)); | ||
395 | dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\ | ||
396 | "RXMaxLen=%08X\n", emac_read(EMAC_RXMBPENABLE), | ||
397 | emac_read(EMAC_RXUNICASTSET), emac_read(EMAC_RXMAXLEN)); | ||
398 | dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\ | ||
399 | "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL), | ||
400 | emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG)); | ||
401 | dev_info(emac_dev, "EMAC Statistics\n"); | ||
402 | dev_info(emac_dev, "EMAC: rx_good_frames:%d\n", | ||
403 | emac_read(EMAC_RXGOODFRAMES)); | ||
404 | dev_info(emac_dev, "EMAC: rx_broadcast_frames:%d\n", | ||
405 | emac_read(EMAC_RXBCASTFRAMES)); | ||
406 | dev_info(emac_dev, "EMAC: rx_multicast_frames:%d\n", | ||
407 | emac_read(EMAC_RXMCASTFRAMES)); | ||
408 | dev_info(emac_dev, "EMAC: rx_pause_frames:%d\n", | ||
409 | emac_read(EMAC_RXPAUSEFRAMES)); | ||
410 | dev_info(emac_dev, "EMAC: rx_crcerrors:%d\n", | ||
411 | emac_read(EMAC_RXCRCERRORS)); | ||
412 | dev_info(emac_dev, "EMAC: rx_align_code_errors:%d\n", | ||
413 | emac_read(EMAC_RXALIGNCODEERRORS)); | ||
414 | dev_info(emac_dev, "EMAC: rx_oversized_frames:%d\n", | ||
415 | emac_read(EMAC_RXOVERSIZED)); | ||
416 | dev_info(emac_dev, "EMAC: rx_jabber_frames:%d\n", | ||
417 | emac_read(EMAC_RXJABBER)); | ||
418 | dev_info(emac_dev, "EMAC: rx_undersized_frames:%d\n", | ||
419 | emac_read(EMAC_RXUNDERSIZED)); | ||
420 | dev_info(emac_dev, "EMAC: rx_fragments:%d\n", | ||
421 | emac_read(EMAC_RXFRAGMENTS)); | ||
422 | dev_info(emac_dev, "EMAC: rx_filtered_frames:%d\n", | ||
423 | emac_read(EMAC_RXFILTERED)); | ||
424 | dev_info(emac_dev, "EMAC: rx_qos_filtered_frames:%d\n", | ||
425 | emac_read(EMAC_RXQOSFILTERED)); | ||
426 | dev_info(emac_dev, "EMAC: rx_octets:%d\n", | ||
427 | emac_read(EMAC_RXOCTETS)); | ||
428 | dev_info(emac_dev, "EMAC: tx_goodframes:%d\n", | ||
429 | emac_read(EMAC_TXGOODFRAMES)); | ||
430 | dev_info(emac_dev, "EMAC: tx_bcastframes:%d\n", | ||
431 | emac_read(EMAC_TXBCASTFRAMES)); | ||
432 | dev_info(emac_dev, "EMAC: tx_mcastframes:%d\n", | ||
433 | emac_read(EMAC_TXMCASTFRAMES)); | ||
434 | dev_info(emac_dev, "EMAC: tx_pause_frames:%d\n", | ||
435 | emac_read(EMAC_TXPAUSEFRAMES)); | ||
436 | dev_info(emac_dev, "EMAC: tx_deferred_frames:%d\n", | ||
437 | emac_read(EMAC_TXDEFERRED)); | ||
438 | dev_info(emac_dev, "EMAC: tx_collision_frames:%d\n", | ||
439 | emac_read(EMAC_TXCOLLISION)); | ||
440 | dev_info(emac_dev, "EMAC: tx_single_coll_frames:%d\n", | ||
441 | emac_read(EMAC_TXSINGLECOLL)); | ||
442 | dev_info(emac_dev, "EMAC: tx_mult_coll_frames:%d\n", | ||
443 | emac_read(EMAC_TXMULTICOLL)); | ||
444 | dev_info(emac_dev, "EMAC: tx_excessive_collisions:%d\n", | ||
445 | emac_read(EMAC_TXEXCESSIVECOLL)); | ||
446 | dev_info(emac_dev, "EMAC: tx_late_collisions:%d\n", | ||
447 | emac_read(EMAC_TXLATECOLL)); | ||
448 | dev_info(emac_dev, "EMAC: tx_underrun:%d\n", | ||
449 | emac_read(EMAC_TXUNDERRUN)); | ||
450 | dev_info(emac_dev, "EMAC: tx_carrier_sense_errors:%d\n", | ||
451 | emac_read(EMAC_TXCARRIERSENSE)); | ||
452 | dev_info(emac_dev, "EMAC: tx_octets:%d\n", | ||
453 | emac_read(EMAC_TXOCTETS)); | ||
454 | dev_info(emac_dev, "EMAC: net_octets:%d\n", | ||
455 | emac_read(EMAC_NETOCTETS)); | ||
456 | dev_info(emac_dev, "EMAC: rx_sof_overruns:%d\n", | ||
457 | emac_read(EMAC_RXSOFOVERRUNS)); | ||
458 | dev_info(emac_dev, "EMAC: rx_mof_overruns:%d\n", | ||
459 | emac_read(EMAC_RXMOFOVERRUNS)); | ||
460 | dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n", | ||
461 | emac_read(EMAC_RXDMAOVERRUNS)); | ||
462 | |||
463 | cpdma_ctlr_dump(priv->dma); | ||
464 | } | ||
465 | |||
466 | /** | ||
467 | * emac_get_drvinfo: Get EMAC driver information | ||
468 | * @ndev: The DaVinci EMAC network adapter | ||
469 | * @info: ethtool info structure containing name and version | ||
470 | * | ||
471 | * Returns EMAC driver information (name and version) | ||
472 | * | ||
473 | */ | ||
474 | static void emac_get_drvinfo(struct net_device *ndev, | ||
475 | struct ethtool_drvinfo *info) | ||
476 | { | ||
477 | strcpy(info->driver, emac_version_string); | ||
478 | strcpy(info->version, EMAC_MODULE_VERSION); | ||
479 | } | ||
480 | |||
481 | /** | ||
482 | * emac_get_settings: Get EMAC settings | ||
483 | * @ndev: The DaVinci EMAC network adapter | ||
484 | * @ecmd: ethtool command | ||
485 | * | ||
486 | * Executes the ethtool get settings command | ||
487 | * | ||
488 | */ | ||
489 | static int emac_get_settings(struct net_device *ndev, | ||
490 | struct ethtool_cmd *ecmd) | ||
491 | { | ||
492 | struct emac_priv *priv = netdev_priv(ndev); | ||
493 | if (priv->phydev) | ||
494 | return phy_ethtool_gset(priv->phydev, ecmd); | ||
495 | else | ||
496 | return -EOPNOTSUPP; | ||
497 | |||
498 | } | ||
499 | |||
500 | /** | ||
501 | * emac_set_settings: Set EMAC settings | ||
502 | * @ndev: The DaVinci EMAC network adapter | ||
503 | * @ecmd: ethtool command | ||
504 | * | ||
505 | * Executes the ethtool set settings command | ||
506 | * | ||
507 | */ | ||
508 | static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd) | ||
509 | { | ||
510 | struct emac_priv *priv = netdev_priv(ndev); | ||
511 | if (priv->phydev) | ||
512 | return phy_ethtool_sset(priv->phydev, ecmd); | ||
513 | else | ||
514 | return -EOPNOTSUPP; | ||
515 | |||
516 | } | ||
517 | |||
518 | /** | ||
519 | * emac_get_coalesce : Get interrupt coalesce settings for this device | ||
520 | * @ndev : The DaVinci EMAC network adapter | ||
521 | * @coal : ethtool coalesce settings structure | ||
522 | * | ||
523 | * Fetch the current interrupt coalesce settings | ||
524 | * | ||
525 | */ | ||
526 | static int emac_get_coalesce(struct net_device *ndev, | ||
527 | struct ethtool_coalesce *coal) | ||
528 | { | ||
529 | struct emac_priv *priv = netdev_priv(ndev); | ||
530 | |||
531 | coal->rx_coalesce_usecs = priv->coal_intvl; | ||
532 | return 0; | ||
533 | |||
534 | } | ||
535 | |||
536 | /** | ||
537 | * emac_set_coalesce : Set interrupt coalesce settings for this device | ||
538 | * @ndev : The DaVinci EMAC network adapter | ||
539 | * @coal : ethtool coalesce settings structure | ||
540 | * | ||
541 | * Set interrupt coalesce parameters | ||
542 | * | ||
543 | */ | ||
544 | static int emac_set_coalesce(struct net_device *ndev, | ||
545 | struct ethtool_coalesce *coal) | ||
546 | { | ||
547 | struct emac_priv *priv = netdev_priv(ndev); | ||
548 | u32 int_ctrl, num_interrupts = 0; | ||
549 | u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0; | ||
550 | |||
551 | if (!coal->rx_coalesce_usecs) | ||
552 | return -EINVAL; | ||
553 | |||
554 | coal_intvl = coal->rx_coalesce_usecs; | ||
555 | |||
556 | switch (priv->version) { | ||
557 | case EMAC_VERSION_2: | ||
558 | int_ctrl = emac_ctrl_read(EMAC_DM646X_CMINTCTRL); | ||
559 | prescale = priv->bus_freq_mhz * 4; | ||
560 | |||
561 | if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL) | ||
562 | coal_intvl = EMAC_DM646X_CMINTMIN_INTVL; | ||
563 | |||
564 | if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) { | ||
565 | /* | ||
566 | * Interrupt pacer works with 4us Pulse, we can | ||
567 | * throttle further by dilating the 4us pulse. | ||
568 | */ | ||
569 | addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale; | ||
570 | |||
571 | if (addnl_dvdr > 1) { | ||
572 | prescale *= addnl_dvdr; | ||
573 | if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL | ||
574 | * addnl_dvdr)) | ||
575 | coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL | ||
576 | * addnl_dvdr); | ||
577 | } else { | ||
578 | addnl_dvdr = 1; | ||
579 | coal_intvl = EMAC_DM646X_CMINTMAX_INTVL; | ||
580 | } | ||
581 | } | ||
582 | |||
583 | num_interrupts = (1000 * addnl_dvdr) / coal_intvl; | ||
584 | |||
585 | int_ctrl |= EMAC_DM646X_INTPACEEN; | ||
586 | int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK); | ||
587 | int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK); | ||
588 | emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl); | ||
589 | |||
590 | emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts); | ||
591 | emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts); | ||
592 | |||
593 | break; | ||
594 | default: | ||
595 | int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT); | ||
596 | int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK); | ||
597 | prescale = coal_intvl * priv->bus_freq_mhz; | ||
598 | if (prescale > EMAC_DM644X_EWINTCNT_MASK) { | ||
599 | prescale = EMAC_DM644X_EWINTCNT_MASK; | ||
600 | coal_intvl = prescale / priv->bus_freq_mhz; | ||
601 | } | ||
602 | emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale)); | ||
603 | |||
604 | break; | ||
605 | } | ||
606 | |||
607 | printk(KERN_INFO "Set coalesce to %d usecs.\n", coal_intvl); | ||
608 | priv->coal_intvl = coal_intvl; | ||
609 | |||
610 | return 0; | ||
611 | |||
612 | } | ||
613 | |||
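
A worked example of the DM646x pacing arithmetic above may help. The following is a minimal standalone sketch in plain C, with assumed values (a 150 MHz bus clock, a requested interval of 500 usecs) and placeholder limits standing in for the EMAC_DM646X_* constants; it mirrors only the prescale/num_interrupts computation and omits the register writes:

#include <stdio.h>

/* placeholder limits standing in for the EMAC_DM646X_* constants */
#define CMINTMIN_INTVL    2      /* assumed minimum interval, usecs */
#define CMINTMAX_INTVL    250    /* assumed maximum interval, usecs */
#define INTPRESCALE_MASK  0x7ff  /* assumed prescale field mask */

int main(void)
{
	unsigned int bus_freq_mhz = 150;          /* assumed bus clock */
	unsigned int coal_intvl = 500;            /* requested usecs */
	unsigned int prescale = bus_freq_mhz * 4; /* ticks per 4us pulse */
	unsigned int addnl_dvdr = 1, num_interrupts;

	if (coal_intvl < CMINTMIN_INTVL)
		coal_intvl = CMINTMIN_INTVL;
	if (coal_intvl > CMINTMAX_INTVL) {
		/* dilate the 4us pulse, exactly as the driver does */
		addnl_dvdr = INTPRESCALE_MASK / prescale;
		if (addnl_dvdr > 1) {
			prescale *= addnl_dvdr;
			if (coal_intvl > CMINTMAX_INTVL * addnl_dvdr)
				coal_intvl = CMINTMAX_INTVL * addnl_dvdr;
		} else {
			addnl_dvdr = 1;
			coal_intvl = CMINTMAX_INTVL;
		}
	}
	num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
	printf("prescale=%u pace-count=%u effective-interval=%uus\n",
	       prescale, num_interrupts, coal_intvl);
	return 0;
}

With these inputs the sketch settles on prescale=1800 and pace-count=6, i.e. the 500 usec request fits once the 4us pulse has been dilated by a factor of three.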
614 | |||
615 | /** | ||
616 | * ethtool_ops: DaVinci EMAC Ethtool structure | ||
617 | * | ||
618 | * Ethtool support for EMAC adapter | ||
619 | * | ||
620 | */ | ||
621 | static const struct ethtool_ops ethtool_ops = { | ||
622 | .get_drvinfo = emac_get_drvinfo, | ||
623 | .get_settings = emac_get_settings, | ||
624 | .set_settings = emac_set_settings, | ||
625 | .get_link = ethtool_op_get_link, | ||
626 | .get_coalesce = emac_get_coalesce, | ||
627 | .set_coalesce = emac_set_coalesce, | ||
628 | }; | ||
629 | |||
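
These ops are reached from userspace through the SIOCETHTOOL ioctl; the ethtool utility (e.g. `ethtool -C eth0 rx-usecs 250`) takes the same path into emac_set_coalesce. As a usage illustration, here is a minimal query of the link settings served by emac_get_settings, assuming a hypothetical interface name eth0:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed name */
	ifr.ifr_data = (void *)&ecmd;

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}
	printf("speed=%u duplex=%s\n", (unsigned)ecmd.speed,
	       ecmd.duplex == DUPLEX_FULL ? "full" : "half");
	close(fd);
	return 0;
}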
630 | /** | ||
631 | * emac_update_phystatus: Update Phy status | ||
632 | * @priv: The DaVinci EMAC private adapter structure | ||
633 | * | ||
634 | * Updates phy status and takes action for network queue if required | ||
635 | * based upon link status | ||
636 | * | ||
637 | */ | ||
638 | static void emac_update_phystatus(struct emac_priv *priv) | ||
639 | { | ||
640 | u32 mac_control; | ||
641 | u32 new_duplex; | ||
642 | u32 cur_duplex; | ||
643 | struct net_device *ndev = priv->ndev; | ||
644 | |||
645 | mac_control = emac_read(EMAC_MACCONTROL); | ||
646 | cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ? | ||
647 | DUPLEX_FULL : DUPLEX_HALF; | ||
648 | if (priv->phydev) | ||
649 | new_duplex = priv->phydev->duplex; | ||
650 | else | ||
651 | new_duplex = DUPLEX_FULL; | ||
652 | |||
653 | /* We get called only if link has changed (speed/duplex/status) */ | ||
654 | if ((priv->link) && (new_duplex != cur_duplex)) { | ||
655 | priv->duplex = new_duplex; | ||
656 | if (DUPLEX_FULL == priv->duplex) | ||
657 | mac_control |= (EMAC_MACCONTROL_FULLDUPLEXEN); | ||
658 | else | ||
659 | mac_control &= ~(EMAC_MACCONTROL_FULLDUPLEXEN); | ||
660 | } | ||
661 | |||
662 | if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) { | ||
663 | mac_control = emac_read(EMAC_MACCONTROL); | ||
664 | mac_control |= (EMAC_DM646X_MACCONTORL_GIG | | ||
665 | EMAC_DM646X_MACCONTORL_GIGFORCE); | ||
666 | } else { | ||
667 | /* Clear the GIG bit and GIGFORCE bit */ | ||
668 | mac_control &= ~(EMAC_DM646X_MACCONTORL_GIGFORCE | | ||
669 | EMAC_DM646X_MACCONTORL_GIG); | ||
670 | |||
671 | if (priv->rmii_en && (priv->speed == SPEED_100)) | ||
672 | mac_control |= EMAC_MACCONTROL_RMIISPEED_MASK; | ||
673 | else | ||
674 | mac_control &= ~EMAC_MACCONTROL_RMIISPEED_MASK; | ||
675 | } | ||
676 | |||
677 | /* Update mac_control if changed */ | ||
678 | emac_write(EMAC_MACCONTROL, mac_control); | ||
679 | |||
680 | if (priv->link) { | ||
681 | /* link ON */ | ||
682 | if (!netif_carrier_ok(ndev)) | ||
683 | netif_carrier_on(ndev); | ||
684 | /* reactivate the transmit queue if it is stopped */ | ||
685 | if (netif_running(ndev) && netif_queue_stopped(ndev)) | ||
686 | netif_wake_queue(ndev); | ||
687 | } else { | ||
688 | /* link OFF */ | ||
689 | if (netif_carrier_ok(ndev)) | ||
690 | netif_carrier_off(ndev); | ||
691 | if (!netif_queue_stopped(ndev)) | ||
692 | netif_stop_queue(ndev); | ||
693 | } | ||
694 | } | ||
695 | |||
696 | /** | ||
697 | * hash_get: Calculate hash value from mac address | ||
698 | * @addr: mac address to hash | ||
699 | * | ||
700 | * Calculates hash value from mac address | ||
701 | * | ||
702 | */ | ||
703 | static u32 hash_get(u8 *addr) | ||
704 | { | ||
705 | u32 hash; | ||
706 | u8 tmpval; | ||
707 | int cnt; | ||
708 | hash = 0; | ||
709 | |||
710 | for (cnt = 0; cnt < 2; cnt++) { | ||
711 | tmpval = *addr++; | ||
712 | hash ^= (tmpval >> 2) ^ (tmpval << 4); | ||
713 | tmpval = *addr++; | ||
714 | hash ^= (tmpval >> 4) ^ (tmpval << 2); | ||
715 | tmpval = *addr++; | ||
716 | hash ^= (tmpval >> 6) ^ (tmpval); | ||
717 | } | ||
718 | |||
719 | return hash & 0x3F; | ||
720 | } | ||
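
The two loop iterations above consume all six octets of the address (three per pass), folding them into a 6-bit bucket that selects one bit across the MACHASH1/MACHASH2 register pair. A standalone sketch of the same fold, using the well-known L2 mapping of the IPv4 all-hosts group 224.0.0.1 (01:00:5e:00:00:01) as sample input:

#include <stdio.h>

/* same fold as hash_get(): six octets -> 6-bit hash bucket */
static unsigned int hash6(const unsigned char *addr)
{
	unsigned int hash = 0;
	int cnt;

	for (cnt = 0; cnt < 2; cnt++) {
		unsigned char t = *addr++;
		hash ^= (t >> 2) ^ (t << 4);
		t = *addr++;
		hash ^= (t >> 4) ^ (t << 2);
		t = *addr++;
		hash ^= (t >> 6) ^ t;
	}
	return hash & 0x3f;
}

int main(void)
{
	unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int bucket = hash6(mac);

	printf("bucket=%u -> MACHASH%c bit %u\n", bucket,
	       bucket < 32 ? '1' : '2', bucket % 32);
	return 0;
}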
721 | |||
722 | /** | ||
723 | * hash_add: Hash function to add mac addr to hash table | ||
724 | * @priv: The DaVinci EMAC private adapter structure | ||
725 | * @mac_addr: mac address to add to hash table | ||
726 | * | ||
727 | * Adds mac address to the internal hash table | ||
728 | * | ||
729 | */ | ||
730 | static int hash_add(struct emac_priv *priv, u8 *mac_addr) | ||
731 | { | ||
732 | struct device *emac_dev = &priv->ndev->dev; | ||
733 | u32 rc = 0; | ||
734 | u32 hash_bit; | ||
735 | u32 hash_value = hash_get(mac_addr); | ||
736 | |||
737 | if (hash_value >= EMAC_NUM_MULTICAST_BITS) { | ||
738 | if (netif_msg_drv(priv)) { | ||
739 | dev_err(emac_dev, "DaVinci EMAC: hash_add(): Invalid "\ | ||
740 | "Hash %08x, should not be greater than %08x", | ||
741 | hash_value, (EMAC_NUM_MULTICAST_BITS - 1)); | ||
742 | } | ||
743 | return -1; | ||
744 | } | ||
745 | |||
746 | /* set the hash bit only if not previously set */ | ||
747 | if (priv->multicast_hash_cnt[hash_value] == 0) { | ||
748 | rc = 1; /* hash value changed */ | ||
749 | if (hash_value < 32) { | ||
750 | hash_bit = BIT(hash_value); | ||
751 | priv->mac_hash1 |= hash_bit; | ||
752 | } else { | ||
753 | hash_bit = BIT((hash_value - 32)); | ||
754 | priv->mac_hash2 |= hash_bit; | ||
755 | } | ||
756 | } | ||
757 | |||
758 | /* incr counter for num of mcast addr's mapped to "this" hash bit */ | ||
759 | ++priv->multicast_hash_cnt[hash_value]; | ||
760 | |||
761 | return rc; | ||
762 | } | ||
763 | |||
764 | /** | ||
765 | * hash_del: Hash function to delete mac addr from hash table | ||
766 | * @priv: The DaVinci EMAC private adapter structure | ||
767 | * @mac_addr: mac address to delete from hash table | ||
768 | * | ||
769 | * Removes mac address from the internal hash table | ||
770 | * | ||
771 | */ | ||
772 | static int hash_del(struct emac_priv *priv, u8 *mac_addr) | ||
773 | { | ||
774 | u32 hash_value; | ||
775 | u32 hash_bit; | ||
776 | |||
777 | hash_value = hash_get(mac_addr); | ||
778 | if (priv->multicast_hash_cnt[hash_value] > 0) { | ||
779 | /* dec cntr for num of mcast addr's mapped to this hash bit */ | ||
780 | --priv->multicast_hash_cnt[hash_value]; | ||
781 | } | ||
782 | |||
783 | /* if counter still > 0, at least one multicast address refers | ||
784 | * to this hash bit. so return 0 */ | ||
785 | if (priv->multicast_hash_cnt[hash_value] > 0) | ||
786 | return 0; | ||
787 | |||
788 | if (hash_value < 32) { | ||
789 | hash_bit = BIT(hash_value); | ||
790 | priv->mac_hash1 &= ~hash_bit; | ||
791 | } else { | ||
792 | hash_bit = BIT((hash_value - 32)); | ||
793 | priv->mac_hash2 &= ~hash_bit; | ||
794 | } | ||
795 | |||
796 | /* return 1 to indicate change in mac_hash registers reqd */ | ||
797 | return 1; | ||
798 | } | ||
799 | |||
800 | /* EMAC multicast operation */ | ||
801 | #define EMAC_MULTICAST_ADD 0 | ||
802 | #define EMAC_MULTICAST_DEL 1 | ||
803 | #define EMAC_ALL_MULTI_SET 2 | ||
804 | #define EMAC_ALL_MULTI_CLR 3 | ||
805 | |||
806 | /** | ||
807 | * emac_add_mcast: Set multicast address in the EMAC adapter (Internal) | ||
808 | * @priv: The DaVinci EMAC private adapter structure | ||
809 | * @action: multicast operation to perform | ||
810 | * @mac_addr: mac address to set | ||
811 | * | ||
812 | * Set multicast addresses in EMAC adapter - internal function | ||
813 | * | ||
814 | */ | ||
815 | static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr) | ||
816 | { | ||
817 | struct device *emac_dev = &priv->ndev->dev; | ||
818 | int update = -1; | ||
819 | |||
820 | switch (action) { | ||
821 | case EMAC_MULTICAST_ADD: | ||
822 | update = hash_add(priv, mac_addr); | ||
823 | break; | ||
824 | case EMAC_MULTICAST_DEL: | ||
825 | update = hash_del(priv, mac_addr); | ||
826 | break; | ||
827 | case EMAC_ALL_MULTI_SET: | ||
828 | update = 1; | ||
829 | priv->mac_hash1 = EMAC_ALL_MULTI_REG_VALUE; | ||
830 | priv->mac_hash2 = EMAC_ALL_MULTI_REG_VALUE; | ||
831 | break; | ||
832 | case EMAC_ALL_MULTI_CLR: | ||
833 | update = 1; | ||
834 | priv->mac_hash1 = 0; | ||
835 | priv->mac_hash2 = 0; | ||
836 | memset(&(priv->multicast_hash_cnt[0]), 0, | ||
837 | sizeof(priv->multicast_hash_cnt[0]) * | ||
838 | EMAC_NUM_MULTICAST_BITS); | ||
839 | break; | ||
840 | default: | ||
841 | if (netif_msg_drv(priv)) | ||
842 | dev_err(emac_dev, "DaVinci EMAC: add_mcast"\ | ||
843 | ": bad operation %d", action); | ||
844 | break; | ||
845 | } | ||
846 | |||
847 | /* write to the hardware only if the register status changes */ | ||
848 | if (update > 0) { | ||
849 | emac_write(EMAC_MACHASH1, priv->mac_hash1); | ||
850 | emac_write(EMAC_MACHASH2, priv->mac_hash2); | ||
851 | } | ||
852 | } | ||
853 | |||
854 | /** | ||
855 | * emac_dev_mcast_set: Set multicast address in the EMAC adapter | ||
856 | * @ndev: The DaVinci EMAC network adapter | ||
857 | * | ||
858 | * Set multicast addresses in EMAC adapter | ||
859 | * | ||
860 | */ | ||
861 | static void emac_dev_mcast_set(struct net_device *ndev) | ||
862 | { | ||
863 | u32 mbp_enable; | ||
864 | struct emac_priv *priv = netdev_priv(ndev); | ||
865 | |||
866 | mbp_enable = emac_read(EMAC_RXMBPENABLE); | ||
867 | if (ndev->flags & IFF_PROMISC) { | ||
868 | mbp_enable &= (~EMAC_MBP_PROMISCCH(EMAC_DEF_PROM_CH)); | ||
869 | mbp_enable |= (EMAC_MBP_RXPROMISC); | ||
870 | } else { | ||
871 | mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC); | ||
872 | if ((ndev->flags & IFF_ALLMULTI) || | ||
873 | netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) { | ||
874 | mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); | ||
875 | emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); | ||
876 | } | ||
877 | if (!netdev_mc_empty(ndev)) { | ||
878 | struct netdev_hw_addr *ha; | ||
879 | |||
880 | mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); | ||
881 | emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL); | ||
882 | /* program multicast address list into EMAC hardware */ | ||
883 | netdev_for_each_mc_addr(ha, ndev) { | ||
884 | emac_add_mcast(priv, EMAC_MULTICAST_ADD, | ||
885 | (u8 *) ha->addr); | ||
886 | } | ||
887 | } else { | ||
888 | mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST); | ||
889 | emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL); | ||
890 | } | ||
891 | } | ||
892 | /* Set mbp config register */ | ||
893 | emac_write(EMAC_RXMBPENABLE, mbp_enable); | ||
894 | } | ||
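
For context, the stack invokes this ndo whenever the interface's RX mode or multicast list changes; when an application joins an IP multicast group, for example, the stack maps the group onto an L2 multicast address, adds it to the device's list and lands here, which programs the hash registers through emac_add_mcast(). A minimal, hypothetical userspace trigger:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct ip_mreq mreq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.251"); /* sample group */
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);

	if (fd < 0 || setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP,
				 &mreq, sizeof(mreq)) < 0) {
		perror("IP_ADD_MEMBERSHIP");
		return 1;
	}
	pause(); /* membership (and the hash bit) drops when we exit */
	return 0;
}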
895 | |||
896 | /************************************************************************* | ||
897 | * EMAC Hardware manipulation | ||
898 | *************************************************************************/ | ||
899 | |||
900 | /** | ||
901 | * emac_int_disable: Disable EMAC module interrupt (from adapter) | ||
902 | * @priv: The DaVinci EMAC private adapter structure | ||
903 | * | ||
904 | * Disable EMAC interrupt on the adapter | ||
905 | * | ||
906 | */ | ||
907 | static void emac_int_disable(struct emac_priv *priv) | ||
908 | { | ||
909 | if (priv->version == EMAC_VERSION_2) { | ||
910 | unsigned long flags; | ||
911 | |||
912 | local_irq_save(flags); | ||
913 | |||
914 | /* Program C0_Int_En to zero to turn off | ||
915 | * interrupts to the CPU */ | ||
916 | emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0); | ||
917 | emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0); | ||
918 | /* NOTE: Rx Threshold and Misc interrupts are not disabled */ | ||
919 | if (priv->int_disable) | ||
920 | priv->int_disable(); | ||
921 | |||
922 | local_irq_restore(flags); | ||
923 | |||
924 | } else { | ||
925 | /* Set DM644x control registers for interrupt control */ | ||
926 | emac_ctrl_write(EMAC_CTRL_EWCTL, 0x0); | ||
927 | } | ||
928 | } | ||
929 | |||
930 | /** | ||
931 | * emac_int_enable: Enable EMAC module interrupt (from adapter) | ||
932 | * @priv: The DaVinci EMAC private adapter structure | ||
933 | * | ||
934 | * Enable EMAC interrupt on the adapter | ||
935 | * | ||
936 | */ | ||
937 | static void emac_int_enable(struct emac_priv *priv) | ||
938 | { | ||
939 | if (priv->version == EMAC_VERSION_2) { | ||
940 | if (priv->int_enable) | ||
941 | priv->int_enable(); | ||
942 | |||
943 | emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff); | ||
944 | emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff); | ||
945 | |||
946 | /* In addition to turning on interrupt enable, we need | ||
947 | * to ack by writing appropriate values to the EOI | ||
948 | * register */ | ||
949 | |||
950 | /* NOTE: Rx Threshold and Misc interrupts are not enabled */ | ||
951 | |||
952 | /* ack rxen only then a new pulse will be generated */ | ||
953 | emac_write(EMAC_DM646X_MACEOIVECTOR, | ||
954 | EMAC_DM646X_MAC_EOI_C0_RXEN); | ||
955 | |||
956 | /* ack txen- only then a new pulse will be generated */ | ||
957 | emac_write(EMAC_DM646X_MACEOIVECTOR, | ||
958 | EMAC_DM646X_MAC_EOI_C0_TXEN); | ||
959 | |||
960 | } else { | ||
961 | /* Set DM644x control registers for interrupt control */ | ||
962 | emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1); | ||
963 | } | ||
964 | } | ||
965 | |||
966 | /** | ||
967 | * emac_irq: EMAC interrupt handler | ||
968 | * @irq: interrupt number | ||
969 | * @dev_id: EMAC network adapter data structure ptr | ||
970 | * | ||
971 | * EMAC interrupt handler - we only schedule NAPI here and do not process any | ||
972 | * packets. Even the interrupt status (TX/RX/Err) is checked in the NAPI poll function | ||
973 | * | ||
974 | * Returns interrupt handled condition | ||
975 | */ | ||
976 | static irqreturn_t emac_irq(int irq, void *dev_id) | ||
977 | { | ||
978 | struct net_device *ndev = (struct net_device *)dev_id; | ||
979 | struct emac_priv *priv = netdev_priv(ndev); | ||
980 | |||
981 | ++priv->isr_count; | ||
982 | if (likely(netif_running(priv->ndev))) { | ||
983 | emac_int_disable(priv); | ||
984 | napi_schedule(&priv->napi); | ||
985 | } else { | ||
986 | /* we are closing down, so don't process anything */ | ||
987 | } | ||
988 | return IRQ_HANDLED; | ||
989 | } | ||
990 | |||
991 | static struct sk_buff *emac_rx_alloc(struct emac_priv *priv) | ||
992 | { | ||
993 | struct sk_buff *skb = dev_alloc_skb(priv->rx_buf_size); | ||
994 | if (WARN_ON(!skb)) | ||
995 | return NULL; | ||
996 | skb->dev = priv->ndev; | ||
997 | skb_reserve(skb, NET_IP_ALIGN); | ||
998 | return skb; | ||
999 | } | ||
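
The NET_IP_ALIGN reserve above exists because the 14-byte Ethernet header would otherwise leave the IP header on a 2-byte boundary; shifting the buffer start by two bytes (the default NET_IP_ALIGN value) puts it on a 4-byte boundary instead. A trivial check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int eth_hlen = 14, net_ip_align = 2;

	printf("IP header offset without reserve: %u (mod 4 = %u)\n",
	       eth_hlen, eth_hlen % 4);
	printf("IP header offset with reserve:    %u (mod 4 = %u)\n",
	       eth_hlen + net_ip_align, (eth_hlen + net_ip_align) % 4);
	return 0;
}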
1000 | |||
1001 | static void emac_rx_handler(void *token, int len, int status) | ||
1002 | { | ||
1003 | struct sk_buff *skb = token; | ||
1004 | struct net_device *ndev = skb->dev; | ||
1005 | struct emac_priv *priv = netdev_priv(ndev); | ||
1006 | struct device *emac_dev = &ndev->dev; | ||
1007 | int ret; | ||
1008 | |||
1009 | /* free and bail if we are shutting down */ | ||
1010 | if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { | ||
1011 | dev_kfree_skb_any(skb); | ||
1012 | return; | ||
1013 | } | ||
1014 | |||
1015 | /* recycle on receive error */ | ||
1016 | if (status < 0) { | ||
1017 | ndev->stats.rx_errors++; | ||
1018 | goto recycle; | ||
1019 | } | ||
1020 | |||
1021 | /* feed received packet up the stack */ | ||
1022 | skb_put(skb, len); | ||
1023 | skb->protocol = eth_type_trans(skb, ndev); | ||
1024 | netif_receive_skb(skb); | ||
1025 | ndev->stats.rx_bytes += len; | ||
1026 | ndev->stats.rx_packets++; | ||
1027 | |||
1028 | /* alloc a new packet for receive */ | ||
1029 | skb = emac_rx_alloc(priv); | ||
1030 | if (!skb) { | ||
1031 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
1032 | dev_err(emac_dev, "failed rx buffer alloc\n"); | ||
1033 | return; | ||
1034 | } | ||
1035 | |||
1036 | recycle: | ||
1037 | ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, | ||
1038 | skb_tailroom(skb), GFP_KERNEL); | ||
1039 | if (WARN_ON(ret < 0)) | ||
1040 | dev_kfree_skb_any(skb); | ||
1041 | } | ||
1042 | |||
1043 | static void emac_tx_handler(void *token, int len, int status) | ||
1044 | { | ||
1045 | struct sk_buff *skb = token; | ||
1046 | struct net_device *ndev = skb->dev; | ||
1047 | |||
1048 | if (unlikely(netif_queue_stopped(ndev))) | ||
1049 | netif_start_queue(ndev); | ||
1050 | ndev->stats.tx_packets++; | ||
1051 | ndev->stats.tx_bytes += len; | ||
1052 | dev_kfree_skb_any(skb); | ||
1053 | } | ||
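
Both completion handlers above rely on the cpdma token convention visible in this file: the skb passed to cpdma_chan_submit() is returned verbatim as 'token' when the descriptor completes, so neither handler needs a lookup table to recover its buffer. A standalone sketch of that callback pattern in miniature (generic names, not the cpdma API itself):

#include <stdio.h>

typedef void (*complete_fn)(void *token, int len, int status);

struct request {
	void *token;      /* submitter's cookie, returned untouched */
	int len;
	complete_fn done;
};

static void submit(struct request *rq, void *token, int len, complete_fn fn)
{
	rq->token = token;
	rq->len = len;
	rq->done = fn;
}

static void complete(struct request *rq, int status)
{
	rq->done(rq->token, rq->len, status);
}

static void tx_done(void *token, int len, int status)
{
	printf("buffer %s done: %d bytes, status %d\n",
	       (char *)token, len, status);
}

int main(void)
{
	struct request rq;

	submit(&rq, "pkt0", 64, tx_done);
	complete(&rq, 0); /* a driver would do this from its IRQ path */
	return 0;
}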
1054 | |||
1055 | /** | ||
1056 | * emac_dev_xmit: EMAC Transmit function | ||
1057 | * @skb: SKB pointer | ||
1058 | * @ndev: The DaVinci EMAC network adapter | ||
1059 | * | ||
1060 | * Called by the system to transmit a packet - we queue the packet in | ||
1061 | * EMAC hardware transmit queue | ||
1062 | * | ||
1063 | * Returns success (NETDEV_TX_OK) or error code (typically out of descriptors) | ||
1064 | */ | ||
1065 | static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) | ||
1066 | { | ||
1067 | struct device *emac_dev = &ndev->dev; | ||
1068 | int ret_code; | ||
1069 | struct emac_priv *priv = netdev_priv(ndev); | ||
1070 | |||
1071 | /* If no link, return */ | ||
1072 | if (unlikely(!priv->link)) { | ||
1073 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
1074 | dev_err(emac_dev, "DaVinci EMAC: No link to transmit"); | ||
1075 | goto fail_tx; | ||
1076 | } | ||
1077 | |||
1078 | ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE); | ||
1079 | if (unlikely(ret_code < 0)) { | ||
1080 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
1081 | dev_err(emac_dev, "DaVinci EMAC: packet pad failed"); | ||
1082 | goto fail_tx; | ||
1083 | } | ||
1084 | |||
1085 | skb_tx_timestamp(skb); | ||
1086 | |||
1087 | ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len, | ||
1088 | GFP_KERNEL); | ||
1089 | if (unlikely(ret_code != 0)) { | ||
1090 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
1091 | dev_err(emac_dev, "DaVinci EMAC: desc submit failed"); | ||
1092 | goto fail_tx; | ||
1093 | } | ||
1094 | |||
1095 | return NETDEV_TX_OK; | ||
1096 | |||
1097 | fail_tx: | ||
1098 | ndev->stats.tx_dropped++; | ||
1099 | netif_stop_queue(ndev); | ||
1100 | return NETDEV_TX_BUSY; | ||
1101 | } | ||
1102 | |||
1103 | /** | ||
1104 | * emac_dev_tx_timeout: EMAC Transmit timeout function | ||
1105 | * @ndev: The DaVinci EMAC network adapter | ||
1106 | * | ||
1107 | * Called when the system detects that a skb transmit timeout has expired, | ||
1108 | * potentially due to a fault in the adapter preventing it from sending | ||
1109 | * the packet out on the wire. We tear down the TX channel assuming a | ||
1110 | * hardware error and re-initialize it for operation | ||
1111 | * | ||
1112 | */ | ||
1113 | static void emac_dev_tx_timeout(struct net_device *ndev) | ||
1114 | { | ||
1115 | struct emac_priv *priv = netdev_priv(ndev); | ||
1116 | struct device *emac_dev = &ndev->dev; | ||
1117 | |||
1118 | if (netif_msg_tx_err(priv)) | ||
1119 | dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX"); | ||
1120 | |||
1121 | emac_dump_regs(priv); | ||
1122 | |||
1123 | ndev->stats.tx_errors++; | ||
1124 | emac_int_disable(priv); | ||
1125 | cpdma_chan_stop(priv->txchan); | ||
1126 | cpdma_chan_start(priv->txchan); | ||
1127 | emac_int_enable(priv); | ||
1128 | } | ||
1129 | |||
1130 | /** | ||
1131 | * emac_set_type0addr: Set EMAC Type0 mac address | ||
1132 | * @priv: The DaVinci EMAC private adapter structure | ||
1133 | * @ch: RX channel number | ||
1134 | * @mac_addr: MAC address to set in device | ||
1135 | * | ||
1136 | * Called internally to set Type0 mac address of the adapter (Device) | ||
1139 | */ | ||
1140 | static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr) | ||
1141 | { | ||
1142 | u32 val; | ||
1143 | val = ((mac_addr[5] << 8) | (mac_addr[4])); | ||
1144 | emac_write(EMAC_MACSRCADDRLO, val); | ||
1145 | |||
1146 | val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \ | ||
1147 | (mac_addr[1] << 8) | (mac_addr[0])); | ||
1148 | emac_write(EMAC_MACSRCADDRHI, val); | ||
1149 | val = emac_read(EMAC_RXUNICASTSET); | ||
1150 | val |= BIT(ch); | ||
1151 | emac_write(EMAC_RXUNICASTSET, val); | ||
1152 | val = emac_read(EMAC_RXUNICASTCLEAR); | ||
1153 | val &= ~BIT(ch); | ||
1154 | emac_write(EMAC_RXUNICASTCLEAR, val); | ||
1155 | } | ||
1156 | |||
1157 | /** | ||
1158 | * emac_set_type1addr: Set EMAC Type1 mac address | ||
1159 | * @priv: The DaVinci EMAC private adapter structure | ||
1160 | * @ch: RX channel number | ||
1161 | * @mac_addr: MAC address to set in device | ||
1162 | * | ||
1163 | * Called internally to set Type1 mac address of the adapter (Device) | ||
1166 | */ | ||
1167 | static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr) | ||
1168 | { | ||
1169 | u32 val; | ||
1170 | emac_write(EMAC_MACINDEX, ch); | ||
1171 | val = ((mac_addr[5] << 8) | mac_addr[4]); | ||
1172 | emac_write(EMAC_MACADDRLO, val); | ||
1173 | val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \ | ||
1174 | (mac_addr[1] << 8) | (mac_addr[0])); | ||
1175 | emac_write(EMAC_MACADDRHI, val); | ||
1176 | emac_set_type0addr(priv, ch, mac_addr); | ||
1177 | } | ||
1178 | |||
1179 | /** | ||
1180 | * emac_set_type2addr: Set EMAC Type2 mac address | ||
1181 | * @priv: The DaVinci EMAC private adapter structure | ||
1182 | * @ch: RX channel number | ||
1183 | * @mac_addr: MAC address to set in device | ||
1184 | * @index: index into RX address entries | ||
1185 | * @match: match parameter for RX address matching logic | ||
1186 | * | ||
1187 | * Called internally to set Type2 mac address of the adapter (Device) | ||
1190 | */ | ||
1191 | static void emac_set_type2addr(struct emac_priv *priv, u32 ch, | ||
1192 | char *mac_addr, int index, int match) | ||
1193 | { | ||
1194 | u32 val; | ||
1195 | emac_write(EMAC_MACINDEX, index); | ||
1196 | val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \ | ||
1197 | (mac_addr[1] << 8) | (mac_addr[0])); | ||
1198 | emac_write(EMAC_MACADDRHI, val); | ||
1199 | val = ((mac_addr[5] << 8) | mac_addr[4] | ((ch & 0x7) << 16) | \ | ||
1200 | (match << 19) | BIT(20)); | ||
1201 | emac_write(EMAC_MACADDRLO, val); | ||
1202 | emac_set_type0addr(priv, ch, mac_addr); | ||
1203 | } | ||
1204 | |||
1205 | /** | ||
1206 | * emac_setmac: Set mac address in the adapter (internal function) | ||
1207 | * @priv: The DaVinci EMAC private adapter structure | ||
1208 | * @ch: RX channel number | ||
1209 | * @mac_addr: MAC address to set in device | ||
1210 | * | ||
1211 | * Called internally to set the mac address of the adapter (Device) | ||
1214 | */ | ||
1215 | static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr) | ||
1216 | { | ||
1217 | struct device *emac_dev = &priv->ndev->dev; | ||
1218 | |||
1219 | if (priv->rx_addr_type == 0) { | ||
1220 | emac_set_type0addr(priv, ch, mac_addr); | ||
1221 | } else if (priv->rx_addr_type == 1) { | ||
1222 | u32 cnt; | ||
1223 | for (cnt = 0; cnt < EMAC_MAX_TXRX_CHANNELS; cnt++) | ||
1224 | emac_set_type1addr(priv, ch, mac_addr); | ||
1225 | } else if (priv->rx_addr_type == 2) { | ||
1226 | emac_set_type2addr(priv, ch, mac_addr, ch, 1); | ||
1227 | emac_set_type0addr(priv, ch, mac_addr); | ||
1228 | } else { | ||
1229 | if (netif_msg_drv(priv)) | ||
1230 | dev_err(emac_dev, "DaVinci EMAC: Wrong addressing\n"); | ||
1231 | } | ||
1232 | } | ||
1233 | |||
1234 | /** | ||
1235 | * emac_dev_setmac_addr: Set mac address in the adapter | ||
1236 | * @ndev: The DaVinci EMAC network adapter | ||
1237 | * @addr: MAC address to set in device | ||
1238 | * | ||
1239 | * Called by the system to set the mac address of the adapter (Device) | ||
1240 | * | ||
1241 | * Returns success (0) or -EINVAL if the given mac address is invalid | ||
1242 | */ | ||
1243 | static int emac_dev_setmac_addr(struct net_device *ndev, void *addr) | ||
1244 | { | ||
1245 | struct emac_priv *priv = netdev_priv(ndev); | ||
1246 | struct device *emac_dev = &priv->ndev->dev; | ||
1247 | struct sockaddr *sa = addr; | ||
1248 | |||
1249 | if (!is_valid_ether_addr(sa->sa_data)) | ||
1250 | return -EINVAL; | ||
1251 | |||
1252 | /* Store mac addr in priv and rx channel and set it in EMAC hw */ | ||
1253 | memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); | ||
1254 | memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len); | ||
1255 | |||
1256 | /* MAC address is configured only after the interface is enabled. */ | ||
1257 | if (netif_running(ndev)) { | ||
1258 | memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len); | ||
1259 | emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr); | ||
1260 | } | ||
1261 | |||
1262 | if (netif_msg_drv(priv)) | ||
1263 | dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n", | ||
1264 | priv->mac_addr); | ||
1265 | |||
1266 | return 0; | ||
1267 | } | ||
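
This ndo is the kernel-side endpoint for the SIOCSIFHWADDR ioctl (ifconfig's `hw ether` path; iproute2 reaches the same ndo via netlink). A minimal userspace sketch, with a hypothetical locally administered address:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_arp.h>

int main(void)
{
	/* locally administered example address (0x02 in the first octet) */
	unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x12, 0x34, 0x56 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed name */
	ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
	memcpy(ifr.ifr_hwaddr.sa_data, mac, 6);

	if (fd < 0 || ioctl(fd, SIOCSIFHWADDR, &ifr) < 0) {
		perror("SIOCSIFHWADDR");
		return 1;
	}
	close(fd);
	return 0;
}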
1268 | |||
1269 | /** | ||
1270 | * emac_hw_enable: Enable EMAC hardware for packet transmission/reception | ||
1271 | * @priv: The DaVinci EMAC private adapter structure | ||
1272 | * | ||
1273 | * Enables EMAC hardware for packet processing - enables PHY, enables RX | ||
1274 | * for packet reception and enables device interrupts and then NAPI | ||
1275 | * | ||
1276 | * Returns success (0) or appropriate error code (none right now) | ||
1277 | */ | ||
1278 | static int emac_hw_enable(struct emac_priv *priv) | ||
1279 | { | ||
1280 | u32 val, mbp_enable, mac_control; | ||
1281 | |||
1282 | /* Soft reset */ | ||
1283 | emac_write(EMAC_SOFTRESET, 1); | ||
1284 | while (emac_read(EMAC_SOFTRESET)) | ||
1285 | cpu_relax(); | ||
1286 | |||
1287 | /* Disable interrupt & Set pacing for more interrupts initially */ | ||
1288 | emac_int_disable(priv); | ||
1289 | |||
1290 | /* Full duplex enable bit set when auto negotiation happens */ | ||
1291 | mac_control = | ||
1292 | (((EMAC_DEF_TXPRIO_FIXED) ? (EMAC_MACCONTROL_TXPTYPE) : 0x0) | | ||
1293 | ((priv->speed == 1000) ? EMAC_MACCONTROL_GIGABITEN : 0x0) | | ||
1294 | ((EMAC_DEF_TXPACING_EN) ? (EMAC_MACCONTROL_TXPACEEN) : 0x0) | | ||
1295 | ((priv->duplex == DUPLEX_FULL) ? 0x1 : 0)); | ||
1296 | emac_write(EMAC_MACCONTROL, mac_control); | ||
1297 | |||
1298 | mbp_enable = | ||
1299 | (((EMAC_DEF_PASS_CRC) ? (EMAC_RXMBP_PASSCRC_MASK) : 0x0) | | ||
1300 | ((EMAC_DEF_QOS_EN) ? (EMAC_RXMBP_QOSEN_MASK) : 0x0) | | ||
1301 | ((EMAC_DEF_NO_BUFF_CHAIN) ? (EMAC_RXMBP_NOCHAIN_MASK) : 0x0) | | ||
1302 | ((EMAC_DEF_MACCTRL_FRAME_EN) ? (EMAC_RXMBP_CMFEN_MASK) : 0x0) | | ||
1303 | ((EMAC_DEF_SHORT_FRAME_EN) ? (EMAC_RXMBP_CSFEN_MASK) : 0x0) | | ||
1304 | ((EMAC_DEF_ERROR_FRAME_EN) ? (EMAC_RXMBP_CEFEN_MASK) : 0x0) | | ||
1305 | ((EMAC_DEF_PROM_EN) ? (EMAC_RXMBP_CAFEN_MASK) : 0x0) | | ||
1306 | ((EMAC_DEF_PROM_CH & EMAC_RXMBP_CHMASK) << \ | ||
1307 | EMAC_RXMBP_PROMCH_SHIFT) | | ||
1308 | ((EMAC_DEF_BCAST_EN) ? (EMAC_RXMBP_BROADEN_MASK) : 0x0) | | ||
1309 | ((EMAC_DEF_BCAST_CH & EMAC_RXMBP_CHMASK) << \ | ||
1310 | EMAC_RXMBP_BROADCH_SHIFT) | | ||
1311 | ((EMAC_DEF_MCAST_EN) ? (EMAC_RXMBP_MULTIEN_MASK) : 0x0) | | ||
1312 | ((EMAC_DEF_MCAST_CH & EMAC_RXMBP_CHMASK) << \ | ||
1313 | EMAC_RXMBP_MULTICH_SHIFT)); | ||
1314 | emac_write(EMAC_RXMBPENABLE, mbp_enable); | ||
1315 | emac_write(EMAC_RXMAXLEN, (EMAC_DEF_MAX_FRAME_SIZE & | ||
1316 | EMAC_RX_MAX_LEN_MASK)); | ||
1317 | emac_write(EMAC_RXBUFFEROFFSET, (EMAC_DEF_BUFFER_OFFSET & | ||
1318 | EMAC_RX_BUFFER_OFFSET_MASK)); | ||
1319 | emac_write(EMAC_RXFILTERLOWTHRESH, 0); | ||
1320 | emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL); | ||
1321 | priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF; | ||
1322 | |||
1323 | emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL); | ||
1324 | |||
1325 | emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr); | ||
1326 | |||
1327 | /* Enable MII */ | ||
1328 | val = emac_read(EMAC_MACCONTROL); | ||
1329 | val |= (EMAC_MACCONTROL_GMIIEN); | ||
1330 | emac_write(EMAC_MACCONTROL, val); | ||
1331 | |||
1332 | /* Enable NAPI and interrupts */ | ||
1333 | napi_enable(&priv->napi); | ||
1334 | emac_int_enable(priv); | ||
1335 | return 0; | ||
1336 | |||
1337 | } | ||
1338 | |||
1339 | /** | ||
1340 | * emac_poll: EMAC NAPI Poll function | ||
1341 | * @napi: NAPI structure embedded in the DaVinci EMAC private adapter structure | ||
1342 | * @budget: Number of receive packets to process (as told by NAPI layer) | ||
1343 | * | ||
1344 | * NAPI Poll function implemented to process packets as per budget. We check | ||
1345 | * the type of interrupt on the device and accordingly call the TX or RX | ||
1346 | * packet processing functions. We follow the budget for RX processing and | ||
1347 | * also put a cap on number of TX pkts processed through config param. The | ||
1348 | * NAPI schedule function is called if more packets are pending. | ||
1349 | * | ||
1350 | * Returns number of packets received (in most cases; else TX pkts - rarely) | ||
1351 | */ | ||
1352 | static int emac_poll(struct napi_struct *napi, int budget) | ||
1353 | { | ||
1354 | unsigned int mask; | ||
1355 | struct emac_priv *priv = container_of(napi, struct emac_priv, napi); | ||
1356 | struct net_device *ndev = priv->ndev; | ||
1357 | struct device *emac_dev = &ndev->dev; | ||
1358 | u32 status = 0; | ||
1359 | u32 num_tx_pkts = 0, num_rx_pkts = 0; | ||
1360 | |||
1361 | /* Check interrupt vectors and call packet processing */ | ||
1362 | status = emac_read(EMAC_MACINVECTOR); | ||
1363 | |||
1364 | mask = EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC; | ||
1365 | |||
1366 | if (priv->version == EMAC_VERSION_2) | ||
1367 | mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC; | ||
1368 | |||
1369 | if (status & mask) { | ||
1370 | num_tx_pkts = cpdma_chan_process(priv->txchan, | ||
1371 | EMAC_DEF_TX_MAX_SERVICE); | ||
1372 | } /* TX processing */ | ||
1373 | |||
1374 | mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC; | ||
1375 | |||
1376 | if (priv->version == EMAC_VERSION_2) | ||
1377 | mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC; | ||
1378 | |||
1379 | if (status & mask) { | ||
1380 | num_rx_pkts = cpdma_chan_process(priv->rxchan, budget); | ||
1381 | } /* RX processing */ | ||
1382 | |||
1383 | mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT; | ||
1384 | if (priv->version == EMAC_VERSION_2) | ||
1385 | mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT; | ||
1386 | |||
1387 | if (unlikely(status & mask)) { | ||
1388 | u32 ch, cause; | ||
1389 | dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n"); | ||
1390 | netif_stop_queue(ndev); | ||
1391 | napi_disable(&priv->napi); | ||
1392 | |||
1393 | status = emac_read(EMAC_MACSTATUS); | ||
1394 | cause = ((status & EMAC_MACSTATUS_TXERRCODE_MASK) >> | ||
1395 | EMAC_MACSTATUS_TXERRCODE_SHIFT); | ||
1396 | if (cause) { | ||
1397 | ch = ((status & EMAC_MACSTATUS_TXERRCH_MASK) >> | ||
1398 | EMAC_MACSTATUS_TXERRCH_SHIFT); | ||
1399 | if (net_ratelimit()) { | ||
1400 | dev_err(emac_dev, "TX Host error %s on ch=%d\n", | ||
1401 | &emac_txhost_errcodes[cause][0], ch); | ||
1402 | } | ||
1403 | } | ||
1404 | cause = ((status & EMAC_MACSTATUS_RXERRCODE_MASK) >> | ||
1405 | EMAC_MACSTATUS_RXERRCODE_SHIFT); | ||
1406 | if (cause) { | ||
1407 | ch = ((status & EMAC_MACSTATUS_RXERRCH_MASK) >> | ||
1408 | EMAC_MACSTATUS_RXERRCH_SHIFT); | ||
1409 | if (netif_msg_hw(priv) && net_ratelimit()) | ||
1410 | dev_err(emac_dev, "RX Host error %s on ch=%d\n", | ||
1411 | &emac_rxhost_errcodes[cause][0], ch); | ||
1412 | } | ||
1413 | } else if (num_rx_pkts < budget) { | ||
1414 | napi_complete(napi); | ||
1415 | emac_int_enable(priv); | ||
1416 | } | ||
1417 | |||
1418 | return num_rx_pkts; | ||
1419 | } | ||
1420 | |||
1421 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1422 | /** | ||
1423 | * emac_poll_controller: EMAC Poll controller function | ||
1424 | * @ndev: The DaVinci EMAC network adapter | ||
1425 | * | ||
1426 | * Polled functionality used by netconsole and others in non-interrupt mode | ||
1427 | * | ||
1428 | */ | ||
1429 | void emac_poll_controller(struct net_device *ndev) | ||
1430 | { | ||
1431 | struct emac_priv *priv = netdev_priv(ndev); | ||
1432 | |||
1433 | emac_int_disable(priv); | ||
1434 | emac_irq(ndev->irq, ndev); | ||
1435 | emac_int_enable(priv); | ||
1436 | } | ||
1437 | #endif | ||
1438 | |||
1439 | static void emac_adjust_link(struct net_device *ndev) | ||
1440 | { | ||
1441 | struct emac_priv *priv = netdev_priv(ndev); | ||
1442 | struct phy_device *phydev = priv->phydev; | ||
1443 | unsigned long flags; | ||
1444 | int new_state = 0; | ||
1445 | |||
1446 | spin_lock_irqsave(&priv->lock, flags); | ||
1447 | |||
1448 | if (phydev->link) { | ||
1449 | /* check the mode of operation - full/half duplex */ | ||
1450 | if (phydev->duplex != priv->duplex) { | ||
1451 | new_state = 1; | ||
1452 | priv->duplex = phydev->duplex; | ||
1453 | } | ||
1454 | if (phydev->speed != priv->speed) { | ||
1455 | new_state = 1; | ||
1456 | priv->speed = phydev->speed; | ||
1457 | } | ||
1458 | if (!priv->link) { | ||
1459 | new_state = 1; | ||
1460 | priv->link = 1; | ||
1461 | } | ||
1462 | |||
1463 | } else if (priv->link) { | ||
1464 | new_state = 1; | ||
1465 | priv->link = 0; | ||
1466 | priv->speed = 0; | ||
1467 | priv->duplex = ~0; | ||
1468 | } | ||
1469 | if (new_state) { | ||
1470 | emac_update_phystatus(priv); | ||
1471 | phy_print_status(priv->phydev); | ||
1472 | } | ||
1473 | |||
1474 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1475 | } | ||
1476 | |||
1477 | /************************************************************************* | ||
1478 | * Linux Driver Model | ||
1479 | *************************************************************************/ | ||
1480 | |||
1481 | /** | ||
1482 | * emac_devioctl: EMAC adapter ioctl | ||
1483 | * @ndev: The DaVinci EMAC network adapter | ||
1484 | * @ifrq: request parameter | ||
1485 | * @cmd: command parameter | ||
1486 | * | ||
1487 | * EMAC driver ioctl function | ||
1488 | * | ||
1489 | * Returns success (0) or appropriate error code | ||
1490 | */ | ||
1491 | static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) | ||
1492 | { | ||
1493 | struct emac_priv *priv = netdev_priv(ndev); | ||
1494 | |||
1495 | if (!(netif_running(ndev))) | ||
1496 | return -EINVAL; | ||
1497 | |||
1498 | /* TODO: Add phy read and write and private statistics get feature */ | ||
1499 | |||
1500 | return phy_mii_ioctl(priv->phydev, ifrq, cmd); | ||
1501 | } | ||
1502 | |||
1503 | static int match_first_device(struct device *dev, void *data) | ||
1504 | { | ||
1505 | return 1; | ||
1506 | } | ||
1507 | |||
1508 | /** | ||
1509 | * emac_dev_open: EMAC device open | ||
1510 | * @ndev: The DaVinci EMAC network adapter | ||
1511 | * | ||
1512 | * Called when system wants to start the interface. We init TX/RX channels | ||
1513 | * and enable the hardware for packet reception/transmission and start the | ||
1514 | * network queue. | ||
1515 | * | ||
1516 | * Returns 0 for a successful open, or appropriate error code | ||
1517 | */ | ||
1518 | static int emac_dev_open(struct net_device *ndev) | ||
1519 | { | ||
1520 | struct device *emac_dev = &ndev->dev; | ||
1521 | u32 cnt; | ||
1522 | struct resource *res; | ||
1523 | int q, m, ret; | ||
1524 | int i = 0; | ||
1525 | int k = 0; | ||
1526 | struct emac_priv *priv = netdev_priv(ndev); | ||
1527 | |||
1528 | netif_carrier_off(ndev); | ||
1529 | for (cnt = 0; cnt < ETH_ALEN; cnt++) | ||
1530 | ndev->dev_addr[cnt] = priv->mac_addr[cnt]; | ||
1531 | |||
1532 | /* Configuration items */ | ||
1533 | priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN; | ||
1534 | |||
1535 | priv->mac_hash1 = 0; | ||
1536 | priv->mac_hash2 = 0; | ||
1537 | emac_write(EMAC_MACHASH1, 0); | ||
1538 | emac_write(EMAC_MACHASH2, 0); | ||
1539 | |||
1540 | for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) { | ||
1541 | struct sk_buff *skb = emac_rx_alloc(priv); | ||
1542 | |||
1543 | if (!skb) | ||
1544 | break; | ||
1545 | |||
1546 | ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, | ||
1547 | skb_tailroom(skb), GFP_KERNEL); | ||
1548 | if (WARN_ON(ret < 0)) | ||
1549 | break; | ||
1550 | } | ||
1551 | |||
1552 | /* Request IRQ */ | ||
1553 | |||
1554 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) { | ||
1555 | for (i = res->start; i <= res->end; i++) { | ||
1556 | if (request_irq(i, emac_irq, IRQF_DISABLED, | ||
1557 | ndev->name, ndev)) | ||
1558 | goto rollback; | ||
1559 | } | ||
1560 | k++; | ||
1561 | } | ||
1562 | |||
1563 | /* Start/Enable EMAC hardware */ | ||
1564 | emac_hw_enable(priv); | ||
1565 | |||
1566 | /* Enable Interrupt pacing if configured */ | ||
1567 | if (priv->coal_intvl != 0) { | ||
1568 | struct ethtool_coalesce coal; | ||
1569 | |||
1570 | coal.rx_coalesce_usecs = (priv->coal_intvl << 4); | ||
1571 | emac_set_coalesce(ndev, &coal); | ||
1572 | } | ||
1573 | |||
1574 | cpdma_ctlr_start(priv->dma); | ||
1575 | |||
1576 | priv->phydev = NULL; | ||
1577 | /* use the first phy on the bus if pdata did not give us a phy id */ | ||
1578 | if (!priv->phy_id) { | ||
1579 | struct device *phy; | ||
1580 | |||
1581 | phy = bus_find_device(&mdio_bus_type, NULL, NULL, | ||
1582 | match_first_device); | ||
1583 | if (phy) | ||
1584 | priv->phy_id = dev_name(phy); | ||
1585 | } | ||
1586 | |||
1587 | if (priv->phy_id && *priv->phy_id) { | ||
1588 | priv->phydev = phy_connect(ndev, priv->phy_id, | ||
1589 | &emac_adjust_link, 0, | ||
1590 | PHY_INTERFACE_MODE_MII); | ||
1591 | |||
1592 | if (IS_ERR(priv->phydev)) { | ||
1593 | ret = PTR_ERR(priv->phydev); /* save before clearing the pointer */ | ||
1594 | dev_err(emac_dev, "could not connect to phy %s\n", priv->phy_id); | ||
1595 | priv->phydev = NULL; | ||
1596 | return ret; | ||
1597 | } | ||
1598 | |||
1599 | priv->link = 0; | ||
1600 | priv->speed = 0; | ||
1601 | priv->duplex = ~0; | ||
1602 | |||
1603 | dev_info(emac_dev, "attached PHY driver [%s] " | ||
1604 | "(mii_bus:phy_addr=%s, id=%x)\n", | ||
1605 | priv->phydev->drv->name, dev_name(&priv->phydev->dev), | ||
1606 | priv->phydev->phy_id); | ||
1607 | } else { | ||
1608 | /* No PHY, fix the link, speed and duplex settings */ | ||
1609 | dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); | ||
1610 | priv->link = 1; | ||
1611 | priv->speed = SPEED_100; | ||
1612 | priv->duplex = DUPLEX_FULL; | ||
1613 | emac_update_phystatus(priv); | ||
1614 | } | ||
1615 | |||
1616 | if (!netif_running(ndev)) /* debug only - to avoid compiler warning */ | ||
1617 | emac_dump_regs(priv); | ||
1618 | |||
1619 | if (netif_msg_drv(priv)) | ||
1620 | dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name); | ||
1621 | |||
1622 | if (priv->phydev) | ||
1623 | phy_start(priv->phydev); | ||
1624 | |||
1625 | return 0; | ||
1626 | |||
1627 | rollback: | ||
1628 | |||
1629 | dev_err(emac_dev, "DaVinci EMAC: request_irq() failed"); | ||
1630 | |||
1631 | for (q = k; q >= 0; q--) { | ||
1632 | for (m = i - 1; m >= (int)res->start; m--) | ||
1633 | free_irq(m, ndev); | ||
1634 | res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q - 1); | ||
1635 | i = res ? res->end + 1 : 0; | ||
1636 | } | ||
1637 | return -EBUSY; | ||
1638 | } | ||
1639 | |||
1640 | /** | ||
1641 | * emac_dev_stop: EMAC device stop | ||
1642 | * @ndev: The DaVinci EMAC network adapter | ||
1643 | * | ||
1644 | * Called when system wants to stop or down the interface. We stop the network | ||
1645 | * queue, disable interrupts and cleanup TX/RX channels. | ||
1648 | */ | ||
1649 | static int emac_dev_stop(struct net_device *ndev) | ||
1650 | { | ||
1651 | struct resource *res; | ||
1652 | int i = 0; | ||
1653 | int irq_num; | ||
1654 | struct emac_priv *priv = netdev_priv(ndev); | ||
1655 | struct device *emac_dev = &ndev->dev; | ||
1656 | |||
1657 | /* inform the upper layers. */ | ||
1658 | netif_stop_queue(ndev); | ||
1659 | napi_disable(&priv->napi); | ||
1660 | |||
1661 | netif_carrier_off(ndev); | ||
1662 | emac_int_disable(priv); | ||
1663 | cpdma_ctlr_stop(priv->dma); | ||
1664 | emac_write(EMAC_SOFTRESET, 1); | ||
1665 | |||
1666 | if (priv->phydev) | ||
1667 | phy_disconnect(priv->phydev); | ||
1668 | |||
1669 | /* Free IRQ */ | ||
1670 | while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) { | ||
1671 | for (irq_num = res->start; irq_num <= res->end; irq_num++) | ||
1672 | free_irq(irq_num, priv->ndev); | ||
1673 | i++; | ||
1674 | } | ||
1675 | |||
1676 | if (netif_msg_drv(priv)) | ||
1677 | dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name); | ||
1678 | |||
1679 | return 0; | ||
1680 | } | ||
1681 | |||
1682 | /** | ||
1683 | * emac_dev_getnetstats: EMAC get statistics function | ||
1684 | * @ndev: The DaVinci EMAC network adapter | ||
1685 | * | ||
1686 | * Called when system wants to get statistics from the device. | ||
1687 | * | ||
1688 | * We return the statistics in net_device_stats structure pulled from emac | ||
1689 | */ | ||
1690 | static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) | ||
1691 | { | ||
1692 | struct emac_priv *priv = netdev_priv(ndev); | ||
1693 | u32 mac_control; | ||
1694 | u32 stats_clear_mask; | ||
1695 | |||
1696 | /* update emac hardware stats and reset the registers*/ | ||
1697 | |||
1698 | mac_control = emac_read(EMAC_MACCONTROL); | ||
1699 | |||
1700 | if (mac_control & EMAC_MACCONTROL_GMIIEN) | ||
1701 | stats_clear_mask = EMAC_STATS_CLR_MASK; | ||
1702 | else | ||
1703 | stats_clear_mask = 0; | ||
1704 | |||
1705 | ndev->stats.multicast += emac_read(EMAC_RXMCASTFRAMES); | ||
1706 | emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask); | ||
1707 | |||
1708 | ndev->stats.collisions += (emac_read(EMAC_TXCOLLISION) + | ||
1709 | emac_read(EMAC_TXSINGLECOLL) + | ||
1710 | emac_read(EMAC_TXMULTICOLL)); | ||
1711 | emac_write(EMAC_TXCOLLISION, stats_clear_mask); | ||
1712 | emac_write(EMAC_TXSINGLECOLL, stats_clear_mask); | ||
1713 | emac_write(EMAC_TXMULTICOLL, stats_clear_mask); | ||
1714 | |||
1715 | ndev->stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) + | ||
1716 | emac_read(EMAC_RXJABBER) + | ||
1717 | emac_read(EMAC_RXUNDERSIZED)); | ||
1718 | emac_write(EMAC_RXOVERSIZED, stats_clear_mask); | ||
1719 | emac_write(EMAC_RXJABBER, stats_clear_mask); | ||
1720 | emac_write(EMAC_RXUNDERSIZED, stats_clear_mask); | ||
1721 | |||
1722 | ndev->stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) + | ||
1723 | emac_read(EMAC_RXMOFOVERRUNS)); | ||
1724 | emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask); | ||
1725 | emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask); | ||
1726 | |||
1727 | ndev->stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS); | ||
1728 | emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask); | ||
1729 | |||
1730 | ndev->stats.tx_carrier_errors += | ||
1731 | emac_read(EMAC_TXCARRIERSENSE); | ||
1732 | emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask); | ||
1733 | |||
1734 | ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN); | ||
1735 | emac_write(EMAC_TXUNDERRUN, stats_clear_mask); | ||
1736 | |||
1737 | return &ndev->stats; | ||
1738 | } | ||
1739 | |||
1740 | static const struct net_device_ops emac_netdev_ops = { | ||
1741 | .ndo_open = emac_dev_open, | ||
1742 | .ndo_stop = emac_dev_stop, | ||
1743 | .ndo_start_xmit = emac_dev_xmit, | ||
1744 | .ndo_set_multicast_list = emac_dev_mcast_set, | ||
1745 | .ndo_set_mac_address = emac_dev_setmac_addr, | ||
1746 | .ndo_do_ioctl = emac_devioctl, | ||
1747 | .ndo_tx_timeout = emac_dev_tx_timeout, | ||
1748 | .ndo_get_stats = emac_dev_getnetstats, | ||
1749 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1750 | .ndo_poll_controller = emac_poll_controller, | ||
1751 | #endif | ||
1752 | }; | ||
1753 | |||
1754 | /** | ||
1755 | * davinci_emac_probe: EMAC device probe | ||
1756 | * @pdev: The DaVinci EMAC device that we are probing | ||
1757 | * | ||
1758 | * Called when probing for emac devices. We get details of instances and | ||
1759 | * resource information from platform init, register a network device | ||
1760 | * and allocate the resources necessary for the driver to operate | ||
1761 | */ | ||
1762 | static int __devinit davinci_emac_probe(struct platform_device *pdev) | ||
1763 | { | ||
1764 | int rc = 0; | ||
1765 | struct resource *res; | ||
1766 | struct net_device *ndev; | ||
1767 | struct emac_priv *priv; | ||
1768 | unsigned long size, hw_ram_addr; | ||
1769 | struct emac_platform_data *pdata; | ||
1770 | struct device *emac_dev; | ||
1771 | struct cpdma_params dma_params; | ||
1772 | |||
1773 | /* obtain emac clock from kernel */ | ||
1774 | emac_clk = clk_get(&pdev->dev, NULL); | ||
1775 | if (IS_ERR(emac_clk)) { | ||
1776 | dev_err(&pdev->dev, "failed to get EMAC clock\n"); | ||
1777 | return -EBUSY; | ||
1778 | } | ||
1779 | emac_bus_frequency = clk_get_rate(emac_clk); | ||
1780 | /* TODO: Probe PHY here if possible */ | ||
1781 | |||
1782 | ndev = alloc_etherdev(sizeof(struct emac_priv)); | ||
1783 | if (!ndev) { | ||
1784 | dev_err(&pdev->dev, "error allocating net_device\n"); | ||
1785 | rc = -ENOMEM; | ||
1786 | goto free_clk; | ||
1787 | } | ||
1788 | |||
1789 | platform_set_drvdata(pdev, ndev); | ||
1790 | priv = netdev_priv(ndev); | ||
1791 | priv->pdev = pdev; | ||
1792 | priv->ndev = ndev; | ||
1793 | priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG); | ||
1794 | |||
1795 | spin_lock_init(&priv->lock); | ||
1796 | |||
1797 | pdata = pdev->dev.platform_data; | ||
1798 | if (!pdata) { | ||
1799 | dev_err(&pdev->dev, "no platform data\n"); | ||
1800 | rc = -ENODEV; | ||
1801 | goto probe_quit; | ||
1802 | } | ||
1803 | |||
1804 | /* MAC addr and PHY mask, RMII enable info from platform_data */ | ||
1805 | memcpy(priv->mac_addr, pdata->mac_addr, 6); | ||
1806 | priv->phy_id = pdata->phy_id; | ||
1807 | priv->rmii_en = pdata->rmii_en; | ||
1808 | priv->version = pdata->version; | ||
1809 | priv->int_enable = pdata->interrupt_enable; | ||
1810 | priv->int_disable = pdata->interrupt_disable; | ||
1811 | |||
1812 | priv->coal_intvl = 0; | ||
1813 | priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000); | ||
1814 | |||
1815 | emac_dev = &ndev->dev; | ||
1816 | /* Get EMAC register space resource */ | ||
1817 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1818 | if (!res) { | ||
1819 | dev_err(&pdev->dev, "error getting res\n"); | ||
1820 | rc = -ENOENT; | ||
1821 | goto probe_quit; | ||
1822 | } | ||
1823 | |||
1824 | priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; | ||
1825 | size = resource_size(res); | ||
1826 | if (!request_mem_region(res->start, size, ndev->name)) { | ||
1827 | dev_err(&pdev->dev, "failed request_mem_region() for regs\n"); | ||
1828 | rc = -ENXIO; | ||
1829 | goto probe_quit; | ||
1830 | } | ||
1831 | |||
1832 | priv->remap_addr = ioremap(res->start, size); | ||
1833 | if (!priv->remap_addr) { | ||
1834 | dev_err(&pdev->dev, "unable to map IO\n"); | ||
1835 | rc = -ENOMEM; | ||
1836 | release_mem_region(res->start, size); | ||
1837 | goto probe_quit; | ||
1838 | } | ||
1839 | priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; | ||
1840 | ndev->base_addr = (unsigned long)priv->remap_addr; | ||
1841 | |||
1842 | priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; | ||
1843 | |||
1844 | hw_ram_addr = pdata->hw_ram_addr; | ||
1845 | if (!hw_ram_addr) | ||
1846 | hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; | ||
1847 | |||
1848 | memset(&dma_params, 0, sizeof(dma_params)); | ||
1849 | dma_params.dev = emac_dev; | ||
1850 | dma_params.dmaregs = priv->emac_base; | ||
1851 | dma_params.rxthresh = priv->emac_base + 0x120; | ||
1852 | dma_params.rxfree = priv->emac_base + 0x140; | ||
1853 | dma_params.txhdp = priv->emac_base + 0x600; | ||
1854 | dma_params.rxhdp = priv->emac_base + 0x620; | ||
1855 | dma_params.txcp = priv->emac_base + 0x640; | ||
1856 | dma_params.rxcp = priv->emac_base + 0x660; | ||
1857 | dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS; | ||
1858 | dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE; | ||
1859 | dma_params.desc_hw_addr = hw_ram_addr; | ||
1860 | dma_params.desc_mem_size = pdata->ctrl_ram_size; | ||
1861 | dma_params.desc_align = 16; | ||
1862 | |||
1863 | dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 : | ||
1864 | (u32 __force)res->start + pdata->ctrl_ram_offset; | ||
1865 | |||
1866 | priv->dma = cpdma_ctlr_create(&dma_params); | ||
1867 | if (!priv->dma) { | ||
1868 | dev_err(&pdev->dev, "error initializing DMA\n"); | ||
1869 | rc = -ENOMEM; | ||
1870 | goto no_dma; | ||
1871 | } | ||
1872 | |||
1873 | priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), | ||
1874 | emac_tx_handler); | ||
1875 | priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH), | ||
1876 | emac_rx_handler); | ||
1877 | if (WARN_ON(!priv->txchan || !priv->rxchan)) { | ||
1878 | rc = -ENOMEM; | ||
1879 | goto no_irq_res; | ||
1880 | } | ||
1881 | |||
1882 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
1883 | if (!res) { | ||
1884 | dev_err(&pdev->dev, "error getting irq res\n"); | ||
1885 | rc = -ENOENT; | ||
1886 | goto no_irq_res; | ||
1887 | } | ||
1888 | ndev->irq = res->start; | ||
1889 | |||
1890 | if (!is_valid_ether_addr(priv->mac_addr)) { | ||
1891 | /* Use random MAC if none passed */ | ||
1892 | random_ether_addr(priv->mac_addr); | ||
1893 | dev_warn(&pdev->dev, "using random MAC addr: %pM\n", | ||
1894 | priv->mac_addr); | ||
1895 | } | ||
1896 | |||
1897 | ndev->netdev_ops = &emac_netdev_ops; | ||
1898 | SET_ETHTOOL_OPS(ndev, ðtool_ops); | ||
1899 | netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); | ||
1900 | |||
1901 | clk_enable(emac_clk); | ||
1902 | |||
1903 | /* register the network device */ | ||
1904 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
1905 | rc = register_netdev(ndev); | ||
1906 | if (rc) { | ||
1907 | dev_err(&pdev->dev, "error in register_netdev\n"); | ||
1908 | rc = -ENODEV; | ||
1909 | goto netdev_reg_err; | ||
1910 | } | ||
1911 | |||
1912 | |||
1913 | if (netif_msg_probe(priv)) { | ||
1914 | dev_notice(emac_dev, "DaVinci EMAC Probe found device "\ | ||
1915 | "(regs: %p, irq: %d)\n", | ||
1916 | (void *)priv->emac_base_phys, ndev->irq); | ||
1917 | } | ||
1918 | return 0; | ||
1919 | |||
1920 | netdev_reg_err: | ||
1921 | clk_disable(emac_clk); | ||
1922 | no_irq_res: | ||
1923 | if (priv->txchan) | ||
1924 | cpdma_chan_destroy(priv->txchan); | ||
1925 | if (priv->rxchan) | ||
1926 | cpdma_chan_destroy(priv->rxchan); | ||
1927 | cpdma_ctlr_destroy(priv->dma); | ||
1928 | no_dma: | ||
1929 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1930 | release_mem_region(res->start, resource_size(res)); | ||
1931 | iounmap(priv->remap_addr); | ||
1932 | |||
1933 | probe_quit: | ||
1934 | free_netdev(ndev); | ||
1935 | free_clk: | ||
1936 | clk_put(emac_clk); | ||
1937 | return rc; | ||
1938 | } | ||
1939 | |||
1940 | /** | ||
1941 | * davinci_emac_remove: EMAC device remove | ||
1942 | * @pdev: The DaVinci EMAC device that we are removing | ||
1943 | * | ||
1944 | * Called when removing the device driver. We disable clock usage and release | ||
1945 | * the resources taken up by the driver and unregister network device | ||
1946 | */ | ||
1947 | static int __devexit davinci_emac_remove(struct platform_device *pdev) | ||
1948 | { | ||
1949 | struct resource *res; | ||
1950 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1951 | struct emac_priv *priv = netdev_priv(ndev); | ||
1952 | |||
1953 | dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); | ||
1954 | |||
1955 | platform_set_drvdata(pdev, NULL); | ||
1956 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1957 | |||
1958 | if (priv->txchan) | ||
1959 | cpdma_chan_destroy(priv->txchan); | ||
1960 | if (priv->rxchan) | ||
1961 | cpdma_chan_destroy(priv->rxchan); | ||
1962 | cpdma_ctlr_destroy(priv->dma); | ||
1963 | |||
1964 | release_mem_region(res->start, resource_size(res)); | ||
1965 | |||
1966 | unregister_netdev(ndev); | ||
1967 | iounmap(priv->remap_addr); | ||
1968 | free_netdev(ndev); | ||
1969 | |||
1970 | clk_disable(emac_clk); | ||
1971 | clk_put(emac_clk); | ||
1972 | |||
1973 | return 0; | ||
1974 | } | ||
1975 | |||
1976 | static int davinci_emac_suspend(struct device *dev) | ||
1977 | { | ||
1978 | struct platform_device *pdev = to_platform_device(dev); | ||
1979 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1980 | |||
1981 | if (netif_running(ndev)) | ||
1982 | emac_dev_stop(ndev); | ||
1983 | |||
1984 | clk_disable(emac_clk); | ||
1985 | |||
1986 | return 0; | ||
1987 | } | ||
1988 | |||
1989 | static int davinci_emac_resume(struct device *dev) | ||
1990 | { | ||
1991 | struct platform_device *pdev = to_platform_device(dev); | ||
1992 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
1993 | |||
1994 | clk_enable(emac_clk); | ||
1995 | |||
1996 | if (netif_running(ndev)) | ||
1997 | emac_dev_open(ndev); | ||
1998 | |||
1999 | return 0; | ||
2000 | } | ||
2001 | |||
2002 | static const struct dev_pm_ops davinci_emac_pm_ops = { | ||
2003 | .suspend = davinci_emac_suspend, | ||
2004 | .resume = davinci_emac_resume, | ||
2005 | }; | ||
2006 | |||
2007 | /** | ||
2008 | * davinci_emac_driver: EMAC platform driver structure | ||
2009 | */ | ||
2010 | static struct platform_driver davinci_emac_driver = { | ||
2011 | .driver = { | ||
2012 | .name = "davinci_emac", | ||
2013 | .owner = THIS_MODULE, | ||
2014 | .pm = &davinci_emac_pm_ops, | ||
2015 | }, | ||
2016 | .probe = davinci_emac_probe, | ||
2017 | .remove = __devexit_p(davinci_emac_remove), | ||
2018 | }; | ||
2019 | |||
2020 | /** | ||
2021 | * davinci_emac_init: EMAC driver module init | ||
2022 | * | ||
2023 | * Called when initializing the driver. We register the driver with | ||
2024 | * the platform. | ||
2025 | */ | ||
2026 | static int __init davinci_emac_init(void) | ||
2027 | { | ||
2028 | return platform_driver_register(&davinci_emac_driver); | ||
2029 | } | ||
2030 | late_initcall(davinci_emac_init); | ||
2031 | |||
2032 | /** | ||
2033 | * davinci_emac_exit: EMAC driver module exit | ||
2034 | * | ||
2035 | * Called when exiting the driver completely. We unregister the driver | ||
2036 | * from the platform bus. | ||
2037 | */ | ||
2038 | static void __exit davinci_emac_exit(void) | ||
2039 | { | ||
2040 | platform_driver_unregister(&davinci_emac_driver); | ||
2041 | } | ||
2042 | module_exit(davinci_emac_exit); | ||
2043 | |||
2044 | MODULE_LICENSE("GPL"); | ||
2045 | MODULE_AUTHOR("DaVinci EMAC Maintainer: Anant Gole <anantgole@ti.com>"); | ||
2046 | MODULE_AUTHOR("DaVinci EMAC Maintainer: Chaithrika U S <chaithrika@ti.com>"); | ||
2047 | MODULE_DESCRIPTION("DaVinci EMAC Ethernet driver"); | ||
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c new file mode 100644 index 000000000000..7615040df756 --- /dev/null +++ b/drivers/net/ethernet/ti/davinci_mdio.c | |||
@@ -0,0 +1,475 @@ | |||
1 | /* | ||
2 | * DaVinci MDIO Module driver | ||
3 | * | ||
4 | * Copyright (C) 2010 Texas Instruments. | ||
5 | * | ||
6 | * Shamelessly ripped out of davinci_emac.c, original copyrights follow: | ||
7 | * | ||
8 | * Copyright (C) 2009 Texas Instruments. | ||
9 | * | ||
10 | * --------------------------------------------------------------------------- | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
25 | * --------------------------------------------------------------------------- | ||
26 | */ | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/platform_device.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <linux/phy.h> | ||
34 | #include <linux/clk.h> | ||
35 | #include <linux/err.h> | ||
36 | #include <linux/io.h> | ||
37 | #include <linux/davinci_emac.h> | ||
38 | |||
39 | /* | ||
40 | * This timeout definition is a worst-case, ultra-defensive measure against | ||
41 | * unexpected controller lockups. Ideally, we should never ever hit this | ||
42 | * scenario in practice. | ||
43 | */ | ||
44 | #define MDIO_TIMEOUT 100 /* msecs */ | ||
45 | |||
46 | #define PHY_REG_MASK 0x1f | ||
47 | #define PHY_ID_MASK 0x1f | ||
48 | |||
49 | #define DEF_OUT_FREQ 2200000 /* 2.2 MHz */ | ||
50 | |||
51 | struct davinci_mdio_regs { | ||
52 | u32 version; | ||
53 | u32 control; | ||
54 | #define CONTROL_IDLE BIT(31) | ||
55 | #define CONTROL_ENABLE BIT(30) | ||
56 | #define CONTROL_MAX_DIV (0xff) | ||
57 | |||
58 | u32 alive; | ||
59 | u32 link; | ||
60 | u32 linkintraw; | ||
61 | u32 linkintmasked; | ||
62 | u32 __reserved_0[2]; | ||
63 | u32 userintraw; | ||
64 | u32 userintmasked; | ||
65 | u32 userintmaskset; | ||
66 | u32 userintmaskclr; | ||
67 | u32 __reserved_1[20]; | ||
68 | |||
69 | struct { | ||
70 | u32 access; | ||
71 | #define USERACCESS_GO BIT(31) | ||
72 | #define USERACCESS_WRITE BIT(30) | ||
73 | #define USERACCESS_ACK BIT(29) | ||
74 | #define USERACCESS_READ (0) | ||
75 | #define USERACCESS_DATA (0xffff) | ||
76 | |||
77 | u32 physel; | ||
78 | } user[0]; | ||
79 | }; | ||
80 | |||
81 | static struct mdio_platform_data default_pdata = { | ||
82 | .bus_freq = DEF_OUT_FREQ, | ||
83 | }; | ||
84 | |||
85 | struct davinci_mdio_data { | ||
86 | struct mdio_platform_data pdata; | ||
87 | struct davinci_mdio_regs __iomem *regs; | ||
88 | spinlock_t lock; | ||
89 | struct clk *clk; | ||
90 | struct device *dev; | ||
91 | struct mii_bus *bus; | ||
92 | bool suspended; | ||
93 | unsigned long access_time; /* jiffies */ | ||
94 | }; | ||
95 | |||
96 | static void __davinci_mdio_reset(struct davinci_mdio_data *data) | ||
97 | { | ||
98 | u32 mdio_in, div, mdio_out_khz, access_time; | ||
99 | |||
100 | mdio_in = clk_get_rate(data->clk); | ||
101 | div = (mdio_in / data->pdata.bus_freq) - 1; | ||
102 | if (div > CONTROL_MAX_DIV) | ||
103 | div = CONTROL_MAX_DIV; | ||
104 | |||
105 | /* set enable and clock divider */ | ||
106 | __raw_writel(div | CONTROL_ENABLE, &data->regs->control); | ||
107 | |||
108 | /* | ||
109 | * One mdio transaction consists of: | ||
110 | * 32 bits of preamble | ||
111 | * 32 bits of transferred data | ||
112 | * 24 bits of bus yield (not needed unless shared?) | ||
113 | */ | ||
114 | mdio_out_khz = mdio_in / (1000 * (div + 1)); | ||
115 | access_time = (88 * 1000) / mdio_out_khz; | ||
116 | |||
117 | /* | ||
118 | * In the worst case, we could be kicking off a user-access immediately | ||
119 | * after the mdio bus scan state-machine triggered its own read. If | ||
120 | * so, our request could get deferred by one access cycle. We | ||
121 | * defensively allow for 4 access cycles. | ||
122 | */ | ||
123 | data->access_time = usecs_to_jiffies(access_time * 4); | ||
124 | if (!data->access_time) | ||
125 | data->access_time = 1; | ||
126 | } | ||
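A worked example of the access-time arithmetic in __davinci_mdio_reset() above, written out as a comment; the 165 MHz input clock is a hypothetical figure chosen for round numbers, not something the driver assumes:

	/*
	 * mdio_in      = 165000000                      (from clk_get_rate)
	 * div          = 165000000 / 2200000 - 1 = 74   (fits CONTROL_MAX_DIV)
	 * mdio_out_khz = 165000 / (74 + 1)        = 2200
	 * access_time  = (88 * 1000) / 2200       = 40 usecs per 88-bit frame
	 *
	 * data->access_time then covers the defensive four frames:
	 * usecs_to_jiffies(160), bumped to at least one jiffy.
	 */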
127 | |||
128 | static int davinci_mdio_reset(struct mii_bus *bus) | ||
129 | { | ||
130 | struct davinci_mdio_data *data = bus->priv; | ||
131 | u32 phy_mask, ver; | ||
132 | |||
133 | __davinci_mdio_reset(data); | ||
134 | |||
135 | /* wait for scan logic to settle */ | ||
136 | msleep(PHY_MAX_ADDR * data->access_time); | ||
137 | |||
138 | /* dump hardware version info */ | ||
139 | ver = __raw_readl(&data->regs->version); | ||
140 | dev_info(data->dev, "davinci mdio revision %d.%d\n", | ||
141 | (ver >> 8) & 0xff, ver & 0xff); | ||
142 | |||
143 | /* get phy mask from the alive register */ | ||
144 | phy_mask = __raw_readl(&data->regs->alive); | ||
145 | if (phy_mask) { | ||
146 | /* restrict mdio bus to live phys only */ | ||
147 | dev_info(data->dev, "detected phy mask %x\n", ~phy_mask); | ||
148 | phy_mask = ~phy_mask; | ||
149 | } else { | ||
150 | /* desperately scan all phys */ | ||
151 | dev_warn(data->dev, "no live phy, scanning all\n"); | ||
152 | phy_mask = 0; | ||
153 | } | ||
154 | data->bus->phy_mask = phy_mask; | ||
155 | |||
156 | return 0; | ||
157 | } | ||
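Concretely, with a hypothetical ALIVE value: if only the PHY at address 1 responded to the scan, the register reads 0x00000002 and the mask logic above restricts probing like so:

	/*
	 * alive    = 0x00000002   -> only PHY address 1 is alive
	 * phy_mask = ~0x00000002  -> every other address bit is set
	 *
	 * mdiobus_register() skips any address whose bit is set in
	 * bus->phy_mask, so only address 1 is probed.
	 */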
158 | |||
159 | /* wait until hardware is ready for another user access */ | ||
160 | static inline int wait_for_user_access(struct davinci_mdio_data *data) | ||
161 | { | ||
162 | struct davinci_mdio_regs __iomem *regs = data->regs; | ||
163 | unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); | ||
164 | u32 reg; | ||
165 | |||
166 | while (time_after(timeout, jiffies)) { | ||
167 | reg = __raw_readl(&regs->user[0].access); | ||
168 | if ((reg & USERACCESS_GO) == 0) | ||
169 | return 0; | ||
170 | |||
171 | reg = __raw_readl(&regs->control); | ||
172 | if ((reg & CONTROL_IDLE) == 0) | ||
173 | continue; | ||
174 | |||
175 | /* | ||
176 | * An emac soft_reset may have clobbered the mdio controller's | ||
177 | * state machine. We need to reset and retry the current | ||
178 | * operation | ||
179 | */ | ||
180 | dev_warn(data->dev, "resetting idled controller\n"); | ||
181 | __davinci_mdio_reset(data); | ||
182 | return -EAGAIN; | ||
183 | } | ||
184 | dev_err(data->dev, "timed out waiting for user access\n"); | ||
185 | return -ETIMEDOUT; | ||
186 | } | ||
187 | |||
188 | /* wait until hardware state machine is idle */ | ||
189 | static inline int wait_for_idle(struct davinci_mdio_data *data) | ||
190 | { | ||
191 | struct davinci_mdio_regs __iomem *regs = data->regs; | ||
192 | unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT); | ||
193 | |||
194 | while (time_after(timeout, jiffies)) { | ||
195 | if (__raw_readl(&regs->control) & CONTROL_IDLE) | ||
196 | return 0; | ||
197 | } | ||
198 | dev_err(data->dev, "timed out waiting for idle\n"); | ||
199 | return -ETIMEDOUT; | ||
200 | } | ||
201 | |||
202 | static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg) | ||
203 | { | ||
204 | struct davinci_mdio_data *data = bus->priv; | ||
205 | u32 reg; | ||
206 | int ret; | ||
207 | |||
208 | if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) | ||
209 | return -EINVAL; | ||
210 | |||
211 | spin_lock(&data->lock); | ||
212 | |||
213 | if (data->suspended) { | ||
214 | spin_unlock(&data->lock); | ||
215 | return -ENODEV; | ||
216 | } | ||
217 | |||
218 | reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) | | ||
219 | (phy_id << 16)); | ||
220 | |||
221 | while (1) { | ||
222 | ret = wait_for_user_access(data); | ||
223 | if (ret == -EAGAIN) | ||
224 | continue; | ||
225 | if (ret < 0) | ||
226 | break; | ||
227 | |||
228 | __raw_writel(reg, &data->regs->user[0].access); | ||
229 | |||
230 | ret = wait_for_user_access(data); | ||
231 | if (ret == -EAGAIN) | ||
232 | continue; | ||
233 | if (ret < 0) | ||
234 | break; | ||
235 | |||
236 | reg = __raw_readl(&data->regs->user[0].access); | ||
237 | ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO; | ||
238 | break; | ||
239 | } | ||
240 | |||
241 | spin_unlock(&data->lock); | ||
242 | |||
243 | return ret; | ||
244 | } | ||
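For context, a minimal caller-side sketch of how this handler is reached through the mii_bus layer; the PHY address 1 is hypothetical:

	#include <linux/mii.h>
	#include <linux/phy.h>

	/* Poll link status of the PHY at address 1.  mdiobus_read() takes
	 * the mdio bus lock and lands in davinci_mdio_read() above. */
	static bool example_link_up(struct mii_bus *bus)
	{
		int bmsr = mdiobus_read(bus, 1, MII_BMSR);

		return bmsr >= 0 && (bmsr & BMSR_LSTATUS);
	}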
245 | |||
246 | static int davinci_mdio_write(struct mii_bus *bus, int phy_id, | ||
247 | int phy_reg, u16 phy_data) | ||
248 | { | ||
249 | struct davinci_mdio_data *data = bus->priv; | ||
250 | u32 reg; | ||
251 | int ret; | ||
252 | |||
253 | if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK) | ||
254 | return -EINVAL; | ||
255 | |||
256 | spin_lock(&data->lock); | ||
257 | |||
258 | if (data->suspended) { | ||
259 | spin_unlock(&data->lock); | ||
260 | return -ENODEV; | ||
261 | } | ||
262 | |||
263 | reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) | | ||
264 | (phy_id << 16) | (phy_data & USERACCESS_DATA)); | ||
265 | |||
266 | while (1) { | ||
267 | ret = wait_for_user_access(data); | ||
268 | if (ret == -EAGAIN) | ||
269 | continue; | ||
270 | if (ret < 0) | ||
271 | break; | ||
272 | |||
273 | __raw_writel(reg, &data->regs->user[0].access); | ||
274 | |||
275 | ret = wait_for_user_access(data); | ||
276 | if (ret == -EAGAIN) | ||
277 | continue; | ||
278 | break; | ||
279 | } | ||
280 | |||
281 | spin_unlock(&data->lock); | ||
282 | |||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | static int __devinit davinci_mdio_probe(struct platform_device *pdev) | ||
287 | { | ||
288 | struct mdio_platform_data *pdata = pdev->dev.platform_data; | ||
289 | struct device *dev = &pdev->dev; | ||
290 | struct davinci_mdio_data *data; | ||
291 | struct resource *res; | ||
292 | struct phy_device *phy; | ||
293 | int ret, addr; | ||
294 | |||
295 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
296 | if (!data) { | ||
297 | dev_err(dev, "failed to alloc device data\n"); | ||
298 | return -ENOMEM; | ||
299 | } | ||
300 | |||
301 | data->pdata = pdata ? (*pdata) : default_pdata; | ||
302 | |||
303 | data->bus = mdiobus_alloc(); | ||
304 | if (!data->bus) { | ||
305 | dev_err(dev, "failed to alloc mii bus\n"); | ||
306 | ret = -ENOMEM; | ||
307 | goto bail_out; | ||
308 | } | ||
309 | |||
310 | data->bus->name = dev_name(dev); | ||
311 | data->bus->read = davinci_mdio_read; | ||
312 | data->bus->write = davinci_mdio_write; | ||
313 | data->bus->reset = davinci_mdio_reset; | ||
314 | data->bus->parent = dev; | ||
315 | data->bus->priv = data; | ||
316 | snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); | ||
317 | |||
318 | data->clk = clk_get(dev, NULL); | ||
319 | if (IS_ERR(data->clk)) { | ||
320 | dev_err(dev, "failed to get device clock\n"); | ||
321 | ret = PTR_ERR(data->clk); | ||
322 | data->clk = NULL; | ||
323 | goto bail_out; | ||
324 | } | ||
325 | |||
326 | clk_enable(data->clk); | ||
327 | |||
328 | dev_set_drvdata(dev, data); | ||
329 | data->dev = dev; | ||
330 | spin_lock_init(&data->lock); | ||
331 | |||
332 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
333 | if (!res) { | ||
334 | dev_err(dev, "could not find register map resource\n"); | ||
335 | ret = -ENOENT; | ||
336 | goto bail_out; | ||
337 | } | ||
338 | |||
339 | res = devm_request_mem_region(dev, res->start, resource_size(res), | ||
340 | dev_name(dev)); | ||
341 | if (!res) { | ||
342 | dev_err(dev, "could not allocate register map resource\n"); | ||
343 | ret = -ENXIO; | ||
344 | goto bail_out; | ||
345 | } | ||
346 | |||
347 | data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); | ||
348 | if (!data->regs) { | ||
349 | dev_err(dev, "could not map mdio registers\n"); | ||
350 | ret = -ENOMEM; | ||
351 | goto bail_out; | ||
352 | } | ||
353 | |||
354 | /* register the mii bus */ | ||
355 | ret = mdiobus_register(data->bus); | ||
356 | if (ret) | ||
357 | goto bail_out; | ||
358 | |||
359 | /* scan and dump the bus */ | ||
360 | for (addr = 0; addr < PHY_MAX_ADDR; addr++) { | ||
361 | phy = data->bus->phy_map[addr]; | ||
362 | if (phy) { | ||
363 | dev_info(dev, "phy[%d]: device %s, driver %s\n", | ||
364 | phy->addr, dev_name(&phy->dev), | ||
365 | phy->drv ? phy->drv->name : "unknown"); | ||
366 | } | ||
367 | } | ||
368 | |||
369 | return 0; | ||
370 | |||
371 | bail_out: | ||
372 | if (data->bus) | ||
373 | mdiobus_free(data->bus); | ||
374 | |||
375 | if (data->clk) { | ||
376 | clk_disable(data->clk); | ||
377 | clk_put(data->clk); | ||
378 | } | ||
379 | |||
380 | kfree(data); | ||
381 | |||
382 | return ret; | ||
383 | } | ||
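Because the driver binds by platform name, a board file must register a matching "davinci_mdio" device; a minimal sketch, with the register base address purely hypothetical:

	#include <linux/platform_device.h>
	#include <linux/davinci_emac.h>

	static struct mdio_platform_data example_mdio_pdata = {
		.bus_freq = 2200000,	/* 2.2 MHz, same as DEF_OUT_FREQ */
	};

	static struct resource example_mdio_res[] = {
		{
			.start	= 0x01e24000,	/* hypothetical base */
			.end	= 0x01e24000 + 0xfff,
			.flags	= IORESOURCE_MEM,
		},
	};

	static struct platform_device example_mdio_device = {
		.name			= "davinci_mdio",
		.id			= 0,
		.resource		= example_mdio_res,
		.num_resources		= ARRAY_SIZE(example_mdio_res),
		.dev.platform_data	= &example_mdio_pdata,
	};

	/* in board init code: platform_device_register(&example_mdio_device); */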
384 | |||
385 | static int __devexit davinci_mdio_remove(struct platform_device *pdev) | ||
386 | { | ||
387 | struct device *dev = &pdev->dev; | ||
388 | struct davinci_mdio_data *data = dev_get_drvdata(dev); | ||
389 | |||
390 | if (data->bus) { | ||
391 | mdiobus_unregister(data->bus); | ||
392 | mdiobus_free(data->bus); | ||
393 | } | ||
392 | |||
393 | if (data->clk) { | ||
394 | clk_disable(data->clk); | ||
395 | clk_put(data->clk); | ||
396 | } | ||
397 | |||
398 | dev_set_drvdata(dev, NULL); | ||
399 | |||
400 | kfree(data); | ||
401 | |||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | static int davinci_mdio_suspend(struct device *dev) | ||
406 | { | ||
407 | struct davinci_mdio_data *data = dev_get_drvdata(dev); | ||
408 | u32 ctrl; | ||
409 | |||
410 | spin_lock(&data->lock); | ||
411 | |||
412 | /* shutdown the scan state machine */ | ||
413 | ctrl = __raw_readl(&data->regs->control); | ||
414 | ctrl &= ~CONTROL_ENABLE; | ||
415 | __raw_writel(ctrl, &data->regs->control); | ||
416 | wait_for_idle(data); | ||
417 | |||
418 | if (data->clk) | ||
419 | clk_disable(data->clk); | ||
420 | |||
421 | data->suspended = true; | ||
422 | spin_unlock(&data->lock); | ||
423 | |||
424 | return 0; | ||
425 | } | ||
426 | |||
427 | static int davinci_mdio_resume(struct device *dev) | ||
428 | { | ||
429 | struct davinci_mdio_data *data = dev_get_drvdata(dev); | ||
430 | u32 ctrl; | ||
431 | |||
432 | spin_lock(&data->lock); | ||
433 | if (data->clk) | ||
434 | clk_enable(data->clk); | ||
435 | |||
436 | /* restart the scan state machine */ | ||
437 | ctrl = __raw_readl(&data->regs->control); | ||
438 | ctrl |= CONTROL_ENABLE; | ||
439 | __raw_writel(ctrl, &data->regs->control); | ||
440 | |||
441 | data->suspended = false; | ||
442 | spin_unlock(&data->lock); | ||
443 | |||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | static const struct dev_pm_ops davinci_mdio_pm_ops = { | ||
448 | .suspend = davinci_mdio_suspend, | ||
449 | .resume = davinci_mdio_resume, | ||
450 | }; | ||
451 | |||
452 | static struct platform_driver davinci_mdio_driver = { | ||
453 | .driver = { | ||
454 | .name = "davinci_mdio", | ||
455 | .owner = THIS_MODULE, | ||
456 | .pm = &davinci_mdio_pm_ops, | ||
457 | }, | ||
458 | .probe = davinci_mdio_probe, | ||
459 | .remove = __devexit_p(davinci_mdio_remove), | ||
460 | }; | ||
461 | |||
462 | static int __init davinci_mdio_init(void) | ||
463 | { | ||
464 | return platform_driver_register(&davinci_mdio_driver); | ||
465 | } | ||
466 | device_initcall(davinci_mdio_init); | ||
467 | |||
468 | static void __exit davinci_mdio_exit(void) | ||
469 | { | ||
470 | platform_driver_unregister(&davinci_mdio_driver); | ||
471 | } | ||
472 | module_exit(davinci_mdio_exit); | ||
473 | |||
474 | MODULE_LICENSE("GPL"); | ||
475 | MODULE_DESCRIPTION("DaVinci MDIO driver"); | ||
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c new file mode 100644 index 000000000000..145871b3130b --- /dev/null +++ b/drivers/net/ethernet/ti/tlan.c | |||
@@ -0,0 +1,3258 @@ | |||
1 | /******************************************************************************* | ||
2 | * | ||
3 | * Linux ThunderLAN Driver | ||
4 | * | ||
5 | * tlan.c | ||
6 | * by James Banks | ||
7 | * | ||
8 | * (C) 1997-1998 Caldera, Inc. | ||
9 | * (C) 1998 James Banks | ||
10 | * (C) 1999-2001 Torben Mathiasen | ||
11 | * (C) 2002 Samuel Chessman | ||
12 | * | ||
13 | * This software may be used and distributed according to the terms | ||
14 | * of the GNU General Public License, incorporated herein by reference. | ||
15 | * | ||
16 | ** Useful (if not required) reading: | ||
17 | * | ||
18 | * Texas Instruments, ThunderLAN Programmer's Guide, | ||
19 | * TI Literature Number SPWU013A | ||
20 | * available in PDF format from www.ti.com | ||
21 | * Level One, LXT901 and LXT970 Data Sheets | ||
22 | * available in PDF format from www.level1.com | ||
23 | * National Semiconductor, DP83840A Data Sheet | ||
24 | * available in PDF format from www.national.com | ||
25 | * Microchip Technology, 24C01A/02A/04A Data Sheet | ||
26 | * available in PDF format from www.microchip.com | ||
27 | * | ||
28 | ******************************************************************************/ | ||
29 | |||
30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
31 | |||
32 | #include <linux/hardirq.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/interrupt.h> | ||
36 | #include <linux/ioport.h> | ||
37 | #include <linux/eisa.h> | ||
38 | #include <linux/pci.h> | ||
39 | #include <linux/dma-mapping.h> | ||
40 | #include <linux/netdevice.h> | ||
41 | #include <linux/etherdevice.h> | ||
42 | #include <linux/delay.h> | ||
43 | #include <linux/spinlock.h> | ||
44 | #include <linux/workqueue.h> | ||
45 | #include <linux/mii.h> | ||
46 | |||
47 | #include "tlan.h" | ||
48 | |||
49 | |||
50 | /* For removing EISA devices */ | ||
51 | static struct net_device *tlan_eisa_devices; | ||
52 | |||
53 | static int tlan_devices_installed; | ||
54 | |||
55 | /* Set speed, duplex and aui settings */ | ||
56 | static int aui[MAX_TLAN_BOARDS]; | ||
57 | static int duplex[MAX_TLAN_BOARDS]; | ||
58 | static int speed[MAX_TLAN_BOARDS]; | ||
59 | static int boards_found; | ||
60 | module_param_array(aui, int, NULL, 0); | ||
61 | module_param_array(duplex, int, NULL, 0); | ||
62 | module_param_array(speed, int, NULL, 0); | ||
63 | MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)"); | ||
64 | MODULE_PARM_DESC(duplex, | ||
65 | "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)"); | ||
66 | MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)"); | ||
67 | |||
68 | MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>"); | ||
69 | MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); | ||
70 | MODULE_LICENSE("GPL"); | ||
71 | |||
72 | |||
73 | /* Define this to enable Link beat monitoring */ | ||
74 | #undef MONITOR | ||
75 | |||
76 | /* Turn on debugging. See Documentation/networking/tlan.txt for details */ | ||
77 | static int debug; | ||
78 | module_param(debug, int, 0); | ||
79 | MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); | ||
80 | |||
81 | static const char tlan_signature[] = "TLAN"; | ||
82 | static const char tlan_banner[] = "ThunderLAN driver v1.17\n"; | ||
83 | static int tlan_have_pci; | ||
84 | static int tlan_have_eisa; | ||
85 | |||
86 | static const char * const media[] = { | ||
87 | "10BaseT-HD", "10BaseT-FD", "100baseTx-HD", | ||
88 | "100BaseTx-FD", "100BaseT4", NULL | ||
89 | }; | ||
90 | |||
91 | static struct board { | ||
92 | const char *device_label; | ||
93 | u32 flags; | ||
94 | u16 addr_ofs; | ||
95 | } board_info[] = { | ||
96 | { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, | ||
97 | { "Compaq Netelligent 10/100 TX PCI UTP", | ||
98 | TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, | ||
99 | { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, | ||
100 | { "Compaq NetFlex-3/P", | ||
101 | TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, | ||
102 | { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, | ||
103 | { "Compaq Netelligent Integrated 10/100 TX UTP", | ||
104 | TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, | ||
105 | { "Compaq Netelligent Dual 10/100 TX PCI UTP", | ||
106 | TLAN_ADAPTER_NONE, 0x83 }, | ||
107 | { "Compaq Netelligent 10/100 TX Embedded UTP", | ||
108 | TLAN_ADAPTER_NONE, 0x83 }, | ||
109 | { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, | ||
110 | { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 }, | ||
111 | { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 }, | ||
112 | { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, | ||
113 | { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 }, | ||
114 | { "Compaq NetFlex-3/E", | ||
115 | TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ | ||
116 | TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, | ||
117 | { "Compaq NetFlex-3/E", | ||
118 | TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ | ||
119 | }; | ||
120 | |||
121 | static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = { | ||
122 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10, | ||
123 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
124 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100, | ||
125 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, | ||
126 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I, | ||
127 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 }, | ||
128 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER, | ||
129 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 }, | ||
130 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B, | ||
131 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, | ||
132 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI, | ||
133 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, | ||
134 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D, | ||
135 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, | ||
136 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I, | ||
137 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 }, | ||
138 | { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183, | ||
139 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 }, | ||
140 | { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325, | ||
141 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 }, | ||
142 | { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326, | ||
143 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 }, | ||
144 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100, | ||
145 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 }, | ||
146 | { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2, | ||
147 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 }, | ||
148 | { 0,} | ||
149 | }; | ||
150 | MODULE_DEVICE_TABLE(pci, tlan_pci_tbl); | ||
151 | |||
152 | static void tlan_eisa_probe(void); | ||
153 | static void tlan_eisa_cleanup(void); | ||
154 | static int tlan_init(struct net_device *); | ||
155 | static int tlan_open(struct net_device *dev); | ||
156 | static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *); | ||
157 | static irqreturn_t tlan_handle_interrupt(int, void *); | ||
158 | static int tlan_close(struct net_device *); | ||
159 | static struct net_device_stats *tlan_get_stats(struct net_device *); | ||
160 | static void tlan_set_multicast_list(struct net_device *); | ||
161 | static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | ||
162 | static int tlan_probe1(struct pci_dev *pdev, long ioaddr, | ||
163 | int irq, int rev, const struct pci_device_id *ent); | ||
164 | static void tlan_tx_timeout(struct net_device *dev); | ||
165 | static void tlan_tx_timeout_work(struct work_struct *work); | ||
166 | static int tlan_init_one(struct pci_dev *pdev, | ||
167 | const struct pci_device_id *ent); | ||
168 | |||
169 | static u32 tlan_handle_tx_eof(struct net_device *, u16); | ||
170 | static u32 tlan_handle_stat_overflow(struct net_device *, u16); | ||
171 | static u32 tlan_handle_rx_eof(struct net_device *, u16); | ||
172 | static u32 tlan_handle_dummy(struct net_device *, u16); | ||
173 | static u32 tlan_handle_tx_eoc(struct net_device *, u16); | ||
174 | static u32 tlan_handle_status_check(struct net_device *, u16); | ||
175 | static u32 tlan_handle_rx_eoc(struct net_device *, u16); | ||
176 | |||
177 | static void tlan_timer(unsigned long); | ||
178 | |||
179 | static void tlan_reset_lists(struct net_device *); | ||
180 | static void tlan_free_lists(struct net_device *); | ||
181 | static void tlan_print_dio(u16); | ||
182 | static void tlan_print_list(struct tlan_list *, char *, int); | ||
183 | static void tlan_read_and_clear_stats(struct net_device *, int); | ||
184 | static void tlan_reset_adapter(struct net_device *); | ||
185 | static void tlan_finish_reset(struct net_device *); | ||
186 | static void tlan_set_mac(struct net_device *, int areg, char *mac); | ||
187 | |||
188 | static void tlan_phy_print(struct net_device *); | ||
189 | static void tlan_phy_detect(struct net_device *); | ||
190 | static void tlan_phy_power_down(struct net_device *); | ||
191 | static void tlan_phy_power_up(struct net_device *); | ||
192 | static void tlan_phy_reset(struct net_device *); | ||
193 | static void tlan_phy_start_link(struct net_device *); | ||
194 | static void tlan_phy_finish_auto_neg(struct net_device *); | ||
195 | #ifdef MONITOR | ||
196 | static void tlan_phy_monitor(struct net_device *); | ||
197 | #endif | ||
198 | |||
199 | /* | ||
200 | static int tlan_phy_nop(struct net_device *); | ||
201 | static int tlan_phy_internal_check(struct net_device *); | ||
202 | static int tlan_phy_internal_service(struct net_device *); | ||
203 | static int tlan_phy_dp83840a_check(struct net_device *); | ||
204 | */ | ||
205 | |||
206 | static bool tlan_mii_read_reg(struct net_device *, u16, u16, u16 *); | ||
207 | static void tlan_mii_send_data(u16, u32, unsigned); | ||
208 | static void tlan_mii_sync(u16); | ||
209 | static void tlan_mii_write_reg(struct net_device *, u16, u16, u16); | ||
210 | |||
211 | static void tlan_ee_send_start(u16); | ||
212 | static int tlan_ee_send_byte(u16, u8, int); | ||
213 | static void tlan_ee_receive_byte(u16, u8 *, int); | ||
214 | static int tlan_ee_read_byte(struct net_device *, u8, u8 *); | ||
215 | |||
216 | |||
217 | static inline void | ||
218 | tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb) | ||
219 | { | ||
220 | unsigned long addr = (unsigned long)skb; | ||
221 | tag->buffer[9].address = addr; | ||
222 | tag->buffer[8].address = upper_32_bits(addr); | ||
223 | } | ||
224 | |||
225 | static inline struct sk_buff * | ||
226 | tlan_get_skb(const struct tlan_list *tag) | ||
227 | { | ||
228 | unsigned long addr; | ||
229 | |||
230 | addr = tag->buffer[9].address; | ||
231 | addr |= ((unsigned long)tag->buffer[8].address << 16) << 16; | ||
232 | return (struct sk_buff *) addr; | ||
233 | } | ||
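These two helpers stash the sk_buff pointer in the otherwise unused descriptor slots 9 (low half) and 8 (high half) so that 64-bit pointers survive the u32 hardware fields; the retrieval idiom deserves a note:

	/*
	 * buffer[9].address holds bits 31..0 of the pointer and
	 * buffer[8].address bits 63..32 (zero on 32-bit builds).
	 * Recombining with ((unsigned long)hi << 16) << 16 instead of a
	 * single << 32 keeps the shift defined even where unsigned long
	 * is 32 bits wide: C leaves a shift by the full width of the
	 * type undefined.
	 */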
234 | |||
235 | static u32 | ||
236 | (*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = { | ||
237 | NULL, | ||
238 | tlan_handle_tx_eof, | ||
239 | tlan_handle_stat_overflow, | ||
240 | tlan_handle_rx_eof, | ||
241 | tlan_handle_dummy, | ||
242 | tlan_handle_tx_eoc, | ||
243 | tlan_handle_status_check, | ||
244 | tlan_handle_rx_eoc | ||
245 | }; | ||
246 | |||
247 | static inline void | ||
248 | tlan_set_timer(struct net_device *dev, u32 ticks, u32 type) | ||
249 | { | ||
250 | struct tlan_priv *priv = netdev_priv(dev); | ||
251 | unsigned long flags = 0; | ||
252 | |||
253 | if (!in_irq()) | ||
254 | spin_lock_irqsave(&priv->lock, flags); | ||
255 | if (priv->timer.function != NULL && | ||
256 | priv->timer_type != TLAN_TIMER_ACTIVITY) { | ||
257 | if (!in_irq()) | ||
258 | spin_unlock_irqrestore(&priv->lock, flags); | ||
259 | return; | ||
260 | } | ||
261 | priv->timer.function = tlan_timer; | ||
262 | if (!in_irq()) | ||
263 | spin_unlock_irqrestore(&priv->lock, flags); | ||
264 | |||
265 | priv->timer.data = (unsigned long) dev; | ||
266 | priv->timer_set_at = jiffies; | ||
267 | priv->timer_type = type; | ||
268 | mod_timer(&priv->timer, jiffies + ticks); | ||
269 | |||
270 | } | ||
271 | |||
272 | |||
273 | /***************************************************************************** | ||
274 | ****************************************************************************** | ||
275 | |||
276 | ThunderLAN driver primary functions | ||
277 | |||
278 | these functions are more or less common to all linux network drivers. | ||
279 | |||
280 | ****************************************************************************** | ||
281 | *****************************************************************************/ | ||
282 | |||
283 | |||
284 | |||
285 | |||
286 | |||
287 | /*************************************************************** | ||
288 | * tlan_remove_one | ||
289 | * | ||
290 | * Returns: | ||
291 | * Nothing | ||
292 | * Parms: | ||
293 | * pdev The PCI device being removed. | ||
294 | * | ||
295 | * Frees the device struct and memory associated with | ||
296 | * this device (lists and buffers). It also unreserves | ||
297 | * the IO port regions associated with this device. | ||
299 | * | ||
300 | **************************************************************/ | ||
301 | |||
302 | |||
303 | static void __devexit tlan_remove_one(struct pci_dev *pdev) | ||
304 | { | ||
305 | struct net_device *dev = pci_get_drvdata(pdev); | ||
306 | struct tlan_priv *priv = netdev_priv(dev); | ||
307 | |||
308 | unregister_netdev(dev); | ||
309 | |||
310 | if (priv->dma_storage) { | ||
311 | pci_free_consistent(priv->pci_dev, | ||
312 | priv->dma_size, priv->dma_storage, | ||
313 | priv->dma_storage_dma); | ||
314 | } | ||
315 | |||
316 | #ifdef CONFIG_PCI | ||
317 | pci_release_regions(pdev); | ||
318 | #endif | ||
319 | |||
320 | free_netdev(dev); | ||
321 | |||
322 | pci_set_drvdata(pdev, NULL); | ||
323 | } | ||
324 | |||
325 | static void tlan_start(struct net_device *dev) | ||
326 | { | ||
327 | tlan_reset_lists(dev); | ||
328 | /* NOTE: It might not be necessary to read the stats before a | ||
329 | reset if you don't care what the values are. | ||
330 | */ | ||
331 | tlan_read_and_clear_stats(dev, TLAN_IGNORE); | ||
332 | tlan_reset_adapter(dev); | ||
333 | netif_wake_queue(dev); | ||
334 | } | ||
335 | |||
336 | static void tlan_stop(struct net_device *dev) | ||
337 | { | ||
338 | struct tlan_priv *priv = netdev_priv(dev); | ||
339 | |||
340 | tlan_read_and_clear_stats(dev, TLAN_RECORD); | ||
341 | outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); | ||
342 | /* Reset and power down phy */ | ||
343 | tlan_reset_adapter(dev); | ||
344 | if (priv->timer.function != NULL) { | ||
345 | del_timer_sync(&priv->timer); | ||
346 | priv->timer.function = NULL; | ||
347 | } | ||
348 | } | ||
349 | |||
350 | #ifdef CONFIG_PM | ||
351 | |||
352 | static int tlan_suspend(struct pci_dev *pdev, pm_message_t state) | ||
353 | { | ||
354 | struct net_device *dev = pci_get_drvdata(pdev); | ||
355 | |||
356 | if (netif_running(dev)) | ||
357 | tlan_stop(dev); | ||
358 | |||
359 | netif_device_detach(dev); | ||
360 | pci_save_state(pdev); | ||
361 | pci_disable_device(pdev); | ||
362 | pci_wake_from_d3(pdev, false); | ||
363 | pci_set_power_state(pdev, PCI_D3hot); | ||
364 | |||
365 | return 0; | ||
366 | } | ||
367 | |||
368 | static int tlan_resume(struct pci_dev *pdev) | ||
369 | { | ||
370 | struct net_device *dev = pci_get_drvdata(pdev); | ||
371 | |||
372 | pci_set_power_state(pdev, PCI_D0); | ||
373 | pci_restore_state(pdev); | ||
374 | pci_enable_wake(pdev, 0, 0); | ||
375 | netif_device_attach(dev); | ||
376 | |||
377 | if (netif_running(dev)) | ||
378 | tlan_start(dev); | ||
379 | |||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | #else /* CONFIG_PM */ | ||
384 | |||
385 | #define tlan_suspend NULL | ||
386 | #define tlan_resume NULL | ||
387 | |||
388 | #endif /* CONFIG_PM */ | ||
389 | |||
390 | |||
391 | static struct pci_driver tlan_driver = { | ||
392 | .name = "tlan", | ||
393 | .id_table = tlan_pci_tbl, | ||
394 | .probe = tlan_init_one, | ||
395 | .remove = __devexit_p(tlan_remove_one), | ||
396 | .suspend = tlan_suspend, | ||
397 | .resume = tlan_resume, | ||
398 | }; | ||
399 | |||
400 | static int __init tlan_probe(void) | ||
401 | { | ||
402 | int rc = -ENODEV; | ||
403 | |||
404 | pr_info("%s", tlan_banner); | ||
405 | |||
406 | TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n"); | ||
407 | |||
408 | /* Use new style PCI probing. Now the kernel will | ||
409 | do most of this for us */ | ||
410 | rc = pci_register_driver(&tlan_driver); | ||
411 | |||
412 | if (rc != 0) { | ||
413 | pr_err("Could not register pci driver\n"); | ||
414 | goto err_out_pci_free; | ||
415 | } | ||
416 | |||
417 | TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n"); | ||
418 | tlan_eisa_probe(); | ||
419 | |||
420 | pr_info("%d device%s installed, PCI: %d EISA: %d\n", | ||
421 | tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s", | ||
422 | tlan_have_pci, tlan_have_eisa); | ||
423 | |||
424 | if (tlan_devices_installed == 0) { | ||
425 | rc = -ENODEV; | ||
426 | goto err_out_pci_unreg; | ||
427 | } | ||
428 | return 0; | ||
429 | |||
430 | err_out_pci_unreg: | ||
431 | pci_unregister_driver(&tlan_driver); | ||
432 | err_out_pci_free: | ||
433 | return rc; | ||
434 | } | ||
435 | |||
436 | |||
437 | static int __devinit tlan_init_one(struct pci_dev *pdev, | ||
438 | const struct pci_device_id *ent) | ||
439 | { | ||
440 | return tlan_probe1(pdev, -1, -1, 0, ent); | ||
441 | } | ||
442 | |||
443 | |||
444 | /* | ||
445 | *************************************************************** | ||
446 | * tlan_probe1 | ||
447 | * | ||
448 | * Returns: | ||
449 | * 0 on success, error code on error | ||
450 | * Parms: | ||
451 | * pdev, ioaddr, irq, rev, ent: PCI or EISA device info | ||
452 | * | ||
453 | * The name is lower case to fit in with all the rest of | ||
454 | * the netcard_probe names. This function looks for | ||
455 | * another TLan based adapter, setting it up with the | ||
456 | * allocated device struct if one is found. | ||
457 | * tlan_probe has been ported to the new net API and | ||
458 | * now allocates its own device structure. This function | ||
459 | * is also used by modules. | ||
460 | * | ||
461 | **************************************************************/ | ||
462 | |||
463 | static int __devinit tlan_probe1(struct pci_dev *pdev, | ||
464 | long ioaddr, int irq, int rev, | ||
465 | const struct pci_device_id *ent) | ||
466 | { | ||
467 | |||
468 | struct net_device *dev; | ||
469 | struct tlan_priv *priv; | ||
470 | u16 device_id; | ||
471 | int reg, rc = -ENODEV; | ||
472 | |||
473 | #ifdef CONFIG_PCI | ||
474 | if (pdev) { | ||
475 | rc = pci_enable_device(pdev); | ||
476 | if (rc) | ||
477 | return rc; | ||
478 | |||
479 | rc = pci_request_regions(pdev, tlan_signature); | ||
480 | if (rc) { | ||
481 | pr_err("Could not reserve IO regions\n"); | ||
482 | goto err_out; | ||
483 | } | ||
484 | } | ||
485 | #endif /* CONFIG_PCI */ | ||
486 | |||
487 | dev = alloc_etherdev(sizeof(struct tlan_priv)); | ||
488 | if (dev == NULL) { | ||
489 | pr_err("Could not allocate memory for device\n"); | ||
490 | rc = -ENOMEM; | ||
491 | goto err_out_regions; | ||
492 | } | ||
493 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
494 | |||
495 | priv = netdev_priv(dev); | ||
496 | |||
497 | priv->pci_dev = pdev; | ||
498 | priv->dev = dev; | ||
499 | |||
500 | /* Is this a PCI device? */ | ||
501 | if (pdev) { | ||
502 | u32 pci_io_base = 0; | ||
503 | |||
504 | priv->adapter = &board_info[ent->driver_data]; | ||
505 | |||
506 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
507 | if (rc) { | ||
508 | pr_err("No suitable PCI mapping available\n"); | ||
509 | goto err_out_free_dev; | ||
510 | } | ||
511 | |||
512 | for (reg = 0; reg <= 5; reg++) { | ||
513 | if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) { | ||
514 | pci_io_base = pci_resource_start(pdev, reg); | ||
515 | TLAN_DBG(TLAN_DEBUG_GNRL, | ||
516 | "IO mapping is available at %x.\n", | ||
517 | pci_io_base); | ||
518 | break; | ||
519 | } | ||
520 | } | ||
521 | if (!pci_io_base) { | ||
522 | pr_err("No IO mappings available\n"); | ||
523 | rc = -EIO; | ||
524 | goto err_out_free_dev; | ||
525 | } | ||
526 | |||
527 | dev->base_addr = pci_io_base; | ||
528 | dev->irq = pdev->irq; | ||
529 | priv->adapter_rev = pdev->revision; | ||
530 | pci_set_master(pdev); | ||
531 | pci_set_drvdata(pdev, dev); | ||
532 | |||
533 | } else { /* EISA card */ | ||
534 | /* This is a hack. We need to know which board structure | ||
535 | * is suited for this adapter */ | ||
536 | device_id = inw(ioaddr + EISA_ID2); | ||
537 | priv->is_eisa = 1; | ||
538 | if (device_id == 0x20F1) { | ||
539 | priv->adapter = &board_info[13]; /* NetFlex-3/E */ | ||
540 | priv->adapter_rev = 23; /* TLAN 2.3 */ | ||
541 | } else { | ||
542 | priv->adapter = &board_info[14]; | ||
543 | priv->adapter_rev = 10; /* TLAN 1.0 */ | ||
544 | } | ||
545 | dev->base_addr = ioaddr; | ||
546 | dev->irq = irq; | ||
547 | } | ||
548 | |||
549 | /* Kernel parameters */ | ||
550 | if (dev->mem_start) { | ||
551 | priv->aui = dev->mem_start & 0x01; | ||
552 | priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0 | ||
553 | : (dev->mem_start & 0x06) >> 1; | ||
554 | priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 | ||
555 | : (dev->mem_start & 0x18) >> 3; | ||
556 | |||
557 | if (priv->speed == 0x1) | ||
558 | priv->speed = TLAN_SPEED_10; | ||
559 | else if (priv->speed == 0x2) | ||
560 | priv->speed = TLAN_SPEED_100; | ||
561 | |||
562 | debug = priv->debug = dev->mem_end; | ||
563 | } else { | ||
564 | priv->aui = aui[boards_found]; | ||
565 | priv->speed = speed[boards_found]; | ||
566 | priv->duplex = duplex[boards_found]; | ||
567 | priv->debug = debug; | ||
568 | } | ||
569 | |||
570 | /* This will be used when we get an adapter error from | ||
571 | * within our irq handler */ | ||
572 | INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work); | ||
573 | |||
574 | spin_lock_init(&priv->lock); | ||
575 | |||
576 | rc = tlan_init(dev); | ||
577 | if (rc) { | ||
578 | pr_err("Could not set up device\n"); | ||
579 | goto err_out_free_dev; | ||
580 | } | ||
581 | |||
582 | rc = register_netdev(dev); | ||
583 | if (rc) { | ||
584 | pr_err("Could not register device\n"); | ||
585 | goto err_out_uninit; | ||
586 | } | ||
587 | |||
588 | |||
589 | tlan_devices_installed++; | ||
590 | boards_found++; | ||
591 | |||
592 | /* pdev is NULL if this is an EISA device */ | ||
593 | if (pdev) | ||
594 | tlan_have_pci++; | ||
595 | else { | ||
596 | priv->next_device = tlan_eisa_devices; | ||
597 | tlan_eisa_devices = dev; | ||
598 | tlan_have_eisa++; | ||
599 | } | ||
600 | |||
601 | netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n", | ||
602 | (int)dev->irq, | ||
603 | (int)dev->base_addr, | ||
604 | priv->adapter->device_label, | ||
605 | priv->adapter_rev); | ||
606 | return 0; | ||
607 | |||
608 | err_out_uninit: | ||
609 | pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage, | ||
610 | priv->dma_storage_dma); | ||
611 | err_out_free_dev: | ||
612 | free_netdev(dev); | ||
613 | err_out_regions: | ||
614 | #ifdef CONFIG_PCI | ||
615 | if (pdev) | ||
616 | pci_release_regions(pdev); | ||
617 | #endif | ||
618 | err_out: | ||
619 | if (pdev) | ||
620 | pci_disable_device(pdev); | ||
621 | return rc; | ||
622 | } | ||
623 | |||
624 | |||
625 | static void tlan_eisa_cleanup(void) | ||
626 | { | ||
627 | struct net_device *dev; | ||
628 | struct tlan_priv *priv; | ||
629 | |||
630 | while (tlan_have_eisa) { | ||
631 | dev = tlan_eisa_devices; | ||
632 | priv = netdev_priv(dev); | ||
633 | if (priv->dma_storage) { | ||
634 | pci_free_consistent(priv->pci_dev, priv->dma_size, | ||
635 | priv->dma_storage, | ||
636 | priv->dma_storage_dma); | ||
637 | } | ||
638 | release_region(dev->base_addr, 0x10); | ||
639 | unregister_netdev(dev); | ||
640 | tlan_eisa_devices = priv->next_device; | ||
641 | free_netdev(dev); | ||
642 | tlan_have_eisa--; | ||
643 | } | ||
644 | } | ||
645 | |||
646 | |||
647 | static void __exit tlan_exit(void) | ||
648 | { | ||
649 | pci_unregister_driver(&tlan_driver); | ||
650 | |||
651 | if (tlan_have_eisa) | ||
652 | tlan_eisa_cleanup(); | ||
653 | |||
654 | } | ||
655 | |||
656 | |||
657 | /* Module loading/unloading */ | ||
658 | module_init(tlan_probe); | ||
659 | module_exit(tlan_exit); | ||
660 | |||
661 | |||
662 | |||
663 | /************************************************************** | ||
664 | * tlan_eisa_probe | ||
665 | * | ||
666 | * Returns: Nothing | ||
667 | * | ||
668 | * Parms: None | ||
669 | * | ||
670 | * | ||
671 | * This function probes for EISA devices and calls | ||
672 | * tlan_probe1 when one is found. | ||
673 | * | ||
674 | *************************************************************/ | ||
675 | |||
676 | static void __init tlan_eisa_probe(void) | ||
677 | { | ||
678 | long ioaddr; | ||
679 | int rc = -ENODEV; | ||
680 | int irq; | ||
681 | u16 device_id; | ||
682 | |||
683 | if (!EISA_bus) { | ||
684 | TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n"); | ||
685 | return; | ||
686 | } | ||
687 | |||
688 | /* Loop through all slots of the EISA bus */ | ||
689 | for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { | ||
690 | |||
691 | TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n", | ||
692 | (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID)); | ||
693 | TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n", | ||
694 | (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2)); | ||
695 | |||
696 | |||
697 | TLAN_DBG(TLAN_DEBUG_PROBE, | ||
698 | "Probing for EISA adapter at IO: 0x%4x : ", | ||
699 | (int) ioaddr); | ||
700 | if (request_region(ioaddr, 0x10, tlan_signature) == NULL) | ||
701 | goto out; | ||
702 | |||
703 | if (inw(ioaddr + EISA_ID) != 0x110E) { | ||
704 | release_region(ioaddr, 0x10); | ||
705 | goto out; | ||
706 | } | ||
707 | |||
708 | device_id = inw(ioaddr + EISA_ID2); | ||
709 | if (device_id != 0x20F1 && device_id != 0x40F1) { | ||
710 | release_region(ioaddr, 0x10); | ||
711 | goto out; | ||
712 | } | ||
713 | |||
714 | /* check if adapter is enabled */ | ||
715 | if (inb(ioaddr + EISA_CR) != 0x1) { | ||
716 | release_region(ioaddr, 0x10); | ||
717 | goto out2; | ||
718 | } | ||
719 | |||
720 | if (debug == 0x10) | ||
721 | pr_info("Found one\n"); | ||
722 | |||
723 | |||
724 | /* Get irq from board */ | ||
725 | switch (inb(ioaddr + 0xcc0)) { | ||
726 | case(0x10): | ||
727 | irq = 5; | ||
728 | break; | ||
729 | case(0x20): | ||
730 | irq = 9; | ||
731 | break; | ||
732 | case(0x40): | ||
733 | irq = 10; | ||
734 | break; | ||
735 | case(0x80): | ||
736 | irq = 11; | ||
737 | break; | ||
738 | default: | ||
739 | release_region(ioaddr, 0x10); | ||
740 | goto out; | ||
740 | } | ||
741 | |||
742 | |||
743 | /* Setup the newly found eisa adapter */ | ||
744 | rc = tlan_probe1(NULL, ioaddr, irq, | ||
745 | 12, NULL); | ||
746 | continue; | ||
747 | |||
748 | out: | ||
749 | if (debug == 0x10) | ||
750 | pr_info("None found\n"); | ||
751 | continue; | ||
752 | |||
753 | out2: | ||
754 | if (debug == 0x10) | ||
755 | pr_info("Card found but it is not enabled, skipping\n"); | ||
756 | continue; | ||
757 | |||
758 | } | ||
759 | |||
760 | } | ||
761 | |||
762 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
763 | static void tlan_poll(struct net_device *dev) | ||
764 | { | ||
765 | disable_irq(dev->irq); | ||
766 | tlan_handle_interrupt(dev->irq, dev); | ||
767 | enable_irq(dev->irq); | ||
768 | } | ||
769 | #endif | ||
770 | |||
771 | static const struct net_device_ops tlan_netdev_ops = { | ||
772 | .ndo_open = tlan_open, | ||
773 | .ndo_stop = tlan_close, | ||
774 | .ndo_start_xmit = tlan_start_tx, | ||
775 | .ndo_tx_timeout = tlan_tx_timeout, | ||
776 | .ndo_get_stats = tlan_get_stats, | ||
777 | .ndo_set_multicast_list = tlan_set_multicast_list, | ||
778 | .ndo_do_ioctl = tlan_ioctl, | ||
779 | .ndo_change_mtu = eth_change_mtu, | ||
780 | .ndo_set_mac_address = eth_mac_addr, | ||
781 | .ndo_validate_addr = eth_validate_addr, | ||
782 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
783 | .ndo_poll_controller = tlan_poll, | ||
784 | #endif | ||
785 | }; | ||
786 | |||
787 | |||
788 | |||
789 | /*************************************************************** | ||
790 | * tlan_init | ||
791 | * | ||
792 | * Returns: | ||
793 | * 0 on success, error code otherwise. | ||
794 | * Parms: | ||
795 | * dev The structure of the device to be | ||
796 | * init'ed. | ||
797 | * | ||
798 | * This function completes the initialization of the | ||
799 | * device structure and driver. It reserves the IO | ||
800 | * addresses, allocates memory for the lists and bounce | ||
801 | * buffers, retrieves the MAC address from the eeprom | ||
802 | * and assigns the device's methods. | ||
803 | * | ||
804 | **************************************************************/ | ||
805 | |||
806 | static int tlan_init(struct net_device *dev) | ||
807 | { | ||
808 | int dma_size; | ||
809 | int err; | ||
810 | int i; | ||
811 | struct tlan_priv *priv; | ||
812 | |||
813 | priv = netdev_priv(dev); | ||
814 | |||
815 | dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS) | ||
816 | * (sizeof(struct tlan_list)); | ||
817 | priv->dma_storage = pci_alloc_consistent(priv->pci_dev, | ||
818 | dma_size, | ||
819 | &priv->dma_storage_dma); | ||
820 | priv->dma_size = dma_size; | ||
821 | |||
822 | if (priv->dma_storage == NULL) { | ||
823 | pr_err("Could not allocate lists and buffers for %s\n", | ||
824 | dev->name); | ||
825 | return -ENOMEM; | ||
826 | } | ||
827 | memset(priv->dma_storage, 0, dma_size); | ||
828 | priv->rx_list = (struct tlan_list *) | ||
829 | ALIGN((unsigned long)priv->dma_storage, 8); | ||
830 | priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8); | ||
831 | priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS; | ||
832 | priv->tx_list_dma = | ||
833 | priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS; | ||
834 | |||
835 | err = 0; | ||
836 | for (i = 0; i < 6 ; i++) | ||
837 | err |= tlan_ee_read_byte(dev, | ||
838 | (u8) priv->adapter->addr_ofs + i, | ||
839 | (u8 *) &dev->dev_addr[i]); | ||
840 | if (err) { | ||
841 | pr_err("%s: Error reading MAC from eeprom: %d\n", | ||
842 | dev->name, err); | ||
843 | } | ||
844 | dev->addr_len = 6; | ||
845 | |||
846 | netif_carrier_off(dev); | ||
847 | |||
848 | /* Device methods */ | ||
849 | dev->netdev_ops = &tlan_netdev_ops; | ||
850 | dev->watchdog_timeo = TX_TIMEOUT; | ||
851 | |||
852 | return 0; | ||
853 | |||
854 | } | ||
855 | |||
856 | |||
857 | |||
858 | |||
859 | /*************************************************************** | ||
860 | * tlan_open | ||
861 | * | ||
862 | * Returns: | ||
863 | * 0 on success, error code otherwise. | ||
864 | * Parms: | ||
865 | * dev Structure of device to be opened. | ||
866 | * | ||
867 | * This routine puts the driver and TLAN adapter in a | ||
868 | * state where it is ready to send and receive packets. | ||
869 | * It allocates the IRQ, resets and brings the adapter | ||
870 | * out of reset, and allows interrupts. It also delays | ||
871 | * the startup for autonegotiation or sends a Rx GO | ||
872 | * command to the adapter, as appropriate. | ||
873 | * | ||
874 | **************************************************************/ | ||
875 | |||
876 | static int tlan_open(struct net_device *dev) | ||
877 | { | ||
878 | struct tlan_priv *priv = netdev_priv(dev); | ||
879 | int err; | ||
880 | |||
881 | priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION); | ||
882 | err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED, | ||
883 | dev->name, dev); | ||
884 | |||
885 | if (err) { | ||
886 | netdev_err(dev, "Cannot open because IRQ %d is already in use\n", | ||
887 | dev->irq); | ||
888 | return err; | ||
889 | } | ||
890 | |||
891 | init_timer(&priv->timer); | ||
892 | |||
893 | tlan_start(dev); | ||
894 | |||
895 | TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", | ||
896 | dev->name, priv->tlan_rev); | ||
897 | |||
898 | return 0; | ||
899 | |||
900 | } | ||
901 | |||
902 | |||
903 | |||
904 | /************************************************************** | ||
905 | * tlan_ioctl | ||
906 | * | ||
907 | * Returns: | ||
908 | * 0 on success, error code otherwise | ||
909 | * Params: | ||
910 | * dev structure of device to receive ioctl. | ||
911 | * | ||
912 | * rq ifreq structure to hold userspace data. | ||
913 | * | ||
914 | * cmd ioctl command. | ||
915 | * | ||
916 | * | ||
917 | *************************************************************/ | ||
918 | |||
919 | static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | ||
920 | { | ||
921 | struct tlan_priv *priv = netdev_priv(dev); | ||
922 | struct mii_ioctl_data *data = if_mii(rq); | ||
923 | u32 phy = priv->phy[priv->phy_num]; | ||
924 | |||
925 | if (!priv->phy_online) | ||
926 | return -EAGAIN; | ||
927 | |||
928 | switch (cmd) { | ||
929 | case SIOCGMIIPHY: /* get address of MII PHY in use. */ | ||
930 | data->phy_id = phy; | ||
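		/* fall through: also read back the selected register */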
931 | |||
932 | |||
933 | case SIOCGMIIREG: /* read MII PHY register. */ | ||
934 | tlan_mii_read_reg(dev, data->phy_id & 0x1f, | ||
935 | data->reg_num & 0x1f, &data->val_out); | ||
936 | return 0; | ||
937 | |||
938 | |||
939 | case SIOCSMIIREG: /* write MII PHY register. */ | ||
940 | tlan_mii_write_reg(dev, data->phy_id & 0x1f, | ||
941 | data->reg_num & 0x1f, data->val_in); | ||
942 | return 0; | ||
943 | default: | ||
944 | return -EOPNOTSUPP; | ||
945 | } | ||
946 | } | ||
947 | |||
948 | |||
949 | /*************************************************************** | ||
950 | * tlan_tx_timeout | ||
951 | * | ||
952 | * Returns: nothing | ||
953 | * | ||
954 | * Params: | ||
955 | * dev structure of device which timed out | ||
956 | * during transmit. | ||
957 | * | ||
958 | **************************************************************/ | ||
959 | |||
960 | static void tlan_tx_timeout(struct net_device *dev) | ||
961 | { | ||
962 | |||
963 | TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name); | ||
964 | |||
965 | /* OK, so we timed out; let's see what we can do about it... */ | ||
966 | tlan_free_lists(dev); | ||
967 | tlan_reset_lists(dev); | ||
968 | tlan_read_and_clear_stats(dev, TLAN_IGNORE); | ||
969 | tlan_reset_adapter(dev); | ||
970 | dev->trans_start = jiffies; /* prevent tx timeout */ | ||
971 | netif_wake_queue(dev); | ||
972 | |||
973 | } | ||
974 | |||
975 | |||
976 | /*************************************************************** | ||
977 | * tlan_tx_timeout_work | ||
978 | * | ||
979 | * Returns: nothing | ||
980 | * | ||
981 | * Params: | ||
982 | * work work item of device which timed out | ||
983 | * | ||
984 | **************************************************************/ | ||
985 | |||
986 | static void tlan_tx_timeout_work(struct work_struct *work) | ||
987 | { | ||
988 | struct tlan_priv *priv = | ||
989 | container_of(work, struct tlan_priv, tlan_tqueue); | ||
990 | |||
991 | tlan_tx_timeout(priv->dev); | ||
992 | } | ||
993 | |||
994 | |||
995 | |||
996 | /*************************************************************** | ||
997 | * tlan_start_tx | ||
998 | * | ||
999 | * Returns: | ||
1000 | * 0 on success, non-zero on failure. | ||
1001 | * Parms: | ||
1002 | * skb A pointer to the sk_buff containing the | ||
1003 | * frame to be sent. | ||
1004 | * dev The device to send the data on. | ||
1005 | * | ||
1006 | * This function adds a frame to the Tx list to be sent | ||
1007 | * ASAP. First it verifies that the adapter is ready and | ||
1008 | * there is room in the queue. Then it sets up the next | ||
1009 | * available list and maps the frame for DMA. If the | ||
1010 | * adapter Tx channel is idle, it gives the adapter a | ||
1011 | * Tx Go command on the list, otherwise it sets the | ||
1012 | * forward address of the previous list to point to | ||
1013 | * this one. The sk_buff is freed later, at Tx EOF. | ||
1014 | * | ||
1015 | **************************************************************/ | ||
1016 | |||
1017 | static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev) | ||
1018 | { | ||
1019 | struct tlan_priv *priv = netdev_priv(dev); | ||
1020 | dma_addr_t tail_list_phys; | ||
1021 | struct tlan_list *tail_list; | ||
1022 | unsigned long flags; | ||
1023 | unsigned int txlen; | ||
1024 | |||
1025 | if (!priv->phy_online) { | ||
1026 | TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", | ||
1027 | dev->name); | ||
1028 | dev_kfree_skb_any(skb); | ||
1029 | return NETDEV_TX_OK; | ||
1030 | } | ||
1031 | |||
1032 | if (skb_padto(skb, TLAN_MIN_FRAME_SIZE)) | ||
1033 | return NETDEV_TX_OK; | ||
1034 | txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE); | ||
1035 | |||
1036 | tail_list = priv->tx_list + priv->tx_tail; | ||
1037 | tail_list_phys = | ||
1038 | priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail; | ||
1039 | |||
1040 | if (tail_list->c_stat != TLAN_CSTAT_UNUSED) { | ||
1041 | TLAN_DBG(TLAN_DEBUG_TX, | ||
1042 | "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", | ||
1043 | dev->name, priv->tx_head, priv->tx_tail); | ||
1044 | netif_stop_queue(dev); | ||
1045 | priv->tx_busy_count++; | ||
1046 | return NETDEV_TX_BUSY; | ||
1047 | } | ||
1048 | |||
1049 | tail_list->forward = 0; | ||
1050 | |||
1051 | tail_list->buffer[0].address = pci_map_single(priv->pci_dev, | ||
1052 | skb->data, txlen, | ||
1053 | PCI_DMA_TODEVICE); | ||
1054 | tlan_store_skb(tail_list, skb); | ||
1055 | |||
1056 | tail_list->frame_size = (u16) txlen; | ||
1057 | tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen; | ||
1058 | tail_list->buffer[1].count = 0; | ||
1059 | tail_list->buffer[1].address = 0; | ||
1060 | |||
1061 | spin_lock_irqsave(&priv->lock, flags); | ||
1062 | tail_list->c_stat = TLAN_CSTAT_READY; | ||
1063 | if (!priv->tx_in_progress) { | ||
1064 | priv->tx_in_progress = 1; | ||
1065 | TLAN_DBG(TLAN_DEBUG_TX, | ||
1066 | "TRANSMIT: Starting TX on buffer %d\n", | ||
1067 | priv->tx_tail); | ||
1068 | outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM); | ||
1069 | outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD); | ||
1070 | } else { | ||
1071 | TLAN_DBG(TLAN_DEBUG_TX, | ||
1072 | "TRANSMIT: Adding buffer %d to TX channel\n", | ||
1073 | priv->tx_tail); | ||
1074 | if (priv->tx_tail == 0) { | ||
1075 | (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward | ||
1076 | = tail_list_phys; | ||
1077 | } else { | ||
1078 | (priv->tx_list + (priv->tx_tail - 1))->forward | ||
1079 | = tail_list_phys; | ||
1080 | } | ||
1081 | } | ||
1082 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1083 | |||
1084 | CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS); | ||
1085 | |||
1086 | return NETDEV_TX_OK; | ||
1087 | |||
1088 | } | ||
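tlan_start_tx() fills in one element of the shared DMA list ring, and CIRC_INC() is tlan.h's wrapping ring-index increment. The element layout comes from tlan.h in this same patch; approximately:

/* Shape of a TLAN list element (approximate transcription from
 * tlan.h): ten buffer descriptors, of which this driver uses
 * buffer[0] for frame data and, as seen in tlan_handle_tx_eof()
 * below, buffer[8]/buffer[9] to stash the sk_buff pointer. */
struct tlan_buffer {
	u32	count;		/* byte count, ORed with TLAN_LAST_BUFFER */
	u32	address;	/* DMA address of the data */
};

struct tlan_list {
	u32			forward;	/* DMA address of the next list */
	u16			c_stat;		/* channel status (CSTAT) */
	u16			frame_size;
	struct tlan_buffer	buffer[TLAN_BUFFERS_PER_LIST];	/* 10 */
};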
1089 | |||
1090 | |||
1091 | |||
1092 | |||
1093 | /*************************************************************** | ||
1094 | * tlan_handle_interrupt | ||
1095 | * | ||
1096 | * Returns: | ||
1097 | * Nothing | ||
1098 | * Parms: | ||
1099 | * irq The line on which the interrupt | ||
1100 | * occurred. | ||
1101 | * dev_id A pointer to the device assigned to | ||
1102 | * this irq line. | ||
1103 | * | ||
1104 | * This function handles an interrupt generated by its | ||
1105 | * assigned TLAN adapter. The function deactivates | ||
1106 | * interrupts on its adapter, records the type of | ||
1107 | * interrupt, executes the appropriate subhandler, and | ||
1108 | * acknowledges the interrupt to the adapter (thus | ||
1109 | * re-enabling adapter interrupts). | ||
1110 | * | ||
1111 | **************************************************************/ | ||
1112 | |||
1113 | static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id) | ||
1114 | { | ||
1115 | struct net_device *dev = dev_id; | ||
1116 | struct tlan_priv *priv = netdev_priv(dev); | ||
1117 | u16 host_int; | ||
1118 | u16 type; | ||
1119 | |||
1120 | spin_lock(&priv->lock); | ||
1121 | |||
1122 | host_int = inw(dev->base_addr + TLAN_HOST_INT); | ||
1123 | type = (host_int & TLAN_HI_IT_MASK) >> 2; | ||
1124 | if (type) { | ||
1125 | u32 ack; | ||
1126 | u32 host_cmd; | ||
1127 | |||
1128 | outw(host_int, dev->base_addr + TLAN_HOST_INT); | ||
1129 | ack = tlan_int_vector[type](dev, host_int); | ||
1130 | |||
1131 | if (ack) { | ||
1132 | host_cmd = TLAN_HC_ACK | ack | (type << 18); | ||
1133 | outl(host_cmd, dev->base_addr + TLAN_HOST_CMD); | ||
1134 | } | ||
1135 | } | ||
1136 | |||
1137 | spin_unlock(&priv->lock); | ||
1138 | |||
1139 | return IRQ_RETVAL(type); | ||
1140 | } | ||
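The type-indexed dispatch in tlan_handle_interrupt() goes through tlan_int_vector, defined earlier in this file; shown approximately here, with slot 0 being the "no interrupt" case:

/* Interrupt dispatch table, indexed by the type field extracted from
 * TLAN_HOST_INT above (approximate transcription; the real table is
 * earlier in tlan.c). */
typedef u32 (tlan_int_handler_t)(struct net_device *dev, u16 host_int);

static tlan_int_handler_t *tlan_int_vector[TLAN_INT_NUMBER_OF_INTS] = {
	NULL,
	tlan_handle_tx_eof,
	tlan_handle_stat_overflow,
	tlan_handle_rx_eof,
	tlan_handle_dummy,
	tlan_handle_tx_eoc,
	tlan_handle_status_check,
	tlan_handle_rx_eoc
};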
1141 | |||
1142 | |||
1143 | |||
1144 | |||
1145 | /*************************************************************** | ||
1146 | * tlan_close | ||
1147 | * | ||
1148 | * Returns: | ||
1149 | * An error code. | ||
1150 | * Parms: | ||
1151 | * dev The device structure of the device to | ||
1152 | * close. | ||
1153 | * | ||
1154 | * This function shuts down the adapter. It records any | ||
1155 | * stats, puts the adapter into reset state, deactivates | ||
1156 | * its timer as needed, and frees the irq it is using. | ||
1157 | * | ||
1158 | **************************************************************/ | ||
1159 | |||
1160 | static int tlan_close(struct net_device *dev) | ||
1161 | { | ||
1162 | struct tlan_priv *priv = netdev_priv(dev); | ||
1163 | |||
1164 | priv->neg_be_verbose = 0; | ||
1165 | tlan_stop(dev); | ||
1166 | |||
1167 | free_irq(dev->irq, dev); | ||
1168 | tlan_free_lists(dev); | ||
1169 | TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name); | ||
1170 | |||
1171 | return 0; | ||
1172 | |||
1173 | } | ||
1174 | |||
1175 | |||
1176 | |||
1177 | |||
1178 | /*************************************************************** | ||
1179 | * tlan_get_stats | ||
1180 | * | ||
1181 | * Returns: | ||
1182 | * A pointer to the device's statistics structure. | ||
1183 | * Parms: | ||
1184 | * dev The device structure to return the | ||
1185 | * stats for. | ||
1186 | * | ||
1187 | * This function updates the devices statistics by reading | ||
1188 | * the TLAN chip's onboard registers. Then it returns the | ||
1189 | * address of the statistics structure. | ||
1190 | * | ||
1191 | **************************************************************/ | ||
1192 | |||
1193 | static struct net_device_stats *tlan_get_stats(struct net_device *dev) | ||
1194 | { | ||
1195 | struct tlan_priv *priv = netdev_priv(dev); | ||
1196 | int i; | ||
1197 | |||
1198 | /* Should only read stats if open ? */ | ||
1199 | tlan_read_and_clear_stats(dev, TLAN_RECORD); | ||
1200 | |||
1201 | TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, | ||
1202 | priv->rx_eoc_count); | ||
1203 | TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, | ||
1204 | priv->tx_busy_count); | ||
1205 | if (debug & TLAN_DEBUG_GNRL) { | ||
1206 | tlan_print_dio(dev->base_addr); | ||
1207 | tlan_phy_print(dev); | ||
1208 | } | ||
1209 | if (debug & TLAN_DEBUG_LIST) { | ||
1210 | for (i = 0; i < TLAN_NUM_RX_LISTS; i++) | ||
1211 | tlan_print_list(priv->rx_list + i, "RX", i); | ||
1212 | for (i = 0; i < TLAN_NUM_TX_LISTS; i++) | ||
1213 | tlan_print_list(priv->tx_list + i, "TX", i); | ||
1214 | } | ||
1215 | |||
1216 | return &dev->stats; | ||
1217 | |||
1218 | } | ||
1219 | |||
1220 | |||
1221 | |||
1222 | |||
1223 | /*************************************************************** | ||
1224 | * tlan_set_multicast_list | ||
1225 | * | ||
1226 | * Returns: | ||
1227 | * Nothing | ||
1228 | * Parms: | ||
1229 | * dev The device structure to set the | ||
1230 | * multicast list for. | ||
1231 | * | ||
1232 | * This function sets the TLAN adaptor to various receive | ||
1233 | * modes. If the IFF_PROMISC flag is set, promiscuous | ||
1234 | * mode is activated. Otherwise, promiscuous mode is | ||
1235 | * turned off. If the IFF_ALLMULTI flag is set, then | ||
1236 | * the hash table is set to receive all group addresses. | ||
1237 | * Otherwise, the first three multicast addresses are | ||
1238 | * stored in AREG_1-3, and the rest are selected via the | ||
1239 | * hash table, as necessary. | ||
1240 | * | ||
1241 | **************************************************************/ | ||
1242 | |||
1243 | static void tlan_set_multicast_list(struct net_device *dev) | ||
1244 | { | ||
1245 | struct netdev_hw_addr *ha; | ||
1246 | u32 hash1 = 0; | ||
1247 | u32 hash2 = 0; | ||
1248 | int i; | ||
1249 | u32 offset; | ||
1250 | u8 tmp; | ||
1251 | |||
1252 | if (dev->flags & IFF_PROMISC) { | ||
1253 | tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD); | ||
1254 | tlan_dio_write8(dev->base_addr, | ||
1255 | TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF); | ||
1256 | } else { | ||
1257 | tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD); | ||
1258 | tlan_dio_write8(dev->base_addr, | ||
1259 | TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF); | ||
1260 | if (dev->flags & IFF_ALLMULTI) { | ||
1261 | for (i = 0; i < 3; i++) | ||
1262 | tlan_set_mac(dev, i + 1, NULL); | ||
1263 | tlan_dio_write32(dev->base_addr, TLAN_HASH_1, | ||
1264 | 0xffffffff); | ||
1265 | tlan_dio_write32(dev->base_addr, TLAN_HASH_2, | ||
1266 | 0xffffffff); | ||
1267 | } else { | ||
1268 | i = 0; | ||
1269 | netdev_for_each_mc_addr(ha, dev) { | ||
1270 | if (i < 3) { | ||
1271 | tlan_set_mac(dev, i + 1, | ||
1272 | (char *) &ha->addr); | ||
1273 | } else { | ||
1274 | offset = | ||
1275 | tlan_hash_func((u8 *)&ha->addr); | ||
1276 | if (offset < 32) | ||
1277 | hash1 |= (1 << offset); | ||
1278 | else | ||
1279 | hash2 |= (1 << (offset - 32)); | ||
1280 | } | ||
1281 | i++; | ||
1282 | } | ||
1283 | for ( ; i < 3; i++) | ||
1284 | tlan_set_mac(dev, i + 1, NULL); | ||
1285 | tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1); | ||
1286 | tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2); | ||
1287 | } | ||
1288 | } | ||
1289 | |||
1290 | } | ||
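The hash1/hash2 words written above form a 64-bit bitmap indexed by a 6-bit hash of the multicast address. The hash itself is an XOR fold, implemented inline in tlan.h; approximately:

/* 6-bit multicast hash (approximate transcription from tlan.h): XOR
 * the two halves of the address together, then fold the resulting
 * 24 bits down to 6. Bit N of HASH_1 (N < 32) or HASH_2 (N >= 32)
 * enables reception for hash value N. */
static inline u32 tlan_hash_func(const u8 *a)
{
	u8 hash;

	hash  = (a[0] ^ a[3]);			/* & 077 */
	hash ^= ((a[0] ^ a[3]) >> 6);		/* & 003 */
	hash ^= ((a[1] ^ a[4]) << 2);		/* & 074 */
	hash ^= ((a[1] ^ a[4]) >> 4);		/* & 017 */
	hash ^= ((a[2] ^ a[5]) << 4);		/* & 060 */
	hash ^= ((a[2] ^ a[5]) >> 2);		/* & 077 */

	return hash & 077;			/* 0..63 */
}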
1291 | |||
1292 | |||
1293 | |||
1294 | /***************************************************************************** | ||
1295 | ****************************************************************************** | ||
1296 | |||
1297 | ThunderLAN driver interrupt vectors and table | ||
1298 | |||
1299 | Please see chap. 4, "Interrupt Handling" of the "ThunderLAN | ||
1300 | Programmer's Guide" for more information on handling interrupts | ||
1301 | generated by TLAN-based adapters. | ||
1302 | |||
1303 | ****************************************************************************** | ||
1304 | *****************************************************************************/ | ||
1305 | |||
1306 | |||
1307 | |||
1308 | |||
1309 | /*************************************************************** | ||
1310 | * tlan_handle_tx_eof | ||
1311 | * | ||
1312 | * Returns: | ||
1313 | * 1 | ||
1314 | * Parms: | ||
1315 | * dev Device assigned the IRQ that was | ||
1316 | * raised. | ||
1317 | * host_int The contents of the HOST_INT | ||
1318 | * port. | ||
1319 | * | ||
1320 | * This function handles Tx EOF interrupts which are raised | ||
1321 | * by the adapter when it has completed sending the | ||
1322 | * contents of a buffer. It determines which list/buffer | ||
1323 | * was completed and resets it. If the buffer was the last | ||
1324 | * in the channel (EOC), then the function checks to see if | ||
1325 | * another buffer is ready to send, and if so, sends a Tx | ||
1326 | * Go command. Finally, the driver activates/continues the | ||
1327 | * activity LED. | ||
1328 | * | ||
1329 | **************************************************************/ | ||
1330 | |||
1331 | static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int) | ||
1332 | { | ||
1333 | struct tlan_priv *priv = netdev_priv(dev); | ||
1334 | int eoc = 0; | ||
1335 | struct tlan_list *head_list; | ||
1336 | dma_addr_t head_list_phys; | ||
1337 | u32 ack = 0; | ||
1338 | u16 tmp_c_stat; | ||
1339 | |||
1340 | TLAN_DBG(TLAN_DEBUG_TX, | ||
1341 | "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", | ||
1342 | priv->tx_head, priv->tx_tail); | ||
1343 | head_list = priv->tx_list + priv->tx_head; | ||
1344 | |||
1345 | while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP) | ||
1346 | && (ack < 255)) { | ||
1347 | struct sk_buff *skb = tlan_get_skb(head_list); | ||
1348 | |||
1349 | ack++; | ||
1350 | pci_unmap_single(priv->pci_dev, head_list->buffer[0].address, | ||
1351 | max(skb->len, | ||
1352 | (unsigned int)TLAN_MIN_FRAME_SIZE), | ||
1353 | PCI_DMA_TODEVICE); | ||
1354 | dev_kfree_skb_any(skb); | ||
1355 | head_list->buffer[8].address = 0; | ||
1356 | head_list->buffer[9].address = 0; | ||
1357 | |||
1358 | if (tmp_c_stat & TLAN_CSTAT_EOC) | ||
1359 | eoc = 1; | ||
1360 | |||
1361 | dev->stats.tx_bytes += head_list->frame_size; | ||
1362 | |||
1363 | head_list->c_stat = TLAN_CSTAT_UNUSED; | ||
1364 | netif_start_queue(dev); | ||
1365 | CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS); | ||
1366 | head_list = priv->tx_list + priv->tx_head; | ||
1367 | } | ||
1368 | |||
1369 | if (!ack) | ||
1370 | netdev_info(dev, | ||
1371 | "Received interrupt for uncompleted TX frame\n"); | ||
1372 | |||
1373 | if (eoc) { | ||
1374 | TLAN_DBG(TLAN_DEBUG_TX, | ||
1375 | "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n", | ||
1376 | priv->tx_head, priv->tx_tail); | ||
1377 | head_list = priv->tx_list + priv->tx_head; | ||
1378 | head_list_phys = priv->tx_list_dma | ||
1379 | + sizeof(struct tlan_list)*priv->tx_head; | ||
1380 | if ((head_list->c_stat & TLAN_CSTAT_READY) | ||
1381 | == TLAN_CSTAT_READY) { | ||
1382 | outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); | ||
1383 | ack |= TLAN_HC_GO; | ||
1384 | } else { | ||
1385 | priv->tx_in_progress = 0; | ||
1386 | } | ||
1387 | } | ||
1388 | |||
1389 | if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) { | ||
1390 | tlan_dio_write8(dev->base_addr, | ||
1391 | TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); | ||
1392 | if (priv->timer.function == NULL) { | ||
1393 | priv->timer.function = tlan_timer; | ||
1394 | priv->timer.data = (unsigned long) dev; | ||
1395 | priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; | ||
1396 | priv->timer_set_at = jiffies; | ||
1397 | priv->timer_type = TLAN_TIMER_ACTIVITY; | ||
1398 | add_timer(&priv->timer); | ||
1399 | } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) { | ||
1400 | priv->timer_set_at = jiffies; | ||
1401 | } | ||
1402 | } | ||
1403 | |||
1404 | return ack; | ||
1405 | |||
1406 | } | ||
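The buffer[8]/buffer[9] slots zeroed above are not in use as hardware descriptors; the driver repurposes them to remember which sk_buff backs each list element. The tlan.h helpers look approximately like this:

/* Stash/recover the sk_buff pointer in two otherwise-unused buffer
 * descriptor slots (approximate transcription from tlan.h). The
 * split across two 32-bit slots, and the double 16-bit shift on
 * readback, keep this correct both on 64-bit kernels and on 32-bit
 * builds where a single 32-bit shift would be undefined. */
static inline void tlan_store_skb(struct tlan_list *tag,
				  struct sk_buff *skb)
{
	unsigned long addr = (unsigned long)skb;

	tag->buffer[9].address = addr;
	tag->buffer[8].address = upper_32_bits((u64)addr);
}

static inline struct sk_buff *tlan_get_skb(const struct tlan_list *tag)
{
	unsigned long addr = tag->buffer[9].address;

	addr |= ((unsigned long)tag->buffer[8].address << 16) << 16;
	return (struct sk_buff *)addr;
}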
1407 | |||
1408 | |||
1409 | |||
1410 | |||
1411 | /*************************************************************** | ||
1412 | * tlan_handle_stat_overflow | ||
1413 | * | ||
1414 | * Returns: | ||
1415 | * 1 | ||
1416 | * Parms: | ||
1417 | * dev Device assigned the IRQ that was | ||
1418 | * raised. | ||
1419 | * host_int The contents of the HOST_INT | ||
1420 | * port. | ||
1421 | * | ||
1422 | * This function handles the Statistics Overflow interrupt | ||
1423 | * which means that one or more of the TLAN statistics | ||
1424 | * registers has reached 1/2 capacity and needs to be read. | ||
1425 | * | ||
1426 | **************************************************************/ | ||
1427 | |||
1428 | static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int) | ||
1429 | { | ||
1430 | tlan_read_and_clear_stats(dev, TLAN_RECORD); | ||
1431 | |||
1432 | return 1; | ||
1433 | |||
1434 | } | ||
1435 | |||
1436 | |||
1437 | |||
1438 | |||
1439 | /*************************************************************** | ||
1440 | * tlan_handle_rx_eof | ||
1441 | * | ||
1442 | * Returns: | ||
1443 | * 1 | ||
1444 | * Parms: | ||
1445 | * dev Device assigned the IRQ that was | ||
1446 | * raised. | ||
1447 | * host_int The contents of the HOST_INT | ||
1448 | * port. | ||
1449 | * | ||
1450 | * This function handles the Rx EOF interrupt which | ||
1451 | * indicates a frame has been received by the adapter from | ||
1452 | * the net and the frame has been transferred to memory. | ||
1453 | * The function determines which list element the frame | ||
1454 | * has been loaded into, allocates a fresh sk_buff to | ||
1455 | * take its place, and sends the filled one to the protocol | ||
1456 | * stack. It then resets the used element and appends it | ||
1457 | * to the end of the ring. If the frame was the last in the Rx | ||
1458 | * channel (EOC), the function restarts the receive channel | ||
1459 | * by sending an Rx Go command to the adapter. Then it | ||
1460 | * activates/continues the activity LED. | ||
1461 | * | ||
1462 | **************************************************************/ | ||
1463 | |||
1464 | static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int) | ||
1465 | { | ||
1466 | struct tlan_priv *priv = netdev_priv(dev); | ||
1467 | u32 ack = 0; | ||
1468 | int eoc = 0; | ||
1469 | struct tlan_list *head_list; | ||
1470 | struct sk_buff *skb; | ||
1471 | struct tlan_list *tail_list; | ||
1472 | u16 tmp_c_stat; | ||
1473 | dma_addr_t head_list_phys; | ||
1474 | |||
1475 | TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n", | ||
1476 | priv->rx_head, priv->rx_tail); | ||
1477 | head_list = priv->rx_list + priv->rx_head; | ||
1478 | head_list_phys = | ||
1479 | priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head; | ||
1480 | |||
1481 | while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP) | ||
1482 | && (ack < 255)) { | ||
1483 | dma_addr_t frame_dma = head_list->buffer[0].address; | ||
1484 | u32 frame_size = head_list->frame_size; | ||
1485 | struct sk_buff *new_skb; | ||
1486 | |||
1487 | ack++; | ||
1488 | if (tmp_c_stat & TLAN_CSTAT_EOC) | ||
1489 | eoc = 1; | ||
1490 | |||
1491 | new_skb = netdev_alloc_skb_ip_align(dev, | ||
1492 | TLAN_MAX_FRAME_SIZE + 5); | ||
1493 | if (!new_skb) | ||
1494 | goto drop_and_reuse; | ||
1495 | |||
1496 | skb = tlan_get_skb(head_list); | ||
1497 | pci_unmap_single(priv->pci_dev, frame_dma, | ||
1498 | TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); | ||
1499 | skb_put(skb, frame_size); | ||
1500 | |||
1501 | dev->stats.rx_bytes += frame_size; | ||
1502 | |||
1503 | skb->protocol = eth_type_trans(skb, dev); | ||
1504 | netif_rx(skb); | ||
1505 | |||
1506 | head_list->buffer[0].address = | ||
1507 | pci_map_single(priv->pci_dev, new_skb->data, | ||
1508 | TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); | ||
1509 | |||
1510 | tlan_store_skb(head_list, new_skb); | ||
1511 | drop_and_reuse: | ||
1512 | head_list->forward = 0; | ||
1513 | head_list->c_stat = 0; | ||
1514 | tail_list = priv->rx_list + priv->rx_tail; | ||
1515 | tail_list->forward = head_list_phys; | ||
1516 | |||
1517 | CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS); | ||
1518 | CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS); | ||
1519 | head_list = priv->rx_list + priv->rx_head; | ||
1520 | head_list_phys = priv->rx_list_dma | ||
1521 | + sizeof(struct tlan_list)*priv->rx_head; | ||
1522 | } | ||
1523 | |||
1524 | if (!ack) | ||
1525 | netdev_info(dev, | ||
1526 | "Received interrupt for uncompleted RX frame\n"); | ||
1527 | |||
1528 | |||
1529 | if (eoc) { | ||
1530 | TLAN_DBG(TLAN_DEBUG_RX, | ||
1531 | "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n", | ||
1532 | priv->rx_head, priv->rx_tail); | ||
1533 | head_list = priv->rx_list + priv->rx_head; | ||
1534 | head_list_phys = priv->rx_list_dma | ||
1535 | + sizeof(struct tlan_list)*priv->rx_head; | ||
1536 | outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); | ||
1537 | ack |= TLAN_HC_GO | TLAN_HC_RT; | ||
1538 | priv->rx_eoc_count++; | ||
1539 | } | ||
1540 | |||
1541 | if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) { | ||
1542 | tlan_dio_write8(dev->base_addr, | ||
1543 | TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT); | ||
1544 | if (priv->timer.function == NULL) { | ||
1545 | priv->timer.function = tlan_timer; | ||
1546 | priv->timer.data = (unsigned long) dev; | ||
1547 | priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY; | ||
1548 | priv->timer_set_at = jiffies; | ||
1549 | priv->timer_type = TLAN_TIMER_ACTIVITY; | ||
1550 | add_timer(&priv->timer); | ||
1551 | } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) { | ||
1552 | priv->timer_set_at = jiffies; | ||
1553 | } | ||
1554 | } | ||
1555 | |||
1556 | return ack; | ||
1557 | |||
1558 | } | ||
1559 | |||
1560 | |||
1561 | |||
1562 | |||
1563 | /*************************************************************** | ||
1564 | * tlan_handle_dummy | ||
1565 | * | ||
1566 | * Returns: | ||
1567 | * 1 | ||
1568 | * Parms: | ||
1569 | * dev Device assigned the IRQ that was | ||
1570 | * raised. | ||
1571 | * host_int The contents of the HOST_INT | ||
1572 | * port. | ||
1573 | * | ||
1574 | * This function handles the Dummy interrupt, which is | ||
1575 | * raised whenever a test interrupt is generated by setting | ||
1576 | * the Req_Int bit of HOST_CMD to 1. | ||
1577 | * | ||
1578 | **************************************************************/ | ||
1579 | |||
1580 | static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int) | ||
1581 | { | ||
1582 | netdev_info(dev, "Test interrupt\n"); | ||
1583 | return 1; | ||
1584 | |||
1585 | } | ||
1586 | |||
1587 | |||
1588 | |||
1589 | |||
1590 | /*************************************************************** | ||
1591 | * tlan_handle_tx_eoc | ||
1592 | * | ||
1593 | * Returns: | ||
1594 | * 1 | ||
1595 | * Parms: | ||
1596 | * dev Device assigned the IRQ that was | ||
1597 | * raised. | ||
1598 | * host_int The contents of the HOST_INT | ||
1599 | * port. | ||
1600 | * | ||
1601 | * This driver is structured to determine EOC occurrences by | ||
1602 | * reading the CSTAT member of the list structure. Tx EOC | ||
1603 | * interrupts are disabled via the DIO INTDIS register. | ||
1604 | * However, TLAN chips before revision 3.0 didn't have this | ||
1605 | * functionality, so EOC events are processed here | ||
1606 | * for those pre-3.0 chips. | ||
1607 | * | ||
1608 | **************************************************************/ | ||
1609 | |||
1610 | static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int) | ||
1611 | { | ||
1612 | struct tlan_priv *priv = netdev_priv(dev); | ||
1613 | struct tlan_list *head_list; | ||
1614 | dma_addr_t head_list_phys; | ||
1615 | u32 ack = 1; | ||
1616 | |||
1617 | host_int = 0; | ||
1618 | if (priv->tlan_rev < 0x30) { | ||
1619 | TLAN_DBG(TLAN_DEBUG_TX, | ||
1620 | "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n", | ||
1621 | priv->tx_head, priv->tx_tail); | ||
1622 | head_list = priv->tx_list + priv->tx_head; | ||
1623 | head_list_phys = priv->tx_list_dma | ||
1624 | + sizeof(struct tlan_list)*priv->tx_head; | ||
1625 | if ((head_list->c_stat & TLAN_CSTAT_READY) | ||
1626 | == TLAN_CSTAT_READY) { | ||
1627 | netif_stop_queue(dev); | ||
1628 | outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); | ||
1629 | ack |= TLAN_HC_GO; | ||
1630 | } else { | ||
1631 | priv->tx_in_progress = 0; | ||
1632 | } | ||
1633 | } | ||
1634 | |||
1635 | return ack; | ||
1636 | |||
1637 | } | ||
1638 | |||
1639 | |||
1640 | |||
1641 | |||
1642 | /*************************************************************** | ||
1643 | * tlan_handle_status_check | ||
1644 | * | ||
1645 | * Returns: | ||
1646 | * 0 if Adapter check, 1 if Network Status check. | ||
1647 | * Parms: | ||
1648 | * dev Device assigned the IRQ that was | ||
1649 | * raised. | ||
1650 | * host_int The contents of the HOST_INT | ||
1651 | * port. | ||
1652 | * | ||
1653 | * This function handles Adapter Check/Network Status | ||
1654 | * interrupts generated by the adapter. It checks the | ||
1655 | * vector in the HOST_INT register to determine if it is | ||
1656 | * an Adapter Check interrupt. If so, it resets the | ||
1657 | * adapter. Otherwise it clears the status registers | ||
1658 | * and services the PHY. | ||
1659 | * | ||
1660 | **************************************************************/ | ||
1661 | |||
1662 | static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int) | ||
1663 | { | ||
1664 | struct tlan_priv *priv = netdev_priv(dev); | ||
1665 | u32 ack; | ||
1666 | u32 error; | ||
1667 | u8 net_sts; | ||
1668 | u32 phy; | ||
1669 | u16 tlphy_ctl; | ||
1670 | u16 tlphy_sts; | ||
1671 | |||
1672 | ack = 1; | ||
1673 | if (host_int & TLAN_HI_IV_MASK) { | ||
1674 | netif_stop_queue(dev); | ||
1675 | error = inl(dev->base_addr + TLAN_CH_PARM); | ||
1676 | netdev_info(dev, "Adaptor Error = 0x%x\n", error); | ||
1677 | tlan_read_and_clear_stats(dev, TLAN_RECORD); | ||
1678 | outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD); | ||
1679 | |||
1680 | schedule_work(&priv->tlan_tqueue); | ||
1681 | |||
1682 | netif_wake_queue(dev); | ||
1683 | ack = 0; | ||
1684 | } else { | ||
1685 | TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name); | ||
1686 | phy = priv->phy[priv->phy_num]; | ||
1687 | |||
1688 | net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS); | ||
1689 | if (net_sts) { | ||
1690 | tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts); | ||
1691 | TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", | ||
1692 | dev->name, (unsigned) net_sts); | ||
1693 | } | ||
1694 | if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) { | ||
1695 | tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts); | ||
1696 | tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); | ||
1697 | if (!(tlphy_sts & TLAN_TS_POLOK) && | ||
1698 | !(tlphy_ctl & TLAN_TC_SWAPOL)) { | ||
1699 | tlphy_ctl |= TLAN_TC_SWAPOL; | ||
1700 | tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, | ||
1701 | tlphy_ctl); | ||
1702 | } else if ((tlphy_sts & TLAN_TS_POLOK) && | ||
1703 | (tlphy_ctl & TLAN_TC_SWAPOL)) { | ||
1704 | tlphy_ctl &= ~TLAN_TC_SWAPOL; | ||
1705 | tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, | ||
1706 | tlphy_ctl); | ||
1707 | } | ||
1708 | |||
1709 | if (debug) | ||
1710 | tlan_phy_print(dev); | ||
1711 | } | ||
1712 | } | ||
1713 | |||
1714 | return ack; | ||
1715 | |||
1716 | } | ||
1717 | |||
1718 | |||
1719 | |||
1720 | |||
1721 | /*************************************************************** | ||
1722 | * tlan_handle_rx_eoc | ||
1723 | * | ||
1724 | * Returns: | ||
1725 | * 1 | ||
1726 | * Parms: | ||
1727 | * dev Device assigned the IRQ that was | ||
1728 | * raised. | ||
1729 | * host_int The contents of the HOST_INT | ||
1730 | * port. | ||
1731 | * | ||
1732 | * This driver is structured to determine EOC occurrences by | ||
1733 | * reading the CSTAT member of the list structure. Rx EOC | ||
1734 | * interrupts are disabled via the DIO INTDIS register. | ||
1735 | * However, TLAN chips before revision 3.0 didn't have this | ||
1736 | * CSTAT member or an INTDIS register, so if this chip is | ||
1737 | * pre-3.0, process EOC interrupts normally. | ||
1738 | * | ||
1739 | **************************************************************/ | ||
1740 | |||
1741 | static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int) | ||
1742 | { | ||
1743 | struct tlan_priv *priv = netdev_priv(dev); | ||
1744 | dma_addr_t head_list_phys; | ||
1745 | u32 ack = 1; | ||
1746 | |||
1747 | if (priv->tlan_rev < 0x30) { | ||
1748 | TLAN_DBG(TLAN_DEBUG_RX, | ||
1749 | "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n", | ||
1750 | priv->rx_head, priv->rx_tail); | ||
1751 | head_list_phys = priv->rx_list_dma | ||
1752 | + sizeof(struct tlan_list)*priv->rx_head; | ||
1753 | outl(head_list_phys, dev->base_addr + TLAN_CH_PARM); | ||
1754 | ack |= TLAN_HC_GO | TLAN_HC_RT; | ||
1755 | priv->rx_eoc_count++; | ||
1756 | } | ||
1757 | |||
1758 | return ack; | ||
1759 | |||
1760 | } | ||
1761 | |||
1762 | |||
1763 | |||
1764 | |||
1765 | /***************************************************************************** | ||
1766 | ****************************************************************************** | ||
1767 | |||
1768 | ThunderLAN driver timer function | ||
1769 | |||
1770 | ****************************************************************************** | ||
1771 | *****************************************************************************/ | ||
1772 | |||
1773 | |||
1774 | /*************************************************************** | ||
1775 | * tlan_timer | ||
1776 | * | ||
1777 | * Returns: | ||
1778 | * Nothing | ||
1779 | * Parms: | ||
1780 | * data A value given to add timer when | ||
1781 | * add_timer was called. | ||
1782 | * | ||
1783 | * This function handles timed functionality for the | ||
1784 | * TLAN driver. The two current timer uses are for | ||
1785 | * delaying for autonegotiation and driving the ACT LED. | ||
1786 | * - Autonegotiation requires being allowed about | ||
1787 | * 2 1/2 seconds before attempting to transmit a | ||
1788 | * packet. It would be a very bad thing to hang | ||
1789 | * the kernel this long, so the driver doesn't | ||
1790 | * allow transmission 'til after this time, for | ||
1791 | * certain PHYs. It would be much nicer if all | ||
1792 | * PHYs were interrupt-capable like the internal | ||
1793 | * PHY. | ||
1794 | * - The ACT LED, which shows adapter activity, is | ||
1795 | * driven by the driver, and so must be left on | ||
1796 | * for a short period to power up the LED so it | ||
1797 | * can be seen. This delay can be changed by | ||
1798 | * changing the TLAN_TIMER_ACT_DELAY in tlan.h, | ||
1799 | * if desired. 100 ms produces a slightly | ||
1800 | * sluggish response. | ||
1801 | * | ||
1802 | **************************************************************/ | ||
1803 | |||
1804 | static void tlan_timer(unsigned long data) | ||
1805 | { | ||
1806 | struct net_device *dev = (struct net_device *) data; | ||
1807 | struct tlan_priv *priv = netdev_priv(dev); | ||
1808 | u32 elapsed; | ||
1809 | unsigned long flags = 0; | ||
1810 | |||
1811 | priv->timer.function = NULL; | ||
1812 | |||
1813 | switch (priv->timer_type) { | ||
1814 | #ifdef MONITOR | ||
1815 | case TLAN_TIMER_LINK_BEAT: | ||
1816 | tlan_phy_monitor(dev); | ||
1817 | break; | ||
1818 | #endif | ||
1819 | case TLAN_TIMER_PHY_PDOWN: | ||
1820 | tlan_phy_power_down(dev); | ||
1821 | break; | ||
1822 | case TLAN_TIMER_PHY_PUP: | ||
1823 | tlan_phy_power_up(dev); | ||
1824 | break; | ||
1825 | case TLAN_TIMER_PHY_RESET: | ||
1826 | tlan_phy_reset(dev); | ||
1827 | break; | ||
1828 | case TLAN_TIMER_PHY_START_LINK: | ||
1829 | tlan_phy_start_link(dev); | ||
1830 | break; | ||
1831 | case TLAN_TIMER_PHY_FINISH_AN: | ||
1832 | tlan_phy_finish_auto_neg(dev); | ||
1833 | break; | ||
1834 | case TLAN_TIMER_FINISH_RESET: | ||
1835 | tlan_finish_reset(dev); | ||
1836 | break; | ||
1837 | case TLAN_TIMER_ACTIVITY: | ||
1838 | spin_lock_irqsave(&priv->lock, flags); | ||
1839 | if (priv->timer.function == NULL) { | ||
1840 | elapsed = jiffies - priv->timer_set_at; | ||
1841 | if (elapsed >= TLAN_TIMER_ACT_DELAY) { | ||
1842 | tlan_dio_write8(dev->base_addr, | ||
1843 | TLAN_LED_REG, TLAN_LED_LINK); | ||
1844 | } else { | ||
1845 | priv->timer.function = tlan_timer; | ||
1846 | priv->timer.expires = priv->timer_set_at | ||
1847 | + TLAN_TIMER_ACT_DELAY; | ||
1848 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1849 | add_timer(&priv->timer); | ||
1850 | break; | ||
1851 | } | ||
1852 | } | ||
1853 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1854 | break; | ||
1855 | default: | ||
1856 | break; | ||
1857 | } | ||
1858 | |||
1859 | } | ||
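Every TLAN_TIMER_* case above was armed via tlan_set_timer(), which lives earlier in this file. In outline (a sketch, not a verbatim copy), it refuses to replace a pending non-activity timer and otherwise records the type and schedules tlan_timer():

/* Outline of the arming helper; the real tlan_set_timer() is defined
 * earlier in tlan.c. Only one timer is in flight at a time, and an
 * already-armed non-activity timer wins over a new request. */
static void tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
{
	struct tlan_priv *priv = netdev_priv(dev);
	unsigned long flags = 0;

	if (!in_irq())
		spin_lock_irqsave(&priv->lock, flags);
	if (priv->timer.function != NULL &&
	    priv->timer_type != TLAN_TIMER_ACTIVITY) {
		if (!in_irq())
			spin_unlock_irqrestore(&priv->lock, flags);
		return;
	}
	priv->timer.function = tlan_timer;
	priv->timer.data = (unsigned long)dev;
	priv->timer_set_at = jiffies;
	priv->timer_type = type;
	if (!in_irq())
		spin_unlock_irqrestore(&priv->lock, flags);
	mod_timer(&priv->timer, jiffies + ticks);
}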
1860 | |||
1861 | |||
1862 | |||
1863 | |||
1864 | /***************************************************************************** | ||
1865 | ****************************************************************************** | ||
1866 | |||
1867 | ThunderLAN driver adapter related routines | ||
1868 | |||
1869 | ****************************************************************************** | ||
1870 | *****************************************************************************/ | ||
1871 | |||
1872 | |||
1873 | /*************************************************************** | ||
1874 | * tlan_reset_lists | ||
1875 | * | ||
1876 | * Returns: | ||
1877 | * Nothing | ||
1878 | * Parms: | ||
1879 | * dev The device structure with the list | ||
1880 | * structures to be reset. | ||
1881 | * | ||
1882 | * This routine sets the variables associated with managing | ||
1883 | * the TLAN lists to their initial values. | ||
1884 | * | ||
1885 | **************************************************************/ | ||
1886 | |||
1887 | static void tlan_reset_lists(struct net_device *dev) | ||
1888 | { | ||
1889 | struct tlan_priv *priv = netdev_priv(dev); | ||
1890 | int i; | ||
1891 | struct tlan_list *list; | ||
1892 | dma_addr_t list_phys; | ||
1893 | struct sk_buff *skb; | ||
1894 | |||
1895 | priv->tx_head = 0; | ||
1896 | priv->tx_tail = 0; | ||
1897 | for (i = 0; i < TLAN_NUM_TX_LISTS; i++) { | ||
1898 | list = priv->tx_list + i; | ||
1899 | list->c_stat = TLAN_CSTAT_UNUSED; | ||
1900 | list->buffer[0].address = 0; | ||
1901 | list->buffer[2].count = 0; | ||
1902 | list->buffer[2].address = 0; | ||
1903 | list->buffer[8].address = 0; | ||
1904 | list->buffer[9].address = 0; | ||
1905 | } | ||
1906 | |||
1907 | priv->rx_head = 0; | ||
1908 | priv->rx_tail = TLAN_NUM_RX_LISTS - 1; | ||
1909 | for (i = 0; i < TLAN_NUM_RX_LISTS; i++) { | ||
1910 | list = priv->rx_list + i; | ||
1911 | list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i; | ||
1912 | list->c_stat = TLAN_CSTAT_READY; | ||
1913 | list->frame_size = TLAN_MAX_FRAME_SIZE; | ||
1914 | list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; | ||
1915 | skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); | ||
1916 | if (!skb) { | ||
1917 | netdev_err(dev, "Out of memory for received data\n"); | ||
1918 | break; | ||
1919 | } | ||
1920 | |||
1921 | list->buffer[0].address = pci_map_single(priv->pci_dev, | ||
1922 | skb->data, | ||
1923 | TLAN_MAX_FRAME_SIZE, | ||
1924 | PCI_DMA_FROMDEVICE); | ||
1925 | tlan_store_skb(list, skb); | ||
1926 | list->buffer[1].count = 0; | ||
1927 | list->buffer[1].address = 0; | ||
1928 | list->forward = list_phys + sizeof(struct tlan_list); | ||
1929 | } | ||
1930 | |||
1931 | /* in case ran out of memory early, clear bits */ | ||
1932 | while (i < TLAN_NUM_RX_LISTS) { | ||
1933 | tlan_store_skb(priv->rx_list + i, NULL); | ||
1934 | ++i; | ||
1935 | } | ||
1936 | list->forward = 0; | ||
1937 | |||
1938 | } | ||
1939 | |||
1940 | |||
1941 | static void tlan_free_lists(struct net_device *dev) | ||
1942 | { | ||
1943 | struct tlan_priv *priv = netdev_priv(dev); | ||
1944 | int i; | ||
1945 | struct tlan_list *list; | ||
1946 | struct sk_buff *skb; | ||
1947 | |||
1948 | for (i = 0; i < TLAN_NUM_TX_LISTS; i++) { | ||
1949 | list = priv->tx_list + i; | ||
1950 | skb = tlan_get_skb(list); | ||
1951 | if (skb) { | ||
1952 | pci_unmap_single( | ||
1953 | priv->pci_dev, | ||
1954 | list->buffer[0].address, | ||
1955 | max(skb->len, | ||
1956 | (unsigned int)TLAN_MIN_FRAME_SIZE), | ||
1957 | PCI_DMA_TODEVICE); | ||
1958 | dev_kfree_skb_any(skb); | ||
1959 | list->buffer[8].address = 0; | ||
1960 | list->buffer[9].address = 0; | ||
1961 | } | ||
1962 | } | ||
1963 | |||
1964 | for (i = 0; i < TLAN_NUM_RX_LISTS; i++) { | ||
1965 | list = priv->rx_list + i; | ||
1966 | skb = tlan_get_skb(list); | ||
1967 | if (skb) { | ||
1968 | pci_unmap_single(priv->pci_dev, | ||
1969 | list->buffer[0].address, | ||
1970 | TLAN_MAX_FRAME_SIZE, | ||
1971 | PCI_DMA_FROMDEVICE); | ||
1972 | dev_kfree_skb_any(skb); | ||
1973 | list->buffer[8].address = 0; | ||
1974 | list->buffer[9].address = 0; | ||
1975 | } | ||
1976 | } | ||
1977 | } | ||
1978 | |||
1979 | |||
1980 | |||
1981 | |||
1982 | /*************************************************************** | ||
1983 | * tlan_print_dio | ||
1984 | * | ||
1985 | * Returns: | ||
1986 | * Nothing | ||
1987 | * Parms: | ||
1988 | * io_base Base IO port of the device of | ||
1989 | * which to print DIO registers. | ||
1990 | * | ||
1991 | * This function prints out all the internal (DIO) | ||
1992 | * registers of a TLAN chip. | ||
1993 | * | ||
1994 | **************************************************************/ | ||
1995 | |||
1996 | static void tlan_print_dio(u16 io_base) | ||
1997 | { | ||
1998 | u32 data0, data1; | ||
1999 | int i; | ||
2000 | |||
2001 | pr_info("Contents of internal registers for io base 0x%04hx\n", | ||
2002 | io_base); | ||
2003 | pr_info("Off. +0 +4\n"); | ||
2004 | for (i = 0; i < 0x4C; i += 8) { | ||
2005 | data0 = tlan_dio_read32(io_base, i); | ||
2006 | data1 = tlan_dio_read32(io_base, i + 0x4); | ||
2007 | pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1); | ||
2008 | } | ||
2009 | |||
2010 | } | ||
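The registers dumped here are not memory-mapped; they sit behind a small I/O window. The tlan.h accessors work approximately like this: write the internal register address to TLAN_DIO_ADR, then transfer bytes through TLAN_DIO_DATA at the matching byte lane.

/* DIO window accessors (approximate transcription from tlan.h). The
 * low two address bits pick the byte lane within the 32-bit data
 * window. */
static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
{
	outw(internal_addr, base_addr + TLAN_DIO_ADR);
	return inb(base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
}

static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr,
				   u8 data)
{
	outw(internal_addr, base_addr + TLAN_DIO_ADR);
	outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
}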
2011 | |||
2012 | |||
2013 | |||
2014 | |||
2015 | /*************************************************************** | ||
2016 | * tlan_print_list | ||
2017 | * | ||
2018 | * Returns: | ||
2019 | * Nothing | ||
2020 | * Parms: | ||
2021 | * list A pointer to the struct tlan_list structure to | ||
2022 | * be printed. | ||
2023 | * type A string to designate type of list, | ||
2024 | * "Rx" or "Tx". | ||
2025 | * num The index of the list. | ||
2026 | * | ||
2027 | * This function prints out the contents of the list | ||
2028 | * pointed to by the list parameter. | ||
2029 | * | ||
2030 | **************************************************************/ | ||
2031 | |||
2032 | static void tlan_print_list(struct tlan_list *list, char *type, int num) | ||
2033 | { | ||
2034 | int i; | ||
2035 | |||
2036 | pr_info("%s List %d at %p\n", type, num, list); | ||
2037 | pr_info(" Forward = 0x%08x\n", list->forward); | ||
2038 | pr_info(" CSTAT = 0x%04hx\n", list->c_stat); | ||
2039 | pr_info(" Frame Size = 0x%04hx\n", list->frame_size); | ||
2040 | /* for (i = 0; i < 10; i++) { */ | ||
2041 | for (i = 0; i < 2; i++) { | ||
2042 | pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n", | ||
2043 | i, list->buffer[i].count, list->buffer[i].address); | ||
2044 | } | ||
2045 | |||
2046 | } | ||
2047 | |||
2048 | |||
2049 | |||
2050 | |||
2051 | /*************************************************************** | ||
2052 | * tlan_read_and_clear_stats | ||
2053 | * | ||
2054 | * Returns: | ||
2055 | * Nothing | ||
2056 | * Parms: | ||
2057 | * dev Pointer to device structure of adapter | ||
2058 | * to which to read stats. | ||
2059 | * record Flag indicating whether to add | ||
2060 | * record Flag indicating whether to add the values to the device stats. | ||
2061 | * This function reads all the internal status registers | ||
2062 | * of the TLAN chip, which clears them as a side effect. | ||
2063 | * It then either adds the values to the device's status | ||
2064 | * struct, or discards them, depending on whether record | ||
2065 | * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0). | ||
2066 | * | ||
2067 | **************************************************************/ | ||
2068 | |||
2069 | static void tlan_read_and_clear_stats(struct net_device *dev, int record) | ||
2070 | { | ||
2071 | u32 tx_good, tx_under; | ||
2072 | u32 rx_good, rx_over; | ||
2073 | u32 def_tx, crc, code; | ||
2074 | u32 multi_col, single_col; | ||
2075 | u32 excess_col, late_col, loss; | ||
2076 | |||
2077 | outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR); | ||
2078 | tx_good = inb(dev->base_addr + TLAN_DIO_DATA); | ||
2079 | tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; | ||
2080 | tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16; | ||
2081 | tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3); | ||
2082 | |||
2083 | outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR); | ||
2084 | rx_good = inb(dev->base_addr + TLAN_DIO_DATA); | ||
2085 | rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; | ||
2086 | rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16; | ||
2087 | rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3); | ||
2088 | |||
2089 | outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR); | ||
2090 | def_tx = inb(dev->base_addr + TLAN_DIO_DATA); | ||
2091 | def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; | ||
2092 | crc = inb(dev->base_addr + TLAN_DIO_DATA + 2); | ||
2093 | code = inb(dev->base_addr + TLAN_DIO_DATA + 3); | ||
2094 | |||
2095 | outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR); | ||
2096 | multi_col = inb(dev->base_addr + TLAN_DIO_DATA); | ||
2097 | multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8; | ||
2098 | single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2); | ||
2099 | single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8; | ||
2100 | |||
2101 | outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR); | ||
2102 | excess_col = inb(dev->base_addr + TLAN_DIO_DATA); | ||
2103 | late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1); | ||
2104 | loss = inb(dev->base_addr + TLAN_DIO_DATA + 2); | ||
2105 | |||
2106 | if (record) { | ||
2107 | dev->stats.rx_packets += rx_good; | ||
2108 | dev->stats.rx_errors += rx_over + crc + code; | ||
2109 | dev->stats.tx_packets += tx_good; | ||
2110 | dev->stats.tx_errors += tx_under + loss; | ||
2111 | dev->stats.collisions += multi_col | ||
2112 | + single_col + excess_col + late_col; | ||
2113 | |||
2114 | dev->stats.rx_over_errors += rx_over; | ||
2115 | dev->stats.rx_crc_errors += crc; | ||
2116 | dev->stats.rx_frame_errors += code; | ||
2117 | |||
2118 | dev->stats.tx_aborted_errors += tx_under; | ||
2119 | dev->stats.tx_carrier_errors += loss; | ||
2120 | } | ||
2121 | |||
2122 | } | ||
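Each outw()/inb() cluster above unpacks one 32-bit statistics register byte by byte; TLAN_GOOD_TX_FRMS, for instance, holds a 24-bit good-frame count with the Tx-underrun count in the top byte. A hypothetical helper (not in the driver) making that packing explicit:

/* Hypothetical illustration only: split a 32-bit TLAN statistics
 * register into its 24-bit counter and its 8-bit companion counter. */
static void tlan_split_stat(u32 reg, u32 *count24, u8 *top8)
{
	*count24 = reg & 0x00ffffff;	/* e.g. good TX/RX frames */
	*top8    = reg >> 24;		/* e.g. underruns/overruns */
}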
2123 | |||
2124 | |||
2125 | |||
2126 | |||
2127 | /*************************************************************** | ||
2128 | * tlan_reset_adapter | ||
2129 | * | ||
2130 | * Returns: | ||
2131 | * 0 | ||
2132 | * Parms: | ||
2133 | * dev Pointer to device structure of adapter | ||
2134 | * to be reset. | ||
2135 | * | ||
2136 | * This function resets the adapter and its physical | ||
2137 | * device. See Chap. 3, pp. 9-10 of the "ThunderLAN | ||
2138 | * Programmer's Guide" for details. The routine tries to | ||
2139 | * implement what is detailed there, though adjustments | ||
2140 | * have been made. | ||
2141 | * | ||
2142 | **************************************************************/ | ||
2143 | |||
2144 | static void | ||
2145 | tlan_reset_adapter(struct net_device *dev) | ||
2146 | { | ||
2147 | struct tlan_priv *priv = netdev_priv(dev); | ||
2148 | int i; | ||
2149 | u32 addr; | ||
2150 | u32 data; | ||
2151 | u8 data8; | ||
2152 | |||
2153 | priv->tlan_full_duplex = false; | ||
2154 | priv->phy_online = 0; | ||
2155 | netif_carrier_off(dev); | ||
2156 | |||
2157 | /* 1. Assert reset bit. */ | ||
2158 | |||
2159 | data = inl(dev->base_addr + TLAN_HOST_CMD); | ||
2160 | data |= TLAN_HC_AD_RST; | ||
2161 | outl(data, dev->base_addr + TLAN_HOST_CMD); | ||
2162 | |||
2163 | udelay(1000); | ||
2164 | |||
2165 | /* 2. Turn off interrupts. (Probably isn't necessary) */ | ||
2166 | |||
2167 | data = inl(dev->base_addr + TLAN_HOST_CMD); | ||
2168 | data |= TLAN_HC_INT_OFF; | ||
2169 | outl(data, dev->base_addr + TLAN_HOST_CMD); | ||
2170 | |||
2171 | /* 3. Clear AREGs and HASHs. */ | ||
2172 | |||
2173 | for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4) | ||
2174 | tlan_dio_write32(dev->base_addr, (u16) i, 0); | ||
2175 | |||
2176 | /* 4. Setup NetConfig register. */ | ||
2177 | |||
2178 | data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; | ||
2179 | tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data); | ||
2180 | |||
2181 | /* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */ | ||
2182 | |||
2183 | outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD); | ||
2184 | outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD); | ||
2185 | |||
2186 | /* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */ | ||
2187 | |||
2188 | outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); | ||
2189 | addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; | ||
2190 | tlan_set_bit(TLAN_NET_SIO_NMRST, addr); | ||
2191 | |||
2192 | /* 7. Setup the remaining registers. */ | ||
2193 | |||
2194 | if (priv->tlan_rev >= 0x30) { | ||
2195 | data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC; | ||
2196 | tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8); | ||
2197 | } | ||
2198 | tlan_phy_detect(dev); | ||
2199 | data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN; | ||
2200 | |||
2201 | if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) { | ||
2202 | data |= TLAN_NET_CFG_BIT; | ||
2203 | if (priv->aui == 1) { | ||
2204 | tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a); | ||
2205 | } else if (priv->duplex == TLAN_DUPLEX_FULL) { | ||
2206 | tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00); | ||
2207 | priv->tlan_full_duplex = true; | ||
2208 | } else { | ||
2209 | tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08); | ||
2210 | } | ||
2211 | } | ||
2212 | |||
2213 | if (priv->phy_num == 0) | ||
2214 | data |= TLAN_NET_CFG_PHY_EN; | ||
2215 | tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data); | ||
2216 | |||
2217 | if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) | ||
2218 | tlan_finish_reset(dev); | ||
2219 | else | ||
2220 | tlan_phy_power_down(dev); | ||
2221 | |||
2222 | } | ||
2223 | |||
2224 | |||
2225 | |||
2226 | |||
2227 | static void | ||
2228 | tlan_finish_reset(struct net_device *dev) | ||
2229 | { | ||
2230 | struct tlan_priv *priv = netdev_priv(dev); | ||
2231 | u8 data; | ||
2232 | u32 phy; | ||
2233 | u8 sio; | ||
2234 | u16 status; | ||
2235 | u16 partner; | ||
2236 | u16 tlphy_ctl; | ||
2237 | u16 tlphy_par; | ||
2238 | u16 tlphy_id1, tlphy_id2; | ||
2239 | int i; | ||
2240 | |||
2241 | phy = priv->phy[priv->phy_num]; | ||
2242 | |||
2243 | data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP; | ||
2244 | if (priv->tlan_full_duplex) | ||
2245 | data |= TLAN_NET_CMD_DUPLEX; | ||
2246 | tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data); | ||
2247 | data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5; | ||
2248 | if (priv->phy_num == 0) | ||
2249 | data |= TLAN_NET_MASK_MASK7; | ||
2250 | tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data); | ||
2251 | tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7); | ||
2252 | tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1); | ||
2253 | tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2); | ||
2254 | |||
2255 | if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) || | ||
2256 | (priv->aui)) { | ||
2257 | status = MII_GS_LINK; | ||
2258 | netdev_info(dev, "Link forced\n"); | ||
2259 | } else { | ||
2260 | tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); | ||
2261 | udelay(1000); | ||
2262 | tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); | ||
2263 | if ((status & MII_GS_LINK) && | ||
2264 | /* We only support link info on Nat. Sem. PHYs */ | ||
2265 | (tlphy_id1 == NAT_SEM_ID1) && | ||
2266 | (tlphy_id2 == NAT_SEM_ID2)) { | ||
2267 | tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner); | ||
2268 | tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par); | ||
2269 | |||
2270 | netdev_info(dev, | ||
2271 | "Link active with %s %uMbps %s-Duplex\n", | ||
2272 | !(tlphy_par & TLAN_PHY_AN_EN_STAT) | ||
2273 | ? "forced" : "Autonegotiation enabled,", | ||
2274 | tlphy_par & TLAN_PHY_SPEED_100 | ||
2275 | ? 100 : 10, | ||
2276 | tlphy_par & TLAN_PHY_DUPLEX_FULL | ||
2277 | ? "Full" : "Half"); | ||
2278 | |||
2279 | if (tlphy_par & TLAN_PHY_AN_EN_STAT) { | ||
2280 | netdev_info(dev, "Partner capability:"); | ||
2281 | for (i = 5; i < 10; i++) | ||
2282 | if (partner & (1 << i)) | ||
2283 | pr_cont(" %s", media[i-5]); | ||
2284 | pr_cont("\n"); | ||
2285 | } | ||
2286 | |||
2287 | tlan_dio_write8(dev->base_addr, TLAN_LED_REG, | ||
2288 | TLAN_LED_LINK); | ||
2289 | #ifdef MONITOR | ||
2290 | /* We have link beat..for now anyway */ | ||
2291 | priv->link = 1; | ||
2292 | /*Enabling link beat monitoring */ | ||
2293 | tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT); | ||
2294 | #endif | ||
2295 | } else if (status & MII_GS_LINK) { | ||
2296 | netdev_info(dev, "Link active\n"); | ||
2297 | tlan_dio_write8(dev->base_addr, TLAN_LED_REG, | ||
2298 | TLAN_LED_LINK); | ||
2299 | } | ||
2300 | } | ||
2301 | |||
2302 | if (priv->phy_num == 0) { | ||
2303 | tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl); | ||
2304 | tlphy_ctl |= TLAN_TC_INTEN; | ||
2305 | tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); | ||
2306 | sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO); | ||
2307 | sio |= TLAN_NET_SIO_MINTEN; | ||
2308 | tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio); | ||
2309 | } | ||
2310 | |||
2311 | if (status & MII_GS_LINK) { | ||
2312 | tlan_set_mac(dev, 0, dev->dev_addr); | ||
2313 | priv->phy_online = 1; | ||
2314 | outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1); | ||
2315 | if (debug >= 1 && debug != TLAN_DEBUG_PROBE) | ||
2316 | outb((TLAN_HC_REQ_INT >> 8), | ||
2317 | dev->base_addr + TLAN_HOST_CMD + 1); | ||
2318 | outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM); | ||
2319 | outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD); | ||
2320 | netif_carrier_on(dev); | ||
2321 | } else { | ||
2322 | netdev_info(dev, "Link inactive, will retry in 10 secs...\n"); | ||
2323 | tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET); | ||
2324 | return; | ||
2325 | } | ||
2326 | tlan_set_multicast_list(dev); | ||
2327 | |||
2328 | } | ||
2329 | |||
2330 | |||
2331 | |||
2332 | |||
2333 | /*************************************************************** | ||
2334 | * tlan_set_mac | ||
2335 | * | ||
2336 | * Returns: | ||
2337 | * Nothing | ||
2338 | * Parms: | ||
2339 | * dev Pointer to device structure of adapter | ||
2340 | * on which to change the AREG. | ||
2341 | * areg The AREG to set the address in (0 - 3). | ||
2342 | * mac A pointer to an array of chars. Each | ||
2343 | * element stores one byte of the address. | ||
2344 | * I.e., it isn't in ASCII. | ||
2345 | * | ||
2346 | * This function transfers a MAC address to one of the | ||
2347 | * TLAN AREGs (address registers). The TLAN chip locks | ||
2348 | * the register on writing to offset 0 and unlocks the | ||
2349 | * register after writing to offset 5. If NULL is passed | ||
2350 | * in mac, then the AREG is filled with 0's. | ||
2351 | * | ||
2352 | **************************************************************/ | ||
2353 | |||
2354 | static void tlan_set_mac(struct net_device *dev, int areg, char *mac) | ||
2355 | { | ||
2356 | int i; | ||
2357 | |||
2358 | areg *= 6; | ||
2359 | |||
2360 | if (mac != NULL) { | ||
2361 | for (i = 0; i < 6; i++) | ||
2362 | tlan_dio_write8(dev->base_addr, | ||
2363 | TLAN_AREG_0 + areg + i, mac[i]); | ||
2364 | } else { | ||
2365 | for (i = 0; i < 6; i++) | ||
2366 | tlan_dio_write8(dev->base_addr, | ||
2367 | TLAN_AREG_0 + areg + i, 0); | ||
2368 | } | ||
2369 | |||
2370 | } | ||
2371 | |||
2372 | |||
2373 | |||
2374 | |||
2375 | /***************************************************************************** | ||
2376 | ****************************************************************************** | ||
2377 | |||
2378 | ThunderLAN driver PHY layer routines | ||
2379 | |||
2380 | ****************************************************************************** | ||
2381 | *****************************************************************************/ | ||
2382 | |||
2383 | |||
2384 | |||
2385 | /********************************************************************* | ||
2386 | * tlan_phy_print | ||
2387 | * | ||
2388 | * Returns: | ||
2389 | * Nothing | ||
2390 | * Parms: | ||
2391 | * dev A pointer to the device structure of the | ||
2392 | * TLAN device having the PHYs to be detailed. | ||
2393 | * | ||
2394 | * This function prints the registers of a PHY (aka transceiver). | ||
2395 | * | ||
2396 | ********************************************************************/ | ||
2397 | |||
2398 | static void tlan_phy_print(struct net_device *dev) | ||
2399 | { | ||
2400 | struct tlan_priv *priv = netdev_priv(dev); | ||
2401 | u16 i, data0, data1, data2, data3, phy; | ||
2402 | |||
2403 | phy = priv->phy[priv->phy_num]; | ||
2404 | |||
2405 | if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { | ||
2406 | netdev_info(dev, "Unmanaged PHY\n"); | ||
2407 | } else if (phy <= TLAN_PHY_MAX_ADDR) { | ||
2408 | netdev_info(dev, "PHY 0x%02x\n", phy); | ||
2409 | pr_info(" Off. +0 +1 +2 +3\n"); | ||
2410 | for (i = 0; i < 0x20; i += 4) { | ||
2411 | tlan_mii_read_reg(dev, phy, i, &data0); | ||
2412 | tlan_mii_read_reg(dev, phy, i + 1, &data1); | ||
2413 | tlan_mii_read_reg(dev, phy, i + 2, &data2); | ||
2414 | tlan_mii_read_reg(dev, phy, i + 3, &data3); | ||
2415 | pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n", | ||
2416 | i, data0, data1, data2, data3); | ||
2417 | } | ||
2418 | } else { | ||
2419 | netdev_info(dev, "Invalid PHY\n"); | ||
2420 | } | ||
2421 | |||
2422 | } | ||
2423 | |||
2424 | |||
2425 | |||
2426 | |||
2427 | /********************************************************************* | ||
2428 | * tlan_phy_detect | ||
2429 | * | ||
2430 | * Returns: | ||
2431 | * Nothing | ||
2432 | * Parms: | ||
2433 | * dev A pointer to the device structure of the adapter | ||
2434 | * for which the PHY needs to be determined. | ||
2435 | * | ||
2436 | * So far I've found that adapters which have external PHYs | ||
2437 | * may also use the internal PHY for part of the functionality. | ||
2438 | * (e.g., AUI/Thinnet). This function finds out if this TLAN | ||
2439 | * chip has an internal PHY, and then finds the first external | ||
2440 | * PHY (starting from address 0), if one exists. | ||
2441 | * | ||
2442 | ********************************************************************/ | ||
2443 | |||
2444 | static void tlan_phy_detect(struct net_device *dev) | ||
2445 | { | ||
2446 | struct tlan_priv *priv = netdev_priv(dev); | ||
2447 | u16 control; | ||
2448 | u16 hi; | ||
2449 | u16 lo; | ||
2450 | u32 phy; | ||
2451 | |||
2452 | if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) { | ||
2453 | priv->phy_num = 0xffff; | ||
2454 | return; | ||
2455 | } | ||
2456 | |||
2457 | tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi); | ||
2458 | |||
2459 | if (hi != 0xffff) | ||
2460 | priv->phy[0] = TLAN_PHY_MAX_ADDR; | ||
2461 | else | ||
2462 | priv->phy[0] = TLAN_PHY_NONE; | ||
2463 | |||
2464 | priv->phy[1] = TLAN_PHY_NONE; | ||
2465 | for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) { | ||
2466 | tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control); | ||
2467 | tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi); | ||
2468 | tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo); | ||
2469 | if ((control != 0xffff) || | ||
2470 | (hi != 0xffff) || (lo != 0xffff)) { | ||
2471 | TLAN_DBG(TLAN_DEBUG_GNRL, | ||
2472 | "PHY found at %02x %04x %04x %04x\n", | ||
2473 | phy, control, hi, lo); | ||
2474 | if ((priv->phy[1] == TLAN_PHY_NONE) && | ||
2475 | (phy != TLAN_PHY_MAX_ADDR)) { | ||
2476 | priv->phy[1] = phy; | ||
2477 | } | ||
2478 | } | ||
2479 | } | ||
2480 | |||
2481 | if (priv->phy[1] != TLAN_PHY_NONE) | ||
2482 | priv->phy_num = 1; | ||
2483 | else if (priv->phy[0] != TLAN_PHY_NONE) | ||
2484 | priv->phy_num = 0; | ||
2485 | else | ||
2486 | netdev_info(dev, "Cannot initialize device, no PHY was found!\n"); | ||
2487 | |||
2488 | } | ||
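The probe loop treats an address as populated when any of the three registers reads back something other than all ones, since a bit-banged read from an empty MII bus floats high. Restated as a hypothetical predicate (not in the driver):

/* Hypothetical restatement of the detection test above. */
static bool tlan_phy_present(struct net_device *dev, u32 phy)
{
	u16 control, hi, lo;

	tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
	tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);

	/* A floating MII bus reads as 0xffff in every register. */
	return control != 0xffff || hi != 0xffff || lo != 0xffff;
}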
2489 | |||
2490 | |||
2491 | |||
2492 | |||
2493 | static void tlan_phy_power_down(struct net_device *dev) | ||
2494 | { | ||
2495 | struct tlan_priv *priv = netdev_priv(dev); | ||
2496 | u16 value; | ||
2497 | |||
2498 | TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name); | ||
2499 | value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; | ||
2500 | tlan_mii_sync(dev->base_addr); | ||
2501 | tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value); | ||
2502 | if ((priv->phy_num == 0) && | ||
2503 | (priv->phy[1] != TLAN_PHY_NONE) && | ||
2504 | (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) { | ||
2505 | tlan_mii_sync(dev->base_addr); | ||
2506 | tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value); | ||
2507 | } | ||
2508 | |||
2509 | /* Wait for 50 ms and power up. | ||
2510 | * This is arbitrary. It is intended to make sure the | ||
2511 | * transceiver settles. | ||
2512 | */ | ||
2513 | tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP); | ||
2514 | |||
2515 | } | ||
2516 | |||
2517 | |||
2518 | |||
2519 | |||
2520 | static void tlan_phy_power_up(struct net_device *dev) | ||
2521 | { | ||
2522 | struct tlan_priv *priv = netdev_priv(dev); | ||
2523 | u16 value; | ||
2524 | |||
2525 | TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name); | ||
2526 | tlan_mii_sync(dev->base_addr); | ||
2527 | value = MII_GC_LOOPBK; | ||
2528 | tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value); | ||
2529 | tlan_mii_sync(dev->base_addr); | ||
2530 | /* Wait 50 ms and reset the | ||
2531 | * transceiver. The TLAN docs say both 50 ms and | ||
2532 | * 500 ms; the code uses the shorter delay. | ||
2533 | */ | ||
2534 | tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET); | ||
2535 | |||
2536 | } | ||
2537 | |||
2538 | |||
2539 | |||
2540 | |||
2541 | static void tlan_phy_reset(struct net_device *dev) | ||
2542 | { | ||
2543 | struct tlan_priv *priv = netdev_priv(dev); | ||
2544 | u16 phy; | ||
2545 | u16 value; | ||
2546 | |||
2547 | phy = priv->phy[priv->phy_num]; | ||
2548 | |||
2549 | TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name); | ||
2550 | tlan_mii_sync(dev->base_addr); | ||
2551 | value = MII_GC_LOOPBK | MII_GC_RESET; | ||
2552 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value); | ||
2553 | tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value); | ||
2554 | while (value & MII_GC_RESET) | ||
2555 | tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value); | ||
2556 | |||
2557 | /* Wait 50 ms and initialize. | ||
2558 | * This was originally 500 ms, but 50 ms | ||
2559 | * has proven long enough. | ||
2560 | */ | ||
2561 | tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK); | ||
2562 | |||
2563 | } | ||
2564 | |||
2565 | |||
2566 | |||
2567 | |||
2568 | static void tlan_phy_start_link(struct net_device *dev) | ||
2569 | { | ||
2570 | struct tlan_priv *priv = netdev_priv(dev); | ||
2571 | u16 ability; | ||
2572 | u16 control; | ||
2573 | u16 data; | ||
2574 | u16 phy; | ||
2575 | u16 status; | ||
2576 | u16 tctl; | ||
2577 | |||
2578 | phy = priv->phy[priv->phy_num]; | ||
2579 | TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name); | ||
2580 | tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); | ||
2581 | tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability); | ||
2582 | |||
2583 | if ((status & MII_GS_AUTONEG) && | ||
2584 | (!priv->aui)) { | ||
2585 | ability = status >> 11; | ||
2586 | if (priv->speed == TLAN_SPEED_10 && | ||
2587 | priv->duplex == TLAN_DUPLEX_HALF) { | ||
2588 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000); | ||
2589 | } else if (priv->speed == TLAN_SPEED_10 && | ||
2590 | priv->duplex == TLAN_DUPLEX_FULL) { | ||
2591 | priv->tlan_full_duplex = true; | ||
2592 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100); | ||
2593 | } else if (priv->speed == TLAN_SPEED_100 && | ||
2594 | priv->duplex == TLAN_DUPLEX_HALF) { | ||
2595 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000); | ||
2596 | } else if (priv->speed == TLAN_SPEED_100 && | ||
2597 | priv->duplex == TLAN_DUPLEX_FULL) { | ||
2598 | priv->tlan_full_duplex = true; | ||
2599 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100); | ||
2600 | } else { | ||
2601 | |||
2602 | /* Set Auto-Neg advertisement */ | ||
2603 | tlan_mii_write_reg(dev, phy, MII_AN_ADV, | ||
2604 | (ability << 5) | 1); | ||
2605 | /* Enable Auto-Neg */ | ||
2606 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000); | ||
2607 | /* Restart Auto-Neg */ | ||
2608 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200); | ||
2609 | /* Wait for 2 sec for autonegotiation | ||
2610 | * to complete. The spec max is less than this, | ||
2611 | * but the card needs additional time to start AN; | ||
2612 | * 0.5 sec should be plenty extra. | ||
2613 | */ | ||
2614 | netdev_info(dev, "Starting autonegotiation\n"); | ||
2615 | tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN); | ||
2616 | return; | ||
2617 | } | ||
2618 | |||
2619 | } | ||
2620 | |||
2621 | if ((priv->aui) && (priv->phy_num != 0)) { | ||
2622 | priv->phy_num = 0; | ||
2623 | data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | ||
2624 | | TLAN_NET_CFG_PHY_EN; | ||
2625 | tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data); | ||
2626 | tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN); | ||
2627 | return; | ||
2628 | } else if (priv->phy_num == 0) { | ||
2629 | control = 0; | ||
2630 | tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl); | ||
2631 | if (priv->aui) { | ||
2632 | tctl |= TLAN_TC_AUISEL; | ||
2633 | } else { | ||
2634 | tctl &= ~TLAN_TC_AUISEL; | ||
2635 | if (priv->duplex == TLAN_DUPLEX_FULL) { | ||
2636 | control |= MII_GC_DUPLEX; | ||
2637 | priv->tlan_full_duplex = true; | ||
2638 | } | ||
2639 | if (priv->speed == TLAN_SPEED_100) | ||
2640 | control |= MII_GC_SPEEDSEL; | ||
2641 | } | ||
2642 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control); | ||
2643 | tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl); | ||
2644 | } | ||
2645 | |||
2646 | /* Wait for 4 sec to give the transceiver time | ||
2647 | * to establish a link. | ||
2648 | */ | ||
2649 | tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET); | ||
2650 | |||
2651 | } | ||
2652 | |||
2653 | |||
2654 | |||
2655 | |||
2656 | static void tlan_phy_finish_auto_neg(struct net_device *dev) | ||
2657 | { | ||
2658 | struct tlan_priv *priv = netdev_priv(dev); | ||
2659 | u16 an_adv; | ||
2660 | u16 an_lpa; | ||
2661 | u16 data; | ||
2662 | u16 mode; | ||
2663 | u16 phy; | ||
2664 | u16 status; | ||
2665 | |||
2666 | phy = priv->phy[priv->phy_num]; | ||
2667 | |||
2668 | tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); | ||
2669 | udelay(1000); | ||
2670 | tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status); | ||
2671 | |||
2672 | if (!(status & MII_GS_AUTOCMPLT)) { | ||
2673 | /* Wait for 8 sec to give the process | ||
2674 | * more time. Perhaps we should fail after a while. | ||
2675 | */ | ||
2676 | if (!priv->neg_be_verbose++) { | ||
2677 | pr_info("Giving autonegotiation more time.\n"); | ||
2678 | pr_info("Please check that your adapter has\n"); | ||
2679 | pr_info("been properly connected to a HUB or Switch.\n"); | ||
2680 | pr_info("Trying to establish link in the background...\n"); | ||
2681 | } | ||
2682 | tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN); | ||
2683 | return; | ||
2684 | } | ||
2685 | |||
2686 | netdev_info(dev, "Autonegotiation complete\n"); | ||
2687 | tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv); | ||
2688 | tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa); | ||
2689 | mode = an_adv & an_lpa & 0x03E0; | ||
2690 | if (mode & 0x0100) | ||
2691 | priv->tlan_full_duplex = true; | ||
2692 | else if (!(mode & 0x0080) && (mode & 0x0040)) | ||
2693 | priv->tlan_full_duplex = true; | ||
2694 | |||
2695 | if ((!(mode & 0x0180)) && | ||
2696 | (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) && | ||
2697 | (priv->phy_num != 0)) { | ||
2698 | priv->phy_num = 0; | ||
2699 | data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | ||
2700 | | TLAN_NET_CFG_PHY_EN; | ||
2701 | tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data); | ||
2702 | tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN); | ||
2703 | return; | ||
2704 | } | ||
2705 | |||
2706 | if (priv->phy_num == 0) { | ||
2707 | if ((priv->duplex == TLAN_DUPLEX_FULL) || | ||
2708 | (an_adv & an_lpa & 0x0040)) { | ||
2709 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, | ||
2710 | MII_GC_AUTOENB | MII_GC_DUPLEX); | ||
2711 | netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n"); | ||
2712 | } else { | ||
2713 | tlan_mii_write_reg(dev, phy, MII_GEN_CTL, | ||
2714 | MII_GC_AUTOENB); | ||
2715 | netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n"); | ||
2716 | } | ||
2717 | } | ||
2718 | |||
2719 | /* Wait for 100 ms. No reason in particular. | ||
2720 | */ | ||
2721 | tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET); | ||
2722 | |||
2723 | } | ||
2724 | |||
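/* A hedged decode sketch of the "mode" word computed in
 * tlan_phy_finish_auto_neg() above. The bit values are the standard
 * IEEE 802.3 autonegotiation ability bits shared by MII_AN_ADV and
 * MII_AN_LPA (0x0040 = 10BASE-T FD, 0x0080 = 100BASE-TX HD,
 * 0x0100 = 100BASE-TX FD); the helper name is hypothetical.
 */
static bool tlan_example_mode_is_full_duplex(u16 mode)
{
	if (mode & 0x0100)			/* both ends do 100TX-FD */
		return true;
	if (!(mode & 0x0080) && (mode & 0x0040))
		return true;			/* no 100TX-HD, 10BT-FD */
	return false;
}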
2725 | #ifdef MONITOR | ||
2726 | |||
2727 | /********************************************************************* | ||
2728 | * | ||
2729 | * tlan_phy_monitor | ||
2730 | * | ||
2731 | * Returns: | ||
2732 | * None | ||
2733 | * | ||
2734 | * Params: | ||
2735 | * dev The device structure of this device. | ||
2736 | * | ||
2737 | * | ||
2738 | * This function monitors PHY condition by reading the status | ||
2739 | * register via the MII bus. This can be used to give info | ||
2740 | * about link changes (up/down), and possible switch to alternate | ||
2741 | * media. | ||
2742 | * | ||
2743 | *******************************************************************/ | ||
2744 | |||
2745 | void tlan_phy_monitor(struct net_device *dev) | ||
2746 | { | ||
2747 | struct tlan_priv *priv = netdev_priv(dev); | ||
2748 | u16 phy; | ||
2749 | u16 phy_status; | ||
2750 | |||
2751 | phy = priv->phy[priv->phy_num]; | ||
2752 | |||
2753 | /* Get PHY status register */ | ||
2754 | tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status); | ||
2755 | |||
2756 | /* Check if link has been lost */ | ||
2757 | if (!(phy_status & MII_GS_LINK)) { | ||
2758 | if (priv->link) { | ||
2759 | priv->link = 0; | ||
2760 | printk(KERN_DEBUG "TLAN: %s has lost link\n", | ||
2761 | dev->name); | ||
2762 | netif_carrier_off(dev); | ||
2763 | tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT); | ||
2764 | return; | ||
2765 | } | ||
2766 | } | ||
2767 | |||
2768 | /* Link reestablished? */ | ||
2769 | if ((phy_status & MII_GS_LINK) && !priv->link) { | ||
2770 | priv->link = 1; | ||
2771 | printk(KERN_DEBUG "TLAN: %s has reestablished link\n", | ||
2772 | dev->name); | ||
2773 | netif_carrier_on(dev); | ||
2774 | } | ||
2775 | |||
2776 | /* Setup a new monitor */ | ||
2777 | tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT); | ||
2778 | } | ||
2779 | |||
2780 | #endif /* MONITOR */ | ||
2781 | |||
2782 | |||
2783 | /***************************************************************************** | ||
2784 | ****************************************************************************** | ||
2785 | |||
2786 | ThunderLAN driver MII routines | ||
2787 | |||
2788 | These routines are based on the information in chap. 2 of the | ||
2789 | "ThunderLAN Programmer's Guide", pp. 15-24. | ||
2790 | |||
2791 | ****************************************************************************** | ||
2792 | *****************************************************************************/ | ||
2793 | |||
2794 | |||
2795 | /*************************************************************** | ||
2796 | * tlan_mii_read_reg | ||
2797 | * | ||
2798 | * Returns: | ||
2799 | * false if ack received ok | ||
2800 | * true if no ack received or other error | ||
2801 | * | ||
2802 | * Parms: | ||
2803 | * dev The device structure containing | ||
2804 | * the IO address and interrupt count | ||
2805 | * for this device. | ||
2806 | * phy The address of the PHY to be queried. | ||
2807 | * reg The register whose contents are to be | ||
2808 | * retrieved. | ||
2809 | * val A pointer to a variable to store the | ||
2810 | * retrieved value. | ||
2811 | * | ||
2812 | * This function uses the TLAN's MII bus to retrieve the contents | ||
2813 | * of a given register on a PHY. It sends the appropriate info | ||
2814 | * and then reads the 16-bit register value from the MII bus via | ||
2815 | * the TLAN SIO register. | ||
2816 | * | ||
2817 | **************************************************************/ | ||
2818 | |||
2819 | static bool | ||
2820 | tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val) | ||
2821 | { | ||
2822 | u8 nack; | ||
2823 | u16 sio, tmp; | ||
2824 | u32 i; | ||
2825 | bool err; | ||
2826 | int minten; | ||
2827 | struct tlan_priv *priv = netdev_priv(dev); | ||
2828 | unsigned long flags = 0; | ||
2829 | |||
2830 | err = false; | ||
2831 | outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); | ||
2832 | sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; | ||
2833 | |||
2834 | if (!in_irq()) | ||
2835 | spin_lock_irqsave(&priv->lock, flags); | ||
2836 | |||
2837 | tlan_mii_sync(dev->base_addr); | ||
2838 | |||
2839 | minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); | ||
2840 | if (minten) | ||
2841 | tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio); | ||
2842 | |||
2843 | tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */ | ||
2844 | tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */ | ||
2845 | tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */ | ||
2846 | tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */ | ||
2847 | |||
2848 | |||
2849 | tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */ | ||
2850 | |||
2851 | tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */ | ||
2852 | tlan_set_bit(TLAN_NET_SIO_MCLK, sio); | ||
2853 | tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */ | ||
2854 | |||
2855 | nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */ | ||
2856 | tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */ | ||
2857 | if (nack) { /* no ACK, so fake it */ | ||
2858 | for (i = 0; i < 16; i++) { | ||
2859 | tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); | ||
2860 | tlan_set_bit(TLAN_NET_SIO_MCLK, sio); | ||
2861 | } | ||
2862 | tmp = 0xffff; | ||
2863 | err = true; | ||
2864 | } else { /* ACK, so read data */ | ||
2865 | for (tmp = 0, i = 0x8000; i; i >>= 1) { | ||
2866 | tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); | ||
2867 | if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio)) | ||
2868 | tmp |= i; | ||
2869 | tlan_set_bit(TLAN_NET_SIO_MCLK, sio); | ||
2870 | } | ||
2871 | } | ||
2872 | |||
2873 | |||
2874 | tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */ | ||
2875 | tlan_set_bit(TLAN_NET_SIO_MCLK, sio); | ||
2876 | |||
2877 | if (minten) | ||
2878 | tlan_set_bit(TLAN_NET_SIO_MINTEN, sio); | ||
2879 | |||
2880 | *val = tmp; | ||
2881 | |||
2882 | if (!in_irq()) | ||
2883 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2884 | |||
2885 | return err; | ||
2886 | |||
2887 | } | ||
2888 | |||
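/* A minimal usage sketch for tlan_mii_read_reg(): poll the generic
 * status register until link comes up. The helper name, retry count,
 * and 1 ms delay are illustrative assumptions, not part of this driver.
 */
static bool tlan_example_wait_link(struct net_device *dev, u16 phy)
{
	u16 sts = 0;
	int i;

	for (i = 0; i < 1000; i++) {
		if (tlan_mii_read_reg(dev, phy, MII_GEN_STS, &sts))
			return false;		/* no ACK from the PHY */
		if (sts & MII_GS_LINK)
			return true;		/* link is up */
		udelay(1000);
	}
	return false;				/* gave up waiting */
}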
2889 | |||
2890 | |||
2891 | |||
2892 | /*************************************************************** | ||
2893 | * tlan_mii_send_data | ||
2894 | * | ||
2895 | * Returns: | ||
2896 | * Nothing | ||
2897 | * Parms: | ||
2898 | * base_port The base IO port of the adapter in | ||
2899 | * question. | ||
2900 | * data The value to be placed on the | ||
2901 | * MII bus. | ||
2902 | * num_bits The number of bits in data that are to | ||
2903 | * be placed on the MII bus. | ||
2904 | * | ||
2905 | * This function sends a sequence of bits on the MII | ||
2906 | * configuration bus. | ||
2907 | * | ||
2908 | **************************************************************/ | ||
2909 | |||
2910 | static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits) | ||
2911 | { | ||
2912 | u16 sio; | ||
2913 | u32 i; | ||
2914 | |||
2915 | if (num_bits == 0) | ||
2916 | return; | ||
2917 | |||
2918 | outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR); | ||
2919 | sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; | ||
2920 | tlan_set_bit(TLAN_NET_SIO_MTXEN, sio); | ||
2921 | |||
2922 | for (i = (0x1 << (num_bits - 1)); i; i >>= 1) { | ||
2923 | tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); | ||
2924 | (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio); | ||
2925 | if (data & i) | ||
2926 | tlan_set_bit(TLAN_NET_SIO_MDATA, sio); | ||
2927 | else | ||
2928 | tlan_clear_bit(TLAN_NET_SIO_MDATA, sio); | ||
2929 | tlan_set_bit(TLAN_NET_SIO_MCLK, sio); | ||
2930 | (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio); | ||
2931 | } | ||
2932 | |||
2933 | } | ||
2934 | |||
2935 | |||
2936 | |||
2937 | |||
2938 | /*************************************************************** | ||
2939 | * tlan_mii_sync | ||
2940 | * | ||
2941 | * Returns: | ||
2942 | * Nothing | ||
2943 | * Parms: | ||
2944 | * base_port The base IO port of the adapter in | ||
2945 | * question. | ||
2946 | * | ||
2947 | * This function syncs all PHYs in terms of the MII configuration | ||
2948 | * bus. | ||
2949 | * | ||
2950 | **************************************************************/ | ||
2951 | |||
2952 | static void tlan_mii_sync(u16 base_port) | ||
2953 | { | ||
2954 | int i; | ||
2955 | u16 sio; | ||
2956 | |||
2957 | outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR); | ||
2958 | sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO; | ||
2959 | |||
2960 | tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); | ||
2961 | for (i = 0; i < 32; i++) { | ||
2962 | tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); | ||
2963 | tlan_set_bit(TLAN_NET_SIO_MCLK, sio); | ||
2964 | } | ||
2965 | |||
2966 | } | ||
2967 | |||
2968 | |||
2969 | |||
2970 | |||
2971 | /*************************************************************** | ||
2972 | * tlan_mii_write_reg | ||
2973 | * | ||
2974 | * Returns: | ||
2975 | * Nothing | ||
2976 | * Parms: | ||
2977 | * dev The device structure for the device | ||
2978 | * to write to. | ||
2979 | * phy The address of the PHY to be written to. | ||
2980 | * reg The register whose contents are to be | ||
2981 | * written. | ||
2982 | * val The value to be written to the register. | ||
2983 | * | ||
2984 | * This function uses the TLAN's MII bus to write the contents of a | ||
2985 | * given register on a PHY. It sends the appropriate info and then | ||
2986 | * writes the 16-bit register value from the MII configuration bus | ||
2987 | * via the TLAN SIO register. | ||
2988 | * | ||
2989 | **************************************************************/ | ||
2990 | |||
2991 | static void | ||
2992 | tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val) | ||
2993 | { | ||
2994 | u16 sio; | ||
2995 | int minten; | ||
2996 | unsigned long flags = 0; | ||
2997 | struct tlan_priv *priv = netdev_priv(dev); | ||
2998 | |||
2999 | outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR); | ||
3000 | sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO; | ||
3001 | |||
3002 | if (!in_irq()) | ||
3003 | spin_lock_irqsave(&priv->lock, flags); | ||
3004 | |||
3005 | tlan_mii_sync(dev->base_addr); | ||
3006 | |||
3007 | minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio); | ||
3008 | if (minten) | ||
3009 | tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio); | ||
3010 | |||
3011 | tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */ | ||
3012 | tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */ | ||
3013 | tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */ | ||
3014 | tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */ | ||
3015 | |||
3016 | tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */ | ||
3017 | tlan_mii_send_data(dev->base_addr, val, 16); /* send data */ | ||
3018 | |||
3019 | tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */ | ||
3020 | tlan_set_bit(TLAN_NET_SIO_MCLK, sio); | ||
3021 | |||
3022 | if (minten) | ||
3023 | tlan_set_bit(TLAN_NET_SIO_MINTEN, sio); | ||
3024 | |||
3025 | if (!in_irq()) | ||
3026 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3027 | |||
3028 | } | ||
3029 | |||
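/* A minimal usage sketch for tlan_mii_write_reg(): restart
 * autonegotiation by setting the enable and restart bits of the
 * generic control register (0x1000 | 0x0200 = 0x1200, the same value
 * tlan_phy_start_link() writes). The helper name is hypothetical.
 */
static void tlan_example_restart_autoneg(struct net_device *dev, u16 phy)
{
	tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
			   MII_GC_AUTOENB | MII_GC_AUTORSRT);
}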
3030 | |||
3031 | |||
3032 | |||
3033 | /***************************************************************************** | ||
3034 | ****************************************************************************** | ||
3035 | |||
3036 | ThunderLAN driver eeprom routines | ||
3037 | |||
3038 | The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A | ||
3039 | EEPROM. These functions are based on information in Microchip's | ||
3040 | data sheet. I don't know how well these functions will work with | ||
3041 | other EEPROMs. | ||
3042 | |||
3043 | ****************************************************************************** | ||
3044 | *****************************************************************************/ | ||
3045 | |||
3046 | |||
3047 | /*************************************************************** | ||
3048 | * tlan_ee_send_start | ||
3049 | * | ||
3050 | * Returns: | ||
3051 | * Nothing | ||
3052 | * Parms: | ||
3053 | * io_base The IO port base address for the | ||
3054 | * TLAN device with the EEPROM to | ||
3055 | * use. | ||
3056 | * | ||
3057 | * This function sends a start cycle to an EEPROM attached | ||
3058 | * to a TLAN chip. | ||
3059 | * | ||
3060 | **************************************************************/ | ||
3061 | |||
3062 | static void tlan_ee_send_start(u16 io_base) | ||
3063 | { | ||
3064 | u16 sio; | ||
3065 | |||
3066 | outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); | ||
3067 | sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; | ||
3068 | |||
3069 | tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3070 | tlan_set_bit(TLAN_NET_SIO_EDATA, sio); | ||
3071 | tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); | ||
3072 | tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); | ||
3073 | tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3074 | |||
3075 | } | ||
3076 | |||
3077 | |||
3078 | |||
3079 | |||
3080 | /*************************************************************** | ||
3081 | * tlan_ee_send_byte | ||
3082 | * | ||
3083 | * Returns: | ||
3084 | * If the correct ack was received, 0, otherwise 1 | ||
3085 | * Parms: io_base The IO port base address for the | ||
3086 | * TLAN device with the EEPROM to | ||
3087 | * use. | ||
3088 | * data The 8 bits of information to | ||
3089 | * send to the EEPROM. | ||
3090 | * stop If TLAN_EEPROM_STOP is passed, a | ||
3091 | * stop cycle is sent after the | ||
3092 | * byte is sent and the ack is | ||
3093 | * read. | ||
3094 | * | ||
3095 | * This function sends a byte on the serial EEPROM line, | ||
3096 | * driving the clock to send each bit. The function then | ||
3097 | * reverses transmission direction and reads an acknowledge | ||
3098 | * bit. | ||
3099 | * | ||
3100 | **************************************************************/ | ||
3101 | |||
3102 | static int tlan_ee_send_byte(u16 io_base, u8 data, int stop) | ||
3103 | { | ||
3104 | int err; | ||
3105 | u8 place; | ||
3106 | u16 sio; | ||
3107 | |||
3108 | outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); | ||
3109 | sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; | ||
3110 | |||
3111 | /* Assume clock is low, tx is enabled; */ | ||
3112 | for (place = 0x80; place != 0; place >>= 1) { | ||
3113 | if (place & data) | ||
3114 | tlan_set_bit(TLAN_NET_SIO_EDATA, sio); | ||
3115 | else | ||
3116 | tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); | ||
3117 | tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3118 | tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3119 | } | ||
3120 | tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio); | ||
3121 | tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3122 | err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio); | ||
3123 | tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3124 | tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); | ||
3125 | |||
3126 | if ((!err) && stop) { | ||
3127 | /* STOP, raise data while clock is high */ | ||
3128 | tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); | ||
3129 | tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3130 | tlan_set_bit(TLAN_NET_SIO_EDATA, sio); | ||
3131 | } | ||
3132 | |||
3133 | return err; | ||
3134 | |||
3135 | } | ||
3136 | |||
3137 | |||
3138 | |||
3139 | |||
3140 | /*************************************************************** | ||
3141 | * tlan_ee_receive_byte | ||
3142 | * | ||
3143 | * Returns: | ||
3144 | * Nothing | ||
3145 | * Parms: | ||
3146 | * io_base The IO port base address for the | ||
3147 | * TLAN device with the EEPROM to | ||
3148 | * use. | ||
3149 | * data An address to a char to hold the | ||
3150 | * data sent from the EEPROM. | ||
3151 | * stop If TLAN_EEPROM_STOP is passed, a | ||
3152 | * stop cycle is sent after the | ||
3153 | * byte is received, and no ack is | ||
3154 | * sent. | ||
3155 | * | ||
3156 | * This function receives 8 bits of data from the EEPROM | ||
3157 | * over the serial link. It then sends an ack bit, or no | ||
3158 | * ack and a stop bit. This function is used to retrieve | ||
3159 | * data after the address of a byte in the EEPROM has been | ||
3160 | * sent. | ||
3161 | * | ||
3162 | **************************************************************/ | ||
3163 | |||
3164 | static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop) | ||
3165 | { | ||
3166 | u8 place; | ||
3167 | u16 sio; | ||
3168 | |||
3169 | outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR); | ||
3170 | sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO; | ||
3171 | *data = 0; | ||
3172 | |||
3173 | /* Assume clock is low, tx is enabled; */ | ||
3174 | tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio); | ||
3175 | for (place = 0x80; place; place >>= 1) { | ||
3176 | tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3177 | if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio)) | ||
3178 | *data |= place; | ||
3179 | tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3180 | } | ||
3181 | |||
3182 | tlan_set_bit(TLAN_NET_SIO_ETXEN, sio); | ||
3183 | if (!stop) { | ||
3184 | tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */ | ||
3185 | tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3186 | tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3187 | } else { | ||
3188 | tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */ | ||
3189 | tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3190 | tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3191 | /* STOP, raise data while clock is high */ | ||
3192 | tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); | ||
3193 | tlan_set_bit(TLAN_NET_SIO_ECLOK, sio); | ||
3194 | tlan_set_bit(TLAN_NET_SIO_EDATA, sio); | ||
3195 | } | ||
3196 | |||
3197 | } | ||
3198 | |||
3199 | |||
3200 | |||
3201 | |||
3202 | /*************************************************************** | ||
3203 | * tlan_ee_read_byte | ||
3204 | * | ||
3205 | * Returns: | ||
3206 | * 0 if no error, else the stage at which the error | ||
3207 | * occurred. | ||
3208 | * Parms: | ||
3209 | * io_base The IO port base address for the | ||
3210 | * TLAN device with the EEPROM to | ||
3211 | * use. | ||
3212 | * ee_addr The address of the byte in the | ||
3213 | * EEPROM whose contents are to be | ||
3214 | * retrieved. | ||
3215 | * data An address to a char to hold the | ||
3216 | * data obtained from the EEPROM. | ||
3217 | * | ||
3218 | * This function reads a byte of information from a byte | ||
3219 | * cell in the EEPROM. | ||
3220 | * | ||
3221 | **************************************************************/ | ||
3222 | |||
3223 | static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data) | ||
3224 | { | ||
3225 | int err; | ||
3226 | struct tlan_priv *priv = netdev_priv(dev); | ||
3227 | unsigned long flags = 0; | ||
3228 | int ret = 0; | ||
3229 | |||
3230 | spin_lock_irqsave(&priv->lock, flags); | ||
3231 | |||
3232 | tlan_ee_send_start(dev->base_addr); | ||
3233 | err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK); | ||
3234 | if (err) { | ||
3235 | ret = 1; | ||
3236 | goto fail; | ||
3237 | } | ||
3238 | err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK); | ||
3239 | if (err) { | ||
3240 | ret = 2; | ||
3241 | goto fail; | ||
3242 | } | ||
3243 | tlan_ee_send_start(dev->base_addr); | ||
3244 | err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK); | ||
3245 | if (err) { | ||
3246 | ret = 3; | ||
3247 | goto fail; | ||
3248 | } | ||
3249 | tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP); | ||
3250 | fail: | ||
3251 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3252 | |||
3253 | return ret; | ||
3254 | |||
3255 | } | ||
3256 | |||
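/* A minimal usage sketch for tlan_ee_read_byte(): fetch a six-byte
 * station address from the EEPROM. The 0x83 starting offset is an
 * illustrative assumption (the real offset comes from the adapter
 * table's addr_ofs field); the helper name is hypothetical.
 */
static int tlan_example_read_mac(struct net_device *dev, u8 *mac)
{
	int i, err;

	for (i = 0; i < 6; i++) {
		err = tlan_ee_read_byte(dev, 0x83 + i, &mac[i]);
		if (err)
			return err;	/* 1-3: stage that failed */
	}
	return 0;
}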
3257 | |||
3258 | |||
diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h new file mode 100644 index 000000000000..5fc98a8e4889 --- /dev/null +++ b/drivers/net/ethernet/ti/tlan.h | |||
@@ -0,0 +1,546 @@ | |||
1 | #ifndef TLAN_H | ||
2 | #define TLAN_H | ||
3 | /******************************************************************** | ||
4 | * | ||
5 | * Linux ThunderLAN Driver | ||
6 | * | ||
7 | * tlan.h | ||
8 | * by James Banks | ||
9 | * | ||
10 | * (C) 1997-1998 Caldera, Inc. | ||
11 | * (C) 1999-2001 Torben Mathiasen | ||
12 | * | ||
13 | * This software may be used and distributed according to the terms | ||
14 | * of the GNU General Public License, incorporated herein by reference. | ||
15 | * | ||
16 | * | ||
17 | * Dec 10, 1999 Torben Mathiasen <torben.mathiasen@compaq.com> | ||
18 | * New Maintainer | ||
19 | * | ||
20 | ********************************************************************/ | ||
21 | |||
22 | |||
23 | #include <linux/io.h> | ||
24 | #include <linux/types.h> | ||
25 | #include <linux/netdevice.h> | ||
26 | |||
27 | |||
28 | |||
29 | /***************************************************************** | ||
30 | * TLan Definitions | ||
31 | * | ||
32 | ****************************************************************/ | ||
33 | |||
34 | #define TLAN_MIN_FRAME_SIZE 64 | ||
35 | #define TLAN_MAX_FRAME_SIZE 1600 | ||
36 | |||
37 | #define TLAN_NUM_RX_LISTS 32 | ||
38 | #define TLAN_NUM_TX_LISTS 64 | ||
39 | |||
40 | #define TLAN_IGNORE 0 | ||
41 | #define TLAN_RECORD 1 | ||
42 | |||
43 | #define TLAN_DBG(lvl, format, args...) \ | ||
44 | do { \ | ||
45 | if (debug&lvl) \ | ||
46 | printk(KERN_DEBUG "TLAN: " format, ##args); \ | ||
47 | } while (0) | ||
48 | |||
49 | #define TLAN_DEBUG_GNRL 0x0001 | ||
50 | #define TLAN_DEBUG_TX 0x0002 | ||
51 | #define TLAN_DEBUG_RX 0x0004 | ||
52 | #define TLAN_DEBUG_LIST 0x0008 | ||
53 | #define TLAN_DEBUG_PROBE 0x0010 | ||
54 | |||
55 | #define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */ | ||
56 | #define MAX_TLAN_BOARDS 8 /* Max number of boards installed | ||
57 | at a time */ | ||
58 | |||
59 | |||
60 | /***************************************************************** | ||
61 | * Device Identification Definitions | ||
62 | * | ||
63 | ****************************************************************/ | ||
64 | |||
65 | #define PCI_DEVICE_ID_NETELLIGENT_10_T2 0xB012 | ||
66 | #define PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100 0xB030 | ||
67 | #ifndef PCI_DEVICE_ID_OLICOM_OC2183 | ||
68 | #define PCI_DEVICE_ID_OLICOM_OC2183 0x0013 | ||
69 | #endif | ||
70 | #ifndef PCI_DEVICE_ID_OLICOM_OC2325 | ||
71 | #define PCI_DEVICE_ID_OLICOM_OC2325 0x0012 | ||
72 | #endif | ||
73 | #ifndef PCI_DEVICE_ID_OLICOM_OC2326 | ||
74 | #define PCI_DEVICE_ID_OLICOM_OC2326 0x0014 | ||
75 | #endif | ||
76 | |||
77 | struct tlan_adapter_entry { | ||
78 | u16 vendor_id; | ||
79 | u16 device_id; | ||
80 | char *device_label; | ||
81 | u32 flags; | ||
82 | u16 addr_ofs; | ||
83 | }; | ||
84 | |||
85 | #define TLAN_ADAPTER_NONE 0x00000000 | ||
86 | #define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001 | ||
87 | #define TLAN_ADAPTER_BIT_RATE_PHY 0x00000002 | ||
88 | #define TLAN_ADAPTER_USE_INTERN_10 0x00000004 | ||
89 | #define TLAN_ADAPTER_ACTIVITY_LED 0x00000008 | ||
90 | |||
91 | #define TLAN_SPEED_DEFAULT 0 | ||
92 | #define TLAN_SPEED_10 10 | ||
93 | #define TLAN_SPEED_100 100 | ||
94 | |||
95 | #define TLAN_DUPLEX_DEFAULT 0 | ||
96 | #define TLAN_DUPLEX_HALF 1 | ||
97 | #define TLAN_DUPLEX_FULL 2 | ||
98 | |||
99 | |||
100 | |||
101 | /***************************************************************** | ||
102 | * EISA Definitions | ||
103 | * | ||
104 | ****************************************************************/ | ||
105 | |||
106 | #define EISA_ID 0xc80 /* EISA ID Registers */ | ||
107 | #define EISA_ID0 0xc80 /* EISA ID Register 0 */ | ||
108 | #define EISA_ID1 0xc81 /* EISA ID Register 1 */ | ||
109 | #define EISA_ID2 0xc82 /* EISA ID Register 2 */ | ||
110 | #define EISA_ID3 0xc83 /* EISA ID Register 3 */ | ||
111 | #define EISA_CR 0xc84 /* EISA Control Register */ | ||
112 | #define EISA_REG0 0xc88 /* EISA Configuration Register 0 */ | ||
113 | #define EISA_REG1 0xc89 /* EISA Configuration Register 1 */ | ||
114 | #define EISA_REG2 0xc8a /* EISA Configuration Register 2 */ | ||
115 | #define EISA_REG3 0xc8f /* EISA Configuration Register 3 */ | ||
116 | #define EISA_APROM 0xc90 /* Ethernet Address PROM */ | ||
117 | |||
118 | |||
119 | |||
120 | /***************************************************************** | ||
121 | * Rx/Tx List Definitions | ||
122 | * | ||
123 | ****************************************************************/ | ||
124 | |||
125 | #define TLAN_BUFFERS_PER_LIST 10 | ||
126 | #define TLAN_LAST_BUFFER 0x80000000 | ||
127 | #define TLAN_CSTAT_UNUSED 0x8000 | ||
128 | #define TLAN_CSTAT_FRM_CMP 0x4000 | ||
129 | #define TLAN_CSTAT_READY 0x3000 | ||
130 | #define TLAN_CSTAT_EOC 0x0800 | ||
131 | #define TLAN_CSTAT_RX_ERROR 0x0400 | ||
132 | #define TLAN_CSTAT_PASS_CRC 0x0200 | ||
133 | #define TLAN_CSTAT_DP_PR 0x0100 | ||
134 | |||
135 | |||
136 | struct tlan_buffer { | ||
137 | u32 count; | ||
138 | u32 address; | ||
139 | }; | ||
140 | |||
141 | |||
142 | struct tlan_list { | ||
143 | u32 forward; | ||
144 | u16 c_stat; | ||
145 | u16 frame_size; | ||
146 | struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST]; | ||
147 | }; | ||
148 | |||
149 | |||
150 | typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE]; | ||
151 | |||
152 | |||
153 | |||
154 | |||
155 | /***************************************************************** | ||
156 | * PHY definitions | ||
157 | * | ||
158 | ****************************************************************/ | ||
159 | |||
160 | #define TLAN_PHY_MAX_ADDR 0x1F | ||
161 | #define TLAN_PHY_NONE 0x20 | ||
162 | |||
163 | |||
164 | |||
165 | |||
166 | /***************************************************************** | ||
167 | * TLAN Private Information Structure | ||
168 | * | ||
169 | ****************************************************************/ | ||
170 | |||
171 | struct tlan_priv { | ||
172 | struct net_device *next_device; | ||
173 | struct pci_dev *pci_dev; | ||
174 | struct net_device *dev; | ||
175 | void *dma_storage; | ||
176 | dma_addr_t dma_storage_dma; | ||
177 | unsigned int dma_size; | ||
178 | u8 *pad_buffer; | ||
179 | struct tlan_list *rx_list; | ||
180 | dma_addr_t rx_list_dma; | ||
181 | u8 *rx_buffer; | ||
182 | dma_addr_t rx_buffer_dma; | ||
183 | u32 rx_head; | ||
184 | u32 rx_tail; | ||
185 | u32 rx_eoc_count; | ||
186 | struct tlan_list *tx_list; | ||
187 | dma_addr_t tx_list_dma; | ||
188 | u8 *tx_buffer; | ||
189 | dma_addr_t tx_buffer_dma; | ||
190 | u32 tx_head; | ||
191 | u32 tx_in_progress; | ||
192 | u32 tx_tail; | ||
193 | u32 tx_busy_count; | ||
194 | u32 phy_online; | ||
195 | u32 timer_set_at; | ||
196 | u32 timer_type; | ||
197 | struct timer_list timer; | ||
198 | struct board *adapter; | ||
199 | u32 adapter_rev; | ||
200 | u32 aui; | ||
201 | u32 debug; | ||
202 | u32 duplex; | ||
203 | u32 phy[2]; | ||
204 | u32 phy_num; | ||
205 | u32 speed; | ||
206 | u8 tlan_rev; | ||
207 | u8 tlan_full_duplex; | ||
208 | spinlock_t lock; | ||
209 | u8 link; | ||
210 | u8 is_eisa; | ||
211 | struct work_struct tlan_tqueue; | ||
212 | u8 neg_be_verbose; | ||
213 | }; | ||
214 | |||
215 | |||
216 | |||
217 | |||
218 | /***************************************************************** | ||
219 | * TLan Driver Timer Definitions | ||
220 | * | ||
221 | ****************************************************************/ | ||
222 | |||
223 | #define TLAN_TIMER_LINK_BEAT 1 | ||
224 | #define TLAN_TIMER_ACTIVITY 2 | ||
225 | #define TLAN_TIMER_PHY_PDOWN 3 | ||
226 | #define TLAN_TIMER_PHY_PUP 4 | ||
227 | #define TLAN_TIMER_PHY_RESET 5 | ||
228 | #define TLAN_TIMER_PHY_START_LINK 6 | ||
229 | #define TLAN_TIMER_PHY_FINISH_AN 7 | ||
230 | #define TLAN_TIMER_FINISH_RESET 8 | ||
231 | |||
232 | #define TLAN_TIMER_ACT_DELAY (HZ/10) | ||
233 | |||
234 | |||
235 | |||
236 | |||
237 | /***************************************************************** | ||
238 | * TLan Driver Eeprom Definitions | ||
239 | * | ||
240 | ****************************************************************/ | ||
241 | |||
242 | #define TLAN_EEPROM_ACK 0 | ||
243 | #define TLAN_EEPROM_STOP 1 | ||
244 | |||
245 | |||
246 | |||
247 | |||
248 | /***************************************************************** | ||
249 | * Host Register Offsets and Contents | ||
250 | * | ||
251 | ****************************************************************/ | ||
252 | |||
253 | #define TLAN_HOST_CMD 0x00 | ||
254 | #define TLAN_HC_GO 0x80000000 | ||
255 | #define TLAN_HC_STOP 0x40000000 | ||
256 | #define TLAN_HC_ACK 0x20000000 | ||
257 | #define TLAN_HC_CS_MASK 0x1FE00000 | ||
258 | #define TLAN_HC_EOC 0x00100000 | ||
259 | #define TLAN_HC_RT 0x00080000 | ||
260 | #define TLAN_HC_NES 0x00040000 | ||
261 | #define TLAN_HC_AD_RST 0x00008000 | ||
262 | #define TLAN_HC_LD_TMR 0x00004000 | ||
263 | #define TLAN_HC_LD_THR 0x00002000 | ||
264 | #define TLAN_HC_REQ_INT 0x00001000 | ||
265 | #define TLAN_HC_INT_OFF 0x00000800 | ||
266 | #define TLAN_HC_INT_ON 0x00000400 | ||
267 | #define TLAN_HC_AC_MASK 0x000000FF | ||
268 | #define TLAN_CH_PARM 0x04 | ||
269 | #define TLAN_DIO_ADR 0x08 | ||
270 | #define TLAN_DA_ADR_INC 0x8000 | ||
271 | #define TLAN_DA_RAM_ADR 0x4000 | ||
272 | #define TLAN_HOST_INT 0x0A | ||
273 | #define TLAN_HI_IV_MASK 0x1FE0 | ||
274 | #define TLAN_HI_IT_MASK 0x001C | ||
275 | #define TLAN_DIO_DATA 0x0C | ||
276 | |||
277 | |||
278 | /* ThunderLAN Internal Register DIO Offsets */ | ||
279 | |||
280 | #define TLAN_NET_CMD 0x00 | ||
281 | #define TLAN_NET_CMD_NRESET 0x80 | ||
282 | #define TLAN_NET_CMD_NWRAP 0x40 | ||
283 | #define TLAN_NET_CMD_CSF 0x20 | ||
284 | #define TLAN_NET_CMD_CAF 0x10 | ||
285 | #define TLAN_NET_CMD_NOBRX 0x08 | ||
286 | #define TLAN_NET_CMD_DUPLEX 0x04 | ||
287 | #define TLAN_NET_CMD_TRFRAM 0x02 | ||
288 | #define TLAN_NET_CMD_TXPACE 0x01 | ||
289 | #define TLAN_NET_SIO 0x01 | ||
290 | #define TLAN_NET_SIO_MINTEN 0x80 | ||
291 | #define TLAN_NET_SIO_ECLOK 0x40 | ||
292 | #define TLAN_NET_SIO_ETXEN 0x20 | ||
293 | #define TLAN_NET_SIO_EDATA 0x10 | ||
294 | #define TLAN_NET_SIO_NMRST 0x08 | ||
295 | #define TLAN_NET_SIO_MCLK 0x04 | ||
296 | #define TLAN_NET_SIO_MTXEN 0x02 | ||
297 | #define TLAN_NET_SIO_MDATA 0x01 | ||
298 | #define TLAN_NET_STS 0x02 | ||
299 | #define TLAN_NET_STS_MIRQ 0x80 | ||
300 | #define TLAN_NET_STS_HBEAT 0x40 | ||
301 | #define TLAN_NET_STS_TXSTOP 0x20 | ||
302 | #define TLAN_NET_STS_RXSTOP 0x10 | ||
303 | #define TLAN_NET_STS_RSRVD 0x0F | ||
304 | #define TLAN_NET_MASK 0x03 | ||
305 | #define TLAN_NET_MASK_MASK7 0x80 | ||
306 | #define TLAN_NET_MASK_MASK6 0x40 | ||
307 | #define TLAN_NET_MASK_MASK5 0x20 | ||
308 | #define TLAN_NET_MASK_MASK4 0x10 | ||
309 | #define TLAN_NET_MASK_RSRVD 0x0F | ||
310 | #define TLAN_NET_CONFIG 0x04 | ||
311 | #define TLAN_NET_CFG_RCLK 0x8000 | ||
312 | #define TLAN_NET_CFG_TCLK 0x4000 | ||
313 | #define TLAN_NET_CFG_BIT 0x2000 | ||
314 | #define TLAN_NET_CFG_RXCRC 0x1000 | ||
315 | #define TLAN_NET_CFG_PEF 0x0800 | ||
316 | #define TLAN_NET_CFG_1FRAG 0x0400 | ||
317 | #define TLAN_NET_CFG_1CHAN 0x0200 | ||
318 | #define TLAN_NET_CFG_MTEST 0x0100 | ||
319 | #define TLAN_NET_CFG_PHY_EN 0x0080 | ||
320 | #define TLAN_NET_CFG_MSMASK 0x007F | ||
321 | #define TLAN_MAN_TEST 0x06 | ||
322 | #define TLAN_DEF_VENDOR_ID 0x08 | ||
323 | #define TLAN_DEF_DEVICE_ID 0x0A | ||
324 | #define TLAN_DEF_REVISION 0x0C | ||
325 | #define TLAN_DEF_SUBCLASS 0x0D | ||
326 | #define TLAN_DEF_MIN_LAT 0x0E | ||
327 | #define TLAN_DEF_MAX_LAT 0x0F | ||
328 | #define TLAN_AREG_0 0x10 | ||
329 | #define TLAN_AREG_1 0x16 | ||
330 | #define TLAN_AREG_2 0x1C | ||
331 | #define TLAN_AREG_3 0x22 | ||
332 | #define TLAN_HASH_1 0x28 | ||
333 | #define TLAN_HASH_2 0x2C | ||
334 | #define TLAN_GOOD_TX_FRMS 0x30 | ||
335 | #define TLAN_TX_UNDERUNS 0x33 | ||
336 | #define TLAN_GOOD_RX_FRMS 0x34 | ||
337 | #define TLAN_RX_OVERRUNS 0x37 | ||
338 | #define TLAN_DEFERRED_TX 0x38 | ||
339 | #define TLAN_CRC_ERRORS 0x3A | ||
340 | #define TLAN_CODE_ERRORS 0x3B | ||
341 | #define TLAN_MULTICOL_FRMS 0x3C | ||
342 | #define TLAN_SINGLECOL_FRMS 0x3E | ||
343 | #define TLAN_EXCESSCOL_FRMS 0x40 | ||
344 | #define TLAN_LATE_COLS 0x41 | ||
345 | #define TLAN_CARRIER_LOSS 0x42 | ||
346 | #define TLAN_ACOMMIT 0x43 | ||
347 | #define TLAN_LED_REG 0x44 | ||
348 | #define TLAN_LED_ACT 0x10 | ||
349 | #define TLAN_LED_LINK 0x01 | ||
350 | #define TLAN_BSIZE_REG 0x45 | ||
351 | #define TLAN_MAX_RX 0x46 | ||
352 | #define TLAN_INT_DIS 0x48 | ||
353 | #define TLAN_ID_TX_EOC 0x04 | ||
354 | #define TLAN_ID_RX_EOF 0x02 | ||
355 | #define TLAN_ID_RX_EOC 0x01 | ||
356 | |||
357 | |||
358 | |||
359 | /* ThunderLAN Interrupt Codes */ | ||
360 | |||
361 | #define TLAN_INT_NUMBER_OF_INTS 8 | ||
362 | |||
363 | #define TLAN_INT_NONE 0x0000 | ||
364 | #define TLAN_INT_TX_EOF 0x0001 | ||
365 | #define TLAN_INT_STAT_OVERFLOW 0x0002 | ||
366 | #define TLAN_INT_RX_EOF 0x0003 | ||
367 | #define TLAN_INT_DUMMY 0x0004 | ||
368 | #define TLAN_INT_TX_EOC 0x0005 | ||
369 | #define TLAN_INT_STATUS_CHECK 0x0006 | ||
370 | #define TLAN_INT_RX_EOC 0x0007 | ||
371 | |||
372 | |||
373 | |||
374 | /* ThunderLAN MII Registers */ | ||
375 | |||
376 | /* Generic MII/PHY Registers */ | ||
377 | |||
378 | #define MII_GEN_CTL 0x00 | ||
379 | #define MII_GC_RESET 0x8000 | ||
380 | #define MII_GC_LOOPBK 0x4000 | ||
381 | #define MII_GC_SPEEDSEL 0x2000 | ||
382 | #define MII_GC_AUTOENB 0x1000 | ||
383 | #define MII_GC_PDOWN 0x0800 | ||
384 | #define MII_GC_ISOLATE 0x0400 | ||
385 | #define MII_GC_AUTORSRT 0x0200 | ||
386 | #define MII_GC_DUPLEX 0x0100 | ||
387 | #define MII_GC_COLTEST 0x0080 | ||
388 | #define MII_GC_RESERVED 0x007F | ||
389 | #define MII_GEN_STS 0x01 | ||
390 | #define MII_GS_100BT4 0x8000 | ||
391 | #define MII_GS_100BTXFD 0x4000 | ||
392 | #define MII_GS_100BTXHD 0x2000 | ||
393 | #define MII_GS_10BTFD 0x1000 | ||
394 | #define MII_GS_10BTHD 0x0800 | ||
395 | #define MII_GS_RESERVED 0x07C0 | ||
396 | #define MII_GS_AUTOCMPLT 0x0020 | ||
397 | #define MII_GS_RFLT 0x0010 | ||
398 | #define MII_GS_AUTONEG 0x0008 | ||
399 | #define MII_GS_LINK 0x0004 | ||
400 | #define MII_GS_JABBER 0x0002 | ||
401 | #define MII_GS_EXTCAP 0x0001 | ||
402 | #define MII_GEN_ID_HI 0x02 | ||
403 | #define MII_GEN_ID_LO 0x03 | ||
404 | #define MII_GIL_OUI 0xFC00 | ||
405 | #define MII_GIL_MODEL 0x03F0 | ||
406 | #define MII_GIL_REVISION 0x000F | ||
407 | #define MII_AN_ADV 0x04 | ||
408 | #define MII_AN_LPA 0x05 | ||
409 | #define MII_AN_EXP 0x06 | ||
410 | |||
411 | /* ThunderLAN Specific MII/PHY Registers */ | ||
412 | |||
413 | #define TLAN_TLPHY_ID 0x10 | ||
414 | #define TLAN_TLPHY_CTL 0x11 | ||
415 | #define TLAN_TC_IGLINK 0x8000 | ||
416 | #define TLAN_TC_SWAPOL 0x4000 | ||
417 | #define TLAN_TC_AUISEL 0x2000 | ||
418 | #define TLAN_TC_SQEEN 0x1000 | ||
419 | #define TLAN_TC_MTEST 0x0800 | ||
420 | #define TLAN_TC_RESERVED 0x07F8 | ||
421 | #define TLAN_TC_NFEW 0x0004 | ||
422 | #define TLAN_TC_INTEN 0x0002 | ||
423 | #define TLAN_TC_TINT 0x0001 | ||
424 | #define TLAN_TLPHY_STS 0x12 | ||
425 | #define TLAN_TS_MINT 0x8000 | ||
426 | #define TLAN_TS_PHOK 0x4000 | ||
427 | #define TLAN_TS_POLOK 0x2000 | ||
428 | #define TLAN_TS_TPENERGY 0x1000 | ||
429 | #define TLAN_TS_RESERVED 0x0FFF | ||
430 | #define TLAN_TLPHY_PAR 0x19 | ||
431 | #define TLAN_PHY_CIM_STAT 0x0020 | ||
432 | #define TLAN_PHY_SPEED_100 0x0040 | ||
433 | #define TLAN_PHY_DUPLEX_FULL 0x0080 | ||
434 | #define TLAN_PHY_AN_EN_STAT 0x0400 | ||
435 | |||
436 | /* National Sem. & Level1 PHY id's */ | ||
437 | #define NAT_SEM_ID1 0x2000 | ||
438 | #define NAT_SEM_ID2 0x5C01 | ||
439 | #define LEVEL1_ID1 0x7810 | ||
440 | #define LEVEL1_ID2 0x0000 | ||
441 | |||
442 | #define CIRC_INC(a, b) if (++a >= b) a = 0 | ||
443 | |||
444 | /* Routines to access internal registers. */ | ||
445 | |||
446 | static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr) | ||
447 | { | ||
448 | outw(internal_addr, base_addr + TLAN_DIO_ADR); | ||
449 | return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3)); | ||
450 | |||
451 | } | ||
452 | |||
453 | |||
454 | |||
455 | |||
456 | static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr) | ||
457 | { | ||
458 | outw(internal_addr, base_addr + TLAN_DIO_ADR); | ||
459 | return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2)); | ||
460 | |||
461 | } | ||
462 | |||
463 | |||
464 | |||
465 | |||
466 | static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr) | ||
467 | { | ||
468 | outw(internal_addr, base_addr + TLAN_DIO_ADR); | ||
469 | return inl(base_addr + TLAN_DIO_DATA); | ||
470 | |||
471 | } | ||
472 | |||
473 | |||
474 | |||
475 | |||
476 | static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data) | ||
477 | { | ||
478 | outw(internal_addr, base_addr + TLAN_DIO_ADR); | ||
479 | outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3)); | ||
480 | |||
481 | } | ||
482 | |||
483 | |||
484 | |||
485 | |||
486 | static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data) | ||
487 | { | ||
488 | outw(internal_addr, base_addr + TLAN_DIO_ADR); | ||
489 | outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); | ||
490 | |||
491 | } | ||
492 | |||
493 | |||
494 | |||
495 | |||
496 | static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data) | ||
497 | { | ||
498 | outw(internal_addr, base_addr + TLAN_DIO_ADR); | ||
499 | outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2)); | ||
500 | |||
501 | } | ||
502 | |||
503 | #define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port) | ||
504 | #define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit)) | ||
505 | #define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port) | ||
506 | |||
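/* A minimal usage sketch of the DIO accessors above: each access first
 * latches the internal register offset into TLAN_DIO_ADR, then moves
 * data through TLAN_DIO_DATA. Here both LEDs are switched on via the
 * LED register; the helper name is illustrative, not part of this
 * driver.
 */
static inline void tlan_example_leds_on(u16 base_addr)
{
	tlan_dio_write8(base_addr, TLAN_LED_REG,
			TLAN_LED_LINK | TLAN_LED_ACT);
}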
507 | /* | ||
508 | * Given 6 bytes, view them as 8 6-bit numbers and return the XOR of those. | ||
509 | * The code below is about seven times as fast as the original code. | ||
510 | * | ||
511 | * The original code was: | ||
512 | * | ||
513 | * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); } | ||
514 | * | ||
515 | * #define XOR8(a, b, c, d, e, f, g, h) \ | ||
516 | * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) ) | ||
517 | * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) ) | ||
518 | * | ||
519 | * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), | ||
520 | * DA(a,30), DA(a,36), DA(a,42)); | ||
521 | * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), | ||
522 | * DA(a,31), DA(a,37), DA(a,43)) << 1; | ||
523 | * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), | ||
524 | * DA(a,32), DA(a,38), DA(a,44)) << 2; | ||
525 | * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), | ||
526 | * DA(a,33), DA(a,39), DA(a,45)) << 3; | ||
527 | * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), | ||
528 | * DA(a,34), DA(a,40), DA(a,46)) << 4; | ||
529 | * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), | ||
530 | * DA(a,35), DA(a,41), DA(a,47)) << 5; | ||
531 | * | ||
532 | */ | ||
533 | static inline u32 tlan_hash_func(const u8 *a) | ||
534 | { | ||
535 | u8 hash; | ||
536 | |||
537 | hash = (a[0]^a[3]); /* & 077 */ | ||
538 | hash ^= ((a[0]^a[3])>>6); /* & 003 */ | ||
539 | hash ^= ((a[1]^a[4])<<2); /* & 074 */ | ||
540 | hash ^= ((a[1]^a[4])>>4); /* & 017 */ | ||
541 | hash ^= ((a[2]^a[5])<<4); /* & 060 */ | ||
542 | hash ^= ((a[2]^a[5])>>2); /* & 077 */ | ||
543 | |||
544 | return hash & 077; | ||
545 | } | ||
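/* A hedged usage sketch for tlan_hash_func(): the 6-bit result selects
 * one bit of the 64-bit multicast hash table, split across the 32-bit
 * TLAN_HASH_1/TLAN_HASH_2 registers. This mirrors how the driver's
 * multicast path is expected to use the hash; the helper name is
 * illustrative.
 */
static inline void tlan_example_hash_accept(u16 base_addr, const u8 *addr)
{
	u32 offset = tlan_hash_func(addr);

	if (offset < 32)
		tlan_dio_write32(base_addr, TLAN_HASH_1,
				 tlan_dio_read32(base_addr, TLAN_HASH_1) |
				 (1u << offset));
	else
		tlan_dio_write32(base_addr, TLAN_HASH_2,
				 tlan_dio_read32(base_addr, TLAN_HASH_2) |
				 (1u << (offset - 32)));
}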
546 | #endif | ||