Diffstat (limited to 'drivers/net/cpmac.c')
-rw-r--r-- | drivers/net/cpmac.c | 1174 |
1 file changed, 1174 insertions, 0 deletions
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
new file mode 100644
index 000000000000..ed53aaab4c02
--- /dev/null
+++ b/drivers/net/cpmac.c
@@ -0,0 +1,1174 @@
1 | /* | ||
2 | * Copyright (C) 2006, 2007 Eugene Konev | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
17 | */ | ||
18 | |||
19 | #include <linux/module.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/moduleparam.h> | ||
22 | |||
23 | #include <linux/sched.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/version.h> | ||
30 | |||
31 | #include <linux/netdevice.h> | ||
32 | #include <linux/etherdevice.h> | ||
33 | #include <linux/ethtool.h> | ||
34 | #include <linux/skbuff.h> | ||
35 | #include <linux/mii.h> | ||
36 | #include <linux/phy.h> | ||
37 | #include <linux/platform_device.h> | ||
38 | #include <linux/dma-mapping.h> | ||
39 | #include <asm/gpio.h> | ||
40 | |||
41 | MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); | ||
42 | MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); | ||
43 | MODULE_LICENSE("GPL"); | ||
44 | |||
45 | static int debug_level = 8; | ||
46 | static int dumb_switch; | ||
47 | |||
48 | /* The next two are only used in cpmac_probe, so there's no point in changing them at runtime */ | ||
49 | module_param(debug_level, int, 0444); | ||
50 | module_param(dumb_switch, int, 0444); | ||
51 | |||
52 | MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable"); | ||
53 | MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus"); | ||
54 | |||
55 | #define CPMAC_VERSION "0.5.0" | ||
56 | /* stolen from net/ieee80211.h */ | ||
57 | #ifndef MAC_FMT | ||
58 | #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" | ||
59 | #define MAC_ARG(x) ((u8*)(x))[0], ((u8*)(x))[1], ((u8*)(x))[2], \ | ||
60 | ((u8*)(x))[3], ((u8*)(x))[4], ((u8*)(x))[5] | ||
61 | #endif | ||
62 | /* frame size + 802.1q tag */ | ||
63 | #define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4) | ||
64 | #define CPMAC_QUEUES 8 | ||
65 | |||
66 | /* Ethernet registers */ | ||
67 | #define CPMAC_TX_CONTROL 0x0004 | ||
68 | #define CPMAC_TX_TEARDOWN 0x0008 | ||
69 | #define CPMAC_RX_CONTROL 0x0014 | ||
70 | #define CPMAC_RX_TEARDOWN 0x0018 | ||
71 | #define CPMAC_MBP 0x0100 | ||
72 | # define MBP_RXPASSCRC 0x40000000 | ||
73 | # define MBP_RXQOS 0x20000000 | ||
74 | # define MBP_RXNOCHAIN 0x10000000 | ||
75 | # define MBP_RXCMF 0x01000000 | ||
76 | # define MBP_RXSHORT 0x00800000 | ||
77 | # define MBP_RXCEF 0x00400000 | ||
78 | # define MBP_RXPROMISC 0x00200000 | ||
79 | # define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) | ||
80 | # define MBP_RXBCAST 0x00002000 | ||
81 | # define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) | ||
82 | # define MBP_RXMCAST 0x00000020 | ||
83 | # define MBP_MCASTCHAN(channel) ((channel) & 0x7) | ||
84 | #define CPMAC_UNICAST_ENABLE 0x0104 | ||
85 | #define CPMAC_UNICAST_CLEAR 0x0108 | ||
86 | #define CPMAC_MAX_LENGTH 0x010c | ||
87 | #define CPMAC_BUFFER_OFFSET 0x0110 | ||
88 | #define CPMAC_MAC_CONTROL 0x0160 | ||
89 | # define MAC_TXPTYPE 0x00000200 | ||
90 | # define MAC_TXPACE 0x00000040 | ||
91 | # define MAC_MII 0x00000020 | ||
92 | # define MAC_TXFLOW 0x00000010 | ||
93 | # define MAC_RXFLOW 0x00000008 | ||
94 | # define MAC_MTEST 0x00000004 | ||
95 | # define MAC_LOOPBACK 0x00000002 | ||
96 | # define MAC_FDX 0x00000001 | ||
97 | #define CPMAC_MAC_STATUS 0x0164 | ||
98 | # define MAC_STATUS_QOS 0x00000004 | ||
99 | # define MAC_STATUS_RXFLOW 0x00000002 | ||
100 | # define MAC_STATUS_TXFLOW 0x00000001 | ||
101 | #define CPMAC_TX_INT_ENABLE 0x0178 | ||
102 | #define CPMAC_TX_INT_CLEAR 0x017c | ||
103 | #define CPMAC_MAC_INT_VECTOR 0x0180 | ||
104 | # define MAC_INT_STATUS 0x00080000 | ||
105 | # define MAC_INT_HOST 0x00040000 | ||
106 | # define MAC_INT_RX 0x00020000 | ||
107 | # define MAC_INT_TX 0x00010000 | ||
108 | #define CPMAC_MAC_EOI_VECTOR 0x0184 | ||
109 | #define CPMAC_RX_INT_ENABLE 0x0198 | ||
110 | #define CPMAC_RX_INT_CLEAR 0x019c | ||
111 | #define CPMAC_MAC_INT_ENABLE 0x01a8 | ||
112 | #define CPMAC_MAC_INT_CLEAR 0x01ac | ||
113 | #define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4) | ||
114 | #define CPMAC_MAC_ADDR_MID 0x01d0 | ||
115 | #define CPMAC_MAC_ADDR_HI 0x01d4 | ||
116 | #define CPMAC_MAC_HASH_LO 0x01d8 | ||
117 | #define CPMAC_MAC_HASH_HI 0x01dc | ||
118 | #define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4) | ||
119 | #define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4) | ||
120 | #define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4) | ||
121 | #define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4) | ||
122 | #define CPMAC_REG_END 0x0680 | ||
123 | /* | ||
124 | * Rx/Tx statistics | ||
125 | * TODO: use some of them to fill stats in cpmac_stats() | ||
126 | */ | ||
127 | #define CPMAC_STATS_RX_GOOD 0x0200 | ||
128 | #define CPMAC_STATS_RX_BCAST 0x0204 | ||
129 | #define CPMAC_STATS_RX_MCAST 0x0208 | ||
130 | #define CPMAC_STATS_RX_PAUSE 0x020c | ||
131 | #define CPMAC_STATS_RX_CRC 0x0210 | ||
132 | #define CPMAC_STATS_RX_ALIGN 0x0214 | ||
133 | #define CPMAC_STATS_RX_OVER 0x0218 | ||
134 | #define CPMAC_STATS_RX_JABBER 0x021c | ||
135 | #define CPMAC_STATS_RX_UNDER 0x0220 | ||
136 | #define CPMAC_STATS_RX_FRAG 0x0224 | ||
137 | #define CPMAC_STATS_RX_FILTER 0x0228 | ||
138 | #define CPMAC_STATS_RX_QOSFILTER 0x022c | ||
139 | #define CPMAC_STATS_RX_OCTETS 0x0230 | ||
140 | |||
141 | #define CPMAC_STATS_TX_GOOD 0x0234 | ||
142 | #define CPMAC_STATS_TX_BCAST 0x0238 | ||
143 | #define CPMAC_STATS_TX_MCAST 0x023c | ||
144 | #define CPMAC_STATS_TX_PAUSE 0x0240 | ||
145 | #define CPMAC_STATS_TX_DEFER 0x0244 | ||
146 | #define CPMAC_STATS_TX_COLLISION 0x0248 | ||
147 | #define CPMAC_STATS_TX_SINGLECOLL 0x024c | ||
148 | #define CPMAC_STATS_TX_MULTICOLL 0x0250 | ||
149 | #define CPMAC_STATS_TX_EXCESSCOLL 0x0254 | ||
150 | #define CPMAC_STATS_TX_LATECOLL 0x0258 | ||
151 | #define CPMAC_STATS_TX_UNDERRUN 0x025c | ||
152 | #define CPMAC_STATS_TX_CARRIERSENSE 0x0260 | ||
153 | #define CPMAC_STATS_TX_OCTETS 0x0264 | ||
154 | |||
155 | #define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg))) | ||
156 | #define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \ | ||
157 | (reg))) | ||
158 | |||
159 | /* MDIO bus */ | ||
160 | #define CPMAC_MDIO_VERSION 0x0000 | ||
161 | #define CPMAC_MDIO_CONTROL 0x0004 | ||
162 | # define MDIOC_IDLE 0x80000000 | ||
163 | # define MDIOC_ENABLE 0x40000000 | ||
164 | # define MDIOC_PREAMBLE 0x00100000 | ||
165 | # define MDIOC_FAULT 0x00080000 | ||
166 | # define MDIOC_FAULTDETECT 0x00040000 | ||
167 | # define MDIOC_INTTEST 0x00020000 | ||
168 | # define MDIOC_CLKDIV(div) ((div) & 0xff) | ||
169 | #define CPMAC_MDIO_ALIVE 0x0008 | ||
170 | #define CPMAC_MDIO_LINK 0x000c | ||
171 | #define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8) | ||
172 | # define MDIO_BUSY 0x80000000 | ||
173 | # define MDIO_WRITE 0x40000000 | ||
174 | # define MDIO_REG(reg) (((reg) & 0x1f) << 21) | ||
175 | # define MDIO_PHY(phy) (((phy) & 0x1f) << 16) | ||
176 | # define MDIO_DATA(data) ((data) & 0xffff) | ||
177 | #define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8) | ||
178 | # define PHYSEL_LINKSEL 0x00000040 | ||
179 | # define PHYSEL_LINKINT 0x00000020 | ||
180 | |||
181 | struct cpmac_desc { | ||
182 | u32 hw_next; | ||
183 | u32 hw_data; | ||
184 | u16 buflen; | ||
185 | u16 bufflags; | ||
186 | u16 datalen; | ||
187 | u16 dataflags; | ||
188 | #define CPMAC_SOP 0x8000 | ||
189 | #define CPMAC_EOP 0x4000 | ||
190 | #define CPMAC_OWN 0x2000 | ||
191 | #define CPMAC_EOQ 0x1000 | ||
192 | struct sk_buff *skb; | ||
193 | struct cpmac_desc *next; | ||
194 | dma_addr_t mapping; | ||
195 | dma_addr_t data_mapping; | ||
196 | }; | ||
197 | |||
198 | struct cpmac_priv { | ||
199 | spinlock_t lock; | ||
200 | spinlock_t rx_lock; | ||
201 | struct cpmac_desc *rx_head; | ||
202 | int ring_size; | ||
203 | struct cpmac_desc *desc_ring; | ||
204 | dma_addr_t dma_ring; | ||
205 | void __iomem *regs; | ||
206 | struct mii_bus *mii_bus; | ||
207 | struct phy_device *phy; | ||
208 | char phy_name[BUS_ID_SIZE]; | ||
209 | int oldlink, oldspeed, oldduplex; | ||
210 | u32 msg_enable; | ||
211 | struct net_device *dev; | ||
212 | struct work_struct reset_work; | ||
213 | struct platform_device *pdev; | ||
214 | }; | ||
215 | |||
216 | static irqreturn_t cpmac_irq(int, void *); | ||
217 | static void cpmac_hw_start(struct net_device *dev); | ||
218 | static void cpmac_hw_stop(struct net_device *dev); | ||
219 | static int cpmac_stop(struct net_device *dev); | ||
220 | static int cpmac_open(struct net_device *dev); | ||
221 | |||
222 | static void cpmac_dump_regs(struct net_device *dev) | ||
223 | { | ||
224 | int i; | ||
225 | struct cpmac_priv *priv = netdev_priv(dev); | ||
226 | for (i = 0; i < CPMAC_REG_END; i += 4) { | ||
227 | if (i % 16 == 0) { | ||
228 | if (i) | ||
229 | printk("\n"); | ||
230 | printk(KERN_DEBUG "%s: reg[%p]:", dev->name, | ||
231 | priv->regs + i); | ||
232 | } | ||
233 | printk(" %08x", cpmac_read(priv->regs, i)); | ||
234 | } | ||
235 | printk("\n"); | ||
236 | } | ||
237 | |||
238 | static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) | ||
239 | { | ||
240 | int i; | ||
241 | printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc); | ||
242 | for (i = 0; i < sizeof(*desc) / 4; i++) | ||
243 | printk(" %08x", ((u32 *)desc)[i]); | ||
244 | printk("\n"); | ||
245 | } | ||
246 | |||
247 | static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) | ||
248 | { | ||
249 | int i; | ||
250 | printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len); | ||
251 | for (i = 0; i < skb->len; i++) { | ||
252 | if (i % 16 == 0) { | ||
253 | if (i) | ||
254 | printk("\n"); | ||
255 | printk(KERN_DEBUG "%s: data[%p]:", dev->name, | ||
256 | skb->data + i); | ||
257 | } | ||
258 | printk(" %02x", ((u8 *)skb->data)[i]); | ||
259 | } | ||
260 | printk("\n"); | ||
261 | } | ||
262 | |||
263 | static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) | ||
264 | { | ||
265 | u32 val; | ||
266 | |||
267 | while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) | ||
268 | cpu_relax(); | ||
269 | cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) | | ||
270 | MDIO_PHY(phy_id)); | ||
271 | while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) | ||
272 | cpu_relax(); | ||
273 | return MDIO_DATA(val); | ||
274 | } | ||
275 | |||
276 | static int cpmac_mdio_write(struct mii_bus *bus, int phy_id, | ||
277 | int reg, u16 val) | ||
278 | { | ||
279 | while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY) | ||
280 | cpu_relax(); | ||
281 | cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | | ||
282 | MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static int cpmac_mdio_reset(struct mii_bus *bus) | ||
287 | { | ||
288 | ar7_device_reset(AR7_RESET_BIT_MDIO); | ||
289 | cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | | ||
290 | MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1)); | ||
291 | return 0; | ||
292 | } | ||
293 | |||
294 | static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, }; | ||
295 | |||
296 | static struct mii_bus cpmac_mii = { | ||
297 | .name = "cpmac-mii", | ||
298 | .read = cpmac_mdio_read, | ||
299 | .write = cpmac_mdio_write, | ||
300 | .reset = cpmac_mdio_reset, | ||
301 | .irq = mii_irqs, | ||
302 | }; | ||
303 | |||
304 | static int cpmac_config(struct net_device *dev, struct ifmap *map) | ||
305 | { | ||
306 | if (dev->flags & IFF_UP) | ||
307 | return -EBUSY; | ||
308 | |||
309 | /* Don't allow changing the I/O address */ | ||
310 | if (map->base_addr != dev->base_addr) | ||
311 | return -EOPNOTSUPP; | ||
312 | |||
313 | /* ignore other fields */ | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static void cpmac_set_multicast_list(struct net_device *dev) | ||
318 | { | ||
319 | struct dev_mc_list *iter; | ||
320 | int i; | ||
321 | u8 tmp; | ||
322 | u32 mbp, bit, hash[2] = { 0, }; | ||
323 | struct cpmac_priv *priv = netdev_priv(dev); | ||
324 | |||
325 | mbp = cpmac_read(priv->regs, CPMAC_MBP); | ||
326 | if (dev->flags & IFF_PROMISC) { | ||
327 | cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) | | ||
328 | MBP_RXPROMISC); | ||
329 | } else { | ||
330 | cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC); | ||
331 | if (dev->flags & IFF_ALLMULTI) { | ||
332 | /* enable all multicast mode */ | ||
333 | cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); | ||
334 | cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); | ||
335 | } else { | ||
336 | /* | ||
337 | * cpmac uses some strange mac address hashing | ||
338 | * (not crc32) | ||
339 | */ | ||
340 | for (i = 0, iter = dev->mc_list; i < dev->mc_count; | ||
341 | i++, iter = iter->next) { | ||
342 | bit = 0; | ||
343 | tmp = iter->dmi_addr[0]; | ||
344 | bit ^= (tmp >> 2) ^ (tmp << 4); | ||
345 | tmp = iter->dmi_addr[1]; | ||
346 | bit ^= (tmp >> 4) ^ (tmp << 2); | ||
347 | tmp = iter->dmi_addr[2]; | ||
348 | bit ^= (tmp >> 6) ^ tmp; | ||
349 | tmp = iter->dmi_addr[3]; | ||
350 | bit ^= (tmp >> 2) ^ (tmp << 4); | ||
351 | tmp = iter->dmi_addr[4]; | ||
352 | bit ^= (tmp >> 4) ^ (tmp << 2); | ||
353 | tmp = iter->dmi_addr[5]; | ||
354 | bit ^= (tmp >> 6) ^ tmp; | ||
355 | bit &= 0x3f; | ||
356 | hash[bit / 32] |= 1 << (bit % 32); | ||
357 | } | ||
358 | |||
359 | cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]); | ||
360 | cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]); | ||
361 | } | ||
362 | } | ||
363 | } | ||
364 | |||
365 | static struct sk_buff *cpmac_rx_one(struct net_device *dev, | ||
366 | struct cpmac_priv *priv, | ||
367 | struct cpmac_desc *desc) | ||
368 | { | ||
369 | struct sk_buff *skb, *result = NULL; | ||
370 | |||
371 | if (unlikely(netif_msg_hw(priv))) | ||
372 | cpmac_dump_desc(dev, desc); | ||
373 | cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); | ||
374 | if (unlikely(!desc->datalen)) { | ||
375 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
376 | printk(KERN_WARNING "%s: rx: spurious interrupt\n", | ||
377 | dev->name); | ||
378 | return NULL; | ||
379 | } | ||
380 | |||
381 | skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE); | ||
382 | if (likely(skb)) { | ||
383 | skb_reserve(skb, 2); | ||
384 | skb_put(desc->skb, desc->datalen); | ||
385 | desc->skb->protocol = eth_type_trans(desc->skb, dev); | ||
386 | desc->skb->ip_summed = CHECKSUM_NONE; | ||
387 | dev->stats.rx_packets++; | ||
388 | dev->stats.rx_bytes += desc->datalen; | ||
389 | result = desc->skb; | ||
390 | dma_unmap_single(&dev->dev, desc->data_mapping, CPMAC_SKB_SIZE, | ||
391 | DMA_FROM_DEVICE); | ||
392 | desc->skb = skb; | ||
393 | desc->data_mapping = dma_map_single(&dev->dev, skb->data, | ||
394 | CPMAC_SKB_SIZE, | ||
395 | DMA_FROM_DEVICE); | ||
396 | desc->hw_data = (u32)desc->data_mapping; | ||
397 | if (unlikely(netif_msg_pktdata(priv))) { | ||
398 | printk(KERN_DEBUG "%s: received packet:\n", dev->name); | ||
399 | cpmac_dump_skb(dev, result); | ||
400 | } | ||
401 | } else { | ||
402 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
403 | printk(KERN_WARNING | ||
404 | "%s: low on skbs, dropping packet\n", dev->name); | ||
405 | dev->stats.rx_dropped++; | ||
406 | } | ||
407 | |||
408 | desc->buflen = CPMAC_SKB_SIZE; | ||
409 | desc->dataflags = CPMAC_OWN; | ||
410 | |||
411 | return result; | ||
412 | } | ||
413 | |||
414 | static int cpmac_poll(struct net_device *dev, int *budget) | ||
415 | { | ||
416 | struct sk_buff *skb; | ||
417 | struct cpmac_desc *desc; | ||
418 | int received = 0, quota = min(dev->quota, *budget); | ||
419 | struct cpmac_priv *priv = netdev_priv(dev); | ||
420 | |||
421 | spin_lock(&priv->rx_lock); | ||
422 | if (unlikely(!priv->rx_head)) { | ||
423 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
424 | printk(KERN_WARNING "%s: rx: polling, but no queue\n", | ||
425 | dev->name); | ||
426 | netif_rx_complete(dev); | ||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | desc = priv->rx_head; | ||
431 | while ((received < quota) && ((desc->dataflags & CPMAC_OWN) == 0)) { | ||
432 | skb = cpmac_rx_one(dev, priv, desc); | ||
433 | if (likely(skb)) { | ||
434 | netif_receive_skb(skb); | ||
435 | received++; | ||
436 | } | ||
437 | desc = desc->next; | ||
438 | } | ||
439 | |||
440 | priv->rx_head = desc; | ||
441 | spin_unlock(&priv->rx_lock); | ||
442 | *budget -= received; | ||
443 | dev->quota -= received; | ||
444 | if (unlikely(netif_msg_rx_status(priv))) | ||
445 | printk(KERN_DEBUG "%s: poll processed %d packets\n", dev->name, | ||
446 | received); | ||
447 | if (desc->dataflags & CPMAC_OWN) { | ||
448 | netif_rx_complete(dev); | ||
449 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); | ||
450 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); | ||
451 | return 0; | ||
452 | } | ||
453 | |||
454 | return 1; | ||
455 | } | ||
456 | |||
457 | static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
458 | { | ||
459 | int queue, len; | ||
460 | struct cpmac_desc *desc; | ||
461 | struct cpmac_priv *priv = netdev_priv(dev); | ||
462 | |||
463 | if (unlikely(skb_padto(skb, ETH_ZLEN))) { | ||
464 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
465 | printk(KERN_WARNING | ||
466 | "%s: tx: padding failed, dropping\n", dev->name); | ||
467 | spin_lock(&priv->lock); | ||
468 | dev->stats.tx_dropped++; | ||
469 | spin_unlock(&priv->lock); | ||
470 | return -ENOMEM; | ||
471 | } | ||
472 | |||
473 | len = max(skb->len, ETH_ZLEN); | ||
474 | queue = skb->queue_mapping; | ||
475 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
476 | netif_stop_subqueue(dev, queue); | ||
477 | #else | ||
478 | netif_stop_queue(dev); | ||
479 | #endif | ||
480 | |||
481 | desc = &priv->desc_ring[queue]; | ||
482 | if (unlikely(desc->dataflags & CPMAC_OWN)) { | ||
483 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
484 | printk(KERN_WARNING "%s: tx dma ring full, dropping\n", | ||
485 | dev->name); | ||
486 | spin_lock(&priv->lock); | ||
487 | dev->stats.tx_dropped++; | ||
488 | spin_unlock(&priv->lock); | ||
489 | dev_kfree_skb_any(skb); | ||
490 | return -ENOMEM; | ||
491 | } | ||
492 | |||
493 | spin_lock(&priv->lock); | ||
494 | dev->trans_start = jiffies; | ||
495 | spin_unlock(&priv->lock); | ||
496 | desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; | ||
497 | desc->skb = skb; | ||
498 | desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, | ||
499 | DMA_TO_DEVICE); | ||
500 | desc->hw_data = (u32)desc->data_mapping; | ||
501 | desc->datalen = len; | ||
502 | desc->buflen = len; | ||
503 | if (unlikely(netif_msg_tx_queued(priv))) | ||
504 | printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb, | ||
505 | skb->len); | ||
506 | if (unlikely(netif_msg_hw(priv))) | ||
507 | cpmac_dump_desc(dev, desc); | ||
508 | if (unlikely(netif_msg_pktdata(priv))) | ||
509 | cpmac_dump_skb(dev, skb); | ||
510 | cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); | ||
511 | |||
512 | return 0; | ||
513 | } | ||
514 | |||
515 | static void cpmac_end_xmit(struct net_device *dev, int queue) | ||
516 | { | ||
517 | struct cpmac_desc *desc; | ||
518 | struct cpmac_priv *priv = netdev_priv(dev); | ||
519 | |||
520 | desc = &priv->desc_ring[queue]; | ||
521 | cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); | ||
522 | if (likely(desc->skb)) { | ||
523 | spin_lock(&priv->lock); | ||
524 | dev->stats.tx_packets++; | ||
525 | dev->stats.tx_bytes += desc->skb->len; | ||
526 | spin_unlock(&priv->lock); | ||
527 | dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, | ||
528 | DMA_TO_DEVICE); | ||
529 | |||
530 | if (unlikely(netif_msg_tx_done(priv))) | ||
531 | printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name, | ||
532 | desc->skb, desc->skb->len); | ||
533 | |||
534 | dev_kfree_skb_irq(desc->skb); | ||
535 | desc->skb = NULL; | ||
536 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
537 | if (netif_subqueue_stopped(dev, queue)) | ||
538 | netif_wake_subqueue(dev, queue); | ||
539 | #else | ||
540 | if (netif_queue_stopped(dev)) | ||
541 | netif_wake_queue(dev); | ||
542 | #endif | ||
543 | } else { | ||
544 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
545 | printk(KERN_WARNING | ||
546 | "%s: end_xmit: spurious interrupt\n", dev->name); | ||
547 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
548 | if (netif_subqueue_stopped(dev, queue)) | ||
549 | netif_wake_subqueue(dev, queue); | ||
550 | #else | ||
551 | if (netif_queue_stopped(dev)) | ||
552 | netif_wake_queue(dev); | ||
553 | #endif | ||
554 | } | ||
555 | } | ||
556 | |||
557 | static void cpmac_hw_stop(struct net_device *dev) | ||
558 | { | ||
559 | int i; | ||
560 | struct cpmac_priv *priv = netdev_priv(dev); | ||
561 | struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; | ||
562 | |||
563 | ar7_device_reset(pdata->reset_bit); | ||
564 | cpmac_write(priv->regs, CPMAC_RX_CONTROL, | ||
565 | cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1); | ||
566 | cpmac_write(priv->regs, CPMAC_TX_CONTROL, | ||
567 | cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1); | ||
568 | for (i = 0; i < 8; i++) { | ||
569 | cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); | ||
570 | cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); | ||
571 | } | ||
572 | cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); | ||
573 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); | ||
574 | cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); | ||
575 | cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); | ||
576 | cpmac_write(priv->regs, CPMAC_MAC_CONTROL, | ||
577 | cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII); | ||
578 | } | ||
579 | |||
580 | static void cpmac_hw_start(struct net_device *dev) | ||
581 | { | ||
582 | int i; | ||
583 | struct cpmac_priv *priv = netdev_priv(dev); | ||
584 | struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data; | ||
585 | |||
586 | ar7_device_reset(pdata->reset_bit); | ||
587 | for (i = 0; i < 8; i++) { | ||
588 | cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); | ||
589 | cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0); | ||
590 | } | ||
591 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping); | ||
592 | |||
593 | cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST | | ||
594 | MBP_RXMCAST); | ||
595 | cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0); | ||
596 | for (i = 0; i < 8; i++) | ||
597 | cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]); | ||
598 | cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]); | ||
599 | cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] | | ||
600 | (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) | | ||
601 | (dev->dev_addr[3] << 24)); | ||
602 | cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE); | ||
603 | cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff); | ||
604 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff); | ||
605 | cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff); | ||
606 | cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); | ||
607 | cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1); | ||
608 | cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); | ||
609 | cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff); | ||
610 | cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); | ||
611 | |||
612 | cpmac_write(priv->regs, CPMAC_RX_CONTROL, | ||
613 | cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1); | ||
614 | cpmac_write(priv->regs, CPMAC_TX_CONTROL, | ||
615 | cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1); | ||
616 | cpmac_write(priv->regs, CPMAC_MAC_CONTROL, | ||
617 | cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII | | ||
618 | MAC_FDX); | ||
619 | } | ||
620 | |||
621 | static void cpmac_clear_rx(struct net_device *dev) | ||
622 | { | ||
623 | struct cpmac_priv *priv = netdev_priv(dev); | ||
624 | struct cpmac_desc *desc; | ||
625 | int i; | ||
626 | if (unlikely(!priv->rx_head)) | ||
627 | return; | ||
628 | desc = priv->rx_head; | ||
629 | for (i = 0; i < priv->ring_size; i++) { | ||
630 | if ((desc->dataflags & CPMAC_OWN) == 0) { | ||
631 | if (netif_msg_rx_err(priv) && net_ratelimit()) | ||
632 | printk(KERN_WARNING "%s: packet dropped\n", | ||
633 | dev->name); | ||
634 | if (unlikely(netif_msg_hw(priv))) | ||
635 | cpmac_dump_desc(dev, desc); | ||
636 | desc->dataflags = CPMAC_OWN; | ||
637 | dev->stats.rx_dropped++; | ||
638 | } | ||
639 | desc = desc->next; | ||
640 | } | ||
641 | } | ||
642 | |||
643 | static void cpmac_clear_tx(struct net_device *dev) | ||
644 | { | ||
645 | struct cpmac_priv *priv = netdev_priv(dev); | ||
646 | int i; | ||
647 | if (unlikely(!priv->desc_ring)) | ||
648 | return; | ||
649 | for (i = 0; i < CPMAC_QUEUES; i++) | ||
650 | if (priv->desc_ring[i].skb) { | ||
651 | dev_kfree_skb_any(priv->desc_ring[i].skb); | ||
652 | if (netif_subqueue_stopped(dev, i)) | ||
653 | netif_wake_subqueue(dev, i); | ||
654 | } | ||
655 | } | ||
656 | |||
657 | static void cpmac_hw_error(struct work_struct *work) | ||
658 | { | ||
659 | struct cpmac_priv *priv = | ||
660 | container_of(work, struct cpmac_priv, reset_work); | ||
661 | |||
662 | spin_lock(&priv->rx_lock); | ||
663 | cpmac_clear_rx(priv->dev); | ||
664 | spin_unlock(&priv->rx_lock); | ||
665 | cpmac_clear_tx(priv->dev); | ||
666 | cpmac_hw_start(priv->dev); | ||
667 | netif_start_queue(priv->dev); | ||
668 | } | ||
669 | |||
670 | static irqreturn_t cpmac_irq(int irq, void *dev_id) | ||
671 | { | ||
672 | struct net_device *dev = dev_id; | ||
673 | struct cpmac_priv *priv; | ||
674 | int queue; | ||
675 | u32 status; | ||
676 | |||
677 | if (!dev) | ||
678 | return IRQ_NONE; | ||
679 | |||
680 | priv = netdev_priv(dev); | ||
681 | |||
682 | status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); | ||
683 | |||
684 | if (unlikely(netif_msg_intr(priv))) | ||
685 | printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name, | ||
686 | status); | ||
687 | |||
688 | if (status & MAC_INT_TX) | ||
689 | cpmac_end_xmit(dev, (status & 7)); | ||
690 | |||
691 | if (status & MAC_INT_RX) { | ||
692 | queue = (status >> 8) & 7; | ||
693 | netif_rx_schedule(dev); | ||
694 | cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue); | ||
695 | } | ||
696 | |||
697 | cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); | ||
698 | |||
699 | if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) { | ||
700 | if (netif_msg_drv(priv) && net_ratelimit()) | ||
701 | printk(KERN_ERR "%s: hw error, resetting...\n", | ||
702 | dev->name); | ||
703 | netif_stop_queue(dev); | ||
704 | cpmac_hw_stop(dev); | ||
705 | schedule_work(&priv->reset_work); | ||
706 | if (unlikely(netif_msg_hw(priv))) | ||
707 | cpmac_dump_regs(dev); | ||
708 | } | ||
709 | |||
710 | return IRQ_HANDLED; | ||
711 | } | ||
712 | |||
713 | static void cpmac_tx_timeout(struct net_device *dev) | ||
714 | { | ||
715 | struct cpmac_priv *priv = netdev_priv(dev); | ||
716 | int i; | ||
717 | |||
718 | spin_lock(&priv->lock); | ||
719 | dev->stats.tx_errors++; | ||
720 | spin_unlock(&priv->lock); | ||
721 | if (netif_msg_tx_err(priv) && net_ratelimit()) | ||
722 | printk(KERN_WARNING "%s: transmit timeout\n", dev->name); | ||
723 | /* | ||
724 | * FIXME: waking up a random queue is not the best thing to | ||
725 | * do... on the other hand, why did we get here at all? | ||
726 | */ | ||
727 | #ifdef CONFIG_NETDEVICES_MULTIQUEUE | ||
728 | for (i = 0; i < CPMAC_QUEUES; i++) | ||
729 | if (priv->desc_ring[i].skb) { | ||
730 | dev_kfree_skb_any(priv->desc_ring[i].skb); | ||
731 | netif_wake_subqueue(dev, i); | ||
732 | break; | ||
733 | } | ||
734 | #else | ||
735 | if (priv->desc_ring[0].skb) | ||
736 | dev_kfree_skb_any(priv->desc_ring[0].skb); | ||
737 | netif_wake_queue(dev); | ||
738 | #endif | ||
739 | } | ||
740 | |||
741 | static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
742 | { | ||
743 | struct cpmac_priv *priv = netdev_priv(dev); | ||
744 | if (!(netif_running(dev))) | ||
745 | return -EINVAL; | ||
746 | if (!priv->phy) | ||
747 | return -EINVAL; | ||
748 | if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) || | ||
749 | (cmd == SIOCSMIIREG)) | ||
750 | return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd); | ||
751 | |||
752 | return -EOPNOTSUPP; | ||
753 | } | ||
754 | |||
755 | static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
756 | { | ||
757 | struct cpmac_priv *priv = netdev_priv(dev); | ||
758 | |||
759 | if (priv->phy) | ||
760 | return phy_ethtool_gset(priv->phy, cmd); | ||
761 | |||
762 | return -EINVAL; | ||
763 | } | ||
764 | |||
765 | static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
766 | { | ||
767 | struct cpmac_priv *priv = netdev_priv(dev); | ||
768 | |||
769 | if (!capable(CAP_NET_ADMIN)) | ||
770 | return -EPERM; | ||
771 | |||
772 | if (priv->phy) | ||
773 | return phy_ethtool_sset(priv->phy, cmd); | ||
774 | |||
775 | return -EINVAL; | ||
776 | } | ||
777 | |||
778 | static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) | ||
779 | { | ||
780 | struct cpmac_priv *priv = netdev_priv(dev); | ||
781 | |||
782 | ring->rx_max_pending = 1024; | ||
783 | ring->rx_mini_max_pending = 1; | ||
784 | ring->rx_jumbo_max_pending = 1; | ||
785 | ring->tx_max_pending = 1; | ||
786 | |||
787 | ring->rx_pending = priv->ring_size; | ||
788 | ring->rx_mini_pending = 1; | ||
789 | ring->rx_jumbo_pending = 1; | ||
790 | ring->tx_pending = 1; | ||
791 | } | ||
792 | |||
793 | static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ring) | ||
794 | { | ||
795 | struct cpmac_priv *priv = netdev_priv(dev); | ||
796 | |||
797 | if (dev->flags & IFF_UP) | ||
798 | return -EBUSY; | ||
799 | priv->ring_size = ring->rx_pending; | ||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static void cpmac_get_drvinfo(struct net_device *dev, | ||
804 | struct ethtool_drvinfo *info) | ||
805 | { | ||
806 | strcpy(info->driver, "cpmac"); | ||
807 | strcpy(info->version, CPMAC_VERSION); | ||
808 | info->fw_version[0] = '\0'; | ||
809 | sprintf(info->bus_info, "%s", "cpmac"); | ||
810 | info->regdump_len = 0; | ||
811 | } | ||
812 | |||
813 | static const struct ethtool_ops cpmac_ethtool_ops = { | ||
814 | .get_settings = cpmac_get_settings, | ||
815 | .set_settings = cpmac_set_settings, | ||
816 | .get_drvinfo = cpmac_get_drvinfo, | ||
817 | .get_link = ethtool_op_get_link, | ||
818 | .get_ringparam = cpmac_get_ringparam, | ||
819 | .set_ringparam = cpmac_set_ringparam, | ||
820 | }; | ||
821 | |||
822 | static void cpmac_adjust_link(struct net_device *dev) | ||
823 | { | ||
824 | struct cpmac_priv *priv = netdev_priv(dev); | ||
825 | int new_state = 0; | ||
826 | |||
827 | spin_lock(&priv->lock); | ||
828 | if (priv->phy->link) { | ||
829 | netif_start_queue(dev); | ||
830 | if (priv->phy->duplex != priv->oldduplex) { | ||
831 | new_state = 1; | ||
832 | priv->oldduplex = priv->phy->duplex; | ||
833 | } | ||
834 | |||
835 | if (priv->phy->speed != priv->oldspeed) { | ||
836 | new_state = 1; | ||
837 | priv->oldspeed = priv->phy->speed; | ||
838 | } | ||
839 | |||
840 | if (!priv->oldlink) { | ||
841 | new_state = 1; | ||
842 | priv->oldlink = 1; | ||
843 | netif_schedule(dev); | ||
844 | } | ||
845 | } else if (priv->oldlink) { | ||
846 | netif_stop_queue(dev); | ||
847 | new_state = 1; | ||
848 | priv->oldlink = 0; | ||
849 | priv->oldspeed = 0; | ||
850 | priv->oldduplex = -1; | ||
851 | } | ||
852 | |||
853 | if (new_state && netif_msg_link(priv) && net_ratelimit()) | ||
854 | phy_print_status(priv->phy); | ||
855 | |||
856 | spin_unlock(&priv->lock); | ||
857 | } | ||
858 | |||
859 | static int cpmac_open(struct net_device *dev) | ||
860 | { | ||
861 | int i, size, res; | ||
862 | struct cpmac_priv *priv = netdev_priv(dev); | ||
863 | struct resource *mem; | ||
864 | struct cpmac_desc *desc; | ||
865 | struct sk_buff *skb; | ||
866 | |||
867 | priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, | ||
868 | 0, PHY_INTERFACE_MODE_MII); | ||
869 | if (IS_ERR(priv->phy)) { | ||
870 | if (netif_msg_drv(priv)) | ||
871 | printk(KERN_ERR "%s: Could not attach to PHY\n", | ||
872 | dev->name); | ||
873 | return PTR_ERR(priv->phy); | ||
874 | } | ||
875 | |||
876 | mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); | ||
877 | if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) { | ||
878 | if (netif_msg_drv(priv)) | ||
879 | printk(KERN_ERR "%s: failed to request registers\n", | ||
880 | dev->name); | ||
881 | res = -ENXIO; | ||
882 | goto fail_reserve; | ||
883 | } | ||
884 | |||
885 | priv->regs = ioremap(mem->start, mem->end - mem->start); | ||
886 | if (!priv->regs) { | ||
887 | if (netif_msg_drv(priv)) | ||
888 | printk(KERN_ERR "%s: failed to remap registers\n", | ||
889 | dev->name); | ||
890 | res = -ENXIO; | ||
891 | goto fail_remap; | ||
892 | } | ||
893 | |||
894 | size = priv->ring_size + CPMAC_QUEUES; | ||
895 | priv->desc_ring = dma_alloc_coherent(&dev->dev, | ||
896 | sizeof(struct cpmac_desc) * size, | ||
897 | &priv->dma_ring, | ||
898 | GFP_KERNEL); | ||
899 | if (!priv->desc_ring) { | ||
900 | res = -ENOMEM; | ||
901 | goto fail_alloc; | ||
902 | } | ||
903 | |||
904 | for (i = 0; i < size; i++) | ||
905 | priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; | ||
906 | |||
907 | priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; | ||
908 | for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { | ||
909 | skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE); | ||
910 | if (unlikely(!skb)) { | ||
911 | res = -ENOMEM; | ||
912 | goto fail_desc; | ||
913 | } | ||
914 | skb_reserve(skb, 2); | ||
915 | desc->skb = skb; | ||
916 | desc->data_mapping = dma_map_single(&dev->dev, skb->data, | ||
917 | CPMAC_SKB_SIZE, | ||
918 | DMA_FROM_DEVICE); | ||
919 | desc->hw_data = (u32)desc->data_mapping; | ||
920 | desc->buflen = CPMAC_SKB_SIZE; | ||
921 | desc->dataflags = CPMAC_OWN; | ||
922 | desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; | ||
923 | desc->hw_next = (u32)desc->next->mapping; | ||
924 | } | ||
925 | |||
926 | if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, | ||
927 | dev->name, dev))) { | ||
928 | if (netif_msg_drv(priv)) | ||
929 | printk(KERN_ERR "%s: failed to obtain irq\n", | ||
930 | dev->name); | ||
931 | goto fail_irq; | ||
932 | } | ||
933 | |||
934 | INIT_WORK(&priv->reset_work, cpmac_hw_error); | ||
935 | cpmac_hw_start(dev); | ||
936 | |||
937 | priv->phy->state = PHY_CHANGELINK; | ||
938 | phy_start(priv->phy); | ||
939 | |||
940 | return 0; | ||
941 | |||
942 | fail_irq: | ||
943 | fail_desc: | ||
944 | for (i = 0; i < priv->ring_size; i++) { | ||
945 | if (priv->rx_head[i].skb) { | ||
946 | dma_unmap_single(&dev->dev, | ||
947 | priv->rx_head[i].data_mapping, | ||
948 | CPMAC_SKB_SIZE, | ||
949 | DMA_FROM_DEVICE); | ||
950 | kfree_skb(priv->rx_head[i].skb); | ||
951 | } | ||
952 | } | ||
953 | fail_alloc: | ||
954 | if (priv->desc_ring) dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size, priv->desc_ring, priv->dma_ring); | ||
955 | iounmap(priv->regs); | ||
956 | |||
957 | fail_remap: | ||
958 | release_mem_region(mem->start, mem->end - mem->start); | ||
959 | |||
960 | fail_reserve: | ||
961 | phy_disconnect(priv->phy); | ||
962 | |||
963 | return res; | ||
964 | } | ||
965 | |||
966 | static int cpmac_stop(struct net_device *dev) | ||
967 | { | ||
968 | int i; | ||
969 | struct cpmac_priv *priv = netdev_priv(dev); | ||
970 | struct resource *mem; | ||
971 | |||
972 | netif_stop_queue(dev); | ||
973 | |||
974 | cancel_work_sync(&priv->reset_work); | ||
975 | phy_stop(priv->phy); | ||
976 | phy_disconnect(priv->phy); | ||
977 | priv->phy = NULL; | ||
978 | |||
979 | cpmac_hw_stop(dev); | ||
980 | |||
981 | for (i = 0; i < 8; i++) | ||
982 | cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0); | ||
983 | cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0); | ||
984 | cpmac_write(priv->regs, CPMAC_MBP, 0); | ||
985 | |||
986 | free_irq(dev->irq, dev); | ||
987 | iounmap(priv->regs); | ||
988 | mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); | ||
989 | release_mem_region(mem->start, mem->end - mem->start); | ||
990 | priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; | ||
991 | for (i = 0; i < priv->ring_size; i++) { | ||
992 | if (priv->rx_head[i].skb) { | ||
993 | dma_unmap_single(&dev->dev, | ||
994 | priv->rx_head[i].data_mapping, | ||
995 | CPMAC_SKB_SIZE, | ||
996 | DMA_FROM_DEVICE); | ||
997 | kfree_skb(priv->rx_head[i].skb); | ||
998 | } | ||
999 | } | ||
1000 | |||
1001 | dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * | ||
1002 | (CPMAC_QUEUES + priv->ring_size), | ||
1003 | priv->desc_ring, priv->dma_ring); | ||
1004 | return 0; | ||
1005 | } | ||
1006 | |||
1007 | static int external_switch; | ||
1008 | |||
1009 | static int __devinit cpmac_probe(struct platform_device *pdev) | ||
1010 | { | ||
1011 | int rc, phy_id; | ||
1012 | struct resource *mem; | ||
1013 | struct cpmac_priv *priv; | ||
1014 | struct net_device *dev; | ||
1015 | struct plat_cpmac_data *pdata; | ||
1016 | |||
1017 | pdata = pdev->dev.platform_data; | ||
1018 | |||
1019 | for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { | ||
1020 | if (!(pdata->phy_mask & (1 << phy_id))) | ||
1021 | continue; | ||
1022 | if (!cpmac_mii.phy_map[phy_id]) | ||
1023 | continue; | ||
1024 | break; | ||
1025 | } | ||
1026 | |||
1027 | if (phy_id == PHY_MAX_ADDR) { | ||
1028 | if (external_switch || dumb_switch) | ||
1029 | phy_id = 0; | ||
1030 | else { | ||
1031 | printk(KERN_ERR "cpmac: no PHY present\n"); | ||
1032 | return -ENODEV; | ||
1033 | } | ||
1034 | } | ||
1035 | |||
1036 | dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES); | ||
1037 | |||
1038 | if (!dev) { | ||
1039 | printk(KERN_ERR "cpmac: Unable to allocate net_device\n"); | ||
1040 | return -ENOMEM; | ||
1041 | } | ||
1042 | |||
1043 | platform_set_drvdata(pdev, dev); | ||
1044 | priv = netdev_priv(dev); | ||
1045 | |||
1046 | priv->pdev = pdev; | ||
1047 | mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | ||
1048 | if (!mem) { | ||
1049 | rc = -ENODEV; | ||
1050 | goto fail; | ||
1051 | } | ||
1052 | |||
1053 | dev->irq = platform_get_irq_byname(pdev, "irq"); | ||
1054 | |||
1055 | dev->open = cpmac_open; | ||
1056 | dev->stop = cpmac_stop; | ||
1057 | dev->set_config = cpmac_config; | ||
1058 | dev->hard_start_xmit = cpmac_start_xmit; | ||
1059 | dev->do_ioctl = cpmac_ioctl; | ||
1060 | dev->set_multicast_list = cpmac_set_multicast_list; | ||
1061 | dev->tx_timeout = cpmac_tx_timeout; | ||
1062 | dev->ethtool_ops = &cpmac_ethtool_ops; | ||
1063 | dev->poll = cpmac_poll; | ||
1064 | dev->weight = 64; | ||
1065 | dev->features |= NETIF_F_MULTI_QUEUE; | ||
1066 | |||
1067 | spin_lock_init(&priv->lock); | ||
1068 | spin_lock_init(&priv->rx_lock); | ||
1069 | priv->dev = dev; | ||
1070 | priv->ring_size = 64; | ||
1071 | priv->msg_enable = netif_msg_init(debug_level, 0xff); | ||
1072 | memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); | ||
1073 | if (phy_id == 31) { | ||
1074 | snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, | ||
1075 | cpmac_mii.id, phy_id); | ||
1076 | } else | ||
1077 | snprintf(priv->phy_name, BUS_ID_SIZE, "fixed@%d:%d", 100, 1); | ||
1078 | |||
1079 | if ((rc = register_netdev(dev))) { | ||
1080 | printk(KERN_ERR "cpmac: error %i registering device %s\n", rc, | ||
1081 | dev->name); | ||
1082 | goto fail; | ||
1083 | } | ||
1084 | |||
1085 | if (netif_msg_probe(priv)) { | ||
1086 | printk(KERN_INFO | ||
1087 | "cpmac: device %s (regs: %p, irq: %d, phy: %s, mac: " | ||
1088 | MAC_FMT ")\n", dev->name, (void *)mem->start, dev->irq, | ||
1089 | priv->phy_name, MAC_ARG(dev->dev_addr)); | ||
1090 | } | ||
1091 | return 0; | ||
1092 | |||
1093 | fail: | ||
1094 | free_netdev(dev); | ||
1095 | return rc; | ||
1096 | } | ||
1097 | |||
1098 | static int __devexit cpmac_remove(struct platform_device *pdev) | ||
1099 | { | ||
1100 | struct net_device *dev = platform_get_drvdata(pdev); | ||
1101 | unregister_netdev(dev); | ||
1102 | free_netdev(dev); | ||
1103 | return 0; | ||
1104 | } | ||
1105 | |||
1106 | static struct platform_driver cpmac_driver = { | ||
1107 | .driver.name = "cpmac", | ||
1108 | .probe = cpmac_probe, | ||
1109 | .remove = __devexit_p(cpmac_remove), | ||
1110 | }; | ||
1111 | |||
1112 | int __devinit cpmac_init(void) | ||
1113 | { | ||
1114 | u32 mask; | ||
1115 | int i, res; | ||
1116 | |||
1117 | cpmac_mii.priv = ioremap(AR7_REGS_MDIO, 256); | ||
1118 | |||
1119 | if (!cpmac_mii.priv) { | ||
1120 | printk(KERN_ERR "Can't ioremap mdio registers\n"); | ||
1121 | return -ENXIO; | ||
1122 | } | ||
1123 | |||
1124 | #warning FIXME: unhardcode gpio&reset bits | ||
1125 | ar7_gpio_disable(26); | ||
1126 | ar7_gpio_disable(27); | ||
1127 | ar7_device_reset(AR7_RESET_BIT_CPMAC_LO); | ||
1128 | ar7_device_reset(AR7_RESET_BIT_CPMAC_HI); | ||
1129 | ar7_device_reset(AR7_RESET_BIT_EPHY); | ||
1130 | |||
1131 | cpmac_mii.reset(&cpmac_mii); | ||
1132 | |||
1133 | for (i = 0; i < 300000; i++) | ||
1134 | if ((mask = cpmac_read(cpmac_mii.priv, CPMAC_MDIO_ALIVE))) | ||
1135 | break; | ||
1136 | else | ||
1137 | cpu_relax(); | ||
1138 | |||
1139 | mask &= 0x7fffffff; | ||
1140 | if (mask & (mask - 1)) { | ||
1141 | external_switch = 1; | ||
1142 | mask = 0; | ||
1143 | } | ||
1144 | |||
1145 | cpmac_mii.phy_mask = ~(mask | 0x80000000); | ||
1146 | |||
1147 | res = mdiobus_register(&cpmac_mii); | ||
1148 | if (res) | ||
1149 | goto fail_mii; | ||
1150 | |||
1151 | res = platform_driver_register(&cpmac_driver); | ||
1152 | if (res) | ||
1153 | goto fail_cpmac; | ||
1154 | |||
1155 | return 0; | ||
1156 | |||
1157 | fail_cpmac: | ||
1158 | mdiobus_unregister(&cpmac_mii); | ||
1159 | |||
1160 | fail_mii: | ||
1161 | iounmap(cpmac_mii.priv); | ||
1162 | |||
1163 | return res; | ||
1164 | } | ||
1165 | |||
1166 | void __devexit cpmac_exit(void) | ||
1167 | { | ||
1168 | platform_driver_unregister(&cpmac_driver); | ||
1169 | mdiobus_unregister(&cpmac_mii); | ||
1170 | iounmap(cpmac_mii.priv); | ||
1171 | } | ||
1172 | |||
1173 | module_init(cpmac_init); | ||
1174 | module_exit(cpmac_exit); | ||