author     Arnd Bergmann <arnd@arndb.de>    2018-03-09 11:43:16 -0500
committer  Arnd Bergmann <arnd@arndb.de>    2018-03-26 09:56:24 -0400
commit     3f2df32c9cb60f411a10725c12aa6e4555143d5f (patch)
tree       dfe9b5b6bbfde5854881dc7c92d9c300973ba80b
parent     8eb97ff5a4ec941da1976a99a74760dd9aea41e2 (diff)
net: remove cris etrax ethernet driver
The cris architecture is getting removed, so we don't need the
ethernet driver any more either.
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-rw-r--r--  drivers/net/Makefile       |    1
-rw-r--r--  drivers/net/cris/Makefile  |    1
-rw-r--r--  drivers/net/cris/eth_v10.c | 1742
3 files changed, 0 insertions, 1744 deletions
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 04c3b747812c..91e67e375dd4 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -40,7 +40,6 @@ obj-$(CONFIG_ARCNET) += arcnet/
 obj-$(CONFIG_DEV_APPLETALK) += appletalk/
 obj-$(CONFIG_CAIF) += caif/
 obj-$(CONFIG_CAN) += can/
-obj-$(CONFIG_ETRAX_ETHERNET) += cris/
 obj-$(CONFIG_NET_DSA) += dsa/
 obj-$(CONFIG_ETHERNET) += ethernet/
 obj-$(CONFIG_FDDI) += fddi/
diff --git a/drivers/net/cris/Makefile b/drivers/net/cris/Makefile
deleted file mode 100644
index b4e8932227b6..000000000000
--- a/drivers/net/cris/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_ETRAX_ARCH_V10) += eth_v10.o
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
deleted file mode 100644
index 8b1a859f5140..000000000000
--- a/drivers/net/cris/eth_v10.c
+++ /dev/null
@@ -1,1742 +0,0 @@
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * e100net.c: A network driver for the ETRAX 100LX network controller. | ||
4 | * | ||
5 | * Copyright (c) 1998-2002 Axis Communications AB. | ||
6 | * | ||
7 | * The outline of this driver comes from skeleton.c. | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/fcntl.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/ptrace.h> | ||
17 | #include <linux/ioport.h> | ||
18 | #include <linux/in.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/errno.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/bitops.h> | ||
24 | |||
25 | #include <linux/if.h> | ||
26 | #include <linux/mii.h> | ||
27 | #include <linux/netdevice.h> | ||
28 | #include <linux/etherdevice.h> | ||
29 | #include <linux/skbuff.h> | ||
30 | #include <linux/ethtool.h> | ||
31 | |||
32 | #include <arch/svinto.h>/* DMA and register descriptions */ | ||
33 | #include <asm/io.h> /* CRIS_LED_* I/O functions */ | ||
34 | #include <asm/irq.h> | ||
35 | #include <asm/dma.h> | ||
36 | #include <asm/ethernet.h> | ||
37 | #include <asm/cache.h> | ||
38 | #include <arch/io_interface_mux.h> | ||
39 | |||
40 | //#define ETHDEBUG | ||
41 | #define D(x) | ||
42 | |||
43 | /* | ||
44 | * The name of the card. Is used for messages and in the requests for | ||
45 | * io regions, irqs and dma channels | ||
46 | */ | ||
47 | |||
48 | static const char* cardname = "ETRAX 100LX built-in ethernet controller"; | ||
49 | |||
50 | /* A default ethernet address. Highlevel SW will set the real one later */ | ||
51 | |||
52 | static struct sockaddr default_mac = { | ||
53 | 0, | ||
54 | { 0x00, 0x40, 0x8C, 0xCD, 0x00, 0x00 } | ||
55 | }; | ||
56 | |||
57 | /* Information that needs to be kept for each board. */ | ||
58 | struct net_local { | ||
59 | struct mii_if_info mii_if; | ||
60 | |||
61 | /* Tx control lock. This protects the transmit buffer ring | ||
62 | * state along with the "tx full" state of the driver. This | ||
63 | * means all netif_queue flow control actions are protected | ||
64 | * by this lock as well. | ||
65 | */ | ||
66 | spinlock_t lock; | ||
67 | |||
68 | spinlock_t led_lock; /* Protect LED state */ | ||
69 | spinlock_t transceiver_lock; /* Protect transceiver state. */ | ||
70 | }; | ||
71 | |||
72 | typedef struct etrax_eth_descr | ||
73 | { | ||
74 | etrax_dma_descr descr; | ||
75 | struct sk_buff* skb; | ||
76 | } etrax_eth_descr; | ||
77 | |||
78 | /* Some transceivers require special handling */ | ||
79 | struct transceiver_ops | ||
80 | { | ||
81 | unsigned int oui; | ||
82 | void (*check_speed)(struct net_device* dev); | ||
83 | void (*check_duplex)(struct net_device* dev); | ||
84 | }; | ||
85 | |||
86 | /* Duplex settings */ | ||
87 | enum duplex | ||
88 | { | ||
89 | half, | ||
90 | full, | ||
91 | autoneg | ||
92 | }; | ||
93 | |||
94 | /* Dma descriptors etc. */ | ||
95 | |||
96 | #define MAX_MEDIA_DATA_SIZE 1522 | ||
97 | |||
98 | #define MIN_PACKET_LEN 46 | ||
99 | #define ETHER_HEAD_LEN 14 | ||
100 | |||
101 | /* | ||
102 | ** MDIO constants. | ||
103 | */ | ||
104 | #define MDIO_START 0x1 | ||
105 | #define MDIO_READ 0x2 | ||
106 | #define MDIO_WRITE 0x1 | ||
107 | #define MDIO_PREAMBLE 0xfffffffful | ||
108 | |||
109 | /* Broadcom specific */ | ||
110 | #define MDIO_AUX_CTRL_STATUS_REG 0x18 | ||
111 | #define MDIO_BC_FULL_DUPLEX_IND 0x1 | ||
112 | #define MDIO_BC_SPEED 0x2 | ||
113 | |||
114 | /* TDK specific */ | ||
115 | #define MDIO_TDK_DIAGNOSTIC_REG 18 | ||
116 | #define MDIO_TDK_DIAGNOSTIC_RATE 0x400 | ||
117 | #define MDIO_TDK_DIAGNOSTIC_DPLX 0x800 | ||
118 | |||
119 | /*Intel LXT972A specific*/ | ||
120 | #define MDIO_INT_STATUS_REG_2 0x0011 | ||
121 | #define MDIO_INT_FULL_DUPLEX_IND (1 << 9) | ||
122 | #define MDIO_INT_SPEED (1 << 14) | ||
123 | |||
124 | /* Network flash constants */ | ||
125 | #define NET_FLASH_TIME (HZ/50) /* 20 ms */ | ||
126 | #define NET_FLASH_PAUSE (HZ/100) /* 10 ms */ | ||
127 | #define NET_LINK_UP_CHECK_INTERVAL (2*HZ) /* 2 s */ | ||
128 | #define NET_DUPLEX_CHECK_INTERVAL (2*HZ) /* 2 s */ | ||
129 | |||
130 | #define NO_NETWORK_ACTIVITY 0 | ||
131 | #define NETWORK_ACTIVITY 1 | ||
132 | |||
133 | #define NBR_OF_RX_DESC 32 | ||
134 | #define NBR_OF_TX_DESC 16 | ||
135 | |||
136 | /* Large packets are sent directly to upper layers while small packets are */ | ||
137 | /* copied (to reduce memory waste). The following constant decides the breakpoint */ | ||
138 | #define RX_COPYBREAK 256 | ||
139 | |||
140 | /* Due to a chip bug we need to flush the cache when descriptors are returned */ | ||
141 | /* to the DMA. To decrease performance impact we return descriptors in chunks. */ | ||
142 | /* The following constant determines the number of descriptors to return. */ | ||
143 | #define RX_QUEUE_THRESHOLD NBR_OF_RX_DESC/2 | ||
144 | |||
145 | #define GET_BIT(bit,val) (((val) >> (bit)) & 0x01) | ||
146 | |||
147 | /* Define some macros to access ETRAX 100 registers */ | ||
148 | #define SETF(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \ | ||
149 | IO_FIELD_(reg##_, field##_, val) | ||
150 | #define SETS(var, reg, field, val) var = (var & ~IO_MASK_(reg##_, field##_)) | \ | ||
151 | IO_STATE_(reg##_, field##_, _##val) | ||
152 | |||
153 | static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor | ||
154 | to be processed */ | ||
155 | static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */ | ||
156 | |||
157 | static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32))); | ||
158 | |||
159 | static etrax_eth_descr* myFirstTxDesc; /* First packet not yet sent */ | ||
160 | static etrax_eth_descr* myLastTxDesc; /* End of send queue */ | ||
161 | static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */ | ||
162 | static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32))); | ||
163 | |||
164 | static unsigned int network_rec_config_shadow = 0; | ||
165 | |||
166 | static unsigned int network_tr_ctrl_shadow = 0; | ||
167 | |||
168 | /* Timers */ | ||
169 | static void e100_check_speed(struct timer_list *unused); | ||
170 | static void e100_clear_network_leds(struct timer_list *unused); | ||
171 | static void e100_check_duplex(struct timer_list *unused); | ||
172 | static DEFINE_TIMER(speed_timer, e100_check_speed); | ||
173 | static DEFINE_TIMER(clear_led_timer, e100_clear_network_leds); | ||
174 | static DEFINE_TIMER(duplex_timer, e100_check_duplex); | ||
175 | static struct net_device *timer_dev; | ||
176 | |||
177 | /* Network speed indication. */ | ||
178 | static int current_speed; /* Speed read from transceiver */ | ||
179 | static int current_speed_selection; /* Speed selected by user */ | ||
180 | static unsigned long led_next_time; | ||
181 | static int led_active; | ||
182 | static int rx_queue_len; | ||
183 | |||
184 | /* Duplex */ | ||
185 | static int full_duplex; | ||
186 | static enum duplex current_duplex; | ||
187 | |||
188 | /* Index to functions, as function prototypes. */ | ||
189 | |||
190 | static int etrax_ethernet_init(void); | ||
191 | |||
192 | static int e100_open(struct net_device *dev); | ||
193 | static int e100_set_mac_address(struct net_device *dev, void *addr); | ||
194 | static int e100_send_packet(struct sk_buff *skb, struct net_device *dev); | ||
195 | static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id); | ||
196 | static irqreturn_t e100nw_interrupt(int irq, void *dev_id); | ||
197 | static void e100_rx(struct net_device *dev); | ||
198 | static int e100_close(struct net_device *dev); | ||
199 | static int e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | ||
200 | static int e100_set_config(struct net_device* dev, struct ifmap* map); | ||
201 | static void e100_tx_timeout(struct net_device *dev); | ||
202 | static struct net_device_stats *e100_get_stats(struct net_device *dev); | ||
203 | static void set_multicast_list(struct net_device *dev); | ||
204 | static void e100_hardware_send_packet(struct net_local* np, char *buf, int length); | ||
205 | static void update_rx_stats(struct net_device_stats *); | ||
206 | static void update_tx_stats(struct net_device_stats *); | ||
207 | static int e100_probe_transceiver(struct net_device* dev); | ||
208 | |||
209 | static void e100_set_speed(struct net_device* dev, unsigned long speed); | ||
210 | static void e100_set_duplex(struct net_device* dev, enum duplex); | ||
211 | static void e100_negotiate(struct net_device* dev); | ||
212 | |||
213 | static int e100_get_mdio_reg(struct net_device *dev, int phy_id, int location); | ||
214 | static void e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value); | ||
215 | |||
216 | static void e100_send_mdio_cmd(unsigned short cmd, int write_cmd); | ||
217 | static void e100_send_mdio_bit(unsigned char bit); | ||
218 | static unsigned char e100_receive_mdio_bit(void); | ||
219 | static void e100_reset_transceiver(struct net_device* net); | ||
220 | |||
221 | static void e100_set_network_leds(int active); | ||
222 | |||
223 | static const struct ethtool_ops e100_ethtool_ops; | ||
224 | #if defined(CONFIG_ETRAX_NO_PHY) | ||
225 | static void dummy_check_speed(struct net_device* dev); | ||
226 | static void dummy_check_duplex(struct net_device* dev); | ||
227 | #else | ||
228 | static void broadcom_check_speed(struct net_device* dev); | ||
229 | static void broadcom_check_duplex(struct net_device* dev); | ||
230 | static void tdk_check_speed(struct net_device* dev); | ||
231 | static void tdk_check_duplex(struct net_device* dev); | ||
232 | static void intel_check_speed(struct net_device* dev); | ||
233 | static void intel_check_duplex(struct net_device* dev); | ||
234 | static void generic_check_speed(struct net_device* dev); | ||
235 | static void generic_check_duplex(struct net_device* dev); | ||
236 | #endif | ||
237 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
238 | static void e100_netpoll(struct net_device* dev); | ||
239 | #endif | ||
240 | |||
241 | static int autoneg_normal = 1; | ||
242 | |||
243 | struct transceiver_ops transceivers[] = | ||
244 | { | ||
245 | #if defined(CONFIG_ETRAX_NO_PHY) | ||
246 | {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */ | ||
247 | #else | ||
248 | {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */ | ||
249 | {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */ | ||
250 | {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */ | ||
251 | {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/ | ||
252 | {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */ | ||
253 | #endif | ||
254 | }; | ||
255 | |||
256 | struct transceiver_ops* transceiver = &transceivers[0]; | ||
257 | |||
258 | static const struct net_device_ops e100_netdev_ops = { | ||
259 | .ndo_open = e100_open, | ||
260 | .ndo_stop = e100_close, | ||
261 | .ndo_start_xmit = e100_send_packet, | ||
262 | .ndo_tx_timeout = e100_tx_timeout, | ||
263 | .ndo_get_stats = e100_get_stats, | ||
264 | .ndo_set_rx_mode = set_multicast_list, | ||
265 | .ndo_do_ioctl = e100_ioctl, | ||
266 | .ndo_set_mac_address = e100_set_mac_address, | ||
267 | .ndo_validate_addr = eth_validate_addr, | ||
268 | .ndo_set_config = e100_set_config, | ||
269 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
270 | .ndo_poll_controller = e100_netpoll, | ||
271 | #endif | ||
272 | }; | ||
273 | |||
274 | #define tx_done(dev) (*R_DMA_CH0_CMD == 0) | ||
275 | |||
276 | /* | ||
277 | * Check for a network adaptor of this type, and return '0' if one exists. | ||
278 | * If dev->base_addr == 0, probe all likely locations. | ||
279 | * If dev->base_addr == 1, always return failure. | ||
280 | * If dev->base_addr == 2, allocate space for the device and return success | ||
281 | * (detachable devices only). | ||
282 | */ | ||
283 | |||
284 | static int __init | ||
285 | etrax_ethernet_init(void) | ||
286 | { | ||
287 | struct net_device *dev; | ||
288 | struct net_local* np; | ||
289 | int i, err; | ||
290 | |||
291 | printk(KERN_INFO | ||
292 | "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n"); | ||
293 | |||
294 | if (cris_request_io_interface(if_eth, cardname)) { | ||
295 | printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n"); | ||
296 | return -EBUSY; | ||
297 | } | ||
298 | |||
299 | dev = alloc_etherdev(sizeof(struct net_local)); | ||
300 | if (!dev) | ||
301 | return -ENOMEM; | ||
302 | |||
303 | np = netdev_priv(dev); | ||
304 | |||
305 | /* we do our own locking */ | ||
306 | dev->features |= NETIF_F_LLTX; | ||
307 | |||
308 | dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */ | ||
309 | |||
310 | /* now setup our etrax specific stuff */ | ||
311 | |||
312 | dev->irq = NETWORK_DMA_RX_IRQ_NBR; /* we really use DMATX as well... */ | ||
313 | dev->dma = NETWORK_RX_DMA_NBR; | ||
314 | |||
315 | /* fill in our handlers so the network layer can talk to us in the future */ | ||
316 | |||
317 | dev->ethtool_ops = &e100_ethtool_ops; | ||
318 | dev->netdev_ops = &e100_netdev_ops; | ||
319 | |||
320 | spin_lock_init(&np->lock); | ||
321 | spin_lock_init(&np->led_lock); | ||
322 | spin_lock_init(&np->transceiver_lock); | ||
323 | |||
324 | /* Initialise the list of Etrax DMA-descriptors */ | ||
325 | |||
326 | /* Initialise receive descriptors */ | ||
327 | |||
328 | for (i = 0; i < NBR_OF_RX_DESC; i++) { | ||
329 | /* Allocate two extra cachelines to make sure that buffer used | ||
330 | * by DMA does not share cacheline with any other data (to | ||
331 | * avoid cache bug) | ||
332 | */ | ||
333 | RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); | ||
334 | if (!RxDescList[i].skb) | ||
335 | return -ENOMEM; | ||
336 | RxDescList[i].descr.ctrl = 0; | ||
337 | RxDescList[i].descr.sw_len = MAX_MEDIA_DATA_SIZE; | ||
338 | RxDescList[i].descr.next = virt_to_phys(&RxDescList[i + 1]); | ||
339 | RxDescList[i].descr.buf = L1_CACHE_ALIGN(virt_to_phys(RxDescList[i].skb->data)); | ||
340 | RxDescList[i].descr.status = 0; | ||
341 | RxDescList[i].descr.hw_len = 0; | ||
342 | prepare_rx_descriptor(&RxDescList[i].descr); | ||
343 | } | ||
344 | |||
345 | RxDescList[NBR_OF_RX_DESC - 1].descr.ctrl = d_eol; | ||
346 | RxDescList[NBR_OF_RX_DESC - 1].descr.next = virt_to_phys(&RxDescList[0]); | ||
347 | rx_queue_len = 0; | ||
348 | |||
349 | /* Initialize transmit descriptors */ | ||
350 | for (i = 0; i < NBR_OF_TX_DESC; i++) { | ||
351 | TxDescList[i].descr.ctrl = 0; | ||
352 | TxDescList[i].descr.sw_len = 0; | ||
353 | TxDescList[i].descr.next = virt_to_phys(&TxDescList[i + 1].descr); | ||
354 | TxDescList[i].descr.buf = 0; | ||
355 | TxDescList[i].descr.status = 0; | ||
356 | TxDescList[i].descr.hw_len = 0; | ||
357 | TxDescList[i].skb = 0; | ||
358 | } | ||
359 | |||
360 | TxDescList[NBR_OF_TX_DESC - 1].descr.ctrl = d_eol; | ||
361 | TxDescList[NBR_OF_TX_DESC - 1].descr.next = virt_to_phys(&TxDescList[0].descr); | ||
362 | |||
363 | /* Initialise initial pointers */ | ||
364 | |||
365 | myNextRxDesc = &RxDescList[0]; | ||
366 | myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; | ||
367 | myFirstTxDesc = &TxDescList[0]; | ||
368 | myNextTxDesc = &TxDescList[0]; | ||
369 | myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1]; | ||
370 | |||
371 | /* Register device */ | ||
372 | err = register_netdev(dev); | ||
373 | if (err) { | ||
374 | free_netdev(dev); | ||
375 | return err; | ||
376 | } | ||
377 | |||
378 | /* set the default MAC address */ | ||
379 | |||
380 | e100_set_mac_address(dev, &default_mac); | ||
381 | |||
382 | /* Initialize speed indicator stuff. */ | ||
383 | |||
384 | current_speed = 10; | ||
385 | current_speed_selection = 0; /* Auto */ | ||
386 | speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; | ||
387 | |||
388 | full_duplex = 0; | ||
389 | current_duplex = autoneg; | ||
390 | duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL; | ||
391 | |||
392 | timer_dev = dev; | ||
393 | |||
394 | /* Initialize mii interface */ | ||
395 | np->mii_if.phy_id_mask = 0x1f; | ||
396 | np->mii_if.reg_num_mask = 0x1f; | ||
397 | np->mii_if.dev = dev; | ||
398 | np->mii_if.mdio_read = e100_get_mdio_reg; | ||
399 | np->mii_if.mdio_write = e100_set_mdio_reg; | ||
400 | |||
401 | /* Initialize group address registers to make sure that no */ | ||
402 | /* unwanted addresses are matched */ | ||
403 | *R_NETWORK_GA_0 = 0x00000000; | ||
404 | *R_NETWORK_GA_1 = 0x00000000; | ||
405 | |||
406 | /* Initialize next time the led can flash */ | ||
407 | led_next_time = jiffies; | ||
408 | return 0; | ||
409 | } | ||
410 | device_initcall(etrax_ethernet_init) | ||
411 | |||
412 | /* set MAC address of the interface. called from the core after a | ||
413 | * SIOCSIFADDR ioctl, and from the bootup above. | ||
414 | */ | ||
415 | |||
416 | static int | ||
417 | e100_set_mac_address(struct net_device *dev, void *p) | ||
418 | { | ||
419 | struct net_local *np = netdev_priv(dev); | ||
420 | struct sockaddr *addr = p; | ||
421 | |||
422 | spin_lock(&np->lock); /* preemption protection */ | ||
423 | |||
424 | /* remember it */ | ||
425 | |||
426 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | ||
427 | |||
428 | /* Write it to the hardware. | ||
429 | * Note the way the address is wrapped: | ||
430 | * *R_NETWORK_SA_0 = a0_0 | (a0_1 << 8) | (a0_2 << 16) | (a0_3 << 24); | ||
431 | * *R_NETWORK_SA_1 = a0_4 | (a0_5 << 8); | ||
432 | */ | ||
433 | |||
434 | *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) | | ||
435 | (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24); | ||
436 | *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8); | ||
437 | *R_NETWORK_SA_2 = 0; | ||
438 | |||
439 | /* show it in the log as well */ | ||
440 | |||
441 | printk(KERN_INFO "%s: changed MAC to %pM\n", dev->name, dev->dev_addr); | ||
442 | |||
443 | spin_unlock(&np->lock); | ||
444 | |||
445 | return 0; | ||
446 | } | ||
447 | |||
448 | /* | ||
449 | * Open/initialize the board. This is called (in the current kernel) | ||
450 | * sometime after booting when the 'ifconfig' program is run. | ||
451 | * | ||
452 | * This routine should set everything up anew at each open, even | ||
453 | * registers that "should" only need to be set once at boot, so that | ||
454 | there is a non-reboot way to recover if something goes wrong. | ||
455 | */ | ||
456 | |||
457 | static int | ||
458 | e100_open(struct net_device *dev) | ||
459 | { | ||
460 | unsigned long flags; | ||
461 | |||
462 | /* enable the MDIO output pin */ | ||
463 | |||
464 | *R_NETWORK_MGM_CTRL = IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable); | ||
465 | |||
466 | *R_IRQ_MASK0_CLR = | ||
467 | IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) | | ||
468 | IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) | | ||
469 | IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr); | ||
470 | |||
471 | /* clear dma0 and 1 eop and descr irq masks */ | ||
472 | *R_IRQ_MASK2_CLR = | ||
473 | IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) | | ||
474 | IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) | | ||
475 | IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) | | ||
476 | IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr); | ||
477 | |||
478 | /* Reset and wait for the DMA channels */ | ||
479 | |||
480 | RESET_DMA(NETWORK_TX_DMA_NBR); | ||
481 | RESET_DMA(NETWORK_RX_DMA_NBR); | ||
482 | WAIT_DMA(NETWORK_TX_DMA_NBR); | ||
483 | WAIT_DMA(NETWORK_RX_DMA_NBR); | ||
484 | |||
485 | /* Initialise the etrax network controller */ | ||
486 | |||
487 | /* allocate the irq corresponding to the receiving DMA */ | ||
488 | |||
489 | if (request_irq(NETWORK_DMA_RX_IRQ_NBR, e100rxtx_interrupt, 0, cardname, | ||
490 | (void *)dev)) { | ||
491 | goto grace_exit0; | ||
492 | } | ||
493 | |||
494 | /* allocate the irq corresponding to the transmitting DMA */ | ||
495 | |||
496 | if (request_irq(NETWORK_DMA_TX_IRQ_NBR, e100rxtx_interrupt, 0, | ||
497 | cardname, (void *)dev)) { | ||
498 | goto grace_exit1; | ||
499 | } | ||
500 | |||
501 | /* allocate the irq corresponding to the network errors etc */ | ||
502 | |||
503 | if (request_irq(NETWORK_STATUS_IRQ_NBR, e100nw_interrupt, 0, | ||
504 | cardname, (void *)dev)) { | ||
505 | goto grace_exit2; | ||
506 | } | ||
507 | |||
508 | /* | ||
509 | * Always allocate the DMA channels after the IRQ, | ||
510 | * and clean up on failure. | ||
511 | */ | ||
512 | |||
513 | if (cris_request_dma(NETWORK_TX_DMA_NBR, | ||
514 | cardname, | ||
515 | DMA_VERBOSE_ON_ERROR, | ||
516 | dma_eth)) { | ||
517 | goto grace_exit3; | ||
518 | } | ||
519 | |||
520 | if (cris_request_dma(NETWORK_RX_DMA_NBR, | ||
521 | cardname, | ||
522 | DMA_VERBOSE_ON_ERROR, | ||
523 | dma_eth)) { | ||
524 | goto grace_exit4; | ||
525 | } | ||
526 | |||
527 | /* give the HW an idea of what MAC address we want */ | ||
528 | |||
529 | *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) | | ||
530 | (dev->dev_addr[2] << 16) | (dev->dev_addr[3] << 24); | ||
531 | *R_NETWORK_SA_1 = dev->dev_addr[4] | (dev->dev_addr[5] << 8); | ||
532 | *R_NETWORK_SA_2 = 0; | ||
533 | |||
534 | #if 0 | ||
535 | /* use promiscuous mode for testing */ | ||
536 | *R_NETWORK_GA_0 = 0xffffffff; | ||
537 | *R_NETWORK_GA_1 = 0xffffffff; | ||
538 | |||
539 | *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. rec, ma0 enabled */ | ||
540 | #else | ||
541 | SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522); | ||
542 | SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive); | ||
543 | SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable); | ||
544 | SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex); | ||
545 | *R_NETWORK_REC_CONFIG = network_rec_config_shadow; | ||
546 | #endif | ||
547 | |||
548 | *R_NETWORK_GEN_CONFIG = | ||
549 | IO_STATE(R_NETWORK_GEN_CONFIG, phy, mii_clk) | | ||
550 | IO_STATE(R_NETWORK_GEN_CONFIG, enable, on); | ||
551 | |||
552 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); | ||
553 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, delay, none); | ||
554 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cancel, dont); | ||
555 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, cd, enable); | ||
556 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, retry, enable); | ||
557 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, pad, enable); | ||
558 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable); | ||
559 | *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; | ||
560 | |||
561 | local_irq_save(flags); | ||
562 | |||
563 | /* enable the irq's for ethernet DMA */ | ||
564 | |||
565 | *R_IRQ_MASK2_SET = | ||
566 | IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) | | ||
567 | IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set); | ||
568 | |||
569 | *R_IRQ_MASK0_SET = | ||
570 | IO_STATE(R_IRQ_MASK0_SET, overrun, set) | | ||
571 | IO_STATE(R_IRQ_MASK0_SET, underrun, set) | | ||
572 | IO_STATE(R_IRQ_MASK0_SET, excessive_col, set); | ||
573 | |||
574 | /* make sure the irqs are cleared */ | ||
575 | |||
576 | *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do); | ||
577 | *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do); | ||
578 | |||
579 | /* make sure the rec and transmit error counters are cleared */ | ||
580 | |||
581 | (void)*R_REC_COUNTERS; /* dummy read */ | ||
582 | (void)*R_TR_COUNTERS; /* dummy read */ | ||
583 | |||
584 | /* start the receiving DMA channel so we can receive packets from now on */ | ||
585 | |||
586 | *R_DMA_CH1_FIRST = virt_to_phys(myNextRxDesc); | ||
587 | *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, start); | ||
588 | |||
589 | /* Set up transmit DMA channel so it can be restarted later */ | ||
590 | |||
591 | *R_DMA_CH0_FIRST = 0; | ||
592 | *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc); | ||
593 | netif_start_queue(dev); | ||
594 | |||
595 | local_irq_restore(flags); | ||
596 | |||
597 | /* Probe for transceiver */ | ||
598 | if (e100_probe_transceiver(dev)) | ||
599 | goto grace_exit5; | ||
600 | |||
601 | /* Start duplex/speed timers */ | ||
602 | add_timer(&speed_timer); | ||
603 | add_timer(&duplex_timer); | ||
604 | |||
605 | /* We are now ready to accept transmit requests from | ||
606 | * the queueing layer of the networking. | ||
607 | */ | ||
608 | netif_carrier_on(dev); | ||
609 | |||
610 | return 0; | ||
611 | |||
612 | grace_exit5: | ||
613 | cris_free_dma(NETWORK_RX_DMA_NBR, cardname); | ||
614 | grace_exit4: | ||
615 | cris_free_dma(NETWORK_TX_DMA_NBR, cardname); | ||
616 | grace_exit3: | ||
617 | free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); | ||
618 | grace_exit2: | ||
619 | free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev); | ||
620 | grace_exit1: | ||
621 | free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev); | ||
622 | grace_exit0: | ||
623 | return -EAGAIN; | ||
624 | } | ||
625 | |||
626 | #if defined(CONFIG_ETRAX_NO_PHY) | ||
627 | static void | ||
628 | dummy_check_speed(struct net_device* dev) | ||
629 | { | ||
630 | current_speed = 100; | ||
631 | } | ||
632 | #else | ||
633 | static void | ||
634 | generic_check_speed(struct net_device* dev) | ||
635 | { | ||
636 | unsigned long data; | ||
637 | struct net_local *np = netdev_priv(dev); | ||
638 | |||
639 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE); | ||
640 | if ((data & ADVERTISE_100FULL) || | ||
641 | (data & ADVERTISE_100HALF)) | ||
642 | current_speed = 100; | ||
643 | else | ||
644 | current_speed = 10; | ||
645 | } | ||
646 | |||
647 | static void | ||
648 | tdk_check_speed(struct net_device* dev) | ||
649 | { | ||
650 | unsigned long data; | ||
651 | struct net_local *np = netdev_priv(dev); | ||
652 | |||
653 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, | ||
654 | MDIO_TDK_DIAGNOSTIC_REG); | ||
655 | current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10); | ||
656 | } | ||
657 | |||
658 | static void | ||
659 | broadcom_check_speed(struct net_device* dev) | ||
660 | { | ||
661 | unsigned long data; | ||
662 | struct net_local *np = netdev_priv(dev); | ||
663 | |||
664 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, | ||
665 | MDIO_AUX_CTRL_STATUS_REG); | ||
666 | current_speed = (data & MDIO_BC_SPEED ? 100 : 10); | ||
667 | } | ||
668 | |||
669 | static void | ||
670 | intel_check_speed(struct net_device* dev) | ||
671 | { | ||
672 | unsigned long data; | ||
673 | struct net_local *np = netdev_priv(dev); | ||
674 | |||
675 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, | ||
676 | MDIO_INT_STATUS_REG_2); | ||
677 | current_speed = (data & MDIO_INT_SPEED ? 100 : 10); | ||
678 | } | ||
679 | #endif | ||
680 | static void | ||
681 | e100_check_speed(struct timer_list *unused) | ||
682 | { | ||
683 | struct net_device* dev = timer_dev; | ||
684 | struct net_local *np = netdev_priv(dev); | ||
685 | static int led_initiated = 0; | ||
686 | unsigned long data; | ||
687 | int old_speed = current_speed; | ||
688 | |||
689 | spin_lock(&np->transceiver_lock); | ||
690 | |||
691 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR); | ||
692 | if (!(data & BMSR_LSTATUS)) { | ||
693 | current_speed = 0; | ||
694 | } else { | ||
695 | transceiver->check_speed(dev); | ||
696 | } | ||
697 | |||
698 | spin_lock(&np->led_lock); | ||
699 | if ((old_speed != current_speed) || !led_initiated) { | ||
700 | led_initiated = 1; | ||
701 | e100_set_network_leds(NO_NETWORK_ACTIVITY); | ||
702 | if (current_speed) | ||
703 | netif_carrier_on(dev); | ||
704 | else | ||
705 | netif_carrier_off(dev); | ||
706 | } | ||
707 | spin_unlock(&np->led_lock); | ||
708 | |||
709 | /* Reinitialize the timer. */ | ||
710 | speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; | ||
711 | add_timer(&speed_timer); | ||
712 | |||
713 | spin_unlock(&np->transceiver_lock); | ||
714 | } | ||
715 | |||
716 | static void | ||
717 | e100_negotiate(struct net_device* dev) | ||
718 | { | ||
719 | struct net_local *np = netdev_priv(dev); | ||
720 | unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id, | ||
721 | MII_ADVERTISE); | ||
722 | |||
723 | /* Discard old speed and duplex settings */ | ||
724 | data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL | | ||
725 | ADVERTISE_10HALF | ADVERTISE_10FULL); | ||
726 | |||
727 | switch (current_speed_selection) { | ||
728 | case 10: | ||
729 | if (current_duplex == full) | ||
730 | data |= ADVERTISE_10FULL; | ||
731 | else if (current_duplex == half) | ||
732 | data |= ADVERTISE_10HALF; | ||
733 | else | ||
734 | data |= ADVERTISE_10HALF | ADVERTISE_10FULL; | ||
735 | break; | ||
736 | |||
737 | case 100: | ||
738 | if (current_duplex == full) | ||
739 | data |= ADVERTISE_100FULL; | ||
740 | else if (current_duplex == half) | ||
741 | data |= ADVERTISE_100HALF; | ||
742 | else | ||
743 | data |= ADVERTISE_100HALF | ADVERTISE_100FULL; | ||
744 | break; | ||
745 | |||
746 | case 0: /* Auto */ | ||
747 | if (current_duplex == full) | ||
748 | data |= ADVERTISE_100FULL | ADVERTISE_10FULL; | ||
749 | else if (current_duplex == half) | ||
750 | data |= ADVERTISE_100HALF | ADVERTISE_10HALF; | ||
751 | else | ||
752 | data |= ADVERTISE_10HALF | ADVERTISE_10FULL | | ||
753 | ADVERTISE_100HALF | ADVERTISE_100FULL; | ||
754 | break; | ||
755 | |||
756 | default: /* assume autoneg speed and duplex */ | ||
757 | data |= ADVERTISE_10HALF | ADVERTISE_10FULL | | ||
758 | ADVERTISE_100HALF | ADVERTISE_100FULL; | ||
759 | break; | ||
760 | } | ||
761 | |||
762 | e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data); | ||
763 | |||
764 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); | ||
765 | if (autoneg_normal) { | ||
766 | /* Renegotiate with link partner */ | ||
767 | data |= BMCR_ANENABLE | BMCR_ANRESTART; | ||
768 | } else { | ||
769 | /* Don't negotiate speed or duplex */ | ||
770 | data &= ~(BMCR_ANENABLE | BMCR_ANRESTART); | ||
771 | |||
772 | /* Set speed and duplex static */ | ||
773 | if (current_speed_selection == 10) | ||
774 | data &= ~BMCR_SPEED100; | ||
775 | else | ||
776 | data |= BMCR_SPEED100; | ||
777 | |||
778 | if (current_duplex != full) | ||
779 | data &= ~BMCR_FULLDPLX; | ||
780 | else | ||
781 | data |= BMCR_FULLDPLX; | ||
782 | } | ||
783 | e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data); | ||
784 | } | ||
785 | |||
786 | static void | ||
787 | e100_set_speed(struct net_device* dev, unsigned long speed) | ||
788 | { | ||
789 | struct net_local *np = netdev_priv(dev); | ||
790 | |||
791 | spin_lock(&np->transceiver_lock); | ||
792 | if (speed != current_speed_selection) { | ||
793 | current_speed_selection = speed; | ||
794 | e100_negotiate(dev); | ||
795 | } | ||
796 | spin_unlock(&np->transceiver_lock); | ||
797 | } | ||
798 | |||
799 | static void | ||
800 | e100_check_duplex(struct timer_list *unused) | ||
801 | { | ||
802 | struct net_device *dev = timer_dev; | ||
803 | struct net_local *np = netdev_priv(dev); | ||
804 | int old_duplex; | ||
805 | |||
806 | spin_lock(&np->transceiver_lock); | ||
807 | old_duplex = full_duplex; | ||
808 | transceiver->check_duplex(dev); | ||
809 | if (old_duplex != full_duplex) { | ||
810 | /* Duplex changed */ | ||
811 | SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex); | ||
812 | *R_NETWORK_REC_CONFIG = network_rec_config_shadow; | ||
813 | } | ||
814 | |||
815 | /* Reinitialize the timer. */ | ||
816 | duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL; | ||
817 | add_timer(&duplex_timer); | ||
818 | np->mii_if.full_duplex = full_duplex; | ||
819 | spin_unlock(&np->transceiver_lock); | ||
820 | } | ||
821 | #if defined(CONFIG_ETRAX_NO_PHY) | ||
822 | static void | ||
823 | dummy_check_duplex(struct net_device* dev) | ||
824 | { | ||
825 | full_duplex = 1; | ||
826 | } | ||
827 | #else | ||
828 | static void | ||
829 | generic_check_duplex(struct net_device* dev) | ||
830 | { | ||
831 | unsigned long data; | ||
832 | struct net_local *np = netdev_priv(dev); | ||
833 | |||
834 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE); | ||
835 | if ((data & ADVERTISE_10FULL) || | ||
836 | (data & ADVERTISE_100FULL)) | ||
837 | full_duplex = 1; | ||
838 | else | ||
839 | full_duplex = 0; | ||
840 | } | ||
841 | |||
842 | static void | ||
843 | tdk_check_duplex(struct net_device* dev) | ||
844 | { | ||
845 | unsigned long data; | ||
846 | struct net_local *np = netdev_priv(dev); | ||
847 | |||
848 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, | ||
849 | MDIO_TDK_DIAGNOSTIC_REG); | ||
850 | full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0; | ||
851 | } | ||
852 | |||
853 | static void | ||
854 | broadcom_check_duplex(struct net_device* dev) | ||
855 | { | ||
856 | unsigned long data; | ||
857 | struct net_local *np = netdev_priv(dev); | ||
858 | |||
859 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, | ||
860 | MDIO_AUX_CTRL_STATUS_REG); | ||
861 | full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0; | ||
862 | } | ||
863 | |||
864 | static void | ||
865 | intel_check_duplex(struct net_device* dev) | ||
866 | { | ||
867 | unsigned long data; | ||
868 | struct net_local *np = netdev_priv(dev); | ||
869 | |||
870 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, | ||
871 | MDIO_INT_STATUS_REG_2); | ||
872 | full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0; | ||
873 | } | ||
874 | #endif | ||
875 | static void | ||
876 | e100_set_duplex(struct net_device* dev, enum duplex new_duplex) | ||
877 | { | ||
878 | struct net_local *np = netdev_priv(dev); | ||
879 | |||
880 | spin_lock(&np->transceiver_lock); | ||
881 | if (new_duplex != current_duplex) { | ||
882 | current_duplex = new_duplex; | ||
883 | e100_negotiate(dev); | ||
884 | } | ||
885 | spin_unlock(&np->transceiver_lock); | ||
886 | } | ||
887 | |||
888 | static int | ||
889 | e100_probe_transceiver(struct net_device* dev) | ||
890 | { | ||
891 | int ret = 0; | ||
892 | |||
893 | #if !defined(CONFIG_ETRAX_NO_PHY) | ||
894 | unsigned int phyid_high; | ||
895 | unsigned int phyid_low; | ||
896 | unsigned int oui; | ||
897 | struct transceiver_ops* ops = NULL; | ||
898 | struct net_local *np = netdev_priv(dev); | ||
899 | |||
900 | spin_lock(&np->transceiver_lock); | ||
901 | |||
902 | /* Probe MDIO physical address */ | ||
903 | for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31; | ||
904 | np->mii_if.phy_id++) { | ||
905 | if (e100_get_mdio_reg(dev, | ||
906 | np->mii_if.phy_id, MII_BMSR) != 0xffff) | ||
907 | break; | ||
908 | } | ||
909 | if (np->mii_if.phy_id == 32) { | ||
910 | ret = -ENODEV; | ||
911 | goto out; | ||
912 | } | ||
913 | |||
914 | /* Get manufacturer */ | ||
915 | phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1); | ||
916 | phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2); | ||
917 | oui = (phyid_high << 6) | (phyid_low >> 10); | ||
918 | |||
919 | for (ops = &transceivers[0]; ops->oui; ops++) { | ||
920 | if (ops->oui == oui) | ||
921 | break; | ||
922 | } | ||
923 | transceiver = ops; | ||
924 | out: | ||
925 | spin_unlock(&np->transceiver_lock); | ||
926 | #endif | ||
927 | return ret; | ||
928 | } | ||
929 | |||
930 | static int | ||
931 | e100_get_mdio_reg(struct net_device *dev, int phy_id, int location) | ||
932 | { | ||
933 | unsigned short cmd; /* Data to be sent on MDIO port */ | ||
934 | int data; /* Data read from MDIO */ | ||
935 | int bitCounter; | ||
936 | |||
937 | /* Start of frame, OP Code, Physical Address, Register Address */ | ||
938 | cmd = (MDIO_START << 14) | (MDIO_READ << 12) | (phy_id << 7) | | ||
939 | (location << 2); | ||
940 | |||
941 | e100_send_mdio_cmd(cmd, 0); | ||
942 | |||
943 | data = 0; | ||
944 | |||
945 | /* Data... */ | ||
946 | for (bitCounter=15; bitCounter>=0 ; bitCounter--) { | ||
947 | data |= (e100_receive_mdio_bit() << bitCounter); | ||
948 | } | ||
949 | |||
950 | return data; | ||
951 | } | ||
952 | |||
953 | static void | ||
954 | e100_set_mdio_reg(struct net_device *dev, int phy_id, int location, int value) | ||
955 | { | ||
956 | int bitCounter; | ||
957 | unsigned short cmd; | ||
958 | |||
959 | cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (phy_id << 7) | | ||
960 | (location << 2); | ||
961 | |||
962 | e100_send_mdio_cmd(cmd, 1); | ||
963 | |||
964 | /* Data... */ | ||
965 | for (bitCounter=15; bitCounter>=0 ; bitCounter--) { | ||
966 | e100_send_mdio_bit(GET_BIT(bitCounter, value)); | ||
967 | } | ||
968 | |||
969 | } | ||
970 | |||
971 | static void | ||
972 | e100_send_mdio_cmd(unsigned short cmd, int write_cmd) | ||
973 | { | ||
974 | int bitCounter; | ||
975 | unsigned char data = 0x2; | ||
976 | |||
977 | /* Preamble */ | ||
978 | for (bitCounter = 31; bitCounter>= 0; bitCounter--) | ||
979 | e100_send_mdio_bit(GET_BIT(bitCounter, MDIO_PREAMBLE)); | ||
980 | |||
981 | for (bitCounter = 15; bitCounter >= 2; bitCounter--) | ||
982 | e100_send_mdio_bit(GET_BIT(bitCounter, cmd)); | ||
983 | |||
984 | /* Turnaround */ | ||
985 | for (bitCounter = 1; bitCounter >= 0 ; bitCounter--) | ||
986 | if (write_cmd) | ||
987 | e100_send_mdio_bit(GET_BIT(bitCounter, data)); | ||
988 | else | ||
989 | e100_receive_mdio_bit(); | ||
990 | } | ||
991 | |||
992 | static void | ||
993 | e100_send_mdio_bit(unsigned char bit) | ||
994 | { | ||
995 | *R_NETWORK_MGM_CTRL = | ||
996 | IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) | | ||
997 | IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit); | ||
998 | udelay(1); | ||
999 | *R_NETWORK_MGM_CTRL = | ||
1000 | IO_STATE(R_NETWORK_MGM_CTRL, mdoe, enable) | | ||
1001 | IO_MASK(R_NETWORK_MGM_CTRL, mdck) | | ||
1002 | IO_FIELD(R_NETWORK_MGM_CTRL, mdio, bit); | ||
1003 | udelay(1); | ||
1004 | } | ||
1005 | |||
1006 | static unsigned char | ||
1007 | e100_receive_mdio_bit(void) | ||
1008 | { | ||
1009 | unsigned char bit; | ||
1010 | *R_NETWORK_MGM_CTRL = 0; | ||
1011 | bit = IO_EXTRACT(R_NETWORK_STAT, mdio, *R_NETWORK_STAT); | ||
1012 | udelay(1); | ||
1013 | *R_NETWORK_MGM_CTRL = IO_MASK(R_NETWORK_MGM_CTRL, mdck); | ||
1014 | udelay(1); | ||
1015 | return bit; | ||
1016 | } | ||
1017 | |||
1018 | static void | ||
1019 | e100_reset_transceiver(struct net_device* dev) | ||
1020 | { | ||
1021 | struct net_local *np = netdev_priv(dev); | ||
1022 | unsigned short cmd; | ||
1023 | unsigned short data; | ||
1024 | int bitCounter; | ||
1025 | |||
1026 | data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); | ||
1027 | |||
1028 | cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2); | ||
1029 | |||
1030 | e100_send_mdio_cmd(cmd, 1); | ||
1031 | |||
1032 | data |= 0x8000; | ||
1033 | |||
1034 | for (bitCounter = 15; bitCounter >= 0 ; bitCounter--) { | ||
1035 | e100_send_mdio_bit(GET_BIT(bitCounter, data)); | ||
1036 | } | ||
1037 | } | ||
1038 | |||
1039 | /* Called by upper layers if they decide it took too long to complete | ||
1040 | * sending a packet - we need to reset and stuff. | ||
1041 | */ | ||
1042 | |||
1043 | static void | ||
1044 | e100_tx_timeout(struct net_device *dev) | ||
1045 | { | ||
1046 | struct net_local *np = netdev_priv(dev); | ||
1047 | unsigned long flags; | ||
1048 | |||
1049 | spin_lock_irqsave(&np->lock, flags); | ||
1050 | |||
1051 | printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, | ||
1052 | tx_done(dev) ? "IRQ problem" : "network cable problem"); | ||
1053 | |||
1054 | /* remember we got an error */ | ||
1055 | |||
1056 | dev->stats.tx_errors++; | ||
1057 | |||
1058 | /* reset the TX DMA in case it has hung on something */ | ||
1059 | |||
1060 | RESET_DMA(NETWORK_TX_DMA_NBR); | ||
1061 | WAIT_DMA(NETWORK_TX_DMA_NBR); | ||
1062 | |||
1063 | /* Reset the transceiver. */ | ||
1064 | |||
1065 | e100_reset_transceiver(dev); | ||
1066 | |||
1067 | /* and get rid of the packets that never got an interrupt */ | ||
1068 | while (myFirstTxDesc != myNextTxDesc) { | ||
1069 | dev_kfree_skb(myFirstTxDesc->skb); | ||
1070 | myFirstTxDesc->skb = 0; | ||
1071 | myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next); | ||
1072 | } | ||
1073 | |||
1074 | /* Set up transmit DMA channel so it can be restarted later */ | ||
1075 | *R_DMA_CH0_FIRST = 0; | ||
1076 | *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc); | ||
1077 | |||
1078 | /* tell the upper layers we're ok again */ | ||
1079 | |||
1080 | netif_wake_queue(dev); | ||
1081 | spin_unlock_irqrestore(&np->lock, flags); | ||
1082 | } | ||
1083 | |||
1084 | |||
1085 | /* This will only be invoked if the driver is _not_ in XOFF state. | ||
1086 | * What this means is that we need not check it, and that this | ||
1087 | * invariant will hold if we make sure that the netif_*_queue() | ||
1088 | * calls are done at the proper times. | ||
1089 | */ | ||
1090 | |||
1091 | static int | ||
1092 | e100_send_packet(struct sk_buff *skb, struct net_device *dev) | ||
1093 | { | ||
1094 | struct net_local *np = netdev_priv(dev); | ||
1095 | unsigned char *buf = skb->data; | ||
1096 | unsigned long flags; | ||
1097 | |||
1098 | #ifdef ETHDEBUG | ||
1099 | printk("send packet len %d\n", length); | ||
1100 | #endif | ||
1101 | spin_lock_irqsave(&np->lock, flags); /* protect from tx_interrupt and ourself */ | ||
1102 | |||
1103 | myNextTxDesc->skb = skb; | ||
1104 | |||
1105 | netif_trans_update(dev); /* NETIF_F_LLTX driver :( */ | ||
1106 | |||
1107 | e100_hardware_send_packet(np, buf, skb->len); | ||
1108 | |||
1109 | myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next); | ||
1110 | |||
1111 | /* Stop queue if full */ | ||
1112 | if (myNextTxDesc == myFirstTxDesc) { | ||
1113 | netif_stop_queue(dev); | ||
1114 | } | ||
1115 | |||
1116 | spin_unlock_irqrestore(&np->lock, flags); | ||
1117 | |||
1118 | return NETDEV_TX_OK; | ||
1119 | } | ||
1120 | |||
1121 | /* | ||
1122 | * The typical workload of the driver: | ||
1123 | * Handle the network interface interrupts. | ||
1124 | */ | ||
1125 | |||
1126 | static irqreturn_t | ||
1127 | e100rxtx_interrupt(int irq, void *dev_id) | ||
1128 | { | ||
1129 | struct net_device *dev = (struct net_device *)dev_id; | ||
1130 | unsigned long irqbits; | ||
1131 | |||
1132 | /* | ||
1133 | * Note that both rx and tx interrupts are blocked at this point, | ||
1134 | * regardless of which got us here. | ||
1135 | */ | ||
1136 | |||
1137 | irqbits = *R_IRQ_MASK2_RD; | ||
1138 | |||
1139 | /* Handle received packets */ | ||
1140 | if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) { | ||
1141 | /* acknowledge the eop interrupt */ | ||
1142 | |||
1143 | *R_DMA_CH1_CLR_INTR = IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do); | ||
1144 | |||
1145 | /* check if one or more complete packets were indeed received */ | ||
1146 | |||
1147 | while ((*R_DMA_CH1_FIRST != virt_to_phys(myNextRxDesc)) && | ||
1148 | (myNextRxDesc != myLastRxDesc)) { | ||
1149 | /* Take out the buffer and give it to the OS, then | ||
1150 | * allocate a new buffer to put a packet in. | ||
1151 | */ | ||
1152 | e100_rx(dev); | ||
1153 | dev->stats.rx_packets++; | ||
1154 | /* restart/continue on the channel, for safety */ | ||
1155 | *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart); | ||
1156 | /* clear dma channel 1 eop/descr irq bits */ | ||
1157 | *R_DMA_CH1_CLR_INTR = | ||
1158 | IO_STATE(R_DMA_CH1_CLR_INTR, clr_eop, do) | | ||
1159 | IO_STATE(R_DMA_CH1_CLR_INTR, clr_descr, do); | ||
1160 | |||
1161 | /* now, we might have gotten another packet | ||
1162 | so we have to loop back and check if so */ | ||
1163 | } | ||
1164 | } | ||
1165 | |||
1166 | /* Report any packets that have been sent */ | ||
1167 | while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST && | ||
1168 | (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) { | ||
1169 | dev->stats.tx_bytes += myFirstTxDesc->skb->len; | ||
1170 | dev->stats.tx_packets++; | ||
1171 | |||
1172 | /* dma is ready with the transmission of the data in tx_skb, so now | ||
1173 | we can release the skb memory */ | ||
1174 | dev_kfree_skb_irq(myFirstTxDesc->skb); | ||
1175 | myFirstTxDesc->skb = 0; | ||
1176 | myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next); | ||
1177 | /* Wake up queue. */ | ||
1178 | netif_wake_queue(dev); | ||
1179 | } | ||
1180 | |||
1181 | if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) { | ||
1182 | /* acknowledge the eop interrupt. */ | ||
1183 | *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do); | ||
1184 | } | ||
1185 | |||
1186 | return IRQ_HANDLED; | ||
1187 | } | ||
1188 | |||
1189 | static irqreturn_t | ||
1190 | e100nw_interrupt(int irq, void *dev_id) | ||
1191 | { | ||
1192 | struct net_device *dev = (struct net_device *)dev_id; | ||
1193 | unsigned long irqbits = *R_IRQ_MASK0_RD; | ||
1194 | |||
1195 | /* check for underrun irq */ | ||
1196 | if (irqbits & IO_STATE(R_IRQ_MASK0_RD, underrun, active)) { | ||
1197 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); | ||
1198 | *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; | ||
1199 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); | ||
1200 | dev->stats.tx_errors++; | ||
1201 | D(printk("ethernet receiver underrun!\n")); | ||
1202 | } | ||
1203 | |||
1204 | /* check for overrun irq */ | ||
1205 | if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) { | ||
1206 | update_rx_stats(&dev->stats); /* this will ack the irq */ | ||
1207 | D(printk("ethernet receiver overrun!\n")); | ||
1208 | } | ||
1209 | /* check for excessive collision irq */ | ||
1210 | if (irqbits & IO_STATE(R_IRQ_MASK0_RD, excessive_col, active)) { | ||
1211 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); | ||
1212 | *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; | ||
1213 | SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); | ||
1214 | dev->stats.tx_errors++; | ||
1215 | D(printk("ethernet excessive collisions!\n")); | ||
1216 | } | ||
1217 | return IRQ_HANDLED; | ||
1218 | } | ||
1219 | |||
1220 | /* We have a good packet(s), get it/them out of the buffers. */ | ||
1221 | static void | ||
1222 | e100_rx(struct net_device *dev) | ||
1223 | { | ||
1224 | struct sk_buff *skb; | ||
1225 | int length = 0; | ||
1226 | struct net_local *np = netdev_priv(dev); | ||
1227 | unsigned char *skb_data_ptr; | ||
1228 | #ifdef ETHDEBUG | ||
1229 | int i; | ||
1230 | #endif | ||
1231 | etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */ | ||
1232 | spin_lock(&np->led_lock); | ||
1233 | if (!led_active && time_after(jiffies, led_next_time)) { | ||
1234 | /* light the network leds depending on the current speed. */ | ||
1235 | e100_set_network_leds(NETWORK_ACTIVITY); | ||
1236 | |||
1237 | /* Set the earliest time we may clear the LED */ | ||
1238 | led_next_time = jiffies + NET_FLASH_TIME; | ||
1239 | led_active = 1; | ||
1240 | mod_timer(&clear_led_timer, jiffies + HZ/10); | ||
1241 | } | ||
1242 | spin_unlock(&np->led_lock); | ||
1243 | |||
1244 | length = myNextRxDesc->descr.hw_len - 4; | ||
1245 | dev->stats.rx_bytes += length; | ||
1246 | |||
1247 | #ifdef ETHDEBUG | ||
1248 | printk("Got a packet of length %d:\n", length); | ||
1249 | /* dump the first bytes in the packet */ | ||
1250 | skb_data_ptr = (unsigned char *)phys_to_virt(myNextRxDesc->descr.buf); | ||
1251 | for (i = 0; i < 8; i++) { | ||
1252 | printk("%d: %.2x %.2x %.2x %.2x %.2x %.2x %.2x %.2x\n", i * 8, | ||
1253 | skb_data_ptr[0],skb_data_ptr[1],skb_data_ptr[2],skb_data_ptr[3], | ||
1254 | skb_data_ptr[4],skb_data_ptr[5],skb_data_ptr[6],skb_data_ptr[7]); | ||
1255 | skb_data_ptr += 8; | ||
1256 | } | ||
1257 | #endif | ||
1258 | |||
1259 | if (length < RX_COPYBREAK) { | ||
1260 | /* Small packet, copy data */ | ||
1261 | skb = dev_alloc_skb(length - ETHER_HEAD_LEN); | ||
1262 | if (!skb) { | ||
1263 | dev->stats.rx_errors++; | ||
1264 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); | ||
1265 | goto update_nextrxdesc; | ||
1266 | } | ||
1267 | |||
1268 | skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */ | ||
1269 | skb_data_ptr = skb_push(skb, ETHER_HEAD_LEN); /* allocate room for the header */ | ||
1270 | |||
1271 | #ifdef ETHDEBUG | ||
1272 | printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", | ||
1273 | skb->head, skb->data, skb_tail_pointer(skb), | ||
1274 | skb_end_pointer(skb)); | ||
1275 | printk("copying packet to 0x%x.\n", skb_data_ptr); | ||
1276 | #endif | ||
1277 | |||
1278 | memcpy(skb_data_ptr, phys_to_virt(myNextRxDesc->descr.buf), length); | ||
1279 | } | ||
1280 | else { | ||
1281 | /* Large packet, send directly to upper layers and allocate new | ||
1282 | * memory (aligned to cache line boundary to avoid bug). | ||
1283 | * Before sending the skb to upper layers we must make sure | ||
1284 | * that skb->data points to the aligned start of the packet. | ||
1285 | */ | ||
1286 | int align; | ||
1287 | struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); | ||
1288 | if (!new_skb) { | ||
1289 | dev->stats.rx_errors++; | ||
1290 | printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); | ||
1291 | goto update_nextrxdesc; | ||
1292 | } | ||
1293 | skb = myNextRxDesc->skb; | ||
1294 | align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data; | ||
1295 | skb_put(skb, length + align); | ||
1296 | skb_pull(skb, align); /* Remove alignment bytes */ | ||
1297 | myNextRxDesc->skb = new_skb; | ||
1298 | myNextRxDesc->descr.buf = L1_CACHE_ALIGN(virt_to_phys(myNextRxDesc->skb->data)); | ||
1299 | } | ||
1300 | |||
1301 | skb->protocol = eth_type_trans(skb, dev); | ||
1302 | |||
1303 | /* Send the packet to the upper layers */ | ||
1304 | netif_rx(skb); | ||
1305 | |||
1306 | update_nextrxdesc: | ||
1307 | /* Prepare for next packet */ | ||
1308 | myNextRxDesc->descr.status = 0; | ||
1309 | prevRxDesc = myNextRxDesc; | ||
1310 | myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next); | ||
1311 | |||
1312 | rx_queue_len++; | ||
1313 | |||
1314 | /* Check if descriptors should be returned */ | ||
1315 | if (rx_queue_len == RX_QUEUE_THRESHOLD) { | ||
1316 | flush_etrax_cache(); | ||
1317 | prevRxDesc->descr.ctrl |= d_eol; | ||
1318 | myLastRxDesc->descr.ctrl &= ~d_eol; | ||
1319 | myLastRxDesc = prevRxDesc; | ||
1320 | rx_queue_len = 0; | ||
1321 | } | ||
1322 | } | ||
1323 | |||
1324 | /* The inverse routine to net_open(). */ | ||
1325 | static int | ||
1326 | e100_close(struct net_device *dev) | ||
1327 | { | ||
1328 | printk(KERN_INFO "Closing %s.\n", dev->name); | ||
1329 | |||
1330 | netif_stop_queue(dev); | ||
1331 | |||
1332 | *R_IRQ_MASK0_CLR = | ||
1333 | IO_STATE(R_IRQ_MASK0_CLR, overrun, clr) | | ||
1334 | IO_STATE(R_IRQ_MASK0_CLR, underrun, clr) | | ||
1335 | IO_STATE(R_IRQ_MASK0_CLR, excessive_col, clr); | ||
1336 | |||
1337 | *R_IRQ_MASK2_CLR = | ||
1338 | IO_STATE(R_IRQ_MASK2_CLR, dma0_descr, clr) | | ||
1339 | IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) | | ||
1340 | IO_STATE(R_IRQ_MASK2_CLR, dma1_descr, clr) | | ||
1341 | IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr); | ||
1342 | |||
1343 | /* Stop the receiver and the transmitter */ | ||
1344 | |||
1345 | RESET_DMA(NETWORK_TX_DMA_NBR); | ||
1346 | RESET_DMA(NETWORK_RX_DMA_NBR); | ||
1347 | |||
1348 | /* Flush the Tx and disable Rx here. */ | ||
1349 | |||
1350 | free_irq(NETWORK_DMA_RX_IRQ_NBR, (void *)dev); | ||
1351 | free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev); | ||
1352 | free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); | ||
1353 | |||
1354 | cris_free_dma(NETWORK_TX_DMA_NBR, cardname); | ||
1355 | cris_free_dma(NETWORK_RX_DMA_NBR, cardname); | ||
1356 | |||
1357 | /* Update the statistics here. */ | ||
1358 | |||
1359 | update_rx_stats(&dev->stats); | ||
1360 | update_tx_stats(&dev->stats); | ||
1361 | |||
1362 | /* Stop speed/duplex timers */ | ||
1363 | del_timer(&speed_timer); | ||
1364 | del_timer(&duplex_timer); | ||
1365 | |||
1366 | return 0; | ||
1367 | } | ||
1368 | |||
1369 | static int | ||
1370 | e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | ||
1371 | { | ||
1372 | struct mii_ioctl_data *data = if_mii(ifr); | ||
1373 | struct net_local *np = netdev_priv(dev); | ||
1374 | int rc = 0; | ||
1375 | int old_autoneg; | ||
1376 | |||
1377 | spin_lock(&np->lock); /* Preempt protection */ | ||
1378 | switch (cmd) { | ||
1379 | /* The ioctls below should be considered obsolete but are */ | ||
1380 | /* still present for compatibility with old scripts/apps */ | ||
1381 | case SET_ETH_SPEED_10: /* 10 Mbps */ | ||
1382 | e100_set_speed(dev, 10); | ||
1383 | break; | ||
1384 | case SET_ETH_SPEED_100: /* 100 Mbps */ | ||
1385 | e100_set_speed(dev, 100); | ||
1386 | break; | ||
1387 | case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */ | ||
1388 | e100_set_speed(dev, 0); | ||
1389 | break; | ||
1390 | case SET_ETH_DUPLEX_HALF: /* Half duplex */ | ||
1391 | e100_set_duplex(dev, half); | ||
1392 | break; | ||
1393 | case SET_ETH_DUPLEX_FULL: /* Full duplex */ | ||
1394 | e100_set_duplex(dev, full); | ||
1395 | break; | ||
1396 | case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */ | ||
1397 | e100_set_duplex(dev, autoneg); | ||
1398 | break; | ||
1399 | case SET_ETH_AUTONEG: | ||
1400 | old_autoneg = autoneg_normal; | ||
1401 | autoneg_normal = *(int*)data; | ||
1402 | if (autoneg_normal != old_autoneg) | ||
1403 | e100_negotiate(dev); | ||
1404 | break; | ||
1405 | default: | ||
1406 | rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr), | ||
1407 | cmd, NULL); | ||
1408 | break; | ||
1409 | } | ||
1410 | spin_unlock(&np->lock); | ||
1411 | return rc; | ||
1412 | } | ||
1413 | |||
1414 | static int e100_get_link_ksettings(struct net_device *dev, | ||
1415 | struct ethtool_link_ksettings *cmd) | ||
1416 | { | ||
1417 | struct net_local *np = netdev_priv(dev); | ||
1418 | u32 supported; | ||
1419 | |||
1420 | spin_lock_irq(&np->lock); | ||
1421 | mii_ethtool_get_link_ksettings(&np->mii_if, cmd); | ||
1422 | spin_unlock_irq(&np->lock); | ||
1423 | |||
1424 | /* The PHY may support 1000baseT, but the Etrax100 does not. */ | ||
1425 | ethtool_convert_link_mode_to_legacy_u32(&supported, | ||
1426 | cmd->link_modes.supported); | ||
1427 | |||
1428 | supported &= ~(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full); | ||
1429 | |||
1430 | ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, | ||
1431 | supported); | ||
1432 | |||
1433 | return 0; | ||
1434 | } | ||
1435 | |||
1436 | static int e100_set_link_ksettings(struct net_device *dev, | ||
1437 | const struct ethtool_link_ksettings *ecmd) | ||
1438 | { | ||
1439 | if (ecmd->base.autoneg == AUTONEG_ENABLE) { | ||
1440 | e100_set_duplex(dev, autoneg); | ||
1441 | e100_set_speed(dev, 0); | ||
1442 | } else { | ||
1443 | e100_set_duplex(dev, ecmd->base.duplex == DUPLEX_HALF ? | ||
1444 | half : full); | ||
1445 | e100_set_speed(dev, ecmd->base.speed == SPEED_10 ? 10 : 100); | ||
1446 | } | ||
1447 | |||
1448 | return 0; | ||
1449 | } | ||
1450 | |||
1451 | static void e100_get_drvinfo(struct net_device *dev, | ||
1452 | struct ethtool_drvinfo *info) | ||
1453 | { | ||
1454 | strlcpy(info->driver, "ETRAX 100LX", sizeof(info->driver)); | ||
1455 | strlcpy(info->version, "$Revision: 1.31 $", sizeof(info->version)); | ||
1456 | strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); | ||
1457 | strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); | ||
1458 | } | ||
1459 | |||
1460 | static int e100_nway_reset(struct net_device *dev) | ||
1461 | { | ||
1462 | if (current_duplex == autoneg && current_speed_selection == 0) | ||
1463 | e100_negotiate(dev); | ||
1464 | return 0; | ||
1465 | } | ||
1466 | |||
1467 | static const struct ethtool_ops e100_ethtool_ops = { | ||
1468 | .get_drvinfo = e100_get_drvinfo, | ||
1469 | .nway_reset = e100_nway_reset, | ||
1470 | .get_link = ethtool_op_get_link, | ||
1471 | .get_link_ksettings = e100_get_link_ksettings, | ||
1472 | .set_link_ksettings = e100_set_link_ksettings, | ||
1473 | }; | ||
1474 | |||
1475 | static int | ||
1476 | e100_set_config(struct net_device *dev, struct ifmap *map) | ||
1477 | { | ||
1478 | struct net_local *np = netdev_priv(dev); | ||
1479 | |||
1480 | spin_lock(&np->lock); /* Preempt protection */ | ||
1481 | |||
1482 | switch (map->port) { | ||
1483 | case IF_PORT_UNKNOWN: | ||
1484 | /* Use autoneg */ | ||
1485 | e100_set_speed(dev, 0); | ||
1486 | e100_set_duplex(dev, autoneg); | ||
1487 | break; | ||
1488 | case IF_PORT_10BASET: | ||
1489 | e100_set_speed(dev, 10); | ||
1490 | e100_set_duplex(dev, autoneg); | ||
1491 | break; | ||
1492 | case IF_PORT_100BASET: | ||
1493 | case IF_PORT_100BASETX: | ||
1494 | e100_set_speed(dev, 100); | ||
1495 | e100_set_duplex(dev, autoneg); | ||
1496 | break; | ||
1497 | case IF_PORT_100BASEFX: | ||
1498 | case IF_PORT_10BASE2: | ||
1499 | case IF_PORT_AUI: | ||
1500 | spin_unlock(&np->lock); | ||
1501 | return -EOPNOTSUPP; | ||
1502 | default: | ||
1503 | printk(KERN_ERR "%s: Invalid media selected\n", dev->name); | ||
1504 | spin_unlock(&np->lock); | ||
1505 | return -EINVAL; | ||
1506 | } | ||
1507 | spin_unlock(&np->lock); | ||
1508 | return 0; | ||
1509 | } | ||
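e100_set_config() is reached through the generic SIOCSIFMAP path, so the port selection above maps directly onto the standard ifmap interface. A minimal sketch of the corresponding userspace call follows; the helper name, socket descriptor and interface name are illustrative, not part of the driver.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/if.h>        /* struct ifreq, struct ifmap, IF_PORT_* */
#include <linux/sockios.h>   /* SIOCSIFMAP */

/* Ask the driver to force 10BASE-T via the ifmap 'port' field. */
static int select_10baset(int fd, const char *ifname)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_map.port = IF_PORT_10BASET;   /* handled by the switch above */
	return ioctl(fd, SIOCSIFMAP, &ifr);
}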
1510 | |||
1511 | static void | ||
1512 | update_rx_stats(struct net_device_stats *es) | ||
1513 | { | ||
1514 | unsigned long r = *R_REC_COUNTERS; | ||
1515 | /* update stats relevant to reception errors */ | ||
1516 | es->rx_fifo_errors += IO_EXTRACT(R_REC_COUNTERS, congestion, r); | ||
1517 | es->rx_crc_errors += IO_EXTRACT(R_REC_COUNTERS, crc_error, r); | ||
1518 | es->rx_frame_errors += IO_EXTRACT(R_REC_COUNTERS, alignment_error, r); | ||
1519 | es->rx_length_errors += IO_EXTRACT(R_REC_COUNTERS, oversize, r); | ||
1520 | } | ||
1521 | |||
1522 | static void | ||
1523 | update_tx_stats(struct net_device_stats *es) | ||
1524 | { | ||
1525 | unsigned long r = *R_TR_COUNTERS; | ||
1526 | /* update stats relevant to transmission errors */ | ||
1527 | es->collisions += | ||
1528 | IO_EXTRACT(R_TR_COUNTERS, single_col, r) + | ||
1529 | IO_EXTRACT(R_TR_COUNTERS, multiple_col, r); | ||
1530 | } | ||
1531 | |||
1532 | /* | ||
1533 | * Get the current statistics. | ||
1534 | * This may be called with the card open or closed. | ||
1535 | */ | ||
1536 | static struct net_device_stats * | ||
1537 | e100_get_stats(struct net_device *dev) | ||
1538 | { | ||
1539 | struct net_local *lp = netdev_priv(dev); | ||
1540 | unsigned long flags; | ||
1541 | |||
1542 | spin_lock_irqsave(&lp->lock, flags); | ||
1543 | |||
1544 | update_rx_stats(&dev->stats); | ||
1545 | update_tx_stats(&dev->stats); | ||
1546 | |||
1547 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1548 | return &dev->stats; | ||
1549 | } | ||
1550 | |||
1551 | /* | ||
1552 | * Set or clear the multicast filter for this adaptor. | ||
1553 | * IFF_PROMISC set	Promiscuous mode, receive all packets | ||
1554 | * IFF_ALLMULTI set	Receive all multicast packets | ||
1555 | * empty mc list	Normal mode, clear the group-address filter | ||
1556 | * non-empty mc list	Receive normal and MC packets, with best-effort hash filtering. | ||
1557 | */ | ||
1558 | static void | ||
1559 | set_multicast_list(struct net_device *dev) | ||
1560 | { | ||
1561 | struct net_local *lp = netdev_priv(dev); | ||
1562 | int num_addr = netdev_mc_count(dev); | ||
1563 | unsigned long int lo_bits; | ||
1564 | unsigned long int hi_bits; | ||
1565 | |||
1566 | spin_lock(&lp->lock); | ||
1567 | if (dev->flags & IFF_PROMISC) { | ||
1568 | /* promiscuous mode */ | ||
1569 | lo_bits = 0xfffffffful; | ||
1570 | hi_bits = 0xfffffffful; | ||
1571 | |||
1572 | /* Enable individual receive */ | ||
1573 | SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, receive); | ||
1574 | *R_NETWORK_REC_CONFIG = network_rec_config_shadow; | ||
1575 | } else if (dev->flags & IFF_ALLMULTI) { | ||
1576 | /* enable all multicasts */ | ||
1577 | lo_bits = 0xfffffffful; | ||
1578 | hi_bits = 0xfffffffful; | ||
1579 | |||
1580 | /* Disable individual receive */ | ||
1581 | SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); | ||
1582 | *R_NETWORK_REC_CONFIG = network_rec_config_shadow; | ||
1583 | } else if (num_addr == 0) { | ||
1584 | /* Normal, clear the mc list */ | ||
1585 | lo_bits = 0x00000000ul; | ||
1586 | hi_bits = 0x00000000ul; | ||
1587 | |||
1588 | /* Disable individual receive */ | ||
1589 | SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); | ||
1590 | *R_NETWORK_REC_CONFIG = network_rec_config_shadow; | ||
1591 | } else { | ||
1592 | /* MC mode, receive normal and MC packets */ | ||
1593 | char hash_ix; | ||
1594 | struct netdev_hw_addr *ha; | ||
1595 | char *baddr; | ||
1596 | |||
1597 | lo_bits = 0x00000000ul; | ||
1598 | hi_bits = 0x00000000ul; | ||
1599 | netdev_for_each_mc_addr(ha, dev) { | ||
1600 | /* Calculate the hash index for the GA registers */ | ||
1601 | |||
1602 | hash_ix = 0; | ||
1603 | baddr = ha->addr; | ||
1604 | hash_ix ^= (*baddr) & 0x3f; | ||
1605 | hash_ix ^= ((*baddr) >> 6) & 0x03; | ||
1606 | ++baddr; | ||
1607 | hash_ix ^= ((*baddr) << 2) & 0x03c; | ||
1608 | hash_ix ^= ((*baddr) >> 4) & 0xf; | ||
1609 | ++baddr; | ||
1610 | hash_ix ^= ((*baddr) << 4) & 0x30; | ||
1611 | hash_ix ^= ((*baddr) >> 2) & 0x3f; | ||
1612 | ++baddr; | ||
1613 | hash_ix ^= (*baddr) & 0x3f; | ||
1614 | hash_ix ^= ((*baddr) >> 6) & 0x03; | ||
1615 | ++baddr; | ||
1616 | hash_ix ^= ((*baddr) << 2) & 0x03c; | ||
1617 | hash_ix ^= ((*baddr) >> 4) & 0xf; | ||
1618 | ++baddr; | ||
1619 | hash_ix ^= ((*baddr) << 4) & 0x30; | ||
1620 | hash_ix ^= ((*baddr) >> 2) & 0x3f; | ||
1621 | |||
1622 | hash_ix &= 0x3f; | ||
1623 | |||
1624 | if (hash_ix >= 32) { | ||
1625 | hi_bits |= (1 << (hash_ix-32)); | ||
1626 | } else { | ||
1627 | lo_bits |= (1 << hash_ix); | ||
1628 | } | ||
1629 | } | ||
1630 | /* Disable individual receive */ | ||
1631 | SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard); | ||
1632 | *R_NETWORK_REC_CONFIG = network_rec_config_shadow; | ||
1633 | } | ||
1634 | *R_NETWORK_GA_0 = lo_bits; | ||
1635 | *R_NETWORK_GA_1 = hi_bits; | ||
1636 | spin_unlock(&lp->lock); | ||
1637 | } | ||
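The unrolled XOR chain above folds the 48-bit group address into a 6-bit index that selects one bit in the pair of 32-bit group-address registers (R_NETWORK_GA_0/1). The same fold in a more readable loop form, shown only as an illustrative sketch rather than driver code:

/* Fold a 6-byte multicast address into the 6-bit hash index used above. */
static unsigned int ga_hash(const unsigned char *addr)
{
	unsigned long long v = 0;
	unsigned int hash = 0;
	int i;

	/* Pack the address little-endian: byte 0 in bits 0..7, and so on. */
	for (i = 0; i < 6; i++)
		v |= (unsigned long long)addr[i] << (8 * i);

	/* XOR the eight 6-bit chunks of the 48-bit value together. */
	for (i = 0; i < 48; i += 6)
		hash ^= (v >> i) & 0x3f;

	return hash;	/* 0..63: bit 'hash' of GA_0 (low half) or GA_1 (high half) */
}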
1638 | |||
1639 | void | ||
1640 | e100_hardware_send_packet(struct net_local *np, char *buf, int length) | ||
1641 | { | ||
1642 | D(printk("e100 send packet, buf %p len %d\n", buf, length)); | ||
1643 | |||
1644 | spin_lock(&np->led_lock); | ||
1645 | if (!led_active && time_after(jiffies, led_next_time)) { | ||
1646 | /* light the network leds depending on the current speed. */ | ||
1647 | e100_set_network_leds(NETWORK_ACTIVITY); | ||
1648 | |||
1649 | /* Set the earliest time we may clear the LED */ | ||
1650 | led_next_time = jiffies + NET_FLASH_TIME; | ||
1651 | led_active = 1; | ||
1652 | mod_timer(&clear_led_timer, jiffies + HZ/10); | ||
1653 | } | ||
1654 | spin_unlock(&np->led_lock); | ||
1655 | |||
1656 | /* configure the tx dma descriptor */ | ||
1657 | myNextTxDesc->descr.sw_len = length; | ||
1658 | myNextTxDesc->descr.ctrl = d_eop | d_eol | d_wait; | ||
1659 | myNextTxDesc->descr.buf = virt_to_phys(buf); | ||
1660 | |||
1661 | /* Move end of list */ | ||
1662 | myLastTxDesc->descr.ctrl &= ~d_eol; | ||
1663 | myLastTxDesc = myNextTxDesc; | ||
1664 | |||
1665 | /* Restart DMA channel */ | ||
1666 | *R_DMA_CH0_CMD = IO_STATE(R_DMA_CH0_CMD, cmd, restart); | ||
1667 | } | ||
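The transmit path above publishes a frame by marking the new descriptor as the end of the list before clearing the end-of-list bit on the previous tail, so the DMA engine never follows a link to an unfinished descriptor; the channel is then simply restarted. The same handoff in generic form (struct descr and the eol flag are placeholders, not the ETRAX definitions):

struct descr {                    /* placeholder, not etrax_dma_descr */
	unsigned long buf;        /* physical address of the frame */
	unsigned short sw_len;    /* frame length */
	unsigned short ctrl;      /* flag bits, including end-of-list */
};

static void publish_tx(struct descr **tail, struct descr *next,
		       unsigned long phys, unsigned short len,
		       unsigned short eol)
{
	next->sw_len = len;
	next->buf = phys;
	next->ctrl |= eol;        /* new descriptor terminates the list... */
	(*tail)->ctrl &= ~eol;    /* ...before the old tail is opened up */
	*tail = next;             /* caller then restarts the DMA channel */
}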
1668 | |||
1669 | static void | ||
1670 | e100_clear_network_leds(struct timer_list *unused) | ||
1671 | { | ||
1672 | struct net_device *dev = timer_dev; | ||
1673 | struct net_local *np = netdev_priv(dev); | ||
1674 | |||
1675 | spin_lock(&np->led_lock); | ||
1676 | |||
1677 | if (led_active && time_after(jiffies, led_next_time)) { | ||
1678 | e100_set_network_leds(NO_NETWORK_ACTIVITY); | ||
1679 | |||
1680 | /* Set the earliest time we may set the LED */ | ||
1681 | led_next_time = jiffies + NET_FLASH_PAUSE; | ||
1682 | led_active = 0; | ||
1683 | } | ||
1684 | |||
1685 | spin_unlock(&np->led_lock); | ||
1686 | } | ||
1687 | |||
1688 | static void | ||
1689 | e100_set_network_leds(int active) | ||
1690 | { | ||
1691 | #if defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK) | ||
1692 | int light_leds = (active == NO_NETWORK_ACTIVITY); | ||
1693 | #elif defined(CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY) | ||
1694 | int light_leds = (active == NETWORK_ACTIVITY); | ||
1695 | #else | ||
1696 | #error "Define either CONFIG_ETRAX_NETWORK_LED_ON_WHEN_LINK or CONFIG_ETRAX_NETWORK_LED_ON_WHEN_ACTIVITY" | ||
1697 | #endif | ||
1698 | |||
1699 | if (!current_speed) { | ||
1700 | /* Link is down, turn the network LED off */ | ||
1701 | CRIS_LED_NETWORK_SET(CRIS_LED_OFF); | ||
1702 | } else if (light_leds) { | ||
1703 | if (current_speed == 10) { | ||
1704 | CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE); | ||
1705 | } else { | ||
1706 | CRIS_LED_NETWORK_SET(CRIS_LED_GREEN); | ||
1707 | } | ||
1708 | } else { | ||
1709 | CRIS_LED_NETWORK_SET(CRIS_LED_OFF); | ||
1710 | } | ||
1711 | } | ||
1712 | |||
1713 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1714 | static void | ||
1715 | e100_netpoll(struct net_device *netdev) | ||
1716 | { | ||
1717 | e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev); | ||
1718 | } | ||
1719 | #endif | ||
1720 | |||
1721 | |||
1722 | static int __init | ||
1723 | e100_boot_setup(char *str) | ||
1724 | { | ||
1725 | struct sockaddr sa = {0}; | ||
1726 | int i; | ||
1727 | |||
1728 | /* Parse the colon-separated Ethernet station address */ | ||
1729 | for (i = 0; i < ETH_ALEN; i++) { | ||
1730 | unsigned int tmp; | ||
1731 | if (sscanf(str + 3*i, "%2x", &tmp) != 1) { | ||
1732 | printk(KERN_WARNING "Malformed station address\n"); | ||
1733 | return 0; | ||
1734 | } | ||
1735 | sa.sa_data[i] = (char)tmp; | ||
1736 | } | ||
1737 | |||
1738 | default_mac = sa; | ||
1739 | return 1; | ||
1740 | } | ||
1741 | |||
1742 | __setup("etrax100_eth=", e100_boot_setup); | ||
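The __setup() hook registers "etrax100_eth=" as a kernel command-line parameter, so the station address can be supplied at boot as six colon-separated hex bytes, matching the three-character stride of the sscanf() loop above, for example:

	etrax100_eth=00:11:22:33:44:55

The address shown is purely illustrative.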