author    Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 18:20:36 -0400
commit    1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/arm/ether00.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/arm/ether00.c')
-rw-r--r--  drivers/net/arm/ether00.c | 1017
1 file changed, 1017 insertions(+), 0 deletions(-)
diff --git a/drivers/net/arm/ether00.c b/drivers/net/arm/ether00.c
new file mode 100644
index 000000000000..4f1f4e31bda5
--- /dev/null
+++ b/drivers/net/arm/ether00.c
@@ -0,0 +1,1017 @@
/*
 * drivers/net/arm/ether00.c
 *
 * Copyright (C) 2001 Altera Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/* includes */
#include <linux/config.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/tqueue.h>
#include <linux/mtd/mtd.h>
#include <linux/pld/pld_hotswap.h>
#include <asm/arch/excalibur.h>
#include <asm/arch/hardware.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sizes.h>

#include <asm/arch/ether00.h>
#include <asm/arch/tdkphy.h>


MODULE_AUTHOR("Clive Davies");
MODULE_DESCRIPTION("Altera Ether00 IP core driver");
MODULE_LICENSE("GPL");

#define PKT_BUF_SZ 1540 /* Size of each rx buffer */
#define ETH_NR 4        /* Number of MACs this driver supports */

#define DEBUG(x)

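/*
 * Translate between the CPU's ioremap()ed view of the on-chip scratch
 * SRAM (priv->dma_data) and the physical addresses the MAC's DMA engine
 * uses for its descriptors and buffer lists.
 */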
#define __dma_va(x) (unsigned int)((unsigned int)priv->dma_data+(((unsigned int)(x))&(EXC_SPSRAM_BLOCK0_SIZE-1)))
#define __dma_pa(x) (unsigned int)(EXC_SPSRAM_BLOCK0_BASE+(((unsigned int)(x))-(unsigned int)priv->dma_data))

#define ETHER00_BASE 0
#define ETHER00_TYPE
#define ETHER00_NAME "ether00"
#define MAC_REG_SIZE 0x400 /* size of MAC register area */



/* typedefs */

/* The definition of the driver control structure */

#define RX_NUM_BUFF	10
#define RX_NUM_FDESC	10
#define TX_NUM_FDESC	10

struct tx_fda_ent{
	FDA_DESC fd;
	BUF_DESC bd;
	BUF_DESC pad;
};
struct rx_fda_ent{
	FDA_DESC fd;
	BUF_DESC bd;
	BUF_DESC pad;
};
struct rx_blist_ent{
	FDA_DESC fd;
	BUF_DESC bd;
	BUF_DESC pad;
};
struct net_priv
{
	struct net_device_stats stats;
	struct sk_buff* skb;
	void* dma_data;
	struct rx_blist_ent* rx_blist_vp;
	struct rx_fda_ent* rx_fda_ptr;
	struct tx_fda_ent* tx_fdalist_vp;
	struct tq_struct tq_memupdate;
	unsigned char memupdate_scheduled;
	unsigned char rx_disabled;
	unsigned char queue_stopped;
	spinlock_t rx_lock;
};

static const char vendor_id[2]={0x07,0xed};

#ifdef ETHER00_DEBUG

/* Dump (most) registers for debugging purposes */

static void dump_regs(struct net_device *dev){
	struct net_priv* priv=dev->priv;
	unsigned int* i;

	printk("\n RX free descriptor area:\n");

	for(i=(unsigned int*)priv->rx_fda_ptr;
	    i<((unsigned int*)(priv->rx_fda_ptr+RX_NUM_FDESC));){
		printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
		i+=4;
	}

	printk("\n RX buffer list:\n");

	for(i=(unsigned int*)priv->rx_blist_vp;
	    i<((unsigned int*)(priv->rx_blist_vp+RX_NUM_BUFF));){
		printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
		i+=4;
	}

	printk("\n TX frame descriptor list:\n");

	for(i=(unsigned int*)priv->tx_fdalist_vp;
	    i<((unsigned int*)(priv->tx_fdalist_vp+TX_NUM_FDESC));){
		printk("%#8x %#8x %#8x %#8x\n",*i,*(i+1),*(i+2),*(i+3));
		i+=4;
	}

	printk("\ndma ctl=%#x\n",readw(ETHER_DMA_CTL(dev->base_addr)));
	printk("txfrmptr=%#x\n",readw(ETHER_TXFRMPTR(dev->base_addr)));
	printk("txthrsh=%#x\n",readw(ETHER_TXTHRSH(dev->base_addr)));
	printk("txpollctr=%#x\n",readw(ETHER_TXPOLLCTR(dev->base_addr)));
	printk("blfrmptr=%#x\n",readw(ETHER_BLFRMPTR(dev->base_addr)));
	printk("rxfragsize=%#x\n",readw(ETHER_RXFRAGSIZE(dev->base_addr)));
	printk("tx_int_en=%#x\n",readw(ETHER_INT_EN(dev->base_addr)));
	printk("fda_bas=%#x\n",readw(ETHER_FDA_BAS(dev->base_addr)));
	printk("fda_lim=%#x\n",readw(ETHER_FDA_LIM(dev->base_addr)));
	printk("int_src=%#x\n",readw(ETHER_INT_SRC(dev->base_addr)));
	printk("pausecnt=%#x\n",readw(ETHER_PAUSECNT(dev->base_addr)));
	printk("rempaucnt=%#x\n",readw(ETHER_REMPAUCNT(dev->base_addr)));
	printk("txconfrmstat=%#x\n",readw(ETHER_TXCONFRMSTAT(dev->base_addr)));
	printk("mac_ctl=%#x\n",readw(ETHER_MAC_CTL(dev->base_addr)));
	printk("arc_ctl=%#x\n",readw(ETHER_ARC_CTL(dev->base_addr)));
	printk("tx_ctl=%#x\n",readw(ETHER_TX_CTL(dev->base_addr)));
}
#endif /* ETHER00_DEBUG */


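/*
 * Write a value to a PHY register via the MAC's MII management
 * interface, busy-waiting (with a bounded count) for the access
 * to complete.
 */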
static int ether00_write_phy(struct net_device *dev, short address, short value)
{
	volatile int count = 1024;
	writew(value,ETHER_MD_DATA(dev->base_addr));
	writew( ETHER_MD_CA_BUSY_MSK |
		ETHER_MD_CA_WR_MSK |
		(address & ETHER_MD_CA_ADDR_MSK),
		ETHER_MD_CA(dev->base_addr));

	/* Wait for the command to complete */
	while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
		count--;
	}
	if (!count){
		printk(KERN_WARNING "Write to phy failed, addr=%#x, data=%#x\n",address, value);
		return -EIO;
	}
	return 0;
}

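/*
 * Read a PHY register via the MII management interface.  Returns the
 * register value, or -EIO if the access does not complete in time.
 */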
static int ether00_read_phy(struct net_device *dev, short address)
{
	volatile int count = 1024;
	writew( ETHER_MD_CA_BUSY_MSK |
		(address & ETHER_MD_CA_ADDR_MSK),
		ETHER_MD_CA(dev->base_addr));

	/* Wait for the command to complete */
	while((readw(ETHER_MD_CA(dev->base_addr)) & ETHER_MD_CA_BUSY_MSK)&&count){
		count--;
	}
	if (!count){
		printk(KERN_WARNING "Read from phy timed out\n");
		return -EIO;
	}
	return readw(ETHER_MD_DATA(dev->base_addr));
}

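/*
 * PHY interrupt handler: on autonegotiation completion, mirror the
 * negotiated duplex setting into the MAC control register; on a link
 * status change, update the carrier state accordingly.
 */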
static void ether00_phy_int(int irq_num, void* dev_id, struct pt_regs* regs)
{
	struct net_device* dev=dev_id;
	int irq_status;

	irq_status=ether00_read_phy(dev, PHY_IRQ_CONTROL);

	if(irq_status & PHY_IRQ_CONTROL_ANEG_COMP_INT_MSK){
		/*
		 * Autonegotiation complete on the epxa10db. The MAC doesn't
		 * work out on its own that we are in full duplex, so we need
		 * to check the PHY status register and configure the MAC
		 * accordingly.
		 */
		if(ether00_read_phy(dev, PHY_STATUS)&(PHY_STATUS_10T_F_MSK|PHY_STATUS_100_X_F_MSK)){
			int tmp;
			tmp=readl(ETHER_MAC_CTL(dev->base_addr));
			writel(tmp|ETHER_MAC_CTL_FULLDUP_MSK,ETHER_MAC_CTL(dev->base_addr));
		}
	}

	if(irq_status&PHY_IRQ_CONTROL_LS_CHG_INT_MSK){

		if(ether00_read_phy(dev, PHY_STATUS)& PHY_STATUS_LINK_MSK){
			/* Link is up */
			netif_carrier_on(dev);
			//printk("Carrier on\n");
		}else{
			netif_carrier_off(dev);
			//printk("Carrier off\n");

		}
	}

}

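/*
 * Attach a freshly allocated skb to a receive buffer-list entry: make
 * the data area cache-consistent for DMA, reserve 2 bytes so the IP
 * header ends up 16-byte aligned, and hand ownership of the frame and
 * buffer descriptors to the MAC.
 */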
static void setup_blist_entry(struct sk_buff* skb,struct rx_blist_ent* blist_ent_ptr){
	/* Make the buffer consistent with the cache, as the MAC is going to
	 * write directly into it */
	blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
	blist_ent_ptr->bd.BuffData=(char*)__pa(skb->data);
	consistent_sync(skb->data,PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
	/* align the IP header on a 16 byte boundary (DMA_CTL is set to skip 2 bytes) */
	skb_reserve(skb,2);
	blist_ent_ptr->bd.BuffLength=PKT_BUF_SZ-2;
	blist_ent_ptr->fd.FDLength=1;
	blist_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;
	blist_ent_ptr->bd.BDCtl=BDCTL_COWNSBD_MSK;
}


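/*
 * Carve the on-chip SRAM block up into the RX free-descriptor area, the
 * RX buffer list and the TX frame-descriptor ring, then point the MAC's
 * descriptor-base registers at them.
 */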
static int ether00_mem_init(struct net_device* dev)
{
	struct net_priv* priv=dev->priv;
	struct tx_fda_ent *tx_fd_ptr,*tx_end_ptr;
	struct rx_blist_ent* blist_ent_ptr;
	int i;

	/*
	 * Grab a block of on-chip SRAM to contain the control structures for
	 * the ethernet MAC. This is uncached because it needs to be accessed
	 * by both bus masters (CPU + MAC). However, it shouldn't matter too
	 * much in terms of speed as it's on-chip memory.
	 */
	priv->dma_data=ioremap_nocache(EXC_SPSRAM_BLOCK0_BASE,EXC_SPSRAM_BLOCK0_SIZE );
	if (!priv->dma_data)
		return -ENOMEM;

	priv->rx_fda_ptr=(struct rx_fda_ent*)priv->dma_data;
	/*
	 * Now share it out amongst the Frame descriptors and the buffer list
	 */
	priv->rx_blist_vp=(struct rx_blist_ent*)((unsigned int)priv->dma_data+RX_NUM_FDESC*sizeof(struct rx_fda_ent));

	/*
	 * Initialise the FDA list
	 */
	/* set ownership to the controller */
	memset(priv->rx_fda_ptr,0x80,RX_NUM_FDESC*sizeof(struct rx_fda_ent));

	/*
	 * Initialise the buffer list
	 */
	blist_ent_ptr=priv->rx_blist_vp;
	i=0;
	while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
		struct sk_buff *skb;
		blist_ent_ptr->fd.FDLength=1;
		skb=dev_alloc_skb(PKT_BUF_SZ);
		if(skb){
			setup_blist_entry(skb,blist_ent_ptr);
			blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(blist_ent_ptr+1);
			blist_ent_ptr->bd.BDStat=i++;
			blist_ent_ptr++;
		}
		else
		{
			printk("Failed to initialise buffer list\n");
		}

	}
	blist_ent_ptr--;
	blist_ent_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->rx_blist_vp);

	priv->tx_fdalist_vp=(struct tx_fda_ent*)(priv->rx_blist_vp+RX_NUM_BUFF);

	/* Initialise the buffers to be a circular list. The MAC will then poll
	 * the list until it finds a frame ready to transmit */
	tx_end_ptr=priv->tx_fdalist_vp+TX_NUM_FDESC;
	for(tx_fd_ptr=priv->tx_fdalist_vp;tx_fd_ptr<tx_end_ptr;tx_fd_ptr++){
		tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa((tx_fd_ptr+1));
		tx_fd_ptr->fd.FDCtl=1;
		tx_fd_ptr->fd.FDStat=0;
		tx_fd_ptr->fd.FDLength=1;

	}
	/* Change the last FDNext pointer to make a circular list */
	tx_fd_ptr--;
	tx_fd_ptr->fd.FDNext=(FDA_DESC*)__dma_pa(priv->tx_fdalist_vp);

	/* Point the device at the chain of Rx and Tx Buffers */
	writel((unsigned int)__dma_pa(priv->rx_fda_ptr),ETHER_FDA_BAS(dev->base_addr));
	writel((RX_NUM_FDESC-1)*sizeof(struct rx_fda_ent),ETHER_FDA_LIM(dev->base_addr));
	writel((unsigned int)__dma_pa(priv->rx_blist_vp),ETHER_BLFRMPTR(dev->base_addr));

	writel((unsigned int)__dma_pa(priv->tx_fdalist_vp),ETHER_TXFRMPTR(dev->base_addr));

	return 0;
}


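/*
 * Deferred (task-queue) housekeeping: reclaim skbs for completed
 * transmit descriptors, refill any receive buffers that could not be
 * allocated from interrupt context, and restart the transmit queue
 * and/or receiver if they were stalled for lack of buffers.
 */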
void ether00_mem_update(void* dev_id)
{
	struct net_device* dev=dev_id;
	struct net_priv* priv=dev->priv;
	struct sk_buff* skb;
	struct tx_fda_ent *fda_ptr=priv->tx_fdalist_vp;
	struct rx_blist_ent* blist_ent_ptr;
	unsigned long flags;

	priv->tq_memupdate.sync=0;
	//priv->tq_memupdate.list=
	priv->memupdate_scheduled=0;

	/* Reclaim buffers from completed transmits */
	while(fda_ptr<(priv->tx_fdalist_vp+TX_NUM_FDESC)){
		if(!(FDCTL_COWNSFD_MSK&fda_ptr->fd.FDCtl) && (ETHER_TX_STAT_COMP_MSK&fda_ptr->fd.FDStat)){
			priv->stats.tx_packets++;
			priv->stats.tx_bytes+=fda_ptr->bd.BuffLength;
			skb=(struct sk_buff*)fda_ptr->fd.FDSystem;
			//printk("%d:txcln:fda=%#x skb=%#x\n",jiffies,fda_ptr,skb);
			dev_kfree_skb(skb);
			fda_ptr->fd.FDSystem=0;
			fda_ptr->fd.FDStat=0;
			fda_ptr->fd.FDCtl=0;
		}
		fda_ptr++;
	}
	/* Fill in any missing buffers in the receive buffer list */
	spin_lock_irqsave(&priv->rx_lock,flags);
	blist_ent_ptr=priv->rx_blist_vp;
	while(blist_ent_ptr<(priv->rx_blist_vp+RX_NUM_BUFF)){
		/* fd.FDSystem of 0 indicates we failed to allocate the buffer in the ISR */
		if(!blist_ent_ptr->fd.FDSystem){
			struct sk_buff *skb;
			skb=dev_alloc_skb(PKT_BUF_SZ);
			blist_ent_ptr->fd.FDSystem=(unsigned int)skb;
			if(skb){
				setup_blist_entry(skb,blist_ent_ptr);
			}
			else
			{
				break;
			}
		}
		blist_ent_ptr++;
	}
	spin_unlock_irqrestore(&priv->rx_lock,flags);
	if(priv->queue_stopped){
		//printk("%d:cln:start q\n",jiffies);
		netif_start_queue(dev);
	}
	if(priv->rx_disabled){
		//printk("%d:enable_irq\n",jiffies);
		priv->rx_disabled=0;
		writel(ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));

	}
}


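/*
 * Main MAC interrupt handler.  Receive-related interrupts walk the RX
 * free-descriptor area and push completed frames up the stack; transmit
 * completion is deferred to ether00_mem_update() via the task queue.
 */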
static void ether00_int( int irq_num, void* dev_id, struct pt_regs* regs)
{
	struct net_device* dev=dev_id;
	struct net_priv* priv=dev->priv;

	unsigned int interruptValue;

	interruptValue=readl(ETHER_INT_SRC(dev->base_addr));

	//printk("INT_SRC=%x\n",interruptValue);

	if(!(readl(ETHER_INT_SRC(dev->base_addr)) & ETHER_INT_SRC_IRQ_MSK))
	{
		return; /* Interrupt wasn't caused by us!! */
	}

	if(readl(ETHER_INT_SRC(dev->base_addr))&
	   (ETHER_INT_SRC_INTMACRX_MSK |
	    ETHER_INT_SRC_FDAEX_MSK |
	    ETHER_INT_SRC_BLEX_MSK)) {
		struct rx_blist_ent* blist_ent_ptr;
		struct rx_fda_ent* fda_ent_ptr;
		struct sk_buff* skb;

		fda_ent_ptr=priv->rx_fda_ptr;
		spin_lock(&priv->rx_lock);
		while(fda_ent_ptr<(priv->rx_fda_ptr+RX_NUM_FDESC)){
			int result;

			if(!(fda_ent_ptr->fd.FDCtl&FDCTL_COWNSFD_MSK))
			{
				/* This frame is ready for processing */
				/* find the corresponding buffer in the buffer list */
				blist_ent_ptr=priv->rx_blist_vp+fda_ent_ptr->bd.BDStat;
				skb=(struct sk_buff*)blist_ent_ptr->fd.FDSystem;

				/* Pass this skb up the stack */
				skb->dev=dev;
				skb_put(skb,fda_ent_ptr->fd.FDLength);
				skb->protocol=eth_type_trans(skb,dev);
				skb->ip_summed=CHECKSUM_UNNECESSARY;
				result=netif_rx(skb);
				/* Update statistics */
				priv->stats.rx_packets++;
				priv->stats.rx_bytes+=fda_ent_ptr->fd.FDLength;

				/* Free the FDA entry */
				fda_ent_ptr->bd.BDStat=0xff;
				fda_ent_ptr->fd.FDCtl=FDCTL_COWNSFD_MSK;

				/* Allocate a new skb and point the bd entry to it */
				blist_ent_ptr->fd.FDSystem=0;
				skb=dev_alloc_skb(PKT_BUF_SZ);
				//printk("allocskb=%#x\n",skb);
				if(skb){
					setup_blist_entry(skb,blist_ent_ptr);

				}
				else if(!priv->memupdate_scheduled){
					int tmp;
					/* There are no buffers at the moment, so schedule */
					/* the background task to sort this out */
					schedule_task(&priv->tq_memupdate);
					priv->memupdate_scheduled=1;
					printk(KERN_DEBUG "%s:No buffers",dev->name);
					/* If this interrupt was due to a lack of buffers then
					 * we'd better stop the receiver too */
					if(interruptValue&ETHER_INT_SRC_BLEX_MSK){
						priv->rx_disabled=1;
						tmp=readl(ETHER_RX_CTL(dev->base_addr));
						writel(tmp&~ETHER_RX_CTL_RXEN_MSK,ETHER_RX_CTL(dev->base_addr));
						printk(KERN_DEBUG "%s:Halting rx",dev->name);
					}

				}

			}
			fda_ent_ptr++;
		}
		spin_unlock(&priv->rx_lock);

		/* Clear the interrupts */
		writel(ETHER_INT_SRC_INTMACRX_MSK | ETHER_INT_SRC_FDAEX_MSK
		       | ETHER_INT_SRC_BLEX_MSK,ETHER_INT_SRC(dev->base_addr));

	}

	if(readl(ETHER_INT_SRC(dev->base_addr))&ETHER_INT_SRC_INTMACTX_MSK){

		if(!priv->memupdate_scheduled){
			schedule_task(&priv->tq_memupdate);
			priv->memupdate_scheduled=1;
		}
		/* Clear the interrupt */
		writel(ETHER_INT_SRC_INTMACTX_MSK,ETHER_INT_SRC(dev->base_addr));
	}

	if (readl(ETHER_INT_SRC(dev->base_addr)) & (ETHER_INT_SRC_SWINT_MSK|
						    ETHER_INT_SRC_INTEARNOT_MSK|
						    ETHER_INT_SRC_INTLINK_MSK|
						    ETHER_INT_SRC_INTEXBD_MSK|
						    ETHER_INT_SRC_INTTXCTLCMP_MSK))
	{
		/*
		 * Not using any of these, so they shouldn't happen.
		 *
		 * In the case of INTEXBD: if you allocate more
		 * than 28 descriptors you may need to think about this
		 */
		printk("Not using this interrupt\n");
	}

	if (readl(ETHER_INT_SRC(dev->base_addr)) &
	    (ETHER_INT_SRC_INTSBUS_MSK |
	     ETHER_INT_SRC_INTNRABT_MSK
	     |ETHER_INT_SRC_DMPARERR_MSK))
	{
		/*
		 * Hardware errors: we could either ignore them and hope they
		 * go away, or reset the device. Try the former for now and
		 * see whether they actually happen.
		 */
		printk("Hardware error\n");
	}
}

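/*
 * Program the interface's MAC address into the first entry of the MAC's
 * address recognition (ARC) table and enable that entry.
 */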
static void ether00_setup_ethernet_address(struct net_device* dev)
{
	int tmp;

	dev->addr_len=6;
	writew(0,ETHER_ARC_ADR(dev->base_addr));
	writel((dev->dev_addr[0]<<24) |
	       (dev->dev_addr[1]<<16) |
	       (dev->dev_addr[2]<<8) |
	       dev->dev_addr[3],
	       ETHER_ARC_DATA(dev->base_addr));

	writew(4,ETHER_ARC_ADR(dev->base_addr));
	tmp=readl(ETHER_ARC_DATA(dev->base_addr));
	tmp&=0xffff;
	tmp|=(dev->dev_addr[4]<<24) | (dev->dev_addr[5]<<16);
	writel(tmp, ETHER_ARC_DATA(dev->base_addr));
	/* Enable this entry in the ARC */

	writel(1,ETHER_ARC_ENA(dev->base_addr));

	return;
}


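/*
 * Put the MAC into a known quiescent state: reset the controller, halt
 * the transmitter and receiver, mask DMA interrupts, and set up the
 * burst size, receive alignment and transmit threshold/poll counter.
 */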
static void ether00_reset(struct net_device *dev)
{
	/* reset the controller */
	writew(ETHER_MAC_CTL_RESET_MSK,ETHER_MAC_CTL(dev->base_addr));

	/*
	 * Make sure we're not going to send anything
	 */

	writew(ETHER_TX_CTL_TXHALT_MSK,ETHER_TX_CTL(dev->base_addr));

	/*
	 * Make sure we're not going to receive anything
	 */
	writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));

	/*
	 * Disable Interrupts for now, and set the burst size to 8 bytes
	 */

	writel(ETHER_DMA_CTL_INTMASK_MSK |
	       ((8 << ETHER_DMA_CTL_DMBURST_OFST) & ETHER_DMA_CTL_DMBURST_MSK)
	       |(2<<ETHER_DMA_CTL_RXALIGN_OFST),
	       ETHER_DMA_CTL(dev->base_addr));


	/*
	 * Set TxThrsh - start transmitting a packet after 1514
	 * bytes or when a packet is complete, whichever comes first
	 */
	writew(1514,ETHER_TXTHRSH(dev->base_addr));

	/*
	 * Set TxPollCtr. Each cycle is
	 * 61.44 microseconds with a 33 MHz bus
	 */
	writew(1,ETHER_TXPOLLCTR(dev->base_addr));

	/*
	 * Set Rx_Ctl - Turn off reception and let RxData turn it
	 * on later
	 */
	writew(ETHER_RX_CTL_RXHALT_MSK,ETHER_RX_CTL(dev->base_addr));

}


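/*
 * Configure the address recognition (ARC) block for the current receive
 * filter: promiscuous, all-multicast, or a specific list of multicast
 * addresses packed into the ARC table after the station address.
 */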
static void ether00_set_multicast(struct net_device* dev)
{
	int count=dev->mc_count;

	/* Set promiscuous mode if it's asked for. */

	if (dev->flags&IFF_PROMISC){

		writew( ETHER_ARC_CTL_COMPEN_MSK |
			ETHER_ARC_CTL_BROADACC_MSK |
			ETHER_ARC_CTL_GROUPACC_MSK |
			ETHER_ARC_CTL_STATIONACC_MSK,
			ETHER_ARC_CTL(dev->base_addr));
		return;
	}

	/*
	 * Get all multicast packets if required, or if there are too
	 * many addresses to fit in hardware
	 */
	if (dev->flags & IFF_ALLMULTI){
		writew( ETHER_ARC_CTL_COMPEN_MSK |
			ETHER_ARC_CTL_GROUPACC_MSK |
			ETHER_ARC_CTL_BROADACC_MSK,
			ETHER_ARC_CTL(dev->base_addr));
		return;
	}
	if (dev->mc_count > (ETHER_ARC_SIZE - 1)){

		printk(KERN_WARNING "Too many multicast addresses for hardware to filter - receiving all multicast packets\n");
		writew( ETHER_ARC_CTL_COMPEN_MSK |
			ETHER_ARC_CTL_GROUPACC_MSK |
			ETHER_ARC_CTL_BROADACC_MSK,
			ETHER_ARC_CTL(dev->base_addr));
		return;
	}

	if(dev->mc_count){
		struct dev_mc_list *mc_list_ent=dev->mc_list;
		unsigned int temp,i;
		DEBUG(printk("mc_count=%d mc_list=%#x\n",dev->mc_count, dev->mc_list));
		DEBUG(printk("mc addr=%02x%02x%02x%02x%02x%02x\n",
			     mc_list_ent->dmi_addr[5],
			     mc_list_ent->dmi_addr[4],
			     mc_list_ent->dmi_addr[3],
			     mc_list_ent->dmi_addr[2],
			     mc_list_ent->dmi_addr[1],
			     mc_list_ent->dmi_addr[0]);)

		/*
		 * The first 6 bytes are the MAC address, so
		 * don't change them!
		 */
		writew(4,ETHER_ARC_ADR(dev->base_addr));
		temp=readl(ETHER_ARC_DATA(dev->base_addr));
		temp&=0xffff0000;

		/* Disable the current multicast stuff */
		writel(1,ETHER_ARC_ENA(dev->base_addr));

		for(;;){
			temp|=mc_list_ent->dmi_addr[1] |
				mc_list_ent->dmi_addr[0]<<8;
			writel(temp,ETHER_ARC_DATA(dev->base_addr));

			i=readl(ETHER_ARC_ADR(dev->base_addr));
			writew(i+4,ETHER_ARC_ADR(dev->base_addr));

			temp=mc_list_ent->dmi_addr[5]|
				mc_list_ent->dmi_addr[4]<<8 |
				mc_list_ent->dmi_addr[3]<<16 |
				mc_list_ent->dmi_addr[2]<<24;
			writel(temp,ETHER_ARC_DATA(dev->base_addr));

			count--;
			if(!mc_list_ent->next || !count){
				break;
			}
			DEBUG(printk("mc_list_next=%#x\n",mc_list_ent->next);)
			mc_list_ent=mc_list_ent->next;


			i=readl(ETHER_ARC_ADR(dev->base_addr));
			writel(i+4,ETHER_ARC_ADR(dev->base_addr));

			temp=mc_list_ent->dmi_addr[3]|
				mc_list_ent->dmi_addr[2]<<8 |
				mc_list_ent->dmi_addr[1]<<16 |
				mc_list_ent->dmi_addr[0]<<24;
			writel(temp,ETHER_ARC_DATA(dev->base_addr));

			i=readl(ETHER_ARC_ADR(dev->base_addr));
			writel(i+4,ETHER_ARC_ADR(dev->base_addr));

			temp=mc_list_ent->dmi_addr[4]<<16 |
				mc_list_ent->dmi_addr[5]<<24;

			writel(temp,ETHER_ARC_DATA(dev->base_addr));

			count--;
			if(!mc_list_ent->next || !count){
				break;
			}
			mc_list_ent=mc_list_ent->next;
		}


		if(count)
			printk(KERN_WARNING "Multicast list size error\n");


		writew( ETHER_ARC_CTL_BROADACC_MSK|
			ETHER_ARC_CTL_COMPEN_MSK,
			ETHER_ARC_CTL(dev->base_addr));

	}

	/* enable the active ARC entries */
	writew((1<<(count+2))-1,ETHER_ARC_ENA(dev->base_addr));
}


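/*
 * Open the interface: claim the MAC and PHY interrupts, reset the MAC,
 * set up the descriptor memory, program the station address and receive
 * filter, kick off autonegotiation, and finally enable the transmitter,
 * receiver and DMA interrupts.
 */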
static int ether00_open(struct net_device* dev)
{
	int result,tmp;
	struct net_priv* priv;

	if (!is_valid_ether_addr(dev->dev_addr))
		return -EINVAL;

	/* Install interrupt handlers */
	result=request_irq(dev->irq,ether00_int,0,"ether00",dev);
	if(result)
		goto open_err1;

	result=request_irq(2,ether00_phy_int,0,"ether00_phy",dev);
	if(result)
		goto open_err2;

	ether00_reset(dev);
	result=ether00_mem_init(dev);
	if(result)
		goto open_err3;


	ether00_setup_ethernet_address(dev);

	ether00_set_multicast(dev);

	result=ether00_write_phy(dev,PHY_CONTROL, PHY_CONTROL_ANEGEN_MSK | PHY_CONTROL_RANEG_MSK);
	if(result)
		goto open_err4;
	result=ether00_write_phy(dev,PHY_IRQ_CONTROL, PHY_IRQ_CONTROL_LS_CHG_IE_MSK |
				 PHY_IRQ_CONTROL_ANEG_COMP_IE_MSK);
	if(result)
		goto open_err4;

	/* Start the device and enable interrupts */
	writew(ETHER_RX_CTL_RXEN_MSK
//	       | ETHER_RX_CTL_STRIPCRC_MSK
	       | ETHER_RX_CTL_ENGOOD_MSK
	       | ETHER_RX_CTL_ENRXPAR_MSK| ETHER_RX_CTL_ENLONGERR_MSK
	       | ETHER_RX_CTL_ENOVER_MSK| ETHER_RX_CTL_ENCRCERR_MSK,
	       ETHER_RX_CTL(dev->base_addr));

	writew(ETHER_TX_CTL_TXEN_MSK|
	       ETHER_TX_CTL_ENEXDEFER_MSK|
	       ETHER_TX_CTL_ENLCARR_MSK|
	       ETHER_TX_CTL_ENEXCOLL_MSK|
	       ETHER_TX_CTL_ENLATECOLL_MSK|
	       ETHER_TX_CTL_ENTXPAR_MSK|
	       ETHER_TX_CTL_ENCOMP_MSK,
	       ETHER_TX_CTL(dev->base_addr));

	tmp=readl(ETHER_DMA_CTL(dev->base_addr));
	writel(tmp&~ETHER_DMA_CTL_INTMASK_MSK,ETHER_DMA_CTL(dev->base_addr));

	return 0;

 open_err4:
	ether00_reset(dev);
 open_err3:
	free_irq(2,dev);
 open_err2:
	free_irq(dev->irq,dev);
 open_err1:
	return result;

}


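/*
 * hard_start_xmit: find a free slot in the TX descriptor ring, write
 * the frame out of the cache, hand the descriptor to the MAC, and stop
 * the queue if the next descriptor is still in use.
 */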
static int ether00_tx(struct sk_buff* skb, struct net_device* dev)
{
	struct net_priv *priv=dev->priv;
	struct tx_fda_ent *fda_ptr;
	int i;


	/*
	 * Find an empty slot in which to stick the frame
	 */
	fda_ptr=(struct tx_fda_ent*)__dma_va(readl(ETHER_TXFRMPTR(dev->base_addr)));
	i=0;
	while(i<TX_NUM_FDESC){
		if (fda_ptr->fd.FDStat||(fda_ptr->fd.FDCtl & FDCTL_COWNSFD_MSK)){
			fda_ptr =(struct tx_fda_ent*) __dma_va((struct tx_fda_ent*)fda_ptr->fd.FDNext);
		}
		else {
			break;
		}
		i++;
	}

	/* Write the skb data back out of the cache */
	consistent_sync(skb->data,skb->len,PCI_DMA_TODEVICE);
	fda_ptr->bd.BuffData=(char*)__pa(skb->data);
	fda_ptr->bd.BuffLength=(unsigned short)skb->len;
	/* Save the pointer to the skb for freeing later */
	fda_ptr->fd.FDSystem=(unsigned int)skb;
	fda_ptr->fd.FDStat=0;
	/* Pass ownership of the buffers to the controller */
	fda_ptr->fd.FDCtl=1;
	fda_ptr->fd.FDCtl|=FDCTL_COWNSFD_MSK;

	/* If the next buffer in the list is full, stop the queue */
	fda_ptr=(struct tx_fda_ent*)__dma_va(fda_ptr->fd.FDNext);
	if ((fda_ptr->fd.FDStat)||(fda_ptr->fd.FDCtl & FDCTL_COWNSFD_MSK)){
		netif_stop_queue(dev);
		priv->queue_stopped=1;
	}

	return 0;
}

static struct net_device_stats *ether00_stats(struct net_device* dev)
{
	struct net_priv *priv=dev->priv;
	return &priv->stats;
}


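/*
 * Stop the interface: halt the receiver and transmitter, then release
 * the interrupts and the descriptor SRAM mapping.
 */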
static int ether00_stop(struct net_device* dev)
{
	struct net_priv *priv=dev->priv;
	int tmp;

	/* Stop/disable the device. */
	tmp=readw(ETHER_RX_CTL(dev->base_addr));
	tmp&=~(ETHER_RX_CTL_RXEN_MSK | ETHER_RX_CTL_ENGOOD_MSK);
	tmp|=ETHER_RX_CTL_RXHALT_MSK;
	writew(tmp,ETHER_RX_CTL(dev->base_addr));

	tmp=readl(ETHER_TX_CTL(dev->base_addr));
	tmp&=~ETHER_TX_CTL_TXEN_MSK;
	tmp|=ETHER_TX_CTL_TXHALT_MSK;
	writel(tmp,ETHER_TX_CTL(dev->base_addr));

	/* Free up system resources */
	free_irq(dev->irq,dev);
	free_irq(2,dev);
	iounmap(priv->dma_data);

	return 0;
}


static void ether00_get_ethernet_address(struct net_device* dev)
{
	struct mtd_info *mymtd=NULL;
	int i;
	size_t retlen;

	/*
	 * For the Epxa10 dev board (camelot), the ethernet MAC
	 * address is of the form 00:aa:aa:00:xx:xx where
	 * 00:aa:aa is the Altera vendor ID and xx:xx is the
	 * last 2 bytes of the board serial number, as programmed
	 * into the OTP area of the flash device on EBI1. If this
	 * isn't an epxa10 dev board, or there's no MTD support to
	 * read the serial number from flash, then the user will
	 * have to set their own MAC address using ifconfig.
	 */

#ifdef CONFIG_ARCH_CAMELOT
#ifdef CONFIG_MTD
	/* get the mtd_info structure for the first mtd device */
	for(i=0;i<MAX_MTD_DEVICES;i++){
		mymtd=get_mtd_device(NULL,i);
		if(!mymtd||!strcmp(mymtd->name,"EPXA10DB flash"))
			break;
	}

	if(!mymtd || !mymtd->read_user_prot_reg){
		printk(KERN_WARNING "%s: Failed to read MAC address from flash\n",dev->name);
	}else{
		mymtd->read_user_prot_reg(mymtd,2,1,&retlen,&dev->dev_addr[5]);
		mymtd->read_user_prot_reg(mymtd,3,1,&retlen,&dev->dev_addr[4]);
		dev->dev_addr[3]=0;
		dev->dev_addr[2]=vendor_id[1];
		dev->dev_addr[1]=vendor_id[0];
		dev->dev_addr[0]=0;
	}
#else
	printk(KERN_WARNING "%s: MTD support required to read MAC address from EPXA10 dev board\n", dev->name);
#endif
#endif

	if (!is_valid_ether_addr(dev->dev_addr))
		printk(KERN_WARNING "%s: Invalid ethernet MAC address. Please set using "
		       "ifconfig\n", dev->name);

}

/*
 * Keep a mapping of dev_info addresses -> port lines to use when
 * removing ports. dev==NULL indicates an unused entry.
 */


static struct net_device* dev_list[ETH_NR];

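/*
 * PLD hotswap "add_device" callback: allocate and register a net_device
 * for a newly reported ether00 MAC, map its register window, and record
 * it in dev_list[] so it can be torn down again later.
 */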
static int ether00_add_device(struct pldhs_dev_info* dev_info,void* dev_ps_data)
{
	struct net_device *dev;
	struct net_priv *priv;
	void *map_addr;
	int result;
	int i;

	i=0;
	while(i < ETH_NR && dev_list[i])
		i++;

	if(i==ETH_NR){
		printk(KERN_WARNING "ether00: Maximum number of ports reached\n");
		return 0;
	}


	if (!request_mem_region(dev_info->base_addr, MAC_REG_SIZE, "ether00"))
		return -EBUSY;

	dev = alloc_etherdev(sizeof(struct net_priv));
	if(!dev) {
		result = -ENOMEM;
		goto out_release;
	}
	priv = dev->priv;

	priv->tq_memupdate.routine=ether00_mem_update;
	priv->tq_memupdate.data=(void*) dev;

	spin_lock_init(&priv->rx_lock);

	map_addr=ioremap_nocache(dev_info->base_addr,SZ_4K);
	if(!map_addr){
		result = -ENOMEM;
		goto out_kfree;
	}

	dev->open=ether00_open;
	dev->stop=ether00_stop;
	dev->set_multicast_list=ether00_set_multicast;
	dev->hard_start_xmit=ether00_tx;
	dev->get_stats=ether00_stats;

	ether00_get_ethernet_address(dev);

	SET_MODULE_OWNER(dev);

	dev->base_addr=(unsigned int)map_addr;
	dev->irq=dev_info->irq;
	dev->features=NETIF_F_DYNALLOC | NETIF_F_HW_CSUM;

	result=register_netdev(dev);
	if(result){
		printk("Ether00: Error %i registering driver\n",result);
		goto out_unmap;
	}
	printk("registered ether00 device at %#x\n",dev_info->base_addr);

	dev_list[i]=dev;

	return result;

 out_unmap:
	iounmap(map_addr);
 out_kfree:
	free_netdev(dev);
 out_release:
	release_mem_region(dev_info->base_addr, MAC_REG_SIZE);
	return result;
}


static int ether00_remove_devices(void)
{
	int i;

	for(i=0;i<ETH_NR;i++){
		if(dev_list[i]){
			netif_device_detach(dev_list[i]);
			unregister_netdev(dev_list[i]);
			iounmap((void*)dev_list[i]->base_addr);
			release_mem_region(dev_list[i]->base_addr, MAC_REG_SIZE);
			free_netdev(dev_list[i]);
			dev_list[i]=0;
		}
	}
	return 0;
}

static struct pld_hotswap_ops ether00_pldhs_ops={
	.name = ETHER00_NAME,
	.add_device = ether00_add_device,
	.remove_devices = ether00_remove_devices,
};


static void __exit ether00_cleanup_module(void)
{
	int result;
	result=ether00_remove_devices();
	if(result)
		printk(KERN_WARNING "ether00: failed to remove all devices\n");

	pldhs_unregister_driver(ETHER00_NAME);
}
module_exit(ether00_cleanup_module);


static int __init ether00_mod_init(void)
{
	printk("mod init\n");
	return pldhs_register_driver(&ether00_pldhs_ops);

}

module_init(ether00_mod_init);
