Diffstat (limited to 'drivers/net/fec.c')
-rw-r--r-- | drivers/net/fec.c | 901
1 file changed, 377 insertions(+), 524 deletions(-)
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 682e7f0b5581..0f19b743749b 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -86,8 +86,7 @@ static unsigned char fec_mac_default[] = { | |||
86 | #endif | 86 | #endif |
87 | #endif /* CONFIG_M5272 */ | 87 | #endif /* CONFIG_M5272 */ |
88 | 88 | ||
89 | /* Forward declarations of some structures to support different PHYs | 89 | /* Forward declarations of some structures to support different PHYs */ |
90 | */ | ||
91 | 90 | ||
92 | typedef struct { | 91 | typedef struct { |
93 | uint mii_data; | 92 | uint mii_data; |
@@ -123,8 +122,7 @@ typedef struct { | |||
123 | #error "FEC: descriptor ring size constants too large" | 122 | #error "FEC: descriptor ring size constants too large" |
124 | #endif | 123 | #endif |
125 | 124 | ||
126 | /* Interrupt events/masks. | 125 | /* Interrupt events/masks. */ |
127 | */ | ||
128 | #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ | 126 | #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ |
129 | #define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ | 127 | #define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */ |
130 | #define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ | 128 | #define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */ |
@@ -165,7 +163,7 @@ typedef struct { | |||
165 | */ | 163 | */ |
166 | struct fec_enet_private { | 164 | struct fec_enet_private { |
167 | /* Hardware registers of the FEC device */ | 165 | /* Hardware registers of the FEC device */ |
168 | volatile fec_t *hwp; | 166 | void __iomem *hwp; |
169 | 167 | ||
170 | struct net_device *netdev; | 168 | struct net_device *netdev; |
171 | 169 | ||
@@ -174,16 +172,20 @@ struct fec_enet_private { | |||
174 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | 172 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ |
175 | unsigned char *tx_bounce[TX_RING_SIZE]; | 173 | unsigned char *tx_bounce[TX_RING_SIZE]; |
176 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; | 174 | struct sk_buff* tx_skbuff[TX_RING_SIZE]; |
175 | struct sk_buff* rx_skbuff[RX_RING_SIZE]; | ||
177 | ushort skb_cur; | 176 | ushort skb_cur; |
178 | ushort skb_dirty; | 177 | ushort skb_dirty; |
179 | 178 | ||
180 | /* CPM dual port RAM relative addresses. | 179 | /* CPM dual port RAM relative addresses */ |
181 | */ | ||
182 | dma_addr_t bd_dma; | 180 | dma_addr_t bd_dma; |
183 | cbd_t *rx_bd_base; /* Address of Rx and Tx buffers. */ | 181 | /* Address of Rx and Tx buffers */ |
184 | cbd_t *tx_bd_base; | 182 | struct bufdesc *rx_bd_base; |
185 | cbd_t *cur_rx, *cur_tx; /* The next free ring entry */ | 183 | struct bufdesc *tx_bd_base; |
186 | cbd_t *dirty_tx; /* The ring entries to be free()ed. */ | 184 | /* The next free ring entry */ |
185 | struct bufdesc *cur_rx, *cur_tx; | ||
186 | /* The ring entries to be free()ed */ | ||
187 | struct bufdesc *dirty_tx; | ||
188 | |||
187 | uint tx_full; | 189 | uint tx_full; |
188 | /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ | 190 | /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ |
189 | spinlock_t hw_lock; | 191 | spinlock_t hw_lock; |
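
The central change in this hunk: the register file is no longer a volatile fec_t * struct overlay but an opaque void __iomem * cookie, accessed through readl()/writel() at register offsets. A minimal sketch of the resulting access pattern, assuming the FEC_IEVENT offset from the driver's fec.h (0x004 on most FEC variants; the M5272 layout differs):

	#include <linux/io.h>

	/* Illustrative offset; the real values live in drivers/net/fec.h */
	#define FEC_IEVENT	0x004

	static u32 fec_ack_events(void __iomem *hwp)
	{
		/* readl/writel give defined MMIO ordering and byte order,
		 * which the old volatile struct dereferences only provided
		 * by accident of compiler and architecture.
		 */
		u32 events = readl(hwp + FEC_IEVENT);

		writel(events, hwp + FEC_IEVENT);	/* IEVENT is write-1-to-clear */
		return events;
	}

This is exactly the shape of the converted interrupt handler below: read the event register, write the value back to acknowledge, then dispatch.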
@@ -209,17 +211,13 @@ struct fec_enet_private { | |||
209 | int full_duplex; | 211 | int full_duplex; |
210 | }; | 212 | }; |
211 | 213 | ||
212 | static int fec_enet_open(struct net_device *dev); | ||
213 | static int fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev); | ||
214 | static void fec_enet_mii(struct net_device *dev); | 214 | static void fec_enet_mii(struct net_device *dev); |
215 | static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); | 215 | static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); |
216 | static void fec_enet_tx(struct net_device *dev); | 216 | static void fec_enet_tx(struct net_device *dev); |
217 | static void fec_enet_rx(struct net_device *dev); | 217 | static void fec_enet_rx(struct net_device *dev); |
218 | static int fec_enet_close(struct net_device *dev); | 218 | static int fec_enet_close(struct net_device *dev); |
219 | static void set_multicast_list(struct net_device *dev); | ||
220 | static void fec_restart(struct net_device *dev, int duplex); | 219 | static void fec_restart(struct net_device *dev, int duplex); |
221 | static void fec_stop(struct net_device *dev); | 220 | static void fec_stop(struct net_device *dev); |
222 | static void fec_set_mac_address(struct net_device *dev); | ||
223 | 221 | ||
224 | 222 | ||
225 | /* MII processing. We keep this as simple as possible. Requests are | 223 | /* MII processing. We keep this as simple as possible. Requests are |
@@ -241,19 +239,16 @@ static mii_list_t *mii_tail; | |||
241 | static int mii_queue(struct net_device *dev, int request, | 239 | static int mii_queue(struct net_device *dev, int request, |
242 | void (*func)(uint, struct net_device *)); | 240 | void (*func)(uint, struct net_device *)); |
243 | 241 | ||
244 | /* Make MII read/write commands for the FEC. | 242 | /* Make MII read/write commands for the FEC */ |
245 | */ | ||
246 | #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) | 243 | #define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18)) |
247 | #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \ | 244 | #define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \ |
248 | (VAL & 0xffff)) | 245 | (VAL & 0xffff)) |
249 | #define mk_mii_end 0 | 246 | #define mk_mii_end 0 |
250 | 247 | ||
251 | /* Transmitter timeout. | 248 | /* Transmitter timeout */ |
252 | */ | 249 | #define TX_TIMEOUT (2 * HZ) |
253 | #define TX_TIMEOUT (2*HZ) | ||
254 | 250 | ||
255 | /* Register definitions for the PHY. | 251 | /* Register definitions for the PHY */ |
256 | */ | ||
257 | 252 | ||
258 | #define MII_REG_CR 0 /* Control Register */ | 253 | #define MII_REG_CR 0 /* Control Register */ |
259 | #define MII_REG_SR 1 /* Status Register */ | 254 | #define MII_REG_SR 1 /* Status Register */ |
@@ -288,18 +283,14 @@ static int mii_queue(struct net_device *dev, int request, | |||
288 | static int | 283 | static int |
289 | fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | 284 | fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) |
290 | { | 285 | { |
291 | struct fec_enet_private *fep; | 286 | struct fec_enet_private *fep = netdev_priv(dev); |
292 | volatile fec_t *fecp; | 287 | struct bufdesc *bdp; |
293 | volatile cbd_t *bdp; | ||
294 | unsigned short status; | 288 | unsigned short status; |
295 | unsigned long flags; | 289 | unsigned long flags; |
296 | 290 | ||
297 | fep = netdev_priv(dev); | ||
298 | fecp = (volatile fec_t*)dev->base_addr; | ||
299 | |||
300 | if (!fep->link) { | 291 | if (!fep->link) { |
301 | /* Link is down or autonegotiation is in progress. */ | 292 | /* Link is down or autonegotiation is in progress. */ |
302 | return 1; | 293 | return NETDEV_TX_BUSY; |
303 | } | 294 | } |
304 | 295 | ||
305 | spin_lock_irqsave(&fep->hw_lock, flags); | 296 | spin_lock_irqsave(&fep->hw_lock, flags); |
@@ -307,30 +298,27 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
307 | bdp = fep->cur_tx; | 298 | bdp = fep->cur_tx; |
308 | 299 | ||
309 | status = bdp->cbd_sc; | 300 | status = bdp->cbd_sc; |
310 | #ifndef final_version | 301 | |
311 | if (status & BD_ENET_TX_READY) { | 302 | if (status & BD_ENET_TX_READY) { |
312 | /* Ooops. All transmit buffers are full. Bail out. | 303 | /* Ooops. All transmit buffers are full. Bail out. |
313 | * This should not happen, since dev->tbusy should be set. | 304 | * This should not happen, since dev->tbusy should be set. |
314 | */ | 305 | */ |
315 | printk("%s: tx queue full!.\n", dev->name); | 306 | printk("%s: tx queue full!.\n", dev->name); |
316 | spin_unlock_irqrestore(&fep->hw_lock, flags); | 307 | spin_unlock_irqrestore(&fep->hw_lock, flags); |
317 | return 1; | 308 | return NETDEV_TX_BUSY; |
318 | } | 309 | } |
319 | #endif | ||
320 | 310 | ||
321 | /* Clear all of the status flags. | 311 | /* Clear all of the status flags */ |
322 | */ | ||
323 | status &= ~BD_ENET_TX_STATS; | 312 | status &= ~BD_ENET_TX_STATS; |
324 | 313 | ||
325 | /* Set buffer length and buffer pointer. | 314 | /* Set buffer length and buffer pointer */ |
326 | */ | ||
327 | bdp->cbd_bufaddr = __pa(skb->data); | 315 | bdp->cbd_bufaddr = __pa(skb->data); |
328 | bdp->cbd_datlen = skb->len; | 316 | bdp->cbd_datlen = skb->len; |
329 | 317 | ||
330 | /* | 318 | /* |
331 | * On some FEC implementations data must be aligned on | 319 | * On some FEC implementations data must be aligned on |
332 | * 4-byte boundaries. Use bounce buffers to copy data | 320 | * 4-byte boundaries. Use bounce buffers to copy data |
333 | * and get it aligned. Ugh. | 321 | * and get it aligned. Ugh. |
334 | */ | 322 | */ |
335 | if (bdp->cbd_bufaddr & FEC_ALIGNMENT) { | 323 | if (bdp->cbd_bufaddr & FEC_ALIGNMENT) { |
336 | unsigned int index; | 324 | unsigned int index; |
@@ -339,8 +327,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
339 | bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]); | 327 | bdp->cbd_bufaddr = __pa(fep->tx_bounce[index]); |
340 | } | 328 | } |
341 | 329 | ||
342 | /* Save skb pointer. | 330 | /* Save skb pointer */ |
343 | */ | ||
344 | fep->tx_skbuff[fep->skb_cur] = skb; | 331 | fep->tx_skbuff[fep->skb_cur] = skb; |
345 | 332 | ||
346 | dev->stats.tx_bytes += skb->len; | 333 | dev->stats.tx_bytes += skb->len; |
@@ -349,13 +336,12 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
349 | /* Push the data cache so the CPM does not get stale memory | 336 | /* Push the data cache so the CPM does not get stale memory |
350 | * data. | 337 | * data. |
351 | */ | 338 | */ |
352 | dma_sync_single(NULL, bdp->cbd_bufaddr, | 339 | bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, |
353 | bdp->cbd_datlen, DMA_TO_DEVICE); | 340 | FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); |
354 | 341 | ||
355 | /* Send it on its way. Tell FEC it's ready, interrupt when done, | 342 | /* Send it on its way. Tell FEC it's ready, interrupt when done, |
356 | * it's the last BD of the frame, and to put the CRC on the end. | 343 | * it's the last BD of the frame, and to put the CRC on the end. |
357 | */ | 344 | */ |
358 | |||
359 | status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | 345 | status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR |
360 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); | 346 | | BD_ENET_TX_LAST | BD_ENET_TX_TC); |
361 | bdp->cbd_sc = status; | 347 | bdp->cbd_sc = status; |
@@ -363,22 +349,20 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
363 | dev->trans_start = jiffies; | 349 | dev->trans_start = jiffies; |
364 | 350 | ||
365 | /* Trigger transmission start */ | 351 | /* Trigger transmission start */ |
366 | fecp->fec_x_des_active = 0; | 352 | writel(0, fep->hwp + FEC_X_DES_ACTIVE); |
367 | 353 | ||
368 | /* If this was the last BD in the ring, start at the beginning again. | 354 | /* If this was the last BD in the ring, start at the beginning again. */ |
369 | */ | 355 | if (status & BD_ENET_TX_WRAP) |
370 | if (status & BD_ENET_TX_WRAP) { | ||
371 | bdp = fep->tx_bd_base; | 356 | bdp = fep->tx_bd_base; |
372 | } else { | 357 | else |
373 | bdp++; | 358 | bdp++; |
374 | } | ||
375 | 359 | ||
376 | if (bdp == fep->dirty_tx) { | 360 | if (bdp == fep->dirty_tx) { |
377 | fep->tx_full = 1; | 361 | fep->tx_full = 1; |
378 | netif_stop_queue(dev); | 362 | netif_stop_queue(dev); |
379 | } | 363 | } |
380 | 364 | ||
381 | fep->cur_tx = (cbd_t *)bdp; | 365 | fep->cur_tx = bdp; |
382 | 366 | ||
383 | spin_unlock_irqrestore(&fep->hw_lock, flags); | 367 | spin_unlock_irqrestore(&fep->hw_lock, flags); |
384 | 368 | ||
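
Note the DMA API change threaded through this hunk: the old code pushed the cache with dma_sync_single() on a __pa() address, while the new code takes ownership of the buffer with dma_map_single() and hands it back in the completion path (fec_enet_tx() below). The pairing, condensed from the two hunks:

	/* transmit side (fec_enet_start_xmit): device takes ownership */
	bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
					  FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* completion side (fec_enet_tx): CPU takes it back before the kfree */
	dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
			 FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
	bdp->cbd_bufaddr = 0;
	dev_kfree_skb_any(skb);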
@@ -390,75 +374,33 @@ fec_timeout(struct net_device *dev) | |||
390 | { | 374 | { |
391 | struct fec_enet_private *fep = netdev_priv(dev); | 375 | struct fec_enet_private *fep = netdev_priv(dev); |
392 | 376 | ||
393 | printk("%s: transmit timed out.\n", dev->name); | ||
394 | dev->stats.tx_errors++; | 377 | dev->stats.tx_errors++; |
395 | #ifndef final_version | ||
396 | { | ||
397 | int i; | ||
398 | cbd_t *bdp; | ||
399 | |||
400 | printk("Ring data dump: cur_tx %lx%s, dirty_tx %lx cur_rx: %lx\n", | ||
401 | (unsigned long)fep->cur_tx, fep->tx_full ? " (full)" : "", | ||
402 | (unsigned long)fep->dirty_tx, | ||
403 | (unsigned long)fep->cur_rx); | ||
404 | |||
405 | bdp = fep->tx_bd_base; | ||
406 | printk(" tx: %u buffers\n", TX_RING_SIZE); | ||
407 | for (i = 0 ; i < TX_RING_SIZE; i++) { | ||
408 | printk(" %08x: %04x %04x %08x\n", | ||
409 | (uint) bdp, | ||
410 | bdp->cbd_sc, | ||
411 | bdp->cbd_datlen, | ||
412 | (int) bdp->cbd_bufaddr); | ||
413 | bdp++; | ||
414 | } | ||
415 | 378 | ||
416 | bdp = fep->rx_bd_base; | ||
417 | printk(" rx: %lu buffers\n", (unsigned long) RX_RING_SIZE); | ||
418 | for (i = 0 ; i < RX_RING_SIZE; i++) { | ||
419 | printk(" %08x: %04x %04x %08x\n", | ||
420 | (uint) bdp, | ||
421 | bdp->cbd_sc, | ||
422 | bdp->cbd_datlen, | ||
423 | (int) bdp->cbd_bufaddr); | ||
424 | bdp++; | ||
425 | } | ||
426 | } | ||
427 | #endif | ||
428 | fec_restart(dev, fep->full_duplex); | 379 | fec_restart(dev, fep->full_duplex); |
429 | netif_wake_queue(dev); | 380 | netif_wake_queue(dev); |
430 | } | 381 | } |
431 | 382 | ||
432 | /* The interrupt handler. | ||
433 | * This is called from the MPC core interrupt. | ||
434 | */ | ||
435 | static irqreturn_t | 383 | static irqreturn_t |
436 | fec_enet_interrupt(int irq, void * dev_id) | 384 | fec_enet_interrupt(int irq, void * dev_id) |
437 | { | 385 | { |
438 | struct net_device *dev = dev_id; | 386 | struct net_device *dev = dev_id; |
439 | volatile fec_t *fecp; | 387 | struct fec_enet_private *fep = netdev_priv(dev); |
440 | uint int_events; | 388 | uint int_events; |
441 | irqreturn_t ret = IRQ_NONE; | 389 | irqreturn_t ret = IRQ_NONE; |
442 | 390 | ||
443 | fecp = (volatile fec_t*)dev->base_addr; | ||
444 | |||
445 | /* Get the interrupt events that caused us to be here. | ||
446 | */ | ||
447 | do { | 391 | do { |
448 | int_events = fecp->fec_ievent; | 392 | int_events = readl(fep->hwp + FEC_IEVENT); |
449 | fecp->fec_ievent = int_events; | 393 | writel(int_events, fep->hwp + FEC_IEVENT); |
450 | 394 | ||
451 | /* Handle receive event in its own function. | ||
452 | */ | ||
453 | if (int_events & FEC_ENET_RXF) { | 395 | if (int_events & FEC_ENET_RXF) { |
454 | ret = IRQ_HANDLED; | 396 | ret = IRQ_HANDLED; |
455 | fec_enet_rx(dev); | 397 | fec_enet_rx(dev); |
456 | } | 398 | } |
457 | 399 | ||
458 | /* Transmit OK, or non-fatal error. Update the buffer | 400 | /* Transmit OK, or non-fatal error. Update the buffer |
459 | descriptors. FEC handles all errors, we just discover | 401 | * descriptors. FEC handles all errors, we just discover |
460 | them as part of the transmit process. | 402 | * them as part of the transmit process. |
461 | */ | 403 | */ |
462 | if (int_events & FEC_ENET_TXF) { | 404 | if (int_events & FEC_ENET_TXF) { |
463 | ret = IRQ_HANDLED; | 405 | ret = IRQ_HANDLED; |
464 | fec_enet_tx(dev); | 406 | fec_enet_tx(dev); |
@@ -479,7 +421,7 @@ static void | |||
479 | fec_enet_tx(struct net_device *dev) | 421 | fec_enet_tx(struct net_device *dev) |
480 | { | 422 | { |
481 | struct fec_enet_private *fep; | 423 | struct fec_enet_private *fep; |
482 | volatile cbd_t *bdp; | 424 | struct bufdesc *bdp; |
483 | unsigned short status; | 425 | unsigned short status; |
484 | struct sk_buff *skb; | 426 | struct sk_buff *skb; |
485 | 427 | ||
@@ -488,7 +430,11 @@ fec_enet_tx(struct net_device *dev) | |||
488 | bdp = fep->dirty_tx; | 430 | bdp = fep->dirty_tx; |
489 | 431 | ||
490 | while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { | 432 | while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { |
491 | if (bdp == fep->cur_tx && fep->tx_full == 0) break; | 433 | if (bdp == fep->cur_tx && fep->tx_full == 0) |
434 | break; | ||
435 | |||
436 | dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE); | ||
437 | bdp->cbd_bufaddr = 0; | ||
492 | 438 | ||
493 | skb = fep->tx_skbuff[fep->skb_dirty]; | 439 | skb = fep->tx_skbuff[fep->skb_dirty]; |
494 | /* Check for errors. */ | 440 | /* Check for errors. */ |
@@ -510,31 +456,27 @@ fec_enet_tx(struct net_device *dev) | |||
510 | dev->stats.tx_packets++; | 456 | dev->stats.tx_packets++; |
511 | } | 457 | } |
512 | 458 | ||
513 | #ifndef final_version | ||
514 | if (status & BD_ENET_TX_READY) | 459 | if (status & BD_ENET_TX_READY) |
515 | printk("HEY! Enet xmit interrupt and TX_READY.\n"); | 460 | printk("HEY! Enet xmit interrupt and TX_READY.\n"); |
516 | #endif | 461 | |
517 | /* Deferred means some collisions occurred during transmit, | 462 | /* Deferred means some collisions occurred during transmit, |
518 | * but we eventually sent the packet OK. | 463 | * but we eventually sent the packet OK. |
519 | */ | 464 | */ |
520 | if (status & BD_ENET_TX_DEF) | 465 | if (status & BD_ENET_TX_DEF) |
521 | dev->stats.collisions++; | 466 | dev->stats.collisions++; |
522 | 467 | ||
523 | /* Free the sk buffer associated with this last transmit. | 468 | /* Free the sk buffer associated with this last transmit */ |
524 | */ | ||
525 | dev_kfree_skb_any(skb); | 469 | dev_kfree_skb_any(skb); |
526 | fep->tx_skbuff[fep->skb_dirty] = NULL; | 470 | fep->tx_skbuff[fep->skb_dirty] = NULL; |
527 | fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; | 471 | fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK; |
528 | 472 | ||
529 | /* Update pointer to next buffer descriptor to be transmitted. | 473 | /* Update pointer to next buffer descriptor to be transmitted */ |
530 | */ | ||
531 | if (status & BD_ENET_TX_WRAP) | 474 | if (status & BD_ENET_TX_WRAP) |
532 | bdp = fep->tx_bd_base; | 475 | bdp = fep->tx_bd_base; |
533 | else | 476 | else |
534 | bdp++; | 477 | bdp++; |
535 | 478 | ||
536 | /* Since we have freed up a buffer, the ring is no longer | 479 | /* Since we have freed up a buffer, the ring is no longer full |
537 | * full. | ||
538 | */ | 480 | */ |
539 | if (fep->tx_full) { | 481 | if (fep->tx_full) { |
540 | fep->tx_full = 0; | 482 | fep->tx_full = 0; |
@@ -542,7 +484,7 @@ fec_enet_tx(struct net_device *dev) | |||
542 | netif_wake_queue(dev); | 484 | netif_wake_queue(dev); |
543 | } | 485 | } |
544 | } | 486 | } |
545 | fep->dirty_tx = (cbd_t *)bdp; | 487 | fep->dirty_tx = bdp; |
546 | spin_unlock_irq(&fep->hw_lock); | 488 | spin_unlock_irq(&fep->hw_lock); |
547 | } | 489 | } |
548 | 490 | ||
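
One idiom worth calling out, since it recurs in fec_enet_start_xmit(), fec_enet_tx() and fec_enet_rx(): the ring pointer advances by mirroring the hardware's wrap behaviour. The last descriptor is flagged with BD_SC_WRAP, reported in a transmit descriptor's status word as BD_ENET_TX_WRAP, so software follows the bit back to the ring base rather than doing index arithmetic:

	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;	/* hardware wraps here, so must we */
	else
		bdp++;			/* descriptors are contiguous in DMA memory */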
@@ -555,9 +497,8 @@ fec_enet_tx(struct net_device *dev) | |||
555 | static void | 497 | static void |
556 | fec_enet_rx(struct net_device *dev) | 498 | fec_enet_rx(struct net_device *dev) |
557 | { | 499 | { |
558 | struct fec_enet_private *fep; | 500 | struct fec_enet_private *fep = netdev_priv(dev); |
559 | volatile fec_t *fecp; | 501 | struct bufdesc *bdp; |
560 | volatile cbd_t *bdp; | ||
561 | unsigned short status; | 502 | unsigned short status; |
562 | struct sk_buff *skb; | 503 | struct sk_buff *skb; |
563 | ushort pkt_len; | 504 | ushort pkt_len; |
@@ -567,9 +508,6 @@ fec_enet_rx(struct net_device *dev) | |||
567 | flush_cache_all(); | 508 | flush_cache_all(); |
568 | #endif | 509 | #endif |
569 | 510 | ||
570 | fep = netdev_priv(dev); | ||
571 | fecp = (volatile fec_t*)dev->base_addr; | ||
572 | |||
573 | spin_lock_irq(&fep->hw_lock); | 511 | spin_lock_irq(&fep->hw_lock); |
574 | 512 | ||
575 | /* First, grab all of the stats for the incoming packet. | 513 | /* First, grab all of the stats for the incoming packet. |
@@ -577,143 +515,121 @@ fec_enet_rx(struct net_device *dev) | |||
577 | */ | 515 | */ |
578 | bdp = fep->cur_rx; | 516 | bdp = fep->cur_rx; |
579 | 517 | ||
580 | while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { | 518 | while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { |
581 | 519 | ||
582 | #ifndef final_version | 520 | /* Since we have allocated space to hold a complete frame, |
583 | /* Since we have allocated space to hold a complete frame, | 521 | * the last indicator should be set. |
584 | * the last indicator should be set. | 522 | */ |
585 | */ | 523 | if ((status & BD_ENET_RX_LAST) == 0) |
586 | if ((status & BD_ENET_RX_LAST) == 0) | 524 | printk("FEC ENET: rcv is not +last\n"); |
587 | printk("FEC ENET: rcv is not +last\n"); | ||
588 | #endif | ||
589 | 525 | ||
590 | if (!fep->opened) | 526 | if (!fep->opened) |
591 | goto rx_processing_done; | 527 | goto rx_processing_done; |
592 | 528 | ||
593 | /* Check for errors. */ | 529 | /* Check for errors. */ |
594 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | | 530 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | |
595 | BD_ENET_RX_CR | BD_ENET_RX_OV)) { | 531 | BD_ENET_RX_CR | BD_ENET_RX_OV)) { |
596 | dev->stats.rx_errors++; | 532 | dev->stats.rx_errors++; |
597 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { | 533 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { |
598 | /* Frame too long or too short. */ | 534 | /* Frame too long or too short. */ |
599 | dev->stats.rx_length_errors++; | 535 | dev->stats.rx_length_errors++; |
536 | } | ||
537 | if (status & BD_ENET_RX_NO) /* Frame alignment */ | ||
538 | dev->stats.rx_frame_errors++; | ||
539 | if (status & BD_ENET_RX_CR) /* CRC Error */ | ||
540 | dev->stats.rx_crc_errors++; | ||
541 | if (status & BD_ENET_RX_OV) /* FIFO overrun */ | ||
542 | dev->stats.rx_fifo_errors++; | ||
600 | } | 543 | } |
601 | if (status & BD_ENET_RX_NO) /* Frame alignment */ | 544 | |
545 | /* Report late collisions as a frame error. | ||
546 | * On this error, the BD is closed, but we don't know what we | ||
547 | * have in the buffer. So, just drop this frame on the floor. | ||
548 | */ | ||
549 | if (status & BD_ENET_RX_CL) { | ||
550 | dev->stats.rx_errors++; | ||
602 | dev->stats.rx_frame_errors++; | 551 | dev->stats.rx_frame_errors++; |
603 | if (status & BD_ENET_RX_CR) /* CRC Error */ | 552 | goto rx_processing_done; |
604 | dev->stats.rx_crc_errors++; | 553 | } |
605 | if (status & BD_ENET_RX_OV) /* FIFO overrun */ | ||
606 | dev->stats.rx_fifo_errors++; | ||
607 | } | ||
608 | 554 | ||
609 | /* Report late collisions as a frame error. | 555 | /* Process the incoming frame. */ |
610 | * On this error, the BD is closed, but we don't know what we | 556 | dev->stats.rx_packets++; |
611 | * have in the buffer. So, just drop this frame on the floor. | 557 | pkt_len = bdp->cbd_datlen; |
612 | */ | 558 | dev->stats.rx_bytes += pkt_len; |
613 | if (status & BD_ENET_RX_CL) { | 559 | data = (__u8*)__va(bdp->cbd_bufaddr); |
614 | dev->stats.rx_errors++; | ||
615 | dev->stats.rx_frame_errors++; | ||
616 | goto rx_processing_done; | ||
617 | } | ||
618 | 560 | ||
619 | /* Process the incoming frame. | 561 | dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, |
620 | */ | 562 | DMA_FROM_DEVICE); |
621 | dev->stats.rx_packets++; | ||
622 | pkt_len = bdp->cbd_datlen; | ||
623 | dev->stats.rx_bytes += pkt_len; | ||
624 | data = (__u8*)__va(bdp->cbd_bufaddr); | ||
625 | |||
626 | dma_sync_single(NULL, (unsigned long)__pa(data), | ||
627 | pkt_len - 4, DMA_FROM_DEVICE); | ||
628 | |||
629 | /* This does 16 byte alignment, exactly what we need. | ||
630 | * The packet length includes FCS, but we don't want to | ||
631 | * include that when passing upstream as it messes up | ||
632 | * bridging applications. | ||
633 | */ | ||
634 | skb = dev_alloc_skb(pkt_len-4); | ||
635 | 563 | ||
636 | if (skb == NULL) { | 564 | /* This does 16 byte alignment, exactly what we need. |
637 | printk("%s: Memory squeeze, dropping packet.\n", dev->name); | 565 | * The packet length includes FCS, but we don't want to |
638 | dev->stats.rx_dropped++; | 566 | * include that when passing upstream as it messes up |
639 | } else { | 567 | * bridging applications. |
640 | skb_put(skb,pkt_len-4); /* Make room */ | 568 | */ |
641 | skb_copy_to_linear_data(skb, data, pkt_len-4); | 569 | skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN); |
642 | skb->protocol=eth_type_trans(skb,dev); | ||
643 | netif_rx(skb); | ||
644 | } | ||
645 | rx_processing_done: | ||
646 | 570 | ||
647 | /* Clear the status flags for this buffer. | 571 | if (unlikely(!skb)) { |
648 | */ | 572 | printk("%s: Memory squeeze, dropping packet.\n", |
649 | status &= ~BD_ENET_RX_STATS; | 573 | dev->name); |
574 | dev->stats.rx_dropped++; | ||
575 | } else { | ||
576 | skb_reserve(skb, NET_IP_ALIGN); | ||
577 | skb_put(skb, pkt_len - 4); /* Make room */ | ||
578 | skb_copy_to_linear_data(skb, data, pkt_len - 4); | ||
579 | skb->protocol = eth_type_trans(skb, dev); | ||
580 | netif_rx(skb); | ||
581 | } | ||
650 | 582 | ||
651 | /* Mark the buffer empty. | 583 | bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen, |
652 | */ | 584 | DMA_FROM_DEVICE); |
653 | status |= BD_ENET_RX_EMPTY; | 585 | rx_processing_done: |
654 | bdp->cbd_sc = status; | 586 | /* Clear the status flags for this buffer */ |
587 | status &= ~BD_ENET_RX_STATS; | ||
655 | 588 | ||
656 | /* Update BD pointer to next entry. | 589 | /* Mark the buffer empty */ |
657 | */ | 590 | status |= BD_ENET_RX_EMPTY; |
658 | if (status & BD_ENET_RX_WRAP) | 591 | bdp->cbd_sc = status; |
659 | bdp = fep->rx_bd_base; | ||
660 | else | ||
661 | bdp++; | ||
662 | 592 | ||
663 | #if 1 | 593 | /* Update BD pointer to next entry */ |
664 | /* Doing this here will keep the FEC running while we process | 594 | if (status & BD_ENET_RX_WRAP) |
665 | * incoming frames. On a heavily loaded network, we should be | 595 | bdp = fep->rx_bd_base; |
666 | * able to keep up at the expense of system resources. | 596 | else |
667 | */ | 597 | bdp++; |
668 | fecp->fec_r_des_active = 0; | 598 | /* Doing this here will keep the FEC running while we process |
669 | #endif | 599 | * incoming frames. On a heavily loaded network, we should be |
670 | } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */ | 600 | * able to keep up at the expense of system resources. |
671 | fep->cur_rx = (cbd_t *)bdp; | 601 | */ |
672 | 602 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); | |
673 | #if 0 | 603 | } |
674 | /* Doing this here will allow us to process all frames in the | 604 | fep->cur_rx = bdp; |
675 | * ring before the FEC is allowed to put more there. On a heavily | ||
676 | * loaded network, some frames may be lost. Unfortunately, this | ||
677 | * increases the interrupt overhead since we can potentially work | ||
678 | * our way back to the interrupt return only to come right back | ||
679 | * here. | ||
680 | */ | ||
681 | fecp->fec_r_des_active = 0; | ||
682 | #endif | ||
683 | 605 | ||
684 | spin_unlock_irq(&fep->hw_lock); | 606 | spin_unlock_irq(&fep->hw_lock); |
685 | } | 607 | } |
686 | 608 | ||
687 | |||
688 | /* called from interrupt context */ | 609 | /* called from interrupt context */ |
689 | static void | 610 | static void |
690 | fec_enet_mii(struct net_device *dev) | 611 | fec_enet_mii(struct net_device *dev) |
691 | { | 612 | { |
692 | struct fec_enet_private *fep; | 613 | struct fec_enet_private *fep; |
693 | volatile fec_t *ep; | ||
694 | mii_list_t *mip; | 614 | mii_list_t *mip; |
695 | uint mii_reg; | ||
696 | 615 | ||
697 | fep = netdev_priv(dev); | 616 | fep = netdev_priv(dev); |
698 | spin_lock_irq(&fep->mii_lock); | 617 | spin_lock_irq(&fep->mii_lock); |
699 | 618 | ||
700 | ep = fep->hwp; | ||
701 | mii_reg = ep->fec_mii_data; | ||
702 | |||
703 | if ((mip = mii_head) == NULL) { | 619 | if ((mip = mii_head) == NULL) { |
704 | printk("MII and no head!\n"); | 620 | printk("MII and no head!\n"); |
705 | goto unlock; | 621 | goto unlock; |
706 | } | 622 | } |
707 | 623 | ||
708 | if (mip->mii_func != NULL) | 624 | if (mip->mii_func != NULL) |
709 | (*(mip->mii_func))(mii_reg, dev); | 625 | (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev); |
710 | 626 | ||
711 | mii_head = mip->mii_next; | 627 | mii_head = mip->mii_next; |
712 | mip->mii_next = mii_free; | 628 | mip->mii_next = mii_free; |
713 | mii_free = mip; | 629 | mii_free = mip; |
714 | 630 | ||
715 | if ((mip = mii_head) != NULL) | 631 | if ((mip = mii_head) != NULL) |
716 | ep->fec_mii_data = mip->mii_regval; | 632 | writel(mip->mii_regval, fep->hwp + FEC_MII_DATA); |
717 | 633 | ||
718 | unlock: | 634 | unlock: |
719 | spin_unlock_irq(&fep->mii_lock); | 635 | spin_unlock_irq(&fep->mii_lock); |
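
A small but real improvement hidden in the receive hunk above: the new code over-allocates by NET_IP_ALIGN and shifts the data pointer before copying, so the 14-byte Ethernet header leaves the IP header 4-byte aligned for the stack. The idiom in isolation (pkt_len includes the 4-byte FCS, which is trimmed off before passing the frame upstream):

	skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
	if (skb) {
		skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */
		skb_put(skb, pkt_len - 4);		/* frame minus FCS */
		skb_copy_to_linear_data(skb, data, pkt_len - 4);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);
	}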
@@ -727,8 +643,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi | |||
727 | mii_list_t *mip; | 643 | mii_list_t *mip; |
728 | int retval; | 644 | int retval; |
729 | 645 | ||
730 | /* Add PHY address to register command. | 646 | /* Add PHY address to register command */ |
731 | */ | ||
732 | fep = netdev_priv(dev); | 647 | fep = netdev_priv(dev); |
733 | spin_lock_irqsave(&fep->mii_lock, flags); | 648 | spin_lock_irqsave(&fep->mii_lock, flags); |
734 | 649 | ||
@@ -745,7 +660,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi | |||
745 | mii_tail = mip; | 660 | mii_tail = mip; |
746 | } else { | 661 | } else { |
747 | mii_head = mii_tail = mip; | 662 | mii_head = mii_tail = mip; |
748 | fep->hwp->fec_mii_data = regval; | 663 | writel(regval, fep->hwp + FEC_MII_DATA); |
749 | } | 664 | } |
750 | } else { | 665 | } else { |
751 | retval = 1; | 666 | retval = 1; |
@@ -1246,11 +1161,8 @@ static void __inline__ fec_phy_ack_intr(void) | |||
1246 | static void __inline__ fec_get_mac(struct net_device *dev) | 1161 | static void __inline__ fec_get_mac(struct net_device *dev) |
1247 | { | 1162 | { |
1248 | struct fec_enet_private *fep = netdev_priv(dev); | 1163 | struct fec_enet_private *fep = netdev_priv(dev); |
1249 | volatile fec_t *fecp; | ||
1250 | unsigned char *iap, tmpaddr[ETH_ALEN]; | 1164 | unsigned char *iap, tmpaddr[ETH_ALEN]; |
1251 | 1165 | ||
1252 | fecp = fep->hwp; | ||
1253 | |||
1254 | if (FEC_FLASHMAC) { | 1166 | if (FEC_FLASHMAC) { |
1255 | /* | 1167 | /* |
1256 | * Get MAC address from FLASH. | 1168 | * Get MAC address from FLASH. |
@@ -1264,8 +1176,8 @@ static void __inline__ fec_get_mac(struct net_device *dev) | |||
1264 | (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) | 1176 | (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) |
1265 | iap = fec_mac_default; | 1177 | iap = fec_mac_default; |
1266 | } else { | 1178 | } else { |
1267 | *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low; | 1179 | *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); |
1268 | *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16); | 1180 | *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); |
1269 | iap = &tmpaddr[0]; | 1181 | iap = &tmpaddr[0]; |
1270 | } | 1182 | } |
1271 | 1183 | ||
@@ -1375,11 +1287,6 @@ static void mii_relink(struct work_struct *work) | |||
1375 | fec_restart(dev, duplex); | 1287 | fec_restart(dev, duplex); |
1376 | } else | 1288 | } else |
1377 | fec_stop(dev); | 1289 | fec_stop(dev); |
1378 | |||
1379 | #if 0 | ||
1380 | enable_irq(fep->mii_irq); | ||
1381 | #endif | ||
1382 | |||
1383 | } | 1290 | } |
1384 | 1291 | ||
1385 | /* mii_queue_relink is called in interrupt context from mii_link_interrupt */ | 1292 | /* mii_queue_relink is called in interrupt context from mii_link_interrupt */ |
@@ -1388,12 +1295,12 @@ static void mii_queue_relink(uint mii_reg, struct net_device *dev) | |||
1388 | struct fec_enet_private *fep = netdev_priv(dev); | 1295 | struct fec_enet_private *fep = netdev_priv(dev); |
1389 | 1296 | ||
1390 | /* | 1297 | /* |
1391 | ** We cannot queue phy_task twice in the workqueue. It | 1298 | * We cannot queue phy_task twice in the workqueue. It |
1392 | ** would cause an endless loop in the workqueue. | 1299 | * would cause an endless loop in the workqueue. |
1393 | ** Fortunately, if the last mii_relink entry has not yet been | 1300 | * Fortunately, if the last mii_relink entry has not yet been |
1394 | ** executed now, it will do the job for the current interrupt, | 1301 | * executed now, it will do the job for the current interrupt, |
1395 | ** which is just what we want. | 1302 | * which is just what we want. |
1396 | */ | 1303 | */ |
1397 | if (fep->mii_phy_task_queued) | 1304 | if (fep->mii_phy_task_queued) |
1398 | return; | 1305 | return; |
1399 | 1306 | ||
@@ -1424,8 +1331,7 @@ phy_cmd_t const phy_cmd_config[] = { | |||
1424 | { mk_mii_end, } | 1331 | { mk_mii_end, } |
1425 | }; | 1332 | }; |
1426 | 1333 | ||
1427 | /* Read remainder of PHY ID. | 1334 | /* Read remainder of PHY ID. */ |
1428 | */ | ||
1429 | static void | 1335 | static void |
1430 | mii_discover_phy3(uint mii_reg, struct net_device *dev) | 1336 | mii_discover_phy3(uint mii_reg, struct net_device *dev) |
1431 | { | 1337 | { |
@@ -1457,17 +1363,14 @@ static void | |||
1457 | mii_discover_phy(uint mii_reg, struct net_device *dev) | 1363 | mii_discover_phy(uint mii_reg, struct net_device *dev) |
1458 | { | 1364 | { |
1459 | struct fec_enet_private *fep; | 1365 | struct fec_enet_private *fep; |
1460 | volatile fec_t *fecp; | ||
1461 | uint phytype; | 1366 | uint phytype; |
1462 | 1367 | ||
1463 | fep = netdev_priv(dev); | 1368 | fep = netdev_priv(dev); |
1464 | fecp = fep->hwp; | ||
1465 | 1369 | ||
1466 | if (fep->phy_addr < 32) { | 1370 | if (fep->phy_addr < 32) { |
1467 | if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) { | 1371 | if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) { |
1468 | 1372 | ||
1469 | /* Got first part of ID, now get remainder. | 1373 | /* Got first part of ID, now get remainder */ |
1470 | */ | ||
1471 | fep->phy_id = phytype << 16; | 1374 | fep->phy_id = phytype << 16; |
1472 | mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), | 1375 | mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), |
1473 | mii_discover_phy3); | 1376 | mii_discover_phy3); |
@@ -1479,15 +1382,15 @@ mii_discover_phy(uint mii_reg, struct net_device *dev) | |||
1479 | } else { | 1382 | } else { |
1480 | printk("FEC: No PHY device found.\n"); | 1383 | printk("FEC: No PHY device found.\n"); |
1481 | /* Disable external MII interface */ | 1384 | /* Disable external MII interface */ |
1482 | fecp->fec_mii_speed = fep->phy_speed = 0; | 1385 | writel(0, fep->hwp + FEC_MII_SPEED); |
1386 | fep->phy_speed = 0; | ||
1483 | #ifdef HAVE_mii_link_interrupt | 1387 | #ifdef HAVE_mii_link_interrupt |
1484 | fec_disable_phy_intr(); | 1388 | fec_disable_phy_intr(); |
1485 | #endif | 1389 | #endif |
1486 | } | 1390 | } |
1487 | } | 1391 | } |
1488 | 1392 | ||
1489 | /* This interrupt occurs when the PHY detects a link change. | 1393 | /* This interrupt occurs when the PHY detects a link change */ |
1490 | */ | ||
1491 | #ifdef HAVE_mii_link_interrupt | 1394 | #ifdef HAVE_mii_link_interrupt |
1492 | static irqreturn_t | 1395 | static irqreturn_t |
1493 | mii_link_interrupt(int irq, void * dev_id) | 1396 | mii_link_interrupt(int irq, void * dev_id) |
@@ -1497,10 +1400,6 @@ mii_link_interrupt(int irq, void * dev_id) | |||
1497 | 1400 | ||
1498 | fec_phy_ack_intr(); | 1401 | fec_phy_ack_intr(); |
1499 | 1402 | ||
1500 | #if 0 | ||
1501 | disable_irq(fep->mii_irq); /* disable now, enable later */ | ||
1502 | #endif | ||
1503 | |||
1504 | mii_do_cmd(dev, fep->phy->ack_int); | 1403 | mii_do_cmd(dev, fep->phy->ack_int); |
1505 | mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ | 1404 | mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ |
1506 | 1405 | ||
@@ -1508,19 +1407,91 @@ mii_link_interrupt(int irq, void * dev_id) | |||
1508 | } | 1407 | } |
1509 | #endif | 1408 | #endif |
1510 | 1409 | ||
1410 | static void fec_enet_free_buffers(struct net_device *dev) | ||
1411 | { | ||
1412 | struct fec_enet_private *fep = netdev_priv(dev); | ||
1413 | int i; | ||
1414 | struct sk_buff *skb; | ||
1415 | struct bufdesc *bdp; | ||
1416 | |||
1417 | bdp = fep->rx_bd_base; | ||
1418 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1419 | skb = fep->rx_skbuff[i]; | ||
1420 | |||
1421 | if (bdp->cbd_bufaddr) | ||
1422 | dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, | ||
1423 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | ||
1424 | if (skb) | ||
1425 | dev_kfree_skb(skb); | ||
1426 | bdp++; | ||
1427 | } | ||
1428 | |||
1429 | bdp = fep->tx_bd_base; | ||
1430 | for (i = 0; i < TX_RING_SIZE; i++) | ||
1431 | kfree(fep->tx_bounce[i]); | ||
1432 | } | ||
1433 | |||
1434 | static int fec_enet_alloc_buffers(struct net_device *dev) | ||
1435 | { | ||
1436 | struct fec_enet_private *fep = netdev_priv(dev); | ||
1437 | int i; | ||
1438 | struct sk_buff *skb; | ||
1439 | struct bufdesc *bdp; | ||
1440 | |||
1441 | bdp = fep->rx_bd_base; | ||
1442 | for (i = 0; i < RX_RING_SIZE; i++) { | ||
1443 | skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE); | ||
1444 | if (!skb) { | ||
1445 | fec_enet_free_buffers(dev); | ||
1446 | return -ENOMEM; | ||
1447 | } | ||
1448 | fep->rx_skbuff[i] = skb; | ||
1449 | |||
1450 | bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data, | ||
1451 | FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); | ||
1452 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | ||
1453 | bdp++; | ||
1454 | } | ||
1455 | |||
1456 | /* Set the last buffer to wrap. */ | ||
1457 | bdp--; | ||
1458 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1459 | |||
1460 | bdp = fep->tx_bd_base; | ||
1461 | for (i = 0; i < TX_RING_SIZE; i++) { | ||
1462 | fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); | ||
1463 | |||
1464 | bdp->cbd_sc = 0; | ||
1465 | bdp->cbd_bufaddr = 0; | ||
1466 | bdp++; | ||
1467 | } | ||
1468 | |||
1469 | /* Set the last buffer to wrap. */ | ||
1470 | bdp--; | ||
1471 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1472 | |||
1473 | return 0; | ||
1474 | } | ||
1475 | |||
1511 | static int | 1476 | static int |
1512 | fec_enet_open(struct net_device *dev) | 1477 | fec_enet_open(struct net_device *dev) |
1513 | { | 1478 | { |
1514 | struct fec_enet_private *fep = netdev_priv(dev); | 1479 | struct fec_enet_private *fep = netdev_priv(dev); |
1480 | int ret; | ||
1515 | 1481 | ||
1516 | /* I should reset the ring buffers here, but I don't yet know | 1482 | /* I should reset the ring buffers here, but I don't yet know |
1517 | * a simple way to do that. | 1483 | * a simple way to do that. |
1518 | */ | 1484 | */ |
1519 | fec_set_mac_address(dev); | 1485 | |
1486 | ret = fec_enet_alloc_buffers(dev); | ||
1487 | if (ret) | ||
1488 | return ret; | ||
1520 | 1489 | ||
1521 | fep->sequence_done = 0; | 1490 | fep->sequence_done = 0; |
1522 | fep->link = 0; | 1491 | fep->link = 0; |
1523 | 1492 | ||
1493 | fec_restart(dev, 1); | ||
1494 | |||
1524 | if (fep->phy) { | 1495 | if (fep->phy) { |
1525 | mii_do_cmd(dev, fep->phy->ack_int); | 1496 | mii_do_cmd(dev, fep->phy->ack_int); |
1526 | mii_do_cmd(dev, fep->phy->config); | 1497 | mii_do_cmd(dev, fep->phy->config); |
@@ -1537,21 +1508,17 @@ fec_enet_open(struct net_device *dev) | |||
1537 | schedule(); | 1508 | schedule(); |
1538 | 1509 | ||
1539 | mii_do_cmd(dev, fep->phy->startup); | 1510 | mii_do_cmd(dev, fep->phy->startup); |
1540 | |||
1541 | /* Set the initial link state to true. A lot of hardware | ||
1542 | * based on this device does not implement a PHY interrupt, | ||
1543 | * so we are never notified of link change. | ||
1544 | */ | ||
1545 | fep->link = 1; | ||
1546 | } else { | ||
1547 | fep->link = 1; /* lets just try it and see */ | ||
1548 | /* no phy, go full duplex, it's most likely a hub chip */ | ||
1549 | fec_restart(dev, 1); | ||
1550 | } | 1511 | } |
1551 | 1512 | ||
1513 | /* Set the initial link state to true. A lot of hardware | ||
1514 | * based on this device does not implement a PHY interrupt, | ||
1515 | * so we are never notified of link change. | ||
1516 | */ | ||
1517 | fep->link = 1; | ||
1518 | |||
1552 | netif_start_queue(dev); | 1519 | netif_start_queue(dev); |
1553 | fep->opened = 1; | 1520 | fep->opened = 1; |
1554 | return 0; /* Success */ | 1521 | return 0; |
1555 | } | 1522 | } |
1556 | 1523 | ||
1557 | static int | 1524 | static int |
@@ -1559,12 +1526,13 @@ fec_enet_close(struct net_device *dev) | |||
1559 | { | 1526 | { |
1560 | struct fec_enet_private *fep = netdev_priv(dev); | 1527 | struct fec_enet_private *fep = netdev_priv(dev); |
1561 | 1528 | ||
1562 | /* Don't know what to do yet. | 1529 | /* Don't know what to do yet. */ |
1563 | */ | ||
1564 | fep->opened = 0; | 1530 | fep->opened = 0; |
1565 | netif_stop_queue(dev); | 1531 | netif_stop_queue(dev); |
1566 | fec_stop(dev); | 1532 | fec_stop(dev); |
1567 | 1533 | ||
1534 | fec_enet_free_buffers(dev); | ||
1535 | |||
1568 | return 0; | 1536 | return 0; |
1569 | } | 1537 | } |
1570 | 1538 | ||
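
With this hunk, fec_enet_open() and fec_enet_close() bracket the ring buffers explicitly: fec_enet_alloc_buffers() allocates and DMA-maps the receive skbs and transmit bounce buffers at open time, and fec_enet_free_buffers() undoes all of it at close time. The free routine checks cbd_bufaddr and the skb pointer before unmapping, so the allocator can safely call it on its own partial failure. A sketch of the contract, with example_open() as a hypothetical caller:

	static int example_open(struct net_device *dev)
	{
		int ret = fec_enet_alloc_buffers(dev);	/* maps RX skbs, allocs TX bounce */
		if (ret)
			return ret;	/* the alloc path already freed what it built */

		fec_restart(dev, 1);	/* program the rings into the hardware */
		netif_start_queue(dev);
		return 0;
	}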
@@ -1583,87 +1551,102 @@ fec_enet_close(struct net_device *dev) | |||
1583 | 1551 | ||
1584 | static void set_multicast_list(struct net_device *dev) | 1552 | static void set_multicast_list(struct net_device *dev) |
1585 | { | 1553 | { |
1586 | struct fec_enet_private *fep; | 1554 | struct fec_enet_private *fep = netdev_priv(dev); |
1587 | volatile fec_t *ep; | ||
1588 | struct dev_mc_list *dmi; | 1555 | struct dev_mc_list *dmi; |
1589 | unsigned int i, j, bit, data, crc; | 1556 | unsigned int i, j, bit, data, crc, tmp; |
1590 | unsigned char hash; | 1557 | unsigned char hash; |
1591 | 1558 | ||
1592 | fep = netdev_priv(dev); | 1559 | if (dev->flags & IFF_PROMISC) { |
1593 | ep = fep->hwp; | 1560 | tmp = readl(fep->hwp + FEC_R_CNTRL); |
1561 | tmp |= 0x8; | ||
1562 | writel(tmp, fep->hwp + FEC_R_CNTRL); | ||
1563 | return; | ||
1564 | } | ||
1594 | 1565 | ||
1595 | if (dev->flags&IFF_PROMISC) { | 1566 | tmp = readl(fep->hwp + FEC_R_CNTRL); |
1596 | ep->fec_r_cntrl |= 0x0008; | 1567 | tmp &= ~0x8; |
1597 | } else { | 1568 | writel(tmp, fep->hwp + FEC_R_CNTRL); |
1569 | |||
1570 | if (dev->flags & IFF_ALLMULTI) { | ||
1571 | /* Catch all multicast addresses, so set the | ||
1572 | * filter to all 1's | ||
1573 | */ | ||
1574 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | ||
1575 | writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | ||
1598 | 1576 | ||
1599 | ep->fec_r_cntrl &= ~0x0008; | 1577 | return; |
1578 | } | ||
1600 | 1579 | ||
1601 | if (dev->flags & IFF_ALLMULTI) { | 1580 | /* Clear filter and add the addresses in hash register |
1602 | /* Catch all multicast addresses, so set the | 1581 | */ |
1603 | * filter to all 1's. | 1582 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1604 | */ | 1583 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1605 | ep->fec_grp_hash_table_high = 0xffffffff; | 1584 | |
1606 | ep->fec_grp_hash_table_low = 0xffffffff; | 1585 | dmi = dev->mc_list; |
1607 | } else { | 1586 | |
1608 | /* Clear filter and add the addresses in hash register. | 1587 | for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) { |
1609 | */ | 1588 | /* Only support group multicast for now */ |
1610 | ep->fec_grp_hash_table_high = 0; | 1589 | if (!(dmi->dmi_addr[0] & 1)) |
1611 | ep->fec_grp_hash_table_low = 0; | 1590 | continue; |
1612 | 1591 | ||
1613 | dmi = dev->mc_list; | 1592 | /* calculate crc32 value of mac address */ |
1614 | 1593 | crc = 0xffffffff; | |
1615 | for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) | 1594 | |
1616 | { | 1595 | for (i = 0; i < dmi->dmi_addrlen; i++) { |
1617 | /* Only support group multicast for now. | 1596 | data = dmi->dmi_addr[i]; |
1618 | */ | 1597 | for (bit = 0; bit < 8; bit++, data >>= 1) { |
1619 | if (!(dmi->dmi_addr[0] & 1)) | 1598 | crc = (crc >> 1) ^ |
1620 | continue; | 1599 | (((crc ^ data) & 1) ? CRC32_POLY : 0); |
1621 | |||
1622 | /* calculate crc32 value of mac address | ||
1623 | */ | ||
1624 | crc = 0xffffffff; | ||
1625 | |||
1626 | for (i = 0; i < dmi->dmi_addrlen; i++) | ||
1627 | { | ||
1628 | data = dmi->dmi_addr[i]; | ||
1629 | for (bit = 0; bit < 8; bit++, data >>= 1) | ||
1630 | { | ||
1631 | crc = (crc >> 1) ^ | ||
1632 | (((crc ^ data) & 1) ? CRC32_POLY : 0); | ||
1633 | } | ||
1634 | } | ||
1635 | |||
1636 | /* only upper 6 bits (HASH_BITS) are used | ||
1637 | which point to specific bit in he hash registers | ||
1638 | */ | ||
1639 | hash = (crc >> (32 - HASH_BITS)) & 0x3f; | ||
1640 | |||
1641 | if (hash > 31) | ||
1642 | ep->fec_grp_hash_table_high |= 1 << (hash - 32); | ||
1643 | else | ||
1644 | ep->fec_grp_hash_table_low |= 1 << hash; | ||
1645 | } | 1600 | } |
1646 | } | 1601 | } |
1602 | |||
1603 | /* only upper 6 bits (HASH_BITS) are used | ||
1604 | * which point to specific bit in he hash registers | ||
1605 | */ | ||
1606 | hash = (crc >> (32 - HASH_BITS)) & 0x3f; | ||
1607 | |||
1608 | if (hash > 31) { | ||
1609 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | ||
1610 | tmp |= 1 << (hash - 32); | ||
1611 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); | ||
1612 | } else { | ||
1613 | tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); | ||
1614 | tmp |= 1 << hash; | ||
1615 | writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); | ||
1616 | } | ||
1647 | } | 1617 | } |
1648 | } | 1618 | } |
1649 | 1619 | ||
1650 | /* Set a MAC change in hardware. | 1620 | /* Set a MAC change in hardware. */ |
1651 | */ | 1621 | static int |
1652 | static void | 1622 | fec_set_mac_address(struct net_device *dev, void *p) |
1653 | fec_set_mac_address(struct net_device *dev) | ||
1654 | { | 1623 | { |
1655 | volatile fec_t *fecp; | 1624 | struct fec_enet_private *fep = netdev_priv(dev); |
1625 | struct sockaddr *addr = p; | ||
1656 | 1626 | ||
1657 | fecp = ((struct fec_enet_private *)netdev_priv(dev))->hwp; | 1627 | if (!is_valid_ether_addr(addr->sa_data)) |
1628 | return -EADDRNOTAVAIL; | ||
1658 | 1629 | ||
1659 | /* Set station address. */ | 1630 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
1660 | fecp->fec_addr_low = dev->dev_addr[3] | (dev->dev_addr[2] << 8) | | ||
1661 | (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24); | ||
1662 | fecp->fec_addr_high = (dev->dev_addr[5] << 16) | | ||
1663 | (dev->dev_addr[4] << 24); | ||
1664 | 1631 | ||
1632 | writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) | | ||
1633 | (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), | ||
1634 | fep->hwp + FEC_ADDR_LOW); | ||
1635 | writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), | ||
1636 | fep->hwp + FEC_ADDR_HIGH); | ||
1637 | return 0; | ||
1665 | } | 1638 | } |
1666 | 1639 | ||
1640 | static const struct net_device_ops fec_netdev_ops = { | ||
1641 | .ndo_open = fec_enet_open, | ||
1642 | .ndo_stop = fec_enet_close, | ||
1643 | .ndo_start_xmit = fec_enet_start_xmit, | ||
1644 | .ndo_set_multicast_list = set_multicast_list, | ||
1645 | .ndo_validate_addr = eth_validate_addr, | ||
1646 | .ndo_tx_timeout = fec_timeout, | ||
1647 | .ndo_set_mac_address = fec_set_mac_address, | ||
1648 | }; | ||
1649 | |||
1667 | /* | 1650 | /* |
1668 | * XXX: We need to clean up on failure exits here. | 1651 | * XXX: We need to clean up on failure exits here. |
1669 | * | 1652 | * |
@@ -1672,17 +1655,13 @@ fec_set_mac_address(struct net_device *dev) | |||
1672 | int __init fec_enet_init(struct net_device *dev, int index) | 1655 | int __init fec_enet_init(struct net_device *dev, int index) |
1673 | { | 1656 | { |
1674 | struct fec_enet_private *fep = netdev_priv(dev); | 1657 | struct fec_enet_private *fep = netdev_priv(dev); |
1675 | unsigned long mem_addr; | 1658 | struct bufdesc *cbd_base; |
1676 | volatile cbd_t *bdp; | 1659 | int i; |
1677 | cbd_t *cbd_base; | ||
1678 | volatile fec_t *fecp; | ||
1679 | int i, j; | ||
1680 | 1660 | ||
1681 | /* Allocate memory for buffer descriptors. | 1661 | /* Allocate memory for buffer descriptors. */ |
1682 | */ | 1662 | cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, |
1683 | mem_addr = (unsigned long)dma_alloc_coherent(NULL, PAGE_SIZE, | 1663 | GFP_KERNEL); |
1684 | &fep->bd_dma, GFP_KERNEL); | 1664 | if (!cbd_base) { |
1685 | if (mem_addr == 0) { | ||
1686 | printk("FEC: allocate descriptor memory failed?\n"); | 1665 | printk("FEC: allocate descriptor memory failed?\n"); |
1687 | return -ENOMEM; | 1666 | return -ENOMEM; |
1688 | } | 1667 | } |
@@ -1690,146 +1669,47 @@ int __init fec_enet_init(struct net_device *dev, int index) | |||
1690 | spin_lock_init(&fep->hw_lock); | 1669 | spin_lock_init(&fep->hw_lock); |
1691 | spin_lock_init(&fep->mii_lock); | 1670 | spin_lock_init(&fep->mii_lock); |
1692 | 1671 | ||
1693 | /* Create an Ethernet device instance. | ||
1694 | */ | ||
1695 | fecp = (volatile fec_t *)dev->base_addr; | ||
1696 | |||
1697 | fep->index = index; | 1672 | fep->index = index; |
1698 | fep->hwp = fecp; | 1673 | fep->hwp = (void __iomem *)dev->base_addr; |
1699 | fep->netdev = dev; | 1674 | fep->netdev = dev; |
1700 | 1675 | ||
1701 | /* Whack a reset. We should wait for this. | ||
1702 | */ | ||
1703 | fecp->fec_ecntrl = 1; | ||
1704 | udelay(10); | ||
1705 | |||
1706 | /* Set the Ethernet address */ | 1676 | /* Set the Ethernet address */ |
1707 | #ifdef CONFIG_M5272 | 1677 | #ifdef CONFIG_M5272 |
1708 | fec_get_mac(dev); | 1678 | fec_get_mac(dev); |
1709 | #else | 1679 | #else |
1710 | { | 1680 | { |
1711 | unsigned long l; | 1681 | unsigned long l; |
1712 | l = fecp->fec_addr_low; | 1682 | l = readl(fep->hwp + FEC_ADDR_LOW); |
1713 | dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); | 1683 | dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24); |
1714 | dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); | 1684 | dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16); |
1715 | dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); | 1685 | dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8); |
1716 | dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); | 1686 | dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0); |
1717 | l = fecp->fec_addr_high; | 1687 | l = readl(fep->hwp + FEC_ADDR_HIGH); |
1718 | dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); | 1688 | dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24); |
1719 | dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); | 1689 | dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16); |
1720 | } | 1690 | } |
1721 | #endif | 1691 | #endif |
1722 | 1692 | ||
1723 | cbd_base = (cbd_t *)mem_addr; | 1693 | /* Set receive and transmit descriptor base. */ |
1724 | |||
1725 | /* Set receive and transmit descriptor base. | ||
1726 | */ | ||
1727 | fep->rx_bd_base = cbd_base; | 1694 | fep->rx_bd_base = cbd_base; |
1728 | fep->tx_bd_base = cbd_base + RX_RING_SIZE; | 1695 | fep->tx_bd_base = cbd_base + RX_RING_SIZE; |
1729 | 1696 | ||
1730 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; | ||
1731 | fep->cur_rx = fep->rx_bd_base; | ||
1732 | |||
1733 | fep->skb_cur = fep->skb_dirty = 0; | ||
1734 | |||
1735 | /* Initialize the receive buffer descriptors. | ||
1736 | */ | ||
1737 | bdp = fep->rx_bd_base; | ||
1738 | for (i=0; i<FEC_ENET_RX_PAGES; i++) { | ||
1739 | |||
1740 | /* Allocate a page. | ||
1741 | */ | ||
1742 | mem_addr = __get_free_page(GFP_KERNEL); | ||
1743 | /* XXX: missing check for allocation failure */ | ||
1744 | |||
1745 | /* Initialize the BD for every fragment in the page. | ||
1746 | */ | ||
1747 | for (j=0; j<FEC_ENET_RX_FRPPG; j++) { | ||
1748 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | ||
1749 | bdp->cbd_bufaddr = __pa(mem_addr); | ||
1750 | mem_addr += FEC_ENET_RX_FRSIZE; | ||
1751 | bdp++; | ||
1752 | } | ||
1753 | } | ||
1754 | |||
1755 | /* Set the last buffer to wrap. | ||
1756 | */ | ||
1757 | bdp--; | ||
1758 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1759 | |||
1760 | /* ...and the same for transmmit. | ||
1761 | */ | ||
1762 | bdp = fep->tx_bd_base; | ||
1763 | for (i=0, j=FEC_ENET_TX_FRPPG; i<TX_RING_SIZE; i++) { | ||
1764 | if (j >= FEC_ENET_TX_FRPPG) { | ||
1765 | mem_addr = __get_free_page(GFP_KERNEL); | ||
1766 | j = 1; | ||
1767 | } else { | ||
1768 | mem_addr += FEC_ENET_TX_FRSIZE; | ||
1769 | j++; | ||
1770 | } | ||
1771 | fep->tx_bounce[i] = (unsigned char *) mem_addr; | ||
1772 | |||
1773 | /* Initialize the BD for every fragment in the page. | ||
1774 | */ | ||
1775 | bdp->cbd_sc = 0; | ||
1776 | bdp->cbd_bufaddr = 0; | ||
1777 | bdp++; | ||
1778 | } | ||
1779 | |||
1780 | /* Set the last buffer to wrap. | ||
1781 | */ | ||
1782 | bdp--; | ||
1783 | bdp->cbd_sc |= BD_SC_WRAP; | ||
1784 | |||
1785 | /* Set receive and transmit descriptor base. | ||
1786 | */ | ||
1787 | fecp->fec_r_des_start = fep->bd_dma; | ||
1788 | fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t) | ||
1789 | * RX_RING_SIZE; | ||
1790 | |||
1791 | #ifdef HAVE_mii_link_interrupt | 1697 | #ifdef HAVE_mii_link_interrupt |
1792 | fec_request_mii_intr(dev); | 1698 | fec_request_mii_intr(dev); |
1793 | #endif | 1699 | #endif |
1794 | 1700 | /* The FEC Ethernet specific entries in the device structure */ | |
1795 | fecp->fec_grp_hash_table_high = 0; | ||
1796 | fecp->fec_grp_hash_table_low = 0; | ||
1797 | fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; | ||
1798 | fecp->fec_ecntrl = 2; | ||
1799 | fecp->fec_r_des_active = 0; | ||
1800 | #ifndef CONFIG_M5272 | ||
1801 | fecp->fec_hash_table_high = 0; | ||
1802 | fecp->fec_hash_table_low = 0; | ||
1803 | #endif | ||
1804 | |||
1805 | /* The FEC Ethernet specific entries in the device structure. */ | ||
1806 | dev->open = fec_enet_open; | ||
1807 | dev->hard_start_xmit = fec_enet_start_xmit; | ||
1808 | dev->tx_timeout = fec_timeout; | ||
1809 | dev->watchdog_timeo = TX_TIMEOUT; | 1701 | dev->watchdog_timeo = TX_TIMEOUT; |
1810 | dev->stop = fec_enet_close; | 1702 | dev->netdev_ops = &fec_netdev_ops; |
1811 | dev->set_multicast_list = set_multicast_list; | ||
1812 | 1703 | ||
1813 | for (i=0; i<NMII-1; i++) | 1704 | for (i=0; i<NMII-1; i++) |
1814 | mii_cmds[i].mii_next = &mii_cmds[i+1]; | 1705 | mii_cmds[i].mii_next = &mii_cmds[i+1]; |
1815 | mii_free = mii_cmds; | 1706 | mii_free = mii_cmds; |
1816 | 1707 | ||
1817 | /* setup MII interface */ | 1708 | /* Set MII speed to 2.5 MHz */ |
1818 | fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04; | ||
1819 | fecp->fec_x_cntrl = 0x00; | ||
1820 | |||
1821 | /* | ||
1822 | * Set MII speed to 2.5 MHz | ||
1823 | */ | ||
1824 | fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999) | 1709 | fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999) |
1825 | / 2500000) / 2) & 0x3F) << 1; | 1710 | / 2500000) / 2) & 0x3F) << 1; |
1826 | fecp->fec_mii_speed = fep->phy_speed; | ||
1827 | fec_restart(dev, 0); | 1711 | fec_restart(dev, 0); |
1828 | 1712 | ||
1829 | /* Clear and enable interrupts */ | ||
1830 | fecp->fec_ievent = 0xffc00000; | ||
1831 | fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII); | ||
1832 | |||
1833 | /* Queue up command to detect the PHY and initialize the | 1713 | /* Queue up command to detect the PHY and initialize the |
1834 | * remainder of the interface. | 1714 | * remainder of the interface. |
1835 | */ | 1715 | */ |
@@ -1847,145 +1727,118 @@ int __init fec_enet_init(struct net_device *dev, int index) | |||
1847 | static void | 1727 | static void |
1848 | fec_restart(struct net_device *dev, int duplex) | 1728 | fec_restart(struct net_device *dev, int duplex) |
1849 | { | 1729 | { |
1850 | struct fec_enet_private *fep; | 1730 | struct fec_enet_private *fep = netdev_priv(dev); |
1851 | volatile cbd_t *bdp; | 1731 | struct bufdesc *bdp; |
1852 | volatile fec_t *fecp; | ||
1853 | int i; | 1732 | int i; |
1854 | 1733 | ||
1855 | fep = netdev_priv(dev); | 1734 | /* Whack a reset. We should wait for this. */ |
1856 | fecp = fep->hwp; | 1735 | writel(1, fep->hwp + FEC_ECNTRL); |
1857 | |||
1858 | /* Whack a reset. We should wait for this. | ||
1859 | */ | ||
1860 | fecp->fec_ecntrl = 1; | ||
1861 | udelay(10); | 1736 | udelay(10); |
1862 | 1737 | ||
1863 | /* Clear any outstanding interrupt. | 1738 | /* Clear any outstanding interrupt. */ |
1864 | */ | 1739 | writel(0xffc00000, fep->hwp + FEC_IEVENT); |
1865 | fecp->fec_ievent = 0xffc00000; | ||
1866 | 1740 | ||
1867 | /* Set station address. | 1741 | /* Reset all multicast. */ |
1868 | */ | 1742 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); |
1869 | fec_set_mac_address(dev); | 1743 | writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); |
1744 | #ifndef CONFIG_M5272 | ||
1745 | writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); | ||
1746 | writel(0, fep->hwp + FEC_HASH_TABLE_LOW); | ||
1747 | #endif | ||
1870 | 1748 | ||
1871 | /* Reset all multicast. | 1749 | /* Set maximum receive buffer size. */ |
1872 | */ | 1750 | writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); |
1873 | fecp->fec_grp_hash_table_high = 0; | ||
1874 | fecp->fec_grp_hash_table_low = 0; | ||
1875 | 1751 | ||
1876 | /* Set maximum receive buffer size. | 1752 | /* Set receive and transmit descriptor base. */ |
1877 | */ | 1753 | writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); |
1878 | fecp->fec_r_buff_size = PKT_MAXBLR_SIZE; | 1754 | writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE, |
1879 | 1755 | fep->hwp + FEC_X_DES_START); | |
1880 | /* Set receive and transmit descriptor base. | ||
1881 | */ | ||
1882 | fecp->fec_r_des_start = fep->bd_dma; | ||
1883 | fecp->fec_x_des_start = (unsigned long)fep->bd_dma + sizeof(cbd_t) | ||
1884 | * RX_RING_SIZE; | ||
1885 | 1756 | ||
1886 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; | 1757 | fep->dirty_tx = fep->cur_tx = fep->tx_bd_base; |
1887 | fep->cur_rx = fep->rx_bd_base; | 1758 | fep->cur_rx = fep->rx_bd_base; |
1888 | 1759 | ||
1889 | /* Reset SKB transmit buffers. | 1760 | /* Reset SKB transmit buffers. */ |
1890 | */ | ||
1891 | fep->skb_cur = fep->skb_dirty = 0; | 1761 | fep->skb_cur = fep->skb_dirty = 0; |
1892 | for (i=0; i<=TX_RING_MOD_MASK; i++) { | 1762 | for (i = 0; i <= TX_RING_MOD_MASK; i++) { |
1893 | if (fep->tx_skbuff[i] != NULL) { | 1763 | if (fep->tx_skbuff[i]) { |
1894 | dev_kfree_skb_any(fep->tx_skbuff[i]); | 1764 | dev_kfree_skb_any(fep->tx_skbuff[i]); |
1895 | fep->tx_skbuff[i] = NULL; | 1765 | fep->tx_skbuff[i] = NULL; |
1896 | } | 1766 | } |
1897 | } | 1767 | } |
1898 | 1768 | ||
1899 | /* Initialize the receive buffer descriptors. | 1769 | /* Initialize the receive buffer descriptors. */ |
1900 | */ | ||
1901 | bdp = fep->rx_bd_base; | 1770 | bdp = fep->rx_bd_base; |
1902 | for (i=0; i<RX_RING_SIZE; i++) { | 1771 | for (i = 0; i < RX_RING_SIZE; i++) { |
1903 | 1772 | ||
1904 | /* Initialize the BD for every fragment in the page. | 1773 | /* Initialize the BD for every fragment in the page. */ |
1905 | */ | ||
1906 | bdp->cbd_sc = BD_ENET_RX_EMPTY; | 1774 | bdp->cbd_sc = BD_ENET_RX_EMPTY; |
1907 | bdp++; | 1775 | bdp++; |
1908 | } | 1776 | } |
1909 | 1777 | ||
1910 | /* Set the last buffer to wrap. | 1778 | /* Set the last buffer to wrap */ |
1911 | */ | ||
1912 | bdp--; | 1779 | bdp--; |
1913 | bdp->cbd_sc |= BD_SC_WRAP; | 1780 | bdp->cbd_sc |= BD_SC_WRAP; |
1914 | 1781 | ||
1915 | /* ...and the same for transmmit. | 1782 | /* ...and the same for transmit */ |
1916 | */ | ||
1917 | bdp = fep->tx_bd_base; | 1783 | bdp = fep->tx_bd_base; |
1918 | for (i=0; i<TX_RING_SIZE; i++) { | 1784 | for (i = 0; i < TX_RING_SIZE; i++) { |
1919 | 1785 | ||
1920 | /* Initialize the BD for every fragment in the page. | 1786 | /* Initialize the BD for every fragment in the page. */ |
1921 | */ | ||
1922 | bdp->cbd_sc = 0; | 1787 | bdp->cbd_sc = 0; |
1923 | bdp->cbd_bufaddr = 0; | 1788 | bdp->cbd_bufaddr = 0; |
1924 | bdp++; | 1789 | bdp++; |
1925 | } | 1790 | } |
1926 | 1791 | ||
1927 | /* Set the last buffer to wrap. | 1792 | /* Set the last buffer to wrap */ |
1928 | */ | ||
1929 | bdp--; | 1793 | bdp--; |
1930 | bdp->cbd_sc |= BD_SC_WRAP; | 1794 | bdp->cbd_sc |= BD_SC_WRAP; |
1931 | 1795 | ||
1932 | /* Enable MII mode. | 1796 | /* Enable MII mode */ |
1933 | */ | ||
1934 | if (duplex) { | 1797 | if (duplex) { |
1935 | fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;/* MII enable */ | 1798 | /* MII enable / FD enable */ |
1936 | fecp->fec_x_cntrl = 0x04; /* FD enable */ | 1799 | writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL); |
1800 | writel(0x04, fep->hwp + FEC_X_CNTRL); | ||
1937 | } else { | 1801 | } else { |
1938 | /* MII enable|No Rcv on Xmit */ | 1802 | /* MII enable / No Rcv on Xmit */ |
1939 | fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x06; | 1803 | writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL); |
1940 | fecp->fec_x_cntrl = 0x00; | 1804 | writel(0x0, fep->hwp + FEC_X_CNTRL); |
1941 | } | 1805 | } |
1942 | fep->full_duplex = duplex; | 1806 | fep->full_duplex = duplex; |
1943 | 1807 | ||
1944 | /* Set MII speed. | 1808 | /* Set MII speed */ |
1945 | */ | 1809 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
1946 | fecp->fec_mii_speed = fep->phy_speed; | ||
1947 | 1810 | ||
1948 | /* And last, enable the transmit and receive processing. | 1811 | /* And last, enable the transmit and receive processing */ |
1949 | */ | 1812 | writel(2, fep->hwp + FEC_ECNTRL); |
1950 | fecp->fec_ecntrl = 2; | 1813 | writel(0, fep->hwp + FEC_R_DES_ACTIVE); |
1951 | fecp->fec_r_des_active = 0; | ||
1952 | 1814 | ||
1953 | /* Enable interrupts we wish to service. | 1815 | /* Enable interrupts we wish to service */ |
1954 | */ | 1816 | writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII, |
1955 | fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII); | 1817 | fep->hwp + FEC_IMASK); |
1956 | } | 1818 | } |
1957 | 1819 | ||
1958 | static void | 1820 | static void |
1959 | fec_stop(struct net_device *dev) | 1821 | fec_stop(struct net_device *dev) |
1960 | { | 1822 | { |
1961 | volatile fec_t *fecp; | 1823 | struct fec_enet_private *fep = netdev_priv(dev); |
1962 | struct fec_enet_private *fep; | ||
1963 | |||
1964 | fep = netdev_priv(dev); | ||
1965 | fecp = fep->hwp; | ||
1966 | 1824 | ||
1967 | /* | 1825 | /* We cannot expect a graceful transmit stop without link !!! */ |
1968 | ** We cannot expect a graceful transmit stop without link !!! | 1826 | if (fep->link) { |
1969 | */ | 1827 | writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ |
1970 | if (fep->link) | ||
1971 | { | ||
1972 | fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */ | ||
1973 | udelay(10); | 1828 | udelay(10); |
1974 | if (!(fecp->fec_ievent & FEC_ENET_GRA)) | 1829 | if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) |
1975 | printk("fec_stop : Graceful transmit stop did not complete !\n"); | 1830 | printk("fec_stop : Graceful transmit stop did not complete !\n"); |
1976 | } | 1831 | } |
1977 | 1832 | ||
1978 | /* Whack a reset. We should wait for this. | 1833 | /* Whack a reset. We should wait for this. */ |
1979 | */ | 1834 | writel(1, fep->hwp + FEC_ECNTRL); |
1980 | fecp->fec_ecntrl = 1; | ||
1981 | udelay(10); | 1835 | udelay(10); |
1982 | 1836 | ||
1983 | /* Clear outstanding MII command interrupts. | 1837 | /* Clear outstanding MII command interrupts. */ |
1984 | */ | 1838 | writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); |
1985 | fecp->fec_ievent = FEC_ENET_MII; | ||
1986 | 1839 | ||
1987 | fecp->fec_imask = FEC_ENET_MII; | 1840 | writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); |
1988 | fecp->fec_mii_speed = fep->phy_speed; | 1841 | writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); |
1989 | } | 1842 | } |
1990 | 1843 | ||
1991 | static int __devinit | 1844 | static int __devinit |
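
Taken together, the converted fec_restart() now expresses the bring-up sequence as a linear series of writel() calls. The ordering is preserved from the old code: reset, ack stale events, program the descriptor bases, select MII mode, then enable the MAC and kick the receiver. Compressed to its skeleton (register offsets assumed from the driver's fec.h):

	writel(1, fep->hwp + FEC_ECNTRL);		/* whack a reset */
	udelay(10);
	writel(0xffc00000, fep->hwp + FEC_IEVENT);	/* clear stale events */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel(fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
	       fep->hwp + FEC_X_DES_START);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(2, fep->hwp + FEC_ECNTRL);		/* ETHER_EN: engine on */
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);	/* tell RX its ring is ready */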