diff options
Diffstat (limited to 'drivers/net')
38 files changed, 4439 insertions, 3204 deletions
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c index e58d4c50c2e1..f5ee064ab6b2 100644 --- a/drivers/net/8139too.c +++ b/drivers/net/8139too.c | |||
@@ -1605,7 +1605,7 @@ static void rtl8139_thread (void *_data) | |||
1605 | if (tp->watchdog_fired) { | 1605 | if (tp->watchdog_fired) { |
1606 | tp->watchdog_fired = 0; | 1606 | tp->watchdog_fired = 0; |
1607 | rtl8139_tx_timeout_task(_data); | 1607 | rtl8139_tx_timeout_task(_data); |
1608 | } else if (rtnl_shlock_nowait() == 0) { | 1608 | } else if (rtnl_trylock()) { |
1609 | rtl8139_thread_iter (dev, tp, tp->mmio_addr); | 1609 | rtl8139_thread_iter (dev, tp, tp->mmio_addr); |
1610 | rtnl_unlock (); | 1610 | rtnl_unlock (); |
1611 | } else { | 1611 | } else { |
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c index 53e3afc1b7b7..09d5c3f26985 100644 --- a/drivers/net/arm/am79c961a.c +++ b/drivers/net/arm/am79c961a.c | |||
@@ -696,7 +696,9 @@ static int __init am79c961_probe(struct platform_device *pdev) | |||
696 | dev->base_addr = res->start; | 696 | dev->base_addr = res->start; |
697 | dev->irq = platform_get_irq(pdev, 0); | 697 | dev->irq = platform_get_irq(pdev, 0); |
698 | 698 | ||
699 | ret = -ENODEV; | 699 | ret = -ENODEV; |
700 | if (dev->irq < 0) | ||
701 | goto nodev; | ||
700 | if (!request_region(dev->base_addr, 0x18, dev->name)) | 702 | if (!request_region(dev->base_addr, 0x18, dev->name)) |
701 | goto nodev; | 703 | goto nodev; |
702 | 704 | ||
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index b787b6582e50..7d213707008a 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c | |||
@@ -14,8 +14,8 @@ | |||
14 | 14 | ||
15 | #define DRV_MODULE_NAME "bnx2" | 15 | #define DRV_MODULE_NAME "bnx2" |
16 | #define PFX DRV_MODULE_NAME ": " | 16 | #define PFX DRV_MODULE_NAME ": " |
17 | #define DRV_MODULE_VERSION "1.4.31" | 17 | #define DRV_MODULE_VERSION "1.4.38" |
18 | #define DRV_MODULE_RELDATE "January 19, 2006" | 18 | #define DRV_MODULE_RELDATE "February 10, 2006" |
19 | 19 | ||
20 | #define RUN_AT(x) (jiffies + (x)) | 20 | #define RUN_AT(x) (jiffies + (x)) |
21 | 21 | ||
@@ -360,6 +360,8 @@ bnx2_netif_start(struct bnx2 *bp) | |||
360 | static void | 360 | static void |
361 | bnx2_free_mem(struct bnx2 *bp) | 361 | bnx2_free_mem(struct bnx2 *bp) |
362 | { | 362 | { |
363 | int i; | ||
364 | |||
363 | if (bp->stats_blk) { | 365 | if (bp->stats_blk) { |
364 | pci_free_consistent(bp->pdev, sizeof(struct statistics_block), | 366 | pci_free_consistent(bp->pdev, sizeof(struct statistics_block), |
365 | bp->stats_blk, bp->stats_blk_mapping); | 367 | bp->stats_blk, bp->stats_blk_mapping); |
@@ -378,19 +380,23 @@ bnx2_free_mem(struct bnx2 *bp) | |||
378 | } | 380 | } |
379 | kfree(bp->tx_buf_ring); | 381 | kfree(bp->tx_buf_ring); |
380 | bp->tx_buf_ring = NULL; | 382 | bp->tx_buf_ring = NULL; |
381 | if (bp->rx_desc_ring) { | 383 | for (i = 0; i < bp->rx_max_ring; i++) { |
382 | pci_free_consistent(bp->pdev, | 384 | if (bp->rx_desc_ring[i]) |
383 | sizeof(struct rx_bd) * RX_DESC_CNT, | 385 | pci_free_consistent(bp->pdev, |
384 | bp->rx_desc_ring, bp->rx_desc_mapping); | 386 | sizeof(struct rx_bd) * RX_DESC_CNT, |
385 | bp->rx_desc_ring = NULL; | 387 | bp->rx_desc_ring[i], |
386 | } | 388 | bp->rx_desc_mapping[i]); |
387 | kfree(bp->rx_buf_ring); | 389 | bp->rx_desc_ring[i] = NULL; |
390 | } | ||
391 | vfree(bp->rx_buf_ring); | ||
388 | bp->rx_buf_ring = NULL; | 392 | bp->rx_buf_ring = NULL; |
389 | } | 393 | } |
390 | 394 | ||
391 | static int | 395 | static int |
392 | bnx2_alloc_mem(struct bnx2 *bp) | 396 | bnx2_alloc_mem(struct bnx2 *bp) |
393 | { | 397 | { |
398 | int i; | ||
399 | |||
394 | bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT, | 400 | bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT, |
395 | GFP_KERNEL); | 401 | GFP_KERNEL); |
396 | if (bp->tx_buf_ring == NULL) | 402 | if (bp->tx_buf_ring == NULL) |
@@ -404,18 +410,23 @@ bnx2_alloc_mem(struct bnx2 *bp) | |||
404 | if (bp->tx_desc_ring == NULL) | 410 | if (bp->tx_desc_ring == NULL) |
405 | goto alloc_mem_err; | 411 | goto alloc_mem_err; |
406 | 412 | ||
407 | bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT, | 413 | bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT * |
408 | GFP_KERNEL); | 414 | bp->rx_max_ring); |
409 | if (bp->rx_buf_ring == NULL) | 415 | if (bp->rx_buf_ring == NULL) |
410 | goto alloc_mem_err; | 416 | goto alloc_mem_err; |
411 | 417 | ||
412 | memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT); | 418 | memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT * |
413 | bp->rx_desc_ring = pci_alloc_consistent(bp->pdev, | 419 | bp->rx_max_ring); |
414 | sizeof(struct rx_bd) * | 420 | |
415 | RX_DESC_CNT, | 421 | for (i = 0; i < bp->rx_max_ring; i++) { |
416 | &bp->rx_desc_mapping); | 422 | bp->rx_desc_ring[i] = |
417 | if (bp->rx_desc_ring == NULL) | 423 | pci_alloc_consistent(bp->pdev, |
418 | goto alloc_mem_err; | 424 | sizeof(struct rx_bd) * RX_DESC_CNT, |
425 | &bp->rx_desc_mapping[i]); | ||
426 | if (bp->rx_desc_ring[i] == NULL) | ||
427 | goto alloc_mem_err; | ||
428 | |||
429 | } | ||
419 | 430 | ||
420 | bp->status_blk = pci_alloc_consistent(bp->pdev, | 431 | bp->status_blk = pci_alloc_consistent(bp->pdev, |
421 | sizeof(struct status_block), | 432 | sizeof(struct status_block), |
@@ -1520,7 +1531,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index) | |||
1520 | struct sk_buff *skb; | 1531 | struct sk_buff *skb; |
1521 | struct sw_bd *rx_buf = &bp->rx_buf_ring[index]; | 1532 | struct sw_bd *rx_buf = &bp->rx_buf_ring[index]; |
1522 | dma_addr_t mapping; | 1533 | dma_addr_t mapping; |
1523 | struct rx_bd *rxbd = &bp->rx_desc_ring[index]; | 1534 | struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)]; |
1524 | unsigned long align; | 1535 | unsigned long align; |
1525 | 1536 | ||
1526 | skb = dev_alloc_skb(bp->rx_buf_size); | 1537 | skb = dev_alloc_skb(bp->rx_buf_size); |
@@ -1656,23 +1667,30 @@ static inline void | |||
1656 | bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb, | 1667 | bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb, |
1657 | u16 cons, u16 prod) | 1668 | u16 cons, u16 prod) |
1658 | { | 1669 | { |
1659 | struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons]; | 1670 | struct sw_bd *cons_rx_buf, *prod_rx_buf; |
1660 | struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod]; | 1671 | struct rx_bd *cons_bd, *prod_bd; |
1661 | struct rx_bd *cons_bd = &bp->rx_desc_ring[cons]; | 1672 | |
1662 | struct rx_bd *prod_bd = &bp->rx_desc_ring[prod]; | 1673 | cons_rx_buf = &bp->rx_buf_ring[cons]; |
1674 | prod_rx_buf = &bp->rx_buf_ring[prod]; | ||
1663 | 1675 | ||
1664 | pci_dma_sync_single_for_device(bp->pdev, | 1676 | pci_dma_sync_single_for_device(bp->pdev, |
1665 | pci_unmap_addr(cons_rx_buf, mapping), | 1677 | pci_unmap_addr(cons_rx_buf, mapping), |
1666 | bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); | 1678 | bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); |
1667 | 1679 | ||
1668 | prod_rx_buf->skb = cons_rx_buf->skb; | 1680 | bp->rx_prod_bseq += bp->rx_buf_use_size; |
1669 | pci_unmap_addr_set(prod_rx_buf, mapping, | ||
1670 | pci_unmap_addr(cons_rx_buf, mapping)); | ||
1671 | 1681 | ||
1672 | memcpy(prod_bd, cons_bd, 8); | 1682 | prod_rx_buf->skb = skb; |
1673 | 1683 | ||
1674 | bp->rx_prod_bseq += bp->rx_buf_use_size; | 1684 | if (cons == prod) |
1685 | return; | ||
1675 | 1686 | ||
1687 | pci_unmap_addr_set(prod_rx_buf, mapping, | ||
1688 | pci_unmap_addr(cons_rx_buf, mapping)); | ||
1689 | |||
1690 | cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; | ||
1691 | prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; | ||
1692 | prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; | ||
1693 | prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; | ||
1676 | } | 1694 | } |
1677 | 1695 | ||
1678 | static int | 1696 | static int |
@@ -1699,14 +1717,19 @@ bnx2_rx_int(struct bnx2 *bp, int budget) | |||
1699 | u32 status; | 1717 | u32 status; |
1700 | struct sw_bd *rx_buf; | 1718 | struct sw_bd *rx_buf; |
1701 | struct sk_buff *skb; | 1719 | struct sk_buff *skb; |
1720 | dma_addr_t dma_addr; | ||
1702 | 1721 | ||
1703 | sw_ring_cons = RX_RING_IDX(sw_cons); | 1722 | sw_ring_cons = RX_RING_IDX(sw_cons); |
1704 | sw_ring_prod = RX_RING_IDX(sw_prod); | 1723 | sw_ring_prod = RX_RING_IDX(sw_prod); |
1705 | 1724 | ||
1706 | rx_buf = &bp->rx_buf_ring[sw_ring_cons]; | 1725 | rx_buf = &bp->rx_buf_ring[sw_ring_cons]; |
1707 | skb = rx_buf->skb; | 1726 | skb = rx_buf->skb; |
1708 | pci_dma_sync_single_for_cpu(bp->pdev, | 1727 | |
1709 | pci_unmap_addr(rx_buf, mapping), | 1728 | rx_buf->skb = NULL; |
1729 | |||
1730 | dma_addr = pci_unmap_addr(rx_buf, mapping); | ||
1731 | |||
1732 | pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, | ||
1710 | bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); | 1733 | bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE); |
1711 | 1734 | ||
1712 | rx_hdr = (struct l2_fhdr *) skb->data; | 1735 | rx_hdr = (struct l2_fhdr *) skb->data; |
@@ -1747,8 +1770,7 @@ bnx2_rx_int(struct bnx2 *bp, int budget) | |||
1747 | skb = new_skb; | 1770 | skb = new_skb; |
1748 | } | 1771 | } |
1749 | else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) { | 1772 | else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) { |
1750 | pci_unmap_single(bp->pdev, | 1773 | pci_unmap_single(bp->pdev, dma_addr, |
1751 | pci_unmap_addr(rx_buf, mapping), | ||
1752 | bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); | 1774 | bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); |
1753 | 1775 | ||
1754 | skb_reserve(skb, bp->rx_offset); | 1776 | skb_reserve(skb, bp->rx_offset); |
@@ -1794,8 +1816,6 @@ reuse_rx: | |||
1794 | rx_pkt++; | 1816 | rx_pkt++; |
1795 | 1817 | ||
1796 | next_rx: | 1818 | next_rx: |
1797 | rx_buf->skb = NULL; | ||
1798 | |||
1799 | sw_cons = NEXT_RX_BD(sw_cons); | 1819 | sw_cons = NEXT_RX_BD(sw_cons); |
1800 | sw_prod = NEXT_RX_BD(sw_prod); | 1820 | sw_prod = NEXT_RX_BD(sw_prod); |
1801 | 1821 | ||
@@ -3340,27 +3360,35 @@ bnx2_init_rx_ring(struct bnx2 *bp) | |||
3340 | bp->hw_rx_cons = 0; | 3360 | bp->hw_rx_cons = 0; |
3341 | bp->rx_prod_bseq = 0; | 3361 | bp->rx_prod_bseq = 0; |
3342 | 3362 | ||
3343 | rxbd = &bp->rx_desc_ring[0]; | 3363 | for (i = 0; i < bp->rx_max_ring; i++) { |
3344 | for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) { | 3364 | int j; |
3345 | rxbd->rx_bd_len = bp->rx_buf_use_size; | ||
3346 | rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; | ||
3347 | } | ||
3348 | 3365 | ||
3349 | rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping >> 32; | 3366 | rxbd = &bp->rx_desc_ring[i][0]; |
3350 | rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping & 0xffffffff; | 3367 | for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) { |
3368 | rxbd->rx_bd_len = bp->rx_buf_use_size; | ||
3369 | rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END; | ||
3370 | } | ||
3371 | if (i == (bp->rx_max_ring - 1)) | ||
3372 | j = 0; | ||
3373 | else | ||
3374 | j = i + 1; | ||
3375 | rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32; | ||
3376 | rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] & | ||
3377 | 0xffffffff; | ||
3378 | } | ||
3351 | 3379 | ||
3352 | val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; | 3380 | val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE; |
3353 | val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; | 3381 | val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2; |
3354 | val |= 0x02 << 8; | 3382 | val |= 0x02 << 8; |
3355 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val); | 3383 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val); |
3356 | 3384 | ||
3357 | val = (u64) bp->rx_desc_mapping >> 32; | 3385 | val = (u64) bp->rx_desc_mapping[0] >> 32; |
3358 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val); | 3386 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val); |
3359 | 3387 | ||
3360 | val = (u64) bp->rx_desc_mapping & 0xffffffff; | 3388 | val = (u64) bp->rx_desc_mapping[0] & 0xffffffff; |
3361 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val); | 3389 | CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val); |
3362 | 3390 | ||
3363 | for ( ;ring_prod < bp->rx_ring_size; ) { | 3391 | for (i = 0; i < bp->rx_ring_size; i++) { |
3364 | if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) { | 3392 | if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) { |
3365 | break; | 3393 | break; |
3366 | } | 3394 | } |
@@ -3375,6 +3403,29 @@ bnx2_init_rx_ring(struct bnx2 *bp) | |||
3375 | } | 3403 | } |
3376 | 3404 | ||
3377 | static void | 3405 | static void |
3406 | bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size) | ||
3407 | { | ||
3408 | u32 num_rings, max; | ||
3409 | |||
3410 | bp->rx_ring_size = size; | ||
3411 | num_rings = 1; | ||
3412 | while (size > MAX_RX_DESC_CNT) { | ||
3413 | size -= MAX_RX_DESC_CNT; | ||
3414 | num_rings++; | ||
3415 | } | ||
3416 | /* round to next power of 2 */ | ||
3417 | max = MAX_RX_RINGS; | ||
3418 | while ((max & num_rings) == 0) | ||
3419 | max >>= 1; | ||
3420 | |||
3421 | if (num_rings != max) | ||
3422 | max <<= 1; | ||
3423 | |||
3424 | bp->rx_max_ring = max; | ||
3425 | bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1; | ||
3426 | } | ||
3427 | |||
3428 | static void | ||
3378 | bnx2_free_tx_skbs(struct bnx2 *bp) | 3429 | bnx2_free_tx_skbs(struct bnx2 *bp) |
3379 | { | 3430 | { |
3380 | int i; | 3431 | int i; |
@@ -3419,7 +3470,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp) | |||
3419 | if (bp->rx_buf_ring == NULL) | 3470 | if (bp->rx_buf_ring == NULL) |
3420 | return; | 3471 | return; |
3421 | 3472 | ||
3422 | for (i = 0; i < RX_DESC_CNT; i++) { | 3473 | for (i = 0; i < bp->rx_max_ring_idx; i++) { |
3423 | struct sw_bd *rx_buf = &bp->rx_buf_ring[i]; | 3474 | struct sw_bd *rx_buf = &bp->rx_buf_ring[i]; |
3424 | struct sk_buff *skb = rx_buf->skb; | 3475 | struct sk_buff *skb = rx_buf->skb; |
3425 | 3476 | ||
@@ -3506,74 +3557,9 @@ bnx2_test_registers(struct bnx2 *bp) | |||
3506 | { 0x0c00, 0, 0x00000000, 0x00000001 }, | 3557 | { 0x0c00, 0, 0x00000000, 0x00000001 }, |
3507 | { 0x0c04, 0, 0x00000000, 0x03ff0001 }, | 3558 | { 0x0c04, 0, 0x00000000, 0x03ff0001 }, |
3508 | { 0x0c08, 0, 0x0f0ff073, 0x00000000 }, | 3559 | { 0x0c08, 0, 0x0f0ff073, 0x00000000 }, |
3509 | { 0x0c0c, 0, 0x00ffffff, 0x00000000 }, | ||
3510 | { 0x0c30, 0, 0x00000000, 0xffffffff }, | ||
3511 | { 0x0c34, 0, 0x00000000, 0xffffffff }, | ||
3512 | { 0x0c38, 0, 0x00000000, 0xffffffff }, | ||
3513 | { 0x0c3c, 0, 0x00000000, 0xffffffff }, | ||
3514 | { 0x0c40, 0, 0x00000000, 0xffffffff }, | ||
3515 | { 0x0c44, 0, 0x00000000, 0xffffffff }, | ||
3516 | { 0x0c48, 0, 0x00000000, 0x0007ffff }, | ||
3517 | { 0x0c4c, 0, 0x00000000, 0xffffffff }, | ||
3518 | { 0x0c50, 0, 0x00000000, 0xffffffff }, | ||
3519 | { 0x0c54, 0, 0x00000000, 0xffffffff }, | ||
3520 | { 0x0c58, 0, 0x00000000, 0xffffffff }, | ||
3521 | { 0x0c5c, 0, 0x00000000, 0xffffffff }, | ||
3522 | { 0x0c60, 0, 0x00000000, 0xffffffff }, | ||
3523 | { 0x0c64, 0, 0x00000000, 0xffffffff }, | ||
3524 | { 0x0c68, 0, 0x00000000, 0xffffffff }, | ||
3525 | { 0x0c6c, 0, 0x00000000, 0xffffffff }, | ||
3526 | { 0x0c70, 0, 0x00000000, 0xffffffff }, | ||
3527 | { 0x0c74, 0, 0x00000000, 0xffffffff }, | ||
3528 | { 0x0c78, 0, 0x00000000, 0xffffffff }, | ||
3529 | { 0x0c7c, 0, 0x00000000, 0xffffffff }, | ||
3530 | { 0x0c80, 0, 0x00000000, 0xffffffff }, | ||
3531 | { 0x0c84, 0, 0x00000000, 0xffffffff }, | ||
3532 | { 0x0c88, 0, 0x00000000, 0xffffffff }, | ||
3533 | { 0x0c8c, 0, 0x00000000, 0xffffffff }, | ||
3534 | { 0x0c90, 0, 0x00000000, 0xffffffff }, | ||
3535 | { 0x0c94, 0, 0x00000000, 0xffffffff }, | ||
3536 | { 0x0c98, 0, 0x00000000, 0xffffffff }, | ||
3537 | { 0x0c9c, 0, 0x00000000, 0xffffffff }, | ||
3538 | { 0x0ca0, 0, 0x00000000, 0xffffffff }, | ||
3539 | { 0x0ca4, 0, 0x00000000, 0xffffffff }, | ||
3540 | { 0x0ca8, 0, 0x00000000, 0x0007ffff }, | ||
3541 | { 0x0cac, 0, 0x00000000, 0xffffffff }, | ||
3542 | { 0x0cb0, 0, 0x00000000, 0xffffffff }, | ||
3543 | { 0x0cb4, 0, 0x00000000, 0xffffffff }, | ||
3544 | { 0x0cb8, 0, 0x00000000, 0xffffffff }, | ||
3545 | { 0x0cbc, 0, 0x00000000, 0xffffffff }, | ||
3546 | { 0x0cc0, 0, 0x00000000, 0xffffffff }, | ||
3547 | { 0x0cc4, 0, 0x00000000, 0xffffffff }, | ||
3548 | { 0x0cc8, 0, 0x00000000, 0xffffffff }, | ||
3549 | { 0x0ccc, 0, 0x00000000, 0xffffffff }, | ||
3550 | { 0x0cd0, 0, 0x00000000, 0xffffffff }, | ||
3551 | { 0x0cd4, 0, 0x00000000, 0xffffffff }, | ||
3552 | { 0x0cd8, 0, 0x00000000, 0xffffffff }, | ||
3553 | { 0x0cdc, 0, 0x00000000, 0xffffffff }, | ||
3554 | { 0x0ce0, 0, 0x00000000, 0xffffffff }, | ||
3555 | { 0x0ce4, 0, 0x00000000, 0xffffffff }, | ||
3556 | { 0x0ce8, 0, 0x00000000, 0xffffffff }, | ||
3557 | { 0x0cec, 0, 0x00000000, 0xffffffff }, | ||
3558 | { 0x0cf0, 0, 0x00000000, 0xffffffff }, | ||
3559 | { 0x0cf4, 0, 0x00000000, 0xffffffff }, | ||
3560 | { 0x0cf8, 0, 0x00000000, 0xffffffff }, | ||
3561 | { 0x0cfc, 0, 0x00000000, 0xffffffff }, | ||
3562 | { 0x0d00, 0, 0x00000000, 0xffffffff }, | ||
3563 | { 0x0d04, 0, 0x00000000, 0xffffffff }, | ||
3564 | 3560 | ||
3565 | { 0x1000, 0, 0x00000000, 0x00000001 }, | 3561 | { 0x1000, 0, 0x00000000, 0x00000001 }, |
3566 | { 0x1004, 0, 0x00000000, 0x000f0001 }, | 3562 | { 0x1004, 0, 0x00000000, 0x000f0001 }, |
3567 | { 0x1044, 0, 0x00000000, 0xffc003ff }, | ||
3568 | { 0x1080, 0, 0x00000000, 0x0001ffff }, | ||
3569 | { 0x1084, 0, 0x00000000, 0xffffffff }, | ||
3570 | { 0x1088, 0, 0x00000000, 0xffffffff }, | ||
3571 | { 0x108c, 0, 0x00000000, 0xffffffff }, | ||
3572 | { 0x1090, 0, 0x00000000, 0xffffffff }, | ||
3573 | { 0x1094, 0, 0x00000000, 0xffffffff }, | ||
3574 | { 0x1098, 0, 0x00000000, 0xffffffff }, | ||
3575 | { 0x109c, 0, 0x00000000, 0xffffffff }, | ||
3576 | { 0x10a0, 0, 0x00000000, 0xffffffff }, | ||
3577 | 3563 | ||
3578 | { 0x1408, 0, 0x01c00800, 0x00000000 }, | 3564 | { 0x1408, 0, 0x01c00800, 0x00000000 }, |
3579 | { 0x149c, 0, 0x8000ffff, 0x00000000 }, | 3565 | { 0x149c, 0, 0x8000ffff, 0x00000000 }, |
@@ -3585,111 +3571,9 @@ bnx2_test_registers(struct bnx2 *bp) | |||
3585 | { 0x14c4, 0, 0x00003fff, 0x00000000 }, | 3571 | { 0x14c4, 0, 0x00003fff, 0x00000000 }, |
3586 | { 0x14cc, 0, 0x00000000, 0x00000001 }, | 3572 | { 0x14cc, 0, 0x00000000, 0x00000001 }, |
3587 | { 0x14d0, 0, 0xffffffff, 0x00000000 }, | 3573 | { 0x14d0, 0, 0xffffffff, 0x00000000 }, |
3588 | { 0x1500, 0, 0x00000000, 0xffffffff }, | ||
3589 | { 0x1504, 0, 0x00000000, 0xffffffff }, | ||
3590 | { 0x1508, 0, 0x00000000, 0xffffffff }, | ||
3591 | { 0x150c, 0, 0x00000000, 0xffffffff }, | ||
3592 | { 0x1510, 0, 0x00000000, 0xffffffff }, | ||
3593 | { 0x1514, 0, 0x00000000, 0xffffffff }, | ||
3594 | { 0x1518, 0, 0x00000000, 0xffffffff }, | ||
3595 | { 0x151c, 0, 0x00000000, 0xffffffff }, | ||
3596 | { 0x1520, 0, 0x00000000, 0xffffffff }, | ||
3597 | { 0x1524, 0, 0x00000000, 0xffffffff }, | ||
3598 | { 0x1528, 0, 0x00000000, 0xffffffff }, | ||
3599 | { 0x152c, 0, 0x00000000, 0xffffffff }, | ||
3600 | { 0x1530, 0, 0x00000000, 0xffffffff }, | ||
3601 | { 0x1534, 0, 0x00000000, 0xffffffff }, | ||
3602 | { 0x1538, 0, 0x00000000, 0xffffffff }, | ||
3603 | { 0x153c, 0, 0x00000000, 0xffffffff }, | ||
3604 | { 0x1540, 0, 0x00000000, 0xffffffff }, | ||
3605 | { 0x1544, 0, 0x00000000, 0xffffffff }, | ||
3606 | { 0x1548, 0, 0x00000000, 0xffffffff }, | ||
3607 | { 0x154c, 0, 0x00000000, 0xffffffff }, | ||
3608 | { 0x1550, 0, 0x00000000, 0xffffffff }, | ||
3609 | { 0x1554, 0, 0x00000000, 0xffffffff }, | ||
3610 | { 0x1558, 0, 0x00000000, 0xffffffff }, | ||
3611 | { 0x1600, 0, 0x00000000, 0xffffffff }, | ||
3612 | { 0x1604, 0, 0x00000000, 0xffffffff }, | ||
3613 | { 0x1608, 0, 0x00000000, 0xffffffff }, | ||
3614 | { 0x160c, 0, 0x00000000, 0xffffffff }, | ||
3615 | { 0x1610, 0, 0x00000000, 0xffffffff }, | ||
3616 | { 0x1614, 0, 0x00000000, 0xffffffff }, | ||
3617 | { 0x1618, 0, 0x00000000, 0xffffffff }, | ||
3618 | { 0x161c, 0, 0x00000000, 0xffffffff }, | ||
3619 | { 0x1620, 0, 0x00000000, 0xffffffff }, | ||
3620 | { 0x1624, 0, 0x00000000, 0xffffffff }, | ||
3621 | { 0x1628, 0, 0x00000000, 0xffffffff }, | ||
3622 | { 0x162c, 0, 0x00000000, 0xffffffff }, | ||
3623 | { 0x1630, 0, 0x00000000, 0xffffffff }, | ||
3624 | { 0x1634, 0, 0x00000000, 0xffffffff }, | ||
3625 | { 0x1638, 0, 0x00000000, 0xffffffff }, | ||
3626 | { 0x163c, 0, 0x00000000, 0xffffffff }, | ||
3627 | { 0x1640, 0, 0x00000000, 0xffffffff }, | ||
3628 | { 0x1644, 0, 0x00000000, 0xffffffff }, | ||
3629 | { 0x1648, 0, 0x00000000, 0xffffffff }, | ||
3630 | { 0x164c, 0, 0x00000000, 0xffffffff }, | ||
3631 | { 0x1650, 0, 0x00000000, 0xffffffff }, | ||
3632 | { 0x1654, 0, 0x00000000, 0xffffffff }, | ||
3633 | 3574 | ||
3634 | { 0x1800, 0, 0x00000000, 0x00000001 }, | 3575 | { 0x1800, 0, 0x00000000, 0x00000001 }, |
3635 | { 0x1804, 0, 0x00000000, 0x00000003 }, | 3576 | { 0x1804, 0, 0x00000000, 0x00000003 }, |
3636 | { 0x1840, 0, 0x00000000, 0xffffffff }, | ||
3637 | { 0x1844, 0, 0x00000000, 0xffffffff }, | ||
3638 | { 0x1848, 0, 0x00000000, 0xffffffff }, | ||
3639 | { 0x184c, 0, 0x00000000, 0xffffffff }, | ||
3640 | { 0x1850, 0, 0x00000000, 0xffffffff }, | ||
3641 | { 0x1900, 0, 0x7ffbffff, 0x00000000 }, | ||
3642 | { 0x1904, 0, 0xffffffff, 0x00000000 }, | ||
3643 | { 0x190c, 0, 0xffffffff, 0x00000000 }, | ||
3644 | { 0x1914, 0, 0xffffffff, 0x00000000 }, | ||
3645 | { 0x191c, 0, 0xffffffff, 0x00000000 }, | ||
3646 | { 0x1924, 0, 0xffffffff, 0x00000000 }, | ||
3647 | { 0x192c, 0, 0xffffffff, 0x00000000 }, | ||
3648 | { 0x1934, 0, 0xffffffff, 0x00000000 }, | ||
3649 | { 0x193c, 0, 0xffffffff, 0x00000000 }, | ||
3650 | { 0x1944, 0, 0xffffffff, 0x00000000 }, | ||
3651 | { 0x194c, 0, 0xffffffff, 0x00000000 }, | ||
3652 | { 0x1954, 0, 0xffffffff, 0x00000000 }, | ||
3653 | { 0x195c, 0, 0xffffffff, 0x00000000 }, | ||
3654 | { 0x1964, 0, 0xffffffff, 0x00000000 }, | ||
3655 | { 0x196c, 0, 0xffffffff, 0x00000000 }, | ||
3656 | { 0x1974, 0, 0xffffffff, 0x00000000 }, | ||
3657 | { 0x197c, 0, 0xffffffff, 0x00000000 }, | ||
3658 | { 0x1980, 0, 0x0700ffff, 0x00000000 }, | ||
3659 | |||
3660 | { 0x1c00, 0, 0x00000000, 0x00000001 }, | ||
3661 | { 0x1c04, 0, 0x00000000, 0x00000003 }, | ||
3662 | { 0x1c08, 0, 0x0000000f, 0x00000000 }, | ||
3663 | { 0x1c40, 0, 0x00000000, 0xffffffff }, | ||
3664 | { 0x1c44, 0, 0x00000000, 0xffffffff }, | ||
3665 | { 0x1c48, 0, 0x00000000, 0xffffffff }, | ||
3666 | { 0x1c4c, 0, 0x00000000, 0xffffffff }, | ||
3667 | { 0x1c50, 0, 0x00000000, 0xffffffff }, | ||
3668 | { 0x1d00, 0, 0x7ffbffff, 0x00000000 }, | ||
3669 | { 0x1d04, 0, 0xffffffff, 0x00000000 }, | ||
3670 | { 0x1d0c, 0, 0xffffffff, 0x00000000 }, | ||
3671 | { 0x1d14, 0, 0xffffffff, 0x00000000 }, | ||
3672 | { 0x1d1c, 0, 0xffffffff, 0x00000000 }, | ||
3673 | { 0x1d24, 0, 0xffffffff, 0x00000000 }, | ||
3674 | { 0x1d2c, 0, 0xffffffff, 0x00000000 }, | ||
3675 | { 0x1d34, 0, 0xffffffff, 0x00000000 }, | ||
3676 | { 0x1d3c, 0, 0xffffffff, 0x00000000 }, | ||
3677 | { 0x1d44, 0, 0xffffffff, 0x00000000 }, | ||
3678 | { 0x1d4c, 0, 0xffffffff, 0x00000000 }, | ||
3679 | { 0x1d54, 0, 0xffffffff, 0x00000000 }, | ||
3680 | { 0x1d5c, 0, 0xffffffff, 0x00000000 }, | ||
3681 | { 0x1d64, 0, 0xffffffff, 0x00000000 }, | ||
3682 | { 0x1d6c, 0, 0xffffffff, 0x00000000 }, | ||
3683 | { 0x1d74, 0, 0xffffffff, 0x00000000 }, | ||
3684 | { 0x1d7c, 0, 0xffffffff, 0x00000000 }, | ||
3685 | { 0x1d80, 0, 0x0700ffff, 0x00000000 }, | ||
3686 | |||
3687 | { 0x2004, 0, 0x00000000, 0x0337000f }, | ||
3688 | { 0x2008, 0, 0xffffffff, 0x00000000 }, | ||
3689 | { 0x200c, 0, 0xffffffff, 0x00000000 }, | ||
3690 | { 0x2010, 0, 0xffffffff, 0x00000000 }, | ||
3691 | { 0x2014, 0, 0x801fff80, 0x00000000 }, | ||
3692 | { 0x2018, 0, 0x000003ff, 0x00000000 }, | ||
3693 | 3577 | ||
3694 | { 0x2800, 0, 0x00000000, 0x00000001 }, | 3578 | { 0x2800, 0, 0x00000000, 0x00000001 }, |
3695 | { 0x2804, 0, 0x00000000, 0x00003f01 }, | 3579 | { 0x2804, 0, 0x00000000, 0x00003f01 }, |
@@ -3707,16 +3591,6 @@ bnx2_test_registers(struct bnx2 *bp) | |||
3707 | { 0x2c00, 0, 0x00000000, 0x00000011 }, | 3591 | { 0x2c00, 0, 0x00000000, 0x00000011 }, |
3708 | { 0x2c04, 0, 0x00000000, 0x00030007 }, | 3592 | { 0x2c04, 0, 0x00000000, 0x00030007 }, |
3709 | 3593 | ||
3710 | { 0x3000, 0, 0x00000000, 0x00000001 }, | ||
3711 | { 0x3004, 0, 0x00000000, 0x007007ff }, | ||
3712 | { 0x3008, 0, 0x00000003, 0x00000000 }, | ||
3713 | { 0x300c, 0, 0xffffffff, 0x00000000 }, | ||
3714 | { 0x3010, 0, 0xffffffff, 0x00000000 }, | ||
3715 | { 0x3014, 0, 0xffffffff, 0x00000000 }, | ||
3716 | { 0x3034, 0, 0xffffffff, 0x00000000 }, | ||
3717 | { 0x3038, 0, 0xffffffff, 0x00000000 }, | ||
3718 | { 0x3050, 0, 0x00000001, 0x00000000 }, | ||
3719 | |||
3720 | { 0x3c00, 0, 0x00000000, 0x00000001 }, | 3594 | { 0x3c00, 0, 0x00000000, 0x00000001 }, |
3721 | { 0x3c04, 0, 0x00000000, 0x00070000 }, | 3595 | { 0x3c04, 0, 0x00000000, 0x00070000 }, |
3722 | { 0x3c08, 0, 0x00007f71, 0x07f00000 }, | 3596 | { 0x3c08, 0, 0x00007f71, 0x07f00000 }, |
@@ -3726,88 +3600,11 @@ bnx2_test_registers(struct bnx2 *bp) | |||
3726 | { 0x3c18, 0, 0x00000000, 0xffffffff }, | 3600 | { 0x3c18, 0, 0x00000000, 0xffffffff }, |
3727 | { 0x3c1c, 0, 0xfffff000, 0x00000000 }, | 3601 | { 0x3c1c, 0, 0xfffff000, 0x00000000 }, |
3728 | { 0x3c20, 0, 0xffffff00, 0x00000000 }, | 3602 | { 0x3c20, 0, 0xffffff00, 0x00000000 }, |
3729 | { 0x3c24, 0, 0xffffffff, 0x00000000 }, | ||
3730 | { 0x3c28, 0, 0xffffffff, 0x00000000 }, | ||
3731 | { 0x3c2c, 0, 0xffffffff, 0x00000000 }, | ||
3732 | { 0x3c30, 0, 0xffffffff, 0x00000000 }, | ||
3733 | { 0x3c34, 0, 0xffffffff, 0x00000000 }, | ||
3734 | { 0x3c38, 0, 0xffffffff, 0x00000000 }, | ||
3735 | { 0x3c3c, 0, 0xffffffff, 0x00000000 }, | ||
3736 | { 0x3c40, 0, 0xffffffff, 0x00000000 }, | ||
3737 | { 0x3c44, 0, 0xffffffff, 0x00000000 }, | ||
3738 | { 0x3c48, 0, 0xffffffff, 0x00000000 }, | ||
3739 | { 0x3c4c, 0, 0xffffffff, 0x00000000 }, | ||
3740 | { 0x3c50, 0, 0xffffffff, 0x00000000 }, | ||
3741 | { 0x3c54, 0, 0xffffffff, 0x00000000 }, | ||
3742 | { 0x3c58, 0, 0xffffffff, 0x00000000 }, | ||
3743 | { 0x3c5c, 0, 0xffffffff, 0x00000000 }, | ||
3744 | { 0x3c60, 0, 0xffffffff, 0x00000000 }, | ||
3745 | { 0x3c64, 0, 0xffffffff, 0x00000000 }, | ||
3746 | { 0x3c68, 0, 0xffffffff, 0x00000000 }, | ||
3747 | { 0x3c6c, 0, 0xffffffff, 0x00000000 }, | ||
3748 | { 0x3c70, 0, 0xffffffff, 0x00000000 }, | ||
3749 | { 0x3c74, 0, 0x0000003f, 0x00000000 }, | ||
3750 | { 0x3c78, 0, 0x00000000, 0x00000000 }, | ||
3751 | { 0x3c7c, 0, 0x00000000, 0x00000000 }, | ||
3752 | { 0x3c80, 0, 0x3fffffff, 0x00000000 }, | ||
3753 | { 0x3c84, 0, 0x0000003f, 0x00000000 }, | ||
3754 | { 0x3c88, 0, 0x00000000, 0xffffffff }, | ||
3755 | { 0x3c8c, 0, 0x00000000, 0xffffffff }, | ||
3756 | |||
3757 | { 0x4000, 0, 0x00000000, 0x00000001 }, | ||
3758 | { 0x4004, 0, 0x00000000, 0x00030000 }, | ||
3759 | { 0x4008, 0, 0x00000ff0, 0x00000000 }, | ||
3760 | { 0x400c, 0, 0xffffffff, 0x00000000 }, | ||
3761 | { 0x4088, 0, 0x00000000, 0x00070303 }, | ||
3762 | |||
3763 | { 0x4400, 0, 0x00000000, 0x00000001 }, | ||
3764 | { 0x4404, 0, 0x00000000, 0x00003f01 }, | ||
3765 | { 0x4408, 0, 0x7fff00ff, 0x00000000 }, | ||
3766 | { 0x440c, 0, 0xffffffff, 0x00000000 }, | ||
3767 | { 0x4410, 0, 0xffff, 0x0000 }, | ||
3768 | { 0x4414, 0, 0xffff, 0x0000 }, | ||
3769 | { 0x4418, 0, 0xffff, 0x0000 }, | ||
3770 | { 0x441c, 0, 0xffff, 0x0000 }, | ||
3771 | { 0x4428, 0, 0xffffffff, 0x00000000 }, | ||
3772 | { 0x442c, 0, 0xffffffff, 0x00000000 }, | ||
3773 | { 0x4430, 0, 0xffffffff, 0x00000000 }, | ||
3774 | { 0x4434, 0, 0xffffffff, 0x00000000 }, | ||
3775 | { 0x4438, 0, 0xffffffff, 0x00000000 }, | ||
3776 | { 0x443c, 0, 0xffffffff, 0x00000000 }, | ||
3777 | { 0x4440, 0, 0xffffffff, 0x00000000 }, | ||
3778 | { 0x4444, 0, 0xffffffff, 0x00000000 }, | ||
3779 | |||
3780 | { 0x4c00, 0, 0x00000000, 0x00000001 }, | ||
3781 | { 0x4c04, 0, 0x00000000, 0x0000003f }, | ||
3782 | { 0x4c08, 0, 0xffffffff, 0x00000000 }, | ||
3783 | { 0x4c0c, 0, 0x0007fc00, 0x00000000 }, | ||
3784 | { 0x4c10, 0, 0x80003fe0, 0x00000000 }, | ||
3785 | { 0x4c14, 0, 0xffffffff, 0x00000000 }, | ||
3786 | { 0x4c44, 0, 0x00000000, 0x9fff9fff }, | ||
3787 | { 0x4c48, 0, 0x00000000, 0xb3009fff }, | ||
3788 | { 0x4c4c, 0, 0x00000000, 0x77f33b30 }, | ||
3789 | { 0x4c50, 0, 0x00000000, 0xffffffff }, | ||
3790 | 3603 | ||
3791 | { 0x5004, 0, 0x00000000, 0x0000007f }, | 3604 | { 0x5004, 0, 0x00000000, 0x0000007f }, |
3792 | { 0x5008, 0, 0x0f0007ff, 0x00000000 }, | 3605 | { 0x5008, 0, 0x0f0007ff, 0x00000000 }, |
3793 | { 0x500c, 0, 0xf800f800, 0x07ff07ff }, | 3606 | { 0x500c, 0, 0xf800f800, 0x07ff07ff }, |
3794 | 3607 | ||
3795 | { 0x5400, 0, 0x00000008, 0x00000001 }, | ||
3796 | { 0x5404, 0, 0x00000000, 0x0000003f }, | ||
3797 | { 0x5408, 0, 0x0000001f, 0x00000000 }, | ||
3798 | { 0x540c, 0, 0xffffffff, 0x00000000 }, | ||
3799 | { 0x5410, 0, 0xffffffff, 0x00000000 }, | ||
3800 | { 0x5414, 0, 0x0000ffff, 0x00000000 }, | ||
3801 | { 0x5418, 0, 0x0000ffff, 0x00000000 }, | ||
3802 | { 0x541c, 0, 0x0000ffff, 0x00000000 }, | ||
3803 | { 0x5420, 0, 0x0000ffff, 0x00000000 }, | ||
3804 | { 0x5428, 0, 0x000000ff, 0x00000000 }, | ||
3805 | { 0x542c, 0, 0xff00ffff, 0x00000000 }, | ||
3806 | { 0x5430, 0, 0x001fff80, 0x00000000 }, | ||
3807 | { 0x5438, 0, 0xffffffff, 0x00000000 }, | ||
3808 | { 0x543c, 0, 0xffffffff, 0x00000000 }, | ||
3809 | { 0x5440, 0, 0xf800f800, 0x07ff07ff }, | ||
3810 | |||
3811 | { 0x5c00, 0, 0x00000000, 0x00000001 }, | 3608 | { 0x5c00, 0, 0x00000000, 0x00000001 }, |
3812 | { 0x5c04, 0, 0x00000000, 0x0003000f }, | 3609 | { 0x5c04, 0, 0x00000000, 0x0003000f }, |
3813 | { 0x5c08, 0, 0x00000003, 0x00000000 }, | 3610 | { 0x5c08, 0, 0x00000003, 0x00000000 }, |
@@ -4794,6 +4591,64 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | |||
4794 | info->fw_version[5] = 0; | 4591 | info->fw_version[5] = 0; |
4795 | } | 4592 | } |
4796 | 4593 | ||
4594 | #define BNX2_REGDUMP_LEN (32 * 1024) | ||
4595 | |||
4596 | static int | ||
4597 | bnx2_get_regs_len(struct net_device *dev) | ||
4598 | { | ||
4599 | return BNX2_REGDUMP_LEN; | ||
4600 | } | ||
4601 | |||
4602 | static void | ||
4603 | bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) | ||
4604 | { | ||
4605 | u32 *p = _p, i, offset; | ||
4606 | u8 *orig_p = _p; | ||
4607 | struct bnx2 *bp = netdev_priv(dev); | ||
4608 | u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c, | ||
4609 | 0x0800, 0x0880, 0x0c00, 0x0c10, | ||
4610 | 0x0c30, 0x0d08, 0x1000, 0x101c, | ||
4611 | 0x1040, 0x1048, 0x1080, 0x10a4, | ||
4612 | 0x1400, 0x1490, 0x1498, 0x14f0, | ||
4613 | 0x1500, 0x155c, 0x1580, 0x15dc, | ||
4614 | 0x1600, 0x1658, 0x1680, 0x16d8, | ||
4615 | 0x1800, 0x1820, 0x1840, 0x1854, | ||
4616 | 0x1880, 0x1894, 0x1900, 0x1984, | ||
4617 | 0x1c00, 0x1c0c, 0x1c40, 0x1c54, | ||
4618 | 0x1c80, 0x1c94, 0x1d00, 0x1d84, | ||
4619 | 0x2000, 0x2030, 0x23c0, 0x2400, | ||
4620 | 0x2800, 0x2820, 0x2830, 0x2850, | ||
4621 | 0x2b40, 0x2c10, 0x2fc0, 0x3058, | ||
4622 | 0x3c00, 0x3c94, 0x4000, 0x4010, | ||
4623 | 0x4080, 0x4090, 0x43c0, 0x4458, | ||
4624 | 0x4c00, 0x4c18, 0x4c40, 0x4c54, | ||
4625 | 0x4fc0, 0x5010, 0x53c0, 0x5444, | ||
4626 | 0x5c00, 0x5c18, 0x5c80, 0x5c90, | ||
4627 | 0x5fc0, 0x6000, 0x6400, 0x6428, | ||
4628 | 0x6800, 0x6848, 0x684c, 0x6860, | ||
4629 | 0x6888, 0x6910, 0x8000 }; | ||
4630 | |||
4631 | regs->version = 0; | ||
4632 | |||
4633 | memset(p, 0, BNX2_REGDUMP_LEN); | ||
4634 | |||
4635 | if (!netif_running(bp->dev)) | ||
4636 | return; | ||
4637 | |||
4638 | i = 0; | ||
4639 | offset = reg_boundaries[0]; | ||
4640 | p += offset; | ||
4641 | while (offset < BNX2_REGDUMP_LEN) { | ||
4642 | *p++ = REG_RD(bp, offset); | ||
4643 | offset += 4; | ||
4644 | if (offset == reg_boundaries[i + 1]) { | ||
4645 | offset = reg_boundaries[i + 2]; | ||
4646 | p = (u32 *) (orig_p + offset); | ||
4647 | i += 2; | ||
4648 | } | ||
4649 | } | ||
4650 | } | ||
4651 | |||
4797 | static void | 4652 | static void |
4798 | bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | 4653 | bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) |
4799 | { | 4654 | { |
@@ -4979,7 +4834,7 @@ bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | |||
4979 | { | 4834 | { |
4980 | struct bnx2 *bp = netdev_priv(dev); | 4835 | struct bnx2 *bp = netdev_priv(dev); |
4981 | 4836 | ||
4982 | ering->rx_max_pending = MAX_RX_DESC_CNT; | 4837 | ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT; |
4983 | ering->rx_mini_max_pending = 0; | 4838 | ering->rx_mini_max_pending = 0; |
4984 | ering->rx_jumbo_max_pending = 0; | 4839 | ering->rx_jumbo_max_pending = 0; |
4985 | 4840 | ||
@@ -4996,17 +4851,28 @@ bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | |||
4996 | { | 4851 | { |
4997 | struct bnx2 *bp = netdev_priv(dev); | 4852 | struct bnx2 *bp = netdev_priv(dev); |
4998 | 4853 | ||
4999 | if ((ering->rx_pending > MAX_RX_DESC_CNT) || | 4854 | if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) || |
5000 | (ering->tx_pending > MAX_TX_DESC_CNT) || | 4855 | (ering->tx_pending > MAX_TX_DESC_CNT) || |
5001 | (ering->tx_pending <= MAX_SKB_FRAGS)) { | 4856 | (ering->tx_pending <= MAX_SKB_FRAGS)) { |
5002 | 4857 | ||
5003 | return -EINVAL; | 4858 | return -EINVAL; |
5004 | } | 4859 | } |
5005 | bp->rx_ring_size = ering->rx_pending; | 4860 | if (netif_running(bp->dev)) { |
4861 | bnx2_netif_stop(bp); | ||
4862 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | ||
4863 | bnx2_free_skbs(bp); | ||
4864 | bnx2_free_mem(bp); | ||
4865 | } | ||
4866 | |||
4867 | bnx2_set_rx_ring_size(bp, ering->rx_pending); | ||
5006 | bp->tx_ring_size = ering->tx_pending; | 4868 | bp->tx_ring_size = ering->tx_pending; |
5007 | 4869 | ||
5008 | if (netif_running(bp->dev)) { | 4870 | if (netif_running(bp->dev)) { |
5009 | bnx2_netif_stop(bp); | 4871 | int rc; |
4872 | |||
4873 | rc = bnx2_alloc_mem(bp); | ||
4874 | if (rc) | ||
4875 | return rc; | ||
5010 | bnx2_init_nic(bp); | 4876 | bnx2_init_nic(bp); |
5011 | bnx2_netif_start(bp); | 4877 | bnx2_netif_start(bp); |
5012 | } | 4878 | } |
@@ -5360,6 +5226,8 @@ static struct ethtool_ops bnx2_ethtool_ops = { | |||
5360 | .get_settings = bnx2_get_settings, | 5226 | .get_settings = bnx2_get_settings, |
5361 | .set_settings = bnx2_set_settings, | 5227 | .set_settings = bnx2_set_settings, |
5362 | .get_drvinfo = bnx2_get_drvinfo, | 5228 | .get_drvinfo = bnx2_get_drvinfo, |
5229 | .get_regs_len = bnx2_get_regs_len, | ||
5230 | .get_regs = bnx2_get_regs, | ||
5363 | .get_wol = bnx2_get_wol, | 5231 | .get_wol = bnx2_get_wol, |
5364 | .set_wol = bnx2_set_wol, | 5232 | .set_wol = bnx2_set_wol, |
5365 | .nway_reset = bnx2_nway_reset, | 5233 | .nway_reset = bnx2_nway_reset, |
@@ -5678,7 +5546,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) | |||
5678 | bp->mac_addr[5] = (u8) reg; | 5546 | bp->mac_addr[5] = (u8) reg; |
5679 | 5547 | ||
5680 | bp->tx_ring_size = MAX_TX_DESC_CNT; | 5548 | bp->tx_ring_size = MAX_TX_DESC_CNT; |
5681 | bp->rx_ring_size = 100; | 5549 | bnx2_set_rx_ring_size(bp, 100); |
5682 | 5550 | ||
5683 | bp->rx_csum = 1; | 5551 | bp->rx_csum = 1; |
5684 | 5552 | ||
@@ -5897,6 +5765,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5897 | if (!netif_running(dev)) | 5765 | if (!netif_running(dev)) |
5898 | return 0; | 5766 | return 0; |
5899 | 5767 | ||
5768 | flush_scheduled_work(); | ||
5900 | bnx2_netif_stop(bp); | 5769 | bnx2_netif_stop(bp); |
5901 | netif_device_detach(dev); | 5770 | netif_device_detach(dev); |
5902 | del_timer_sync(&bp->timer); | 5771 | del_timer_sync(&bp->timer); |
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h index 9f691cbd666b..fd4b7f2eb477 100644 --- a/drivers/net/bnx2.h +++ b/drivers/net/bnx2.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/errno.h> | 23 | #include <linux/errno.h> |
24 | #include <linux/ioport.h> | 24 | #include <linux/ioport.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/vmalloc.h> | ||
26 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
27 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
28 | #include <linux/init.h> | 29 | #include <linux/init.h> |
@@ -3792,8 +3793,10 @@ struct l2_fhdr { | |||
3792 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd)) | 3793 | #define TX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct tx_bd)) |
3793 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) | 3794 | #define MAX_TX_DESC_CNT (TX_DESC_CNT - 1) |
3794 | 3795 | ||
3796 | #define MAX_RX_RINGS 4 | ||
3795 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd)) | 3797 | #define RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct rx_bd)) |
3796 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 1) | 3798 | #define MAX_RX_DESC_CNT (RX_DESC_CNT - 1) |
3799 | #define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS) | ||
3797 | 3800 | ||
3798 | #define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \ | 3801 | #define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) == \ |
3799 | (MAX_TX_DESC_CNT - 1)) ? \ | 3802 | (MAX_TX_DESC_CNT - 1)) ? \ |
@@ -3805,8 +3808,10 @@ struct l2_fhdr { | |||
3805 | (MAX_RX_DESC_CNT - 1)) ? \ | 3808 | (MAX_RX_DESC_CNT - 1)) ? \ |
3806 | (x) + 2 : (x) + 1 | 3809 | (x) + 2 : (x) + 1 |
3807 | 3810 | ||
3808 | #define RX_RING_IDX(x) ((x) & MAX_RX_DESC_CNT) | 3811 | #define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx) |
3809 | 3812 | ||
3813 | #define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> 8) | ||
3814 | #define RX_IDX(x) ((x) & MAX_RX_DESC_CNT) | ||
3810 | 3815 | ||
3811 | /* Context size. */ | 3816 | /* Context size. */ |
3812 | #define CTX_SHIFT 7 | 3817 | #define CTX_SHIFT 7 |
@@ -3903,6 +3908,15 @@ struct bnx2 { | |||
3903 | struct status_block *status_blk; | 3908 | struct status_block *status_blk; |
3904 | u32 last_status_idx; | 3909 | u32 last_status_idx; |
3905 | 3910 | ||
3911 | u32 flags; | ||
3912 | #define PCIX_FLAG 1 | ||
3913 | #define PCI_32BIT_FLAG 2 | ||
3914 | #define ONE_TDMA_FLAG 4 /* no longer used */ | ||
3915 | #define NO_WOL_FLAG 8 | ||
3916 | #define USING_DAC_FLAG 0x10 | ||
3917 | #define USING_MSI_FLAG 0x20 | ||
3918 | #define ASF_ENABLE_FLAG 0x40 | ||
3919 | |||
3906 | struct tx_bd *tx_desc_ring; | 3920 | struct tx_bd *tx_desc_ring; |
3907 | struct sw_bd *tx_buf_ring; | 3921 | struct sw_bd *tx_buf_ring; |
3908 | u32 tx_prod_bseq; | 3922 | u32 tx_prod_bseq; |
@@ -3920,19 +3934,22 @@ struct bnx2 { | |||
3920 | u32 rx_offset; | 3934 | u32 rx_offset; |
3921 | u32 rx_buf_use_size; /* useable size */ | 3935 | u32 rx_buf_use_size; /* useable size */ |
3922 | u32 rx_buf_size; /* with alignment */ | 3936 | u32 rx_buf_size; /* with alignment */ |
3923 | struct rx_bd *rx_desc_ring; | 3937 | u32 rx_max_ring_idx; |
3924 | struct sw_bd *rx_buf_ring; | 3938 | |
3925 | u32 rx_prod_bseq; | 3939 | u32 rx_prod_bseq; |
3926 | u16 rx_prod; | 3940 | u16 rx_prod; |
3927 | u16 rx_cons; | 3941 | u16 rx_cons; |
3928 | 3942 | ||
3929 | u32 rx_csum; | 3943 | u32 rx_csum; |
3930 | 3944 | ||
3945 | struct sw_bd *rx_buf_ring; | ||
3946 | struct rx_bd *rx_desc_ring[MAX_RX_RINGS]; | ||
3947 | |||
3931 | /* Only used to synchronize netif_stop_queue/wake_queue when tx */ | 3948 | /* Only used to synchronize netif_stop_queue/wake_queue when tx */ |
3932 | /* ring is full */ | 3949 | /* ring is full */ |
3933 | spinlock_t tx_lock; | 3950 | spinlock_t tx_lock; |
3934 | 3951 | ||
3935 | /* End of fileds used in the performance code paths. */ | 3952 | /* End of fields used in the performance code paths. */ |
3936 | 3953 | ||
3937 | char *name; | 3954 | char *name; |
3938 | 3955 | ||
@@ -3945,15 +3962,6 @@ struct bnx2 { | |||
3945 | /* Used to synchronize phy accesses. */ | 3962 | /* Used to synchronize phy accesses. */ |
3946 | spinlock_t phy_lock; | 3963 | spinlock_t phy_lock; |
3947 | 3964 | ||
3948 | u32 flags; | ||
3949 | #define PCIX_FLAG 1 | ||
3950 | #define PCI_32BIT_FLAG 2 | ||
3951 | #define ONE_TDMA_FLAG 4 /* no longer used */ | ||
3952 | #define NO_WOL_FLAG 8 | ||
3953 | #define USING_DAC_FLAG 0x10 | ||
3954 | #define USING_MSI_FLAG 0x20 | ||
3955 | #define ASF_ENABLE_FLAG 0x40 | ||
3956 | |||
3957 | u32 phy_flags; | 3965 | u32 phy_flags; |
3958 | #define PHY_SERDES_FLAG 1 | 3966 | #define PHY_SERDES_FLAG 1 |
3959 | #define PHY_CRC_FIX_FLAG 2 | 3967 | #define PHY_CRC_FIX_FLAG 2 |
@@ -4004,8 +4012,9 @@ struct bnx2 { | |||
4004 | dma_addr_t tx_desc_mapping; | 4012 | dma_addr_t tx_desc_mapping; |
4005 | 4013 | ||
4006 | 4014 | ||
4015 | int rx_max_ring; | ||
4007 | int rx_ring_size; | 4016 | int rx_ring_size; |
4008 | dma_addr_t rx_desc_mapping; | 4017 | dma_addr_t rx_desc_mapping[MAX_RX_RINGS]; |
4009 | 4018 | ||
4010 | u16 tx_quick_cons_trip; | 4019 | u16 tx_quick_cons_trip; |
4011 | u16 tx_quick_cons_trip_int; | 4020 | u16 tx_quick_cons_trip_int; |
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c index 6e295fce5c6f..8f1573e658a5 100644 --- a/drivers/net/cassini.c +++ b/drivers/net/cassini.c | |||
@@ -91,6 +91,7 @@ | |||
91 | #include <linux/mii.h> | 91 | #include <linux/mii.h> |
92 | #include <linux/ip.h> | 92 | #include <linux/ip.h> |
93 | #include <linux/tcp.h> | 93 | #include <linux/tcp.h> |
94 | #include <linux/mutex.h> | ||
94 | 95 | ||
95 | #include <net/checksum.h> | 96 | #include <net/checksum.h> |
96 | 97 | ||
@@ -3892,7 +3893,7 @@ static void cas_reset(struct cas *cp, int blkflag) | |||
3892 | spin_unlock(&cp->stat_lock[N_TX_RINGS]); | 3893 | spin_unlock(&cp->stat_lock[N_TX_RINGS]); |
3893 | } | 3894 | } |
3894 | 3895 | ||
3895 | /* Shut down the chip, must be called with pm_sem held. */ | 3896 | /* Shut down the chip, must be called with pm_mutex held. */ |
3896 | static void cas_shutdown(struct cas *cp) | 3897 | static void cas_shutdown(struct cas *cp) |
3897 | { | 3898 | { |
3898 | unsigned long flags; | 3899 | unsigned long flags; |
@@ -4311,11 +4312,11 @@ static int cas_open(struct net_device *dev) | |||
4311 | int hw_was_up, err; | 4312 | int hw_was_up, err; |
4312 | unsigned long flags; | 4313 | unsigned long flags; |
4313 | 4314 | ||
4314 | down(&cp->pm_sem); | 4315 | mutex_lock(&cp->pm_mutex); |
4315 | 4316 | ||
4316 | hw_was_up = cp->hw_running; | 4317 | hw_was_up = cp->hw_running; |
4317 | 4318 | ||
4318 | /* The power-management semaphore protects the hw_running | 4319 | /* The power-management mutex protects the hw_running |
4319 | * etc. state so it is safe to do this bit without cp->lock | 4320 | * etc. state so it is safe to do this bit without cp->lock |
4320 | */ | 4321 | */ |
4321 | if (!cp->hw_running) { | 4322 | if (!cp->hw_running) { |
@@ -4364,7 +4365,7 @@ static int cas_open(struct net_device *dev) | |||
4364 | cas_unlock_all_restore(cp, flags); | 4365 | cas_unlock_all_restore(cp, flags); |
4365 | 4366 | ||
4366 | netif_start_queue(dev); | 4367 | netif_start_queue(dev); |
4367 | up(&cp->pm_sem); | 4368 | mutex_unlock(&cp->pm_mutex); |
4368 | return 0; | 4369 | return 0; |
4369 | 4370 | ||
4370 | err_spare: | 4371 | err_spare: |
@@ -4372,7 +4373,7 @@ err_spare: | |||
4372 | cas_free_rxds(cp); | 4373 | cas_free_rxds(cp); |
4373 | err_tx_tiny: | 4374 | err_tx_tiny: |
4374 | cas_tx_tiny_free(cp); | 4375 | cas_tx_tiny_free(cp); |
4375 | up(&cp->pm_sem); | 4376 | mutex_unlock(&cp->pm_mutex); |
4376 | return err; | 4377 | return err; |
4377 | } | 4378 | } |
4378 | 4379 | ||
@@ -4382,7 +4383,7 @@ static int cas_close(struct net_device *dev) | |||
4382 | struct cas *cp = netdev_priv(dev); | 4383 | struct cas *cp = netdev_priv(dev); |
4383 | 4384 | ||
4384 | /* Make sure we don't get distracted by suspend/resume */ | 4385 | /* Make sure we don't get distracted by suspend/resume */ |
4385 | down(&cp->pm_sem); | 4386 | mutex_lock(&cp->pm_mutex); |
4386 | 4387 | ||
4387 | netif_stop_queue(dev); | 4388 | netif_stop_queue(dev); |
4388 | 4389 | ||
@@ -4399,7 +4400,7 @@ static int cas_close(struct net_device *dev) | |||
4399 | cas_spare_free(cp); | 4400 | cas_spare_free(cp); |
4400 | cas_free_rxds(cp); | 4401 | cas_free_rxds(cp); |
4401 | cas_tx_tiny_free(cp); | 4402 | cas_tx_tiny_free(cp); |
4402 | up(&cp->pm_sem); | 4403 | mutex_unlock(&cp->pm_mutex); |
4403 | return 0; | 4404 | return 0; |
4404 | } | 4405 | } |
4405 | 4406 | ||
@@ -4834,10 +4835,10 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
4834 | unsigned long flags; | 4835 | unsigned long flags; |
4835 | int rc = -EOPNOTSUPP; | 4836 | int rc = -EOPNOTSUPP; |
4836 | 4837 | ||
4837 | /* Hold the PM semaphore while doing ioctl's or we may collide | 4838 | /* Hold the PM mutex while doing ioctl's or we may collide |
4838 | * with open/close and power management and oops. | 4839 | * with open/close and power management and oops. |
4839 | */ | 4840 | */ |
4840 | down(&cp->pm_sem); | 4841 | mutex_lock(&cp->pm_mutex); |
4841 | switch (cmd) { | 4842 | switch (cmd) { |
4842 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ | 4843 | case SIOCGMIIPHY: /* Get address of MII PHY in use. */ |
4843 | data->phy_id = cp->phy_addr; | 4844 | data->phy_id = cp->phy_addr; |
@@ -4867,7 +4868,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
4867 | break; | 4868 | break; |
4868 | }; | 4869 | }; |
4869 | 4870 | ||
4870 | up(&cp->pm_sem); | 4871 | mutex_unlock(&cp->pm_mutex); |
4871 | return rc; | 4872 | return rc; |
4872 | } | 4873 | } |
4873 | 4874 | ||
@@ -4994,7 +4995,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, | |||
4994 | spin_lock_init(&cp->tx_lock[i]); | 4995 | spin_lock_init(&cp->tx_lock[i]); |
4995 | } | 4996 | } |
4996 | spin_lock_init(&cp->stat_lock[N_TX_RINGS]); | 4997 | spin_lock_init(&cp->stat_lock[N_TX_RINGS]); |
4997 | init_MUTEX(&cp->pm_sem); | 4998 | mutex_init(&cp->pm_mutex); |
4998 | 4999 | ||
4999 | init_timer(&cp->link_timer); | 5000 | init_timer(&cp->link_timer); |
5000 | cp->link_timer.function = cas_link_timer; | 5001 | cp->link_timer.function = cas_link_timer; |
@@ -5116,10 +5117,10 @@ err_out_free_consistent: | |||
5116 | cp->init_block, cp->block_dvma); | 5117 | cp->init_block, cp->block_dvma); |
5117 | 5118 | ||
5118 | err_out_iounmap: | 5119 | err_out_iounmap: |
5119 | down(&cp->pm_sem); | 5120 | mutex_lock(&cp->pm_mutex); |
5120 | if (cp->hw_running) | 5121 | if (cp->hw_running) |
5121 | cas_shutdown(cp); | 5122 | cas_shutdown(cp); |
5122 | up(&cp->pm_sem); | 5123 | mutex_unlock(&cp->pm_mutex); |
5123 | 5124 | ||
5124 | iounmap(cp->regs); | 5125 | iounmap(cp->regs); |
5125 | 5126 | ||
@@ -5152,11 +5153,11 @@ static void __devexit cas_remove_one(struct pci_dev *pdev) | |||
5152 | cp = netdev_priv(dev); | 5153 | cp = netdev_priv(dev); |
5153 | unregister_netdev(dev); | 5154 | unregister_netdev(dev); |
5154 | 5155 | ||
5155 | down(&cp->pm_sem); | 5156 | mutex_lock(&cp->pm_mutex); |
5156 | flush_scheduled_work(); | 5157 | flush_scheduled_work(); |
5157 | if (cp->hw_running) | 5158 | if (cp->hw_running) |
5158 | cas_shutdown(cp); | 5159 | cas_shutdown(cp); |
5159 | up(&cp->pm_sem); | 5160 | mutex_unlock(&cp->pm_mutex); |
5160 | 5161 | ||
5161 | #if 1 | 5162 | #if 1 |
5162 | if (cp->orig_cacheline_size) { | 5163 | if (cp->orig_cacheline_size) { |
@@ -5183,10 +5184,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5183 | struct cas *cp = netdev_priv(dev); | 5184 | struct cas *cp = netdev_priv(dev); |
5184 | unsigned long flags; | 5185 | unsigned long flags; |
5185 | 5186 | ||
5186 | /* We hold the PM semaphore during entire driver | 5187 | mutex_lock(&cp->pm_mutex); |
5187 | * sleep time | ||
5188 | */ | ||
5189 | down(&cp->pm_sem); | ||
5190 | 5188 | ||
5191 | /* If the driver is opened, we stop the DMA */ | 5189 | /* If the driver is opened, we stop the DMA */ |
5192 | if (cp->opened) { | 5190 | if (cp->opened) { |
@@ -5206,6 +5204,7 @@ static int cas_suspend(struct pci_dev *pdev, pm_message_t state) | |||
5206 | 5204 | ||
5207 | if (cp->hw_running) | 5205 | if (cp->hw_running) |
5208 | cas_shutdown(cp); | 5206 | cas_shutdown(cp); |
5207 | mutex_unlock(&cp->pm_mutex); | ||
5209 | 5208 | ||
5210 | return 0; | 5209 | return 0; |
5211 | } | 5210 | } |
@@ -5217,6 +5216,7 @@ static int cas_resume(struct pci_dev *pdev) | |||
5217 | 5216 | ||
5218 | printk(KERN_INFO "%s: resuming\n", dev->name); | 5217 | printk(KERN_INFO "%s: resuming\n", dev->name); |
5219 | 5218 | ||
5219 | mutex_lock(&cp->pm_mutex); | ||
5220 | cas_hard_reset(cp); | 5220 | cas_hard_reset(cp); |
5221 | if (cp->opened) { | 5221 | if (cp->opened) { |
5222 | unsigned long flags; | 5222 | unsigned long flags; |
@@ -5229,7 +5229,7 @@ static int cas_resume(struct pci_dev *pdev) | |||
5229 | 5229 | ||
5230 | netif_device_attach(dev); | 5230 | netif_device_attach(dev); |
5231 | } | 5231 | } |
5232 | up(&cp->pm_sem); | 5232 | mutex_unlock(&cp->pm_mutex); |
5233 | return 0; | 5233 | return 0; |
5234 | } | 5234 | } |
5235 | #endif /* CONFIG_PM */ | 5235 | #endif /* CONFIG_PM */ |
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h index 88063ef16cf6..ab55c7ee1012 100644 --- a/drivers/net/cassini.h +++ b/drivers/net/cassini.h | |||
@@ -4284,7 +4284,7 @@ struct cas { | |||
4284 | * (ie. not power managed) */ | 4284 | * (ie. not power managed) */ |
4285 | int hw_running; | 4285 | int hw_running; |
4286 | int opened; | 4286 | int opened; |
4287 | struct semaphore pm_sem; /* open/close/suspend/resume */ | 4287 | struct mutex pm_mutex; /* open/close/suspend/resume */ |
4288 | 4288 | ||
4289 | struct cas_init_block *init_block; | 4289 | struct cas_init_block *init_block; |
4290 | struct cas_tx_desc *init_txds[MAX_TX_RINGS]; | 4290 | struct cas_tx_desc *init_txds[MAX_TX_RINGS]; |
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index f39de16e6b97..49cd096a3c3d 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c | |||
@@ -920,7 +920,7 @@ e1000_remove(struct pci_dev *pdev) | |||
920 | unregister_netdev(netdev); | 920 | unregister_netdev(netdev); |
921 | #ifdef CONFIG_E1000_NAPI | 921 | #ifdef CONFIG_E1000_NAPI |
922 | for (i = 0; i < adapter->num_rx_queues; i++) | 922 | for (i = 0; i < adapter->num_rx_queues; i++) |
923 | __dev_put(&adapter->polling_netdev[i]); | 923 | dev_put(&adapter->polling_netdev[i]); |
924 | #endif | 924 | #endif |
925 | 925 | ||
926 | if (!e1000_check_phy_reset_block(&adapter->hw)) | 926 | if (!e1000_check_phy_reset_block(&adapter->hw)) |
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c index e67b1d06611c..95e2bb8dd7b4 100644 --- a/drivers/net/fs_enet/mac-fcc.c +++ b/drivers/net/fs_enet/mac-fcc.c | |||
@@ -118,6 +118,8 @@ static int do_pd_setup(struct fs_enet_private *fep) | |||
118 | 118 | ||
119 | /* Fill out IRQ field */ | 119 | /* Fill out IRQ field */ |
120 | fep->interrupt = platform_get_irq(pdev, 0); | 120 | fep->interrupt = platform_get_irq(pdev, 0); |
121 | if (fep->interrupt < 0) | ||
122 | return -EINVAL; | ||
121 | 123 | ||
122 | /* Attach the memory for the FCC Parameter RAM */ | 124 | /* Attach the memory for the FCC Parameter RAM */ |
123 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); | 125 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram"); |
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c index 2e8f44469699..3dad69dfdb2c 100644 --- a/drivers/net/fs_enet/mac-fec.c +++ b/drivers/net/fs_enet/mac-fec.c | |||
@@ -144,6 +144,8 @@ static int do_pd_setup(struct fs_enet_private *fep) | |||
144 | 144 | ||
145 | /* Fill out IRQ field */ | 145 | /* Fill out IRQ field */ |
146 | fep->interrupt = platform_get_irq_byname(pdev,"interrupt"); | 146 | fep->interrupt = platform_get_irq_byname(pdev,"interrupt"); |
147 | if (fep->interrupt < 0) | ||
148 | return -EINVAL; | ||
147 | 149 | ||
148 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | 150 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); |
149 | fep->fec.fecp =(void*)r->start; | 151 | fep->fec.fecp =(void*)r->start; |
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c index a3897fda71fa..a772b286f96d 100644 --- a/drivers/net/fs_enet/mac-scc.c +++ b/drivers/net/fs_enet/mac-scc.c | |||
@@ -118,6 +118,8 @@ static int do_pd_setup(struct fs_enet_private *fep) | |||
118 | 118 | ||
119 | /* Fill out IRQ field */ | 119 | /* Fill out IRQ field */ |
120 | fep->interrupt = platform_get_irq_byname(pdev, "interrupt"); | 120 | fep->interrupt = platform_get_irq_byname(pdev, "interrupt"); |
121 | if (fep->interrupt < 0) | ||
122 | return -EINVAL; | ||
121 | 123 | ||
122 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | 124 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); |
123 | fep->scc.sccp = (void *)r->start; | 125 | fep->scc.sccp = (void *)r->start; |
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 0e8e3fcde9ff..771e25d8c417 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -193,8 +193,12 @@ static int gfar_probe(struct platform_device *pdev) | |||
193 | priv->interruptTransmit = platform_get_irq_byname(pdev, "tx"); | 193 | priv->interruptTransmit = platform_get_irq_byname(pdev, "tx"); |
194 | priv->interruptReceive = platform_get_irq_byname(pdev, "rx"); | 194 | priv->interruptReceive = platform_get_irq_byname(pdev, "rx"); |
195 | priv->interruptError = platform_get_irq_byname(pdev, "error"); | 195 | priv->interruptError = platform_get_irq_byname(pdev, "error"); |
196 | if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0) | ||
197 | goto regs_fail; | ||
196 | } else { | 198 | } else { |
197 | priv->interruptTransmit = platform_get_irq(pdev, 0); | 199 | priv->interruptTransmit = platform_get_irq(pdev, 0); |
200 | if (priv->interruptTransmit < 0) | ||
201 | goto regs_fail; | ||
198 | } | 202 | } |
199 | 203 | ||
200 | /* get a pointer to the register memory */ | 204 | /* get a pointer to the register memory */ |
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index c81fe1c382d5..5e6d00752990 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig | |||
@@ -64,6 +64,14 @@ config TEKRAM_DONGLE | |||
64 | dongles you will have to start irattach like this: | 64 | dongles you will have to start irattach like this: |
65 | "irattach -d tekram". | 65 | "irattach -d tekram". |
66 | 66 | ||
67 | config TOIM3232_DONGLE | ||
68 | tristate "TOIM3232 IrDa dongle" | ||
69 | depends on DONGLE && IRDA | ||
70 | help | ||
71 | Say Y here if you want to build support for the Vishay/Temic | ||
72 | TOIM3232 and TOIM4232 based dongles. | ||
73 | To compile it as a module, choose M here. | ||
74 | |||
67 | config LITELINK_DONGLE | 75 | config LITELINK_DONGLE |
68 | tristate "Parallax LiteLink dongle" | 76 | tristate "Parallax LiteLink dongle" |
69 | depends on DONGLE && IRDA | 77 | depends on DONGLE && IRDA |
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile index 72cbfdc9cfcc..27ab75f20799 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/net/irda/Makefile | |||
@@ -43,6 +43,7 @@ obj-$(CONFIG_OLD_BELKIN_DONGLE) += old_belkin-sir.o | |||
43 | obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o | 43 | obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o |
44 | obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o | 44 | obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o |
45 | obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o | 45 | obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o |
46 | obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o | ||
46 | 47 | ||
47 | # The SIR helper module | 48 | # The SIR helper module |
48 | sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o | 49 | sir-dev-objs := sir_dev.o sir_dongle.o sir_kthread.o |
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c index 3137592d60c0..910c0cab35b0 100644 --- a/drivers/net/irda/donauboe.c +++ b/drivers/net/irda/donauboe.c | |||
@@ -1778,7 +1778,7 @@ static struct pci_driver donauboe_pci_driver = { | |||
1778 | static int __init | 1778 | static int __init |
1779 | donauboe_init (void) | 1779 | donauboe_init (void) |
1780 | { | 1780 | { |
1781 | return pci_module_init(&donauboe_pci_driver); | 1781 | return pci_register_driver(&donauboe_pci_driver); |
1782 | } | 1782 | } |
1783 | 1783 | ||
1784 | static void __exit | 1784 | static void __exit |
diff --git a/drivers/net/irda/ep7211_ir.c b/drivers/net/irda/ep7211_ir.c index 31896262d21c..4cba38f7e4a8 100644 --- a/drivers/net/irda/ep7211_ir.c +++ b/drivers/net/irda/ep7211_ir.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/delay.h> | 8 | #include <linux/delay.h> |
9 | #include <linux/tty.h> | 9 | #include <linux/tty.h> |
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/spinlock.h> | ||
11 | 12 | ||
12 | #include <net/irda/irda.h> | 13 | #include <net/irda/irda.h> |
13 | #include <net/irda/irda_device.h> | 14 | #include <net/irda/irda_device.h> |
@@ -23,6 +24,8 @@ static void ep7211_ir_close(dongle_t *self); | |||
23 | static int ep7211_ir_change_speed(struct irda_task *task); | 24 | static int ep7211_ir_change_speed(struct irda_task *task); |
24 | static int ep7211_ir_reset(struct irda_task *task); | 25 | static int ep7211_ir_reset(struct irda_task *task); |
25 | 26 | ||
27 | static DEFINE_SPINLOCK(ep7211_lock); | ||
28 | |||
26 | static struct dongle_reg dongle = { | 29 | static struct dongle_reg dongle = { |
27 | .type = IRDA_EP7211_IR, | 30 | .type = IRDA_EP7211_IR, |
28 | .open = ep7211_ir_open, | 31 | .open = ep7211_ir_open, |
@@ -36,7 +39,7 @@ static void ep7211_ir_open(dongle_t *self, struct qos_info *qos) | |||
36 | { | 39 | { |
37 | unsigned int syscon1, flags; | 40 | unsigned int syscon1, flags; |
38 | 41 | ||
39 | save_flags(flags); cli(); | 42 | spin_lock_irqsave(&ep7211_lock, flags); |
40 | 43 | ||
41 | /* Turn on the SIR encoder. */ | 44 | /* Turn on the SIR encoder. */ |
42 | syscon1 = clps_readl(SYSCON1); | 45 | syscon1 = clps_readl(SYSCON1); |
@@ -46,14 +49,14 @@ static void ep7211_ir_open(dongle_t *self, struct qos_info *qos) | |||
46 | /* XXX: We should disable modem status interrupts on the first | 49 | /* XXX: We should disable modem status interrupts on the first |
47 | UART (interrupt #14). */ | 50 | UART (interrupt #14). */ |
48 | 51 | ||
49 | restore_flags(flags); | 52 | spin_unlock_irqrestore(&ep7211_lock, flags); |
50 | } | 53 | } |
51 | 54 | ||
52 | static void ep7211_ir_close(dongle_t *self) | 55 | static void ep7211_ir_close(dongle_t *self) |
53 | { | 56 | { |
54 | unsigned int syscon1, flags; | 57 | unsigned int syscon1, flags; |
55 | 58 | ||
56 | save_flags(flags); cli(); | 59 | spin_lock_irqsave(&ep7211_lock, flags); |
57 | 60 | ||
58 | /* Turn off the SIR encoder. */ | 61 | /* Turn off the SIR encoder. */ |
59 | syscon1 = clps_readl(SYSCON1); | 62 | syscon1 = clps_readl(SYSCON1); |
@@ -63,7 +66,7 @@ static void ep7211_ir_close(dongle_t *self) | |||
63 | /* XXX: If we've disabled the modem status interrupts, we should | 66 | /* XXX: If we've disabled the modem status interrupts, we should |
64 | reset them back to their original state. */ | 67 | reset them back to their original state. */ |
65 | 68 | ||
66 | restore_flags(flags); | 69 | spin_unlock_irqrestore(&ep7211_lock, flags); |
67 | } | 70 | } |
68 | 71 | ||
69 | /* | 72 | /* |
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index 8936058a3cce..6e2ec56cde0b 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c | |||
@@ -740,7 +740,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs) | |||
740 | struct sk_buff *newskb; | 740 | struct sk_buff *newskb; |
741 | struct sk_buff *dataskb; | 741 | struct sk_buff *dataskb; |
742 | struct urb *next_urb; | 742 | struct urb *next_urb; |
743 | int docopy; | 743 | unsigned int len, docopy; |
744 | 744 | ||
745 | IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); | 745 | IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); |
746 | 746 | ||
@@ -851,10 +851,11 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs) | |||
851 | dataskb->dev = self->netdev; | 851 | dataskb->dev = self->netdev; |
852 | dataskb->mac.raw = dataskb->data; | 852 | dataskb->mac.raw = dataskb->data; |
853 | dataskb->protocol = htons(ETH_P_IRDA); | 853 | dataskb->protocol = htons(ETH_P_IRDA); |
854 | len = dataskb->len; | ||
854 | netif_rx(dataskb); | 855 | netif_rx(dataskb); |
855 | 856 | ||
856 | /* Keep stats up to date */ | 857 | /* Keep stats up to date */ |
857 | self->stats.rx_bytes += dataskb->len; | 858 | self->stats.rx_bytes += len; |
858 | self->stats.rx_packets++; | 859 | self->stats.rx_packets++; |
859 | self->netdev->last_rx = jiffies; | 860 | self->netdev->last_rx = jiffies; |
860 | 861 | ||
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index 101750bf210f..6a98b7ae4975 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
34 | #include <linux/smp_lock.h> | 34 | #include <linux/smp_lock.h> |
35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
36 | #include <linux/mutex.h> | ||
36 | 37 | ||
37 | #include <net/irda/irda.h> | 38 | #include <net/irda/irda.h> |
38 | #include <net/irda/irda_device.h> | 39 | #include <net/irda/irda_device.h> |
@@ -338,7 +339,7 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop) | |||
338 | /*****************************************************************/ | 339 | /*****************************************************************/ |
339 | 340 | ||
340 | /* serialize ldisc open/close with sir_dev */ | 341 | /* serialize ldisc open/close with sir_dev */ |
341 | static DECLARE_MUTEX(irtty_sem); | 342 | static DEFINE_MUTEX(irtty_mutex); |
342 | 343 | ||
343 | /* notifier from sir_dev when irda% device gets opened (ifup) */ | 344 | /* notifier from sir_dev when irda% device gets opened (ifup) */ |
344 | 345 | ||
@@ -348,11 +349,11 @@ static int irtty_start_dev(struct sir_dev *dev) | |||
348 | struct tty_struct *tty; | 349 | struct tty_struct *tty; |
349 | 350 | ||
350 | /* serialize with ldisc open/close */ | 351 | /* serialize with ldisc open/close */ |
351 | down(&irtty_sem); | 352 | mutex_lock(&irtty_mutex); |
352 | 353 | ||
353 | priv = dev->priv; | 354 | priv = dev->priv; |
354 | if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) { | 355 | if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) { |
355 | up(&irtty_sem); | 356 | mutex_unlock(&irtty_mutex); |
356 | return -ESTALE; | 357 | return -ESTALE; |
357 | } | 358 | } |
358 | 359 | ||
@@ -363,7 +364,7 @@ static int irtty_start_dev(struct sir_dev *dev) | |||
363 | /* Make sure we can receive more data */ | 364 | /* Make sure we can receive more data */ |
364 | irtty_stop_receiver(tty, FALSE); | 365 | irtty_stop_receiver(tty, FALSE); |
365 | 366 | ||
366 | up(&irtty_sem); | 367 | mutex_unlock(&irtty_mutex); |
367 | return 0; | 368 | return 0; |
368 | } | 369 | } |
369 | 370 | ||
@@ -375,11 +376,11 @@ static int irtty_stop_dev(struct sir_dev *dev) | |||
375 | struct tty_struct *tty; | 376 | struct tty_struct *tty; |
376 | 377 | ||
377 | /* serialize with ldisc open/close */ | 378 | /* serialize with ldisc open/close */ |
378 | down(&irtty_sem); | 379 | mutex_lock(&irtty_mutex); |
379 | 380 | ||
380 | priv = dev->priv; | 381 | priv = dev->priv; |
381 | if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) { | 382 | if (unlikely(!priv || priv->magic!=IRTTY_MAGIC)) { |
382 | up(&irtty_sem); | 383 | mutex_unlock(&irtty_mutex); |
383 | return -ESTALE; | 384 | return -ESTALE; |
384 | } | 385 | } |
385 | 386 | ||
@@ -390,7 +391,7 @@ static int irtty_stop_dev(struct sir_dev *dev) | |||
390 | if (tty->driver->stop) | 391 | if (tty->driver->stop) |
391 | tty->driver->stop(tty); | 392 | tty->driver->stop(tty); |
392 | 393 | ||
393 | up(&irtty_sem); | 394 | mutex_unlock(&irtty_mutex); |
394 | 395 | ||
395 | return 0; | 396 | return 0; |
396 | } | 397 | } |
@@ -514,13 +515,13 @@ static int irtty_open(struct tty_struct *tty) | |||
514 | priv->dev = dev; | 515 | priv->dev = dev; |
515 | 516 | ||
516 | /* serialize with start_dev - in case we were racing with ifup */ | 517 | /* serialize with start_dev - in case we were racing with ifup */ |
517 | down(&irtty_sem); | 518 | mutex_lock(&irtty_mutex); |
518 | 519 | ||
519 | dev->priv = priv; | 520 | dev->priv = priv; |
520 | tty->disc_data = priv; | 521 | tty->disc_data = priv; |
521 | tty->receive_room = 65536; | 522 | tty->receive_room = 65536; |
522 | 523 | ||
523 | up(&irtty_sem); | 524 | mutex_unlock(&irtty_mutex); |
524 | 525 | ||
525 | IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name); | 526 | IRDA_DEBUG(0, "%s - %s: irda line discipline opened\n", __FUNCTION__, tty->name); |
526 | 527 | ||
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c index ee717d0e939e..83141a3ff546 100644 --- a/drivers/net/irda/nsc-ircc.c +++ b/drivers/net/irda/nsc-ircc.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no> | 12 | * Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no> |
13 | * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com> | 13 | * Copyright (c) 1998 Lichen Wang, <lwang@actisys.com> |
14 | * Copyright (c) 1998 Actisys Corp., www.actisys.com | 14 | * Copyright (c) 1998 Actisys Corp., www.actisys.com |
15 | * Copyright (c) 2000-2004 Jean Tourrilhes <jt@hpl.hp.com> | ||
15 | * All Rights Reserved | 16 | * All Rights Reserved |
16 | * | 17 | * |
17 | * This program is free software; you can redistribute it and/or | 18 | * This program is free software; you can redistribute it and/or |
@@ -53,14 +54,13 @@ | |||
53 | #include <linux/init.h> | 54 | #include <linux/init.h> |
54 | #include <linux/rtnetlink.h> | 55 | #include <linux/rtnetlink.h> |
55 | #include <linux/dma-mapping.h> | 56 | #include <linux/dma-mapping.h> |
57 | #include <linux/pnp.h> | ||
58 | #include <linux/platform_device.h> | ||
56 | 59 | ||
57 | #include <asm/io.h> | 60 | #include <asm/io.h> |
58 | #include <asm/dma.h> | 61 | #include <asm/dma.h> |
59 | #include <asm/byteorder.h> | 62 | #include <asm/byteorder.h> |
60 | 63 | ||
61 | #include <linux/pm.h> | ||
62 | #include <linux/pm_legacy.h> | ||
63 | |||
64 | #include <net/irda/wrapper.h> | 64 | #include <net/irda/wrapper.h> |
65 | #include <net/irda/irda.h> | 65 | #include <net/irda/irda.h> |
66 | #include <net/irda/irda_device.h> | 66 | #include <net/irda/irda_device.h> |
@@ -72,14 +72,27 @@ | |||
72 | 72 | ||
73 | static char *driver_name = "nsc-ircc"; | 73 | static char *driver_name = "nsc-ircc"; |
74 | 74 | ||
75 | /* Power Management */ | ||
76 | #define NSC_IRCC_DRIVER_NAME "nsc-ircc" | ||
77 | static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state); | ||
78 | static int nsc_ircc_resume(struct platform_device *dev); | ||
79 | |||
80 | static struct platform_driver nsc_ircc_driver = { | ||
81 | .suspend = nsc_ircc_suspend, | ||
82 | .resume = nsc_ircc_resume, | ||
83 | .driver = { | ||
84 | .name = NSC_IRCC_DRIVER_NAME, | ||
85 | }, | ||
86 | }; | ||
87 | |||
75 | /* Module parameters */ | 88 | /* Module parameters */ |
76 | static int qos_mtt_bits = 0x07; /* 1 ms or more */ | 89 | static int qos_mtt_bits = 0x07; /* 1 ms or more */ |
77 | static int dongle_id; | 90 | static int dongle_id; |
78 | 91 | ||
79 | /* Use BIOS settions by default, but user may supply module parameters */ | 92 | /* Use BIOS settions by default, but user may supply module parameters */ |
80 | static unsigned int io[] = { ~0, ~0, ~0, ~0 }; | 93 | static unsigned int io[] = { ~0, ~0, ~0, ~0, ~0 }; |
81 | static unsigned int irq[] = { 0, 0, 0, 0, 0 }; | 94 | static unsigned int irq[] = { 0, 0, 0, 0, 0 }; |
82 | static unsigned int dma[] = { 0, 0, 0, 0, 0 }; | 95 | static unsigned int dma[] = { 0, 0, 0, 0, 0 }; |
83 | 96 | ||
84 | static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info); | 97 | static int nsc_ircc_probe_108(nsc_chip_t *chip, chipio_t *info); |
85 | static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info); | 98 | static int nsc_ircc_probe_338(nsc_chip_t *chip, chipio_t *info); |
@@ -87,6 +100,7 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info); | |||
87 | static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info); | 100 | static int nsc_ircc_init_108(nsc_chip_t *chip, chipio_t *info); |
88 | static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info); | 101 | static int nsc_ircc_init_338(nsc_chip_t *chip, chipio_t *info); |
89 | static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info); | 102 | static int nsc_ircc_init_39x(nsc_chip_t *chip, chipio_t *info); |
103 | static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id); | ||
90 | 104 | ||
91 | /* These are the known NSC chips */ | 105 | /* These are the known NSC chips */ |
92 | static nsc_chip_t chips[] = { | 106 | static nsc_chip_t chips[] = { |
@@ -101,11 +115,12 @@ static nsc_chip_t chips[] = { | |||
101 | /* Contributed by Jan Frey - IBM A30/A31 */ | 115 | /* Contributed by Jan Frey - IBM A30/A31 */ |
102 | { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff, | 116 | { "PC8739x", { 0x2e, 0x4e, 0x0 }, 0x20, 0xea, 0xff, |
103 | nsc_ircc_probe_39x, nsc_ircc_init_39x }, | 117 | nsc_ircc_probe_39x, nsc_ircc_init_39x }, |
118 | { "IBM", { 0x2e, 0x4e, 0x0 }, 0x20, 0xf4, 0xff, | ||
119 | nsc_ircc_probe_39x, nsc_ircc_init_39x }, | ||
104 | { NULL } | 120 | { NULL } |
105 | }; | 121 | }; |
106 | 122 | ||
107 | /* Max 4 instances for now */ | 123 | static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL, NULL }; |
108 | static struct nsc_ircc_cb *dev_self[] = { NULL, NULL, NULL, NULL }; | ||
109 | 124 | ||
110 | static char *dongle_types[] = { | 125 | static char *dongle_types[] = { |
111 | "Differential serial interface", | 126 | "Differential serial interface", |
@@ -126,8 +141,24 @@ static char *dongle_types[] = { | |||
126 | "No dongle connected", | 141 | "No dongle connected", |
127 | }; | 142 | }; |
128 | 143 | ||
144 | /* PNP probing */ | ||
145 | static chipio_t pnp_info; | ||
146 | static const struct pnp_device_id nsc_ircc_pnp_table[] = { | ||
147 | { .id = "NSC6001", .driver_data = 0 }, | ||
148 | { .id = "IBM0071", .driver_data = 0 }, | ||
149 | { } | ||
150 | }; | ||
151 | |||
152 | MODULE_DEVICE_TABLE(pnp, nsc_ircc_pnp_table); | ||
153 | |||
154 | static struct pnp_driver nsc_ircc_pnp_driver = { | ||
155 | .name = "nsc-ircc", | ||
156 | .id_table = nsc_ircc_pnp_table, | ||
157 | .probe = nsc_ircc_pnp_probe, | ||
158 | }; | ||
159 | |||
129 | /* Some prototypes */ | 160 | /* Some prototypes */ |
130 | static int nsc_ircc_open(int i, chipio_t *info); | 161 | static int nsc_ircc_open(chipio_t *info); |
131 | static int nsc_ircc_close(struct nsc_ircc_cb *self); | 162 | static int nsc_ircc_close(struct nsc_ircc_cb *self); |
132 | static int nsc_ircc_setup(chipio_t *info); | 163 | static int nsc_ircc_setup(chipio_t *info); |
133 | static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self); | 164 | static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self); |
@@ -146,7 +177,10 @@ static int nsc_ircc_net_open(struct net_device *dev); | |||
146 | static int nsc_ircc_net_close(struct net_device *dev); | 177 | static int nsc_ircc_net_close(struct net_device *dev); |
147 | static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); | 178 | static int nsc_ircc_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); |
148 | static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev); | 179 | static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev); |
149 | static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data); | 180 | |
181 | /* Globals */ | ||
182 | static int pnp_registered; | ||
183 | static int pnp_succeeded; | ||
150 | 184 | ||
151 | /* | 185 | /* |
152 | * Function nsc_ircc_init () | 186 | * Function nsc_ircc_init () |
@@ -158,28 +192,36 @@ static int __init nsc_ircc_init(void) | |||
158 | { | 192 | { |
159 | chipio_t info; | 193 | chipio_t info; |
160 | nsc_chip_t *chip; | 194 | nsc_chip_t *chip; |
161 | int ret = -ENODEV; | 195 | int ret; |
162 | int cfg_base; | 196 | int cfg_base; |
163 | int cfg, id; | 197 | int cfg, id; |
164 | int reg; | 198 | int reg; |
165 | int i = 0; | 199 | int i = 0; |
166 | 200 | ||
201 | ret = platform_driver_register(&nsc_ircc_driver); | ||
202 | if (ret) { | ||
203 | IRDA_ERROR("%s, Can't register driver!\n", driver_name); | ||
204 | return ret; | ||
205 | } | ||
206 | |||
207 | /* Register with PnP subsystem to detect disable ports */ | ||
208 | ret = pnp_register_driver(&nsc_ircc_pnp_driver); | ||
209 | |||
210 | if (ret >= 0) | ||
211 | pnp_registered = 1; | ||
212 | |||
213 | ret = -ENODEV; | ||
214 | |||
167 | /* Probe for all the NSC chipsets we know about */ | 215 | /* Probe for all the NSC chipsets we know about */ |
168 | for (chip=chips; chip->name ; chip++) { | 216 | for (chip = chips; chip->name ; chip++) { |
169 | IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, | 217 | IRDA_DEBUG(2, "%s(), Probing for %s ...\n", __FUNCTION__, |
170 | chip->name); | 218 | chip->name); |
171 | 219 | ||
172 | /* Try all config registers for this chip */ | 220 | /* Try all config registers for this chip */ |
173 | for (cfg=0; cfg<3; cfg++) { | 221 | for (cfg = 0; cfg < ARRAY_SIZE(chip->cfg); cfg++) { |
174 | cfg_base = chip->cfg[cfg]; | 222 | cfg_base = chip->cfg[cfg]; |
175 | if (!cfg_base) | 223 | if (!cfg_base) |
176 | continue; | 224 | continue; |
177 | |||
178 | memset(&info, 0, sizeof(chipio_t)); | ||
179 | info.cfg_base = cfg_base; | ||
180 | info.fir_base = io[i]; | ||
181 | info.dma = dma[i]; | ||
182 | info.irq = irq[i]; | ||
183 | 225 | ||
184 | /* Read index register */ | 226 | /* Read index register */ |
185 | reg = inb(cfg_base); | 227 | reg = inb(cfg_base); |
@@ -194,24 +236,65 @@ static int __init nsc_ircc_init(void) | |||
194 | if ((id & chip->cid_mask) == chip->cid_value) { | 236 | if ((id & chip->cid_mask) == chip->cid_value) { |
195 | IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n", | 237 | IRDA_DEBUG(2, "%s() Found %s chip, revision=%d\n", |
196 | __FUNCTION__, chip->name, id & ~chip->cid_mask); | 238 | __FUNCTION__, chip->name, id & ~chip->cid_mask); |
197 | /* | ||
198 | * If the user supplies the base address, then | ||
199 | * we init the chip, if not we probe the values | ||
200 | * set by the BIOS | ||
201 | */ | ||
202 | if (io[i] < 0x2000) { | ||
203 | chip->init(chip, &info); | ||
204 | } else | ||
205 | chip->probe(chip, &info); | ||
206 | 239 | ||
207 | if (nsc_ircc_open(i, &info) == 0) | 240 | /* |
208 | ret = 0; | 241 | * If we found a correct PnP setting, |
242 | * we first try it. | ||
243 | */ | ||
244 | if (pnp_succeeded) { | ||
245 | memset(&info, 0, sizeof(chipio_t)); | ||
246 | info.cfg_base = cfg_base; | ||
247 | info.fir_base = pnp_info.fir_base; | ||
248 | info.dma = pnp_info.dma; | ||
249 | info.irq = pnp_info.irq; | ||
250 | |||
251 | if (info.fir_base < 0x2000) { | ||
252 | IRDA_MESSAGE("%s, chip->init\n", driver_name); | ||
253 | chip->init(chip, &info); | ||
254 | } else | ||
255 | chip->probe(chip, &info); | ||
256 | |||
257 | if (nsc_ircc_open(&info) >= 0) | ||
258 | ret = 0; | ||
259 | } | ||
260 | |||
261 | /* | ||
262 | * Opening based on PnP values failed. | ||
263 | * Let's fallback to user values, or probe | ||
264 | * the chip. | ||
265 | */ | ||
266 | if (ret) { | ||
267 | IRDA_DEBUG(2, "%s, PnP init failed\n", driver_name); | ||
268 | memset(&info, 0, sizeof(chipio_t)); | ||
269 | info.cfg_base = cfg_base; | ||
270 | info.fir_base = io[i]; | ||
271 | info.dma = dma[i]; | ||
272 | info.irq = irq[i]; | ||
273 | |||
274 | /* | ||
275 | * If the user supplies the base address, then | ||
276 | * we init the chip, if not we probe the values | ||
277 | * set by the BIOS | ||
278 | */ | ||
279 | if (io[i] < 0x2000) { | ||
280 | chip->init(chip, &info); | ||
281 | } else | ||
282 | chip->probe(chip, &info); | ||
283 | |||
284 | if (nsc_ircc_open(&info) >= 0) | ||
285 | ret = 0; | ||
286 | } | ||
209 | i++; | 287 | i++; |
210 | } else { | 288 | } else { |
211 | IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id); | 289 | IRDA_DEBUG(2, "%s(), Wrong chip id=0x%02x\n", __FUNCTION__, id); |
212 | } | 290 | } |
213 | } | 291 | } |
214 | 292 | } | |
293 | |||
294 | if (ret) { | ||
295 | platform_driver_unregister(&nsc_ircc_driver); | ||
296 | pnp_unregister_driver(&nsc_ircc_pnp_driver); | ||
297 | pnp_registered = 0; | ||
215 | } | 298 | } |
216 | 299 | ||
217 | return ret; | 300 | return ret; |
@@ -227,12 +310,17 @@ static void __exit nsc_ircc_cleanup(void) | |||
227 | { | 310 | { |
228 | int i; | 311 | int i; |
229 | 312 | ||
230 | pm_unregister_all(nsc_ircc_pmproc); | 313 | for (i = 0; i < ARRAY_SIZE(dev_self); i++) { |
231 | |||
232 | for (i=0; i < 4; i++) { | ||
233 | if (dev_self[i]) | 314 | if (dev_self[i]) |
234 | nsc_ircc_close(dev_self[i]); | 315 | nsc_ircc_close(dev_self[i]); |
235 | } | 316 | } |
317 | |||
318 | platform_driver_unregister(&nsc_ircc_driver); | ||
319 | |||
320 | if (pnp_registered) | ||
321 | pnp_unregister_driver(&nsc_ircc_pnp_driver); | ||
322 | |||
323 | pnp_registered = 0; | ||
236 | } | 324 | } |
237 | 325 | ||
238 | /* | 326 | /* |
@@ -241,16 +329,26 @@ static void __exit nsc_ircc_cleanup(void) | |||
241 | * Open driver instance | 329 | * Open driver instance |
242 | * | 330 | * |
243 | */ | 331 | */ |
244 | static int __init nsc_ircc_open(int i, chipio_t *info) | 332 | static int __init nsc_ircc_open(chipio_t *info) |
245 | { | 333 | { |
246 | struct net_device *dev; | 334 | struct net_device *dev; |
247 | struct nsc_ircc_cb *self; | 335 | struct nsc_ircc_cb *self; |
248 | struct pm_dev *pmdev; | ||
249 | void *ret; | 336 | void *ret; |
250 | int err; | 337 | int err, chip_index; |
251 | 338 | ||
252 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | 339 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); |
253 | 340 | ||
341 | |||
342 | for (chip_index = 0; chip_index < ARRAY_SIZE(dev_self); chip_index++) { | ||
343 | if (!dev_self[chip_index]) | ||
344 | break; | ||
345 | } | ||
346 | |||
347 | if (chip_index == ARRAY_SIZE(dev_self)) { | ||
348 | IRDA_ERROR("%s(), maximum number of supported chips reached!\n", __FUNCTION__); | ||
349 | return -ENOMEM; | ||
350 | } | ||
351 | |||
254 | IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, | 352 | IRDA_MESSAGE("%s, Found chip at base=0x%03x\n", driver_name, |
255 | info->cfg_base); | 353 | info->cfg_base); |
256 | 354 | ||
@@ -271,8 +369,8 @@ static int __init nsc_ircc_open(int i, chipio_t *info) | |||
271 | spin_lock_init(&self->lock); | 369 | spin_lock_init(&self->lock); |
272 | 370 | ||
273 | /* Need to store self somewhere */ | 371 | /* Need to store self somewhere */ |
274 | dev_self[i] = self; | 372 | dev_self[chip_index] = self; |
275 | self->index = i; | 373 | self->index = chip_index; |
276 | 374 | ||
277 | /* Initialize IO */ | 375 | /* Initialize IO */ |
278 | self->io.cfg_base = info->cfg_base; | 376 | self->io.cfg_base = info->cfg_base; |
@@ -351,7 +449,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info) | |||
351 | 449 | ||
352 | /* Check if user has supplied a valid dongle id or not */ | 450 | /* Check if user has supplied a valid dongle id or not */ |
353 | if ((dongle_id <= 0) || | 451 | if ((dongle_id <= 0) || |
354 | (dongle_id >= (sizeof(dongle_types) / sizeof(dongle_types[0]))) ) { | 452 | (dongle_id >= ARRAY_SIZE(dongle_types))) { |
355 | dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base); | 453 | dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base); |
356 | 454 | ||
357 | IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name, | 455 | IRDA_MESSAGE("%s, Found dongle: %s\n", driver_name, |
@@ -364,11 +462,18 @@ static int __init nsc_ircc_open(int i, chipio_t *info) | |||
364 | self->io.dongle_id = dongle_id; | 462 | self->io.dongle_id = dongle_id; |
365 | nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id); | 463 | nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id); |
366 | 464 | ||
367 | pmdev = pm_register(PM_SYS_DEV, PM_SYS_IRDA, nsc_ircc_pmproc); | 465 | self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME, |
368 | if (pmdev) | 466 | self->index, NULL, 0); |
369 | pmdev->data = self; | 467 | if (IS_ERR(self->pldev)) { |
468 | err = PTR_ERR(self->pldev); | ||
469 | goto out5; | ||
470 | } | ||
471 | platform_set_drvdata(self->pldev, self); | ||
370 | 472 | ||
371 | return 0; | 473 | return chip_index; |
474 | |||
475 | out5: | ||
476 | unregister_netdev(dev); | ||
372 | out4: | 477 | out4: |
373 | dma_free_coherent(NULL, self->tx_buff.truesize, | 478 | dma_free_coherent(NULL, self->tx_buff.truesize, |
374 | self->tx_buff.head, self->tx_buff_dma); | 479 | self->tx_buff.head, self->tx_buff_dma); |
@@ -379,7 +484,7 @@ static int __init nsc_ircc_open(int i, chipio_t *info) | |||
379 | release_region(self->io.fir_base, self->io.fir_ext); | 484 | release_region(self->io.fir_base, self->io.fir_ext); |
380 | out1: | 485 | out1: |
381 | free_netdev(dev); | 486 | free_netdev(dev); |
382 | dev_self[i] = NULL; | 487 | dev_self[chip_index] = NULL; |
383 | return err; | 488 | return err; |
384 | } | 489 | } |
385 | 490 | ||
@@ -399,6 +504,8 @@ static int __exit nsc_ircc_close(struct nsc_ircc_cb *self) | |||
399 | 504 | ||
400 | iobase = self->io.fir_base; | 505 | iobase = self->io.fir_base; |
401 | 506 | ||
507 | platform_device_unregister(self->pldev); | ||
508 | |||
402 | /* Remove netdevice */ | 509 | /* Remove netdevice */ |
403 | unregister_netdev(self->netdev); | 510 | unregister_netdev(self->netdev); |
404 | 511 | ||
@@ -806,6 +913,43 @@ static int nsc_ircc_probe_39x(nsc_chip_t *chip, chipio_t *info) | |||
806 | return 0; | 913 | return 0; |
807 | } | 914 | } |
808 | 915 | ||
916 | /* PNP probing */ | ||
917 | static int nsc_ircc_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id) | ||
918 | { | ||
919 | memset(&pnp_info, 0, sizeof(chipio_t)); | ||
920 | pnp_info.irq = -1; | ||
921 | pnp_info.dma = -1; | ||
922 | pnp_succeeded = 1; | ||
923 | |||
924 | /* There don't seem to be any way to get the cfg_base. | ||
925 | * On my box, cfg_base is in the PnP descriptor of the | ||
926 | * motherboard. Oh well... Jean II */ | ||
927 | |||
928 | if (pnp_port_valid(dev, 0) && | ||
929 | !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) | ||
930 | pnp_info.fir_base = pnp_port_start(dev, 0); | ||
931 | |||
932 | if (pnp_irq_valid(dev, 0) && | ||
933 | !(pnp_irq_flags(dev, 0) & IORESOURCE_DISABLED)) | ||
934 | pnp_info.irq = pnp_irq(dev, 0); | ||
935 | |||
936 | if (pnp_dma_valid(dev, 0) && | ||
937 | !(pnp_dma_flags(dev, 0) & IORESOURCE_DISABLED)) | ||
938 | pnp_info.dma = pnp_dma(dev, 0); | ||
939 | |||
940 | IRDA_DEBUG(0, "%s() : From PnP, found firbase 0x%03X ; irq %d ; dma %d.\n", | ||
941 | __FUNCTION__, pnp_info.fir_base, pnp_info.irq, pnp_info.dma); | ||
942 | |||
943 | if((pnp_info.fir_base == 0) || | ||
944 | (pnp_info.irq == -1) || (pnp_info.dma == -1)) { | ||
945 | /* Returning an error will disable the device. Yuck ! */ | ||
946 | //return -EINVAL; | ||
947 | pnp_succeeded = 0; | ||
948 | } | ||
949 | |||
950 | return 0; | ||
951 | } | ||
952 | |||
809 | /* | 953 | /* |
810 | * Function nsc_ircc_setup (info) | 954 | * Function nsc_ircc_setup (info) |
811 | * | 955 | * |
@@ -2161,45 +2305,83 @@ static struct net_device_stats *nsc_ircc_net_get_stats(struct net_device *dev) | |||
2161 | return &self->stats; | 2305 | return &self->stats; |
2162 | } | 2306 | } |
2163 | 2307 | ||
2164 | static void nsc_ircc_suspend(struct nsc_ircc_cb *self) | 2308 | static int nsc_ircc_suspend(struct platform_device *dev, pm_message_t state) |
2165 | { | 2309 | { |
2166 | IRDA_MESSAGE("%s, Suspending\n", driver_name); | 2310 | struct nsc_ircc_cb *self = platform_get_drvdata(dev); |
2311 | int bank; | ||
2312 | unsigned long flags; | ||
2313 | int iobase = self->io.fir_base; | ||
2167 | 2314 | ||
2168 | if (self->io.suspended) | 2315 | if (self->io.suspended) |
2169 | return; | 2316 | return 0; |
2170 | 2317 | ||
2171 | nsc_ircc_net_close(self->netdev); | 2318 | IRDA_DEBUG(1, "%s, Suspending\n", driver_name); |
2172 | 2319 | ||
2320 | rtnl_lock(); | ||
2321 | if (netif_running(self->netdev)) { | ||
2322 | netif_device_detach(self->netdev); | ||
2323 | spin_lock_irqsave(&self->lock, flags); | ||
2324 | /* Save current bank */ | ||
2325 | bank = inb(iobase+BSR); | ||
2326 | |||
2327 | /* Disable interrupts */ | ||
2328 | switch_bank(iobase, BANK0); | ||
2329 | outb(0, iobase+IER); | ||
2330 | |||
2331 | /* Restore bank register */ | ||
2332 | outb(bank, iobase+BSR); | ||
2333 | |||
2334 | spin_unlock_irqrestore(&self->lock, flags); | ||
2335 | free_irq(self->io.irq, self->netdev); | ||
2336 | disable_dma(self->io.dma); | ||
2337 | } | ||
2173 | self->io.suspended = 1; | 2338 | self->io.suspended = 1; |
2339 | rtnl_unlock(); | ||
2340 | |||
2341 | return 0; | ||
2174 | } | 2342 | } |
2175 | 2343 | ||
2176 | static void nsc_ircc_wakeup(struct nsc_ircc_cb *self) | 2344 | static int nsc_ircc_resume(struct platform_device *dev) |
2177 | { | 2345 | { |
2346 | struct nsc_ircc_cb *self = platform_get_drvdata(dev); | ||
2347 | unsigned long flags; | ||
2348 | |||
2178 | if (!self->io.suspended) | 2349 | if (!self->io.suspended) |
2179 | return; | 2350 | return 0; |
2180 | 2351 | ||
2352 | IRDA_DEBUG(1, "%s, Waking up\n", driver_name); | ||
2353 | |||
2354 | rtnl_lock(); | ||
2181 | nsc_ircc_setup(&self->io); | 2355 | nsc_ircc_setup(&self->io); |
2182 | nsc_ircc_net_open(self->netdev); | 2356 | nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id); |
2183 | |||
2184 | IRDA_MESSAGE("%s, Waking up\n", driver_name); | ||
2185 | 2357 | ||
2358 | if (netif_running(self->netdev)) { | ||
2359 | if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, | ||
2360 | self->netdev->name, self->netdev)) { | ||
2361 | IRDA_WARNING("%s, unable to allocate irq=%d\n", | ||
2362 | driver_name, self->io.irq); | ||
2363 | |||
2364 | /* | ||
2365 | * Don't fail resume process, just kill this | ||
2366 | * network interface | ||
2367 | */ | ||
2368 | unregister_netdevice(self->netdev); | ||
2369 | } else { | ||
2370 | spin_lock_irqsave(&self->lock, flags); | ||
2371 | nsc_ircc_change_speed(self, self->io.speed); | ||
2372 | spin_unlock_irqrestore(&self->lock, flags); | ||
2373 | netif_device_attach(self->netdev); | ||
2374 | } | ||
2375 | |||
2376 | } else { | ||
2377 | spin_lock_irqsave(&self->lock, flags); | ||
2378 | nsc_ircc_change_speed(self, 9600); | ||
2379 | spin_unlock_irqrestore(&self->lock, flags); | ||
2380 | } | ||
2186 | self->io.suspended = 0; | 2381 | self->io.suspended = 0; |
2187 | } | 2382 | rtnl_unlock(); |
2188 | 2383 | ||
2189 | static int nsc_ircc_pmproc(struct pm_dev *dev, pm_request_t rqst, void *data) | 2384 | return 0; |
2190 | { | ||
2191 | struct nsc_ircc_cb *self = (struct nsc_ircc_cb*) dev->data; | ||
2192 | if (self) { | ||
2193 | switch (rqst) { | ||
2194 | case PM_SUSPEND: | ||
2195 | nsc_ircc_suspend(self); | ||
2196 | break; | ||
2197 | case PM_RESUME: | ||
2198 | nsc_ircc_wakeup(self); | ||
2199 | break; | ||
2200 | } | ||
2201 | } | ||
2202 | return 0; | ||
2203 | } | 2385 | } |
2204 | 2386 | ||
2205 | MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); | 2387 | MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>"); |
diff --git a/drivers/net/irda/nsc-ircc.h b/drivers/net/irda/nsc-ircc.h index 6edf7e514624..dacf671abcd6 100644 --- a/drivers/net/irda/nsc-ircc.h +++ b/drivers/net/irda/nsc-ircc.h | |||
@@ -269,7 +269,7 @@ struct nsc_ircc_cb { | |||
269 | __u32 new_speed; | 269 | __u32 new_speed; |
270 | int index; /* Instance index */ | 270 | int index; /* Instance index */ |
271 | 271 | ||
272 | struct pm_dev *dev; | 272 | struct platform_device *pldev; |
273 | }; | 273 | }; |
274 | 274 | ||
275 | static inline void switch_bank(int iobase, int bank) | 275 | static inline void switch_bank(int iobase, int bank) |
diff --git a/drivers/net/irda/sir_dongle.c b/drivers/net/irda/sir_dongle.c index 8d225921ae7b..d7e32d9554fc 100644 --- a/drivers/net/irda/sir_dongle.c +++ b/drivers/net/irda/sir_dongle.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/smp_lock.h> | 17 | #include <linux/smp_lock.h> |
18 | #include <linux/kmod.h> | 18 | #include <linux/kmod.h> |
19 | #include <linux/mutex.h> | ||
19 | 20 | ||
20 | #include <net/irda/irda.h> | 21 | #include <net/irda/irda.h> |
21 | 22 | ||
@@ -28,7 +29,7 @@ | |||
28 | */ | 29 | */ |
29 | 30 | ||
30 | static LIST_HEAD(dongle_list); /* list of registered dongle drivers */ | 31 | static LIST_HEAD(dongle_list); /* list of registered dongle drivers */ |
31 | static DECLARE_MUTEX(dongle_list_lock); /* protects the list */ | 32 | static DEFINE_MUTEX(dongle_list_lock); /* protects the list */ |
32 | 33 | ||
33 | int irda_register_dongle(struct dongle_driver *new) | 34 | int irda_register_dongle(struct dongle_driver *new) |
34 | { | 35 | { |
@@ -38,25 +39,25 @@ int irda_register_dongle(struct dongle_driver *new) | |||
38 | IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n", | 39 | IRDA_DEBUG(0, "%s : registering dongle \"%s\" (%d).\n", |
39 | __FUNCTION__, new->driver_name, new->type); | 40 | __FUNCTION__, new->driver_name, new->type); |
40 | 41 | ||
41 | down(&dongle_list_lock); | 42 | mutex_lock(&dongle_list_lock); |
42 | list_for_each(entry, &dongle_list) { | 43 | list_for_each(entry, &dongle_list) { |
43 | drv = list_entry(entry, struct dongle_driver, dongle_list); | 44 | drv = list_entry(entry, struct dongle_driver, dongle_list); |
44 | if (new->type == drv->type) { | 45 | if (new->type == drv->type) { |
45 | up(&dongle_list_lock); | 46 | mutex_unlock(&dongle_list_lock); |
46 | return -EEXIST; | 47 | return -EEXIST; |
47 | } | 48 | } |
48 | } | 49 | } |
49 | list_add(&new->dongle_list, &dongle_list); | 50 | list_add(&new->dongle_list, &dongle_list); |
50 | up(&dongle_list_lock); | 51 | mutex_unlock(&dongle_list_lock); |
51 | return 0; | 52 | return 0; |
52 | } | 53 | } |
53 | EXPORT_SYMBOL(irda_register_dongle); | 54 | EXPORT_SYMBOL(irda_register_dongle); |
54 | 55 | ||
55 | int irda_unregister_dongle(struct dongle_driver *drv) | 56 | int irda_unregister_dongle(struct dongle_driver *drv) |
56 | { | 57 | { |
57 | down(&dongle_list_lock); | 58 | mutex_lock(&dongle_list_lock); |
58 | list_del(&drv->dongle_list); | 59 | list_del(&drv->dongle_list); |
59 | up(&dongle_list_lock); | 60 | mutex_unlock(&dongle_list_lock); |
60 | return 0; | 61 | return 0; |
61 | } | 62 | } |
62 | EXPORT_SYMBOL(irda_unregister_dongle); | 63 | EXPORT_SYMBOL(irda_unregister_dongle); |
@@ -75,7 +76,7 @@ int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type) | |||
75 | return -EBUSY; | 76 | return -EBUSY; |
76 | 77 | ||
77 | /* serialize access to the list of registered dongles */ | 78 | /* serialize access to the list of registered dongles */ |
78 | down(&dongle_list_lock); | 79 | mutex_lock(&dongle_list_lock); |
79 | 80 | ||
80 | list_for_each(entry, &dongle_list) { | 81 | list_for_each(entry, &dongle_list) { |
81 | drv = list_entry(entry, struct dongle_driver, dongle_list); | 82 | drv = list_entry(entry, struct dongle_driver, dongle_list); |
@@ -109,14 +110,14 @@ int sirdev_get_dongle(struct sir_dev *dev, IRDA_DONGLE type) | |||
109 | if (!drv->open || (err=drv->open(dev))!=0) | 110 | if (!drv->open || (err=drv->open(dev))!=0) |
110 | goto out_reject; /* failed to open driver */ | 111 | goto out_reject; /* failed to open driver */ |
111 | 112 | ||
112 | up(&dongle_list_lock); | 113 | mutex_unlock(&dongle_list_lock); |
113 | return 0; | 114 | return 0; |
114 | 115 | ||
115 | out_reject: | 116 | out_reject: |
116 | dev->dongle_drv = NULL; | 117 | dev->dongle_drv = NULL; |
117 | module_put(drv->owner); | 118 | module_put(drv->owner); |
118 | out_unlock: | 119 | out_unlock: |
119 | up(&dongle_list_lock); | 120 | mutex_unlock(&dongle_list_lock); |
120 | return err; | 121 | return err; |
121 | } | 122 | } |
122 | 123 | ||
diff --git a/drivers/net/irda/toim3232-sir.c b/drivers/net/irda/toim3232-sir.c new file mode 100644 index 000000000000..aa1a9b0ed83e --- /dev/null +++ b/drivers/net/irda/toim3232-sir.c | |||
@@ -0,0 +1,375 @@ | |||
1 | /********************************************************************* | ||
2 | * | ||
3 | * Filename: toim3232-sir.c | ||
4 | * Version: 1.0 | ||
5 | * Description: Implementation of dongles based on the Vishay/Temic | ||
6 | * TOIM3232 SIR Endec chipset. Currently only the | ||
7 | * IRWave IR320ST-2 is tested, although it should work | ||
8 | * with any TOIM3232 or TOIM4232 chipset based RS232 | ||
9 | * dongle with minimal modification. | ||
10 | * Based heavily on the Tekram driver (tekram.c), | ||
11 | * with thanks to Dag Brattli and Martin Diehl. | ||
12 | * Status: Experimental. | ||
13 | * Author: David Basden <davidb-irda@rcpt.to> | ||
14 | * Created at: Thu Feb 09 23:47:32 2006 | ||
15 | * | ||
16 | * Copyright (c) 2006 David Basden. | ||
17 | * Copyright (c) 1998-1999 Dag Brattli, | ||
18 | * Copyright (c) 2002 Martin Diehl, | ||
19 | * All Rights Reserved. | ||
20 | * | ||
21 | * This program is free software; you can redistribute it and/or | ||
22 | * modify it under the terms of the GNU General Public License as | ||
23 | * published by the Free Software Foundation; either version 2 of | ||
24 | * the License, or (at your option) any later version. | ||
25 | * | ||
26 | * Neither Dag Brattli nor University of Tromsø admit liability nor | ||
27 | * provide warranty for any of this software. This material is | ||
28 | * provided "AS-IS" and at no charge. | ||
29 | * | ||
30 | ********************************************************************/ | ||
31 | |||
32 | /* | ||
33 | * This driver has currently only been tested on the IRWave IR320ST-2 | ||
34 | * | ||
35 | * PROTOCOL: | ||
36 | * | ||
37 | * The protocol for talking to the TOIM3232 is quite easy, and is | ||
38 | * designed to interface with RS232 with only level convertors. The | ||
39 | * BR/~D line on the chip is brought high to signal 'command mode', | ||
40 | * where a command byte is sent to select the baudrate of the RS232 | ||
41 | * interface and the pulse length of the IRDA output. When BR/~D | ||
42 | * is brought low, the dongle then changes to the selected baudrate, | ||
43 | * and the RS232 interface is used for data until BR/~D is brought | ||
44 | * high again. The initial speed for the TOIMx323 after RESET is | ||
45 | * 9600 baud. The baudrate for command-mode is the last selected | ||
46 | * baud-rate, or 9600 after a RESET. | ||
47 | * | ||
48 | * The dongle I have (below) adds some extra hardware on the front end, | ||
49 | * but this is mostly directed towards parasitic power from the RS232 | ||
50 | * line rather than changing very much about how to communicate with | ||
51 | * the TOIM3232. | ||
52 | * | ||
53 | * The protocol to talk to the TOIM4232 chipset seems to be almost | ||
54 | * identical to the TOIM3232 (and the 4232 datasheet is more detailed) | ||
55 | * so this code will probably work on that as well, although I haven't | ||
56 | * tested it on that hardware. | ||
57 | * | ||
58 | * Target dongle variations that might be common: | ||
59 | * | ||
60 | * DTR and RTS function: | ||
61 | * The data sheet for the 4232 has a sample implementation that hooks the | ||
62 | * DTR and RTS lines to the RESET and BaudRate/~Data lines of the | ||
63 | * chip (through line-converters). Given both DTR and RTS would have to | ||
64 | * be held low in normal operation, and the TOIMx232 requires +5V to | ||
65 | * signal ground, most dongle designers would almost certainly choose | ||
66 | * an implementation that kept at least one of DTR or RTS high in | ||
67 | * normal operation to provide power to the dongle, but will likely | ||
68 | * vary between designs. | ||
69 | * | ||
70 | * User specified command bits: | ||
71 | * There are two user-controllable output lines from the TOIMx232 that | ||
72 | * can be set low or high by setting the appropriate bits in the | ||
73 | * high-nibble of the command byte (when setting speed and pulse length). | ||
74 | * These might be used to switch on and off added hardware or extra | ||
75 | * dongle features. | ||
76 | * | ||
77 | * | ||
78 | * Target hardware: IRWave IR320ST-2 | ||
79 | * | ||
80 | * The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic | ||
81 | * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transceiver. | ||
82 | * It uses a hex inverter and some discrete components to buffer and | ||
83 | * line convert the RS232 down to 5V. | ||
84 | * | ||
85 | * The dongle is powered through a voltage regulator, fed by a large | ||
86 | * capacitor. To switch the dongle on, DTR is brought high to charge | ||
87 | * the capacitor and drive the voltage regulator. DTR isn't associated | ||
88 | * with any control lines on the TOIM3232. Parasitic power is also taken | ||
89 | * from the RTS, TD and RD lines when brought high, but through resistors. | ||
90 | * When DTR is low, the circuit might lose power even with RTS high. | ||
91 | * | ||
92 | * RTS is inverted and attached to the BR/~D input pin. When RTS | ||
93 | * is high, BR/~D is low, and the TOIM3232 is in the normal 'data' mode. | ||
94 | * RTS is brought low, BR/~D is high, and the TOIM3232 is in 'command | ||
95 | * mode'. | ||
96 | * | ||
97 | * For some unknown reason, the RESET line isn't actually connected | ||
98 | * to anything. This means to reset the dongle to get it to a known | ||
99 | * state (9600 baud) you must drop DTR and RTS low, wait for the power | ||
100 | * capacitor to discharge, and then bring DTR (and RTS for data mode) | ||
101 | * high again, and wait for the capacitor to charge, the power supply | ||
102 | * to stabilise, and the oscillator clock to stabilise. | ||
103 | * | ||
104 | * Fortunately, if the current baudrate is known, the chipset can | ||
105 | * easily change speed by entering command mode without having to | ||
106 | * reset the dongle first. | ||
107 | * | ||
108 | * Major Components: | ||
109 | * | ||
110 | * - Vishay/Temic TOIM3232 SIR Endec to change RS232 pulse timings | ||
111 | * to IRDA pulse timings | ||
112 | * - 3.6864MHz crystal to drive TOIM3232 clock oscillator | ||
113 | * - DM74lS04M Inverting Hex line buffer for RS232 input buffering | ||
114 | * and level conversion | ||
115 | * - PJ2951AC 150mA voltage regulator | ||
116 | * - Vishay/Temic TFDS4500 SIR IRDA front-end transceiver | ||
117 | * | ||
118 | */ | ||
119 | |||
120 | #include <linux/module.h> | ||
121 | #include <linux/delay.h> | ||
122 | #include <linux/init.h> | ||
123 | |||
124 | #include <net/irda/irda.h> | ||
125 | |||
126 | #include "sir-dev.h" | ||
127 | |||
128 | static int toim3232delay = 150; /* default is 150 ms */ | ||
129 | module_param(toim3232delay, int, 0); | ||
130 | MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay"); | ||
131 | |||
132 | #if 0 | ||
133 | static int toim3232flipdtr = 0; /* default is DTR high to reset */ | ||
134 | module_param(toim3232flipdtr, int, 0); | ||
135 | MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)"); | ||
136 | |||
137 | static int toim3232fliprts = 0; /* default is RTS high for baud change */ | ||
138 | module_param(toim3232fliptrs, int, 0); | ||
139 | MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)"); | ||
140 | #endif | ||
141 | |||
142 | static int toim3232_open(struct sir_dev *); | ||
143 | static int toim3232_close(struct sir_dev *); | ||
144 | static int toim3232_change_speed(struct sir_dev *, unsigned); | ||
145 | static int toim3232_reset(struct sir_dev *); | ||
146 | |||
147 | #define TOIM3232_115200 0x00 | ||
148 | #define TOIM3232_57600 0x01 | ||
149 | #define TOIM3232_38400 0x02 | ||
150 | #define TOIM3232_19200 0x03 | ||
151 | #define TOIM3232_9600 0x06 | ||
152 | #define TOIM3232_2400 0x0A | ||
153 | |||
154 | #define TOIM3232_PW 0x10 /* Pulse select bit */ | ||
155 | |||
156 | static struct dongle_driver toim3232 = { | ||
157 | .owner = THIS_MODULE, | ||
158 | .driver_name = "Vishay TOIM3232", | ||
159 | .type = IRDA_TOIM3232_DONGLE, | ||
160 | .open = toim3232_open, | ||
161 | .close = toim3232_close, | ||
162 | .reset = toim3232_reset, | ||
163 | .set_speed = toim3232_change_speed, | ||
164 | }; | ||
165 | |||
166 | static int __init toim3232_sir_init(void) | ||
167 | { | ||
168 | if (toim3232delay < 1 || toim3232delay > 500) | ||
169 | toim3232delay = 200; | ||
170 | IRDA_DEBUG(1, "%s - using %d ms delay\n", | ||
171 | toim3232.driver_name, toim3232delay); | ||
172 | return irda_register_dongle(&toim3232); | ||
173 | } | ||
174 | |||
175 | static void __exit toim3232_sir_cleanup(void) | ||
176 | { | ||
177 | irda_unregister_dongle(&toim3232); | ||
178 | } | ||
179 | |||
180 | static int toim3232_open(struct sir_dev *dev) | ||
181 | { | ||
182 | struct qos_info *qos = &dev->qos; | ||
183 | |||
184 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | ||
185 | |||
186 | /* Pull the lines high to start with. | ||
187 | * | ||
188 | * For the IR320ST-2, we need to charge the main supply capacitor to | ||
189 | * switch the device on. We keep DTR high throughout to do this. | ||
190 | * When RTS, TD and RD are high, they will also trickle-charge the | ||
191 | * cap. RTS is high for data transmission, and low for baud rate select. | ||
192 | * -- DGB | ||
193 | */ | ||
194 | sirdev_set_dtr_rts(dev, TRUE, TRUE); | ||
195 | |||
196 | /* The TOIM3232 supports many speeds between 1200bps and 115000bps. | ||
197 | * We really only care about those supported by the IRDA spec, but | ||
198 | * 38400 seems to be implemented in many places */ | ||
199 | qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; | ||
200 | |||
201 | /* From the tekram driver. Not sure what a reasonable value is -- DGB */ | ||
202 | qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */ | ||
203 | irda_qos_bits_to_value(qos); | ||
204 | |||
205 | /* irda thread waits 50 msec for power settling */ | ||
206 | |||
207 | return 0; | ||
208 | } | ||
209 | |||
210 | static int toim3232_close(struct sir_dev *dev) | ||
211 | { | ||
212 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | ||
213 | |||
214 | /* Power off dongle */ | ||
215 | sirdev_set_dtr_rts(dev, FALSE, FALSE); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * Function toim3232change_speed (dev, state, speed) | ||
222 | * | ||
223 | * Set the speed for the TOIM3232 based dongle. Warning, this | ||
224 | * function must be called with a process context! | ||
225 | * | ||
226 | * Algorithm | ||
227 | * 1. keep DTR high but clear RTS to bring into baud programming mode | ||
228 | * 2. wait at least 7us to enter programming mode | ||
229 | * 3. send control word to set baud rate and timing | ||
230 | * 4. wait at least 1us | ||
231 | * 5. bring RTS high to enter DATA mode (RS232 is passed through to transceiver) | ||
232 | * 6. should take effect immediately (although probably worth waiting) | ||
233 | */ | ||
234 | |||
235 | #define TOIM3232_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1) | ||
236 | |||
237 | static int toim3232_change_speed(struct sir_dev *dev, unsigned speed) | ||
238 | { | ||
239 | unsigned state = dev->fsm.substate; | ||
240 | unsigned delay = 0; | ||
241 | u8 byte; | ||
242 | static int ret = 0; | ||
243 | |||
244 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | ||
245 | |||
246 | switch(state) { | ||
247 | case SIRDEV_STATE_DONGLE_SPEED: | ||
248 | |||
249 | /* Figure out what we are going to send as a control byte */ | ||
250 | switch (speed) { | ||
251 | case 2400: | ||
252 | byte = TOIM3232_PW|TOIM3232_2400; | ||
253 | break; | ||
254 | default: | ||
255 | speed = 9600; | ||
256 | ret = -EINVAL; | ||
257 | /* fall thru */ | ||
258 | case 9600: | ||
259 | byte = TOIM3232_PW|TOIM3232_9600; | ||
260 | break; | ||
261 | case 19200: | ||
262 | byte = TOIM3232_PW|TOIM3232_19200; | ||
263 | break; | ||
264 | case 38400: | ||
265 | byte = TOIM3232_PW|TOIM3232_38400; | ||
266 | break; | ||
267 | case 57600: | ||
268 | byte = TOIM3232_PW|TOIM3232_57600; | ||
269 | break; | ||
270 | case 115200: | ||
271 | byte = TOIM3232_115200; | ||
272 | break; | ||
273 | } | ||
274 | |||
275 | /* Set DTR, Clear RTS: Go into baud programming mode */ | ||
276 | sirdev_set_dtr_rts(dev, TRUE, FALSE); | ||
277 | |||
278 | /* Wait at least 7us */ | ||
279 | udelay(14); | ||
280 | |||
281 | /* Write control byte */ | ||
282 | sirdev_raw_write(dev, &byte, 1); | ||
283 | |||
284 | dev->speed = speed; | ||
285 | |||
286 | state = TOIM3232_STATE_WAIT_SPEED; | ||
287 | delay = toim3232delay; | ||
288 | break; | ||
289 | |||
290 | case TOIM3232_STATE_WAIT_SPEED: | ||
291 | /* Have transmitted control byte. Wait for 'at least 1us' */ | ||
292 | udelay(14); | ||
293 | |||
294 | /* Set DTR, Set RTS: Go into normal data mode */ | ||
295 | sirdev_set_dtr_rts(dev, TRUE, TRUE); | ||
296 | |||
297 | /* Wait (TODO: check this is needed) */ | ||
298 | udelay(50); | ||
299 | break; | ||
300 | |||
301 | default: | ||
302 | printk(KERN_ERR "%s - undefined state %d\n", __FUNCTION__, state); | ||
303 | ret = -EINVAL; | ||
304 | break; | ||
305 | } | ||
306 | |||
307 | dev->fsm.substate = state; | ||
308 | return (delay > 0) ? delay : ret; | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * Function toim3232reset (driver) | ||
313 | * | ||
314 | * This function resets the toim3232 dongle. Warning, this function | ||
315 | * must be called with a process context!! | ||
316 | * | ||
317 | * What we should do is: | ||
318 | * 0. Pull RESET high | ||
319 | * 1. Wait for at least 7us | ||
320 | * 2. Pull RESET low | ||
321 | * 3. Wait for at least 7us | ||
322 | * 4. Pull BR/~D high | ||
323 | * 5. Wait for at least 7us | ||
324 | * 6. Send control byte to set baud rate | ||
325 | * 7. Wait at least 1us after stop bit | ||
326 | * 8. Pull BR/~D low | ||
327 | * 9. Should then be in data mode | ||
328 | * | ||
329 | * Because the IR320ST-2 doesn't have the RESET line connected for some reason, | ||
330 | * we'll have to do something else. | ||
331 | * | ||
332 | * The default speed after a RESET is 9600, so lets try just bringing it up in | ||
333 | * data mode after switching it off, waiting for the supply capacitor to | ||
334 | * discharge, and then switch it back on. This isn't actually pulling RESET | ||
335 | * high, but it seems to have the same effect. | ||
336 | * | ||
337 | * This behaviour will probably work on dongles that have the RESET line connected, | ||
338 | * but if not, add a flag for the IR320ST-2, and implement the above-listed proper | ||
339 | * behaviour. | ||
340 | * | ||
341 | * RTS is inverted and then fed to BR/~D, so to put it in programming mode, we | ||
342 | * need to pull RTS low | ||
343 | */ | ||
344 | |||
345 | static int toim3232_reset(struct sir_dev *dev) | ||
346 | { | ||
347 | IRDA_DEBUG(2, "%s()\n", __FUNCTION__); | ||
348 | |||
349 | /* Switch off both DTR and RTS to switch off dongle */ | ||
350 | sirdev_set_dtr_rts(dev, FALSE, FALSE); | ||
351 | |||
352 | /* Should sleep a while. This might be evil doing it this way.*/ | ||
353 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
354 | schedule_timeout(msecs_to_jiffies(50)); | ||
355 | |||
356 | /* Set DTR, Set RTS (data mode) */ | ||
357 | sirdev_set_dtr_rts(dev, TRUE, TRUE); | ||
358 | |||
359 | /* Wait at least 10 ms for power to stabilize again */ | ||
360 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
361 | schedule_timeout(msecs_to_jiffies(10)); | ||
362 | |||
363 | /* Speed should now be 9600 */ | ||
364 | dev->speed = 9600; | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | MODULE_AUTHOR("David Basden <davidb-linux@rcpt.to>"); | ||
370 | MODULE_DESCRIPTION("Vishay/Temic TOIM3232 based dongle driver"); | ||
371 | MODULE_LICENSE("GPL"); | ||
372 | MODULE_ALIAS("irda-dongle-12"); /* IRDA_TOIM3232_DONGLE */ | ||
373 | |||
374 | module_init(toim3232_sir_init); | ||
375 | module_exit(toim3232_sir_cleanup); | ||
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index a9f49f058cfb..97a49e0be76b 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c | |||
@@ -1887,7 +1887,7 @@ static int __init vlsi_mod_init(void) | |||
1887 | vlsi_proc_root->owner = THIS_MODULE; | 1887 | vlsi_proc_root->owner = THIS_MODULE; |
1888 | } | 1888 | } |
1889 | 1889 | ||
1890 | ret = pci_module_init(&vlsi_irda_driver); | 1890 | ret = pci_register_driver(&vlsi_irda_driver); |
1891 | 1891 | ||
1892 | if (ret && vlsi_proc_root) | 1892 | if (ret && vlsi_proc_root) |
1893 | remove_proc_entry(PROC_DIR, NULL); | 1893 | remove_proc_entry(PROC_DIR, NULL); |
diff --git a/drivers/net/mv643xx_eth.h b/drivers/net/mv643xx_eth.h index 7754d1974b9e..4262c1da6d4a 100644 --- a/drivers/net/mv643xx_eth.h +++ b/drivers/net/mv643xx_eth.h | |||
@@ -42,13 +42,23 @@ | |||
42 | #define MAX_DESCS_PER_SKB 1 | 42 | #define MAX_DESCS_PER_SKB 1 |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | /* | ||
46 | * The MV643XX HW requires 8-byte alignment. However, when I/O | ||
47 | * is non-cache-coherent, we need to ensure that the I/O buffers | ||
48 | * we use don't share cache lines with other data. | ||
49 | */ | ||
50 | #if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_NOT_COHERENT_CACHE) | ||
51 | #define ETH_DMA_ALIGN L1_CACHE_BYTES | ||
52 | #else | ||
53 | #define ETH_DMA_ALIGN 8 | ||
54 | #endif | ||
55 | |||
45 | #define ETH_VLAN_HLEN 4 | 56 | #define ETH_VLAN_HLEN 4 |
46 | #define ETH_FCS_LEN 4 | 57 | #define ETH_FCS_LEN 4 |
47 | #define ETH_DMA_ALIGN 8 /* hw requires 8-byte alignment */ | 58 | #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ |
48 | #define ETH_HW_IP_ALIGN 2 /* hw aligns IP header */ | ||
49 | #define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \ | 59 | #define ETH_WRAPPER_LEN (ETH_HW_IP_ALIGN + ETH_HLEN + \ |
50 | ETH_VLAN_HLEN + ETH_FCS_LEN) | 60 | ETH_VLAN_HLEN + ETH_FCS_LEN) |
51 | #define ETH_RX_SKB_SIZE ((dev->mtu + ETH_WRAPPER_LEN + 7) & ~0x7) | 61 | #define ETH_RX_SKB_SIZE (dev->mtu + ETH_WRAPPER_LEN + ETH_DMA_ALIGN) |
52 | 62 | ||
53 | #define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */ | 63 | #define ETH_RX_QUEUES_ENABLED (1 << 0) /* use only Q0 for receive */ |
54 | #define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */ | 64 | #define ETH_TX_QUEUES_ENABLED (1 << 0) /* use only Q0 for transmit */ |
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c index 7e900572eaf8..9595f74da93f 100644 --- a/drivers/net/pcnet32.c +++ b/drivers/net/pcnet32.c | |||
@@ -22,12 +22,12 @@ | |||
22 | *************************************************************************/ | 22 | *************************************************************************/ |
23 | 23 | ||
24 | #define DRV_NAME "pcnet32" | 24 | #define DRV_NAME "pcnet32" |
25 | #define DRV_VERSION "1.31c" | 25 | #define DRV_VERSION "1.32" |
26 | #define DRV_RELDATE "01.Nov.2005" | 26 | #define DRV_RELDATE "18.Mar.2006" |
27 | #define PFX DRV_NAME ": " | 27 | #define PFX DRV_NAME ": " |
28 | 28 | ||
29 | static const char * const version = | 29 | static const char *const version = |
30 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; | 30 | DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; |
31 | 31 | ||
32 | #include <linux/module.h> | 32 | #include <linux/module.h> |
33 | #include <linux/kernel.h> | 33 | #include <linux/kernel.h> |
@@ -58,18 +58,23 @@ DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " tsbogend@alpha.franken.de\n"; | |||
58 | * PCI device identifiers for "new style" Linux PCI Device Drivers | 58 | * PCI device identifiers for "new style" Linux PCI Device Drivers |
59 | */ | 59 | */ |
60 | static struct pci_device_id pcnet32_pci_tbl[] = { | 60 | static struct pci_device_id pcnet32_pci_tbl[] = { |
61 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 61 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, |
62 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 62 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, |
63 | /* | 63 | { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, |
64 | * Adapters that were sold with IBM's RS/6000 or pSeries hardware have | 64 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, |
65 | * the incorrect vendor id. | 65 | |
66 | */ | 66 | /* |
67 | { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, | 67 | * Adapters that were sold with IBM's RS/6000 or pSeries hardware have |
68 | PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0 }, | 68 | * the incorrect vendor id. |
69 | { 0, } | 69 | */ |
70 | { PCI_VENDOR_ID_TRIDENT, PCI_DEVICE_ID_AMD_LANCE, | ||
71 | PCI_ANY_ID, PCI_ANY_ID, | ||
72 | PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, 0}, | ||
73 | |||
74 | { } /* terminate list */ | ||
70 | }; | 75 | }; |
71 | 76 | ||
72 | MODULE_DEVICE_TABLE (pci, pcnet32_pci_tbl); | 77 | MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl); |
73 | 78 | ||
74 | static int cards_found; | 79 | static int cards_found; |
75 | 80 | ||
@@ -77,13 +82,11 @@ static int cards_found; | |||
77 | * VLB I/O addresses | 82 | * VLB I/O addresses |
78 | */ | 83 | */ |
79 | static unsigned int pcnet32_portlist[] __initdata = | 84 | static unsigned int pcnet32_portlist[] __initdata = |
80 | { 0x300, 0x320, 0x340, 0x360, 0 }; | 85 | { 0x300, 0x320, 0x340, 0x360, 0 }; |
81 | |||
82 | |||
83 | 86 | ||
84 | static int pcnet32_debug = 0; | 87 | static int pcnet32_debug = 0; |
85 | static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ | 88 | static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */ |
86 | static int pcnet32vlb; /* check for VLB cards ? */ | 89 | static int pcnet32vlb; /* check for VLB cards ? */ |
87 | 90 | ||
88 | static struct net_device *pcnet32_dev; | 91 | static struct net_device *pcnet32_dev; |
89 | 92 | ||
@@ -110,32 +113,34 @@ static int rx_copybreak = 200; | |||
110 | * to internal options | 113 | * to internal options |
111 | */ | 114 | */ |
112 | static const unsigned char options_mapping[] = { | 115 | static const unsigned char options_mapping[] = { |
113 | PCNET32_PORT_ASEL, /* 0 Auto-select */ | 116 | PCNET32_PORT_ASEL, /* 0 Auto-select */ |
114 | PCNET32_PORT_AUI, /* 1 BNC/AUI */ | 117 | PCNET32_PORT_AUI, /* 1 BNC/AUI */ |
115 | PCNET32_PORT_AUI, /* 2 AUI/BNC */ | 118 | PCNET32_PORT_AUI, /* 2 AUI/BNC */ |
116 | PCNET32_PORT_ASEL, /* 3 not supported */ | 119 | PCNET32_PORT_ASEL, /* 3 not supported */ |
117 | PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ | 120 | PCNET32_PORT_10BT | PCNET32_PORT_FD, /* 4 10baseT-FD */ |
118 | PCNET32_PORT_ASEL, /* 5 not supported */ | 121 | PCNET32_PORT_ASEL, /* 5 not supported */ |
119 | PCNET32_PORT_ASEL, /* 6 not supported */ | 122 | PCNET32_PORT_ASEL, /* 6 not supported */ |
120 | PCNET32_PORT_ASEL, /* 7 not supported */ | 123 | PCNET32_PORT_ASEL, /* 7 not supported */ |
121 | PCNET32_PORT_ASEL, /* 8 not supported */ | 124 | PCNET32_PORT_ASEL, /* 8 not supported */ |
122 | PCNET32_PORT_MII, /* 9 MII 10baseT */ | 125 | PCNET32_PORT_MII, /* 9 MII 10baseT */ |
123 | PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ | 126 | PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD */ |
124 | PCNET32_PORT_MII, /* 11 MII (autosel) */ | 127 | PCNET32_PORT_MII, /* 11 MII (autosel) */ |
125 | PCNET32_PORT_10BT, /* 12 10BaseT */ | 128 | PCNET32_PORT_10BT, /* 12 10BaseT */ |
126 | PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ | 129 | PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx */ |
127 | PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */ | 130 | /* 14 MII 100BaseTx-FD */ |
128 | PCNET32_PORT_ASEL /* 15 not supported */ | 131 | PCNET32_PORT_MII | PCNET32_PORT_100 | PCNET32_PORT_FD, |
132 | PCNET32_PORT_ASEL /* 15 not supported */ | ||
129 | }; | 133 | }; |
130 | 134 | ||
131 | static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = { | 135 | static const char pcnet32_gstrings_test[][ETH_GSTRING_LEN] = { |
132 | "Loopback test (offline)" | 136 | "Loopback test (offline)" |
133 | }; | 137 | }; |
138 | |||
134 | #define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN) | 139 | #define PCNET32_TEST_LEN (sizeof(pcnet32_gstrings_test) / ETH_GSTRING_LEN) |
135 | 140 | ||
136 | #define PCNET32_NUM_REGS 168 | 141 | #define PCNET32_NUM_REGS 136 |
137 | 142 | ||
138 | #define MAX_UNITS 8 /* More are supported, limit only on options */ | 143 | #define MAX_UNITS 8 /* More are supported, limit only on options */ |
139 | static int options[MAX_UNITS]; | 144 | static int options[MAX_UNITS]; |
140 | static int full_duplex[MAX_UNITS]; | 145 | static int full_duplex[MAX_UNITS]; |
141 | static int homepna[MAX_UNITS]; | 146 | static int homepna[MAX_UNITS]; |
@@ -151,124 +156,6 @@ static int homepna[MAX_UNITS]; | |||
151 | */ | 156 | */ |
152 | 157 | ||
153 | /* | 158 | /* |
154 | * History: | ||
155 | * v0.01: Initial version | ||
156 | * only tested on Alpha Noname Board | ||
157 | * v0.02: changed IRQ handling for new interrupt scheme (dev_id) | ||
158 | * tested on a ASUS SP3G | ||
159 | * v0.10: fixed an odd problem with the 79C974 in a Compaq Deskpro XL | ||
160 | * looks like the 974 doesn't like stopping and restarting in a | ||
161 | * short period of time; now we do a reinit of the lance; the | ||
162 | * bug was triggered by doing ifconfig eth0 <ip> broadcast <addr> | ||
163 | * and hangs the machine (thanks to Klaus Liedl for debugging) | ||
164 | * v0.12: by suggestion from Donald Becker: Renamed driver to pcnet32, | ||
165 | * made it standalone (no need for lance.c) | ||
166 | * v0.13: added additional PCI detecting for special PCI devices (Compaq) | ||
167 | * v0.14: stripped down additional PCI probe (thanks to David C Niemi | ||
168 | * and sveneric@xs4all.nl for testing this on their Compaq boxes) | ||
169 | * v0.15: added 79C965 (VLB) probe | ||
170 | * added interrupt sharing for PCI chips | ||
171 | * v0.16: fixed set_multicast_list on Alpha machines | ||
172 | * v0.17: removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c | ||
173 | * v0.19: changed setting of autoselect bit | ||
174 | * v0.20: removed additional Compaq PCI probe; there is now a working one | ||
175 | * in arch/i386/bios32.c | ||
176 | * v0.21: added endian conversion for ppc, from work by cort@cs.nmt.edu | ||
177 | * v0.22: added printing of status to ring dump | ||
178 | * v0.23: changed enet_statistics to net_devive_stats | ||
179 | * v0.90: added multicast filter | ||
180 | * added module support | ||
181 | * changed irq probe to new style | ||
182 | * added PCnetFast chip id | ||
183 | * added fix for receive stalls with Intel saturn chipsets | ||
184 | * added in-place rx skbs like in the tulip driver | ||
185 | * minor cleanups | ||
186 | * v0.91: added PCnetFast+ chip id | ||
187 | * back port to 2.0.x | ||
188 | * v1.00: added some stuff from Donald Becker's 2.0.34 version | ||
189 | * added support for byte counters in net_dev_stats | ||
190 | * v1.01: do ring dumps, only when debugging the driver | ||
191 | * increased the transmit timeout | ||
192 | * v1.02: fixed memory leak in pcnet32_init_ring() | ||
193 | * v1.10: workaround for stopped transmitter | ||
194 | * added port selection for modules | ||
195 | * detect special T1/E1 WAN card and setup port selection | ||
196 | * v1.11: fixed wrong checking of Tx errors | ||
197 | * v1.20: added check of return value kmalloc (cpeterso@cs.washington.edu) | ||
198 | * added save original kmalloc addr for freeing (mcr@solidum.com) | ||
199 | * added support for PCnetHome chip (joe@MIT.EDU) | ||
200 | * rewritten PCI card detection | ||
201 | * added dwio mode to get driver working on some PPC machines | ||
202 | * v1.21: added mii selection and mii ioctl | ||
203 | * v1.22: changed pci scanning code to make PPC people happy | ||
204 | * fixed switching to 32bit mode in pcnet32_open() (thanks | ||
205 | * to Michael Richard <mcr@solidum.com> for noticing this one) | ||
206 | * added sub vendor/device id matching (thanks again to | ||
207 | * Michael Richard <mcr@solidum.com>) | ||
208 | * added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>) | ||
209 | * v1.23 fixed small bug, when manual selecting MII speed/duplex | ||
210 | * v1.24 Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO | ||
211 | * underflows. Added tx_start_pt module parameter. Increased | ||
212 | * TX_RING_SIZE from 16 to 32. Added #ifdef'd code to use DXSUFLO | ||
213 | * for FAST[+] chipsets. <kaf@fc.hp.com> | ||
214 | * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com> | ||
215 | * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com> | ||
216 | * v1.26 Converted to pci_alloc_consistent, Jamey Hicks / George France | ||
217 | * <jamey@crl.dec.com> | ||
218 | * - Fixed a few bugs, related to running the controller in 32bit mode. | ||
219 | * 23 Oct, 2000. Carsten Langgaard, carstenl@mips.com | ||
220 | * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. | ||
221 | * v1.26p Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker | ||
222 | * v1.27 improved CSR/PROM address detection, lots of cleanups, | ||
223 | * new pcnet32vlb module option, HP-PARISC support, | ||
224 | * added module parameter descriptions, | ||
225 | * initial ethtool support - Helge Deller <deller@gmx.de> | ||
226 | * v1.27a Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp> | ||
227 | * use alloc_etherdev and register_netdev | ||
228 | * fix pci probe not increment cards_found | ||
229 | * FD auto negotiate error workaround for xSeries250 | ||
230 | * clean up and using new mii module | ||
231 | * v1.27b Sep 30 2002 Kent Yoder <yoder1@us.ibm.com> | ||
232 | * Added timer for cable connection state changes. | ||
233 | * v1.28 20 Feb 2004 Don Fry <brazilnut@us.ibm.com> | ||
234 | * Jon Mason <jonmason@us.ibm.com>, Chinmay Albal <albal@in.ibm.com> | ||
235 | * Now uses ethtool_ops, netif_msg_* and generic_mii_ioctl. | ||
236 | * Fixes bogus 'Bus master arbitration failure', pci_[un]map_single | ||
237 | * length errors, and transmit hangs. Cleans up after errors in open. | ||
238 | * Jim Lewis <jklewis@us.ibm.com> added ethernet loopback test. | ||
239 | * Thomas Munck Steenholdt <tmus@tmus.dk> non-mii ioctl corrections. | ||
240 | * v1.29 6 Apr 2004 Jim Lewis <jklewis@us.ibm.com> added physical | ||
241 | * identification code (blink led's) and register dump. | ||
242 | * Don Fry added timer for 971/972 so skbufs don't remain on tx ring | ||
243 | * forever. | ||
244 | * v1.30 18 May 2004 Don Fry removed timer and Last Transmit Interrupt | ||
245 | * (ltint) as they added complexity and didn't give good throughput. | ||
246 | * v1.30a 22 May 2004 Don Fry limit frames received during interrupt. | ||
247 | * v1.30b 24 May 2004 Don Fry fix bogus tx carrier errors with 79c973, | ||
248 | * assisted by Bruce Penrod <bmpenrod@endruntechnologies.com>. | ||
249 | * v1.30c 25 May 2004 Don Fry added netif_wake_queue after pcnet32_restart. | ||
250 | * v1.30d 01 Jun 2004 Don Fry discard oversize rx packets. | ||
251 | * v1.30e 11 Jun 2004 Don Fry recover after fifo error and rx hang. | ||
252 | * v1.30f 16 Jun 2004 Don Fry cleanup IRQ to allow 0 and 1 for PCI, | ||
253 | * expanding on suggestions from Ralf Baechle <ralf@linux-mips.org>, | ||
254 | * and Brian Murphy <brian@murphy.dk>. | ||
255 | * v1.30g 22 Jun 2004 Patrick Simmons <psimmons@flash.net> added option | ||
256 | * homepna for selecting HomePNA mode for PCNet/Home 79C978. | ||
257 | * v1.30h 24 Jun 2004 Don Fry correctly select auto, speed, duplex in bcr32. | ||
258 | * v1.30i 28 Jun 2004 Don Fry change to use module_param. | ||
259 | * v1.30j 29 Apr 2005 Don Fry fix skb/map leak with loopback test. | ||
260 | * v1.31 02 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> added set_ringparam(). | ||
261 | * v1.31a 12 Sep 2005 Hubert WS Lin <wslin@tw.ibm.c0m> set min ring size to 4 | ||
262 | * to allow loopback test to work unchanged. | ||
263 | * v1.31b 06 Oct 2005 Don Fry changed alloc_ring to show name of device | ||
264 | * if allocation fails | ||
265 | * v1.31c 01 Nov 2005 Don Fry Allied Telesyn 2700/2701 FX are 100Mbit only. | ||
266 | * Force 100Mbit FD if Auto (ASEL) is selected. | ||
267 | * See Bugzilla 2669 and 4551. | ||
268 | */ | ||
269 | |||
270 | |||
271 | /* | ||
272 | * Set the number of Tx and Rx buffers, using Log_2(# buffers). | 159 | * Set the number of Tx and Rx buffers, using Log_2(# buffers). |
273 | * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. | 160 | * Reasonable default values are 4 Tx buffers, and 16 Rx buffers. |
274 | * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). | 161 | * That translates to 2 (4 == 2^^2) and 4 (16 == 2^^4). |
@@ -303,42 +190,42 @@ static int homepna[MAX_UNITS]; | |||
303 | 190 | ||
304 | /* The PCNET32 Rx and Tx ring descriptors. */ | 191 | /* The PCNET32 Rx and Tx ring descriptors. */ |
305 | struct pcnet32_rx_head { | 192 | struct pcnet32_rx_head { |
306 | u32 base; | 193 | u32 base; |
307 | s16 buf_length; | 194 | s16 buf_length; |
308 | s16 status; | 195 | s16 status; |
309 | u32 msg_length; | 196 | u32 msg_length; |
310 | u32 reserved; | 197 | u32 reserved; |
311 | }; | 198 | }; |
312 | 199 | ||
313 | struct pcnet32_tx_head { | 200 | struct pcnet32_tx_head { |
314 | u32 base; | 201 | u32 base; |
315 | s16 length; | 202 | s16 length; |
316 | s16 status; | 203 | s16 status; |
317 | u32 misc; | 204 | u32 misc; |
318 | u32 reserved; | 205 | u32 reserved; |
319 | }; | 206 | }; |
320 | 207 | ||
321 | /* The PCNET32 32-Bit initialization block, described in databook. */ | 208 | /* The PCNET32 32-Bit initialization block, described in databook. */ |
322 | struct pcnet32_init_block { | 209 | struct pcnet32_init_block { |
323 | u16 mode; | 210 | u16 mode; |
324 | u16 tlen_rlen; | 211 | u16 tlen_rlen; |
325 | u8 phys_addr[6]; | 212 | u8 phys_addr[6]; |
326 | u16 reserved; | 213 | u16 reserved; |
327 | u32 filter[2]; | 214 | u32 filter[2]; |
328 | /* Receive and transmit ring base, along with extra bits. */ | 215 | /* Receive and transmit ring base, along with extra bits. */ |
329 | u32 rx_ring; | 216 | u32 rx_ring; |
330 | u32 tx_ring; | 217 | u32 tx_ring; |
331 | }; | 218 | }; |
332 | 219 | ||
333 | /* PCnet32 access functions */ | 220 | /* PCnet32 access functions */ |
334 | struct pcnet32_access { | 221 | struct pcnet32_access { |
335 | u16 (*read_csr)(unsigned long, int); | 222 | u16 (*read_csr) (unsigned long, int); |
336 | void (*write_csr)(unsigned long, int, u16); | 223 | void (*write_csr) (unsigned long, int, u16); |
337 | u16 (*read_bcr)(unsigned long, int); | 224 | u16 (*read_bcr) (unsigned long, int); |
338 | void (*write_bcr)(unsigned long, int, u16); | 225 | void (*write_bcr) (unsigned long, int, u16); |
339 | u16 (*read_rap)(unsigned long); | 226 | u16 (*read_rap) (unsigned long); |
340 | void (*write_rap)(unsigned long, u16); | 227 | void (*write_rap) (unsigned long, u16); |
341 | void (*reset)(unsigned long); | 228 | void (*reset) (unsigned long); |
342 | }; | 229 | }; |
343 | 230 | ||
344 | /* | 231 | /* |
@@ -346,760 +233,794 @@ struct pcnet32_access { | |||
346 | * so the structure should be allocated using pci_alloc_consistent(). | 233 | * so the structure should be allocated using pci_alloc_consistent(). |
347 | */ | 234 | */ |
348 | struct pcnet32_private { | 235 | struct pcnet32_private { |
349 | struct pcnet32_init_block init_block; | 236 | struct pcnet32_init_block init_block; |
350 | /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ | 237 | /* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */ |
351 | struct pcnet32_rx_head *rx_ring; | 238 | struct pcnet32_rx_head *rx_ring; |
352 | struct pcnet32_tx_head *tx_ring; | 239 | struct pcnet32_tx_head *tx_ring; |
353 | dma_addr_t dma_addr; /* DMA address of beginning of this | 240 | dma_addr_t dma_addr;/* DMA address of beginning of this |
354 | object, returned by | 241 | object, returned by pci_alloc_consistent */ |
355 | pci_alloc_consistent */ | 242 | struct pci_dev *pci_dev; |
356 | struct pci_dev *pci_dev; /* Pointer to the associated pci device | 243 | const char *name; |
357 | structure */ | 244 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ |
358 | const char *name; | 245 | struct sk_buff **tx_skbuff; |
359 | /* The saved address of a sent-in-place packet/buffer, for skfree(). */ | 246 | struct sk_buff **rx_skbuff; |
360 | struct sk_buff **tx_skbuff; | 247 | dma_addr_t *tx_dma_addr; |
361 | struct sk_buff **rx_skbuff; | 248 | dma_addr_t *rx_dma_addr; |
362 | dma_addr_t *tx_dma_addr; | 249 | struct pcnet32_access a; |
363 | dma_addr_t *rx_dma_addr; | 250 | spinlock_t lock; /* Guard lock */ |
364 | struct pcnet32_access a; | 251 | unsigned int cur_rx, cur_tx; /* The next free ring entry */ |
365 | spinlock_t lock; /* Guard lock */ | 252 | unsigned int rx_ring_size; /* current rx ring size */ |
366 | unsigned int cur_rx, cur_tx; /* The next free ring entry */ | 253 | unsigned int tx_ring_size; /* current tx ring size */ |
367 | unsigned int rx_ring_size; /* current rx ring size */ | 254 | unsigned int rx_mod_mask; /* rx ring modular mask */ |
368 | unsigned int tx_ring_size; /* current tx ring size */ | 255 | unsigned int tx_mod_mask; /* tx ring modular mask */ |
369 | unsigned int rx_mod_mask; /* rx ring modular mask */ | 256 | unsigned short rx_len_bits; |
370 | unsigned int tx_mod_mask; /* tx ring modular mask */ | 257 | unsigned short tx_len_bits; |
371 | unsigned short rx_len_bits; | 258 | dma_addr_t rx_ring_dma_addr; |
372 | unsigned short tx_len_bits; | 259 | dma_addr_t tx_ring_dma_addr; |
373 | dma_addr_t rx_ring_dma_addr; | 260 | unsigned int dirty_rx, /* ring entries to be freed. */ |
374 | dma_addr_t tx_ring_dma_addr; | 261 | dirty_tx; |
375 | unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */ | 262 | |
376 | struct net_device_stats stats; | 263 | struct net_device_stats stats; |
377 | char tx_full; | 264 | char tx_full; |
378 | int options; | 265 | char phycount; /* number of phys found */ |
379 | unsigned int shared_irq:1, /* shared irq possible */ | 266 | int options; |
380 | dxsuflo:1, /* disable transmit stop on uflo */ | 267 | unsigned int shared_irq:1, /* shared irq possible */ |
381 | mii:1; /* mii port available */ | 268 | dxsuflo:1, /* disable transmit stop on uflo */ |
382 | struct net_device *next; | 269 | mii:1; /* mii port available */ |
383 | struct mii_if_info mii_if; | 270 | struct net_device *next; |
384 | struct timer_list watchdog_timer; | 271 | struct mii_if_info mii_if; |
385 | struct timer_list blink_timer; | 272 | struct timer_list watchdog_timer; |
386 | u32 msg_enable; /* debug message level */ | 273 | struct timer_list blink_timer; |
274 | u32 msg_enable; /* debug message level */ | ||
275 | |||
276 | /* each bit indicates an available PHY */ | ||
277 | u32 phymask; | ||
387 | }; | 278 | }; |
388 | 279 | ||
389 | static void pcnet32_probe_vlbus(void); | 280 | static void pcnet32_probe_vlbus(void); |
390 | static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); | 281 | static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *); |
391 | static int pcnet32_probe1(unsigned long, int, struct pci_dev *); | 282 | static int pcnet32_probe1(unsigned long, int, struct pci_dev *); |
392 | static int pcnet32_open(struct net_device *); | 283 | static int pcnet32_open(struct net_device *); |
393 | static int pcnet32_init_ring(struct net_device *); | 284 | static int pcnet32_init_ring(struct net_device *); |
394 | static int pcnet32_start_xmit(struct sk_buff *, struct net_device *); | 285 | static int pcnet32_start_xmit(struct sk_buff *, struct net_device *); |
395 | static int pcnet32_rx(struct net_device *); | 286 | static int pcnet32_rx(struct net_device *); |
396 | static void pcnet32_tx_timeout (struct net_device *dev); | 287 | static void pcnet32_tx_timeout(struct net_device *dev); |
397 | static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *); | 288 | static irqreturn_t pcnet32_interrupt(int, void *, struct pt_regs *); |
398 | static int pcnet32_close(struct net_device *); | 289 | static int pcnet32_close(struct net_device *); |
399 | static struct net_device_stats *pcnet32_get_stats(struct net_device *); | 290 | static struct net_device_stats *pcnet32_get_stats(struct net_device *); |
400 | static void pcnet32_load_multicast(struct net_device *dev); | 291 | static void pcnet32_load_multicast(struct net_device *dev); |
401 | static void pcnet32_set_multicast_list(struct net_device *); | 292 | static void pcnet32_set_multicast_list(struct net_device *); |
402 | static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); | 293 | static int pcnet32_ioctl(struct net_device *, struct ifreq *, int); |
403 | static void pcnet32_watchdog(struct net_device *); | 294 | static void pcnet32_watchdog(struct net_device *); |
404 | static int mdio_read(struct net_device *dev, int phy_id, int reg_num); | 295 | static int mdio_read(struct net_device *dev, int phy_id, int reg_num); |
405 | static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val); | 296 | static void mdio_write(struct net_device *dev, int phy_id, int reg_num, |
297 | int val); | ||
406 | static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); | 298 | static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits); |
407 | static void pcnet32_ethtool_test(struct net_device *dev, | 299 | static void pcnet32_ethtool_test(struct net_device *dev, |
408 | struct ethtool_test *eth_test, u64 *data); | 300 | struct ethtool_test *eth_test, u64 * data); |
409 | static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1); | 301 | static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1); |
410 | static int pcnet32_phys_id(struct net_device *dev, u32 data); | 302 | static int pcnet32_phys_id(struct net_device *dev, u32 data); |
411 | static void pcnet32_led_blink_callback(struct net_device *dev); | 303 | static void pcnet32_led_blink_callback(struct net_device *dev); |
412 | static int pcnet32_get_regs_len(struct net_device *dev); | 304 | static int pcnet32_get_regs_len(struct net_device *dev); |
413 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, | 305 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
414 | void *ptr); | 306 | void *ptr); |
415 | static void pcnet32_purge_tx_ring(struct net_device *dev); | 307 | static void pcnet32_purge_tx_ring(struct net_device *dev); |
416 | static int pcnet32_alloc_ring(struct net_device *dev, char *name); | 308 | static int pcnet32_alloc_ring(struct net_device *dev, char *name); |
417 | static void pcnet32_free_ring(struct net_device *dev); | 309 | static void pcnet32_free_ring(struct net_device *dev); |
418 | 310 | static void pcnet32_check_media(struct net_device *dev, int verbose); | |
419 | 311 | ||
420 | enum pci_flags_bit { | 312 | enum pci_flags_bit { |
421 | PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4, | 313 | PCI_USES_IO = 1, PCI_USES_MEM = 2, PCI_USES_MASTER = 4, |
422 | PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3, | 314 | PCI_ADDR0 = 0x10 << 0, PCI_ADDR1 = 0x10 << 1, PCI_ADDR2 = |
315 | 0x10 << 2, PCI_ADDR3 = 0x10 << 3, | ||
423 | }; | 316 | }; |
424 | 317 | ||
425 | 318 | static u16 pcnet32_wio_read_csr(unsigned long addr, int index) | |
426 | static u16 pcnet32_wio_read_csr (unsigned long addr, int index) | ||
427 | { | 319 | { |
428 | outw (index, addr+PCNET32_WIO_RAP); | 320 | outw(index, addr + PCNET32_WIO_RAP); |
429 | return inw (addr+PCNET32_WIO_RDP); | 321 | return inw(addr + PCNET32_WIO_RDP); |
430 | } | 322 | } |
431 | 323 | ||
432 | static void pcnet32_wio_write_csr (unsigned long addr, int index, u16 val) | 324 | static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val) |
433 | { | 325 | { |
434 | outw (index, addr+PCNET32_WIO_RAP); | 326 | outw(index, addr + PCNET32_WIO_RAP); |
435 | outw (val, addr+PCNET32_WIO_RDP); | 327 | outw(val, addr + PCNET32_WIO_RDP); |
436 | } | 328 | } |
437 | 329 | ||
438 | static u16 pcnet32_wio_read_bcr (unsigned long addr, int index) | 330 | static u16 pcnet32_wio_read_bcr(unsigned long addr, int index) |
439 | { | 331 | { |
440 | outw (index, addr+PCNET32_WIO_RAP); | 332 | outw(index, addr + PCNET32_WIO_RAP); |
441 | return inw (addr+PCNET32_WIO_BDP); | 333 | return inw(addr + PCNET32_WIO_BDP); |
442 | } | 334 | } |
443 | 335 | ||
444 | static void pcnet32_wio_write_bcr (unsigned long addr, int index, u16 val) | 336 | static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val) |
445 | { | 337 | { |
446 | outw (index, addr+PCNET32_WIO_RAP); | 338 | outw(index, addr + PCNET32_WIO_RAP); |
447 | outw (val, addr+PCNET32_WIO_BDP); | 339 | outw(val, addr + PCNET32_WIO_BDP); |
448 | } | 340 | } |
449 | 341 | ||
450 | static u16 pcnet32_wio_read_rap (unsigned long addr) | 342 | static u16 pcnet32_wio_read_rap(unsigned long addr) |
451 | { | 343 | { |
452 | return inw (addr+PCNET32_WIO_RAP); | 344 | return inw(addr + PCNET32_WIO_RAP); |
453 | } | 345 | } |
454 | 346 | ||
455 | static void pcnet32_wio_write_rap (unsigned long addr, u16 val) | 347 | static void pcnet32_wio_write_rap(unsigned long addr, u16 val) |
456 | { | 348 | { |
457 | outw (val, addr+PCNET32_WIO_RAP); | 349 | outw(val, addr + PCNET32_WIO_RAP); |
458 | } | 350 | } |
459 | 351 | ||
460 | static void pcnet32_wio_reset (unsigned long addr) | 352 | static void pcnet32_wio_reset(unsigned long addr) |
461 | { | 353 | { |
462 | inw (addr+PCNET32_WIO_RESET); | 354 | inw(addr + PCNET32_WIO_RESET); |
463 | } | 355 | } |
464 | 356 | ||
465 | static int pcnet32_wio_check (unsigned long addr) | 357 | static int pcnet32_wio_check(unsigned long addr) |
466 | { | 358 | { |
467 | outw (88, addr+PCNET32_WIO_RAP); | 359 | outw(88, addr + PCNET32_WIO_RAP); |
468 | return (inw (addr+PCNET32_WIO_RAP) == 88); | 360 | return (inw(addr + PCNET32_WIO_RAP) == 88); |
469 | } | 361 | } |
470 | 362 | ||
471 | static struct pcnet32_access pcnet32_wio = { | 363 | static struct pcnet32_access pcnet32_wio = { |
472 | .read_csr = pcnet32_wio_read_csr, | 364 | .read_csr = pcnet32_wio_read_csr, |
473 | .write_csr = pcnet32_wio_write_csr, | 365 | .write_csr = pcnet32_wio_write_csr, |
474 | .read_bcr = pcnet32_wio_read_bcr, | 366 | .read_bcr = pcnet32_wio_read_bcr, |
475 | .write_bcr = pcnet32_wio_write_bcr, | 367 | .write_bcr = pcnet32_wio_write_bcr, |
476 | .read_rap = pcnet32_wio_read_rap, | 368 | .read_rap = pcnet32_wio_read_rap, |
477 | .write_rap = pcnet32_wio_write_rap, | 369 | .write_rap = pcnet32_wio_write_rap, |
478 | .reset = pcnet32_wio_reset | 370 | .reset = pcnet32_wio_reset |
479 | }; | 371 | }; |
480 | 372 | ||
481 | static u16 pcnet32_dwio_read_csr (unsigned long addr, int index) | 373 | static u16 pcnet32_dwio_read_csr(unsigned long addr, int index) |
482 | { | 374 | { |
483 | outl (index, addr+PCNET32_DWIO_RAP); | 375 | outl(index, addr + PCNET32_DWIO_RAP); |
484 | return (inl (addr+PCNET32_DWIO_RDP) & 0xffff); | 376 | return (inl(addr + PCNET32_DWIO_RDP) & 0xffff); |
485 | } | 377 | } |
486 | 378 | ||
487 | static void pcnet32_dwio_write_csr (unsigned long addr, int index, u16 val) | 379 | static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val) |
488 | { | 380 | { |
489 | outl (index, addr+PCNET32_DWIO_RAP); | 381 | outl(index, addr + PCNET32_DWIO_RAP); |
490 | outl (val, addr+PCNET32_DWIO_RDP); | 382 | outl(val, addr + PCNET32_DWIO_RDP); |
491 | } | 383 | } |
492 | 384 | ||
493 | static u16 pcnet32_dwio_read_bcr (unsigned long addr, int index) | 385 | static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index) |
494 | { | 386 | { |
495 | outl (index, addr+PCNET32_DWIO_RAP); | 387 | outl(index, addr + PCNET32_DWIO_RAP); |
496 | return (inl (addr+PCNET32_DWIO_BDP) & 0xffff); | 388 | return (inl(addr + PCNET32_DWIO_BDP) & 0xffff); |
497 | } | 389 | } |
498 | 390 | ||
499 | static void pcnet32_dwio_write_bcr (unsigned long addr, int index, u16 val) | 391 | static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val) |
500 | { | 392 | { |
501 | outl (index, addr+PCNET32_DWIO_RAP); | 393 | outl(index, addr + PCNET32_DWIO_RAP); |
502 | outl (val, addr+PCNET32_DWIO_BDP); | 394 | outl(val, addr + PCNET32_DWIO_BDP); |
503 | } | 395 | } |
504 | 396 | ||
505 | static u16 pcnet32_dwio_read_rap (unsigned long addr) | 397 | static u16 pcnet32_dwio_read_rap(unsigned long addr) |
506 | { | 398 | { |
507 | return (inl (addr+PCNET32_DWIO_RAP) & 0xffff); | 399 | return (inl(addr + PCNET32_DWIO_RAP) & 0xffff); |
508 | } | 400 | } |
509 | 401 | ||
510 | static void pcnet32_dwio_write_rap (unsigned long addr, u16 val) | 402 | static void pcnet32_dwio_write_rap(unsigned long addr, u16 val) |
511 | { | 403 | { |
512 | outl (val, addr+PCNET32_DWIO_RAP); | 404 | outl(val, addr + PCNET32_DWIO_RAP); |
513 | } | 405 | } |
514 | 406 | ||
515 | static void pcnet32_dwio_reset (unsigned long addr) | 407 | static void pcnet32_dwio_reset(unsigned long addr) |
516 | { | 408 | { |
517 | inl (addr+PCNET32_DWIO_RESET); | 409 | inl(addr + PCNET32_DWIO_RESET); |
518 | } | 410 | } |
519 | 411 | ||
520 | static int pcnet32_dwio_check (unsigned long addr) | 412 | static int pcnet32_dwio_check(unsigned long addr) |
521 | { | 413 | { |
522 | outl (88, addr+PCNET32_DWIO_RAP); | 414 | outl(88, addr + PCNET32_DWIO_RAP); |
523 | return ((inl (addr+PCNET32_DWIO_RAP) & 0xffff) == 88); | 415 | return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88); |
524 | } | 416 | } |
525 | 417 | ||
526 | static struct pcnet32_access pcnet32_dwio = { | 418 | static struct pcnet32_access pcnet32_dwio = { |
527 | .read_csr = pcnet32_dwio_read_csr, | 419 | .read_csr = pcnet32_dwio_read_csr, |
528 | .write_csr = pcnet32_dwio_write_csr, | 420 | .write_csr = pcnet32_dwio_write_csr, |
529 | .read_bcr = pcnet32_dwio_read_bcr, | 421 | .read_bcr = pcnet32_dwio_read_bcr, |
530 | .write_bcr = pcnet32_dwio_write_bcr, | 422 | .write_bcr = pcnet32_dwio_write_bcr, |
531 | .read_rap = pcnet32_dwio_read_rap, | 423 | .read_rap = pcnet32_dwio_read_rap, |
532 | .write_rap = pcnet32_dwio_write_rap, | 424 | .write_rap = pcnet32_dwio_write_rap, |
533 | .reset = pcnet32_dwio_reset | 425 | .reset = pcnet32_dwio_reset |
534 | }; | 426 | }; |
535 | 427 | ||
536 | #ifdef CONFIG_NET_POLL_CONTROLLER | 428 | #ifdef CONFIG_NET_POLL_CONTROLLER |
537 | static void pcnet32_poll_controller(struct net_device *dev) | 429 | static void pcnet32_poll_controller(struct net_device *dev) |
538 | { | 430 | { |
539 | disable_irq(dev->irq); | 431 | disable_irq(dev->irq); |
540 | pcnet32_interrupt(0, dev, NULL); | 432 | pcnet32_interrupt(0, dev, NULL); |
541 | enable_irq(dev->irq); | 433 | enable_irq(dev->irq); |
542 | } | 434 | } |
543 | #endif | 435 | #endif |
544 | 436 | ||
545 | |||
546 | static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 437 | static int pcnet32_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
547 | { | 438 | { |
548 | struct pcnet32_private *lp = dev->priv; | 439 | struct pcnet32_private *lp = dev->priv; |
549 | unsigned long flags; | 440 | unsigned long flags; |
550 | int r = -EOPNOTSUPP; | 441 | int r = -EOPNOTSUPP; |
551 | 442 | ||
552 | if (lp->mii) { | 443 | if (lp->mii) { |
553 | spin_lock_irqsave(&lp->lock, flags); | 444 | spin_lock_irqsave(&lp->lock, flags); |
554 | mii_ethtool_gset(&lp->mii_if, cmd); | 445 | mii_ethtool_gset(&lp->mii_if, cmd); |
555 | spin_unlock_irqrestore(&lp->lock, flags); | 446 | spin_unlock_irqrestore(&lp->lock, flags); |
556 | r = 0; | 447 | r = 0; |
557 | } | 448 | } |
558 | return r; | 449 | return r; |
559 | } | 450 | } |
560 | 451 | ||
561 | static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 452 | static int pcnet32_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
562 | { | 453 | { |
563 | struct pcnet32_private *lp = dev->priv; | 454 | struct pcnet32_private *lp = dev->priv; |
564 | unsigned long flags; | 455 | unsigned long flags; |
565 | int r = -EOPNOTSUPP; | 456 | int r = -EOPNOTSUPP; |
566 | 457 | ||
567 | if (lp->mii) { | 458 | if (lp->mii) { |
568 | spin_lock_irqsave(&lp->lock, flags); | 459 | spin_lock_irqsave(&lp->lock, flags); |
569 | r = mii_ethtool_sset(&lp->mii_if, cmd); | 460 | r = mii_ethtool_sset(&lp->mii_if, cmd); |
570 | spin_unlock_irqrestore(&lp->lock, flags); | 461 | spin_unlock_irqrestore(&lp->lock, flags); |
571 | } | 462 | } |
572 | return r; | 463 | return r; |
573 | } | 464 | } |
574 | 465 | ||
575 | static void pcnet32_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) | 466 | static void pcnet32_get_drvinfo(struct net_device *dev, |
467 | struct ethtool_drvinfo *info) | ||
576 | { | 468 | { |
577 | struct pcnet32_private *lp = dev->priv; | 469 | struct pcnet32_private *lp = dev->priv; |
578 | 470 | ||
579 | strcpy (info->driver, DRV_NAME); | 471 | strcpy(info->driver, DRV_NAME); |
580 | strcpy (info->version, DRV_VERSION); | 472 | strcpy(info->version, DRV_VERSION); |
581 | if (lp->pci_dev) | 473 | if (lp->pci_dev) |
582 | strcpy (info->bus_info, pci_name(lp->pci_dev)); | 474 | strcpy(info->bus_info, pci_name(lp->pci_dev)); |
583 | else | 475 | else |
584 | sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); | 476 | sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr); |
585 | } | 477 | } |
586 | 478 | ||
587 | static u32 pcnet32_get_link(struct net_device *dev) | 479 | static u32 pcnet32_get_link(struct net_device *dev) |
588 | { | 480 | { |
589 | struct pcnet32_private *lp = dev->priv; | 481 | struct pcnet32_private *lp = dev->priv; |
590 | unsigned long flags; | 482 | unsigned long flags; |
591 | int r; | 483 | int r; |
592 | |||
593 | spin_lock_irqsave(&lp->lock, flags); | ||
594 | if (lp->mii) { | ||
595 | r = mii_link_ok(&lp->mii_if); | ||
596 | } else { | ||
597 | ulong ioaddr = dev->base_addr; /* card base I/O address */ | ||
598 | r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); | ||
599 | } | ||
600 | spin_unlock_irqrestore(&lp->lock, flags); | ||
601 | 484 | ||
602 | return r; | 485 | spin_lock_irqsave(&lp->lock, flags); |
486 | if (lp->mii) { | ||
487 | r = mii_link_ok(&lp->mii_if); | ||
488 | } else { | ||
489 | ulong ioaddr = dev->base_addr; /* card base I/O address */ | ||
490 | r = (lp->a.read_bcr(ioaddr, 4) != 0xc0); | ||
491 | } | ||
492 | spin_unlock_irqrestore(&lp->lock, flags); | ||
493 | |||
494 | return r; | ||
603 | } | 495 | } |
604 | 496 | ||
605 | static u32 pcnet32_get_msglevel(struct net_device *dev) | 497 | static u32 pcnet32_get_msglevel(struct net_device *dev) |
606 | { | 498 | { |
607 | struct pcnet32_private *lp = dev->priv; | 499 | struct pcnet32_private *lp = dev->priv; |
608 | return lp->msg_enable; | 500 | return lp->msg_enable; |
609 | } | 501 | } |
610 | 502 | ||
611 | static void pcnet32_set_msglevel(struct net_device *dev, u32 value) | 503 | static void pcnet32_set_msglevel(struct net_device *dev, u32 value) |
612 | { | 504 | { |
613 | struct pcnet32_private *lp = dev->priv; | 505 | struct pcnet32_private *lp = dev->priv; |
614 | lp->msg_enable = value; | 506 | lp->msg_enable = value; |
615 | } | 507 | } |
616 | 508 | ||
617 | static int pcnet32_nway_reset(struct net_device *dev) | 509 | static int pcnet32_nway_reset(struct net_device *dev) |
618 | { | 510 | { |
619 | struct pcnet32_private *lp = dev->priv; | 511 | struct pcnet32_private *lp = dev->priv; |
620 | unsigned long flags; | 512 | unsigned long flags; |
621 | int r = -EOPNOTSUPP; | 513 | int r = -EOPNOTSUPP; |
622 | 514 | ||
623 | if (lp->mii) { | 515 | if (lp->mii) { |
624 | spin_lock_irqsave(&lp->lock, flags); | 516 | spin_lock_irqsave(&lp->lock, flags); |
625 | r = mii_nway_restart(&lp->mii_if); | 517 | r = mii_nway_restart(&lp->mii_if); |
626 | spin_unlock_irqrestore(&lp->lock, flags); | 518 | spin_unlock_irqrestore(&lp->lock, flags); |
627 | } | 519 | } |
628 | return r; | 520 | return r; |
629 | } | 521 | } |
630 | 522 | ||
631 | static void pcnet32_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 523 | static void pcnet32_get_ringparam(struct net_device *dev, |
524 | struct ethtool_ringparam *ering) | ||
632 | { | 525 | { |
633 | struct pcnet32_private *lp = dev->priv; | 526 | struct pcnet32_private *lp = dev->priv; |
634 | 527 | ||
635 | ering->tx_max_pending = TX_MAX_RING_SIZE - 1; | 528 | ering->tx_max_pending = TX_MAX_RING_SIZE - 1; |
636 | ering->tx_pending = lp->tx_ring_size - 1; | 529 | ering->tx_pending = lp->tx_ring_size - 1; |
637 | ering->rx_max_pending = RX_MAX_RING_SIZE - 1; | 530 | ering->rx_max_pending = RX_MAX_RING_SIZE - 1; |
638 | ering->rx_pending = lp->rx_ring_size - 1; | 531 | ering->rx_pending = lp->rx_ring_size - 1; |
639 | } | 532 | } |
640 | 533 | ||
641 | static int pcnet32_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering) | 534 | static int pcnet32_set_ringparam(struct net_device *dev, |
535 | struct ethtool_ringparam *ering) | ||
642 | { | 536 | { |
643 | struct pcnet32_private *lp = dev->priv; | 537 | struct pcnet32_private *lp = dev->priv; |
644 | unsigned long flags; | 538 | unsigned long flags; |
645 | int i; | 539 | int i; |
646 | 540 | ||
647 | if (ering->rx_mini_pending || ering->rx_jumbo_pending) | 541 | if (ering->rx_mini_pending || ering->rx_jumbo_pending) |
648 | return -EINVAL; | 542 | return -EINVAL; |
649 | 543 | ||
650 | if (netif_running(dev)) | 544 | if (netif_running(dev)) |
651 | pcnet32_close(dev); | 545 | pcnet32_close(dev); |
652 | 546 | ||
653 | spin_lock_irqsave(&lp->lock, flags); | 547 | spin_lock_irqsave(&lp->lock, flags); |
654 | pcnet32_free_ring(dev); | ||
655 | lp->tx_ring_size = min(ering->tx_pending, (unsigned int) TX_MAX_RING_SIZE); | ||
656 | lp->rx_ring_size = min(ering->rx_pending, (unsigned int) RX_MAX_RING_SIZE); | ||
657 | |||
658 | /* set the minimum ring size to 4, to allow the loopback test to work | ||
659 | * unchanged. | ||
660 | */ | ||
661 | for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { | ||
662 | if (lp->tx_ring_size <= (1 << i)) | ||
663 | break; | ||
664 | } | ||
665 | lp->tx_ring_size = (1 << i); | ||
666 | lp->tx_mod_mask = lp->tx_ring_size - 1; | ||
667 | lp->tx_len_bits = (i << 12); | ||
668 | |||
669 | for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { | ||
670 | if (lp->rx_ring_size <= (1 << i)) | ||
671 | break; | ||
672 | } | ||
673 | lp->rx_ring_size = (1 << i); | ||
674 | lp->rx_mod_mask = lp->rx_ring_size - 1; | ||
675 | lp->rx_len_bits = (i << 4); | ||
676 | |||
677 | if (pcnet32_alloc_ring(dev, dev->name)) { | ||
678 | pcnet32_free_ring(dev); | 548 | pcnet32_free_ring(dev); |
679 | spin_unlock_irqrestore(&lp->lock, flags); | 549 | lp->tx_ring_size = |
680 | return -ENOMEM; | 550 | min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE); |
681 | } | 551 | lp->rx_ring_size = |
552 | min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE); | ||
553 | |||
554 | /* set the minimum ring size to 4, to allow the loopback test to work | ||
555 | * unchanged. | ||
556 | */ | ||
557 | for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) { | ||
558 | if (lp->tx_ring_size <= (1 << i)) | ||
559 | break; | ||
560 | } | ||
561 | lp->tx_ring_size = (1 << i); | ||
562 | lp->tx_mod_mask = lp->tx_ring_size - 1; | ||
563 | lp->tx_len_bits = (i << 12); | ||
682 | 564 | ||
683 | spin_unlock_irqrestore(&lp->lock, flags); | 565 | for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) { |
566 | if (lp->rx_ring_size <= (1 << i)) | ||
567 | break; | ||
568 | } | ||
569 | lp->rx_ring_size = (1 << i); | ||
570 | lp->rx_mod_mask = lp->rx_ring_size - 1; | ||
571 | lp->rx_len_bits = (i << 4); | ||
572 | |||
573 | if (pcnet32_alloc_ring(dev, dev->name)) { | ||
574 | pcnet32_free_ring(dev); | ||
575 | spin_unlock_irqrestore(&lp->lock, flags); | ||
576 | return -ENOMEM; | ||
577 | } | ||
684 | 578 | ||
685 | if (pcnet32_debug & NETIF_MSG_DRV) | 579 | spin_unlock_irqrestore(&lp->lock, flags); |
686 | printk(KERN_INFO PFX "%s: Ring Param Settings: RX: %d, TX: %d\n", | ||
687 | dev->name, lp->rx_ring_size, lp->tx_ring_size); | ||
688 | 580 | ||
689 | if (netif_running(dev)) | 581 | if (pcnet32_debug & NETIF_MSG_DRV) |
690 | pcnet32_open(dev); | 582 | printk(KERN_INFO PFX |
583 | "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name, | ||
584 | lp->rx_ring_size, lp->tx_ring_size); | ||
691 | 585 | ||
692 | return 0; | 586 | if (netif_running(dev)) |
587 | pcnet32_open(dev); | ||
588 | |||
589 | return 0; | ||
693 | } | 590 | } |
694 | 591 | ||
695 | static void pcnet32_get_strings(struct net_device *dev, u32 stringset, u8 *data) | 592 | static void pcnet32_get_strings(struct net_device *dev, u32 stringset, |
593 | u8 * data) | ||
696 | { | 594 | { |
697 | memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); | 595 | memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test)); |
698 | } | 596 | } |
699 | 597 | ||
700 | static int pcnet32_self_test_count(struct net_device *dev) | 598 | static int pcnet32_self_test_count(struct net_device *dev) |
701 | { | 599 | { |
702 | return PCNET32_TEST_LEN; | 600 | return PCNET32_TEST_LEN; |
703 | } | 601 | } |
704 | 602 | ||
705 | static void pcnet32_ethtool_test(struct net_device *dev, | 603 | static void pcnet32_ethtool_test(struct net_device *dev, |
706 | struct ethtool_test *test, u64 *data) | 604 | struct ethtool_test *test, u64 * data) |
707 | { | 605 | { |
708 | struct pcnet32_private *lp = dev->priv; | 606 | struct pcnet32_private *lp = dev->priv; |
709 | int rc; | 607 | int rc; |
710 | 608 | ||
711 | if (test->flags == ETH_TEST_FL_OFFLINE) { | 609 | if (test->flags == ETH_TEST_FL_OFFLINE) { |
712 | rc = pcnet32_loopback_test(dev, data); | 610 | rc = pcnet32_loopback_test(dev, data); |
713 | if (rc) { | 611 | if (rc) { |
714 | if (netif_msg_hw(lp)) | 612 | if (netif_msg_hw(lp)) |
715 | printk(KERN_DEBUG "%s: Loopback test failed.\n", dev->name); | 613 | printk(KERN_DEBUG "%s: Loopback test failed.\n", |
716 | test->flags |= ETH_TEST_FL_FAILED; | 614 | dev->name); |
615 | test->flags |= ETH_TEST_FL_FAILED; | ||
616 | } else if (netif_msg_hw(lp)) | ||
617 | printk(KERN_DEBUG "%s: Loopback test passed.\n", | ||
618 | dev->name); | ||
717 | } else if (netif_msg_hw(lp)) | 619 | } else if (netif_msg_hw(lp)) |
718 | printk(KERN_DEBUG "%s: Loopback test passed.\n", dev->name); | 620 | printk(KERN_DEBUG |
719 | } else if (netif_msg_hw(lp)) | 621 | "%s: No tests to run (specify 'Offline' on ethtool).", |
720 | printk(KERN_DEBUG "%s: No tests to run (specify 'Offline' on ethtool).", dev->name); | 622 | dev->name); |
721 | } /* end pcnet32_ethtool_test */ | 623 | } /* end pcnet32_ethtool_test */ |
722 | 624 | ||
723 | static int pcnet32_loopback_test(struct net_device *dev, uint64_t *data1) | 625 | static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1) |
724 | { | 626 | { |
725 | struct pcnet32_private *lp = dev->priv; | 627 | struct pcnet32_private *lp = dev->priv; |
726 | struct pcnet32_access *a = &lp->a; /* access to registers */ | 628 | struct pcnet32_access *a = &lp->a; /* access to registers */ |
727 | ulong ioaddr = dev->base_addr; /* card base I/O address */ | 629 | ulong ioaddr = dev->base_addr; /* card base I/O address */ |
728 | struct sk_buff *skb; /* sk buff */ | 630 | struct sk_buff *skb; /* sk buff */ |
729 | int x, i; /* counters */ | 631 | int x, i; /* counters */ |
730 | int numbuffs = 4; /* number of TX/RX buffers and descs */ | 632 | int numbuffs = 4; /* number of TX/RX buffers and descs */ |
731 | u16 status = 0x8300; /* TX ring status */ | 633 | u16 status = 0x8300; /* TX ring status */ |
732 | u16 teststatus; /* test of ring status */ | 634 | u16 teststatus; /* test of ring status */ |
733 | int rc; /* return code */ | 635 | int rc; /* return code */ |
734 | int size; /* size of packets */ | 636 | int size; /* size of packets */ |
735 | unsigned char *packet; /* source packet data */ | 637 | unsigned char *packet; /* source packet data */ |
736 | static const int data_len = 60; /* length of source packets */ | 638 | static const int data_len = 60; /* length of source packets */ |
737 | unsigned long flags; | 639 | unsigned long flags; |
738 | unsigned long ticks; | 640 | unsigned long ticks; |
739 | 641 | ||
740 | *data1 = 1; /* status of test, default to fail */ | 642 | *data1 = 1; /* status of test, default to fail */ |
741 | rc = 1; /* default to fail */ | 643 | rc = 1; /* default to fail */ |
742 | 644 | ||
743 | if (netif_running(dev)) | 645 | if (netif_running(dev)) |
744 | pcnet32_close(dev); | 646 | pcnet32_close(dev); |
745 | 647 | ||
746 | spin_lock_irqsave(&lp->lock, flags); | 648 | spin_lock_irqsave(&lp->lock, flags); |
747 | 649 | ||
748 | /* Reset the PCNET32 */ | 650 | /* Reset the PCNET32 */ |
749 | lp->a.reset (ioaddr); | 651 | lp->a.reset(ioaddr); |
750 | 652 | ||
751 | /* switch pcnet32 to 32bit mode */ | 653 | /* switch pcnet32 to 32bit mode */ |
752 | lp->a.write_bcr (ioaddr, 20, 2); | 654 | lp->a.write_bcr(ioaddr, 20, 2); |
753 | 655 | ||
754 | lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); | 656 | lp->init_block.mode = |
755 | lp->init_block.filter[0] = 0; | 657 | le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); |
756 | lp->init_block.filter[1] = 0; | 658 | lp->init_block.filter[0] = 0; |
757 | 659 | lp->init_block.filter[1] = 0; | |
758 | /* purge & init rings but don't actually restart */ | 660 | |
759 | pcnet32_restart(dev, 0x0000); | 661 | /* purge & init rings but don't actually restart */ |
760 | 662 | pcnet32_restart(dev, 0x0000); | |
761 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ | 663 | |
762 | 664 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ | |
763 | /* Initialize Transmit buffers. */ | 665 | |
764 | size = data_len + 15; | 666 | /* Initialize Transmit buffers. */ |
765 | for (x=0; x<numbuffs; x++) { | 667 | size = data_len + 15; |
766 | if (!(skb = dev_alloc_skb(size))) { | 668 | for (x = 0; x < numbuffs; x++) { |
767 | if (netif_msg_hw(lp)) | 669 | if (!(skb = dev_alloc_skb(size))) { |
768 | printk(KERN_DEBUG "%s: Cannot allocate skb at line: %d!\n", | 670 | if (netif_msg_hw(lp)) |
769 | dev->name, __LINE__); | 671 | printk(KERN_DEBUG |
770 | goto clean_up; | 672 | "%s: Cannot allocate skb at line: %d!\n", |
771 | } else { | 673 | dev->name, __LINE__); |
772 | packet = skb->data; | 674 | goto clean_up; |
773 | skb_put(skb, size); /* create space for data */ | 675 | } else { |
774 | lp->tx_skbuff[x] = skb; | 676 | packet = skb->data; |
775 | lp->tx_ring[x].length = le16_to_cpu(-skb->len); | 677 | skb_put(skb, size); /* create space for data */ |
776 | lp->tx_ring[x].misc = 0; | 678 | lp->tx_skbuff[x] = skb; |
777 | 679 | lp->tx_ring[x].length = le16_to_cpu(-skb->len); | |
778 | /* put DA and SA into the skb */ | 680 | lp->tx_ring[x].misc = 0; |
779 | for (i=0; i<6; i++) | 681 | |
780 | *packet++ = dev->dev_addr[i]; | 682 | /* put DA and SA into the skb */ |
781 | for (i=0; i<6; i++) | 683 | for (i = 0; i < 6; i++) |
782 | *packet++ = dev->dev_addr[i]; | 684 | *packet++ = dev->dev_addr[i]; |
783 | /* type */ | 685 | for (i = 0; i < 6; i++) |
784 | *packet++ = 0x08; | 686 | *packet++ = dev->dev_addr[i]; |
785 | *packet++ = 0x06; | 687 | /* type */ |
786 | /* packet number */ | 688 | *packet++ = 0x08; |
787 | *packet++ = x; | 689 | *packet++ = 0x06; |
788 | /* fill packet with data */ | 690 | /* packet number */ |
789 | for (i=0; i<data_len; i++) | 691 | *packet++ = x; |
790 | *packet++ = i; | 692 | /* fill packet with data */ |
791 | 693 | for (i = 0; i < data_len; i++) | |
792 | lp->tx_dma_addr[x] = pci_map_single(lp->pci_dev, skb->data, | 694 | *packet++ = i; |
793 | skb->len, PCI_DMA_TODEVICE); | 695 | |
794 | lp->tx_ring[x].base = (u32)le32_to_cpu(lp->tx_dma_addr[x]); | 696 | lp->tx_dma_addr[x] = |
795 | wmb(); /* Make sure owner changes after all others are visible */ | 697 | pci_map_single(lp->pci_dev, skb->data, skb->len, |
796 | lp->tx_ring[x].status = le16_to_cpu(status); | 698 | PCI_DMA_TODEVICE); |
797 | } | 699 | lp->tx_ring[x].base = |
798 | } | 700 | (u32) le32_to_cpu(lp->tx_dma_addr[x]); |
799 | 701 | wmb(); /* Make sure owner changes after all others are visible */ | |
800 | x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ | 702 | lp->tx_ring[x].status = le16_to_cpu(status); |
801 | x = x | 0x0002; | 703 | } |
802 | a->write_bcr(ioaddr, 32, x); | 704 | } |
803 | 705 | ||
804 | lp->a.write_csr (ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ | 706 | x = a->read_bcr(ioaddr, 32); /* set internal loopback in BSR32 */ |
805 | 707 | x = x | 0x0002; | |
806 | teststatus = le16_to_cpu(0x8000); | 708 | a->write_bcr(ioaddr, 32, x); |
807 | lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ | 709 | |
808 | 710 | lp->a.write_csr(ioaddr, 15, 0x0044); /* set int loopback in CSR15 */ | |
809 | /* Check status of descriptors */ | 711 | |
810 | for (x=0; x<numbuffs; x++) { | 712 | teststatus = le16_to_cpu(0x8000); |
811 | ticks = 0; | 713 | lp->a.write_csr(ioaddr, 0, 0x0002); /* Set STRT bit */ |
812 | rmb(); | 714 | |
813 | while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { | 715 | /* Check status of descriptors */ |
814 | spin_unlock_irqrestore(&lp->lock, flags); | 716 | for (x = 0; x < numbuffs; x++) { |
815 | mdelay(1); | 717 | ticks = 0; |
816 | spin_lock_irqsave(&lp->lock, flags); | 718 | rmb(); |
817 | rmb(); | 719 | while ((lp->rx_ring[x].status & teststatus) && (ticks < 200)) { |
818 | ticks++; | 720 | spin_unlock_irqrestore(&lp->lock, flags); |
819 | } | 721 | mdelay(1); |
820 | if (ticks == 200) { | 722 | spin_lock_irqsave(&lp->lock, flags); |
821 | if (netif_msg_hw(lp)) | 723 | rmb(); |
822 | printk("%s: Desc %d failed to reset!\n",dev->name,x); | 724 | ticks++; |
823 | break; | 725 | } |
824 | } | 726 | if (ticks == 200) { |
825 | } | 727 | if (netif_msg_hw(lp)) |
826 | 728 | printk("%s: Desc %d failed to reset!\n", | |
827 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ | 729 | dev->name, x); |
828 | wmb(); | 730 | break; |
829 | if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { | 731 | } |
830 | printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); | 732 | } |
831 | 733 | ||
832 | for (x=0; x<numbuffs; x++) { | 734 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Set STOP bit */ |
833 | printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x); | 735 | wmb(); |
834 | skb = lp->rx_skbuff[x]; | 736 | if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) { |
835 | for (i=0; i<size; i++) { | 737 | printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name); |
836 | printk("%02x ", *(skb->data+i)); | 738 | |
837 | } | 739 | for (x = 0; x < numbuffs; x++) { |
838 | printk("\n"); | 740 | printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x); |
839 | } | 741 | skb = lp->rx_skbuff[x]; |
840 | } | 742 | for (i = 0; i < size; i++) { |
841 | 743 | printk("%02x ", *(skb->data + i)); | |
842 | x = 0; | 744 | } |
843 | rc = 0; | 745 | printk("\n"); |
844 | while (x<numbuffs && !rc) { | 746 | } |
845 | skb = lp->rx_skbuff[x]; | 747 | } |
846 | packet = lp->tx_skbuff[x]->data; | 748 | |
847 | for (i=0; i<size; i++) { | 749 | x = 0; |
848 | if (*(skb->data+i) != packet[i]) { | 750 | rc = 0; |
849 | if (netif_msg_hw(lp)) | 751 | while (x < numbuffs && !rc) { |
850 | printk(KERN_DEBUG "%s: Error in compare! %2x - %02x %02x\n", | 752 | skb = lp->rx_skbuff[x]; |
851 | dev->name, i, *(skb->data+i), packet[i]); | 753 | packet = lp->tx_skbuff[x]->data; |
852 | rc = 1; | 754 | for (i = 0; i < size; i++) { |
853 | break; | 755 | if (*(skb->data + i) != packet[i]) { |
854 | } | 756 | if (netif_msg_hw(lp)) |
757 | printk(KERN_DEBUG | ||
758 | "%s: Error in compare! %2x - %02x %02x\n", | ||
759 | dev->name, i, *(skb->data + i), | ||
760 | packet[i]); | ||
761 | rc = 1; | ||
762 | break; | ||
763 | } | ||
764 | } | ||
765 | x++; | ||
766 | } | ||
767 | if (!rc) { | ||
768 | *data1 = 0; | ||
855 | } | 769 | } |
856 | x++; | ||
857 | } | ||
858 | if (!rc) { | ||
859 | *data1 = 0; | ||
860 | } | ||
861 | 770 | ||
862 | clean_up: | 771 | clean_up: |
863 | pcnet32_purge_tx_ring(dev); | 772 | pcnet32_purge_tx_ring(dev); |
864 | x = a->read_csr(ioaddr, 15) & 0xFFFF; | 773 | x = a->read_csr(ioaddr, 15) & 0xFFFF; |
865 | a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ | 774 | a->write_csr(ioaddr, 15, (x & ~0x0044)); /* reset bits 6 and 2 */ |
866 | 775 | ||
867 | x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ | 776 | x = a->read_bcr(ioaddr, 32); /* reset internal loopback */ |
868 | x = x & ~0x0002; | 777 | x = x & ~0x0002; |
869 | a->write_bcr(ioaddr, 32, x); | 778 | a->write_bcr(ioaddr, 32, x); |
870 | 779 | ||
871 | spin_unlock_irqrestore(&lp->lock, flags); | 780 | spin_unlock_irqrestore(&lp->lock, flags); |
872 | 781 | ||
873 | if (netif_running(dev)) { | 782 | if (netif_running(dev)) { |
874 | pcnet32_open(dev); | 783 | pcnet32_open(dev); |
875 | } else { | 784 | } else { |
876 | lp->a.write_bcr (ioaddr, 20, 4); /* return to 16bit mode */ | 785 | lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */ |
877 | } | 786 | } |
878 | 787 | ||
879 | return(rc); | 788 | return (rc); |
880 | } /* end pcnet32_loopback_test */ | 789 | } /* end pcnet32_loopback_test */ |
881 | 790 | ||
882 | static void pcnet32_led_blink_callback(struct net_device *dev) | 791 | static void pcnet32_led_blink_callback(struct net_device *dev) |
883 | { | 792 | { |
884 | struct pcnet32_private *lp = dev->priv; | 793 | struct pcnet32_private *lp = dev->priv; |
885 | struct pcnet32_access *a = &lp->a; | 794 | struct pcnet32_access *a = &lp->a; |
886 | ulong ioaddr = dev->base_addr; | 795 | ulong ioaddr = dev->base_addr; |
887 | unsigned long flags; | 796 | unsigned long flags; |
888 | int i; | 797 | int i; |
889 | 798 | ||
890 | spin_lock_irqsave(&lp->lock, flags); | 799 | spin_lock_irqsave(&lp->lock, flags); |
891 | for (i=4; i<8; i++) { | 800 | for (i = 4; i < 8; i++) { |
892 | a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); | 801 | a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000); |
893 | } | 802 | } |
894 | spin_unlock_irqrestore(&lp->lock, flags); | 803 | spin_unlock_irqrestore(&lp->lock, flags); |
895 | 804 | ||
896 | mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); | 805 | mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT); |
897 | } | 806 | } |
898 | 807 | ||
899 | static int pcnet32_phys_id(struct net_device *dev, u32 data) | 808 | static int pcnet32_phys_id(struct net_device *dev, u32 data) |
900 | { | 809 | { |
901 | struct pcnet32_private *lp = dev->priv; | 810 | struct pcnet32_private *lp = dev->priv; |
902 | struct pcnet32_access *a = &lp->a; | 811 | struct pcnet32_access *a = &lp->a; |
903 | ulong ioaddr = dev->base_addr; | 812 | ulong ioaddr = dev->base_addr; |
904 | unsigned long flags; | 813 | unsigned long flags; |
905 | int i, regs[4]; | 814 | int i, regs[4]; |
906 | 815 | ||
907 | if (!lp->blink_timer.function) { | 816 | if (!lp->blink_timer.function) { |
908 | init_timer(&lp->blink_timer); | 817 | init_timer(&lp->blink_timer); |
909 | lp->blink_timer.function = (void *) pcnet32_led_blink_callback; | 818 | lp->blink_timer.function = (void *)pcnet32_led_blink_callback; |
910 | lp->blink_timer.data = (unsigned long) dev; | 819 | lp->blink_timer.data = (unsigned long)dev; |
911 | } | 820 | } |
912 | 821 | ||
913 | /* Save the current value of the bcrs */ | 822 | /* Save the current value of the bcrs */ |
914 | spin_lock_irqsave(&lp->lock, flags); | 823 | spin_lock_irqsave(&lp->lock, flags); |
915 | for (i=4; i<8; i++) { | 824 | for (i = 4; i < 8; i++) { |
916 | regs[i-4] = a->read_bcr(ioaddr, i); | 825 | regs[i - 4] = a->read_bcr(ioaddr, i); |
917 | } | 826 | } |
918 | spin_unlock_irqrestore(&lp->lock, flags); | 827 | spin_unlock_irqrestore(&lp->lock, flags); |
919 | 828 | ||
920 | mod_timer(&lp->blink_timer, jiffies); | 829 | mod_timer(&lp->blink_timer, jiffies); |
921 | set_current_state(TASK_INTERRUPTIBLE); | 830 | set_current_state(TASK_INTERRUPTIBLE); |
922 | 831 | ||
923 | if ((!data) || (data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))) | 832 | if ((!data) || (data > (u32) (MAX_SCHEDULE_TIMEOUT / HZ))) |
924 | data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); | 833 | data = (u32) (MAX_SCHEDULE_TIMEOUT / HZ); |
925 | 834 | ||
926 | msleep_interruptible(data * 1000); | 835 | msleep_interruptible(data * 1000); |
927 | del_timer_sync(&lp->blink_timer); | 836 | del_timer_sync(&lp->blink_timer); |
928 | 837 | ||
929 | /* Restore the original value of the bcrs */ | 838 | /* Restore the original value of the bcrs */ |
930 | spin_lock_irqsave(&lp->lock, flags); | 839 | spin_lock_irqsave(&lp->lock, flags); |
931 | for (i=4; i<8; i++) { | 840 | for (i = 4; i < 8; i++) { |
932 | a->write_bcr(ioaddr, i, regs[i-4]); | 841 | a->write_bcr(ioaddr, i, regs[i - 4]); |
933 | } | 842 | } |
934 | spin_unlock_irqrestore(&lp->lock, flags); | 843 | spin_unlock_irqrestore(&lp->lock, flags); |
935 | 844 | ||
936 | return 0; | 845 | return 0; |
937 | } | 846 | } |
938 | 847 | ||
848 | #define PCNET32_REGS_PER_PHY 32 | ||
849 | #define PCNET32_MAX_PHYS 32 | ||
939 | static int pcnet32_get_regs_len(struct net_device *dev) | 850 | static int pcnet32_get_regs_len(struct net_device *dev) |
940 | { | 851 | { |
941 | return(PCNET32_NUM_REGS * sizeof(u16)); | 852 | struct pcnet32_private *lp = dev->priv; |
853 | int j = lp->phycount * PCNET32_REGS_PER_PHY; | ||
854 | |||
855 | return ((PCNET32_NUM_REGS + j) * sizeof(u16)); | ||
942 | } | 856 | } |
943 | 857 | ||
944 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, | 858 | static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, |
945 | void *ptr) | 859 | void *ptr) |
946 | { | 860 | { |
947 | int i, csr0; | 861 | int i, csr0; |
948 | u16 *buff = ptr; | 862 | u16 *buff = ptr; |
949 | struct pcnet32_private *lp = dev->priv; | 863 | struct pcnet32_private *lp = dev->priv; |
950 | struct pcnet32_access *a = &lp->a; | 864 | struct pcnet32_access *a = &lp->a; |
951 | ulong ioaddr = dev->base_addr; | 865 | ulong ioaddr = dev->base_addr; |
952 | int ticks; | 866 | int ticks; |
953 | unsigned long flags; | 867 | unsigned long flags; |
954 | |||
955 | spin_lock_irqsave(&lp->lock, flags); | ||
956 | |||
957 | csr0 = a->read_csr(ioaddr, 0); | ||
958 | if (!(csr0 & 0x0004)) { /* If not stopped */ | ||
959 | /* set SUSPEND (SPND) - CSR5 bit 0 */ | ||
960 | a->write_csr(ioaddr, 5, 0x0001); | ||
961 | |||
962 | /* poll waiting for bit to be set */ | ||
963 | ticks = 0; | ||
964 | while (!(a->read_csr(ioaddr, 5) & 0x0001)) { | ||
965 | spin_unlock_irqrestore(&lp->lock, flags); | ||
966 | mdelay(1); | ||
967 | spin_lock_irqsave(&lp->lock, flags); | ||
968 | ticks++; | ||
969 | if (ticks > 200) { | ||
970 | if (netif_msg_hw(lp)) | ||
971 | printk(KERN_DEBUG "%s: Error getting into suspend!\n", | ||
972 | dev->name); | ||
973 | break; | ||
974 | } | ||
975 | } | ||
976 | } | ||
977 | 868 | ||
978 | /* read address PROM */ | 869 | spin_lock_irqsave(&lp->lock, flags); |
979 | for (i=0; i<16; i += 2) | ||
980 | *buff++ = inw(ioaddr + i); | ||
981 | 870 | ||
982 | /* read control and status registers */ | 871 | csr0 = a->read_csr(ioaddr, 0); |
983 | for (i=0; i<90; i++) { | 872 | if (!(csr0 & 0x0004)) { /* If not stopped */ |
984 | *buff++ = a->read_csr(ioaddr, i); | 873 | /* set SUSPEND (SPND) - CSR5 bit 0 */ |
985 | } | 874 | a->write_csr(ioaddr, 5, 0x0001); |
875 | |||
876 | /* poll waiting for bit to be set */ | ||
877 | ticks = 0; | ||
878 | while (!(a->read_csr(ioaddr, 5) & 0x0001)) { | ||
879 | spin_unlock_irqrestore(&lp->lock, flags); | ||
880 | mdelay(1); | ||
881 | spin_lock_irqsave(&lp->lock, flags); | ||
882 | ticks++; | ||
883 | if (ticks > 200) { | ||
884 | if (netif_msg_hw(lp)) | ||
885 | printk(KERN_DEBUG | ||
886 | "%s: Error getting into suspend!\n", | ||
887 | dev->name); | ||
888 | break; | ||
889 | } | ||
890 | } | ||
891 | } | ||
986 | 892 | ||
987 | *buff++ = a->read_csr(ioaddr, 112); | 893 | /* read address PROM */ |
988 | *buff++ = a->read_csr(ioaddr, 114); | 894 | for (i = 0; i < 16; i += 2) |
895 | *buff++ = inw(ioaddr + i); | ||
989 | 896 | ||
990 | /* read bus configuration registers */ | 897 | /* read control and status registers */ |
991 | for (i=0; i<30; i++) { | 898 | for (i = 0; i < 90; i++) { |
992 | *buff++ = a->read_bcr(ioaddr, i); | 899 | *buff++ = a->read_csr(ioaddr, i); |
993 | } | 900 | } |
994 | *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ | 901 | |
995 | for (i=31; i<36; i++) { | 902 | *buff++ = a->read_csr(ioaddr, 112); |
996 | *buff++ = a->read_bcr(ioaddr, i); | 903 | *buff++ = a->read_csr(ioaddr, 114); |
997 | } | ||
998 | 904 | ||
999 | /* read mii phy registers */ | 905 | /* read bus configuration registers */ |
1000 | if (lp->mii) { | 906 | for (i = 0; i < 30; i++) { |
1001 | for (i=0; i<32; i++) { | 907 | *buff++ = a->read_bcr(ioaddr, i); |
1002 | lp->a.write_bcr(ioaddr, 33, ((lp->mii_if.phy_id) << 5) | i); | 908 | } |
1003 | *buff++ = lp->a.read_bcr(ioaddr, 34); | 909 | *buff++ = 0; /* skip bcr30 so as not to hang 79C976 */ |
910 | for (i = 31; i < 36; i++) { | ||
911 | *buff++ = a->read_bcr(ioaddr, i); | ||
1004 | } | 912 | } |
1005 | } | ||
1006 | 913 | ||
1007 | if (!(csr0 & 0x0004)) { /* If not stopped */ | 914 | /* read mii phy registers */ |
1008 | /* clear SUSPEND (SPND) - CSR5 bit 0 */ | 915 | if (lp->mii) { |
1009 | a->write_csr(ioaddr, 5, 0x0000); | 916 | int j; |
1010 | } | 917 | for (j = 0; j < PCNET32_MAX_PHYS; j++) { |
918 | if (lp->phymask & (1 << j)) { | ||
919 | for (i = 0; i < PCNET32_REGS_PER_PHY; i++) { | ||
920 | lp->a.write_bcr(ioaddr, 33, | ||
921 | (j << 5) | i); | ||
922 | *buff++ = lp->a.read_bcr(ioaddr, 34); | ||
923 | } | ||
924 | } | ||
925 | } | ||
926 | } | ||
1011 | 927 | ||
1012 | i = buff - (u16 *)ptr; | 928 | if (!(csr0 & 0x0004)) { /* If not stopped */ |
1013 | for (; i < PCNET32_NUM_REGS; i++) | 929 | /* clear SUSPEND (SPND) - CSR5 bit 0 */ |
1014 | *buff++ = 0; | 930 | a->write_csr(ioaddr, 5, 0x0000); |
931 | } | ||
1015 | 932 | ||
1016 | spin_unlock_irqrestore(&lp->lock, flags); | 933 | spin_unlock_irqrestore(&lp->lock, flags); |
1017 | } | 934 | } |
1018 | 935 | ||
1019 | static struct ethtool_ops pcnet32_ethtool_ops = { | 936 | static struct ethtool_ops pcnet32_ethtool_ops = { |
1020 | .get_settings = pcnet32_get_settings, | 937 | .get_settings = pcnet32_get_settings, |
1021 | .set_settings = pcnet32_set_settings, | 938 | .set_settings = pcnet32_set_settings, |
1022 | .get_drvinfo = pcnet32_get_drvinfo, | 939 | .get_drvinfo = pcnet32_get_drvinfo, |
1023 | .get_msglevel = pcnet32_get_msglevel, | 940 | .get_msglevel = pcnet32_get_msglevel, |
1024 | .set_msglevel = pcnet32_set_msglevel, | 941 | .set_msglevel = pcnet32_set_msglevel, |
1025 | .nway_reset = pcnet32_nway_reset, | 942 | .nway_reset = pcnet32_nway_reset, |
1026 | .get_link = pcnet32_get_link, | 943 | .get_link = pcnet32_get_link, |
1027 | .get_ringparam = pcnet32_get_ringparam, | 944 | .get_ringparam = pcnet32_get_ringparam, |
1028 | .set_ringparam = pcnet32_set_ringparam, | 945 | .set_ringparam = pcnet32_set_ringparam, |
1029 | .get_tx_csum = ethtool_op_get_tx_csum, | 946 | .get_tx_csum = ethtool_op_get_tx_csum, |
1030 | .get_sg = ethtool_op_get_sg, | 947 | .get_sg = ethtool_op_get_sg, |
1031 | .get_tso = ethtool_op_get_tso, | 948 | .get_tso = ethtool_op_get_tso, |
1032 | .get_strings = pcnet32_get_strings, | 949 | .get_strings = pcnet32_get_strings, |
1033 | .self_test_count = pcnet32_self_test_count, | 950 | .self_test_count = pcnet32_self_test_count, |
1034 | .self_test = pcnet32_ethtool_test, | 951 | .self_test = pcnet32_ethtool_test, |
1035 | .phys_id = pcnet32_phys_id, | 952 | .phys_id = pcnet32_phys_id, |
1036 | .get_regs_len = pcnet32_get_regs_len, | 953 | .get_regs_len = pcnet32_get_regs_len, |
1037 | .get_regs = pcnet32_get_regs, | 954 | .get_regs = pcnet32_get_regs, |
1038 | .get_perm_addr = ethtool_op_get_perm_addr, | 955 | .get_perm_addr = ethtool_op_get_perm_addr, |
1039 | }; | 956 | }; |
1040 | 957 | ||
1041 | /* only probes for non-PCI devices, the rest are handled by | 958 | /* only probes for non-PCI devices, the rest are handled by |
1042 | * pci_register_driver via pcnet32_probe_pci */ | 959 | * pci_register_driver via pcnet32_probe_pci */ |
1043 | 960 | ||
1044 | static void __devinit | 961 | static void __devinit pcnet32_probe_vlbus(void) |
1045 | pcnet32_probe_vlbus(void) | ||
1046 | { | 962 | { |
1047 | unsigned int *port, ioaddr; | 963 | unsigned int *port, ioaddr; |
1048 | 964 | ||
1049 | /* search for PCnet32 VLB cards at known addresses */ | 965 | /* search for PCnet32 VLB cards at known addresses */ |
1050 | for (port = pcnet32_portlist; (ioaddr = *port); port++) { | 966 | for (port = pcnet32_portlist; (ioaddr = *port); port++) { |
1051 | if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { | 967 | if (request_region |
1052 | /* check if there is really a pcnet chip on that ioaddr */ | 968 | (ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_vlbus")) { |
1053 | if ((inb(ioaddr + 14) == 0x57) && (inb(ioaddr + 15) == 0x57)) { | 969 | /* check if there is really a pcnet chip on that ioaddr */ |
1054 | pcnet32_probe1(ioaddr, 0, NULL); | 970 | if ((inb(ioaddr + 14) == 0x57) |
1055 | } else { | 971 | && (inb(ioaddr + 15) == 0x57)) { |
1056 | release_region(ioaddr, PCNET32_TOTAL_SIZE); | 972 | pcnet32_probe1(ioaddr, 0, NULL); |
1057 | } | 973 | } else { |
1058 | } | 974 | release_region(ioaddr, PCNET32_TOTAL_SIZE); |
1059 | } | 975 | } |
976 | } | ||
977 | } | ||
1060 | } | 978 | } |
1061 | 979 | ||
1062 | |||
1063 | static int __devinit | 980 | static int __devinit |
1064 | pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) | 981 | pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) |
1065 | { | 982 | { |
1066 | unsigned long ioaddr; | 983 | unsigned long ioaddr; |
1067 | int err; | 984 | int err; |
1068 | 985 | ||
1069 | err = pci_enable_device(pdev); | 986 | err = pci_enable_device(pdev); |
1070 | if (err < 0) { | 987 | if (err < 0) { |
1071 | if (pcnet32_debug & NETIF_MSG_PROBE) | 988 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1072 | printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err); | 989 | printk(KERN_ERR PFX |
1073 | return err; | 990 | "failed to enable device -- err=%d\n", err); |
1074 | } | 991 | return err; |
1075 | pci_set_master(pdev); | 992 | } |
993 | pci_set_master(pdev); | ||
994 | |||
995 | ioaddr = pci_resource_start(pdev, 0); | ||
996 | if (!ioaddr) { | ||
997 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
998 | printk(KERN_ERR PFX | ||
999 | "card has no PCI IO resources, aborting\n"); | ||
1000 | return -ENODEV; | ||
1001 | } | ||
1076 | 1002 | ||
1077 | ioaddr = pci_resource_start (pdev, 0); | 1003 | if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { |
1078 | if (!ioaddr) { | 1004 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1079 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1005 | printk(KERN_ERR PFX |
1080 | printk (KERN_ERR PFX "card has no PCI IO resources, aborting\n"); | 1006 | "architecture does not support 32bit PCI busmaster DMA\n"); |
1081 | return -ENODEV; | 1007 | return -ENODEV; |
1082 | } | 1008 | } |
1009 | if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == | ||
1010 | NULL) { | ||
1011 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1012 | printk(KERN_ERR PFX | ||
1013 | "io address range already allocated\n"); | ||
1014 | return -EBUSY; | ||
1015 | } | ||
1083 | 1016 | ||
1084 | if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) { | 1017 | err = pcnet32_probe1(ioaddr, 1, pdev); |
1085 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1018 | if (err < 0) { |
1086 | printk(KERN_ERR PFX "architecture does not support 32bit PCI busmaster DMA\n"); | 1019 | pci_disable_device(pdev); |
1087 | return -ENODEV; | 1020 | } |
1088 | } | 1021 | return err; |
1089 | if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") == NULL) { | ||
1090 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1091 | printk(KERN_ERR PFX "io address range already allocated\n"); | ||
1092 | return -EBUSY; | ||
1093 | } | ||
1094 | |||
1095 | err = pcnet32_probe1(ioaddr, 1, pdev); | ||
1096 | if (err < 0) { | ||
1097 | pci_disable_device(pdev); | ||
1098 | } | ||
1099 | return err; | ||
1100 | } | 1022 | } |
1101 | 1023 | ||
1102 | |||
1103 | /* pcnet32_probe1 | 1024 | /* pcnet32_probe1 |
1104 | * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. | 1025 | * Called from both pcnet32_probe_vlbus and pcnet_probe_pci. |
1105 | * pdev will be NULL when called from pcnet32_probe_vlbus. | 1026 | * pdev will be NULL when called from pcnet32_probe_vlbus. |
@@ -1107,630 +1028,764 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1107 | static int __devinit | 1028 | static int __devinit |
1108 | pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) | 1029 | pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev) |
1109 | { | 1030 | { |
1110 | struct pcnet32_private *lp; | 1031 | struct pcnet32_private *lp; |
1111 | dma_addr_t lp_dma_addr; | 1032 | dma_addr_t lp_dma_addr; |
1112 | int i, media; | 1033 | int i, media; |
1113 | int fdx, mii, fset, dxsuflo; | 1034 | int fdx, mii, fset, dxsuflo; |
1114 | int chip_version; | 1035 | int chip_version; |
1115 | char *chipname; | 1036 | char *chipname; |
1116 | struct net_device *dev; | 1037 | struct net_device *dev; |
1117 | struct pcnet32_access *a = NULL; | 1038 | struct pcnet32_access *a = NULL; |
1118 | u8 promaddr[6]; | 1039 | u8 promaddr[6]; |
1119 | int ret = -ENODEV; | 1040 | int ret = -ENODEV; |
1120 | 1041 | ||
1121 | /* reset the chip */ | 1042 | /* reset the chip */ |
1122 | pcnet32_wio_reset(ioaddr); | 1043 | pcnet32_wio_reset(ioaddr); |
1123 | 1044 | ||
1124 | /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ | 1045 | /* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */ |
1125 | if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { | 1046 | if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) { |
1126 | a = &pcnet32_wio; | 1047 | a = &pcnet32_wio; |
1127 | } else { | 1048 | } else { |
1128 | pcnet32_dwio_reset(ioaddr); | 1049 | pcnet32_dwio_reset(ioaddr); |
1129 | if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 && pcnet32_dwio_check(ioaddr)) { | 1050 | if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 |
1130 | a = &pcnet32_dwio; | 1051 | && pcnet32_dwio_check(ioaddr)) { |
1131 | } else | 1052 | a = &pcnet32_dwio; |
1132 | goto err_release_region; | 1053 | } else |
1133 | } | 1054 | goto err_release_region; |
1134 | 1055 | } | |
1135 | chip_version = a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr,89) << 16); | 1056 | |
1136 | if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) | 1057 | chip_version = |
1137 | printk(KERN_INFO " PCnet chip version is %#x.\n", chip_version); | 1058 | a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16); |
1138 | if ((chip_version & 0xfff) != 0x003) { | 1059 | if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW)) |
1139 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1060 | printk(KERN_INFO " PCnet chip version is %#x.\n", |
1140 | printk(KERN_INFO PFX "Unsupported chip version.\n"); | 1061 | chip_version); |
1141 | goto err_release_region; | 1062 | if ((chip_version & 0xfff) != 0x003) { |
1142 | } | 1063 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1143 | 1064 | printk(KERN_INFO PFX "Unsupported chip version.\n"); | |
1144 | /* initialize variables */ | 1065 | goto err_release_region; |
1145 | fdx = mii = fset = dxsuflo = 0; | 1066 | } |
1146 | chip_version = (chip_version >> 12) & 0xffff; | 1067 | |
1147 | 1068 | /* initialize variables */ | |
1148 | switch (chip_version) { | 1069 | fdx = mii = fset = dxsuflo = 0; |
1149 | case 0x2420: | 1070 | chip_version = (chip_version >> 12) & 0xffff; |
1150 | chipname = "PCnet/PCI 79C970"; /* PCI */ | 1071 | |
1151 | break; | 1072 | switch (chip_version) { |
1152 | case 0x2430: | 1073 | case 0x2420: |
1153 | if (shared) | 1074 | chipname = "PCnet/PCI 79C970"; /* PCI */ |
1154 | chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ | 1075 | break; |
1155 | else | 1076 | case 0x2430: |
1156 | chipname = "PCnet/32 79C965"; /* 486/VL bus */ | 1077 | if (shared) |
1157 | break; | 1078 | chipname = "PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */ |
1158 | case 0x2621: | 1079 | else |
1159 | chipname = "PCnet/PCI II 79C970A"; /* PCI */ | 1080 | chipname = "PCnet/32 79C965"; /* 486/VL bus */ |
1160 | fdx = 1; | 1081 | break; |
1161 | break; | 1082 | case 0x2621: |
1162 | case 0x2623: | 1083 | chipname = "PCnet/PCI II 79C970A"; /* PCI */ |
1163 | chipname = "PCnet/FAST 79C971"; /* PCI */ | 1084 | fdx = 1; |
1164 | fdx = 1; mii = 1; fset = 1; | 1085 | break; |
1165 | break; | 1086 | case 0x2623: |
1166 | case 0x2624: | 1087 | chipname = "PCnet/FAST 79C971"; /* PCI */ |
1167 | chipname = "PCnet/FAST+ 79C972"; /* PCI */ | 1088 | fdx = 1; |
1168 | fdx = 1; mii = 1; fset = 1; | 1089 | mii = 1; |
1169 | break; | 1090 | fset = 1; |
1170 | case 0x2625: | 1091 | break; |
1171 | chipname = "PCnet/FAST III 79C973"; /* PCI */ | 1092 | case 0x2624: |
1172 | fdx = 1; mii = 1; | 1093 | chipname = "PCnet/FAST+ 79C972"; /* PCI */ |
1173 | break; | 1094 | fdx = 1; |
1174 | case 0x2626: | 1095 | mii = 1; |
1175 | chipname = "PCnet/Home 79C978"; /* PCI */ | 1096 | fset = 1; |
1176 | fdx = 1; | 1097 | break; |
1098 | case 0x2625: | ||
1099 | chipname = "PCnet/FAST III 79C973"; /* PCI */ | ||
1100 | fdx = 1; | ||
1101 | mii = 1; | ||
1102 | break; | ||
1103 | case 0x2626: | ||
1104 | chipname = "PCnet/Home 79C978"; /* PCI */ | ||
1105 | fdx = 1; | ||
1106 | /* | ||
1107 | * This is based on specs published at www.amd.com. This section | ||
1108 | * assumes that a card with a 79C978 wants to go into standard | ||
1109 | * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, | ||
1110 | * and the module option homepna=1 can select this instead. | ||
1111 | */ | ||
1112 | media = a->read_bcr(ioaddr, 49); | ||
1113 | media &= ~3; /* default to 10Mb ethernet */ | ||
1114 | if (cards_found < MAX_UNITS && homepna[cards_found]) | ||
1115 | media |= 1; /* switch to home wiring mode */ | ||
1116 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1117 | printk(KERN_DEBUG PFX "media set to %sMbit mode.\n", | ||
1118 | (media & 1) ? "1" : "10"); | ||
1119 | a->write_bcr(ioaddr, 49, media); | ||
1120 | break; | ||
1121 | case 0x2627: | ||
1122 | chipname = "PCnet/FAST III 79C975"; /* PCI */ | ||
1123 | fdx = 1; | ||
1124 | mii = 1; | ||
1125 | break; | ||
1126 | case 0x2628: | ||
1127 | chipname = "PCnet/PRO 79C976"; | ||
1128 | fdx = 1; | ||
1129 | mii = 1; | ||
1130 | break; | ||
1131 | default: | ||
1132 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1133 | printk(KERN_INFO PFX | ||
1134 | "PCnet version %#x, no PCnet32 chip.\n", | ||
1135 | chip_version); | ||
1136 | goto err_release_region; | ||
1137 | } | ||
1138 | |||
1177 | /* | 1139 | /* |
1178 | * This is based on specs published at www.amd.com. This section | 1140 | * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit |
1179 | * assumes that a card with a 79C978 wants to go into standard | 1141 | * starting until the packet is loaded. Strike one for reliability, lose |
1180 | * ethernet mode. The 79C978 can also go into 1Mb HomePNA mode, | 1142 | * one for latency - although on PCI this isnt a big loss. Older chips |
1181 | * and the module option homepna=1 can select this instead. | 1143 | * have FIFO's smaller than a packet, so you can't do this. |
1144 | * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. | ||
1182 | */ | 1145 | */ |
1183 | media = a->read_bcr(ioaddr, 49); | 1146 | |
1184 | media &= ~3; /* default to 10Mb ethernet */ | 1147 | if (fset) { |
1185 | if (cards_found < MAX_UNITS && homepna[cards_found]) | 1148 | a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); |
1186 | media |= 1; /* switch to home wiring mode */ | 1149 | a->write_csr(ioaddr, 80, |
1187 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1150 | (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); |
1188 | printk(KERN_DEBUG PFX "media set to %sMbit mode.\n", | 1151 | dxsuflo = 1; |
1189 | (media & 1) ? "1" : "10"); | 1152 | } |
1190 | a->write_bcr(ioaddr, 49, media); | 1153 | |
1191 | break; | 1154 | dev = alloc_etherdev(0); |
1192 | case 0x2627: | 1155 | if (!dev) { |
1193 | chipname = "PCnet/FAST III 79C975"; /* PCI */ | 1156 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1194 | fdx = 1; mii = 1; | 1157 | printk(KERN_ERR PFX "Memory allocation failed.\n"); |
1195 | break; | 1158 | ret = -ENOMEM; |
1196 | case 0x2628: | 1159 | goto err_release_region; |
1197 | chipname = "PCnet/PRO 79C976"; | 1160 | } |
1198 | fdx = 1; mii = 1; | 1161 | SET_NETDEV_DEV(dev, &pdev->dev); |
1199 | break; | 1162 | |
1200 | default: | ||
1201 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1202 | printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n", | ||
1203 | chip_version); | ||
1204 | goto err_release_region; | ||
1205 | } | ||
1206 | |||
1207 | /* | ||
1208 | * On selected chips turn on the BCR18:NOUFLO bit. This stops transmit | ||
1209 | * starting until the packet is loaded. Strike one for reliability, lose | ||
1210 | * one for latency - although on PCI this isnt a big loss. Older chips | ||
1211 | * have FIFO's smaller than a packet, so you can't do this. | ||
1212 | * Turn on BCR18:BurstRdEn and BCR18:BurstWrEn. | ||
1213 | */ | ||
1214 | |||
1215 | if (fset) { | ||
1216 | a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0860)); | ||
1217 | a->write_csr(ioaddr, 80, (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00); | ||
1218 | dxsuflo = 1; | ||
1219 | } | ||
1220 | |||
1221 | dev = alloc_etherdev(0); | ||
1222 | if (!dev) { | ||
1223 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1163 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1224 | printk(KERN_ERR PFX "Memory allocation failed.\n"); | 1164 | printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); |
1225 | ret = -ENOMEM; | 1165 | |
1226 | goto err_release_region; | 1166 | /* In most chips, after a chip reset, the ethernet address is read from the |
1227 | } | 1167 | * station address PROM at the base address and programmed into the |
1228 | SET_NETDEV_DEV(dev, &pdev->dev); | 1168 | * "Physical Address Registers" CSR12-14. |
1229 | 1169 | * As a precautionary measure, we read the PROM values and complain if | |
1230 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1170 | * they disagree with the CSRs. Either way, we use the CSR values, and |
1231 | printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr); | 1171 | * double check that they are valid. |
1232 | 1172 | */ | |
1233 | /* In most chips, after a chip reset, the ethernet address is read from the | 1173 | for (i = 0; i < 3; i++) { |
1234 | * station address PROM at the base address and programmed into the | 1174 | unsigned int val; |
1235 | * "Physical Address Registers" CSR12-14. | 1175 | val = a->read_csr(ioaddr, i + 12) & 0x0ffff; |
1236 | * As a precautionary measure, we read the PROM values and complain if | 1176 | /* There may be endianness issues here. */ |
1237 | * they disagree with the CSRs. Either way, we use the CSR values, and | 1177 | dev->dev_addr[2 * i] = val & 0x0ff; |
1238 | * double check that they are valid. | 1178 | dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff; |
1239 | */ | 1179 | } |
1240 | for (i = 0; i < 3; i++) { | 1180 | |
1241 | unsigned int val; | 1181 | /* read PROM address and compare with CSR address */ |
1242 | val = a->read_csr(ioaddr, i+12) & 0x0ffff; | ||
1243 | /* There may be endianness issues here. */ | ||
1244 | dev->dev_addr[2*i] = val & 0x0ff; | ||
1245 | dev->dev_addr[2*i+1] = (val >> 8) & 0x0ff; | ||
1246 | } | ||
1247 | |||
1248 | /* read PROM address and compare with CSR address */ | ||
1249 | for (i = 0; i < 6; i++) | ||
1250 | promaddr[i] = inb(ioaddr + i); | ||
1251 | |||
1252 | if (memcmp(promaddr, dev->dev_addr, 6) | ||
1253 | || !is_valid_ether_addr(dev->dev_addr)) { | ||
1254 | if (is_valid_ether_addr(promaddr)) { | ||
1255 | if (pcnet32_debug & NETIF_MSG_PROBE) { | ||
1256 | printk(" warning: CSR address invalid,\n"); | ||
1257 | printk(KERN_INFO " using instead PROM address of"); | ||
1258 | } | ||
1259 | memcpy(dev->dev_addr, promaddr, 6); | ||
1260 | } | ||
1261 | } | ||
1262 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | ||
1263 | |||
1264 | /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ | ||
1265 | if (!is_valid_ether_addr(dev->perm_addr)) | ||
1266 | memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); | ||
1267 | |||
1268 | if (pcnet32_debug & NETIF_MSG_PROBE) { | ||
1269 | for (i = 0; i < 6; i++) | 1182 | for (i = 0; i < 6; i++) |
1270 | printk(" %2.2x", dev->dev_addr[i]); | 1183 | promaddr[i] = inb(ioaddr + i); |
1271 | 1184 | ||
1272 | /* Version 0x2623 and 0x2624 */ | 1185 | if (memcmp(promaddr, dev->dev_addr, 6) |
1273 | if (((chip_version + 1) & 0xfffe) == 0x2624) { | 1186 | || !is_valid_ether_addr(dev->dev_addr)) { |
1274 | i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ | 1187 | if (is_valid_ether_addr(promaddr)) { |
1275 | printk("\n" KERN_INFO " tx_start_pt(0x%04x):",i); | 1188 | if (pcnet32_debug & NETIF_MSG_PROBE) { |
1276 | switch(i>>10) { | 1189 | printk(" warning: CSR address invalid,\n"); |
1277 | case 0: printk(" 20 bytes,"); break; | 1190 | printk(KERN_INFO |
1278 | case 1: printk(" 64 bytes,"); break; | 1191 | " using instead PROM address of"); |
1279 | case 2: printk(" 128 bytes,"); break; | 1192 | } |
1280 | case 3: printk("~220 bytes,"); break; | 1193 | memcpy(dev->dev_addr, promaddr, 6); |
1281 | } | 1194 | } |
1282 | i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ | 1195 | } |
1283 | printk(" BCR18(%x):",i&0xffff); | 1196 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
1284 | if (i & (1<<5)) printk("BurstWrEn "); | 1197 | |
1285 | if (i & (1<<6)) printk("BurstRdEn "); | 1198 | /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ |
1286 | if (i & (1<<7)) printk("DWordIO "); | 1199 | if (!is_valid_ether_addr(dev->perm_addr)) |
1287 | if (i & (1<<11)) printk("NoUFlow "); | 1200 | memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); |
1288 | i = a->read_bcr(ioaddr, 25); | 1201 | |
1289 | printk("\n" KERN_INFO " SRAMSIZE=0x%04x,",i<<8); | 1202 | if (pcnet32_debug & NETIF_MSG_PROBE) { |
1290 | i = a->read_bcr(ioaddr, 26); | 1203 | for (i = 0; i < 6; i++) |
1291 | printk(" SRAM_BND=0x%04x,",i<<8); | 1204 | printk(" %2.2x", dev->dev_addr[i]); |
1292 | i = a->read_bcr(ioaddr, 27); | 1205 | |
1293 | if (i & (1<<14)) printk("LowLatRx"); | 1206 | /* Version 0x2623 and 0x2624 */ |
1294 | } | 1207 | if (((chip_version + 1) & 0xfffe) == 0x2624) { |
1295 | } | 1208 | i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */ |
1296 | 1209 | printk("\n" KERN_INFO " tx_start_pt(0x%04x):", i); | |
1297 | dev->base_addr = ioaddr; | 1210 | switch (i >> 10) { |
1298 | /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ | 1211 | case 0: |
1299 | if ((lp = pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) { | 1212 | printk(" 20 bytes,"); |
1300 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1213 | break; |
1301 | printk(KERN_ERR PFX "Consistent memory allocation failed.\n"); | 1214 | case 1: |
1302 | ret = -ENOMEM; | 1215 | printk(" 64 bytes,"); |
1303 | goto err_free_netdev; | 1216 | break; |
1304 | } | 1217 | case 2: |
1305 | 1218 | printk(" 128 bytes,"); | |
1306 | memset(lp, 0, sizeof(*lp)); | 1219 | break; |
1307 | lp->dma_addr = lp_dma_addr; | 1220 | case 3: |
1308 | lp->pci_dev = pdev; | 1221 | printk("~220 bytes,"); |
1309 | 1222 | break; | |
1310 | spin_lock_init(&lp->lock); | 1223 | } |
1311 | 1224 | i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */ | |
1312 | SET_MODULE_OWNER(dev); | 1225 | printk(" BCR18(%x):", i & 0xffff); |
1313 | SET_NETDEV_DEV(dev, &pdev->dev); | 1226 | if (i & (1 << 5)) |
1314 | dev->priv = lp; | 1227 | printk("BurstWrEn "); |
1315 | lp->name = chipname; | 1228 | if (i & (1 << 6)) |
1316 | lp->shared_irq = shared; | 1229 | printk("BurstRdEn "); |
1317 | lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ | 1230 | if (i & (1 << 7)) |
1318 | lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ | 1231 | printk("DWordIO "); |
1319 | lp->tx_mod_mask = lp->tx_ring_size - 1; | 1232 | if (i & (1 << 11)) |
1320 | lp->rx_mod_mask = lp->rx_ring_size - 1; | 1233 | printk("NoUFlow "); |
1321 | lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); | 1234 | i = a->read_bcr(ioaddr, 25); |
1322 | lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); | 1235 | printk("\n" KERN_INFO " SRAMSIZE=0x%04x,", i << 8); |
1323 | lp->mii_if.full_duplex = fdx; | 1236 | i = a->read_bcr(ioaddr, 26); |
1324 | lp->mii_if.phy_id_mask = 0x1f; | 1237 | printk(" SRAM_BND=0x%04x,", i << 8); |
1325 | lp->mii_if.reg_num_mask = 0x1f; | 1238 | i = a->read_bcr(ioaddr, 27); |
1326 | lp->dxsuflo = dxsuflo; | 1239 | if (i & (1 << 14)) |
1327 | lp->mii = mii; | 1240 | printk("LowLatRx"); |
1328 | lp->msg_enable = pcnet32_debug; | 1241 | } |
1329 | if ((cards_found >= MAX_UNITS) || (options[cards_found] > sizeof(options_mapping))) | 1242 | } |
1330 | lp->options = PCNET32_PORT_ASEL; | 1243 | |
1331 | else | 1244 | dev->base_addr = ioaddr; |
1332 | lp->options = options_mapping[options[cards_found]]; | 1245 | /* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */ |
1333 | lp->mii_if.dev = dev; | 1246 | if ((lp = |
1334 | lp->mii_if.mdio_read = mdio_read; | 1247 | pci_alloc_consistent(pdev, sizeof(*lp), &lp_dma_addr)) == NULL) { |
1335 | lp->mii_if.mdio_write = mdio_write; | 1248 | if (pcnet32_debug & NETIF_MSG_PROBE) |
1336 | 1249 | printk(KERN_ERR PFX | |
1337 | if (fdx && !(lp->options & PCNET32_PORT_ASEL) && | 1250 | "Consistent memory allocation failed.\n"); |
1338 | ((cards_found>=MAX_UNITS) || full_duplex[cards_found])) | 1251 | ret = -ENOMEM; |
1339 | lp->options |= PCNET32_PORT_FD; | 1252 | goto err_free_netdev; |
1340 | 1253 | } | |
1341 | if (!a) { | 1254 | |
1342 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1255 | memset(lp, 0, sizeof(*lp)); |
1343 | printk(KERN_ERR PFX "No access methods\n"); | 1256 | lp->dma_addr = lp_dma_addr; |
1344 | ret = -ENODEV; | 1257 | lp->pci_dev = pdev; |
1345 | goto err_free_consistent; | 1258 | |
1346 | } | 1259 | spin_lock_init(&lp->lock); |
1347 | lp->a = *a; | 1260 | |
1348 | 1261 | SET_MODULE_OWNER(dev); | |
1349 | /* prior to register_netdev, dev->name is not yet correct */ | 1262 | SET_NETDEV_DEV(dev, &pdev->dev); |
1350 | if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { | 1263 | dev->priv = lp; |
1351 | ret = -ENOMEM; | 1264 | lp->name = chipname; |
1352 | goto err_free_ring; | 1265 | lp->shared_irq = shared; |
1353 | } | 1266 | lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */ |
1354 | /* detect special T1/E1 WAN card by checking for MAC address */ | 1267 | lp->rx_ring_size = RX_RING_SIZE; /* default rx ring size */ |
1355 | if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 | 1268 | lp->tx_mod_mask = lp->tx_ring_size - 1; |
1269 | lp->rx_mod_mask = lp->rx_ring_size - 1; | ||
1270 | lp->tx_len_bits = (PCNET32_LOG_TX_BUFFERS << 12); | ||
1271 | lp->rx_len_bits = (PCNET32_LOG_RX_BUFFERS << 4); | ||
1272 | lp->mii_if.full_duplex = fdx; | ||
1273 | lp->mii_if.phy_id_mask = 0x1f; | ||
1274 | lp->mii_if.reg_num_mask = 0x1f; | ||
1275 | lp->dxsuflo = dxsuflo; | ||
1276 | lp->mii = mii; | ||
1277 | lp->msg_enable = pcnet32_debug; | ||
1278 | if ((cards_found >= MAX_UNITS) | ||
1279 | || (options[cards_found] > sizeof(options_mapping))) | ||
1280 | lp->options = PCNET32_PORT_ASEL; | ||
1281 | else | ||
1282 | lp->options = options_mapping[options[cards_found]]; | ||
1283 | lp->mii_if.dev = dev; | ||
1284 | lp->mii_if.mdio_read = mdio_read; | ||
1285 | lp->mii_if.mdio_write = mdio_write; | ||
1286 | |||
1287 | if (fdx && !(lp->options & PCNET32_PORT_ASEL) && | ||
1288 | ((cards_found >= MAX_UNITS) || full_duplex[cards_found])) | ||
1289 | lp->options |= PCNET32_PORT_FD; | ||
1290 | |||
1291 | if (!a) { | ||
1292 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1293 | printk(KERN_ERR PFX "No access methods\n"); | ||
1294 | ret = -ENODEV; | ||
1295 | goto err_free_consistent; | ||
1296 | } | ||
1297 | lp->a = *a; | ||
1298 | |||
1299 | /* prior to register_netdev, dev->name is not yet correct */ | ||
1300 | if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) { | ||
1301 | ret = -ENOMEM; | ||
1302 | goto err_free_ring; | ||
1303 | } | ||
1304 | /* detect special T1/E1 WAN card by checking for MAC address */ | ||
1305 | if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 | ||
1356 | && dev->dev_addr[2] == 0x75) | 1306 | && dev->dev_addr[2] == 0x75) |
1357 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; | 1307 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI; |
1358 | |||
1359 | lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ | ||
1360 | lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); | ||
1361 | for (i = 0; i < 6; i++) | ||
1362 | lp->init_block.phys_addr[i] = dev->dev_addr[i]; | ||
1363 | lp->init_block.filter[0] = 0x00000000; | ||
1364 | lp->init_block.filter[1] = 0x00000000; | ||
1365 | lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); | ||
1366 | lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); | ||
1367 | |||
1368 | /* switch pcnet32 to 32bit mode */ | ||
1369 | a->write_bcr(ioaddr, 20, 2); | ||
1370 | |||
1371 | a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, | ||
1372 | init_block)) & 0xffff); | ||
1373 | a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, | ||
1374 | init_block)) >> 16); | ||
1375 | |||
1376 | if (pdev) { /* use the IRQ provided by PCI */ | ||
1377 | dev->irq = pdev->irq; | ||
1378 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1379 | printk(" assigned IRQ %d.\n", dev->irq); | ||
1380 | } else { | ||
1381 | unsigned long irq_mask = probe_irq_on(); | ||
1382 | 1308 | ||
1383 | /* | 1309 | lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */ |
1384 | * To auto-IRQ we enable the initialization-done and DMA error | 1310 | lp->init_block.tlen_rlen = |
1385 | * interrupts. For ISA boards we get a DMA error, but VLB and PCI | 1311 | le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); |
1386 | * boards will work. | 1312 | for (i = 0; i < 6; i++) |
1387 | */ | 1313 | lp->init_block.phys_addr[i] = dev->dev_addr[i]; |
1388 | /* Trigger an initialization just for the interrupt. */ | 1314 | lp->init_block.filter[0] = 0x00000000; |
1389 | a->write_csr (ioaddr, 0, 0x41); | 1315 | lp->init_block.filter[1] = 0x00000000; |
1390 | mdelay (1); | 1316 | lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); |
1317 | lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); | ||
1318 | |||
1319 | /* switch pcnet32 to 32bit mode */ | ||
1320 | a->write_bcr(ioaddr, 20, 2); | ||
1321 | |||
1322 | a->write_csr(ioaddr, 1, (lp->dma_addr + offsetof(struct pcnet32_private, | ||
1323 | init_block)) & 0xffff); | ||
1324 | a->write_csr(ioaddr, 2, (lp->dma_addr + offsetof(struct pcnet32_private, | ||
1325 | init_block)) >> 16); | ||
1326 | |||
1327 | if (pdev) { /* use the IRQ provided by PCI */ | ||
1328 | dev->irq = pdev->irq; | ||
1329 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1330 | printk(" assigned IRQ %d.\n", dev->irq); | ||
1331 | } else { | ||
1332 | unsigned long irq_mask = probe_irq_on(); | ||
1333 | |||
1334 | /* | ||
1335 | * To auto-IRQ we enable the initialization-done and DMA error | ||
1336 | * interrupts. For ISA boards we get a DMA error, but VLB and PCI | ||
1337 | * boards will work. | ||
1338 | */ | ||
1339 | /* Trigger an initialization just for the interrupt. */ | ||
1340 | a->write_csr(ioaddr, 0, 0x41); | ||
1341 | mdelay(1); | ||
1342 | |||
1343 | dev->irq = probe_irq_off(irq_mask); | ||
1344 | if (!dev->irq) { | ||
1345 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1346 | printk(", failed to detect IRQ line.\n"); | ||
1347 | ret = -ENODEV; | ||
1348 | goto err_free_ring; | ||
1349 | } | ||
1350 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1351 | printk(", probed IRQ %d.\n", dev->irq); | ||
1352 | } | ||
1391 | 1353 | ||
1392 | dev->irq = probe_irq_off (irq_mask); | 1354 | /* Set the mii phy_id so that we can query the link state */ |
1393 | if (!dev->irq) { | 1355 | if (lp->mii) { |
1394 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1356 | /* lp->phycount and lp->phymask are set to 0 by memset above */ |
1395 | printk(", failed to detect IRQ line.\n"); | 1357 | |
1396 | ret = -ENODEV; | 1358 | lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f; |
1397 | goto err_free_ring; | 1359 | /* scan for PHYs */ |
1360 | for (i = 0; i < PCNET32_MAX_PHYS; i++) { | ||
1361 | unsigned short id1, id2; | ||
1362 | |||
1363 | id1 = mdio_read(dev, i, MII_PHYSID1); | ||
1364 | if (id1 == 0xffff) | ||
1365 | continue; | ||
1366 | id2 = mdio_read(dev, i, MII_PHYSID2); | ||
1367 | if (id2 == 0xffff) | ||
1368 | continue; | ||
1369 | if (i == 31 && ((chip_version + 1) & 0xfffe) == 0x2624) | ||
1370 | continue; /* 79C971 & 79C972 have phantom phy at id 31 */ | ||
1371 | lp->phycount++; | ||
1372 | lp->phymask |= (1 << i); | ||
1373 | lp->mii_if.phy_id = i; | ||
1374 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1375 | printk(KERN_INFO PFX | ||
1376 | "Found PHY %04x:%04x at address %d.\n", | ||
1377 | id1, id2, i); | ||
1378 | } | ||
1379 | lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5); | ||
1380 | if (lp->phycount > 1) { | ||
1381 | lp->options |= PCNET32_PORT_MII; | ||
1382 | } | ||
1398 | } | 1383 | } |
1399 | if (pcnet32_debug & NETIF_MSG_PROBE) | 1384 | |
1400 | printk(", probed IRQ %d.\n", dev->irq); | 1385 | init_timer(&lp->watchdog_timer); |
1401 | } | 1386 | lp->watchdog_timer.data = (unsigned long)dev; |
1402 | 1387 | lp->watchdog_timer.function = (void *)&pcnet32_watchdog; | |
1403 | /* Set the mii phy_id so that we can query the link state */ | 1388 | |
1404 | if (lp->mii) | 1389 | /* The PCNET32-specific entries in the device structure. */ |
1405 | lp->mii_if.phy_id = ((lp->a.read_bcr (ioaddr, 33)) >> 5) & 0x1f; | 1390 | dev->open = &pcnet32_open; |
1406 | 1391 | dev->hard_start_xmit = &pcnet32_start_xmit; | |
1407 | init_timer (&lp->watchdog_timer); | 1392 | dev->stop = &pcnet32_close; |
1408 | lp->watchdog_timer.data = (unsigned long) dev; | 1393 | dev->get_stats = &pcnet32_get_stats; |
1409 | lp->watchdog_timer.function = (void *) &pcnet32_watchdog; | 1394 | dev->set_multicast_list = &pcnet32_set_multicast_list; |
1410 | 1395 | dev->do_ioctl = &pcnet32_ioctl; | |
1411 | /* The PCNET32-specific entries in the device structure. */ | 1396 | dev->ethtool_ops = &pcnet32_ethtool_ops; |
1412 | dev->open = &pcnet32_open; | 1397 | dev->tx_timeout = pcnet32_tx_timeout; |
1413 | dev->hard_start_xmit = &pcnet32_start_xmit; | 1398 | dev->watchdog_timeo = (5 * HZ); |
1414 | dev->stop = &pcnet32_close; | ||
1415 | dev->get_stats = &pcnet32_get_stats; | ||
1416 | dev->set_multicast_list = &pcnet32_set_multicast_list; | ||
1417 | dev->do_ioctl = &pcnet32_ioctl; | ||
1418 | dev->ethtool_ops = &pcnet32_ethtool_ops; | ||
1419 | dev->tx_timeout = pcnet32_tx_timeout; | ||
1420 | dev->watchdog_timeo = (5*HZ); | ||
1421 | 1399 | ||
1422 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1400 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1423 | dev->poll_controller = pcnet32_poll_controller; | 1401 | dev->poll_controller = pcnet32_poll_controller; |
1424 | #endif | 1402 | #endif |
1425 | 1403 | ||
1426 | /* Fill in the generic fields of the device structure. */ | 1404 | /* Fill in the generic fields of the device structure. */ |
1427 | if (register_netdev(dev)) | 1405 | if (register_netdev(dev)) |
1428 | goto err_free_ring; | 1406 | goto err_free_ring; |
1429 | 1407 | ||
1430 | if (pdev) { | 1408 | if (pdev) { |
1431 | pci_set_drvdata(pdev, dev); | 1409 | pci_set_drvdata(pdev, dev); |
1432 | } else { | 1410 | } else { |
1433 | lp->next = pcnet32_dev; | 1411 | lp->next = pcnet32_dev; |
1434 | pcnet32_dev = dev; | 1412 | pcnet32_dev = dev; |
1435 | } | 1413 | } |
1436 | |||
1437 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1438 | printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name); | ||
1439 | cards_found++; | ||
1440 | |||
1441 | /* enable LED writes */ | ||
1442 | a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); | ||
1443 | |||
1444 | return 0; | ||
1445 | |||
1446 | err_free_ring: | ||
1447 | pcnet32_free_ring(dev); | ||
1448 | err_free_consistent: | ||
1449 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); | ||
1450 | err_free_netdev: | ||
1451 | free_netdev(dev); | ||
1452 | err_release_region: | ||
1453 | release_region(ioaddr, PCNET32_TOTAL_SIZE); | ||
1454 | return ret; | ||
1455 | } | ||
1456 | 1414 | ||
1415 | if (pcnet32_debug & NETIF_MSG_PROBE) | ||
1416 | printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name); | ||
1417 | cards_found++; | ||
1418 | |||
1419 | /* enable LED writes */ | ||
1420 | a->write_bcr(ioaddr, 2, a->read_bcr(ioaddr, 2) | 0x1000); | ||
1421 | |||
1422 | return 0; | ||
1423 | |||
1424 | err_free_ring: | ||
1425 | pcnet32_free_ring(dev); | ||
1426 | err_free_consistent: | ||
1427 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); | ||
1428 | err_free_netdev: | ||
1429 | free_netdev(dev); | ||
1430 | err_release_region: | ||
1431 | release_region(ioaddr, PCNET32_TOTAL_SIZE); | ||
1432 | return ret; | ||
1433 | } | ||
1457 | 1434 | ||
1458 | /* if any allocation fails, caller must also call pcnet32_free_ring */ | 1435 | /* if any allocation fails, caller must also call pcnet32_free_ring */ |
1459 | static int pcnet32_alloc_ring(struct net_device *dev, char *name) | 1436 | static int pcnet32_alloc_ring(struct net_device *dev, char *name) |
1460 | { | 1437 | { |
1461 | struct pcnet32_private *lp = dev->priv; | 1438 | struct pcnet32_private *lp = dev->priv; |
1462 | 1439 | ||
1463 | lp->tx_ring = pci_alloc_consistent(lp->pci_dev, | 1440 | lp->tx_ring = pci_alloc_consistent(lp->pci_dev, |
1464 | sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, | 1441 | sizeof(struct pcnet32_tx_head) * |
1465 | &lp->tx_ring_dma_addr); | 1442 | lp->tx_ring_size, |
1466 | if (lp->tx_ring == NULL) { | 1443 | &lp->tx_ring_dma_addr); |
1467 | if (pcnet32_debug & NETIF_MSG_DRV) | 1444 | if (lp->tx_ring == NULL) { |
1468 | printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", | 1445 | if (pcnet32_debug & NETIF_MSG_DRV) |
1469 | name); | 1446 | printk("\n" KERN_ERR PFX |
1470 | return -ENOMEM; | 1447 | "%s: Consistent memory allocation failed.\n", |
1471 | } | 1448 | name); |
1472 | 1449 | return -ENOMEM; | |
1473 | lp->rx_ring = pci_alloc_consistent(lp->pci_dev, | 1450 | } |
1474 | sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, | ||
1475 | &lp->rx_ring_dma_addr); | ||
1476 | if (lp->rx_ring == NULL) { | ||
1477 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1478 | printk("\n" KERN_ERR PFX "%s: Consistent memory allocation failed.\n", | ||
1479 | name); | ||
1480 | return -ENOMEM; | ||
1481 | } | ||
1482 | |||
1483 | lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, | ||
1484 | GFP_ATOMIC); | ||
1485 | if (!lp->tx_dma_addr) { | ||
1486 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1487 | printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); | ||
1488 | return -ENOMEM; | ||
1489 | } | ||
1490 | memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); | ||
1491 | |||
1492 | lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, | ||
1493 | GFP_ATOMIC); | ||
1494 | if (!lp->rx_dma_addr) { | ||
1495 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1496 | printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); | ||
1497 | return -ENOMEM; | ||
1498 | } | ||
1499 | memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); | ||
1500 | |||
1501 | lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, | ||
1502 | GFP_ATOMIC); | ||
1503 | if (!lp->tx_skbuff) { | ||
1504 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1505 | printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); | ||
1506 | return -ENOMEM; | ||
1507 | } | ||
1508 | memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); | ||
1509 | |||
1510 | lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, | ||
1511 | GFP_ATOMIC); | ||
1512 | if (!lp->rx_skbuff) { | ||
1513 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1514 | printk("\n" KERN_ERR PFX "%s: Memory allocation failed.\n", name); | ||
1515 | return -ENOMEM; | ||
1516 | } | ||
1517 | memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); | ||
1518 | 1451 | ||
1519 | return 0; | 1452 | lp->rx_ring = pci_alloc_consistent(lp->pci_dev, |
1520 | } | 1453 | sizeof(struct pcnet32_rx_head) * |
1454 | lp->rx_ring_size, | ||
1455 | &lp->rx_ring_dma_addr); | ||
1456 | if (lp->rx_ring == NULL) { | ||
1457 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1458 | printk("\n" KERN_ERR PFX | ||
1459 | "%s: Consistent memory allocation failed.\n", | ||
1460 | name); | ||
1461 | return -ENOMEM; | ||
1462 | } | ||
1521 | 1463 | ||
1464 | lp->tx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->tx_ring_size, | ||
1465 | GFP_ATOMIC); | ||
1466 | if (!lp->tx_dma_addr) { | ||
1467 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1468 | printk("\n" KERN_ERR PFX | ||
1469 | "%s: Memory allocation failed.\n", name); | ||
1470 | return -ENOMEM; | ||
1471 | } | ||
1472 | memset(lp->tx_dma_addr, 0, sizeof(dma_addr_t) * lp->tx_ring_size); | ||
1473 | |||
1474 | lp->rx_dma_addr = kmalloc(sizeof(dma_addr_t) * lp->rx_ring_size, | ||
1475 | GFP_ATOMIC); | ||
1476 | if (!lp->rx_dma_addr) { | ||
1477 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1478 | printk("\n" KERN_ERR PFX | ||
1479 | "%s: Memory allocation failed.\n", name); | ||
1480 | return -ENOMEM; | ||
1481 | } | ||
1482 | memset(lp->rx_dma_addr, 0, sizeof(dma_addr_t) * lp->rx_ring_size); | ||
1483 | |||
1484 | lp->tx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->tx_ring_size, | ||
1485 | GFP_ATOMIC); | ||
1486 | if (!lp->tx_skbuff) { | ||
1487 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1488 | printk("\n" KERN_ERR PFX | ||
1489 | "%s: Memory allocation failed.\n", name); | ||
1490 | return -ENOMEM; | ||
1491 | } | ||
1492 | memset(lp->tx_skbuff, 0, sizeof(struct sk_buff *) * lp->tx_ring_size); | ||
1493 | |||
1494 | lp->rx_skbuff = kmalloc(sizeof(struct sk_buff *) * lp->rx_ring_size, | ||
1495 | GFP_ATOMIC); | ||
1496 | if (!lp->rx_skbuff) { | ||
1497 | if (pcnet32_debug & NETIF_MSG_DRV) | ||
1498 | printk("\n" KERN_ERR PFX | ||
1499 | "%s: Memory allocation failed.\n", name); | ||
1500 | return -ENOMEM; | ||
1501 | } | ||
1502 | memset(lp->rx_skbuff, 0, sizeof(struct sk_buff *) * lp->rx_ring_size); | ||
1503 | |||
1504 | return 0; | ||
1505 | } | ||
1522 | 1506 | ||
1523 | static void pcnet32_free_ring(struct net_device *dev) | 1507 | static void pcnet32_free_ring(struct net_device *dev) |
1524 | { | 1508 | { |
1525 | struct pcnet32_private *lp = dev->priv; | 1509 | struct pcnet32_private *lp = dev->priv; |
1526 | 1510 | ||
1527 | kfree(lp->tx_skbuff); | 1511 | kfree(lp->tx_skbuff); |
1528 | lp->tx_skbuff = NULL; | 1512 | lp->tx_skbuff = NULL; |
1529 | 1513 | ||
1530 | kfree(lp->rx_skbuff); | 1514 | kfree(lp->rx_skbuff); |
1531 | lp->rx_skbuff = NULL; | 1515 | lp->rx_skbuff = NULL; |
1532 | 1516 | ||
1533 | kfree(lp->tx_dma_addr); | 1517 | kfree(lp->tx_dma_addr); |
1534 | lp->tx_dma_addr = NULL; | 1518 | lp->tx_dma_addr = NULL; |
1535 | 1519 | ||
1536 | kfree(lp->rx_dma_addr); | 1520 | kfree(lp->rx_dma_addr); |
1537 | lp->rx_dma_addr = NULL; | 1521 | lp->rx_dma_addr = NULL; |
1538 | 1522 | ||
1539 | if (lp->tx_ring) { | 1523 | if (lp->tx_ring) { |
1540 | pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_tx_head) * lp->tx_ring_size, | 1524 | pci_free_consistent(lp->pci_dev, |
1541 | lp->tx_ring, lp->tx_ring_dma_addr); | 1525 | sizeof(struct pcnet32_tx_head) * |
1542 | lp->tx_ring = NULL; | 1526 | lp->tx_ring_size, lp->tx_ring, |
1543 | } | 1527 | lp->tx_ring_dma_addr); |
1528 | lp->tx_ring = NULL; | ||
1529 | } | ||
1544 | 1530 | ||
1545 | if (lp->rx_ring) { | 1531 | if (lp->rx_ring) { |
1546 | pci_free_consistent(lp->pci_dev, sizeof(struct pcnet32_rx_head) * lp->rx_ring_size, | 1532 | pci_free_consistent(lp->pci_dev, |
1547 | lp->rx_ring, lp->rx_ring_dma_addr); | 1533 | sizeof(struct pcnet32_rx_head) * |
1548 | lp->rx_ring = NULL; | 1534 | lp->rx_ring_size, lp->rx_ring, |
1549 | } | 1535 | lp->rx_ring_dma_addr); |
1536 | lp->rx_ring = NULL; | ||
1537 | } | ||
1550 | } | 1538 | } |
1551 | 1539 | ||
1552 | 1540 | static int pcnet32_open(struct net_device *dev) | |
1553 | static int | ||
1554 | pcnet32_open(struct net_device *dev) | ||
1555 | { | 1541 | { |
1556 | struct pcnet32_private *lp = dev->priv; | 1542 | struct pcnet32_private *lp = dev->priv; |
1557 | unsigned long ioaddr = dev->base_addr; | 1543 | unsigned long ioaddr = dev->base_addr; |
1558 | u16 val; | 1544 | u16 val; |
1559 | int i; | 1545 | int i; |
1560 | int rc; | 1546 | int rc; |
1561 | unsigned long flags; | 1547 | unsigned long flags; |
1562 | 1548 | ||
1563 | if (request_irq(dev->irq, &pcnet32_interrupt, | 1549 | if (request_irq(dev->irq, &pcnet32_interrupt, |
1564 | lp->shared_irq ? SA_SHIRQ : 0, dev->name, (void *)dev)) { | 1550 | lp->shared_irq ? SA_SHIRQ : 0, dev->name, |
1565 | return -EAGAIN; | 1551 | (void *)dev)) { |
1566 | } | 1552 | return -EAGAIN; |
1567 | 1553 | } | |
1568 | spin_lock_irqsave(&lp->lock, flags); | 1554 | |
1569 | /* Check for a valid station address */ | 1555 | spin_lock_irqsave(&lp->lock, flags); |
1570 | if (!is_valid_ether_addr(dev->dev_addr)) { | 1556 | /* Check for a valid station address */ |
1571 | rc = -EINVAL; | 1557 | if (!is_valid_ether_addr(dev->dev_addr)) { |
1572 | goto err_free_irq; | 1558 | rc = -EINVAL; |
1573 | } | 1559 | goto err_free_irq; |
1574 | 1560 | } | |
1575 | /* Reset the PCNET32 */ | 1561 | |
1576 | lp->a.reset (ioaddr); | 1562 | /* Reset the PCNET32 */ |
1577 | 1563 | lp->a.reset(ioaddr); | |
1578 | /* switch pcnet32 to 32bit mode */ | 1564 | |
1579 | lp->a.write_bcr (ioaddr, 20, 2); | 1565 | /* switch pcnet32 to 32bit mode */ |
1580 | 1566 | lp->a.write_bcr(ioaddr, 20, 2); | |
1581 | if (netif_msg_ifup(lp)) | 1567 | |
1582 | printk(KERN_DEBUG "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", | 1568 | if (netif_msg_ifup(lp)) |
1583 | dev->name, dev->irq, | 1569 | printk(KERN_DEBUG |
1584 | (u32) (lp->tx_ring_dma_addr), | 1570 | "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n", |
1585 | (u32) (lp->rx_ring_dma_addr), | 1571 | dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr), |
1586 | (u32) (lp->dma_addr + offsetof(struct pcnet32_private, init_block))); | 1572 | (u32) (lp->rx_ring_dma_addr), |
1587 | 1573 | (u32) (lp->dma_addr + | |
1588 | /* set/reset autoselect bit */ | 1574 | offsetof(struct pcnet32_private, init_block))); |
1589 | val = lp->a.read_bcr (ioaddr, 2) & ~2; | 1575 | |
1590 | if (lp->options & PCNET32_PORT_ASEL) | 1576 | /* set/reset autoselect bit */ |
1591 | val |= 2; | 1577 | val = lp->a.read_bcr(ioaddr, 2) & ~2; |
1592 | lp->a.write_bcr (ioaddr, 2, val); | 1578 | if (lp->options & PCNET32_PORT_ASEL) |
1593 | |||
1594 | /* handle full duplex setting */ | ||
1595 | if (lp->mii_if.full_duplex) { | ||
1596 | val = lp->a.read_bcr (ioaddr, 9) & ~3; | ||
1597 | if (lp->options & PCNET32_PORT_FD) { | ||
1598 | val |= 1; | ||
1599 | if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) | ||
1600 | val |= 2; | 1579 | val |= 2; |
1601 | } else if (lp->options & PCNET32_PORT_ASEL) { | 1580 | lp->a.write_bcr(ioaddr, 2, val); |
1602 | /* workaround of xSeries250, turn on for 79C975 only */ | 1581 | |
1603 | i = ((lp->a.read_csr(ioaddr, 88) | | 1582 | /* handle full duplex setting */ |
1604 | (lp->a.read_csr(ioaddr,89) << 16)) >> 12) & 0xffff; | 1583 | if (lp->mii_if.full_duplex) { |
1605 | if (i == 0x2627) | 1584 | val = lp->a.read_bcr(ioaddr, 9) & ~3; |
1606 | val |= 3; | 1585 | if (lp->options & PCNET32_PORT_FD) { |
1607 | } | 1586 | val |= 1; |
1608 | lp->a.write_bcr (ioaddr, 9, val); | 1587 | if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI)) |
1609 | } | 1588 | val |= 2; |
1610 | 1589 | } else if (lp->options & PCNET32_PORT_ASEL) { | |
1611 | /* set/reset GPSI bit in test register */ | 1590 | /* workaround of xSeries250, turn on for 79C975 only */ |
1612 | val = lp->a.read_csr (ioaddr, 124) & ~0x10; | 1591 | i = ((lp->a.read_csr(ioaddr, 88) | |
1613 | if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) | 1592 | (lp->a. |
1614 | val |= 0x10; | 1593 | read_csr(ioaddr, 89) << 16)) >> 12) & 0xffff; |
1615 | lp->a.write_csr (ioaddr, 124, val); | 1594 | if (i == 0x2627) |
1616 | 1595 | val |= 3; | |
1617 | /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ | 1596 | } |
1618 | if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && | 1597 | lp->a.write_bcr(ioaddr, 9, val); |
1598 | } | ||
1599 | |||
1600 | /* set/reset GPSI bit in test register */ | ||
1601 | val = lp->a.read_csr(ioaddr, 124) & ~0x10; | ||
1602 | if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI) | ||
1603 | val |= 0x10; | ||
1604 | lp->a.write_csr(ioaddr, 124, val); | ||
1605 | |||
1606 | /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */ | ||
1607 | if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT && | ||
1619 | (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || | 1608 | (lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX || |
1620 | lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { | 1609 | lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) { |
1621 | if (lp->options & PCNET32_PORT_ASEL) { | 1610 | if (lp->options & PCNET32_PORT_ASEL) { |
1622 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; | 1611 | lp->options = PCNET32_PORT_FD | PCNET32_PORT_100; |
1623 | if (netif_msg_link(lp)) | 1612 | if (netif_msg_link(lp)) |
1624 | printk(KERN_DEBUG "%s: Setting 100Mb-Full Duplex.\n", | 1613 | printk(KERN_DEBUG |
1625 | dev->name); | 1614 | "%s: Setting 100Mb-Full Duplex.\n", |
1626 | } | 1615 | dev->name); |
1627 | } | 1616 | } |
1628 | { | 1617 | } |
1629 | /* | 1618 | if (lp->phycount < 2) { |
1630 | * 24 Jun 2004 according AMD, in order to change the PHY, | 1619 | /* |
1631 | * DANAS (or DISPM for 79C976) must be set; then select the speed, | 1620 | * 24 Jun 2004 according AMD, in order to change the PHY, |
1632 | * duplex, and/or enable auto negotiation, and clear DANAS | 1621 | * DANAS (or DISPM for 79C976) must be set; then select the speed, |
1633 | */ | 1622 | * duplex, and/or enable auto negotiation, and clear DANAS |
1634 | if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { | 1623 | */ |
1635 | lp->a.write_bcr(ioaddr, 32, | 1624 | if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) { |
1636 | lp->a.read_bcr(ioaddr, 32) | 0x0080); | 1625 | lp->a.write_bcr(ioaddr, 32, |
1637 | /* disable Auto Negotiation, set 10Mpbs, HD */ | 1626 | lp->a.read_bcr(ioaddr, 32) | 0x0080); |
1638 | val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; | 1627 | /* disable Auto Negotiation, set 10Mpbs, HD */ |
1639 | if (lp->options & PCNET32_PORT_FD) | 1628 | val = lp->a.read_bcr(ioaddr, 32) & ~0xb8; |
1640 | val |= 0x10; | 1629 | if (lp->options & PCNET32_PORT_FD) |
1641 | if (lp->options & PCNET32_PORT_100) | 1630 | val |= 0x10; |
1642 | val |= 0x08; | 1631 | if (lp->options & PCNET32_PORT_100) |
1643 | lp->a.write_bcr (ioaddr, 32, val); | 1632 | val |= 0x08; |
1633 | lp->a.write_bcr(ioaddr, 32, val); | ||
1634 | } else { | ||
1635 | if (lp->options & PCNET32_PORT_ASEL) { | ||
1636 | lp->a.write_bcr(ioaddr, 32, | ||
1637 | lp->a.read_bcr(ioaddr, | ||
1638 | 32) | 0x0080); | ||
1639 | /* enable auto negotiate, setup, disable fd */ | ||
1640 | val = lp->a.read_bcr(ioaddr, 32) & ~0x98; | ||
1641 | val |= 0x20; | ||
1642 | lp->a.write_bcr(ioaddr, 32, val); | ||
1643 | } | ||
1644 | } | ||
1644 | } else { | 1645 | } else { |
1645 | if (lp->options & PCNET32_PORT_ASEL) { | 1646 | int first_phy = -1; |
1646 | lp->a.write_bcr(ioaddr, 32, | 1647 | u16 bmcr; |
1647 | lp->a.read_bcr(ioaddr, 32) | 0x0080); | 1648 | u32 bcr9; |
1648 | /* enable auto negotiate, setup, disable fd */ | 1649 | struct ethtool_cmd ecmd; |
1649 | val = lp->a.read_bcr(ioaddr, 32) & ~0x98; | 1650 | |
1650 | val |= 0x20; | 1651 | /* |
1651 | lp->a.write_bcr(ioaddr, 32, val); | 1652 | * There is really no good other way to handle multiple PHYs |
1652 | } | 1653 | * other than turning off all automatics |
1654 | */ | ||
1655 | val = lp->a.read_bcr(ioaddr, 2); | ||
1656 | lp->a.write_bcr(ioaddr, 2, val & ~2); | ||
1657 | val = lp->a.read_bcr(ioaddr, 32); | ||
1658 | lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */ | ||
1659 | |||
1660 | if (!(lp->options & PCNET32_PORT_ASEL)) { | ||
1661 | /* setup ecmd */ | ||
1662 | ecmd.port = PORT_MII; | ||
1663 | ecmd.transceiver = XCVR_INTERNAL; | ||
1664 | ecmd.autoneg = AUTONEG_DISABLE; | ||
1665 | ecmd.speed = | ||
1666 | lp-> | ||
1667 | options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10; | ||
1668 | bcr9 = lp->a.read_bcr(ioaddr, 9); | ||
1669 | |||
1670 | if (lp->options & PCNET32_PORT_FD) { | ||
1671 | ecmd.duplex = DUPLEX_FULL; | ||
1672 | bcr9 |= (1 << 0); | ||
1673 | } else { | ||
1674 | ecmd.duplex = DUPLEX_HALF; | ||
1675 | bcr9 |= ~(1 << 0); | ||
1676 | } | ||
1677 | lp->a.write_bcr(ioaddr, 9, bcr9); | ||
1678 | } | ||
1679 | |||
1680 | for (i = 0; i < PCNET32_MAX_PHYS; i++) { | ||
1681 | if (lp->phymask & (1 << i)) { | ||
1682 | /* isolate all but the first PHY */ | ||
1683 | bmcr = mdio_read(dev, i, MII_BMCR); | ||
1684 | if (first_phy == -1) { | ||
1685 | first_phy = i; | ||
1686 | mdio_write(dev, i, MII_BMCR, | ||
1687 | bmcr & ~BMCR_ISOLATE); | ||
1688 | } else { | ||
1689 | mdio_write(dev, i, MII_BMCR, | ||
1690 | bmcr | BMCR_ISOLATE); | ||
1691 | } | ||
1692 | /* use mii_ethtool_sset to setup PHY */ | ||
1693 | lp->mii_if.phy_id = i; | ||
1694 | ecmd.phy_address = i; | ||
1695 | if (lp->options & PCNET32_PORT_ASEL) { | ||
1696 | mii_ethtool_gset(&lp->mii_if, &ecmd); | ||
1697 | ecmd.autoneg = AUTONEG_ENABLE; | ||
1698 | } | ||
1699 | mii_ethtool_sset(&lp->mii_if, &ecmd); | ||
1700 | } | ||
1701 | } | ||
1702 | lp->mii_if.phy_id = first_phy; | ||
1703 | if (netif_msg_link(lp)) | ||
1704 | printk(KERN_INFO "%s: Using PHY number %d.\n", | ||
1705 | dev->name, first_phy); | ||
1653 | } | 1706 | } |
1654 | } | ||
1655 | 1707 | ||
1656 | #ifdef DO_DXSUFLO | 1708 | #ifdef DO_DXSUFLO |
1657 | if (lp->dxsuflo) { /* Disable transmit stop on underflow */ | 1709 | if (lp->dxsuflo) { /* Disable transmit stop on underflow */ |
1658 | val = lp->a.read_csr (ioaddr, 3); | 1710 | val = lp->a.read_csr(ioaddr, 3); |
1659 | val |= 0x40; | 1711 | val |= 0x40; |
1660 | lp->a.write_csr (ioaddr, 3, val); | 1712 | lp->a.write_csr(ioaddr, 3, val); |
1661 | } | 1713 | } |
1662 | #endif | 1714 | #endif |
1663 | 1715 | ||
1664 | lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); | 1716 | lp->init_block.mode = |
1665 | pcnet32_load_multicast(dev); | 1717 | le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); |
1666 | 1718 | pcnet32_load_multicast(dev); | |
1667 | if (pcnet32_init_ring(dev)) { | 1719 | |
1668 | rc = -ENOMEM; | 1720 | if (pcnet32_init_ring(dev)) { |
1669 | goto err_free_ring; | 1721 | rc = -ENOMEM; |
1670 | } | 1722 | goto err_free_ring; |
1671 | 1723 | } | |
1672 | /* Re-initialize the PCNET32, and start it when done. */ | 1724 | |
1673 | lp->a.write_csr (ioaddr, 1, (lp->dma_addr + | 1725 | /* Re-initialize the PCNET32, and start it when done. */ |
1674 | offsetof(struct pcnet32_private, init_block)) & 0xffff); | 1726 | lp->a.write_csr(ioaddr, 1, (lp->dma_addr + |
1675 | lp->a.write_csr (ioaddr, 2, (lp->dma_addr + | 1727 | offsetof(struct pcnet32_private, |
1676 | offsetof(struct pcnet32_private, init_block)) >> 16); | 1728 | init_block)) & 0xffff); |
1677 | 1729 | lp->a.write_csr(ioaddr, 2, | |
1678 | lp->a.write_csr (ioaddr, 4, 0x0915); | 1730 | (lp->dma_addr + |
1679 | lp->a.write_csr (ioaddr, 0, 0x0001); | 1731 | offsetof(struct pcnet32_private, init_block)) >> 16); |
1680 | 1732 | ||
1681 | netif_start_queue(dev); | 1733 | lp->a.write_csr(ioaddr, 4, 0x0915); |
1682 | 1734 | lp->a.write_csr(ioaddr, 0, 0x0001); | |
1683 | /* If we have mii, print the link status and start the watchdog */ | 1735 | |
1684 | if (lp->mii) { | 1736 | netif_start_queue(dev); |
1685 | mii_check_media (&lp->mii_if, netif_msg_link(lp), 1); | 1737 | |
1686 | mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); | 1738 | /* Print the link status and start the watchdog */ |
1687 | } | 1739 | pcnet32_check_media(dev, 1); |
1688 | 1740 | mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); | |
1689 | i = 0; | 1741 | |
1690 | while (i++ < 100) | 1742 | i = 0; |
1691 | if (lp->a.read_csr (ioaddr, 0) & 0x0100) | 1743 | while (i++ < 100) |
1692 | break; | 1744 | if (lp->a.read_csr(ioaddr, 0) & 0x0100) |
1693 | /* | 1745 | break; |
1694 | * We used to clear the InitDone bit, 0x0100, here but Mark Stockton | 1746 | /* |
1695 | * reports that doing so triggers a bug in the '974. | 1747 | * We used to clear the InitDone bit, 0x0100, here but Mark Stockton |
1696 | */ | 1748 | * reports that doing so triggers a bug in the '974. |
1697 | lp->a.write_csr (ioaddr, 0, 0x0042); | 1749 | */ |
1698 | 1750 | lp->a.write_csr(ioaddr, 0, 0x0042); | |
1699 | if (netif_msg_ifup(lp)) | 1751 | |
1700 | printk(KERN_DEBUG "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", | 1752 | if (netif_msg_ifup(lp)) |
1701 | dev->name, i, (u32) (lp->dma_addr + | 1753 | printk(KERN_DEBUG |
1702 | offsetof(struct pcnet32_private, init_block)), | 1754 | "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n", |
1703 | lp->a.read_csr(ioaddr, 0)); | 1755 | dev->name, i, |
1704 | 1756 | (u32) (lp->dma_addr + | |
1705 | spin_unlock_irqrestore(&lp->lock, flags); | 1757 | offsetof(struct pcnet32_private, init_block)), |
1706 | 1758 | lp->a.read_csr(ioaddr, 0)); | |
1707 | return 0; /* Always succeed */ | 1759 | |
1708 | 1760 | spin_unlock_irqrestore(&lp->lock, flags); | |
1709 | err_free_ring: | 1761 | |
1710 | /* free any allocated skbuffs */ | 1762 | return 0; /* Always succeed */ |
1711 | for (i = 0; i < lp->rx_ring_size; i++) { | 1763 | |
1712 | lp->rx_ring[i].status = 0; | 1764 | err_free_ring: |
1713 | if (lp->rx_skbuff[i]) { | 1765 | /* free any allocated skbuffs */ |
1714 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, | 1766 | for (i = 0; i < lp->rx_ring_size; i++) { |
1715 | PCI_DMA_FROMDEVICE); | 1767 | lp->rx_ring[i].status = 0; |
1716 | dev_kfree_skb(lp->rx_skbuff[i]); | 1768 | if (lp->rx_skbuff[i]) { |
1717 | } | 1769 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], |
1718 | lp->rx_skbuff[i] = NULL; | 1770 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); |
1719 | lp->rx_dma_addr[i] = 0; | 1771 | dev_kfree_skb(lp->rx_skbuff[i]); |
1720 | } | 1772 | } |
1721 | 1773 | lp->rx_skbuff[i] = NULL; | |
1722 | pcnet32_free_ring(dev); | 1774 | lp->rx_dma_addr[i] = 0; |
1723 | 1775 | } | |
1724 | /* | 1776 | |
1725 | * Switch back to 16bit mode to avoid problems with dumb | 1777 | pcnet32_free_ring(dev); |
1726 | * DOS packet driver after a warm reboot | 1778 | |
1727 | */ | 1779 | /* |
1728 | lp->a.write_bcr (ioaddr, 20, 4); | 1780 | * Switch back to 16bit mode to avoid problems with dumb |
1729 | 1781 | * DOS packet driver after a warm reboot | |
1730 | err_free_irq: | 1782 | */ |
1731 | spin_unlock_irqrestore(&lp->lock, flags); | 1783 | lp->a.write_bcr(ioaddr, 20, 4); |
1732 | free_irq(dev->irq, dev); | 1784 | |
1733 | return rc; | 1785 | err_free_irq: |
1786 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1787 | free_irq(dev->irq, dev); | ||
1788 | return rc; | ||
1734 | } | 1789 | } |
1735 | 1790 | ||
1736 | /* | 1791 | /* |
@@ -1746,727 +1801,893 @@ err_free_irq: | |||
1746 | * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com | 1801 | * restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com |
1747 | */ | 1802 | */ |
1748 | 1803 | ||
1749 | static void | 1804 | static void pcnet32_purge_tx_ring(struct net_device *dev) |
1750 | pcnet32_purge_tx_ring(struct net_device *dev) | ||
1751 | { | 1805 | { |
1752 | struct pcnet32_private *lp = dev->priv; | 1806 | struct pcnet32_private *lp = dev->priv; |
1753 | int i; | 1807 | int i; |
1754 | |||
1755 | for (i = 0; i < lp->tx_ring_size; i++) { | ||
1756 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ | ||
1757 | wmb(); /* Make sure adapter sees owner change */ | ||
1758 | if (lp->tx_skbuff[i]) { | ||
1759 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], | ||
1760 | lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); | ||
1761 | dev_kfree_skb_any(lp->tx_skbuff[i]); | ||
1762 | } | ||
1763 | lp->tx_skbuff[i] = NULL; | ||
1764 | lp->tx_dma_addr[i] = 0; | ||
1765 | } | ||
1766 | } | ||
1767 | 1808 | ||
1809 | for (i = 0; i < lp->tx_ring_size; i++) { | ||
1810 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ | ||
1811 | wmb(); /* Make sure adapter sees owner change */ | ||
1812 | if (lp->tx_skbuff[i]) { | ||
1813 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], | ||
1814 | lp->tx_skbuff[i]->len, | ||
1815 | PCI_DMA_TODEVICE); | ||
1816 | dev_kfree_skb_any(lp->tx_skbuff[i]); | ||
1817 | } | ||
1818 | lp->tx_skbuff[i] = NULL; | ||
1819 | lp->tx_dma_addr[i] = 0; | ||
1820 | } | ||
1821 | } | ||
1768 | 1822 | ||
1769 | /* Initialize the PCNET32 Rx and Tx rings. */ | 1823 | /* Initialize the PCNET32 Rx and Tx rings. */ |
1770 | static int | 1824 | static int pcnet32_init_ring(struct net_device *dev) |
1771 | pcnet32_init_ring(struct net_device *dev) | ||
1772 | { | 1825 | { |
1773 | struct pcnet32_private *lp = dev->priv; | 1826 | struct pcnet32_private *lp = dev->priv; |
1774 | int i; | 1827 | int i; |
1775 | 1828 | ||
1776 | lp->tx_full = 0; | 1829 | lp->tx_full = 0; |
1777 | lp->cur_rx = lp->cur_tx = 0; | 1830 | lp->cur_rx = lp->cur_tx = 0; |
1778 | lp->dirty_rx = lp->dirty_tx = 0; | 1831 | lp->dirty_rx = lp->dirty_tx = 0; |
1779 | 1832 | ||
1780 | for (i = 0; i < lp->rx_ring_size; i++) { | 1833 | for (i = 0; i < lp->rx_ring_size; i++) { |
1781 | struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; | 1834 | struct sk_buff *rx_skbuff = lp->rx_skbuff[i]; |
1782 | if (rx_skbuff == NULL) { | 1835 | if (rx_skbuff == NULL) { |
1783 | if (!(rx_skbuff = lp->rx_skbuff[i] = dev_alloc_skb (PKT_BUF_SZ))) { | 1836 | if (! |
1784 | /* there is not much, we can do at this point */ | 1837 | (rx_skbuff = lp->rx_skbuff[i] = |
1785 | if (pcnet32_debug & NETIF_MSG_DRV) | 1838 | dev_alloc_skb(PKT_BUF_SZ))) { |
1786 | printk(KERN_ERR "%s: pcnet32_init_ring dev_alloc_skb failed.\n", | 1839 | /* there is not much, we can do at this point */ |
1787 | dev->name); | 1840 | if (pcnet32_debug & NETIF_MSG_DRV) |
1788 | return -1; | 1841 | printk(KERN_ERR |
1789 | } | 1842 | "%s: pcnet32_init_ring dev_alloc_skb failed.\n", |
1790 | skb_reserve (rx_skbuff, 2); | 1843 | dev->name); |
1791 | } | 1844 | return -1; |
1792 | 1845 | } | |
1793 | rmb(); | 1846 | skb_reserve(rx_skbuff, 2); |
1794 | if (lp->rx_dma_addr[i] == 0) | 1847 | } |
1795 | lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data, | 1848 | |
1796 | PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); | 1849 | rmb(); |
1797 | lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]); | 1850 | if (lp->rx_dma_addr[i] == 0) |
1798 | lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ); | 1851 | lp->rx_dma_addr[i] = |
1799 | wmb(); /* Make sure owner changes after all others are visible */ | 1852 | pci_map_single(lp->pci_dev, rx_skbuff->data, |
1800 | lp->rx_ring[i].status = le16_to_cpu(0x8000); | 1853 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); |
1801 | } | 1854 | lp->rx_ring[i].base = (u32) le32_to_cpu(lp->rx_dma_addr[i]); |
1802 | /* The Tx buffer address is filled in as needed, but we do need to clear | 1855 | lp->rx_ring[i].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); |
1803 | * the upper ownership bit. */ | 1856 | wmb(); /* Make sure owner changes after all others are visible */ |
1804 | for (i = 0; i < lp->tx_ring_size; i++) { | 1857 | lp->rx_ring[i].status = le16_to_cpu(0x8000); |
1805 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ | 1858 | } |
1806 | wmb(); /* Make sure adapter sees owner change */ | 1859 | /* The Tx buffer address is filled in as needed, but we do need to clear |
1807 | lp->tx_ring[i].base = 0; | 1860 | * the upper ownership bit. */ |
1808 | lp->tx_dma_addr[i] = 0; | 1861 | for (i = 0; i < lp->tx_ring_size; i++) { |
1809 | } | 1862 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ |
1810 | 1863 | wmb(); /* Make sure adapter sees owner change */ | |
1811 | lp->init_block.tlen_rlen = le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); | 1864 | lp->tx_ring[i].base = 0; |
1812 | for (i = 0; i < 6; i++) | 1865 | lp->tx_dma_addr[i] = 0; |
1813 | lp->init_block.phys_addr[i] = dev->dev_addr[i]; | 1866 | } |
1814 | lp->init_block.rx_ring = (u32)le32_to_cpu(lp->rx_ring_dma_addr); | 1867 | |
1815 | lp->init_block.tx_ring = (u32)le32_to_cpu(lp->tx_ring_dma_addr); | 1868 | lp->init_block.tlen_rlen = |
1816 | wmb(); /* Make sure all changes are visible */ | 1869 | le16_to_cpu(lp->tx_len_bits | lp->rx_len_bits); |
1817 | return 0; | 1870 | for (i = 0; i < 6; i++) |
1871 | lp->init_block.phys_addr[i] = dev->dev_addr[i]; | ||
1872 | lp->init_block.rx_ring = (u32) le32_to_cpu(lp->rx_ring_dma_addr); | ||
1873 | lp->init_block.tx_ring = (u32) le32_to_cpu(lp->tx_ring_dma_addr); | ||
1874 | wmb(); /* Make sure all changes are visible */ | ||
1875 | return 0; | ||
1818 | } | 1876 | } |
1819 | 1877 | ||
1820 | /* the pcnet32 has been issued a stop or reset. Wait for the stop bit | 1878 | /* the pcnet32 has been issued a stop or reset. Wait for the stop bit |
1821 | * then flush the pending transmit operations, re-initialize the ring, | 1879 | * then flush the pending transmit operations, re-initialize the ring, |
1822 | * and tell the chip to initialize. | 1880 | * and tell the chip to initialize. |
1823 | */ | 1881 | */ |
1824 | static void | 1882 | static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) |
1825 | pcnet32_restart(struct net_device *dev, unsigned int csr0_bits) | ||
1826 | { | 1883 | { |
1827 | struct pcnet32_private *lp = dev->priv; | 1884 | struct pcnet32_private *lp = dev->priv; |
1828 | unsigned long ioaddr = dev->base_addr; | 1885 | unsigned long ioaddr = dev->base_addr; |
1829 | int i; | 1886 | int i; |
1830 | 1887 | ||
1831 | /* wait for stop */ | 1888 | /* wait for stop */ |
1832 | for (i=0; i<100; i++) | 1889 | for (i = 0; i < 100; i++) |
1833 | if (lp->a.read_csr(ioaddr, 0) & 0x0004) | 1890 | if (lp->a.read_csr(ioaddr, 0) & 0x0004) |
1834 | break; | 1891 | break; |
1835 | 1892 | ||
1836 | if (i >= 100 && netif_msg_drv(lp)) | 1893 | if (i >= 100 && netif_msg_drv(lp)) |
1837 | printk(KERN_ERR "%s: pcnet32_restart timed out waiting for stop.\n", | 1894 | printk(KERN_ERR |
1838 | dev->name); | 1895 | "%s: pcnet32_restart timed out waiting for stop.\n", |
1896 | dev->name); | ||
1839 | 1897 | ||
1840 | pcnet32_purge_tx_ring(dev); | 1898 | pcnet32_purge_tx_ring(dev); |
1841 | if (pcnet32_init_ring(dev)) | 1899 | if (pcnet32_init_ring(dev)) |
1842 | return; | 1900 | return; |
1843 | 1901 | ||
1844 | /* ReInit Ring */ | 1902 | /* ReInit Ring */ |
1845 | lp->a.write_csr (ioaddr, 0, 1); | 1903 | lp->a.write_csr(ioaddr, 0, 1); |
1846 | i = 0; | 1904 | i = 0; |
1847 | while (i++ < 1000) | 1905 | while (i++ < 1000) |
1848 | if (lp->a.read_csr (ioaddr, 0) & 0x0100) | 1906 | if (lp->a.read_csr(ioaddr, 0) & 0x0100) |
1849 | break; | 1907 | break; |
1850 | 1908 | ||
1851 | lp->a.write_csr (ioaddr, 0, csr0_bits); | 1909 | lp->a.write_csr(ioaddr, 0, csr0_bits); |
1852 | } | 1910 | } |
1853 | 1911 | ||
1854 | 1912 | static void pcnet32_tx_timeout(struct net_device *dev) | |
1855 | static void | ||
1856 | pcnet32_tx_timeout (struct net_device *dev) | ||
1857 | { | 1913 | { |
1858 | struct pcnet32_private *lp = dev->priv; | 1914 | struct pcnet32_private *lp = dev->priv; |
1859 | unsigned long ioaddr = dev->base_addr, flags; | 1915 | unsigned long ioaddr = dev->base_addr, flags; |
1860 | 1916 | ||
1861 | spin_lock_irqsave(&lp->lock, flags); | 1917 | spin_lock_irqsave(&lp->lock, flags); |
1862 | /* Transmitter timeout, serious problems. */ | 1918 | /* Transmitter timeout, serious problems. */ |
1863 | if (pcnet32_debug & NETIF_MSG_DRV) | 1919 | if (pcnet32_debug & NETIF_MSG_DRV) |
1864 | printk(KERN_ERR "%s: transmit timed out, status %4.4x, resetting.\n", | 1920 | printk(KERN_ERR |
1865 | dev->name, lp->a.read_csr(ioaddr, 0)); | 1921 | "%s: transmit timed out, status %4.4x, resetting.\n", |
1866 | lp->a.write_csr (ioaddr, 0, 0x0004); | 1922 | dev->name, lp->a.read_csr(ioaddr, 0)); |
1867 | lp->stats.tx_errors++; | 1923 | lp->a.write_csr(ioaddr, 0, 0x0004); |
1868 | if (netif_msg_tx_err(lp)) { | 1924 | lp->stats.tx_errors++; |
1869 | int i; | 1925 | if (netif_msg_tx_err(lp)) { |
1870 | printk(KERN_DEBUG " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", | 1926 | int i; |
1871 | lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", | 1927 | printk(KERN_DEBUG |
1872 | lp->cur_rx); | 1928 | " Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.", |
1873 | for (i = 0 ; i < lp->rx_ring_size; i++) | 1929 | lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "", |
1874 | printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", | 1930 | lp->cur_rx); |
1875 | le32_to_cpu(lp->rx_ring[i].base), | 1931 | for (i = 0; i < lp->rx_ring_size; i++) |
1876 | (-le16_to_cpu(lp->rx_ring[i].buf_length)) & 0xffff, | 1932 | printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", |
1877 | le32_to_cpu(lp->rx_ring[i].msg_length), | 1933 | le32_to_cpu(lp->rx_ring[i].base), |
1878 | le16_to_cpu(lp->rx_ring[i].status)); | 1934 | (-le16_to_cpu(lp->rx_ring[i].buf_length)) & |
1879 | for (i = 0 ; i < lp->tx_ring_size; i++) | 1935 | 0xffff, le32_to_cpu(lp->rx_ring[i].msg_length), |
1880 | printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", | 1936 | le16_to_cpu(lp->rx_ring[i].status)); |
1881 | le32_to_cpu(lp->tx_ring[i].base), | 1937 | for (i = 0; i < lp->tx_ring_size; i++) |
1882 | (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, | 1938 | printk("%s %08x %04x %08x %04x", i & 1 ? "" : "\n ", |
1883 | le32_to_cpu(lp->tx_ring[i].misc), | 1939 | le32_to_cpu(lp->tx_ring[i].base), |
1884 | le16_to_cpu(lp->tx_ring[i].status)); | 1940 | (-le16_to_cpu(lp->tx_ring[i].length)) & 0xffff, |
1885 | printk("\n"); | 1941 | le32_to_cpu(lp->tx_ring[i].misc), |
1886 | } | 1942 | le16_to_cpu(lp->tx_ring[i].status)); |
1887 | pcnet32_restart(dev, 0x0042); | 1943 | printk("\n"); |
1888 | 1944 | } | |
1889 | dev->trans_start = jiffies; | 1945 | pcnet32_restart(dev, 0x0042); |
1890 | netif_wake_queue(dev); | 1946 | |
1891 | 1947 | dev->trans_start = jiffies; | |
1892 | spin_unlock_irqrestore(&lp->lock, flags); | 1948 | netif_wake_queue(dev); |
1893 | } | ||
1894 | 1949 | ||
1950 | spin_unlock_irqrestore(&lp->lock, flags); | ||
1951 | } | ||
1895 | 1952 | ||
1896 | static int | 1953 | static int pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) |
1897 | pcnet32_start_xmit(struct sk_buff *skb, struct net_device *dev) | ||
1898 | { | 1954 | { |
1899 | struct pcnet32_private *lp = dev->priv; | 1955 | struct pcnet32_private *lp = dev->priv; |
1900 | unsigned long ioaddr = dev->base_addr; | 1956 | unsigned long ioaddr = dev->base_addr; |
1901 | u16 status; | 1957 | u16 status; |
1902 | int entry; | 1958 | int entry; |
1903 | unsigned long flags; | 1959 | unsigned long flags; |
1904 | 1960 | ||
1905 | spin_lock_irqsave(&lp->lock, flags); | 1961 | spin_lock_irqsave(&lp->lock, flags); |
1906 | 1962 | ||
1907 | if (netif_msg_tx_queued(lp)) { | 1963 | if (netif_msg_tx_queued(lp)) { |
1908 | printk(KERN_DEBUG "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", | 1964 | printk(KERN_DEBUG |
1909 | dev->name, lp->a.read_csr(ioaddr, 0)); | 1965 | "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n", |
1910 | } | 1966 | dev->name, lp->a.read_csr(ioaddr, 0)); |
1967 | } | ||
1911 | 1968 | ||
1912 | /* Default status -- will not enable Successful-TxDone | 1969 | /* Default status -- will not enable Successful-TxDone |
1913 | * interrupt when that option is available to us. | 1970 | * interrupt when that option is available to us. |
1914 | */ | 1971 | */ |
1915 | status = 0x8300; | 1972 | status = 0x8300; |
1916 | 1973 | ||
1917 | /* Fill in a Tx ring entry */ | 1974 | /* Fill in a Tx ring entry */ |
1918 | 1975 | ||
1919 | /* Mask to ring buffer boundary. */ | 1976 | /* Mask to ring buffer boundary. */ |
1920 | entry = lp->cur_tx & lp->tx_mod_mask; | 1977 | entry = lp->cur_tx & lp->tx_mod_mask; |
1921 | 1978 | ||
1922 | /* Caution: the write order is important here, set the status | 1979 | /* Caution: the write order is important here, set the status |
1923 | * with the "ownership" bits last. */ | 1980 | * with the "ownership" bits last. */ |
1924 | 1981 | ||
1925 | lp->tx_ring[entry].length = le16_to_cpu(-skb->len); | 1982 | lp->tx_ring[entry].length = le16_to_cpu(-skb->len); |
1926 | 1983 | ||
1927 | lp->tx_ring[entry].misc = 0x00000000; | 1984 | lp->tx_ring[entry].misc = 0x00000000; |
1928 | 1985 | ||
1929 | lp->tx_skbuff[entry] = skb; | 1986 | lp->tx_skbuff[entry] = skb; |
1930 | lp->tx_dma_addr[entry] = pci_map_single(lp->pci_dev, skb->data, skb->len, | 1987 | lp->tx_dma_addr[entry] = |
1931 | PCI_DMA_TODEVICE); | 1988 | pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE); |
1932 | lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]); | 1989 | lp->tx_ring[entry].base = (u32) le32_to_cpu(lp->tx_dma_addr[entry]); |
1933 | wmb(); /* Make sure owner changes after all others are visible */ | 1990 | wmb(); /* Make sure owner changes after all others are visible */ |
1934 | lp->tx_ring[entry].status = le16_to_cpu(status); | 1991 | lp->tx_ring[entry].status = le16_to_cpu(status); |
1935 | 1992 | ||
1936 | lp->cur_tx++; | 1993 | lp->cur_tx++; |
1937 | lp->stats.tx_bytes += skb->len; | 1994 | lp->stats.tx_bytes += skb->len; |
1938 | 1995 | ||
1939 | /* Trigger an immediate send poll. */ | 1996 | /* Trigger an immediate send poll. */ |
1940 | lp->a.write_csr (ioaddr, 0, 0x0048); | 1997 | lp->a.write_csr(ioaddr, 0, 0x0048); |
1941 | 1998 | ||
1942 | dev->trans_start = jiffies; | 1999 | dev->trans_start = jiffies; |
1943 | 2000 | ||
1944 | if (lp->tx_ring[(entry+1) & lp->tx_mod_mask].base != 0) { | 2001 | if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { |
1945 | lp->tx_full = 1; | 2002 | lp->tx_full = 1; |
1946 | netif_stop_queue(dev); | 2003 | netif_stop_queue(dev); |
1947 | } | 2004 | } |
1948 | spin_unlock_irqrestore(&lp->lock, flags); | 2005 | spin_unlock_irqrestore(&lp->lock, flags); |
1949 | return 0; | 2006 | return 0; |
1950 | } | 2007 | } |
1951 | 2008 | ||
1952 | /* The PCNET32 interrupt handler. */ | 2009 | /* The PCNET32 interrupt handler. */ |
1953 | static irqreturn_t | 2010 | static irqreturn_t |
1954 | pcnet32_interrupt(int irq, void *dev_id, struct pt_regs * regs) | 2011 | pcnet32_interrupt(int irq, void *dev_id, struct pt_regs *regs) |
1955 | { | 2012 | { |
1956 | struct net_device *dev = dev_id; | 2013 | struct net_device *dev = dev_id; |
1957 | struct pcnet32_private *lp; | 2014 | struct pcnet32_private *lp; |
1958 | unsigned long ioaddr; | 2015 | unsigned long ioaddr; |
1959 | u16 csr0,rap; | 2016 | u16 csr0, rap; |
1960 | int boguscnt = max_interrupt_work; | 2017 | int boguscnt = max_interrupt_work; |
1961 | int must_restart; | 2018 | int must_restart; |
1962 | 2019 | ||
1963 | if (!dev) { | 2020 | if (!dev) { |
1964 | if (pcnet32_debug & NETIF_MSG_INTR) | 2021 | if (pcnet32_debug & NETIF_MSG_INTR) |
1965 | printk (KERN_DEBUG "%s(): irq %d for unknown device\n", | 2022 | printk(KERN_DEBUG "%s(): irq %d for unknown device\n", |
1966 | __FUNCTION__, irq); | 2023 | __FUNCTION__, irq); |
1967 | return IRQ_NONE; | 2024 | return IRQ_NONE; |
1968 | } | ||
1969 | |||
1970 | ioaddr = dev->base_addr; | ||
1971 | lp = dev->priv; | ||
1972 | |||
1973 | spin_lock(&lp->lock); | ||
1974 | |||
1975 | rap = lp->a.read_rap(ioaddr); | ||
1976 | while ((csr0 = lp->a.read_csr (ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) { | ||
1977 | if (csr0 == 0xffff) { | ||
1978 | break; /* PCMCIA remove happened */ | ||
1979 | } | 2025 | } |
1980 | /* Acknowledge all of the current interrupt sources ASAP. */ | ||
1981 | lp->a.write_csr (ioaddr, 0, csr0 & ~0x004f); | ||
1982 | 2026 | ||
1983 | must_restart = 0; | 2027 | ioaddr = dev->base_addr; |
2028 | lp = dev->priv; | ||
1984 | 2029 | ||
1985 | if (netif_msg_intr(lp)) | 2030 | spin_lock(&lp->lock); |
1986 | printk(KERN_DEBUG "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", | 2031 | |
1987 | dev->name, csr0, lp->a.read_csr (ioaddr, 0)); | 2032 | rap = lp->a.read_rap(ioaddr); |
1988 | 2033 | while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8f00 && --boguscnt >= 0) { | |
1989 | if (csr0 & 0x0400) /* Rx interrupt */ | 2034 | if (csr0 == 0xffff) { |
1990 | pcnet32_rx(dev); | 2035 | break; /* PCMCIA remove happened */ |
1991 | 2036 | } | |
1992 | if (csr0 & 0x0200) { /* Tx-done interrupt */ | 2037 | /* Acknowledge all of the current interrupt sources ASAP. */ |
1993 | unsigned int dirty_tx = lp->dirty_tx; | 2038 | lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f); |
1994 | int delta; | 2039 | |
1995 | 2040 | must_restart = 0; | |
1996 | while (dirty_tx != lp->cur_tx) { | 2041 | |
1997 | int entry = dirty_tx & lp->tx_mod_mask; | 2042 | if (netif_msg_intr(lp)) |
1998 | int status = (short)le16_to_cpu(lp->tx_ring[entry].status); | 2043 | printk(KERN_DEBUG |
1999 | 2044 | "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n", | |
2000 | if (status < 0) | 2045 | dev->name, csr0, lp->a.read_csr(ioaddr, 0)); |
2001 | break; /* It still hasn't been Txed */ | 2046 | |
2002 | 2047 | if (csr0 & 0x0400) /* Rx interrupt */ | |
2003 | lp->tx_ring[entry].base = 0; | 2048 | pcnet32_rx(dev); |
2004 | 2049 | ||
2005 | if (status & 0x4000) { | 2050 | if (csr0 & 0x0200) { /* Tx-done interrupt */ |
2006 | /* There was an major error, log it. */ | 2051 | unsigned int dirty_tx = lp->dirty_tx; |
2007 | int err_status = le32_to_cpu(lp->tx_ring[entry].misc); | 2052 | int delta; |
2008 | lp->stats.tx_errors++; | 2053 | |
2009 | if (netif_msg_tx_err(lp)) | 2054 | while (dirty_tx != lp->cur_tx) { |
2010 | printk(KERN_ERR "%s: Tx error status=%04x err_status=%08x\n", | 2055 | int entry = dirty_tx & lp->tx_mod_mask; |
2011 | dev->name, status, err_status); | 2056 | int status = |
2012 | if (err_status & 0x04000000) lp->stats.tx_aborted_errors++; | 2057 | (short)le16_to_cpu(lp->tx_ring[entry]. |
2013 | if (err_status & 0x08000000) lp->stats.tx_carrier_errors++; | 2058 | status); |
2014 | if (err_status & 0x10000000) lp->stats.tx_window_errors++; | 2059 | |
2060 | if (status < 0) | ||
2061 | break; /* It still hasn't been Txed */ | ||
2062 | |||
2063 | lp->tx_ring[entry].base = 0; | ||
2064 | |||
2065 | if (status & 0x4000) { | ||
2066 | /* There was an major error, log it. */ | ||
2067 | int err_status = | ||
2068 | le32_to_cpu(lp->tx_ring[entry]. | ||
2069 | misc); | ||
2070 | lp->stats.tx_errors++; | ||
2071 | if (netif_msg_tx_err(lp)) | ||
2072 | printk(KERN_ERR | ||
2073 | "%s: Tx error status=%04x err_status=%08x\n", | ||
2074 | dev->name, status, | ||
2075 | err_status); | ||
2076 | if (err_status & 0x04000000) | ||
2077 | lp->stats.tx_aborted_errors++; | ||
2078 | if (err_status & 0x08000000) | ||
2079 | lp->stats.tx_carrier_errors++; | ||
2080 | if (err_status & 0x10000000) | ||
2081 | lp->stats.tx_window_errors++; | ||
2015 | #ifndef DO_DXSUFLO | 2082 | #ifndef DO_DXSUFLO |
2016 | if (err_status & 0x40000000) { | 2083 | if (err_status & 0x40000000) { |
2017 | lp->stats.tx_fifo_errors++; | 2084 | lp->stats.tx_fifo_errors++; |
2018 | /* Ackk! On FIFO errors the Tx unit is turned off! */ | 2085 | /* Ackk! On FIFO errors the Tx unit is turned off! */ |
2019 | /* Remove this verbosity later! */ | 2086 | /* Remove this verbosity later! */ |
2020 | if (netif_msg_tx_err(lp)) | 2087 | if (netif_msg_tx_err(lp)) |
2021 | printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", | 2088 | printk(KERN_ERR |
2022 | dev->name, csr0); | 2089 | "%s: Tx FIFO error! CSR0=%4.4x\n", |
2023 | must_restart = 1; | 2090 | dev->name, csr0); |
2024 | } | 2091 | must_restart = 1; |
2092 | } | ||
2025 | #else | 2093 | #else |
2026 | if (err_status & 0x40000000) { | 2094 | if (err_status & 0x40000000) { |
2027 | lp->stats.tx_fifo_errors++; | 2095 | lp->stats.tx_fifo_errors++; |
2028 | if (! lp->dxsuflo) { /* If controller doesn't recover ... */ | 2096 | if (!lp->dxsuflo) { /* If controller doesn't recover ... */ |
2029 | /* Ackk! On FIFO errors the Tx unit is turned off! */ | 2097 | /* Ackk! On FIFO errors the Tx unit is turned off! */ |
2030 | /* Remove this verbosity later! */ | 2098 | /* Remove this verbosity later! */ |
2031 | if (netif_msg_tx_err(lp)) | 2099 | if (netif_msg_tx_err |
2032 | printk(KERN_ERR "%s: Tx FIFO error! CSR0=%4.4x\n", | 2100 | (lp)) |
2033 | dev->name, csr0); | 2101 | printk(KERN_ERR |
2034 | must_restart = 1; | 2102 | "%s: Tx FIFO error! CSR0=%4.4x\n", |
2035 | } | 2103 | dev-> |
2036 | } | 2104 | name, |
2105 | csr0); | ||
2106 | must_restart = 1; | ||
2107 | } | ||
2108 | } | ||
2037 | #endif | 2109 | #endif |
2038 | } else { | 2110 | } else { |
2039 | if (status & 0x1800) | 2111 | if (status & 0x1800) |
2040 | lp->stats.collisions++; | 2112 | lp->stats.collisions++; |
2041 | lp->stats.tx_packets++; | 2113 | lp->stats.tx_packets++; |
2114 | } | ||
2115 | |||
2116 | /* We must free the original skb */ | ||
2117 | if (lp->tx_skbuff[entry]) { | ||
2118 | pci_unmap_single(lp->pci_dev, | ||
2119 | lp->tx_dma_addr[entry], | ||
2120 | lp->tx_skbuff[entry]-> | ||
2121 | len, PCI_DMA_TODEVICE); | ||
2122 | dev_kfree_skb_irq(lp->tx_skbuff[entry]); | ||
2123 | lp->tx_skbuff[entry] = NULL; | ||
2124 | lp->tx_dma_addr[entry] = 0; | ||
2125 | } | ||
2126 | dirty_tx++; | ||
2127 | } | ||
2128 | |||
2129 | delta = | ||
2130 | (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + | ||
2131 | lp->tx_ring_size); | ||
2132 | if (delta > lp->tx_ring_size) { | ||
2133 | if (netif_msg_drv(lp)) | ||
2134 | printk(KERN_ERR | ||
2135 | "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", | ||
2136 | dev->name, dirty_tx, lp->cur_tx, | ||
2137 | lp->tx_full); | ||
2138 | dirty_tx += lp->tx_ring_size; | ||
2139 | delta -= lp->tx_ring_size; | ||
2140 | } | ||
2141 | |||
2142 | if (lp->tx_full && | ||
2143 | netif_queue_stopped(dev) && | ||
2144 | delta < lp->tx_ring_size - 2) { | ||
2145 | /* The ring is no longer full, clear tbusy. */ | ||
2146 | lp->tx_full = 0; | ||
2147 | netif_wake_queue(dev); | ||
2148 | } | ||
2149 | lp->dirty_tx = dirty_tx; | ||
2150 | } | ||
2151 | |||
2152 | /* Log misc errors. */ | ||
2153 | if (csr0 & 0x4000) | ||
2154 | lp->stats.tx_errors++; /* Tx babble. */ | ||
2155 | if (csr0 & 0x1000) { | ||
2156 | /* | ||
2157 | * this happens when our receive ring is full. This shouldn't | ||
2158 | * be a problem as we will see normal rx interrupts for the frames | ||
2159 | * in the receive ring. But there are some PCI chipsets (I can | ||
2160 | * reproduce this on SP3G with Intel saturn chipset) which have | ||
2161 | * sometimes problems and will fill up the receive ring with | ||
2162 | * error descriptors. In this situation we don't get a rx | ||
2163 | * interrupt, but a missed frame interrupt sooner or later. | ||
2164 | * So we try to clean up our receive ring here. | ||
2165 | */ | ||
2166 | pcnet32_rx(dev); | ||
2167 | lp->stats.rx_errors++; /* Missed a Rx frame. */ | ||
2168 | } | ||
2169 | if (csr0 & 0x0800) { | ||
2170 | if (netif_msg_drv(lp)) | ||
2171 | printk(KERN_ERR | ||
2172 | "%s: Bus master arbitration failure, status %4.4x.\n", | ||
2173 | dev->name, csr0); | ||
2174 | /* unlike for the lance, there is no restart needed */ | ||
2042 | } | 2175 | } |
2043 | 2176 | ||
2044 | /* We must free the original skb */ | 2177 | if (must_restart) { |
2045 | if (lp->tx_skbuff[entry]) { | 2178 | /* reset the chip to clear the error condition, then restart */ |
2046 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[entry], | 2179 | lp->a.reset(ioaddr); |
2047 | lp->tx_skbuff[entry]->len, PCI_DMA_TODEVICE); | 2180 | lp->a.write_csr(ioaddr, 4, 0x0915); |
2048 | dev_kfree_skb_irq(lp->tx_skbuff[entry]); | 2181 | pcnet32_restart(dev, 0x0002); |
2049 | lp->tx_skbuff[entry] = NULL; | 2182 | netif_wake_queue(dev); |
2050 | lp->tx_dma_addr[entry] = 0; | ||
2051 | } | 2183 | } |
2052 | dirty_tx++; | 2184 | } |
2053 | } | 2185 | |
2054 | 2186 | /* Set interrupt enable. */ | |
2055 | delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size); | 2187 | lp->a.write_csr(ioaddr, 0, 0x0040); |
2056 | if (delta > lp->tx_ring_size) { | 2188 | lp->a.write_rap(ioaddr, rap); |
2057 | if (netif_msg_drv(lp)) | 2189 | |
2058 | printk(KERN_ERR "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n", | 2190 | if (netif_msg_intr(lp)) |
2059 | dev->name, dirty_tx, lp->cur_tx, lp->tx_full); | 2191 | printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", |
2060 | dirty_tx += lp->tx_ring_size; | 2192 | dev->name, lp->a.read_csr(ioaddr, 0)); |
2061 | delta -= lp->tx_ring_size; | 2193 | |
2062 | } | 2194 | spin_unlock(&lp->lock); |
2063 | 2195 | ||
2064 | if (lp->tx_full && | 2196 | return IRQ_HANDLED; |
2065 | netif_queue_stopped(dev) && | ||
2066 | delta < lp->tx_ring_size - 2) { | ||
2067 | /* The ring is no longer full, clear tbusy. */ | ||
2068 | lp->tx_full = 0; | ||
2069 | netif_wake_queue (dev); | ||
2070 | } | ||
2071 | lp->dirty_tx = dirty_tx; | ||
2072 | } | ||
2073 | |||
2074 | /* Log misc errors. */ | ||
2075 | if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */ | ||
2076 | if (csr0 & 0x1000) { | ||
2077 | /* | ||
2078 | * this happens when our receive ring is full. This shouldn't | ||
2079 | * be a problem as we will see normal rx interrupts for the frames | ||
2080 | * in the receive ring. But there are some PCI chipsets (I can | ||
2081 | * reproduce this on SP3G with Intel saturn chipset) which have | ||
2082 | * sometimes problems and will fill up the receive ring with | ||
2083 | * error descriptors. In this situation we don't get a rx | ||
2084 | * interrupt, but a missed frame interrupt sooner or later. | ||
2085 | * So we try to clean up our receive ring here. | ||
2086 | */ | ||
2087 | pcnet32_rx(dev); | ||
2088 | lp->stats.rx_errors++; /* Missed a Rx frame. */ | ||
2089 | } | ||
2090 | if (csr0 & 0x0800) { | ||
2091 | if (netif_msg_drv(lp)) | ||
2092 | printk(KERN_ERR "%s: Bus master arbitration failure, status %4.4x.\n", | ||
2093 | dev->name, csr0); | ||
2094 | /* unlike for the lance, there is no restart needed */ | ||
2095 | } | ||
2096 | |||
2097 | if (must_restart) { | ||
2098 | /* reset the chip to clear the error condition, then restart */ | ||
2099 | lp->a.reset(ioaddr); | ||
2100 | lp->a.write_csr(ioaddr, 4, 0x0915); | ||
2101 | pcnet32_restart(dev, 0x0002); | ||
2102 | netif_wake_queue(dev); | ||
2103 | } | ||
2104 | } | ||
2105 | |||
2106 | /* Set interrupt enable. */ | ||
2107 | lp->a.write_csr (ioaddr, 0, 0x0040); | ||
2108 | lp->a.write_rap (ioaddr,rap); | ||
2109 | |||
2110 | if (netif_msg_intr(lp)) | ||
2111 | printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n", | ||
2112 | dev->name, lp->a.read_csr (ioaddr, 0)); | ||
2113 | |||
2114 | spin_unlock(&lp->lock); | ||
2115 | |||
2116 | return IRQ_HANDLED; | ||
2117 | } | 2197 | } |
2118 | 2198 | ||
2119 | static int | 2199 | static int pcnet32_rx(struct net_device *dev) |
2120 | pcnet32_rx(struct net_device *dev) | ||
2121 | { | 2200 | { |
2122 | struct pcnet32_private *lp = dev->priv; | 2201 | struct pcnet32_private *lp = dev->priv; |
2123 | int entry = lp->cur_rx & lp->rx_mod_mask; | 2202 | int entry = lp->cur_rx & lp->rx_mod_mask; |
2124 | int boguscnt = lp->rx_ring_size / 2; | 2203 | int boguscnt = lp->rx_ring_size / 2; |
2125 | 2204 | ||
2126 | /* If we own the next entry, it's a new packet. Send it up. */ | 2205 | /* If we own the next entry, it's a new packet. Send it up. */ |
2127 | while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { | 2206 | while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) { |
2128 | int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; | 2207 | int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8; |
2129 | 2208 | ||
2130 | if (status != 0x03) { /* There was an error. */ | 2209 | if (status != 0x03) { /* There was an error. */ |
2131 | /* | 2210 | /* |
2132 | * There is a tricky error noted by John Murphy, | 2211 | * There is a tricky error noted by John Murphy, |
2133 | * <murf@perftech.com> to Russ Nelson: Even with full-sized | 2212 | * <murf@perftech.com> to Russ Nelson: Even with full-sized |
2134 | * buffers it's possible for a jabber packet to use two | 2213 | * buffers it's possible for a jabber packet to use two |
2135 | * buffers, with only the last correctly noting the error. | 2214 | * buffers, with only the last correctly noting the error. |
2136 | */ | 2215 | */ |
2137 | if (status & 0x01) /* Only count a general error at the */ | 2216 | if (status & 0x01) /* Only count a general error at the */ |
2138 | lp->stats.rx_errors++; /* end of a packet.*/ | 2217 | lp->stats.rx_errors++; /* end of a packet. */ |
2139 | if (status & 0x20) lp->stats.rx_frame_errors++; | 2218 | if (status & 0x20) |
2140 | if (status & 0x10) lp->stats.rx_over_errors++; | 2219 | lp->stats.rx_frame_errors++; |
2141 | if (status & 0x08) lp->stats.rx_crc_errors++; | 2220 | if (status & 0x10) |
2142 | if (status & 0x04) lp->stats.rx_fifo_errors++; | 2221 | lp->stats.rx_over_errors++; |
2143 | lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); | 2222 | if (status & 0x08) |
2144 | } else { | 2223 | lp->stats.rx_crc_errors++; |
2145 | /* Malloc up new buffer, compatible with net-2e. */ | 2224 | if (status & 0x04) |
2146 | short pkt_len = (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff)-4; | 2225 | lp->stats.rx_fifo_errors++; |
2147 | struct sk_buff *skb; | 2226 | lp->rx_ring[entry].status &= le16_to_cpu(0x03ff); |
2148 | |||
2149 | /* Discard oversize frames. */ | ||
2150 | if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { | ||
2151 | if (netif_msg_drv(lp)) | ||
2152 | printk(KERN_ERR "%s: Impossible packet size %d!\n", | ||
2153 | dev->name, pkt_len); | ||
2154 | lp->stats.rx_errors++; | ||
2155 | } else if (pkt_len < 60) { | ||
2156 | if (netif_msg_rx_err(lp)) | ||
2157 | printk(KERN_ERR "%s: Runt packet!\n", dev->name); | ||
2158 | lp->stats.rx_errors++; | ||
2159 | } else { | ||
2160 | int rx_in_place = 0; | ||
2161 | |||
2162 | if (pkt_len > rx_copybreak) { | ||
2163 | struct sk_buff *newskb; | ||
2164 | |||
2165 | if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) { | ||
2166 | skb_reserve (newskb, 2); | ||
2167 | skb = lp->rx_skbuff[entry]; | ||
2168 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[entry], | ||
2169 | PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); | ||
2170 | skb_put (skb, pkt_len); | ||
2171 | lp->rx_skbuff[entry] = newskb; | ||
2172 | newskb->dev = dev; | ||
2173 | lp->rx_dma_addr[entry] = | ||
2174 | pci_map_single(lp->pci_dev, newskb->data, | ||
2175 | PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE); | ||
2176 | lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]); | ||
2177 | rx_in_place = 1; | ||
2178 | } else | ||
2179 | skb = NULL; | ||
2180 | } else { | 2227 | } else { |
2181 | skb = dev_alloc_skb(pkt_len+2); | 2228 | /* Malloc up new buffer, compatible with net-2e. */ |
2182 | } | 2229 | short pkt_len = |
2183 | 2230 | (le32_to_cpu(lp->rx_ring[entry].msg_length) & 0xfff) | |
2184 | if (skb == NULL) { | 2231 | - 4; |
2185 | int i; | 2232 | struct sk_buff *skb; |
2186 | if (netif_msg_drv(lp)) | 2233 | |
2187 | printk(KERN_ERR "%s: Memory squeeze, deferring packet.\n", | 2234 | /* Discard oversize frames. */ |
2188 | dev->name); | 2235 | if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { |
2189 | for (i = 0; i < lp->rx_ring_size; i++) | 2236 | if (netif_msg_drv(lp)) |
2190 | if ((short)le16_to_cpu(lp->rx_ring[(entry+i) | 2237 | printk(KERN_ERR |
2191 | & lp->rx_mod_mask].status) < 0) | 2238 | "%s: Impossible packet size %d!\n", |
2192 | break; | 2239 | dev->name, pkt_len); |
2193 | 2240 | lp->stats.rx_errors++; | |
2194 | if (i > lp->rx_ring_size -2) { | 2241 | } else if (pkt_len < 60) { |
2195 | lp->stats.rx_dropped++; | 2242 | if (netif_msg_rx_err(lp)) |
2196 | lp->rx_ring[entry].status |= le16_to_cpu(0x8000); | 2243 | printk(KERN_ERR "%s: Runt packet!\n", |
2197 | wmb(); /* Make sure adapter sees owner change */ | 2244 | dev->name); |
2198 | lp->cur_rx++; | 2245 | lp->stats.rx_errors++; |
2199 | } | 2246 | } else { |
2200 | break; | 2247 | int rx_in_place = 0; |
2201 | } | 2248 | |
2202 | skb->dev = dev; | 2249 | if (pkt_len > rx_copybreak) { |
2203 | if (!rx_in_place) { | 2250 | struct sk_buff *newskb; |
2204 | skb_reserve(skb,2); /* 16 byte align */ | 2251 | |
2205 | skb_put(skb,pkt_len); /* Make room */ | 2252 | if ((newskb = |
2206 | pci_dma_sync_single_for_cpu(lp->pci_dev, | 2253 | dev_alloc_skb(PKT_BUF_SZ))) { |
2207 | lp->rx_dma_addr[entry], | 2254 | skb_reserve(newskb, 2); |
2208 | PKT_BUF_SZ-2, | 2255 | skb = lp->rx_skbuff[entry]; |
2209 | PCI_DMA_FROMDEVICE); | 2256 | pci_unmap_single(lp->pci_dev, |
2210 | eth_copy_and_sum(skb, | 2257 | lp-> |
2211 | (unsigned char *)(lp->rx_skbuff[entry]->data), | 2258 | rx_dma_addr |
2212 | pkt_len,0); | 2259 | [entry], |
2213 | pci_dma_sync_single_for_device(lp->pci_dev, | 2260 | PKT_BUF_SZ - 2, |
2214 | lp->rx_dma_addr[entry], | 2261 | PCI_DMA_FROMDEVICE); |
2215 | PKT_BUF_SZ-2, | 2262 | skb_put(skb, pkt_len); |
2216 | PCI_DMA_FROMDEVICE); | 2263 | lp->rx_skbuff[entry] = newskb; |
2264 | newskb->dev = dev; | ||
2265 | lp->rx_dma_addr[entry] = | ||
2266 | pci_map_single(lp->pci_dev, | ||
2267 | newskb->data, | ||
2268 | PKT_BUF_SZ - | ||
2269 | 2, | ||
2270 | PCI_DMA_FROMDEVICE); | ||
2271 | lp->rx_ring[entry].base = | ||
2272 | le32_to_cpu(lp-> | ||
2273 | rx_dma_addr | ||
2274 | [entry]); | ||
2275 | rx_in_place = 1; | ||
2276 | } else | ||
2277 | skb = NULL; | ||
2278 | } else { | ||
2279 | skb = dev_alloc_skb(pkt_len + 2); | ||
2280 | } | ||
2281 | |||
2282 | if (skb == NULL) { | ||
2283 | int i; | ||
2284 | if (netif_msg_drv(lp)) | ||
2285 | printk(KERN_ERR | ||
2286 | "%s: Memory squeeze, deferring packet.\n", | ||
2287 | dev->name); | ||
2288 | for (i = 0; i < lp->rx_ring_size; i++) | ||
2289 | if ((short) | ||
2290 | le16_to_cpu(lp-> | ||
2291 | rx_ring[(entry + | ||
2292 | i) | ||
2293 | & lp-> | ||
2294 | rx_mod_mask]. | ||
2295 | status) < 0) | ||
2296 | break; | ||
2297 | |||
2298 | if (i > lp->rx_ring_size - 2) { | ||
2299 | lp->stats.rx_dropped++; | ||
2300 | lp->rx_ring[entry].status |= | ||
2301 | le16_to_cpu(0x8000); | ||
2302 | wmb(); /* Make sure adapter sees owner change */ | ||
2303 | lp->cur_rx++; | ||
2304 | } | ||
2305 | break; | ||
2306 | } | ||
2307 | skb->dev = dev; | ||
2308 | if (!rx_in_place) { | ||
2309 | skb_reserve(skb, 2); /* 16 byte align */ | ||
2310 | skb_put(skb, pkt_len); /* Make room */ | ||
2311 | pci_dma_sync_single_for_cpu(lp->pci_dev, | ||
2312 | lp-> | ||
2313 | rx_dma_addr | ||
2314 | [entry], | ||
2315 | PKT_BUF_SZ - | ||
2316 | 2, | ||
2317 | PCI_DMA_FROMDEVICE); | ||
2318 | eth_copy_and_sum(skb, | ||
2319 | (unsigned char *)(lp-> | ||
2320 | rx_skbuff | ||
2321 | [entry]-> | ||
2322 | data), | ||
2323 | pkt_len, 0); | ||
2324 | pci_dma_sync_single_for_device(lp-> | ||
2325 | pci_dev, | ||
2326 | lp-> | ||
2327 | rx_dma_addr | ||
2328 | [entry], | ||
2329 | PKT_BUF_SZ | ||
2330 | - 2, | ||
2331 | PCI_DMA_FROMDEVICE); | ||
2332 | } | ||
2333 | lp->stats.rx_bytes += skb->len; | ||
2334 | skb->protocol = eth_type_trans(skb, dev); | ||
2335 | netif_rx(skb); | ||
2336 | dev->last_rx = jiffies; | ||
2337 | lp->stats.rx_packets++; | ||
2338 | } | ||
2217 | } | 2339 | } |
2218 | lp->stats.rx_bytes += skb->len; | 2340 | /* |
2219 | skb->protocol=eth_type_trans(skb,dev); | 2341 | * The docs say that the buffer length isn't touched, but Andrew Boyd |
2220 | netif_rx(skb); | 2342 | * of QNX reports that some revs of the 79C965 clear it. |
2221 | dev->last_rx = jiffies; | 2343 | */ |
2222 | lp->stats.rx_packets++; | 2344 | lp->rx_ring[entry].buf_length = le16_to_cpu(2 - PKT_BUF_SZ); |
2223 | } | 2345 | wmb(); /* Make sure owner changes after all others are visible */ |
2346 | lp->rx_ring[entry].status |= le16_to_cpu(0x8000); | ||
2347 | entry = (++lp->cur_rx) & lp->rx_mod_mask; | ||
2348 | if (--boguscnt <= 0) | ||
2349 | break; /* don't stay in loop forever */ | ||
2224 | } | 2350 | } |
2225 | /* | 2351 | |
2226 | * The docs say that the buffer length isn't touched, but Andrew Boyd | 2352 | return 0; |
2227 | * of QNX reports that some revs of the 79C965 clear it. | ||
2228 | */ | ||
2229 | lp->rx_ring[entry].buf_length = le16_to_cpu(2-PKT_BUF_SZ); | ||
2230 | wmb(); /* Make sure owner changes after all others are visible */ | ||
2231 | lp->rx_ring[entry].status |= le16_to_cpu(0x8000); | ||
2232 | entry = (++lp->cur_rx) & lp->rx_mod_mask; | ||
2233 | if (--boguscnt <= 0) break; /* don't stay in loop forever */ | ||
2234 | } | ||
2235 | |||
2236 | return 0; | ||
2237 | } | 2353 | } |
2238 | 2354 | ||
2239 | static int | 2355 | static int pcnet32_close(struct net_device *dev) |
2240 | pcnet32_close(struct net_device *dev) | ||
2241 | { | 2356 | { |
2242 | unsigned long ioaddr = dev->base_addr; | 2357 | unsigned long ioaddr = dev->base_addr; |
2243 | struct pcnet32_private *lp = dev->priv; | 2358 | struct pcnet32_private *lp = dev->priv; |
2244 | int i; | 2359 | int i; |
2245 | unsigned long flags; | 2360 | unsigned long flags; |
2246 | 2361 | ||
2247 | del_timer_sync(&lp->watchdog_timer); | 2362 | del_timer_sync(&lp->watchdog_timer); |
2248 | 2363 | ||
2249 | netif_stop_queue(dev); | 2364 | netif_stop_queue(dev); |
2250 | 2365 | ||
2251 | spin_lock_irqsave(&lp->lock, flags); | 2366 | spin_lock_irqsave(&lp->lock, flags); |
2252 | 2367 | ||
2253 | lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); | 2368 | lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); |
2254 | 2369 | ||
2255 | if (netif_msg_ifdown(lp)) | 2370 | if (netif_msg_ifdown(lp)) |
2256 | printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", | 2371 | printk(KERN_DEBUG |
2257 | dev->name, lp->a.read_csr (ioaddr, 0)); | 2372 | "%s: Shutting down ethercard, status was %2.2x.\n", |
2373 | dev->name, lp->a.read_csr(ioaddr, 0)); | ||
2258 | 2374 | ||
2259 | /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ | 2375 | /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */ |
2260 | lp->a.write_csr (ioaddr, 0, 0x0004); | 2376 | lp->a.write_csr(ioaddr, 0, 0x0004); |
2261 | 2377 | ||
2262 | /* | 2378 | /* |
2263 | * Switch back to 16bit mode to avoid problems with dumb | 2379 | * Switch back to 16bit mode to avoid problems with dumb |
2264 | * DOS packet driver after a warm reboot | 2380 | * DOS packet driver after a warm reboot |
2265 | */ | 2381 | */ |
2266 | lp->a.write_bcr (ioaddr, 20, 4); | 2382 | lp->a.write_bcr(ioaddr, 20, 4); |
2267 | 2383 | ||
2268 | spin_unlock_irqrestore(&lp->lock, flags); | 2384 | spin_unlock_irqrestore(&lp->lock, flags); |
2269 | 2385 | ||
2270 | free_irq(dev->irq, dev); | 2386 | free_irq(dev->irq, dev); |
2271 | 2387 | ||
2272 | spin_lock_irqsave(&lp->lock, flags); | 2388 | spin_lock_irqsave(&lp->lock, flags); |
2273 | 2389 | ||
2274 | /* free all allocated skbuffs */ | 2390 | /* free all allocated skbuffs */ |
2275 | for (i = 0; i < lp->rx_ring_size; i++) { | 2391 | for (i = 0; i < lp->rx_ring_size; i++) { |
2276 | lp->rx_ring[i].status = 0; | 2392 | lp->rx_ring[i].status = 0; |
2277 | wmb(); /* Make sure adapter sees owner change */ | 2393 | wmb(); /* Make sure adapter sees owner change */ |
2278 | if (lp->rx_skbuff[i]) { | 2394 | if (lp->rx_skbuff[i]) { |
2279 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], PKT_BUF_SZ-2, | 2395 | pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], |
2280 | PCI_DMA_FROMDEVICE); | 2396 | PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); |
2281 | dev_kfree_skb(lp->rx_skbuff[i]); | 2397 | dev_kfree_skb(lp->rx_skbuff[i]); |
2398 | } | ||
2399 | lp->rx_skbuff[i] = NULL; | ||
2400 | lp->rx_dma_addr[i] = 0; | ||
2282 | } | 2401 | } |
2283 | lp->rx_skbuff[i] = NULL; | ||
2284 | lp->rx_dma_addr[i] = 0; | ||
2285 | } | ||
2286 | 2402 | ||
2287 | for (i = 0; i < lp->tx_ring_size; i++) { | 2403 | for (i = 0; i < lp->tx_ring_size; i++) { |
2288 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ | 2404 | lp->tx_ring[i].status = 0; /* CPU owns buffer */ |
2289 | wmb(); /* Make sure adapter sees owner change */ | 2405 | wmb(); /* Make sure adapter sees owner change */ |
2290 | if (lp->tx_skbuff[i]) { | 2406 | if (lp->tx_skbuff[i]) { |
2291 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], | 2407 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], |
2292 | lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE); | 2408 | lp->tx_skbuff[i]->len, |
2293 | dev_kfree_skb(lp->tx_skbuff[i]); | 2409 | PCI_DMA_TODEVICE); |
2410 | dev_kfree_skb(lp->tx_skbuff[i]); | ||
2411 | } | ||
2412 | lp->tx_skbuff[i] = NULL; | ||
2413 | lp->tx_dma_addr[i] = 0; | ||
2294 | } | 2414 | } |
2295 | lp->tx_skbuff[i] = NULL; | ||
2296 | lp->tx_dma_addr[i] = 0; | ||
2297 | } | ||
2298 | 2415 | ||
2299 | spin_unlock_irqrestore(&lp->lock, flags); | 2416 | spin_unlock_irqrestore(&lp->lock, flags); |
2300 | 2417 | ||
2301 | return 0; | 2418 | return 0; |
2302 | } | 2419 | } |
2303 | 2420 | ||
2304 | static struct net_device_stats * | 2421 | static struct net_device_stats *pcnet32_get_stats(struct net_device *dev) |
2305 | pcnet32_get_stats(struct net_device *dev) | ||
2306 | { | 2422 | { |
2307 | struct pcnet32_private *lp = dev->priv; | 2423 | struct pcnet32_private *lp = dev->priv; |
2308 | unsigned long ioaddr = dev->base_addr; | 2424 | unsigned long ioaddr = dev->base_addr; |
2309 | u16 saved_addr; | 2425 | u16 saved_addr; |
2310 | unsigned long flags; | 2426 | unsigned long flags; |
2311 | 2427 | ||
2312 | spin_lock_irqsave(&lp->lock, flags); | 2428 | spin_lock_irqsave(&lp->lock, flags); |
2313 | saved_addr = lp->a.read_rap(ioaddr); | 2429 | saved_addr = lp->a.read_rap(ioaddr); |
2314 | lp->stats.rx_missed_errors = lp->a.read_csr (ioaddr, 112); | 2430 | lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112); |
2315 | lp->a.write_rap(ioaddr, saved_addr); | 2431 | lp->a.write_rap(ioaddr, saved_addr); |
2316 | spin_unlock_irqrestore(&lp->lock, flags); | 2432 | spin_unlock_irqrestore(&lp->lock, flags); |
2317 | 2433 | ||
2318 | return &lp->stats; | 2434 | return &lp->stats; |
2319 | } | 2435 | } |
2320 | 2436 | ||
2321 | /* taken from the sunlance driver, which it took from the depca driver */ | 2437 | /* taken from the sunlance driver, which it took from the depca driver */ |
2322 | static void pcnet32_load_multicast (struct net_device *dev) | 2438 | static void pcnet32_load_multicast(struct net_device *dev) |
2323 | { | 2439 | { |
2324 | struct pcnet32_private *lp = dev->priv; | 2440 | struct pcnet32_private *lp = dev->priv; |
2325 | volatile struct pcnet32_init_block *ib = &lp->init_block; | 2441 | volatile struct pcnet32_init_block *ib = &lp->init_block; |
2326 | volatile u16 *mcast_table = (u16 *)&ib->filter; | 2442 | volatile u16 *mcast_table = (u16 *) & ib->filter; |
2327 | struct dev_mc_list *dmi=dev->mc_list; | 2443 | struct dev_mc_list *dmi = dev->mc_list; |
2328 | char *addrs; | 2444 | char *addrs; |
2329 | int i; | 2445 | int i; |
2330 | u32 crc; | 2446 | u32 crc; |
2331 | 2447 | ||
2332 | /* set all multicast bits */ | 2448 | /* set all multicast bits */ |
2333 | if (dev->flags & IFF_ALLMULTI) { | 2449 | if (dev->flags & IFF_ALLMULTI) { |
2334 | ib->filter[0] = 0xffffffff; | 2450 | ib->filter[0] = 0xffffffff; |
2335 | ib->filter[1] = 0xffffffff; | 2451 | ib->filter[1] = 0xffffffff; |
2452 | return; | ||
2453 | } | ||
2454 | /* clear the multicast filter */ | ||
2455 | ib->filter[0] = 0; | ||
2456 | ib->filter[1] = 0; | ||
2457 | |||
2458 | /* Add addresses */ | ||
2459 | for (i = 0; i < dev->mc_count; i++) { | ||
2460 | addrs = dmi->dmi_addr; | ||
2461 | dmi = dmi->next; | ||
2462 | |||
2463 | /* multicast address? */ | ||
2464 | if (!(*addrs & 1)) | ||
2465 | continue; | ||
2466 | |||
2467 | crc = ether_crc_le(6, addrs); | ||
2468 | crc = crc >> 26; | ||
2469 | mcast_table[crc >> 4] = | ||
2470 | le16_to_cpu(le16_to_cpu(mcast_table[crc >> 4]) | | ||
2471 | (1 << (crc & 0xf))); | ||
2472 | } | ||
2336 | return; | 2473 | return; |
2337 | } | ||
2338 | /* clear the multicast filter */ | ||
2339 | ib->filter[0] = 0; | ||
2340 | ib->filter[1] = 0; | ||
2341 | |||
2342 | /* Add addresses */ | ||
2343 | for (i = 0; i < dev->mc_count; i++) { | ||
2344 | addrs = dmi->dmi_addr; | ||
2345 | dmi = dmi->next; | ||
2346 | |||
2347 | /* multicast address? */ | ||
2348 | if (!(*addrs & 1)) | ||
2349 | continue; | ||
2350 | |||
2351 | crc = ether_crc_le(6, addrs); | ||
2352 | crc = crc >> 26; | ||
2353 | mcast_table [crc >> 4] = le16_to_cpu( | ||
2354 | le16_to_cpu(mcast_table [crc >> 4]) | (1 << (crc & 0xf))); | ||
2355 | } | ||
2356 | return; | ||
2357 | } | 2474 | } |
2358 | 2475 | ||
2359 | |||
2360 | /* | 2476 | /* |
2361 | * Set or clear the multicast filter for this adaptor. | 2477 | * Set or clear the multicast filter for this adaptor. |
2362 | */ | 2478 | */ |
2363 | static void pcnet32_set_multicast_list(struct net_device *dev) | 2479 | static void pcnet32_set_multicast_list(struct net_device *dev) |
2364 | { | 2480 | { |
2365 | unsigned long ioaddr = dev->base_addr, flags; | 2481 | unsigned long ioaddr = dev->base_addr, flags; |
2366 | struct pcnet32_private *lp = dev->priv; | 2482 | struct pcnet32_private *lp = dev->priv; |
2367 | 2483 | ||
2368 | spin_lock_irqsave(&lp->lock, flags); | 2484 | spin_lock_irqsave(&lp->lock, flags); |
2369 | if (dev->flags&IFF_PROMISC) { | 2485 | if (dev->flags & IFF_PROMISC) { |
2370 | /* Log any net taps. */ | 2486 | /* Log any net taps. */ |
2371 | if (netif_msg_hw(lp)) | 2487 | if (netif_msg_hw(lp)) |
2372 | printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name); | 2488 | printk(KERN_INFO "%s: Promiscuous mode enabled.\n", |
2373 | lp->init_block.mode = le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << 7); | 2489 | dev->name); |
2374 | } else { | 2490 | lp->init_block.mode = |
2375 | lp->init_block.mode = le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); | 2491 | le16_to_cpu(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) << |
2376 | pcnet32_load_multicast (dev); | 2492 | 7); |
2377 | } | 2493 | } else { |
2378 | 2494 | lp->init_block.mode = | |
2379 | lp->a.write_csr (ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ | 2495 | le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7); |
2380 | pcnet32_restart(dev, 0x0042); /* Resume normal operation */ | 2496 | pcnet32_load_multicast(dev); |
2381 | netif_wake_queue(dev); | 2497 | } |
2382 | 2498 | ||
2383 | spin_unlock_irqrestore(&lp->lock, flags); | 2499 | lp->a.write_csr(ioaddr, 0, 0x0004); /* Temporarily stop the lance. */ |
2500 | pcnet32_restart(dev, 0x0042); /* Resume normal operation */ | ||
2501 | netif_wake_queue(dev); | ||
2502 | |||
2503 | spin_unlock_irqrestore(&lp->lock, flags); | ||
2384 | } | 2504 | } |
2385 | 2505 | ||
2386 | /* This routine assumes that the lp->lock is held */ | 2506 | /* This routine assumes that the lp->lock is held */ |
2387 | static int mdio_read(struct net_device *dev, int phy_id, int reg_num) | 2507 | static int mdio_read(struct net_device *dev, int phy_id, int reg_num) |
2388 | { | 2508 | { |
2389 | struct pcnet32_private *lp = dev->priv; | 2509 | struct pcnet32_private *lp = dev->priv; |
2390 | unsigned long ioaddr = dev->base_addr; | 2510 | unsigned long ioaddr = dev->base_addr; |
2391 | u16 val_out; | 2511 | u16 val_out; |
2392 | 2512 | ||
2393 | if (!lp->mii) | 2513 | if (!lp->mii) |
2394 | return 0; | 2514 | return 0; |
2395 | 2515 | ||
2396 | lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); | 2516 | lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); |
2397 | val_out = lp->a.read_bcr(ioaddr, 34); | 2517 | val_out = lp->a.read_bcr(ioaddr, 34); |
2398 | 2518 | ||
2399 | return val_out; | 2519 | return val_out; |
2400 | } | 2520 | } |
2401 | 2521 | ||
2402 | /* This routine assumes that the lp->lock is held */ | 2522 | /* This routine assumes that the lp->lock is held */ |
2403 | static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) | 2523 | static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val) |
2404 | { | 2524 | { |
2405 | struct pcnet32_private *lp = dev->priv; | 2525 | struct pcnet32_private *lp = dev->priv; |
2406 | unsigned long ioaddr = dev->base_addr; | 2526 | unsigned long ioaddr = dev->base_addr; |
2407 | 2527 | ||
2408 | if (!lp->mii) | 2528 | if (!lp->mii) |
2409 | return; | 2529 | return; |
2410 | 2530 | ||
2411 | lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); | 2531 | lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f)); |
2412 | lp->a.write_bcr(ioaddr, 34, val); | 2532 | lp->a.write_bcr(ioaddr, 34, val); |
2413 | } | 2533 | } |
2414 | 2534 | ||
2415 | static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | 2535 | static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) |
2416 | { | 2536 | { |
2417 | struct pcnet32_private *lp = dev->priv; | 2537 | struct pcnet32_private *lp = dev->priv; |
2418 | int rc; | 2538 | int rc; |
2419 | unsigned long flags; | 2539 | unsigned long flags; |
2540 | |||
2541 | /* SIOC[GS]MIIxxx ioctls */ | ||
2542 | if (lp->mii) { | ||
2543 | spin_lock_irqsave(&lp->lock, flags); | ||
2544 | rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); | ||
2545 | spin_unlock_irqrestore(&lp->lock, flags); | ||
2546 | } else { | ||
2547 | rc = -EOPNOTSUPP; | ||
2548 | } | ||
2549 | |||
2550 | return rc; | ||
2551 | } | ||
2552 | |||
2553 | static int pcnet32_check_otherphy(struct net_device *dev) | ||
2554 | { | ||
2555 | struct pcnet32_private *lp = dev->priv; | ||
2556 | struct mii_if_info mii = lp->mii_if; | ||
2557 | u16 bmcr; | ||
2558 | int i; | ||
2420 | 2559 | ||
2421 | /* SIOC[GS]MIIxxx ioctls */ | 2560 | for (i = 0; i < PCNET32_MAX_PHYS; i++) { |
2422 | if (lp->mii) { | 2561 | if (i == lp->mii_if.phy_id) |
2423 | spin_lock_irqsave(&lp->lock, flags); | 2562 | continue; /* skip active phy */ |
2424 | rc = generic_mii_ioctl(&lp->mii_if, if_mii(rq), cmd, NULL); | 2563 | if (lp->phymask & (1 << i)) { |
2425 | spin_unlock_irqrestore(&lp->lock, flags); | 2564 | mii.phy_id = i; |
2426 | } else { | 2565 | if (mii_link_ok(&mii)) { |
2427 | rc = -EOPNOTSUPP; | 2566 | /* found PHY with active link */ |
2428 | } | 2567 | if (netif_msg_link(lp)) |
2568 | printk(KERN_INFO | ||
2569 | "%s: Using PHY number %d.\n", | ||
2570 | dev->name, i); | ||
2571 | |||
2572 | /* isolate inactive phy */ | ||
2573 | bmcr = | ||
2574 | mdio_read(dev, lp->mii_if.phy_id, MII_BMCR); | ||
2575 | mdio_write(dev, lp->mii_if.phy_id, MII_BMCR, | ||
2576 | bmcr | BMCR_ISOLATE); | ||
2577 | |||
2578 | /* de-isolate new phy */ | ||
2579 | bmcr = mdio_read(dev, i, MII_BMCR); | ||
2580 | mdio_write(dev, i, MII_BMCR, | ||
2581 | bmcr & ~BMCR_ISOLATE); | ||
2582 | |||
2583 | /* set new phy address */ | ||
2584 | lp->mii_if.phy_id = i; | ||
2585 | return 1; | ||
2586 | } | ||
2587 | } | ||
2588 | } | ||
2589 | return 0; | ||
2590 | } | ||
2591 | |||
2592 | /* | ||
2593 | * Show the status of the media. Similar to mii_check_media however it | ||
2594 | * correctly shows the link speed for all (tested) pcnet32 variants. | ||
2595 | * Devices with no mii just report link state without speed. | ||
2596 | * | ||
2597 | * Caller is assumed to hold and release the lp->lock. | ||
2598 | */ | ||
2429 | 2599 | ||
2430 | return rc; | 2600 | static void pcnet32_check_media(struct net_device *dev, int verbose) |
2601 | { | ||
2602 | struct pcnet32_private *lp = dev->priv; | ||
2603 | int curr_link; | ||
2604 | int prev_link = netif_carrier_ok(dev) ? 1 : 0; | ||
2605 | u32 bcr9; | ||
2606 | |||
2607 | if (lp->mii) { | ||
2608 | curr_link = mii_link_ok(&lp->mii_if); | ||
2609 | } else { | ||
2610 | ulong ioaddr = dev->base_addr; /* card base I/O address */ | ||
2611 | curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0); | ||
2612 | } | ||
2613 | if (!curr_link) { | ||
2614 | if (prev_link || verbose) { | ||
2615 | netif_carrier_off(dev); | ||
2616 | if (netif_msg_link(lp)) | ||
2617 | printk(KERN_INFO "%s: link down\n", dev->name); | ||
2618 | } | ||
2619 | if (lp->phycount > 1) { | ||
2620 | curr_link = pcnet32_check_otherphy(dev); | ||
2621 | prev_link = 0; | ||
2622 | } | ||
2623 | } else if (verbose || !prev_link) { | ||
2624 | netif_carrier_on(dev); | ||
2625 | if (lp->mii) { | ||
2626 | if (netif_msg_link(lp)) { | ||
2627 | struct ethtool_cmd ecmd; | ||
2628 | mii_ethtool_gset(&lp->mii_if, &ecmd); | ||
2629 | printk(KERN_INFO | ||
2630 | "%s: link up, %sMbps, %s-duplex\n", | ||
2631 | dev->name, | ||
2632 | (ecmd.speed == SPEED_100) ? "100" : "10", | ||
2633 | (ecmd.duplex == | ||
2634 | DUPLEX_FULL) ? "full" : "half"); | ||
2635 | } | ||
2636 | bcr9 = lp->a.read_bcr(dev->base_addr, 9); | ||
2637 | if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) { | ||
2638 | if (lp->mii_if.full_duplex) | ||
2639 | bcr9 |= (1 << 0); | ||
2640 | else | ||
2641 | bcr9 &= ~(1 << 0); | ||
2642 | lp->a.write_bcr(dev->base_addr, 9, bcr9); | ||
2643 | } | ||
2644 | } else { | ||
2645 | if (netif_msg_link(lp)) | ||
2646 | printk(KERN_INFO "%s: link up\n", dev->name); | ||
2647 | } | ||
2648 | } | ||
2431 | } | 2649 | } |
2432 | 2650 | ||
2651 | /* | ||
2652 | * Check for loss of link and link establishment. | ||
2653 | * Can not use mii_check_media because it does nothing if mode is forced. | ||
2654 | */ | ||
2655 | |||
2433 | static void pcnet32_watchdog(struct net_device *dev) | 2656 | static void pcnet32_watchdog(struct net_device *dev) |
2434 | { | 2657 | { |
2435 | struct pcnet32_private *lp = dev->priv; | 2658 | struct pcnet32_private *lp = dev->priv; |
2436 | unsigned long flags; | 2659 | unsigned long flags; |
2437 | 2660 | ||
2438 | /* Print the link status if it has changed */ | 2661 | /* Print the link status if it has changed */ |
2439 | if (lp->mii) { | ||
2440 | spin_lock_irqsave(&lp->lock, flags); | 2662 | spin_lock_irqsave(&lp->lock, flags); |
2441 | mii_check_media (&lp->mii_if, netif_msg_link(lp), 0); | 2663 | pcnet32_check_media(dev, 0); |
2442 | spin_unlock_irqrestore(&lp->lock, flags); | 2664 | spin_unlock_irqrestore(&lp->lock, flags); |
2443 | } | ||
2444 | 2665 | ||
2445 | mod_timer (&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); | 2666 | mod_timer(&(lp->watchdog_timer), PCNET32_WATCHDOG_TIMEOUT); |
2446 | } | 2667 | } |
2447 | 2668 | ||
2448 | static void __devexit pcnet32_remove_one(struct pci_dev *pdev) | 2669 | static void __devexit pcnet32_remove_one(struct pci_dev *pdev) |
2449 | { | 2670 | { |
2450 | struct net_device *dev = pci_get_drvdata(pdev); | 2671 | struct net_device *dev = pci_get_drvdata(pdev); |
2451 | 2672 | ||
2452 | if (dev) { | 2673 | if (dev) { |
2453 | struct pcnet32_private *lp = dev->priv; | 2674 | struct pcnet32_private *lp = dev->priv; |
2454 | 2675 | ||
2455 | unregister_netdev(dev); | 2676 | unregister_netdev(dev); |
2456 | pcnet32_free_ring(dev); | 2677 | pcnet32_free_ring(dev); |
2457 | release_region(dev->base_addr, PCNET32_TOTAL_SIZE); | 2678 | release_region(dev->base_addr, PCNET32_TOTAL_SIZE); |
2458 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); | 2679 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); |
2459 | free_netdev(dev); | 2680 | free_netdev(dev); |
2460 | pci_disable_device(pdev); | 2681 | pci_disable_device(pdev); |
2461 | pci_set_drvdata(pdev, NULL); | 2682 | pci_set_drvdata(pdev, NULL); |
2462 | } | 2683 | } |
2463 | } | 2684 | } |
2464 | 2685 | ||
2465 | static struct pci_driver pcnet32_driver = { | 2686 | static struct pci_driver pcnet32_driver = { |
2466 | .name = DRV_NAME, | 2687 | .name = DRV_NAME, |
2467 | .probe = pcnet32_probe_pci, | 2688 | .probe = pcnet32_probe_pci, |
2468 | .remove = __devexit_p(pcnet32_remove_one), | 2689 | .remove = __devexit_p(pcnet32_remove_one), |
2469 | .id_table = pcnet32_pci_tbl, | 2690 | .id_table = pcnet32_pci_tbl, |
2470 | }; | 2691 | }; |
2471 | 2692 | ||
2472 | /* An additional parameter that may be passed in... */ | 2693 | /* An additional parameter that may be passed in... */ |
@@ -2477,9 +2698,11 @@ static int pcnet32_have_pci; | |||
2477 | module_param(debug, int, 0); | 2698 | module_param(debug, int, 0); |
2478 | MODULE_PARM_DESC(debug, DRV_NAME " debug level"); | 2699 | MODULE_PARM_DESC(debug, DRV_NAME " debug level"); |
2479 | module_param(max_interrupt_work, int, 0); | 2700 | module_param(max_interrupt_work, int, 0); |
2480 | MODULE_PARM_DESC(max_interrupt_work, DRV_NAME " maximum events handled per interrupt"); | 2701 | MODULE_PARM_DESC(max_interrupt_work, |
2702 | DRV_NAME " maximum events handled per interrupt"); | ||
2481 | module_param(rx_copybreak, int, 0); | 2703 | module_param(rx_copybreak, int, 0); |
2482 | MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames"); | 2704 | MODULE_PARM_DESC(rx_copybreak, |
2705 | DRV_NAME " copy breakpoint for copy-only-tiny-frames"); | ||
2483 | module_param(tx_start_pt, int, 0); | 2706 | module_param(tx_start_pt, int, 0); |
2484 | MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); | 2707 | MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)"); |
2485 | module_param(pcnet32vlb, int, 0); | 2708 | module_param(pcnet32vlb, int, 0); |
@@ -2490,7 +2713,9 @@ module_param_array(full_duplex, int, NULL, 0); | |||
2490 | MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); | 2713 | MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)"); |
2491 | /* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ | 2714 | /* Module Parameter for HomePNA cards added by Patrick Simmons, 2004 */ |
2492 | module_param_array(homepna, int, NULL, 0); | 2715 | module_param_array(homepna, int, NULL, 0); |
2493 | MODULE_PARM_DESC(homepna, DRV_NAME " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); | 2716 | MODULE_PARM_DESC(homepna, |
2717 | DRV_NAME | ||
2718 | " mode for 79C978 cards (1 for HomePNA, 0 for Ethernet, default Ethernet"); | ||
2494 | 2719 | ||
2495 | MODULE_AUTHOR("Thomas Bogendoerfer"); | 2720 | MODULE_AUTHOR("Thomas Bogendoerfer"); |
2496 | MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); | 2721 | MODULE_DESCRIPTION("Driver for PCnet32 and PCnetPCI based ethercards"); |
@@ -2500,44 +2725,44 @@ MODULE_LICENSE("GPL"); | |||
2500 | 2725 | ||
2501 | static int __init pcnet32_init_module(void) | 2726 | static int __init pcnet32_init_module(void) |
2502 | { | 2727 | { |
2503 | printk(KERN_INFO "%s", version); | 2728 | printk(KERN_INFO "%s", version); |
2504 | 2729 | ||
2505 | pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); | 2730 | pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT); |
2506 | 2731 | ||
2507 | if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) | 2732 | if ((tx_start_pt >= 0) && (tx_start_pt <= 3)) |
2508 | tx_start = tx_start_pt; | 2733 | tx_start = tx_start_pt; |
2509 | 2734 | ||
2510 | /* find the PCI devices */ | 2735 | /* find the PCI devices */ |
2511 | if (!pci_module_init(&pcnet32_driver)) | 2736 | if (!pci_module_init(&pcnet32_driver)) |
2512 | pcnet32_have_pci = 1; | 2737 | pcnet32_have_pci = 1; |
2513 | 2738 | ||
2514 | /* should we find any remaining VLbus devices ? */ | 2739 | /* should we find any remaining VLbus devices ? */ |
2515 | if (pcnet32vlb) | 2740 | if (pcnet32vlb) |
2516 | pcnet32_probe_vlbus(); | 2741 | pcnet32_probe_vlbus(); |
2517 | 2742 | ||
2518 | if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) | 2743 | if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE)) |
2519 | printk(KERN_INFO PFX "%d cards_found.\n", cards_found); | 2744 | printk(KERN_INFO PFX "%d cards_found.\n", cards_found); |
2520 | 2745 | ||
2521 | return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; | 2746 | return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV; |
2522 | } | 2747 | } |
2523 | 2748 | ||
2524 | static void __exit pcnet32_cleanup_module(void) | 2749 | static void __exit pcnet32_cleanup_module(void) |
2525 | { | 2750 | { |
2526 | struct net_device *next_dev; | 2751 | struct net_device *next_dev; |
2527 | 2752 | ||
2528 | while (pcnet32_dev) { | 2753 | while (pcnet32_dev) { |
2529 | struct pcnet32_private *lp = pcnet32_dev->priv; | 2754 | struct pcnet32_private *lp = pcnet32_dev->priv; |
2530 | next_dev = lp->next; | 2755 | next_dev = lp->next; |
2531 | unregister_netdev(pcnet32_dev); | 2756 | unregister_netdev(pcnet32_dev); |
2532 | pcnet32_free_ring(pcnet32_dev); | 2757 | pcnet32_free_ring(pcnet32_dev); |
2533 | release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); | 2758 | release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE); |
2534 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); | 2759 | pci_free_consistent(lp->pci_dev, sizeof(*lp), lp, lp->dma_addr); |
2535 | free_netdev(pcnet32_dev); | 2760 | free_netdev(pcnet32_dev); |
2536 | pcnet32_dev = next_dev; | 2761 | pcnet32_dev = next_dev; |
2537 | } | 2762 | } |
2538 | 2763 | ||
2539 | if (pcnet32_have_pci) | 2764 | if (pcnet32_have_pci) |
2540 | pci_unregister_driver(&pcnet32_driver); | 2765 | pci_unregister_driver(&pcnet32_driver); |
2541 | } | 2766 | } |
2542 | 2767 | ||
2543 | module_init(pcnet32_init_module); | 2768 | module_init(pcnet32_init_module); |
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c index 0245e40b51a1..f608c12e3e8b 100644 --- a/drivers/net/ppp_generic.c +++ b/drivers/net/ppp_generic.c | |||
@@ -1691,8 +1691,8 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) | |||
1691 | || ppp->npmode[npi] != NPMODE_PASS) { | 1691 | || ppp->npmode[npi] != NPMODE_PASS) { |
1692 | kfree_skb(skb); | 1692 | kfree_skb(skb); |
1693 | } else { | 1693 | } else { |
1694 | skb_pull(skb, 2); /* chop off protocol */ | 1694 | /* chop off protocol */ |
1695 | skb_postpull_rcsum(skb, skb->data - 2, 2); | 1695 | skb_pull_rcsum(skb, 2); |
1696 | skb->dev = ppp->dev; | 1696 | skb->dev = ppp->dev; |
1697 | skb->protocol = htons(npindex_to_ethertype[npi]); | 1697 | skb->protocol = htons(npindex_to_ethertype[npi]); |
1698 | skb->mac.raw = skb->data; | 1698 | skb->mac.raw = skb->data; |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 9369f811075d..475dc930380f 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -337,8 +337,7 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
337 | if (sk->sk_state & PPPOX_BOUND) { | 337 | if (sk->sk_state & PPPOX_BOUND) { |
338 | struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw; | 338 | struct pppoe_hdr *ph = (struct pppoe_hdr *) skb->nh.raw; |
339 | int len = ntohs(ph->length); | 339 | int len = ntohs(ph->length); |
340 | skb_pull(skb, sizeof(struct pppoe_hdr)); | 340 | skb_pull_rcsum(skb, sizeof(struct pppoe_hdr)); |
341 | skb_postpull_rcsum(skb, ph, sizeof(*ph)); | ||
342 | if (pskb_trim_rcsum(skb, len)) | 341 | if (pskb_trim_rcsum(skb, len)) |
343 | goto abort_kfree; | 342 | goto abort_kfree; |
344 | 343 | ||
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c index a4b2b6975d6c..0784f558ca9a 100644 --- a/drivers/net/skfp/fplustm.c +++ b/drivers/net/skfp/fplustm.c | |||
@@ -549,12 +549,12 @@ void formac_tx_restart(struct s_smc *smc) | |||
549 | static void enable_formac(struct s_smc *smc) | 549 | static void enable_formac(struct s_smc *smc) |
550 | { | 550 | { |
551 | /* set formac IMSK : 0 enables irq */ | 551 | /* set formac IMSK : 0 enables irq */ |
552 | outpw(FM_A(FM_IMSK1U),~mac_imsk1u) ; | 552 | outpw(FM_A(FM_IMSK1U),(unsigned short)~mac_imsk1u); |
553 | outpw(FM_A(FM_IMSK1L),~mac_imsk1l) ; | 553 | outpw(FM_A(FM_IMSK1L),(unsigned short)~mac_imsk1l); |
554 | outpw(FM_A(FM_IMSK2U),~mac_imsk2u) ; | 554 | outpw(FM_A(FM_IMSK2U),(unsigned short)~mac_imsk2u); |
555 | outpw(FM_A(FM_IMSK2L),~mac_imsk2l) ; | 555 | outpw(FM_A(FM_IMSK2L),(unsigned short)~mac_imsk2l); |
556 | outpw(FM_A(FM_IMSK3U),~mac_imsk3u) ; | 556 | outpw(FM_A(FM_IMSK3U),(unsigned short)~mac_imsk3u); |
557 | outpw(FM_A(FM_IMSK3L),~mac_imsk3l) ; | 557 | outpw(FM_A(FM_IMSK3L),(unsigned short)~mac_imsk3l); |
558 | } | 558 | } |
559 | 559 | ||
560 | #if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */ | 560 | #if 0 /* Removed because the driver should use the ASICs TX complete IRQ. */ |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 25e028b7ce48..4eda81d41b10 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -44,7 +44,7 @@ | |||
44 | #include "skge.h" | 44 | #include "skge.h" |
45 | 45 | ||
46 | #define DRV_NAME "skge" | 46 | #define DRV_NAME "skge" |
47 | #define DRV_VERSION "1.3" | 47 | #define DRV_VERSION "1.4" |
48 | #define PFX DRV_NAME " " | 48 | #define PFX DRV_NAME " " |
49 | 49 | ||
50 | #define DEFAULT_TX_RING_SIZE 128 | 50 | #define DEFAULT_TX_RING_SIZE 128 |
@@ -104,7 +104,6 @@ static const int txqaddr[] = { Q_XA1, Q_XA2 }; | |||
104 | static const int rxqaddr[] = { Q_R1, Q_R2 }; | 104 | static const int rxqaddr[] = { Q_R1, Q_R2 }; |
105 | static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; | 105 | static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; |
106 | static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; | 106 | static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; |
107 | static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 }; | ||
108 | 107 | ||
109 | static int skge_get_regs_len(struct net_device *dev) | 108 | static int skge_get_regs_len(struct net_device *dev) |
110 | { | 109 | { |
@@ -728,19 +727,18 @@ static struct ethtool_ops skge_ethtool_ops = { | |||
728 | * Allocate ring elements and chain them together | 727 | * Allocate ring elements and chain them together |
729 | * One-to-one association of board descriptors with ring elements | 728 | * One-to-one association of board descriptors with ring elements |
730 | */ | 729 | */ |
731 | static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base) | 730 | static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) |
732 | { | 731 | { |
733 | struct skge_tx_desc *d; | 732 | struct skge_tx_desc *d; |
734 | struct skge_element *e; | 733 | struct skge_element *e; |
735 | int i; | 734 | int i; |
736 | 735 | ||
737 | ring->start = kmalloc(sizeof(*e)*ring->count, GFP_KERNEL); | 736 | ring->start = kcalloc(sizeof(*e), ring->count, GFP_KERNEL); |
738 | if (!ring->start) | 737 | if (!ring->start) |
739 | return -ENOMEM; | 738 | return -ENOMEM; |
740 | 739 | ||
741 | for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { | 740 | for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { |
742 | e->desc = d; | 741 | e->desc = d; |
743 | e->skb = NULL; | ||
744 | if (i == ring->count - 1) { | 742 | if (i == ring->count - 1) { |
745 | e->next = ring->start; | 743 | e->next = ring->start; |
746 | d->next_offset = base; | 744 | d->next_offset = base; |
@@ -2169,27 +2167,31 @@ static int skge_up(struct net_device *dev) | |||
2169 | if (!skge->mem) | 2167 | if (!skge->mem) |
2170 | return -ENOMEM; | 2168 | return -ENOMEM; |
2171 | 2169 | ||
2170 | BUG_ON(skge->dma & 7); | ||
2171 | |||
2172 | if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { | ||
2173 | printk(KERN_ERR PFX "pci_alloc_consistent region crosses 4G boundary\n"); | ||
2174 | err = -EINVAL; | ||
2175 | goto free_pci_mem; | ||
2176 | } | ||
2177 | |||
2172 | memset(skge->mem, 0, skge->mem_size); | 2178 | memset(skge->mem, 0, skge->mem_size); |
2173 | 2179 | ||
2174 | if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma))) | 2180 | err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); |
2181 | if (err) | ||
2175 | goto free_pci_mem; | 2182 | goto free_pci_mem; |
2176 | 2183 | ||
2177 | err = skge_rx_fill(skge); | 2184 | err = skge_rx_fill(skge); |
2178 | if (err) | 2185 | if (err) |
2179 | goto free_rx_ring; | 2186 | goto free_rx_ring; |
2180 | 2187 | ||
2181 | if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, | 2188 | err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, |
2182 | skge->dma + rx_size))) | 2189 | skge->dma + rx_size); |
2190 | if (err) | ||
2183 | goto free_rx_ring; | 2191 | goto free_rx_ring; |
2184 | 2192 | ||
2185 | skge->tx_avail = skge->tx_ring.count - 1; | 2193 | skge->tx_avail = skge->tx_ring.count - 1; |
2186 | 2194 | ||
2187 | /* Enable IRQ from port */ | ||
2188 | spin_lock_irq(&hw->hw_lock); | ||
2189 | hw->intr_mask |= portirqmask[port]; | ||
2190 | skge_write32(hw, B0_IMSK, hw->intr_mask); | ||
2191 | spin_unlock_irq(&hw->hw_lock); | ||
2192 | |||
2193 | /* Initialize MAC */ | 2195 | /* Initialize MAC */ |
2194 | spin_lock_bh(&hw->phy_lock); | 2196 | spin_lock_bh(&hw->phy_lock); |
2195 | if (hw->chip_id == CHIP_ID_GENESIS) | 2197 | if (hw->chip_id == CHIP_ID_GENESIS) |
@@ -2246,11 +2248,6 @@ static int skge_down(struct net_device *dev) | |||
2246 | else | 2248 | else |
2247 | yukon_stop(skge); | 2249 | yukon_stop(skge); |
2248 | 2250 | ||
2249 | spin_lock_irq(&hw->hw_lock); | ||
2250 | hw->intr_mask &= ~portirqmask[skge->port]; | ||
2251 | skge_write32(hw, B0_IMSK, hw->intr_mask); | ||
2252 | spin_unlock_irq(&hw->hw_lock); | ||
2253 | |||
2254 | /* Stop transmitter */ | 2251 | /* Stop transmitter */ |
2255 | skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); | 2252 | skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); |
2256 | skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), | 2253 | skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), |
@@ -2307,18 +2304,15 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
2307 | int i; | 2304 | int i; |
2308 | u32 control, len; | 2305 | u32 control, len; |
2309 | u64 map; | 2306 | u64 map; |
2310 | unsigned long flags; | ||
2311 | 2307 | ||
2312 | skb = skb_padto(skb, ETH_ZLEN); | 2308 | skb = skb_padto(skb, ETH_ZLEN); |
2313 | if (!skb) | 2309 | if (!skb) |
2314 | return NETDEV_TX_OK; | 2310 | return NETDEV_TX_OK; |
2315 | 2311 | ||
2316 | local_irq_save(flags); | ||
2317 | if (!spin_trylock(&skge->tx_lock)) { | 2312 | if (!spin_trylock(&skge->tx_lock)) { |
2318 | /* Collision - tell upper layer to requeue */ | 2313 | /* Collision - tell upper layer to requeue */ |
2319 | local_irq_restore(flags); | 2314 | return NETDEV_TX_LOCKED; |
2320 | return NETDEV_TX_LOCKED; | 2315 | } |
2321 | } | ||
2322 | 2316 | ||
2323 | if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { | 2317 | if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { |
2324 | if (!netif_queue_stopped(dev)) { | 2318 | if (!netif_queue_stopped(dev)) { |
@@ -2327,7 +2321,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
2327 | printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", | 2321 | printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", |
2328 | dev->name); | 2322 | dev->name); |
2329 | } | 2323 | } |
2330 | spin_unlock_irqrestore(&skge->tx_lock, flags); | 2324 | spin_unlock(&skge->tx_lock); |
2331 | return NETDEV_TX_BUSY; | 2325 | return NETDEV_TX_BUSY; |
2332 | } | 2326 | } |
2333 | 2327 | ||
@@ -2402,8 +2396,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
2402 | netif_stop_queue(dev); | 2396 | netif_stop_queue(dev); |
2403 | } | 2397 | } |
2404 | 2398 | ||
2399 | mmiowb(); | ||
2400 | spin_unlock(&skge->tx_lock); | ||
2401 | |||
2405 | dev->trans_start = jiffies; | 2402 | dev->trans_start = jiffies; |
2406 | spin_unlock_irqrestore(&skge->tx_lock, flags); | ||
2407 | 2403 | ||
2408 | return NETDEV_TX_OK; | 2404 | return NETDEV_TX_OK; |
2409 | } | 2405 | } |
@@ -2416,7 +2412,7 @@ static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e) | |||
2416 | pci_unmap_addr(e, mapaddr), | 2412 | pci_unmap_addr(e, mapaddr), |
2417 | pci_unmap_len(e, maplen), | 2413 | pci_unmap_len(e, maplen), |
2418 | PCI_DMA_TODEVICE); | 2414 | PCI_DMA_TODEVICE); |
2419 | dev_kfree_skb_any(e->skb); | 2415 | dev_kfree_skb(e->skb); |
2420 | e->skb = NULL; | 2416 | e->skb = NULL; |
2421 | } else { | 2417 | } else { |
2422 | pci_unmap_page(hw->pdev, | 2418 | pci_unmap_page(hw->pdev, |
@@ -2430,15 +2426,14 @@ static void skge_tx_clean(struct skge_port *skge) | |||
2430 | { | 2426 | { |
2431 | struct skge_ring *ring = &skge->tx_ring; | 2427 | struct skge_ring *ring = &skge->tx_ring; |
2432 | struct skge_element *e; | 2428 | struct skge_element *e; |
2433 | unsigned long flags; | ||
2434 | 2429 | ||
2435 | spin_lock_irqsave(&skge->tx_lock, flags); | 2430 | spin_lock_bh(&skge->tx_lock); |
2436 | for (e = ring->to_clean; e != ring->to_use; e = e->next) { | 2431 | for (e = ring->to_clean; e != ring->to_use; e = e->next) { |
2437 | ++skge->tx_avail; | 2432 | ++skge->tx_avail; |
2438 | skge_tx_free(skge->hw, e); | 2433 | skge_tx_free(skge->hw, e); |
2439 | } | 2434 | } |
2440 | ring->to_clean = e; | 2435 | ring->to_clean = e; |
2441 | spin_unlock_irqrestore(&skge->tx_lock, flags); | 2436 | spin_unlock_bh(&skge->tx_lock); |
2442 | } | 2437 | } |
2443 | 2438 | ||
2444 | static void skge_tx_timeout(struct net_device *dev) | 2439 | static void skge_tx_timeout(struct net_device *dev) |
@@ -2663,6 +2658,37 @@ resubmit: | |||
2663 | return NULL; | 2658 | return NULL; |
2664 | } | 2659 | } |
2665 | 2660 | ||
2661 | static void skge_tx_done(struct skge_port *skge) | ||
2662 | { | ||
2663 | struct skge_ring *ring = &skge->tx_ring; | ||
2664 | struct skge_element *e; | ||
2665 | |||
2666 | spin_lock(&skge->tx_lock); | ||
2667 | for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) { | ||
2668 | struct skge_tx_desc *td = e->desc; | ||
2669 | u32 control; | ||
2670 | |||
2671 | rmb(); | ||
2672 | control = td->control; | ||
2673 | if (control & BMU_OWN) | ||
2674 | break; | ||
2675 | |||
2676 | if (unlikely(netif_msg_tx_done(skge))) | ||
2677 | printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n", | ||
2678 | skge->netdev->name, e - ring->start, td->status); | ||
2679 | |||
2680 | skge_tx_free(skge->hw, e); | ||
2681 | e->skb = NULL; | ||
2682 | ++skge->tx_avail; | ||
2683 | } | ||
2684 | ring->to_clean = e; | ||
2685 | skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); | ||
2686 | |||
2687 | if (skge->tx_avail > MAX_SKB_FRAGS + 1) | ||
2688 | netif_wake_queue(skge->netdev); | ||
2689 | |||
2690 | spin_unlock(&skge->tx_lock); | ||
2691 | } | ||
2666 | 2692 | ||
2667 | static int skge_poll(struct net_device *dev, int *budget) | 2693 | static int skge_poll(struct net_device *dev, int *budget) |
2668 | { | 2694 | { |
@@ -2670,8 +2696,10 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2670 | struct skge_hw *hw = skge->hw; | 2696 | struct skge_hw *hw = skge->hw; |
2671 | struct skge_ring *ring = &skge->rx_ring; | 2697 | struct skge_ring *ring = &skge->rx_ring; |
2672 | struct skge_element *e; | 2698 | struct skge_element *e; |
2673 | unsigned int to_do = min(dev->quota, *budget); | 2699 | int to_do = min(dev->quota, *budget); |
2674 | unsigned int work_done = 0; | 2700 | int work_done = 0; |
2701 | |||
2702 | skge_tx_done(skge); | ||
2675 | 2703 | ||
2676 | for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { | 2704 | for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { |
2677 | struct skge_rx_desc *rd = e->desc; | 2705 | struct skge_rx_desc *rd = e->desc; |
@@ -2683,8 +2711,8 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2683 | if (control & BMU_OWN) | 2711 | if (control & BMU_OWN) |
2684 | break; | 2712 | break; |
2685 | 2713 | ||
2686 | skb = skge_rx_get(skge, e, control, rd->status, | 2714 | skb = skge_rx_get(skge, e, control, rd->status, |
2687 | le16_to_cpu(rd->csum2)); | 2715 | le16_to_cpu(rd->csum2)); |
2688 | if (likely(skb)) { | 2716 | if (likely(skb)) { |
2689 | dev->last_rx = jiffies; | 2717 | dev->last_rx = jiffies; |
2690 | netif_receive_skb(skb); | 2718 | netif_receive_skb(skb); |
@@ -2705,49 +2733,15 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2705 | if (work_done >= to_do) | 2733 | if (work_done >= to_do) |
2706 | return 1; /* not done */ | 2734 | return 1; /* not done */ |
2707 | 2735 | ||
2708 | spin_lock_irq(&hw->hw_lock); | 2736 | netif_rx_complete(dev); |
2709 | __netif_rx_complete(dev); | 2737 | mmiowb(); |
2710 | hw->intr_mask |= portirqmask[skge->port]; | 2738 | |
2739 | hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F); | ||
2711 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2740 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2712 | spin_unlock_irq(&hw->hw_lock); | ||
2713 | 2741 | ||
2714 | return 0; | 2742 | return 0; |
2715 | } | 2743 | } |
2716 | 2744 | ||
2717 | static inline void skge_tx_intr(struct net_device *dev) | ||
2718 | { | ||
2719 | struct skge_port *skge = netdev_priv(dev); | ||
2720 | struct skge_hw *hw = skge->hw; | ||
2721 | struct skge_ring *ring = &skge->tx_ring; | ||
2722 | struct skge_element *e; | ||
2723 | |||
2724 | spin_lock(&skge->tx_lock); | ||
2725 | for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) { | ||
2726 | struct skge_tx_desc *td = e->desc; | ||
2727 | u32 control; | ||
2728 | |||
2729 | rmb(); | ||
2730 | control = td->control; | ||
2731 | if (control & BMU_OWN) | ||
2732 | break; | ||
2733 | |||
2734 | if (unlikely(netif_msg_tx_done(skge))) | ||
2735 | printk(KERN_DEBUG PFX "%s: tx done slot %td status 0x%x\n", | ||
2736 | dev->name, e - ring->start, td->status); | ||
2737 | |||
2738 | skge_tx_free(hw, e); | ||
2739 | e->skb = NULL; | ||
2740 | ++skge->tx_avail; | ||
2741 | } | ||
2742 | ring->to_clean = e; | ||
2743 | skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); | ||
2744 | |||
2745 | if (skge->tx_avail > MAX_SKB_FRAGS + 1) | ||
2746 | netif_wake_queue(dev); | ||
2747 | |||
2748 | spin_unlock(&skge->tx_lock); | ||
2749 | } | ||
2750 | |||
2751 | /* Parity errors seem to happen when Genesis is connected to a switch | 2745 | /* Parity errors seem to happen when Genesis is connected to a switch |
2752 | * with no other ports present. Heartbeat error?? | 2746 | * with no other ports present. Heartbeat error?? |
2753 | */ | 2747 | */ |
@@ -2770,17 +2764,6 @@ static void skge_mac_parity(struct skge_hw *hw, int port) | |||
2770 | ? GMF_CLI_TX_FC : GMF_CLI_TX_PE); | 2764 | ? GMF_CLI_TX_FC : GMF_CLI_TX_PE); |
2771 | } | 2765 | } |
2772 | 2766 | ||
2773 | static void skge_pci_clear(struct skge_hw *hw) | ||
2774 | { | ||
2775 | u16 status; | ||
2776 | |||
2777 | pci_read_config_word(hw->pdev, PCI_STATUS, &status); | ||
2778 | skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2779 | pci_write_config_word(hw->pdev, PCI_STATUS, | ||
2780 | status | PCI_STATUS_ERROR_BITS); | ||
2781 | skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2782 | } | ||
2783 | |||
2784 | static void skge_mac_intr(struct skge_hw *hw, int port) | 2767 | static void skge_mac_intr(struct skge_hw *hw, int port) |
2785 | { | 2768 | { |
2786 | if (hw->chip_id == CHIP_ID_GENESIS) | 2769 | if (hw->chip_id == CHIP_ID_GENESIS) |
@@ -2822,23 +2805,39 @@ static void skge_error_irq(struct skge_hw *hw) | |||
2822 | if (hwstatus & IS_M2_PAR_ERR) | 2805 | if (hwstatus & IS_M2_PAR_ERR) |
2823 | skge_mac_parity(hw, 1); | 2806 | skge_mac_parity(hw, 1); |
2824 | 2807 | ||
2825 | if (hwstatus & IS_R1_PAR_ERR) | 2808 | if (hwstatus & IS_R1_PAR_ERR) { |
2809 | printk(KERN_ERR PFX "%s: receive queue parity error\n", | ||
2810 | hw->dev[0]->name); | ||
2826 | skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); | 2811 | skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); |
2812 | } | ||
2827 | 2813 | ||
2828 | if (hwstatus & IS_R2_PAR_ERR) | 2814 | if (hwstatus & IS_R2_PAR_ERR) { |
2815 | printk(KERN_ERR PFX "%s: receive queue parity error\n", | ||
2816 | hw->dev[1]->name); | ||
2829 | skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); | 2817 | skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); |
2818 | } | ||
2830 | 2819 | ||
2831 | if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { | 2820 | if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { |
2832 | printk(KERN_ERR PFX "hardware error detected (status 0x%x)\n", | 2821 | u16 pci_status, pci_cmd; |
2833 | hwstatus); | 2822 | |
2823 | pci_read_config_word(hw->pdev, PCI_COMMAND, &pci_cmd); | ||
2824 | pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); | ||
2834 | 2825 | ||
2835 | skge_pci_clear(hw); | 2826 | printk(KERN_ERR PFX "%s: PCI error cmd=%#x status=%#x\n", |
2827 | pci_name(hw->pdev), pci_cmd, pci_status); | ||
2828 | |||
2829 | /* Write the error bits back to clear them. */ | ||
2830 | pci_status &= PCI_STATUS_ERROR_BITS; | ||
2831 | skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); | ||
2832 | pci_write_config_word(hw->pdev, PCI_COMMAND, | ||
2833 | pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); | ||
2834 | pci_write_config_word(hw->pdev, PCI_STATUS, pci_status); | ||
2835 | skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
2836 | 2836 | ||
2837 | /* if error still set then just ignore it */ | 2837 | /* if error still set then just ignore it */ |
2838 | hwstatus = skge_read32(hw, B0_HWE_ISRC); | 2838 | hwstatus = skge_read32(hw, B0_HWE_ISRC); |
2839 | if (hwstatus & IS_IRQ_STAT) { | 2839 | if (hwstatus & IS_IRQ_STAT) { |
2840 | pr_debug("IRQ status %x: still set ignoring hardware errors\n", | 2840 | printk(KERN_INFO PFX "unable to clear error (so ignoring them)\n"); |
2841 | hwstatus); | ||
2842 | hw->intr_mask &= ~IS_HW_ERR; | 2841 | hw->intr_mask &= ~IS_HW_ERR; |
2843 | } | 2842 | } |
2844 | } | 2843 | } |
@@ -2855,12 +2854,11 @@ static void skge_extirq(unsigned long data) | |||
2855 | int port; | 2854 | int port; |
2856 | 2855 | ||
2857 | spin_lock(&hw->phy_lock); | 2856 | spin_lock(&hw->phy_lock); |
2858 | for (port = 0; port < 2; port++) { | 2857 | for (port = 0; port < hw->ports; port++) { |
2859 | struct net_device *dev = hw->dev[port]; | 2858 | struct net_device *dev = hw->dev[port]; |
2859 | struct skge_port *skge = netdev_priv(dev); | ||
2860 | 2860 | ||
2861 | if (dev && netif_running(dev)) { | 2861 | if (netif_running(dev)) { |
2862 | struct skge_port *skge = netdev_priv(dev); | ||
2863 | |||
2864 | if (hw->chip_id != CHIP_ID_GENESIS) | 2862 | if (hw->chip_id != CHIP_ID_GENESIS) |
2865 | yukon_phy_intr(skge); | 2863 | yukon_phy_intr(skge); |
2866 | else | 2864 | else |
@@ -2869,38 +2867,39 @@ static void skge_extirq(unsigned long data) | |||
2869 | } | 2867 | } |
2870 | spin_unlock(&hw->phy_lock); | 2868 | spin_unlock(&hw->phy_lock); |
2871 | 2869 | ||
2872 | spin_lock_irq(&hw->hw_lock); | ||
2873 | hw->intr_mask |= IS_EXT_REG; | 2870 | hw->intr_mask |= IS_EXT_REG; |
2874 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2871 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2875 | spin_unlock_irq(&hw->hw_lock); | ||
2876 | } | 2872 | } |
2877 | 2873 | ||
2878 | static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) | 2874 | static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) |
2879 | { | 2875 | { |
2880 | struct skge_hw *hw = dev_id; | 2876 | struct skge_hw *hw = dev_id; |
2881 | u32 status = skge_read32(hw, B0_SP_ISRC); | 2877 | u32 status; |
2882 | 2878 | ||
2883 | if (status == 0 || status == ~0) /* hotplug or shared irq */ | 2879 | /* Reading this register masks IRQ */ |
2880 | status = skge_read32(hw, B0_SP_ISRC); | ||
2881 | if (status == 0) | ||
2884 | return IRQ_NONE; | 2882 | return IRQ_NONE; |
2885 | 2883 | ||
2886 | spin_lock(&hw->hw_lock); | 2884 | if (status & IS_EXT_REG) { |
2887 | if (status & IS_R1_F) { | 2885 | hw->intr_mask &= ~IS_EXT_REG; |
2886 | tasklet_schedule(&hw->ext_tasklet); | ||
2887 | } | ||
2888 | |||
2889 | if (status & (IS_R1_F|IS_XA1_F)) { | ||
2888 | skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F); | 2890 | skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F); |
2889 | hw->intr_mask &= ~IS_R1_F; | 2891 | hw->intr_mask &= ~(IS_R1_F|IS_XA1_F); |
2890 | netif_rx_schedule(hw->dev[0]); | 2892 | netif_rx_schedule(hw->dev[0]); |
2891 | } | 2893 | } |
2892 | 2894 | ||
2893 | if (status & IS_R2_F) { | 2895 | if (status & (IS_R2_F|IS_XA2_F)) { |
2894 | skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F); | 2896 | skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F); |
2895 | hw->intr_mask &= ~IS_R2_F; | 2897 | hw->intr_mask &= ~(IS_R2_F|IS_XA2_F); |
2896 | netif_rx_schedule(hw->dev[1]); | 2898 | netif_rx_schedule(hw->dev[1]); |
2897 | } | 2899 | } |
2898 | 2900 | ||
2899 | if (status & IS_XA1_F) | 2901 | if (likely((status & hw->intr_mask) == 0)) |
2900 | skge_tx_intr(hw->dev[0]); | 2902 | return IRQ_HANDLED; |
2901 | |||
2902 | if (status & IS_XA2_F) | ||
2903 | skge_tx_intr(hw->dev[1]); | ||
2904 | 2903 | ||
2905 | if (status & IS_PA_TO_RX1) { | 2904 | if (status & IS_PA_TO_RX1) { |
2906 | struct skge_port *skge = netdev_priv(hw->dev[0]); | 2905 | struct skge_port *skge = netdev_priv(hw->dev[0]); |
@@ -2929,13 +2928,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) | |||
2929 | if (status & IS_HW_ERR) | 2928 | if (status & IS_HW_ERR) |
2930 | skge_error_irq(hw); | 2929 | skge_error_irq(hw); |
2931 | 2930 | ||
2932 | if (status & IS_EXT_REG) { | ||
2933 | hw->intr_mask &= ~IS_EXT_REG; | ||
2934 | tasklet_schedule(&hw->ext_tasklet); | ||
2935 | } | ||
2936 | |||
2937 | skge_write32(hw, B0_IMSK, hw->intr_mask); | 2931 | skge_write32(hw, B0_IMSK, hw->intr_mask); |
2938 | spin_unlock(&hw->hw_lock); | ||
2939 | 2932 | ||
2940 | return IRQ_HANDLED; | 2933 | return IRQ_HANDLED; |
2941 | } | 2934 | } |
@@ -3010,7 +3003,7 @@ static const char *skge_board_name(const struct skge_hw *hw) | |||
3010 | static int skge_reset(struct skge_hw *hw) | 3003 | static int skge_reset(struct skge_hw *hw) |
3011 | { | 3004 | { |
3012 | u32 reg; | 3005 | u32 reg; |
3013 | u16 ctst; | 3006 | u16 ctst, pci_status; |
3014 | u8 t8, mac_cfg, pmd_type, phy_type; | 3007 | u8 t8, mac_cfg, pmd_type, phy_type; |
3015 | int i; | 3008 | int i; |
3016 | 3009 | ||
@@ -3021,8 +3014,13 @@ static int skge_reset(struct skge_hw *hw) | |||
3021 | skge_write8(hw, B0_CTST, CS_RST_CLR); | 3014 | skge_write8(hw, B0_CTST, CS_RST_CLR); |
3022 | 3015 | ||
3023 | /* clear PCI errors, if any */ | 3016 | /* clear PCI errors, if any */ |
3024 | skge_pci_clear(hw); | 3017 | skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); |
3018 | skge_write8(hw, B2_TST_CTRL2, 0); | ||
3025 | 3019 | ||
3020 | pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); | ||
3021 | pci_write_config_word(hw->pdev, PCI_STATUS, | ||
3022 | pci_status | PCI_STATUS_ERROR_BITS); | ||
3023 | skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); | ||
3026 | skge_write8(hw, B0_CTST, CS_MRST_CLR); | 3024 | skge_write8(hw, B0_CTST, CS_MRST_CLR); |
3027 | 3025 | ||
3028 | /* restore CLK_RUN bits (for Yukon-Lite) */ | 3026 | /* restore CLK_RUN bits (for Yukon-Lite) */ |
@@ -3081,7 +3079,10 @@ static int skge_reset(struct skge_hw *hw) | |||
3081 | else | 3079 | else |
3082 | hw->ram_size = t8 * 4096; | 3080 | hw->ram_size = t8 * 4096; |
3083 | 3081 | ||
3084 | hw->intr_mask = IS_HW_ERR | IS_EXT_REG; | 3082 | hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1; |
3083 | if (hw->ports > 1) | ||
3084 | hw->intr_mask |= IS_PORT_2; | ||
3085 | |||
3085 | if (hw->chip_id == CHIP_ID_GENESIS) | 3086 | if (hw->chip_id == CHIP_ID_GENESIS) |
3086 | genesis_init(hw); | 3087 | genesis_init(hw); |
3087 | else { | 3088 | else { |
@@ -3251,13 +3252,15 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3251 | struct skge_hw *hw; | 3252 | struct skge_hw *hw; |
3252 | int err, using_dac = 0; | 3253 | int err, using_dac = 0; |
3253 | 3254 | ||
3254 | if ((err = pci_enable_device(pdev))) { | 3255 | err = pci_enable_device(pdev); |
3256 | if (err) { | ||
3255 | printk(KERN_ERR PFX "%s cannot enable PCI device\n", | 3257 | printk(KERN_ERR PFX "%s cannot enable PCI device\n", |
3256 | pci_name(pdev)); | 3258 | pci_name(pdev)); |
3257 | goto err_out; | 3259 | goto err_out; |
3258 | } | 3260 | } |
3259 | 3261 | ||
3260 | if ((err = pci_request_regions(pdev, DRV_NAME))) { | 3262 | err = pci_request_regions(pdev, DRV_NAME); |
3263 | if (err) { | ||
3261 | printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", | 3264 | printk(KERN_ERR PFX "%s cannot obtain PCI resources\n", |
3262 | pci_name(pdev)); | 3265 | pci_name(pdev)); |
3263 | goto err_out_disable_pdev; | 3266 | goto err_out_disable_pdev; |
@@ -3265,22 +3268,18 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3265 | 3268 | ||
3266 | pci_set_master(pdev); | 3269 | pci_set_master(pdev); |
3267 | 3270 | ||
3268 | if (sizeof(dma_addr_t) > sizeof(u32) && | 3271 | if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) { |
3269 | !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) { | ||
3270 | using_dac = 1; | 3272 | using_dac = 1; |
3271 | err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | 3273 | err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); |
3272 | if (err < 0) { | 3274 | } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { |
3273 | printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA " | 3275 | using_dac = 0; |
3274 | "for consistent allocations\n", pci_name(pdev)); | 3276 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); |
3275 | goto err_out_free_regions; | 3277 | } |
3276 | } | 3278 | |
3277 | } else { | 3279 | if (err) { |
3278 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | 3280 | printk(KERN_ERR PFX "%s no usable DMA configuration\n", |
3279 | if (err) { | 3281 | pci_name(pdev)); |
3280 | printk(KERN_ERR PFX "%s no usable DMA configuration\n", | 3282 | goto err_out_free_regions; |
3281 | pci_name(pdev)); | ||
3282 | goto err_out_free_regions; | ||
3283 | } | ||
3284 | } | 3283 | } |
3285 | 3284 | ||
3286 | #ifdef __BIG_ENDIAN | 3285 | #ifdef __BIG_ENDIAN |
@@ -3304,7 +3303,6 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3304 | 3303 | ||
3305 | hw->pdev = pdev; | 3304 | hw->pdev = pdev; |
3306 | spin_lock_init(&hw->phy_lock); | 3305 | spin_lock_init(&hw->phy_lock); |
3307 | spin_lock_init(&hw->hw_lock); | ||
3308 | tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); | 3306 | tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); |
3309 | 3307 | ||
3310 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); | 3308 | hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); |
@@ -3314,7 +3312,8 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3314 | goto err_out_free_hw; | 3312 | goto err_out_free_hw; |
3315 | } | 3313 | } |
3316 | 3314 | ||
3317 | if ((err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw))) { | 3315 | err = request_irq(pdev->irq, skge_intr, SA_SHIRQ, DRV_NAME, hw); |
3316 | if (err) { | ||
3318 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", | 3317 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", |
3319 | pci_name(pdev), pdev->irq); | 3318 | pci_name(pdev), pdev->irq); |
3320 | goto err_out_iounmap; | 3319 | goto err_out_iounmap; |
@@ -3332,7 +3331,8 @@ static int __devinit skge_probe(struct pci_dev *pdev, | |||
3332 | if ((dev = skge_devinit(hw, 0, using_dac)) == NULL) | 3331 | if ((dev = skge_devinit(hw, 0, using_dac)) == NULL) |
3333 | goto err_out_led_off; | 3332 | goto err_out_led_off; |
3334 | 3333 | ||
3335 | if ((err = register_netdev(dev))) { | 3334 | err = register_netdev(dev); |
3335 | if (err) { | ||
3336 | printk(KERN_ERR PFX "%s: cannot register net device\n", | 3336 | printk(KERN_ERR PFX "%s: cannot register net device\n", |
3337 | pci_name(pdev)); | 3337 | pci_name(pdev)); |
3338 | goto err_out_free_netdev; | 3338 | goto err_out_free_netdev; |
@@ -3387,7 +3387,6 @@ static void __devexit skge_remove(struct pci_dev *pdev) | |||
3387 | 3387 | ||
3388 | skge_write32(hw, B0_IMSK, 0); | 3388 | skge_write32(hw, B0_IMSK, 0); |
3389 | skge_write16(hw, B0_LED, LED_STAT_OFF); | 3389 | skge_write16(hw, B0_LED, LED_STAT_OFF); |
3390 | skge_pci_clear(hw); | ||
3391 | skge_write8(hw, B0_CTST, CS_RST_SET); | 3390 | skge_write8(hw, B0_CTST, CS_RST_SET); |
3392 | 3391 | ||
3393 | tasklet_kill(&hw->ext_tasklet); | 3392 | tasklet_kill(&hw->ext_tasklet); |
diff --git a/drivers/net/skge.h b/drivers/net/skge.h index 941f12a333b6..2efdacc290e5 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h | |||
@@ -2402,7 +2402,6 @@ struct skge_hw { | |||
2402 | 2402 | ||
2403 | struct tasklet_struct ext_tasklet; | 2403 | struct tasklet_struct ext_tasklet; |
2404 | spinlock_t phy_lock; | 2404 | spinlock_t phy_lock; |
2405 | spinlock_t hw_lock; | ||
2406 | }; | 2405 | }; |
2407 | 2406 | ||
2408 | enum { | 2407 | enum { |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 73260364cba3..f08fe6c884b2 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -51,7 +51,7 @@ | |||
51 | #include "sky2.h" | 51 | #include "sky2.h" |
52 | 52 | ||
53 | #define DRV_NAME "sky2" | 53 | #define DRV_NAME "sky2" |
54 | #define DRV_VERSION "0.15" | 54 | #define DRV_VERSION "1.1" |
55 | #define PFX DRV_NAME " " | 55 | #define PFX DRV_NAME " " |
56 | 56 | ||
57 | /* | 57 | /* |
@@ -61,10 +61,6 @@ | |||
61 | * a receive requires one (or two if using 64 bit dma). | 61 | * a receive requires one (or two if using 64 bit dma). |
62 | */ | 62 | */ |
63 | 63 | ||
64 | #define is_ec_a1(hw) \ | ||
65 | unlikely((hw)->chip_id == CHIP_ID_YUKON_EC && \ | ||
66 | (hw)->chip_rev == CHIP_REV_YU_EC_A1) | ||
67 | |||
68 | #define RX_LE_SIZE 512 | 64 | #define RX_LE_SIZE 512 |
69 | #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) | 65 | #define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le)) |
70 | #define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) | 66 | #define RX_MAX_PENDING (RX_LE_SIZE/2 - 2) |
@@ -96,6 +92,10 @@ static int copybreak __read_mostly = 256; | |||
96 | module_param(copybreak, int, 0); | 92 | module_param(copybreak, int, 0); |
97 | MODULE_PARM_DESC(copybreak, "Receive copy threshold"); | 93 | MODULE_PARM_DESC(copybreak, "Receive copy threshold"); |
98 | 94 | ||
95 | static int disable_msi = 0; | ||
96 | module_param(disable_msi, int, 0); | ||
97 | MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); | ||
98 | |||
99 | static const struct pci_device_id sky2_id_table[] = { | 99 | static const struct pci_device_id sky2_id_table[] = { |
100 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, | 100 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, |
101 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, | 101 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, |
@@ -504,9 +504,9 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
504 | /* Force a renegotiation */ | 504 | /* Force a renegotiation */ |
505 | static void sky2_phy_reinit(struct sky2_port *sky2) | 505 | static void sky2_phy_reinit(struct sky2_port *sky2) |
506 | { | 506 | { |
507 | down(&sky2->phy_sema); | 507 | spin_lock_bh(&sky2->phy_lock); |
508 | sky2_phy_init(sky2->hw, sky2->port); | 508 | sky2_phy_init(sky2->hw, sky2->port); |
509 | up(&sky2->phy_sema); | 509 | spin_unlock_bh(&sky2->phy_lock); |
510 | } | 510 | } |
511 | 511 | ||
512 | static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | 512 | static void sky2_mac_init(struct sky2_hw *hw, unsigned port) |
@@ -571,9 +571,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port) | |||
571 | 571 | ||
572 | sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); | 572 | sky2_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); |
573 | 573 | ||
574 | down(&sky2->phy_sema); | 574 | spin_lock_bh(&sky2->phy_lock); |
575 | sky2_phy_init(hw, port); | 575 | sky2_phy_init(hw, port); |
576 | up(&sky2->phy_sema); | 576 | spin_unlock_bh(&sky2->phy_lock); |
577 | 577 | ||
578 | /* MIB clear */ | 578 | /* MIB clear */ |
579 | reg = gma_read16(hw, port, GM_PHY_ADDR); | 579 | reg = gma_read16(hw, port, GM_PHY_ADDR); |
@@ -725,37 +725,11 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) | |||
725 | return le; | 725 | return le; |
726 | } | 726 | } |
727 | 727 | ||
728 | /* | 728 | /* Update chip's next pointer */ |
729 | * This is a workaround code taken from SysKonnect sk98lin driver | 729 | static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx) |
730 | * to deal with chip bug on Yukon EC rev 0 in the wraparound case. | ||
731 | */ | ||
732 | static void sky2_put_idx(struct sky2_hw *hw, unsigned q, | ||
733 | u16 idx, u16 *last, u16 size) | ||
734 | { | 730 | { |
735 | wmb(); | 731 | wmb(); |
736 | if (is_ec_a1(hw) && idx < *last) { | 732 | sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); |
737 | u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); | ||
738 | |||
739 | if (hwget == 0) { | ||
740 | /* Start prefetching again */ | ||
741 | sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 0xe0); | ||
742 | goto setnew; | ||
743 | } | ||
744 | |||
745 | if (hwget == size - 1) { | ||
746 | /* set watermark to one list element */ | ||
747 | sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8); | ||
748 | |||
749 | /* set put index to first list element */ | ||
750 | sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0); | ||
751 | } else /* have hardware go to end of list */ | ||
752 | sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), | ||
753 | size - 1); | ||
754 | } else { | ||
755 | setnew: | ||
756 | sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); | ||
757 | } | ||
758 | *last = idx; | ||
759 | mmiowb(); | 733 | mmiowb(); |
760 | } | 734 | } |
761 | 735 | ||
@@ -878,7 +852,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
878 | if (!netif_running(dev)) | 852 | if (!netif_running(dev)) |
879 | return -ENODEV; /* Phy still in reset */ | 853 | return -ENODEV; /* Phy still in reset */ |
880 | 854 | ||
881 | switch(cmd) { | 855 | switch (cmd) { |
882 | case SIOCGMIIPHY: | 856 | case SIOCGMIIPHY: |
883 | data->phy_id = PHY_ADDR_MARV; | 857 | data->phy_id = PHY_ADDR_MARV; |
884 | 858 | ||
@@ -886,9 +860,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
886 | case SIOCGMIIREG: { | 860 | case SIOCGMIIREG: { |
887 | u16 val = 0; | 861 | u16 val = 0; |
888 | 862 | ||
889 | down(&sky2->phy_sema); | 863 | spin_lock_bh(&sky2->phy_lock); |
890 | err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); | 864 | err = __gm_phy_read(hw, sky2->port, data->reg_num & 0x1f, &val); |
891 | up(&sky2->phy_sema); | 865 | spin_unlock_bh(&sky2->phy_lock); |
892 | 866 | ||
893 | data->val_out = val; | 867 | data->val_out = val; |
894 | break; | 868 | break; |
@@ -898,10 +872,10 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
898 | if (!capable(CAP_NET_ADMIN)) | 872 | if (!capable(CAP_NET_ADMIN)) |
899 | return -EPERM; | 873 | return -EPERM; |
900 | 874 | ||
901 | down(&sky2->phy_sema); | 875 | spin_lock_bh(&sky2->phy_lock); |
902 | err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, | 876 | err = gm_phy_write(hw, sky2->port, data->reg_num & 0x1f, |
903 | data->val_in); | 877 | data->val_in); |
904 | up(&sky2->phy_sema); | 878 | spin_unlock_bh(&sky2->phy_lock); |
905 | break; | 879 | break; |
906 | } | 880 | } |
907 | return err; | 881 | return err; |
@@ -1001,7 +975,6 @@ static int sky2_rx_start(struct sky2_port *sky2) | |||
1001 | 975 | ||
1002 | /* Tell chip about available buffers */ | 976 | /* Tell chip about available buffers */ |
1003 | sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); | 977 | sky2_write16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX), sky2->rx_put); |
1004 | sky2->rx_last_put = sky2_read16(hw, Y2_QADDR(rxq, PREF_UNIT_PUT_IDX)); | ||
1005 | return 0; | 978 | return 0; |
1006 | nomem: | 979 | nomem: |
1007 | sky2_rx_clean(sky2); | 980 | sky2_rx_clean(sky2); |
@@ -1014,7 +987,7 @@ static int sky2_up(struct net_device *dev) | |||
1014 | struct sky2_port *sky2 = netdev_priv(dev); | 987 | struct sky2_port *sky2 = netdev_priv(dev); |
1015 | struct sky2_hw *hw = sky2->hw; | 988 | struct sky2_hw *hw = sky2->hw; |
1016 | unsigned port = sky2->port; | 989 | unsigned port = sky2->port; |
1017 | u32 ramsize, rxspace; | 990 | u32 ramsize, rxspace, imask; |
1018 | int err = -ENOMEM; | 991 | int err = -ENOMEM; |
1019 | 992 | ||
1020 | if (netif_msg_ifup(sky2)) | 993 | if (netif_msg_ifup(sky2)) |
@@ -1079,10 +1052,10 @@ static int sky2_up(struct net_device *dev) | |||
1079 | goto err_out; | 1052 | goto err_out; |
1080 | 1053 | ||
1081 | /* Enable interrupts from phy/mac for port */ | 1054 | /* Enable interrupts from phy/mac for port */ |
1082 | spin_lock_irq(&hw->hw_lock); | 1055 | imask = sky2_read32(hw, B0_IMSK); |
1083 | hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; | 1056 | imask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; |
1084 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | 1057 | sky2_write32(hw, B0_IMSK, imask); |
1085 | spin_unlock_irq(&hw->hw_lock); | 1058 | |
1086 | return 0; | 1059 | return 0; |
1087 | 1060 | ||
1088 | err_out: | 1061 | err_out: |
@@ -1299,8 +1272,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) | |||
1299 | netif_stop_queue(dev); | 1272 | netif_stop_queue(dev); |
1300 | } | 1273 | } |
1301 | 1274 | ||
1302 | sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod, | 1275 | sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod); |
1303 | &sky2->tx_last_put, TX_RING_SIZE); | ||
1304 | 1276 | ||
1305 | out_unlock: | 1277 | out_unlock: |
1306 | spin_unlock(&sky2->tx_lock); | 1278 | spin_unlock(&sky2->tx_lock); |
@@ -1332,7 +1304,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
1332 | struct tx_ring_info *re = sky2->tx_ring + put; | 1304 | struct tx_ring_info *re = sky2->tx_ring + put; |
1333 | struct sk_buff *skb = re->skb; | 1305 | struct sk_buff *skb = re->skb; |
1334 | 1306 | ||
1335 | nxt = re->idx; | 1307 | nxt = re->idx; |
1336 | BUG_ON(nxt >= TX_RING_SIZE); | 1308 | BUG_ON(nxt >= TX_RING_SIZE); |
1337 | prefetch(sky2->tx_ring + nxt); | 1309 | prefetch(sky2->tx_ring + nxt); |
1338 | 1310 | ||
@@ -1348,7 +1320,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
1348 | struct tx_ring_info *fre; | 1320 | struct tx_ring_info *fre; |
1349 | fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; | 1321 | fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE; |
1350 | pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), | 1322 | pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr), |
1351 | skb_shinfo(skb)->frags[i].size, | 1323 | skb_shinfo(skb)->frags[i].size, |
1352 | PCI_DMA_TODEVICE); | 1324 | PCI_DMA_TODEVICE); |
1353 | } | 1325 | } |
1354 | 1326 | ||
@@ -1356,7 +1328,7 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) | |||
1356 | } | 1328 | } |
1357 | 1329 | ||
1358 | sky2->tx_cons = put; | 1330 | sky2->tx_cons = put; |
1359 | if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) | 1331 | if (tx_avail(sky2) > MAX_SKB_TX_LE) |
1360 | netif_wake_queue(dev); | 1332 | netif_wake_queue(dev); |
1361 | } | 1333 | } |
1362 | 1334 | ||
@@ -1375,6 +1347,7 @@ static int sky2_down(struct net_device *dev) | |||
1375 | struct sky2_hw *hw = sky2->hw; | 1347 | struct sky2_hw *hw = sky2->hw; |
1376 | unsigned port = sky2->port; | 1348 | unsigned port = sky2->port; |
1377 | u16 ctrl; | 1349 | u16 ctrl; |
1350 | u32 imask; | ||
1378 | 1351 | ||
1379 | /* Never really got started! */ | 1352 | /* Never really got started! */ |
1380 | if (!sky2->tx_le) | 1353 | if (!sky2->tx_le) |
@@ -1386,14 +1359,6 @@ static int sky2_down(struct net_device *dev) | |||
1386 | /* Stop more packets from being queued */ | 1359 | /* Stop more packets from being queued */ |
1387 | netif_stop_queue(dev); | 1360 | netif_stop_queue(dev); |
1388 | 1361 | ||
1389 | /* Disable port IRQ */ | ||
1390 | spin_lock_irq(&hw->hw_lock); | ||
1391 | hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); | ||
1392 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | ||
1393 | spin_unlock_irq(&hw->hw_lock); | ||
1394 | |||
1395 | flush_scheduled_work(); | ||
1396 | |||
1397 | sky2_phy_reset(hw, port); | 1362 | sky2_phy_reset(hw, port); |
1398 | 1363 | ||
1399 | /* Stop transmitter */ | 1364 | /* Stop transmitter */ |
@@ -1437,6 +1402,11 @@ static int sky2_down(struct net_device *dev) | |||
1437 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); | 1402 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); |
1438 | sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); | 1403 | sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); |
1439 | 1404 | ||
1405 | /* Disable port IRQ */ | ||
1406 | imask = sky2_read32(hw, B0_IMSK); | ||
1407 | imask &= ~(sky2->port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; | ||
1408 | sky2_write32(hw, B0_IMSK, imask); | ||
1409 | |||
1440 | /* turn off LED's */ | 1410 | /* turn off LED's */ |
1441 | sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); | 1411 | sky2_write16(hw, B0_Y2LED, LED_STAT_OFF); |
1442 | 1412 | ||
@@ -1631,20 +1601,19 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux) | |||
1631 | return 0; | 1601 | return 0; |
1632 | } | 1602 | } |
1633 | 1603 | ||
1634 | /* | 1604 | /* Interrupt from PHY */ |
1635 | * Interrupt from PHY are handled outside of interrupt context | 1605 | static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) |
1636 | * because accessing phy registers requires spin wait which might | ||
1637 | * cause excess interrupt latency. | ||
1638 | */ | ||
1639 | static void sky2_phy_task(void *arg) | ||
1640 | { | 1606 | { |
1641 | struct sky2_port *sky2 = arg; | 1607 | struct net_device *dev = hw->dev[port]; |
1642 | struct sky2_hw *hw = sky2->hw; | 1608 | struct sky2_port *sky2 = netdev_priv(dev); |
1643 | u16 istatus, phystat; | 1609 | u16 istatus, phystat; |
1644 | 1610 | ||
1645 | down(&sky2->phy_sema); | 1611 | spin_lock(&sky2->phy_lock); |
1646 | istatus = gm_phy_read(hw, sky2->port, PHY_MARV_INT_STAT); | 1612 | istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); |
1647 | phystat = gm_phy_read(hw, sky2->port, PHY_MARV_PHY_STAT); | 1613 | phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); |
1614 | |||
1615 | if (!netif_running(dev)) | ||
1616 | goto out; | ||
1648 | 1617 | ||
1649 | if (netif_msg_intr(sky2)) | 1618 | if (netif_msg_intr(sky2)) |
1650 | printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n", | 1619 | printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n", |
@@ -1670,12 +1639,7 @@ static void sky2_phy_task(void *arg) | |||
1670 | sky2_link_down(sky2); | 1639 | sky2_link_down(sky2); |
1671 | } | 1640 | } |
1672 | out: | 1641 | out: |
1673 | up(&sky2->phy_sema); | 1642 | spin_unlock(&sky2->phy_lock); |
1674 | |||
1675 | spin_lock_irq(&hw->hw_lock); | ||
1676 | hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2; | ||
1677 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | ||
1678 | spin_unlock_irq(&hw->hw_lock); | ||
1679 | } | 1643 | } |
1680 | 1644 | ||
1681 | 1645 | ||
@@ -1687,31 +1651,40 @@ static void sky2_tx_timeout(struct net_device *dev) | |||
1687 | struct sky2_port *sky2 = netdev_priv(dev); | 1651 | struct sky2_port *sky2 = netdev_priv(dev); |
1688 | struct sky2_hw *hw = sky2->hw; | 1652 | struct sky2_hw *hw = sky2->hw; |
1689 | unsigned txq = txqaddr[sky2->port]; | 1653 | unsigned txq = txqaddr[sky2->port]; |
1690 | u16 ridx; | 1654 | u16 report, done; |
1691 | |||
1692 | /* Maybe we just missed an status interrupt */ | ||
1693 | spin_lock(&sky2->tx_lock); | ||
1694 | ridx = sky2_read16(hw, | ||
1695 | sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX); | ||
1696 | sky2_tx_complete(sky2, ridx); | ||
1697 | spin_unlock(&sky2->tx_lock); | ||
1698 | |||
1699 | if (!netif_queue_stopped(dev)) { | ||
1700 | if (net_ratelimit()) | ||
1701 | pr_info(PFX "transmit interrupt missed? recovered\n"); | ||
1702 | return; | ||
1703 | } | ||
1704 | 1655 | ||
1705 | if (netif_msg_timer(sky2)) | 1656 | if (netif_msg_timer(sky2)) |
1706 | printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); | 1657 | printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); |
1707 | 1658 | ||
1708 | sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); | 1659 | report = sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX); |
1709 | sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); | 1660 | done = sky2_read16(hw, Q_ADDR(txq, Q_DONE)); |
1710 | 1661 | ||
1711 | sky2_tx_clean(sky2); | 1662 | printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n", |
1663 | dev->name, | ||
1664 | sky2->tx_cons, sky2->tx_prod, report, done); | ||
1712 | 1665 | ||
1713 | sky2_qset(hw, txq); | 1666 | if (report != done) { |
1714 | sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); | 1667 | printk(KERN_INFO PFX "status burst pending (irq moderation?)\n"); |
1668 | |||
1669 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); | ||
1670 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); | ||
1671 | } else if (report != sky2->tx_cons) { | ||
1672 | printk(KERN_INFO PFX "status report lost?\n"); | ||
1673 | |||
1674 | spin_lock_bh(&sky2->tx_lock); | ||
1675 | sky2_tx_complete(sky2, report); | ||
1676 | spin_unlock_bh(&sky2->tx_lock); | ||
1677 | } else { | ||
1678 | printk(KERN_INFO PFX "hardware hung? flushing\n"); | ||
1679 | |||
1680 | sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); | ||
1681 | sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); | ||
1682 | |||
1683 | sky2_tx_clean(sky2); | ||
1684 | |||
1685 | sky2_qset(hw, txq); | ||
1686 | sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); | ||
1687 | } | ||
1715 | } | 1688 | } |
1716 | 1689 | ||
1717 | 1690 | ||
@@ -1730,6 +1703,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1730 | struct sky2_hw *hw = sky2->hw; | 1703 | struct sky2_hw *hw = sky2->hw; |
1731 | int err; | 1704 | int err; |
1732 | u16 ctl, mode; | 1705 | u16 ctl, mode; |
1706 | u32 imask; | ||
1733 | 1707 | ||
1734 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) | 1708 | if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) |
1735 | return -EINVAL; | 1709 | return -EINVAL; |
@@ -1742,12 +1716,15 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1742 | return 0; | 1716 | return 0; |
1743 | } | 1717 | } |
1744 | 1718 | ||
1719 | imask = sky2_read32(hw, B0_IMSK); | ||
1745 | sky2_write32(hw, B0_IMSK, 0); | 1720 | sky2_write32(hw, B0_IMSK, 0); |
1746 | 1721 | ||
1747 | dev->trans_start = jiffies; /* prevent tx timeout */ | 1722 | dev->trans_start = jiffies; /* prevent tx timeout */ |
1748 | netif_stop_queue(dev); | 1723 | netif_stop_queue(dev); |
1749 | netif_poll_disable(hw->dev[0]); | 1724 | netif_poll_disable(hw->dev[0]); |
1750 | 1725 | ||
1726 | synchronize_irq(hw->pdev->irq); | ||
1727 | |||
1751 | ctl = gma_read16(hw, sky2->port, GM_GP_CTRL); | 1728 | ctl = gma_read16(hw, sky2->port, GM_GP_CTRL); |
1752 | gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); | 1729 | gma_write16(hw, sky2->port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); |
1753 | sky2_rx_stop(sky2); | 1730 | sky2_rx_stop(sky2); |
@@ -1766,7 +1743,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu) | |||
1766 | sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD); | 1743 | sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD); |
1767 | 1744 | ||
1768 | err = sky2_rx_start(sky2); | 1745 | err = sky2_rx_start(sky2); |
1769 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | 1746 | sky2_write32(hw, B0_IMSK, imask); |
1770 | 1747 | ||
1771 | if (err) | 1748 | if (err) |
1772 | dev_close(dev); | 1749 | dev_close(dev); |
@@ -1843,8 +1820,7 @@ resubmit: | |||
1843 | sky2_rx_add(sky2, re->mapaddr); | 1820 | sky2_rx_add(sky2, re->mapaddr); |
1844 | 1821 | ||
1845 | /* Tell receiver about new buffers. */ | 1822 | /* Tell receiver about new buffers. */ |
1846 | sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put, | 1823 | sky2_put_idx(sky2->hw, rxqaddr[sky2->port], sky2->rx_put); |
1847 | &sky2->rx_last_put, RX_LE_SIZE); | ||
1848 | 1824 | ||
1849 | return skb; | 1825 | return skb; |
1850 | 1826 | ||
@@ -1871,76 +1847,51 @@ error: | |||
1871 | goto resubmit; | 1847 | goto resubmit; |
1872 | } | 1848 | } |
1873 | 1849 | ||
1874 | /* | 1850 | /* Transmit complete */ |
1875 | * Check for transmit complete | 1851 | static inline void sky2_tx_done(struct net_device *dev, u16 last) |
1876 | */ | ||
1877 | #define TX_NO_STATUS 0xffff | ||
1878 | |||
1879 | static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last) | ||
1880 | { | 1852 | { |
1881 | if (last != TX_NO_STATUS) { | 1853 | struct sky2_port *sky2 = netdev_priv(dev); |
1882 | struct net_device *dev = hw->dev[port]; | ||
1883 | if (dev && netif_running(dev)) { | ||
1884 | struct sky2_port *sky2 = netdev_priv(dev); | ||
1885 | 1854 | ||
1886 | spin_lock(&sky2->tx_lock); | 1855 | if (netif_running(dev)) { |
1887 | sky2_tx_complete(sky2, last); | 1856 | spin_lock(&sky2->tx_lock); |
1888 | spin_unlock(&sky2->tx_lock); | 1857 | sky2_tx_complete(sky2, last); |
1889 | } | 1858 | spin_unlock(&sky2->tx_lock); |
1890 | } | 1859 | } |
1891 | } | 1860 | } |
1892 | 1861 | ||
1893 | /* | 1862 | /* Process status response ring */ |
1894 | * Both ports share the same status interrupt, therefore there is only | 1863 | static int sky2_status_intr(struct sky2_hw *hw, int to_do) |
1895 | * one poll routine. | ||
1896 | */ | ||
1897 | static int sky2_poll(struct net_device *dev0, int *budget) | ||
1898 | { | 1864 | { |
1899 | struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; | 1865 | int work_done = 0; |
1900 | unsigned int to_do = min(dev0->quota, *budget); | ||
1901 | unsigned int work_done = 0; | ||
1902 | u16 hwidx; | ||
1903 | u16 tx_done[2] = { TX_NO_STATUS, TX_NO_STATUS }; | ||
1904 | |||
1905 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); | ||
1906 | |||
1907 | /* | ||
1908 | * Kick the STAT_LEV_TIMER_CTRL timer. | ||
1909 | * This fixes my hangs on Yukon-EC (0xb6) rev 1. | ||
1910 | * The if clause is there to start the timer only if it has been | ||
1911 | * configured correctly and not been disabled via ethtool. | ||
1912 | */ | ||
1913 | if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) { | ||
1914 | sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); | ||
1915 | sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); | ||
1916 | } | ||
1917 | 1866 | ||
1918 | hwidx = sky2_read16(hw, STAT_PUT_IDX); | ||
1919 | BUG_ON(hwidx >= STATUS_RING_SIZE); | ||
1920 | rmb(); | 1867 | rmb(); |
1921 | 1868 | ||
1922 | while (hwidx != hw->st_idx) { | 1869 | for(;;) { |
1923 | struct sky2_status_le *le = hw->st_le + hw->st_idx; | 1870 | struct sky2_status_le *le = hw->st_le + hw->st_idx; |
1924 | struct net_device *dev; | 1871 | struct net_device *dev; |
1925 | struct sky2_port *sky2; | 1872 | struct sky2_port *sky2; |
1926 | struct sk_buff *skb; | 1873 | struct sk_buff *skb; |
1927 | u32 status; | 1874 | u32 status; |
1928 | u16 length; | 1875 | u16 length; |
1876 | u8 link, opcode; | ||
1877 | |||
1878 | opcode = le->opcode; | ||
1879 | if (!opcode) | ||
1880 | break; | ||
1881 | opcode &= ~HW_OWNER; | ||
1929 | 1882 | ||
1930 | le = hw->st_le + hw->st_idx; | ||
1931 | hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; | 1883 | hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; |
1932 | prefetch(hw->st_le + hw->st_idx); | 1884 | le->opcode = 0; |
1933 | 1885 | ||
1934 | BUG_ON(le->link >= 2); | 1886 | link = le->link; |
1935 | dev = hw->dev[le->link]; | 1887 | BUG_ON(link >= 2); |
1936 | if (dev == NULL || !netif_running(dev)) | 1888 | dev = hw->dev[link]; |
1937 | continue; | ||
1938 | 1889 | ||
1939 | sky2 = netdev_priv(dev); | 1890 | sky2 = netdev_priv(dev); |
1940 | status = le32_to_cpu(le->status); | 1891 | length = le->length; |
1941 | length = le16_to_cpu(le->length); | 1892 | status = le->status; |
1942 | 1893 | ||
1943 | switch (le->opcode & ~HW_OWNER) { | 1894 | switch (opcode) { |
1944 | case OP_RXSTAT: | 1895 | case OP_RXSTAT: |
1945 | skb = sky2_receive(sky2, length, status); | 1896 | skb = sky2_receive(sky2, length, status); |
1946 | if (!skb) | 1897 | if (!skb) |
@@ -1980,42 +1931,23 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
1980 | 1931 | ||
1981 | case OP_TXINDEXLE: | 1932 | case OP_TXINDEXLE: |
1982 | /* TX index reports status for both ports */ | 1933 | /* TX index reports status for both ports */ |
1983 | tx_done[0] = status & 0xffff; | 1934 | sky2_tx_done(hw->dev[0], status & 0xffff); |
1984 | tx_done[1] = ((status >> 24) & 0xff) | 1935 | if (hw->dev[1]) |
1985 | | (u16)(length & 0xf) << 8; | 1936 | sky2_tx_done(hw->dev[1], |
1937 | ((status >> 24) & 0xff) | ||
1938 | | (u16)(length & 0xf) << 8); | ||
1986 | break; | 1939 | break; |
1987 | 1940 | ||
1988 | default: | 1941 | default: |
1989 | if (net_ratelimit()) | 1942 | if (net_ratelimit()) |
1990 | printk(KERN_WARNING PFX | 1943 | printk(KERN_WARNING PFX |
1991 | "unknown status opcode 0x%x\n", le->opcode); | 1944 | "unknown status opcode 0x%x\n", opcode); |
1992 | break; | 1945 | break; |
1993 | } | 1946 | } |
1994 | } | 1947 | } |
1995 | 1948 | ||
1996 | exit_loop: | 1949 | exit_loop: |
1997 | sky2_tx_check(hw, 0, tx_done[0]); | 1950 | return work_done; |
1998 | sky2_tx_check(hw, 1, tx_done[1]); | ||
1999 | |||
2000 | if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) { | ||
2001 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); | ||
2002 | sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); | ||
2003 | } | ||
2004 | |||
2005 | if (likely(work_done < to_do)) { | ||
2006 | spin_lock_irq(&hw->hw_lock); | ||
2007 | __netif_rx_complete(dev0); | ||
2008 | |||
2009 | hw->intr_mask |= Y2_IS_STAT_BMU; | ||
2010 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | ||
2011 | spin_unlock_irq(&hw->hw_lock); | ||
2012 | |||
2013 | return 0; | ||
2014 | } else { | ||
2015 | *budget -= work_done; | ||
2016 | dev0->quota -= work_done; | ||
2017 | return 1; | ||
2018 | } | ||
2019 | } | 1951 | } |
2020 | 1952 | ||
2021 | static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) | 1953 | static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) |
@@ -2134,57 +2066,97 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port) | |||
2134 | } | 2066 | } |
2135 | } | 2067 | } |
2136 | 2068 | ||
2137 | static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) | 2069 | /* This should never happen it is a fatal situation */ |
2070 | static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port, | ||
2071 | const char *rxtx, u32 mask) | ||
2138 | { | 2072 | { |
2139 | struct net_device *dev = hw->dev[port]; | 2073 | struct net_device *dev = hw->dev[port]; |
2140 | struct sky2_port *sky2 = netdev_priv(dev); | 2074 | struct sky2_port *sky2 = netdev_priv(dev); |
2075 | u32 imask; | ||
2076 | |||
2077 | printk(KERN_ERR PFX "%s: %s descriptor error (hardware problem)\n", | ||
2078 | dev ? dev->name : "<not registered>", rxtx); | ||
2141 | 2079 | ||
2142 | hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); | 2080 | imask = sky2_read32(hw, B0_IMSK); |
2143 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | 2081 | imask &= ~mask; |
2082 | sky2_write32(hw, B0_IMSK, imask); | ||
2144 | 2083 | ||
2145 | schedule_work(&sky2->phy_task); | 2084 | if (dev) { |
2085 | spin_lock(&sky2->phy_lock); | ||
2086 | sky2_link_down(sky2); | ||
2087 | spin_unlock(&sky2->phy_lock); | ||
2088 | } | ||
2146 | } | 2089 | } |
2147 | 2090 | ||
2148 | static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) | 2091 | static int sky2_poll(struct net_device *dev0, int *budget) |
2149 | { | 2092 | { |
2150 | struct sky2_hw *hw = dev_id; | 2093 | struct sky2_hw *hw = ((struct sky2_port *) netdev_priv(dev0))->hw; |
2151 | struct net_device *dev0 = hw->dev[0]; | 2094 | int work_limit = min(dev0->quota, *budget); |
2152 | u32 status; | 2095 | int work_done = 0; |
2096 | u32 status = sky2_read32(hw, B0_Y2_SP_EISR); | ||
2153 | 2097 | ||
2154 | status = sky2_read32(hw, B0_Y2_SP_ISRC2); | 2098 | if (unlikely(status & ~Y2_IS_STAT_BMU)) { |
2155 | if (status == 0 || status == ~0) | 2099 | if (status & Y2_IS_HW_ERR) |
2156 | return IRQ_NONE; | 2100 | sky2_hw_intr(hw); |
2157 | 2101 | ||
2158 | spin_lock(&hw->hw_lock); | 2102 | if (status & Y2_IS_IRQ_PHY1) |
2159 | if (status & Y2_IS_HW_ERR) | 2103 | sky2_phy_intr(hw, 0); |
2160 | sky2_hw_intr(hw); | ||
2161 | 2104 | ||
2162 | /* Do NAPI for Rx and Tx status */ | 2105 | if (status & Y2_IS_IRQ_PHY2) |
2163 | if (status & Y2_IS_STAT_BMU) { | 2106 | sky2_phy_intr(hw, 1); |
2164 | hw->intr_mask &= ~Y2_IS_STAT_BMU; | ||
2165 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | ||
2166 | 2107 | ||
2167 | if (likely(__netif_rx_schedule_prep(dev0))) { | 2108 | if (status & Y2_IS_IRQ_MAC1) |
2168 | prefetch(&hw->st_le[hw->st_idx]); | 2109 | sky2_mac_intr(hw, 0); |
2169 | __netif_rx_schedule(dev0); | 2110 | |
2170 | } | 2111 | if (status & Y2_IS_IRQ_MAC2) |
2112 | sky2_mac_intr(hw, 1); | ||
2113 | |||
2114 | if (status & Y2_IS_CHK_RX1) | ||
2115 | sky2_descriptor_error(hw, 0, "receive", Y2_IS_CHK_RX1); | ||
2116 | |||
2117 | if (status & Y2_IS_CHK_RX2) | ||
2118 | sky2_descriptor_error(hw, 1, "receive", Y2_IS_CHK_RX2); | ||
2119 | |||
2120 | if (status & Y2_IS_CHK_TXA1) | ||
2121 | sky2_descriptor_error(hw, 0, "transmit", Y2_IS_CHK_TXA1); | ||
2122 | |||
2123 | if (status & Y2_IS_CHK_TXA2) | ||
2124 | sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); | ||
2171 | } | 2125 | } |
2172 | 2126 | ||
2173 | if (status & Y2_IS_IRQ_PHY1) | 2127 | if (status & Y2_IS_STAT_BMU) { |
2174 | sky2_phy_intr(hw, 0); | 2128 | work_done = sky2_status_intr(hw, work_limit); |
2129 | *budget -= work_done; | ||
2130 | dev0->quota -= work_done; | ||
2131 | |||
2132 | if (work_done >= work_limit) | ||
2133 | return 1; | ||
2175 | 2134 | ||
2176 | if (status & Y2_IS_IRQ_PHY2) | 2135 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); |
2177 | sky2_phy_intr(hw, 1); | 2136 | } |
2178 | 2137 | ||
2179 | if (status & Y2_IS_IRQ_MAC1) | 2138 | netif_rx_complete(dev0); |
2180 | sky2_mac_intr(hw, 0); | ||
2181 | 2139 | ||
2182 | if (status & Y2_IS_IRQ_MAC2) | 2140 | status = sky2_read32(hw, B0_Y2_SP_LISR); |
2183 | sky2_mac_intr(hw, 1); | 2141 | return 0; |
2142 | } | ||
2184 | 2143 | ||
2185 | sky2_write32(hw, B0_Y2_SP_ICR, 2); | 2144 | static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) |
2145 | { | ||
2146 | struct sky2_hw *hw = dev_id; | ||
2147 | struct net_device *dev0 = hw->dev[0]; | ||
2148 | u32 status; | ||
2186 | 2149 | ||
2187 | spin_unlock(&hw->hw_lock); | 2150 | /* Reading this mask interrupts as side effect */ |
2151 | status = sky2_read32(hw, B0_Y2_SP_ISRC2); | ||
2152 | if (status == 0 || status == ~0) | ||
2153 | return IRQ_NONE; | ||
2154 | |||
2155 | prefetch(&hw->st_le[hw->st_idx]); | ||
2156 | if (likely(__netif_rx_schedule_prep(dev0))) | ||
2157 | __netif_rx_schedule(dev0); | ||
2158 | else | ||
2159 | printk(KERN_DEBUG PFX "irq race detected\n"); | ||
2188 | 2160 | ||
2189 | return IRQ_HANDLED; | 2161 | return IRQ_HANDLED; |
2190 | } | 2162 | } |
@@ -2238,6 +2210,23 @@ static int sky2_reset(struct sky2_hw *hw) | |||
2238 | return -EOPNOTSUPP; | 2210 | return -EOPNOTSUPP; |
2239 | } | 2211 | } |
2240 | 2212 | ||
2213 | hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; | ||
2214 | |||
2215 | /* This rev is really old, and requires untested workarounds */ | ||
2216 | if (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == CHIP_REV_YU_EC_A1) { | ||
2217 | printk(KERN_ERR PFX "%s: unsupported revision Yukon-%s (0x%x) rev %d\n", | ||
2218 | pci_name(hw->pdev), yukon2_name[hw->chip_id - CHIP_ID_YUKON_XL], | ||
2219 | hw->chip_id, hw->chip_rev); | ||
2220 | return -EOPNOTSUPP; | ||
2221 | } | ||
2222 | |||
2223 | /* This chip is new and not tested yet */ | ||
2224 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { | ||
2225 | pr_info(PFX "%s: is a version of Yukon 2 chipset that has not been tested yet.\n", | ||
2226 | pci_name(hw->pdev)); | ||
2227 | pr_info("Please report success/failure to maintainer <shemminger@osdl.org>\n"); | ||
2228 | } | ||
2229 | |||
2241 | /* disable ASF */ | 2230 | /* disable ASF */ |
2242 | if (hw->chip_id <= CHIP_ID_YUKON_EC) { | 2231 | if (hw->chip_id <= CHIP_ID_YUKON_EC) { |
2243 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); | 2232 | sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET); |
@@ -2258,7 +2247,7 @@ static int sky2_reset(struct sky2_hw *hw) | |||
2258 | sky2_write8(hw, B0_CTST, CS_MRST_CLR); | 2247 | sky2_write8(hw, B0_CTST, CS_MRST_CLR); |
2259 | 2248 | ||
2260 | /* clear any PEX errors */ | 2249 | /* clear any PEX errors */ |
2261 | if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) | 2250 | if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) |
2262 | sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); | 2251 | sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); |
2263 | 2252 | ||
2264 | 2253 | ||
@@ -2271,7 +2260,6 @@ static int sky2_reset(struct sky2_hw *hw) | |||
2271 | if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) | 2260 | if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC)) |
2272 | ++hw->ports; | 2261 | ++hw->ports; |
2273 | } | 2262 | } |
2274 | hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; | ||
2275 | 2263 | ||
2276 | sky2_set_power_state(hw, PCI_D0); | 2264 | sky2_set_power_state(hw, PCI_D0); |
2277 | 2265 | ||
@@ -2337,30 +2325,18 @@ static int sky2_reset(struct sky2_hw *hw) | |||
2337 | /* Set the list last index */ | 2325 | /* Set the list last index */ |
2338 | sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1); | 2326 | sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1); |
2339 | 2327 | ||
2340 | /* These status setup values are copied from SysKonnect's driver */ | 2328 | sky2_write16(hw, STAT_TX_IDX_TH, 10); |
2341 | if (is_ec_a1(hw)) { | 2329 | sky2_write8(hw, STAT_FIFO_WM, 16); |
2342 | /* WA for dev. #4.3 */ | ||
2343 | sky2_write16(hw, STAT_TX_IDX_TH, 0xfff); /* Tx Threshold */ | ||
2344 | |||
2345 | /* set Status-FIFO watermark */ | ||
2346 | sky2_write8(hw, STAT_FIFO_WM, 0x21); /* WA for dev. #4.18 */ | ||
2347 | 2330 | ||
2348 | /* set Status-FIFO ISR watermark */ | 2331 | /* set Status-FIFO ISR watermark */ |
2349 | sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */ | 2332 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) |
2350 | sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 10000)); | 2333 | sky2_write8(hw, STAT_FIFO_ISR_WM, 4); |
2351 | } else { | 2334 | else |
2352 | sky2_write16(hw, STAT_TX_IDX_TH, 10); | 2335 | sky2_write8(hw, STAT_FIFO_ISR_WM, 16); |
2353 | sky2_write8(hw, STAT_FIFO_WM, 16); | ||
2354 | |||
2355 | /* set Status-FIFO ISR watermark */ | ||
2356 | if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0) | ||
2357 | sky2_write8(hw, STAT_FIFO_ISR_WM, 4); | ||
2358 | else | ||
2359 | sky2_write8(hw, STAT_FIFO_ISR_WM, 16); | ||
2360 | 2336 | ||
2361 | sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); | 2337 | sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); |
2362 | sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7)); | 2338 | sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20)); |
2363 | } | 2339 | sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); |
2364 | 2340 | ||
2365 | /* enable status unit */ | 2341 | /* enable status unit */ |
2366 | sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); | 2342 | sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON); |
@@ -2743,7 +2719,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data) | |||
2743 | ms = data * 1000; | 2719 | ms = data * 1000; |
2744 | 2720 | ||
2745 | /* save initial values */ | 2721 | /* save initial values */ |
2746 | down(&sky2->phy_sema); | 2722 | spin_lock_bh(&sky2->phy_lock); |
2747 | if (hw->chip_id == CHIP_ID_YUKON_XL) { | 2723 | if (hw->chip_id == CHIP_ID_YUKON_XL) { |
2748 | u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); | 2724 | u16 pg = gm_phy_read(hw, port, PHY_MARV_EXT_ADR); |
2749 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); | 2725 | gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3); |
@@ -2759,9 +2735,9 @@ static int sky2_phys_id(struct net_device *dev, u32 data) | |||
2759 | sky2_led(hw, port, onoff); | 2735 | sky2_led(hw, port, onoff); |
2760 | onoff = !onoff; | 2736 | onoff = !onoff; |
2761 | 2737 | ||
2762 | up(&sky2->phy_sema); | 2738 | spin_unlock_bh(&sky2->phy_lock); |
2763 | interrupted = msleep_interruptible(250); | 2739 | interrupted = msleep_interruptible(250); |
2764 | down(&sky2->phy_sema); | 2740 | spin_lock_bh(&sky2->phy_lock); |
2765 | 2741 | ||
2766 | ms -= 250; | 2742 | ms -= 250; |
2767 | } | 2743 | } |
@@ -2776,7 +2752,7 @@ static int sky2_phys_id(struct net_device *dev, u32 data) | |||
2776 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); | 2752 | gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); |
2777 | gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); | 2753 | gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); |
2778 | } | 2754 | } |
2779 | up(&sky2->phy_sema); | 2755 | spin_unlock_bh(&sky2->phy_lock); |
2780 | 2756 | ||
2781 | return 0; | 2757 | return 0; |
2782 | } | 2758 | } |
@@ -2806,38 +2782,6 @@ static int sky2_set_pauseparam(struct net_device *dev, | |||
2806 | return err; | 2782 | return err; |
2807 | } | 2783 | } |
2808 | 2784 | ||
2809 | #ifdef CONFIG_PM | ||
2810 | static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
2811 | { | ||
2812 | struct sky2_port *sky2 = netdev_priv(dev); | ||
2813 | |||
2814 | wol->supported = WAKE_MAGIC; | ||
2815 | wol->wolopts = sky2->wol ? WAKE_MAGIC : 0; | ||
2816 | } | ||
2817 | |||
2818 | static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) | ||
2819 | { | ||
2820 | struct sky2_port *sky2 = netdev_priv(dev); | ||
2821 | struct sky2_hw *hw = sky2->hw; | ||
2822 | |||
2823 | if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) | ||
2824 | return -EOPNOTSUPP; | ||
2825 | |||
2826 | sky2->wol = wol->wolopts == WAKE_MAGIC; | ||
2827 | |||
2828 | if (sky2->wol) { | ||
2829 | memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN); | ||
2830 | |||
2831 | sky2_write16(hw, WOL_CTRL_STAT, | ||
2832 | WOL_CTL_ENA_PME_ON_MAGIC_PKT | | ||
2833 | WOL_CTL_ENA_MAGIC_PKT_UNIT); | ||
2834 | } else | ||
2835 | sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT); | ||
2836 | |||
2837 | return 0; | ||
2838 | } | ||
2839 | #endif | ||
2840 | |||
2841 | static int sky2_get_coalesce(struct net_device *dev, | 2785 | static int sky2_get_coalesce(struct net_device *dev, |
2842 | struct ethtool_coalesce *ecmd) | 2786 | struct ethtool_coalesce *ecmd) |
2843 | { | 2787 | { |
@@ -2878,19 +2822,11 @@ static int sky2_set_coalesce(struct net_device *dev, | |||
2878 | { | 2822 | { |
2879 | struct sky2_port *sky2 = netdev_priv(dev); | 2823 | struct sky2_port *sky2 = netdev_priv(dev); |
2880 | struct sky2_hw *hw = sky2->hw; | 2824 | struct sky2_hw *hw = sky2->hw; |
2881 | const u32 tmin = sky2_clk2us(hw, 1); | 2825 | const u32 tmax = sky2_clk2us(hw, 0x0ffffff); |
2882 | const u32 tmax = 5000; | ||
2883 | |||
2884 | if (ecmd->tx_coalesce_usecs != 0 && | ||
2885 | (ecmd->tx_coalesce_usecs < tmin || ecmd->tx_coalesce_usecs > tmax)) | ||
2886 | return -EINVAL; | ||
2887 | |||
2888 | if (ecmd->rx_coalesce_usecs != 0 && | ||
2889 | (ecmd->rx_coalesce_usecs < tmin || ecmd->rx_coalesce_usecs > tmax)) | ||
2890 | return -EINVAL; | ||
2891 | 2826 | ||
2892 | if (ecmd->rx_coalesce_usecs_irq != 0 && | 2827 | if (ecmd->tx_coalesce_usecs > tmax || |
2893 | (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax)) | 2828 | ecmd->rx_coalesce_usecs > tmax || |
2829 | ecmd->rx_coalesce_usecs_irq > tmax) | ||
2894 | return -EINVAL; | 2830 | return -EINVAL; |
2895 | 2831 | ||
2896 | if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1) | 2832 | if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1) |
@@ -3025,10 +2961,6 @@ static struct ethtool_ops sky2_ethtool_ops = { | |||
3025 | .set_ringparam = sky2_set_ringparam, | 2961 | .set_ringparam = sky2_set_ringparam, |
3026 | .get_pauseparam = sky2_get_pauseparam, | 2962 | .get_pauseparam = sky2_get_pauseparam, |
3027 | .set_pauseparam = sky2_set_pauseparam, | 2963 | .set_pauseparam = sky2_set_pauseparam, |
3028 | #ifdef CONFIG_PM | ||
3029 | .get_wol = sky2_get_wol, | ||
3030 | .set_wol = sky2_set_wol, | ||
3031 | #endif | ||
3032 | .phys_id = sky2_phys_id, | 2964 | .phys_id = sky2_phys_id, |
3033 | .get_stats_count = sky2_get_stats_count, | 2965 | .get_stats_count = sky2_get_stats_count, |
3034 | .get_ethtool_stats = sky2_get_ethtool_stats, | 2966 | .get_ethtool_stats = sky2_get_ethtool_stats, |
@@ -3082,16 +3014,15 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, | |||
3082 | sky2->speed = -1; | 3014 | sky2->speed = -1; |
3083 | sky2->advertising = sky2_supported_modes(hw); | 3015 | sky2->advertising = sky2_supported_modes(hw); |
3084 | 3016 | ||
3085 | /* Receive checksum disabled for Yukon XL | 3017 | /* Receive checksum disabled for Yukon XL |
3086 | * because of observed problems with incorrect | 3018 | * because of observed problems with incorrect |
3087 | * values when multiple packets are received in one interrupt | 3019 | * values when multiple packets are received in one interrupt |
3088 | */ | 3020 | */ |
3089 | sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); | 3021 | sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); |
3090 | 3022 | ||
3091 | INIT_WORK(&sky2->phy_task, sky2_phy_task, sky2); | 3023 | spin_lock_init(&sky2->phy_lock); |
3092 | init_MUTEX(&sky2->phy_sema); | ||
3093 | sky2->tx_pending = TX_DEF_PENDING; | 3024 | sky2->tx_pending = TX_DEF_PENDING; |
3094 | sky2->rx_pending = is_ec_a1(hw) ? 8 : RX_DEF_PENDING; | 3025 | sky2->rx_pending = RX_DEF_PENDING; |
3095 | sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN); | 3026 | sky2->rx_bufsize = sky2_buf_size(ETH_DATA_LEN); |
3096 | 3027 | ||
3097 | hw->dev[port] = dev; | 3028 | hw->dev[port] = dev; |
@@ -3133,6 +3064,66 @@ static void __devinit sky2_show_addr(struct net_device *dev) | |||
3133 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); | 3064 | dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]); |
3134 | } | 3065 | } |
3135 | 3066 | ||
3067 | /* Handle software interrupt used during MSI test */ | ||
3068 | static irqreturn_t __devinit sky2_test_intr(int irq, void *dev_id, | ||
3069 | struct pt_regs *regs) | ||
3070 | { | ||
3071 | struct sky2_hw *hw = dev_id; | ||
3072 | u32 status = sky2_read32(hw, B0_Y2_SP_ISRC2); | ||
3073 | |||
3074 | if (status == 0) | ||
3075 | return IRQ_NONE; | ||
3076 | |||
3077 | if (status & Y2_IS_IRQ_SW) { | ||
3078 | hw->msi_detected = 1; | ||
3079 | wake_up(&hw->msi_wait); | ||
3080 | sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); | ||
3081 | } | ||
3082 | sky2_write32(hw, B0_Y2_SP_ICR, 2); | ||
3083 | |||
3084 | return IRQ_HANDLED; | ||
3085 | } | ||
3086 | |||
3087 | /* Test interrupt path by forcing a a software IRQ */ | ||
3088 | static int __devinit sky2_test_msi(struct sky2_hw *hw) | ||
3089 | { | ||
3090 | struct pci_dev *pdev = hw->pdev; | ||
3091 | int err; | ||
3092 | |||
3093 | sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW); | ||
3094 | |||
3095 | err = request_irq(pdev->irq, sky2_test_intr, SA_SHIRQ, DRV_NAME, hw); | ||
3096 | if (err) { | ||
3097 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", | ||
3098 | pci_name(pdev), pdev->irq); | ||
3099 | return err; | ||
3100 | } | ||
3101 | |||
3102 | init_waitqueue_head (&hw->msi_wait); | ||
3103 | |||
3104 | sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); | ||
3105 | wmb(); | ||
3106 | |||
3107 | wait_event_timeout(hw->msi_wait, hw->msi_detected, HZ/10); | ||
3108 | |||
3109 | if (!hw->msi_detected) { | ||
3110 | /* MSI test failed, go back to INTx mode */ | ||
3111 | printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, " | ||
3112 | "switching to INTx mode. Please report this failure to " | ||
3113 | "the PCI maintainer and include system chipset information.\n", | ||
3114 | pci_name(pdev)); | ||
3115 | |||
3116 | err = -EOPNOTSUPP; | ||
3117 | sky2_write8(hw, B0_CTST, CS_CL_SW_IRQ); | ||
3118 | } | ||
3119 | |||
3120 | sky2_write32(hw, B0_IMSK, 0); | ||
3121 | |||
3122 | free_irq(pdev->irq, hw); | ||
3123 | |||
3124 | return err; | ||
3125 | } | ||
3126 | |||
3136 | static int __devinit sky2_probe(struct pci_dev *pdev, | 3127 | static int __devinit sky2_probe(struct pci_dev *pdev, |
3137 | const struct pci_device_id *ent) | 3128 | const struct pci_device_id *ent) |
3138 | { | 3129 | { |
@@ -3201,7 +3192,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
3201 | goto err_out_free_hw; | 3192 | goto err_out_free_hw; |
3202 | } | 3193 | } |
3203 | hw->pm_cap = pm_cap; | 3194 | hw->pm_cap = pm_cap; |
3204 | spin_lock_init(&hw->hw_lock); | ||
3205 | 3195 | ||
3206 | #ifdef __BIG_ENDIAN | 3196 | #ifdef __BIG_ENDIAN |
3207 | /* byte swap descriptors in hardware */ | 3197 | /* byte swap descriptors in hardware */ |
@@ -3254,21 +3244,29 @@ static int __devinit sky2_probe(struct pci_dev *pdev, | |||
3254 | } | 3244 | } |
3255 | } | 3245 | } |
3256 | 3246 | ||
3257 | err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw); | 3247 | if (!disable_msi && pci_enable_msi(pdev) == 0) { |
3248 | err = sky2_test_msi(hw); | ||
3249 | if (err == -EOPNOTSUPP) | ||
3250 | pci_disable_msi(pdev); | ||
3251 | else if (err) | ||
3252 | goto err_out_unregister; | ||
3253 | } | ||
3254 | |||
3255 | err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw); | ||
3258 | if (err) { | 3256 | if (err) { |
3259 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", | 3257 | printk(KERN_ERR PFX "%s: cannot assign irq %d\n", |
3260 | pci_name(pdev), pdev->irq); | 3258 | pci_name(pdev), pdev->irq); |
3261 | goto err_out_unregister; | 3259 | goto err_out_unregister; |
3262 | } | 3260 | } |
3263 | 3261 | ||
3264 | hw->intr_mask = Y2_IS_BASE; | 3262 | sky2_write32(hw, B0_IMSK, Y2_IS_BASE); |
3265 | sky2_write32(hw, B0_IMSK, hw->intr_mask); | ||
3266 | 3263 | ||
3267 | pci_set_drvdata(pdev, hw); | 3264 | pci_set_drvdata(pdev, hw); |
3268 | 3265 | ||
3269 | return 0; | 3266 | return 0; |
3270 | 3267 | ||
3271 | err_out_unregister: | 3268 | err_out_unregister: |
3269 | pci_disable_msi(pdev); | ||
3272 | if (dev1) { | 3270 | if (dev1) { |
3273 | unregister_netdev(dev1); | 3271 | unregister_netdev(dev1); |
3274 | free_netdev(dev1); | 3272 | free_netdev(dev1); |
@@ -3311,6 +3309,7 @@ static void __devexit sky2_remove(struct pci_dev *pdev) | |||
3311 | sky2_read8(hw, B0_CTST); | 3309 | sky2_read8(hw, B0_CTST); |
3312 | 3310 | ||
3313 | free_irq(pdev->irq, hw); | 3311 | free_irq(pdev->irq, hw); |
3312 | pci_disable_msi(pdev); | ||
3314 | pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); | 3313 | pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); |
3315 | pci_release_regions(pdev); | 3314 | pci_release_regions(pdev); |
3316 | pci_disable_device(pdev); | 3315 | pci_disable_device(pdev); |
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index dce955c76f3c..d63cd5a1b71c 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -278,13 +278,11 @@ enum { | |||
278 | Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */ | 278 | Y2_IS_CHK_TXS1 = 1<<1, /* Descriptor error TXS 1 */ |
279 | Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */ | 279 | Y2_IS_CHK_TXA1 = 1<<0, /* Descriptor error TXA 1 */ |
280 | 280 | ||
281 | Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU | | 281 | Y2_IS_BASE = Y2_IS_HW_ERR | Y2_IS_STAT_BMU, |
282 | Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY | | 282 | Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 |
283 | Y2_IS_IRQ_SW | Y2_IS_TIMINT, | 283 | | Y2_IS_CHK_TXA1 | Y2_IS_CHK_RX1, |
284 | Y2_IS_PORT_1 = Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 | | 284 | Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
285 | Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1, | 285 | | Y2_IS_CHK_TXA2 | Y2_IS_CHK_RX2, |
286 | Y2_IS_PORT_2 = Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 | | ||
287 | Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2, | ||
288 | }; | 286 | }; |
289 | 287 | ||
290 | /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ | 288 | /* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ |
@@ -1832,6 +1830,7 @@ struct sky2_port { | |||
1832 | struct net_device *netdev; | 1830 | struct net_device *netdev; |
1833 | unsigned port; | 1831 | unsigned port; |
1834 | u32 msg_enable; | 1832 | u32 msg_enable; |
1833 | spinlock_t phy_lock; | ||
1835 | 1834 | ||
1836 | spinlock_t tx_lock ____cacheline_aligned_in_smp; | 1835 | spinlock_t tx_lock ____cacheline_aligned_in_smp; |
1837 | struct tx_ring_info *tx_ring; | 1836 | struct tx_ring_info *tx_ring; |
@@ -1840,7 +1839,6 @@ struct sky2_port { | |||
1840 | u16 tx_prod; /* next le to use */ | 1839 | u16 tx_prod; /* next le to use */ |
1841 | u32 tx_addr64; | 1840 | u32 tx_addr64; |
1842 | u16 tx_pending; | 1841 | u16 tx_pending; |
1843 | u16 tx_last_put; | ||
1844 | u16 tx_last_mss; | 1842 | u16 tx_last_mss; |
1845 | 1843 | ||
1846 | struct ring_info *rx_ring ____cacheline_aligned_in_smp; | 1844 | struct ring_info *rx_ring ____cacheline_aligned_in_smp; |
@@ -1849,7 +1847,6 @@ struct sky2_port { | |||
1849 | u16 rx_next; /* next re to check */ | 1847 | u16 rx_next; /* next re to check */ |
1850 | u16 rx_put; /* next le index to use */ | 1848 | u16 rx_put; /* next le index to use */ |
1851 | u16 rx_pending; | 1849 | u16 rx_pending; |
1852 | u16 rx_last_put; | ||
1853 | u16 rx_bufsize; | 1850 | u16 rx_bufsize; |
1854 | #ifdef SKY2_VLAN_TAG_USED | 1851 | #ifdef SKY2_VLAN_TAG_USED |
1855 | u16 rx_tag; | 1852 | u16 rx_tag; |
@@ -1865,20 +1862,15 @@ struct sky2_port { | |||
1865 | u8 rx_pause; | 1862 | u8 rx_pause; |
1866 | u8 tx_pause; | 1863 | u8 tx_pause; |
1867 | u8 rx_csum; | 1864 | u8 rx_csum; |
1868 | u8 wol; | ||
1869 | 1865 | ||
1870 | struct net_device_stats net_stats; | 1866 | struct net_device_stats net_stats; |
1871 | 1867 | ||
1872 | struct work_struct phy_task; | ||
1873 | struct semaphore phy_sema; | ||
1874 | }; | 1868 | }; |
1875 | 1869 | ||
1876 | struct sky2_hw { | 1870 | struct sky2_hw { |
1877 | void __iomem *regs; | 1871 | void __iomem *regs; |
1878 | struct pci_dev *pdev; | 1872 | struct pci_dev *pdev; |
1879 | struct net_device *dev[2]; | 1873 | struct net_device *dev[2]; |
1880 | spinlock_t hw_lock; | ||
1881 | u32 intr_mask; | ||
1882 | 1874 | ||
1883 | int pm_cap; | 1875 | int pm_cap; |
1884 | u8 chip_id; | 1876 | u8 chip_id; |
@@ -1889,6 +1881,8 @@ struct sky2_hw { | |||
1889 | struct sky2_status_le *st_le; | 1881 | struct sky2_status_le *st_le; |
1890 | u32 st_idx; | 1882 | u32 st_idx; |
1891 | dma_addr_t st_dma; | 1883 | dma_addr_t st_dma; |
1884 | int msi_detected; | ||
1885 | wait_queue_head_t msi_wait; | ||
1892 | }; | 1886 | }; |
1893 | 1887 | ||
1894 | /* Register accessor for memory mapped device */ | 1888 | /* Register accessor for memory mapped device */ |
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c index 7ec08127c9d6..0e9833adf9fe 100644 --- a/drivers/net/smc91x.c +++ b/drivers/net/smc91x.c | |||
@@ -215,15 +215,12 @@ struct smc_local { | |||
215 | 215 | ||
216 | spinlock_t lock; | 216 | spinlock_t lock; |
217 | 217 | ||
218 | #ifdef SMC_CAN_USE_DATACS | ||
219 | u32 __iomem *datacs; | ||
220 | #endif | ||
221 | |||
222 | #ifdef SMC_USE_PXA_DMA | 218 | #ifdef SMC_USE_PXA_DMA |
223 | /* DMA needs the physical address of the chip */ | 219 | /* DMA needs the physical address of the chip */ |
224 | u_long physaddr; | 220 | u_long physaddr; |
225 | #endif | 221 | #endif |
226 | void __iomem *base; | 222 | void __iomem *base; |
223 | void __iomem *datacs; | ||
227 | }; | 224 | }; |
228 | 225 | ||
229 | #if SMC_DEBUG > 0 | 226 | #if SMC_DEBUG > 0 |
@@ -2104,9 +2101,8 @@ static int smc_enable_device(struct platform_device *pdev) | |||
2104 | * Set the appropriate byte/word mode. | 2101 | * Set the appropriate byte/word mode. |
2105 | */ | 2102 | */ |
2106 | ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; | 2103 | ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; |
2107 | #ifndef SMC_CAN_USE_16BIT | 2104 | if (!SMC_CAN_USE_16BIT) |
2108 | ecsr |= ECSR_IOIS8; | 2105 | ecsr |= ECSR_IOIS8; |
2109 | #endif | ||
2110 | writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); | 2106 | writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); |
2111 | local_irq_restore(flags); | 2107 | local_irq_restore(flags); |
2112 | 2108 | ||
@@ -2143,40 +2139,39 @@ static void smc_release_attrib(struct platform_device *pdev) | |||
2143 | release_mem_region(res->start, ATTRIB_SIZE); | 2139 | release_mem_region(res->start, ATTRIB_SIZE); |
2144 | } | 2140 | } |
2145 | 2141 | ||
2146 | #ifdef SMC_CAN_USE_DATACS | 2142 | static inline void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) |
2147 | static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) | ||
2148 | { | 2143 | { |
2149 | struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); | 2144 | if (SMC_CAN_USE_DATACS) { |
2150 | struct smc_local *lp = netdev_priv(ndev); | 2145 | struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); |
2146 | struct smc_local *lp = netdev_priv(ndev); | ||
2151 | 2147 | ||
2152 | if (!res) | 2148 | if (!res) |
2153 | return; | 2149 | return; |
2154 | 2150 | ||
2155 | if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { | 2151 | if(!request_mem_region(res->start, SMC_DATA_EXTENT, CARDNAME)) { |
2156 | printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); | 2152 | printk(KERN_INFO "%s: failed to request datacs memory region.\n", CARDNAME); |
2157 | return; | 2153 | return; |
2158 | } | 2154 | } |
2159 | 2155 | ||
2160 | lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); | 2156 | lp->datacs = ioremap(res->start, SMC_DATA_EXTENT); |
2157 | } | ||
2161 | } | 2158 | } |
2162 | 2159 | ||
2163 | static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) | 2160 | static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) |
2164 | { | 2161 | { |
2165 | struct smc_local *lp = netdev_priv(ndev); | 2162 | if (SMC_CAN_USE_DATACS) { |
2166 | struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); | 2163 | struct smc_local *lp = netdev_priv(ndev); |
2164 | struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-data32"); | ||
2167 | 2165 | ||
2168 | if (lp->datacs) | 2166 | if (lp->datacs) |
2169 | iounmap(lp->datacs); | 2167 | iounmap(lp->datacs); |
2170 | 2168 | ||
2171 | lp->datacs = NULL; | 2169 | lp->datacs = NULL; |
2172 | 2170 | ||
2173 | if (res) | 2171 | if (res) |
2174 | release_mem_region(res->start, SMC_DATA_EXTENT); | 2172 | release_mem_region(res->start, SMC_DATA_EXTENT); |
2173 | } | ||
2175 | } | 2174 | } |
2176 | #else | ||
2177 | static void smc_request_datacs(struct platform_device *pdev, struct net_device *ndev) {} | ||
2178 | static void smc_release_datacs(struct platform_device *pdev, struct net_device *ndev) {} | ||
2179 | #endif | ||
2180 | 2175 | ||
2181 | /* | 2176 | /* |
2182 | * smc_init(void) | 2177 | * smc_init(void) |
@@ -2221,6 +2216,10 @@ static int smc_drv_probe(struct platform_device *pdev) | |||
2221 | 2216 | ||
2222 | ndev->dma = (unsigned char)-1; | 2217 | ndev->dma = (unsigned char)-1; |
2223 | ndev->irq = platform_get_irq(pdev, 0); | 2218 | ndev->irq = platform_get_irq(pdev, 0); |
2219 | if (ndev->irq < 0) { | ||
2220 | ret = -ENODEV; | ||
2221 | goto out_free_netdev; | ||
2222 | } | ||
2224 | 2223 | ||
2225 | ret = smc_request_attrib(pdev); | 2224 | ret = smc_request_attrib(pdev); |
2226 | if (ret) | 2225 | if (ret) |
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index e0efd1964e72..e1be1af51201 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -275,7 +275,10 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg) | |||
275 | #define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l) | 275 | #define SMC_insw(a,r,p,l) readsw ((void*) ((a) + (r)), p, l) |
276 | #define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; }) | 276 | #define SMC_outw(v,a,r) ({ writew ((v), (a) + (r)); LPD7A40X_IOBARRIER; }) |
277 | 277 | ||
278 | static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l) | 278 | #define SMC_outsw LPD7A40X_SMC_outsw |
279 | |||
280 | static inline void LPD7A40X_SMC_outsw(unsigned long a, int r, | ||
281 | unsigned char* p, int l) | ||
279 | { | 282 | { |
280 | unsigned short* ps = (unsigned short*) p; | 283 | unsigned short* ps = (unsigned short*) p; |
281 | while (l-- > 0) { | 284 | while (l-- > 0) { |
@@ -342,10 +345,6 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l) | |||
342 | 345 | ||
343 | #endif | 346 | #endif |
344 | 347 | ||
345 | #ifndef SMC_IRQ_FLAGS | ||
346 | #define SMC_IRQ_FLAGS SA_TRIGGER_RISING | ||
347 | #endif | ||
348 | |||
349 | #ifdef SMC_USE_PXA_DMA | 348 | #ifdef SMC_USE_PXA_DMA |
350 | /* | 349 | /* |
351 | * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is | 350 | * Let's use the DMA engine on the XScale PXA2xx for RX packets. This is |
@@ -441,10 +440,85 @@ smc_pxa_dma_irq(int dma, void *dummy, struct pt_regs *regs) | |||
441 | #endif /* SMC_USE_PXA_DMA */ | 440 | #endif /* SMC_USE_PXA_DMA */ |
442 | 441 | ||
443 | 442 | ||
444 | /* Because of bank switching, the LAN91x uses only 16 I/O ports */ | 443 | /* |
444 | * Everything a particular hardware setup needs should have been defined | ||
445 | * at this point. Add stubs for the undefined cases, mainly to avoid | ||
446 | * compilation warnings since they'll be optimized away, or to prevent buggy | ||
447 | * use of them. | ||
448 | */ | ||
449 | |||
450 | #if ! SMC_CAN_USE_32BIT | ||
451 | #define SMC_inl(ioaddr, reg) ({ BUG(); 0; }) | ||
452 | #define SMC_outl(x, ioaddr, reg) BUG() | ||
453 | #define SMC_insl(a, r, p, l) BUG() | ||
454 | #define SMC_outsl(a, r, p, l) BUG() | ||
455 | #endif | ||
456 | |||
457 | #if !defined(SMC_insl) || !defined(SMC_outsl) | ||
458 | #define SMC_insl(a, r, p, l) BUG() | ||
459 | #define SMC_outsl(a, r, p, l) BUG() | ||
460 | #endif | ||
461 | |||
462 | #if ! SMC_CAN_USE_16BIT | ||
463 | |||
464 | /* | ||
465 | * Any 16-bit access is performed with two 8-bit accesses if the hardware | ||
466 | * can't do it directly. Most registers are 16-bit so those are mandatory. | ||
467 | */ | ||
468 | #define SMC_outw(x, ioaddr, reg) \ | ||
469 | do { \ | ||
470 | unsigned int __val16 = (x); \ | ||
471 | SMC_outb( __val16, ioaddr, reg ); \ | ||
472 | SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ | ||
473 | } while (0) | ||
474 | #define SMC_inw(ioaddr, reg) \ | ||
475 | ({ \ | ||
476 | unsigned int __val16; \ | ||
477 | __val16 = SMC_inb( ioaddr, reg ); \ | ||
478 | __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ | ||
479 | __val16; \ | ||
480 | }) | ||
481 | |||
482 | #define SMC_insw(a, r, p, l) BUG() | ||
483 | #define SMC_outsw(a, r, p, l) BUG() | ||
484 | |||
485 | #endif | ||
486 | |||
487 | #if !defined(SMC_insw) || !defined(SMC_outsw) | ||
488 | #define SMC_insw(a, r, p, l) BUG() | ||
489 | #define SMC_outsw(a, r, p, l) BUG() | ||
490 | #endif | ||
491 | |||
492 | #if ! SMC_CAN_USE_8BIT | ||
493 | #define SMC_inb(ioaddr, reg) ({ BUG(); 0; }) | ||
494 | #define SMC_outb(x, ioaddr, reg) BUG() | ||
495 | #define SMC_insb(a, r, p, l) BUG() | ||
496 | #define SMC_outsb(a, r, p, l) BUG() | ||
497 | #endif | ||
498 | |||
499 | #if !defined(SMC_insb) || !defined(SMC_outsb) | ||
500 | #define SMC_insb(a, r, p, l) BUG() | ||
501 | #define SMC_outsb(a, r, p, l) BUG() | ||
502 | #endif | ||
503 | |||
504 | #ifndef SMC_CAN_USE_DATACS | ||
505 | #define SMC_CAN_USE_DATACS 0 | ||
506 | #endif | ||
507 | |||
445 | #ifndef SMC_IO_SHIFT | 508 | #ifndef SMC_IO_SHIFT |
446 | #define SMC_IO_SHIFT 0 | 509 | #define SMC_IO_SHIFT 0 |
447 | #endif | 510 | #endif |
511 | |||
512 | #ifndef SMC_IRQ_FLAGS | ||
513 | #define SMC_IRQ_FLAGS SA_TRIGGER_RISING | ||
514 | #endif | ||
515 | |||
516 | #ifndef SMC_INTERRUPT_PREAMBLE | ||
517 | #define SMC_INTERRUPT_PREAMBLE | ||
518 | #endif | ||
519 | |||
520 | |||
521 | /* Because of bank switching, the LAN91x uses only 16 I/O ports */ | ||
448 | #define SMC_IO_EXTENT (16 << SMC_IO_SHIFT) | 522 | #define SMC_IO_EXTENT (16 << SMC_IO_SHIFT) |
449 | #define SMC_DATA_EXTENT (4) | 523 | #define SMC_DATA_EXTENT (4) |
450 | 524 | ||
@@ -817,6 +891,11 @@ static const char * chip_ids[ 16 ] = { | |||
817 | * Note: the following macros do *not* select the bank -- this must | 891 | * Note: the following macros do *not* select the bank -- this must |
818 | * be done separately as needed in the main code. The SMC_REG() macro | 892 | * be done separately as needed in the main code. The SMC_REG() macro |
819 | * only uses the bank argument for debugging purposes (when enabled). | 893 | * only uses the bank argument for debugging purposes (when enabled). |
894 | * | ||
895 | * Note: despite inline functions being safer, everything leading to this | ||
896 | * should preferably be macros to let BUG() display the line number in | ||
897 | * the core source code since we're interested in the top call site | ||
898 | * not in any inline function location. | ||
820 | */ | 899 | */ |
821 | 900 | ||
822 | #if SMC_DEBUG > 0 | 901 | #if SMC_DEBUG > 0 |
@@ -834,62 +913,142 @@ static const char * chip_ids[ 16 ] = { | |||
834 | #define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT) | 913 | #define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT) |
835 | #endif | 914 | #endif |
836 | 915 | ||
837 | #if SMC_CAN_USE_8BIT | 916 | /* |
838 | #define SMC_GET_PN() SMC_inb( ioaddr, PN_REG ) | 917 | * Hack Alert: Some setups just can't write 8 or 16 bits reliably when not |
839 | #define SMC_SET_PN(x) SMC_outb( x, ioaddr, PN_REG ) | 918 | * aligned to a 32 bit boundary. I tell you that does exist! |
840 | #define SMC_GET_AR() SMC_inb( ioaddr, AR_REG ) | 919 | * Fortunately the affected register accesses can be easily worked around |
841 | #define SMC_GET_TXFIFO() SMC_inb( ioaddr, TXFIFO_REG ) | 920 | * since we can write zeroes to the preceeding 16 bits without adverse |
842 | #define SMC_GET_RXFIFO() SMC_inb( ioaddr, RXFIFO_REG ) | 921 | * effects and use a 32-bit access. |
843 | #define SMC_GET_INT() SMC_inb( ioaddr, INT_REG ) | 922 | * |
844 | #define SMC_ACK_INT(x) SMC_outb( x, ioaddr, INT_REG ) | 923 | * Enforce it on any 32-bit capable setup for now. |
845 | #define SMC_GET_INT_MASK() SMC_inb( ioaddr, IM_REG ) | 924 | */ |
846 | #define SMC_SET_INT_MASK(x) SMC_outb( x, ioaddr, IM_REG ) | 925 | #define SMC_MUST_ALIGN_WRITE SMC_CAN_USE_32BIT |
847 | #else | 926 | |
848 | #define SMC_GET_PN() (SMC_inw( ioaddr, PN_REG ) & 0xFF) | 927 | #define SMC_GET_PN() \ |
849 | #define SMC_SET_PN(x) SMC_outw( x, ioaddr, PN_REG ) | 928 | ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, PN_REG)) \ |
850 | #define SMC_GET_AR() (SMC_inw( ioaddr, PN_REG ) >> 8) | 929 | : (SMC_inw(ioaddr, PN_REG) & 0xFF) ) |
851 | #define SMC_GET_TXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) & 0xFF) | 930 | |
852 | #define SMC_GET_RXFIFO() (SMC_inw( ioaddr, TXFIFO_REG ) >> 8) | 931 | #define SMC_SET_PN(x) \ |
853 | #define SMC_GET_INT() (SMC_inw( ioaddr, INT_REG ) & 0xFF) | 932 | do { \ |
933 | if (SMC_MUST_ALIGN_WRITE) \ | ||
934 | SMC_outl((x)<<16, ioaddr, SMC_REG(0, 2)); \ | ||
935 | else if (SMC_CAN_USE_8BIT) \ | ||
936 | SMC_outb(x, ioaddr, PN_REG); \ | ||
937 | else \ | ||
938 | SMC_outw(x, ioaddr, PN_REG); \ | ||
939 | } while (0) | ||
940 | |||
941 | #define SMC_GET_AR() \ | ||
942 | ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, AR_REG)) \ | ||
943 | : (SMC_inw(ioaddr, PN_REG) >> 8) ) | ||
944 | |||
945 | #define SMC_GET_TXFIFO() \ | ||
946 | ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, TXFIFO_REG)) \ | ||
947 | : (SMC_inw(ioaddr, TXFIFO_REG) & 0xFF) ) | ||
948 | |||
949 | #define SMC_GET_RXFIFO() \ | ||
950 | ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, RXFIFO_REG)) \ | ||
951 | : (SMC_inw(ioaddr, TXFIFO_REG) >> 8) ) | ||
952 | |||
953 | #define SMC_GET_INT() \ | ||
954 | ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, INT_REG)) \ | ||
955 | : (SMC_inw(ioaddr, INT_REG) & 0xFF) ) | ||
956 | |||
854 | #define SMC_ACK_INT(x) \ | 957 | #define SMC_ACK_INT(x) \ |
855 | do { \ | 958 | do { \ |
856 | unsigned long __flags; \ | 959 | if (SMC_CAN_USE_8BIT) \ |
857 | int __mask; \ | 960 | SMC_outb(x, ioaddr, INT_REG); \ |
858 | local_irq_save(__flags); \ | 961 | else { \ |
859 | __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \ | 962 | unsigned long __flags; \ |
860 | SMC_outw( __mask | (x), ioaddr, INT_REG ); \ | 963 | int __mask; \ |
861 | local_irq_restore(__flags); \ | 964 | local_irq_save(__flags); \ |
965 | __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \ | ||
966 | SMC_outw( __mask | (x), ioaddr, INT_REG ); \ | ||
967 | local_irq_restore(__flags); \ | ||
968 | } \ | ||
969 | } while (0) | ||
970 | |||
971 | #define SMC_GET_INT_MASK() \ | ||
972 | ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, IM_REG)) \ | ||
973 | : (SMC_inw( ioaddr, INT_REG ) >> 8) ) | ||
974 | |||
975 | #define SMC_SET_INT_MASK(x) \ | ||
976 | do { \ | ||
977 | if (SMC_CAN_USE_8BIT) \ | ||
978 | SMC_outb(x, ioaddr, IM_REG); \ | ||
979 | else \ | ||
980 | SMC_outw((x) << 8, ioaddr, INT_REG); \ | ||
981 | } while (0) | ||
982 | |||
983 | #define SMC_CURRENT_BANK() SMC_inw(ioaddr, BANK_SELECT) | ||
984 | |||
985 | #define SMC_SELECT_BANK(x) \ | ||
986 | do { \ | ||
987 | if (SMC_MUST_ALIGN_WRITE) \ | ||
988 | SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \ | ||
989 | else \ | ||
990 | SMC_outw(x, ioaddr, BANK_SELECT); \ | ||
991 | } while (0) | ||
992 | |||
993 | #define SMC_GET_BASE() SMC_inw(ioaddr, BASE_REG) | ||
994 | |||
995 | #define SMC_SET_BASE(x) SMC_outw(x, ioaddr, BASE_REG) | ||
996 | |||
997 | #define SMC_GET_CONFIG() SMC_inw(ioaddr, CONFIG_REG) | ||
998 | |||
999 | #define SMC_SET_CONFIG(x) SMC_outw(x, ioaddr, CONFIG_REG) | ||
1000 | |||
1001 | #define SMC_GET_COUNTER() SMC_inw(ioaddr, COUNTER_REG) | ||
1002 | |||
1003 | #define SMC_GET_CTL() SMC_inw(ioaddr, CTL_REG) | ||
1004 | |||
1005 | #define SMC_SET_CTL(x) SMC_outw(x, ioaddr, CTL_REG) | ||
1006 | |||
1007 | #define SMC_GET_MII() SMC_inw(ioaddr, MII_REG) | ||
1008 | |||
1009 | #define SMC_SET_MII(x) SMC_outw(x, ioaddr, MII_REG) | ||
1010 | |||
1011 | #define SMC_GET_MIR() SMC_inw(ioaddr, MIR_REG) | ||
1012 | |||
1013 | #define SMC_SET_MIR(x) SMC_outw(x, ioaddr, MIR_REG) | ||
1014 | |||
1015 | #define SMC_GET_MMU_CMD() SMC_inw(ioaddr, MMU_CMD_REG) | ||
1016 | |||
1017 | #define SMC_SET_MMU_CMD(x) SMC_outw(x, ioaddr, MMU_CMD_REG) | ||
1018 | |||
1019 | #define SMC_GET_FIFO() SMC_inw(ioaddr, FIFO_REG) | ||
1020 | |||
1021 | #define SMC_GET_PTR() SMC_inw(ioaddr, PTR_REG) | ||
1022 | |||
1023 | #define SMC_SET_PTR(x) \ | ||
1024 | do { \ | ||
1025 | if (SMC_MUST_ALIGN_WRITE) \ | ||
1026 | SMC_outl((x)<<16, ioaddr, SMC_REG(4, 2)); \ | ||
1027 | else \ | ||
1028 | SMC_outw(x, ioaddr, PTR_REG); \ | ||
862 | } while (0) | 1029 | } while (0) |
863 | #define SMC_GET_INT_MASK() (SMC_inw( ioaddr, INT_REG ) >> 8) | ||
864 | #define SMC_SET_INT_MASK(x) SMC_outw( (x) << 8, ioaddr, INT_REG ) | ||
865 | #endif | ||
866 | 1030 | ||
867 | #define SMC_CURRENT_BANK() SMC_inw( ioaddr, BANK_SELECT ) | 1031 | #define SMC_GET_EPH_STATUS() SMC_inw(ioaddr, EPH_STATUS_REG) |
868 | #define SMC_SELECT_BANK(x) SMC_outw( x, ioaddr, BANK_SELECT ) | 1032 | |
869 | #define SMC_GET_BASE() SMC_inw( ioaddr, BASE_REG ) | 1033 | #define SMC_GET_RCR() SMC_inw(ioaddr, RCR_REG) |
870 | #define SMC_SET_BASE(x) SMC_outw( x, ioaddr, BASE_REG ) | 1034 | |
871 | #define SMC_GET_CONFIG() SMC_inw( ioaddr, CONFIG_REG ) | 1035 | #define SMC_SET_RCR(x) SMC_outw(x, ioaddr, RCR_REG) |
872 | #define SMC_SET_CONFIG(x) SMC_outw( x, ioaddr, CONFIG_REG ) | 1036 | |
873 | #define SMC_GET_COUNTER() SMC_inw( ioaddr, COUNTER_REG ) | 1037 | #define SMC_GET_REV() SMC_inw(ioaddr, REV_REG) |
874 | #define SMC_GET_CTL() SMC_inw( ioaddr, CTL_REG ) | 1038 | |
875 | #define SMC_SET_CTL(x) SMC_outw( x, ioaddr, CTL_REG ) | 1039 | #define SMC_GET_RPC() SMC_inw(ioaddr, RPC_REG) |
876 | #define SMC_GET_MII() SMC_inw( ioaddr, MII_REG ) | 1040 | |
877 | #define SMC_SET_MII(x) SMC_outw( x, ioaddr, MII_REG ) | 1041 | #define SMC_SET_RPC(x) \ |
878 | #define SMC_GET_MIR() SMC_inw( ioaddr, MIR_REG ) | 1042 | do { \ |
879 | #define SMC_SET_MIR(x) SMC_outw( x, ioaddr, MIR_REG ) | 1043 | if (SMC_MUST_ALIGN_WRITE) \ |
880 | #define SMC_GET_MMU_CMD() SMC_inw( ioaddr, MMU_CMD_REG ) | 1044 | SMC_outl((x)<<16, ioaddr, SMC_REG(8, 0)); \ |
881 | #define SMC_SET_MMU_CMD(x) SMC_outw( x, ioaddr, MMU_CMD_REG ) | 1045 | else \ |
882 | #define SMC_GET_FIFO() SMC_inw( ioaddr, FIFO_REG ) | 1046 | SMC_outw(x, ioaddr, RPC_REG); \ |
883 | #define SMC_GET_PTR() SMC_inw( ioaddr, PTR_REG ) | 1047 | } while (0) |
884 | #define SMC_SET_PTR(x) SMC_outw( x, ioaddr, PTR_REG ) | 1048 | |
885 | #define SMC_GET_EPH_STATUS() SMC_inw( ioaddr, EPH_STATUS_REG ) | 1049 | #define SMC_GET_TCR() SMC_inw(ioaddr, TCR_REG) |
886 | #define SMC_GET_RCR() SMC_inw( ioaddr, RCR_REG ) | 1050 | |
887 | #define SMC_SET_RCR(x) SMC_outw( x, ioaddr, RCR_REG ) | 1051 | #define SMC_SET_TCR(x) SMC_outw(x, ioaddr, TCR_REG) |
888 | #define SMC_GET_REV() SMC_inw( ioaddr, REV_REG ) | ||
889 | #define SMC_GET_RPC() SMC_inw( ioaddr, RPC_REG ) | ||
890 | #define SMC_SET_RPC(x) SMC_outw( x, ioaddr, RPC_REG ) | ||
891 | #define SMC_GET_TCR() SMC_inw( ioaddr, TCR_REG ) | ||
892 | #define SMC_SET_TCR(x) SMC_outw( x, ioaddr, TCR_REG ) | ||
893 | 1052 | ||
894 | #ifndef SMC_GET_MAC_ADDR | 1053 | #ifndef SMC_GET_MAC_ADDR |
895 | #define SMC_GET_MAC_ADDR(addr) \ | 1054 | #define SMC_GET_MAC_ADDR(addr) \ |
@@ -920,151 +1079,84 @@ static const char * chip_ids[ 16 ] = { | |||
920 | SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \ | 1079 | SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \ |
921 | } while (0) | 1080 | } while (0) |
922 | 1081 | ||
923 | #if SMC_CAN_USE_32BIT | ||
924 | /* | ||
925 | * Some setups just can't write 8 or 16 bits reliably when not aligned | ||
926 | * to a 32 bit boundary. I tell you that exists! | ||
927 | * We re-do the ones here that can be easily worked around if they can have | ||
928 | * their low parts written to 0 without adverse effects. | ||
929 | */ | ||
930 | #undef SMC_SELECT_BANK | ||
931 | #define SMC_SELECT_BANK(x) SMC_outl( (x)<<16, ioaddr, 12<<SMC_IO_SHIFT ) | ||
932 | #undef SMC_SET_RPC | ||
933 | #define SMC_SET_RPC(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(8, 0) ) | ||
934 | #undef SMC_SET_PN | ||
935 | #define SMC_SET_PN(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(0, 2) ) | ||
936 | #undef SMC_SET_PTR | ||
937 | #define SMC_SET_PTR(x) SMC_outl( (x)<<16, ioaddr, SMC_REG(4, 2) ) | ||
938 | #endif | ||
939 | |||
940 | #if SMC_CAN_USE_32BIT | ||
941 | #define SMC_PUT_PKT_HDR(status, length) \ | ||
942 | SMC_outl( (status) | (length) << 16, ioaddr, DATA_REG ) | ||
943 | #define SMC_GET_PKT_HDR(status, length) \ | ||
944 | do { \ | ||
945 | unsigned int __val = SMC_inl( ioaddr, DATA_REG ); \ | ||
946 | (status) = __val & 0xffff; \ | ||
947 | (length) = __val >> 16; \ | ||
948 | } while (0) | ||
949 | #else | ||
950 | #define SMC_PUT_PKT_HDR(status, length) \ | 1082 | #define SMC_PUT_PKT_HDR(status, length) \ |
951 | do { \ | 1083 | do { \ |
952 | SMC_outw( status, ioaddr, DATA_REG ); \ | 1084 | if (SMC_CAN_USE_32BIT) \ |
953 | SMC_outw( length, ioaddr, DATA_REG ); \ | 1085 | SMC_outl((status) | (length)<<16, ioaddr, DATA_REG); \ |
954 | } while (0) | 1086 | else { \ |
955 | #define SMC_GET_PKT_HDR(status, length) \ | 1087 | SMC_outw(status, ioaddr, DATA_REG); \ |
956 | do { \ | 1088 | SMC_outw(length, ioaddr, DATA_REG); \ |
957 | (status) = SMC_inw( ioaddr, DATA_REG ); \ | 1089 | } \ |
958 | (length) = SMC_inw( ioaddr, DATA_REG ); \ | ||
959 | } while (0) | 1090 | } while (0) |
960 | #endif | ||
961 | 1091 | ||
962 | #if SMC_CAN_USE_32BIT | 1092 | #define SMC_GET_PKT_HDR(status, length) \ |
963 | #define _SMC_PUSH_DATA(p, l) \ | ||
964 | do { \ | 1093 | do { \ |
965 | char *__ptr = (p); \ | 1094 | if (SMC_CAN_USE_32BIT) { \ |
966 | int __len = (l); \ | 1095 | unsigned int __val = SMC_inl(ioaddr, DATA_REG); \ |
967 | if (__len >= 2 && (unsigned long)__ptr & 2) { \ | 1096 | (status) = __val & 0xffff; \ |
968 | __len -= 2; \ | 1097 | (length) = __val >> 16; \ |
969 | SMC_outw( *(u16 *)__ptr, ioaddr, DATA_REG ); \ | 1098 | } else { \ |
970 | __ptr += 2; \ | 1099 | (status) = SMC_inw(ioaddr, DATA_REG); \ |
971 | } \ | 1100 | (length) = SMC_inw(ioaddr, DATA_REG); \ |
972 | SMC_outsl( ioaddr, DATA_REG, __ptr, __len >> 2); \ | ||
973 | if (__len & 2) { \ | ||
974 | __ptr += (__len & ~3); \ | ||
975 | SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \ | ||
976 | } \ | 1101 | } \ |
977 | } while (0) | 1102 | } while (0) |
978 | #define _SMC_PULL_DATA(p, l) \ | ||
979 | do { \ | ||
980 | char *__ptr = (p); \ | ||
981 | int __len = (l); \ | ||
982 | if ((unsigned long)__ptr & 2) { \ | ||
983 | /* \ | ||
984 | * We want 32bit alignment here. \ | ||
985 | * Since some buses perform a full 32bit \ | ||
986 | * fetch even for 16bit data we can't use \ | ||
987 | * SMC_inw() here. Back both source (on chip \ | ||
988 | * and destination) pointers of 2 bytes. \ | ||
989 | */ \ | ||
990 | __ptr -= 2; \ | ||
991 | __len += 2; \ | ||
992 | SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \ | ||
993 | } \ | ||
994 | __len += 2; \ | ||
995 | SMC_insl( ioaddr, DATA_REG, __ptr, __len >> 2); \ | ||
996 | } while (0) | ||
997 | #elif SMC_CAN_USE_16BIT | ||
998 | #define _SMC_PUSH_DATA(p, l) SMC_outsw( ioaddr, DATA_REG, p, (l) >> 1 ) | ||
999 | #define _SMC_PULL_DATA(p, l) SMC_insw ( ioaddr, DATA_REG, p, (l) >> 1 ) | ||
1000 | #elif SMC_CAN_USE_8BIT | ||
1001 | #define _SMC_PUSH_DATA(p, l) SMC_outsb( ioaddr, DATA_REG, p, l ) | ||
1002 | #define _SMC_PULL_DATA(p, l) SMC_insb ( ioaddr, DATA_REG, p, l ) | ||
1003 | #endif | ||
1004 | 1103 | ||
1005 | #if ! SMC_CAN_USE_16BIT | 1104 | #define SMC_PUSH_DATA(p, l) \ |
1006 | #define SMC_outw(x, ioaddr, reg) \ | ||
1007 | do { \ | 1105 | do { \ |
1008 | unsigned int __val16 = (x); \ | 1106 | if (SMC_CAN_USE_32BIT) { \ |
1009 | SMC_outb( __val16, ioaddr, reg ); \ | 1107 | void *__ptr = (p); \ |
1010 | SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\ | 1108 | int __len = (l); \ |
1109 | void *__ioaddr = ioaddr; \ | ||
1110 | if (__len >= 2 && (unsigned long)__ptr & 2) { \ | ||
1111 | __len -= 2; \ | ||
1112 | SMC_outw(*(u16 *)__ptr, ioaddr, DATA_REG); \ | ||
1113 | __ptr += 2; \ | ||
1114 | } \ | ||
1115 | if (SMC_CAN_USE_DATACS && lp->datacs) \ | ||
1116 | __ioaddr = lp->datacs; \ | ||
1117 | SMC_outsl(__ioaddr, DATA_REG, __ptr, __len>>2); \ | ||
1118 | if (__len & 2) { \ | ||
1119 | __ptr += (__len & ~3); \ | ||
1120 | SMC_outw(*((u16 *)__ptr), ioaddr, DATA_REG); \ | ||
1121 | } \ | ||
1122 | } else if (SMC_CAN_USE_16BIT) \ | ||
1123 | SMC_outsw(ioaddr, DATA_REG, p, (l) >> 1); \ | ||
1124 | else if (SMC_CAN_USE_8BIT) \ | ||
1125 | SMC_outsb(ioaddr, DATA_REG, p, l); \ | ||
1011 | } while (0) | 1126 | } while (0) |
1012 | #define SMC_inw(ioaddr, reg) \ | ||
1013 | ({ \ | ||
1014 | unsigned int __val16; \ | ||
1015 | __val16 = SMC_inb( ioaddr, reg ); \ | ||
1016 | __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \ | ||
1017 | __val16; \ | ||
1018 | }) | ||
1019 | #endif | ||
1020 | |||
1021 | #ifdef SMC_CAN_USE_DATACS | ||
1022 | #define SMC_PUSH_DATA(p, l) \ | ||
1023 | if ( lp->datacs ) { \ | ||
1024 | unsigned char *__ptr = (p); \ | ||
1025 | int __len = (l); \ | ||
1026 | if (__len >= 2 && (unsigned long)__ptr & 2) { \ | ||
1027 | __len -= 2; \ | ||
1028 | SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \ | ||
1029 | __ptr += 2; \ | ||
1030 | } \ | ||
1031 | outsl(lp->datacs, __ptr, __len >> 2); \ | ||
1032 | if (__len & 2) { \ | ||
1033 | __ptr += (__len & ~3); \ | ||
1034 | SMC_outw( *((u16 *)__ptr), ioaddr, DATA_REG ); \ | ||
1035 | } \ | ||
1036 | } else { \ | ||
1037 | _SMC_PUSH_DATA(p, l); \ | ||
1038 | } | ||
1039 | 1127 | ||
1040 | #define SMC_PULL_DATA(p, l) \ | 1128 | #define SMC_PULL_DATA(p, l) \ |
1041 | if ( lp->datacs ) { \ | 1129 | do { \ |
1042 | unsigned char *__ptr = (p); \ | 1130 | if (SMC_CAN_USE_32BIT) { \ |
1043 | int __len = (l); \ | 1131 | void *__ptr = (p); \ |
1044 | if ((unsigned long)__ptr & 2) { \ | 1132 | int __len = (l); \ |
1045 | /* \ | 1133 | void *__ioaddr = ioaddr; \ |
1046 | * We want 32bit alignment here. \ | 1134 | if ((unsigned long)__ptr & 2) { \ |
1047 | * Since some buses perform a full 32bit \ | 1135 | /* \ |
1048 | * fetch even for 16bit data we can't use \ | 1136 | * We want 32bit alignment here. \ |
1049 | * SMC_inw() here. Back both source (on chip \ | 1137 | * Since some buses perform a full \ |
1050 | * and destination) pointers of 2 bytes. \ | 1138 | * 32bit fetch even for 16bit data \ |
1051 | */ \ | 1139 | * we can't use SMC_inw() here. \ |
1052 | __ptr -= 2; \ | 1140 | * Back both source (on-chip) and \ |
1141 | * destination pointers of 2 bytes. \ | ||
1142 | * This is possible since the call to \ | ||
1143 | * SMC_GET_PKT_HDR() already advanced \ | ||
1144 | * the source pointer of 4 bytes, and \ | ||
1145 | * the skb_reserve(skb, 2) advanced \ | ||
1146 | * the destination pointer of 2 bytes. \ | ||
1147 | */ \ | ||
1148 | __ptr -= 2; \ | ||
1149 | __len += 2; \ | ||
1150 | SMC_SET_PTR(2|PTR_READ|PTR_RCV|PTR_AUTOINC); \ | ||
1151 | } \ | ||
1152 | if (SMC_CAN_USE_DATACS && lp->datacs) \ | ||
1153 | __ioaddr = lp->datacs; \ | ||
1053 | __len += 2; \ | 1154 | __len += 2; \ |
1054 | SMC_SET_PTR( 2|PTR_READ|PTR_RCV|PTR_AUTOINC ); \ | 1155 | SMC_insl(__ioaddr, DATA_REG, __ptr, __len>>2); \ |
1055 | } \ | 1156 | } else if (SMC_CAN_USE_16BIT) \ |
1056 | __len += 2; \ | 1157 | SMC_insw(ioaddr, DATA_REG, p, (l) >> 1); \ |
1057 | insl( lp->datacs, __ptr, __len >> 2); \ | 1158 | else if (SMC_CAN_USE_8BIT) \ |
1058 | } else { \ | 1159 | SMC_insb(ioaddr, DATA_REG, p, l); \ |
1059 | _SMC_PULL_DATA(p, l); \ | 1160 | } while (0) |
1060 | } | ||
1061 | #else | ||
1062 | #define SMC_PUSH_DATA(p, l) _SMC_PUSH_DATA(p, l) | ||
1063 | #define SMC_PULL_DATA(p, l) _SMC_PULL_DATA(p, l) | ||
1064 | #endif | ||
1065 | |||
1066 | #if !defined (SMC_INTERRUPT_PREAMBLE) | ||
1067 | # define SMC_INTERRUPT_PREAMBLE | ||
1068 | #endif | ||
1069 | 1161 | ||
1070 | #endif /* _SMC91X_H_ */ | 1162 | #endif /* _SMC91X_H_ */ |
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c index 28ce47a02408..38cd30cb7c75 100644 --- a/drivers/net/sungem.c +++ b/drivers/net/sungem.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #include <linux/workqueue.h> | 55 | #include <linux/workqueue.h> |
56 | #include <linux/if_vlan.h> | 56 | #include <linux/if_vlan.h> |
57 | #include <linux/bitops.h> | 57 | #include <linux/bitops.h> |
58 | #include <linux/mutex.h> | ||
58 | 59 | ||
59 | #include <asm/system.h> | 60 | #include <asm/system.h> |
60 | #include <asm/io.h> | 61 | #include <asm/io.h> |
@@ -2284,7 +2285,7 @@ static void gem_reset_task(void *data) | |||
2284 | { | 2285 | { |
2285 | struct gem *gp = (struct gem *) data; | 2286 | struct gem *gp = (struct gem *) data; |
2286 | 2287 | ||
2287 | down(&gp->pm_sem); | 2288 | mutex_lock(&gp->pm_mutex); |
2288 | 2289 | ||
2289 | netif_poll_disable(gp->dev); | 2290 | netif_poll_disable(gp->dev); |
2290 | 2291 | ||
@@ -2311,7 +2312,7 @@ static void gem_reset_task(void *data) | |||
2311 | 2312 | ||
2312 | netif_poll_enable(gp->dev); | 2313 | netif_poll_enable(gp->dev); |
2313 | 2314 | ||
2314 | up(&gp->pm_sem); | 2315 | mutex_unlock(&gp->pm_mutex); |
2315 | } | 2316 | } |
2316 | 2317 | ||
2317 | 2318 | ||
@@ -2320,14 +2321,14 @@ static int gem_open(struct net_device *dev) | |||
2320 | struct gem *gp = dev->priv; | 2321 | struct gem *gp = dev->priv; |
2321 | int rc = 0; | 2322 | int rc = 0; |
2322 | 2323 | ||
2323 | down(&gp->pm_sem); | 2324 | mutex_lock(&gp->pm_mutex); |
2324 | 2325 | ||
2325 | /* We need the cell enabled */ | 2326 | /* We need the cell enabled */ |
2326 | if (!gp->asleep) | 2327 | if (!gp->asleep) |
2327 | rc = gem_do_start(dev); | 2328 | rc = gem_do_start(dev); |
2328 | gp->opened = (rc == 0); | 2329 | gp->opened = (rc == 0); |
2329 | 2330 | ||
2330 | up(&gp->pm_sem); | 2331 | mutex_unlock(&gp->pm_mutex); |
2331 | 2332 | ||
2332 | return rc; | 2333 | return rc; |
2333 | } | 2334 | } |
@@ -2340,13 +2341,13 @@ static int gem_close(struct net_device *dev) | |||
2340 | * our caller (dev_close) already did it for us | 2341 | * our caller (dev_close) already did it for us |
2341 | */ | 2342 | */ |
2342 | 2343 | ||
2343 | down(&gp->pm_sem); | 2344 | mutex_lock(&gp->pm_mutex); |
2344 | 2345 | ||
2345 | gp->opened = 0; | 2346 | gp->opened = 0; |
2346 | if (!gp->asleep) | 2347 | if (!gp->asleep) |
2347 | gem_do_stop(dev, 0); | 2348 | gem_do_stop(dev, 0); |
2348 | 2349 | ||
2349 | up(&gp->pm_sem); | 2350 | mutex_unlock(&gp->pm_mutex); |
2350 | 2351 | ||
2351 | return 0; | 2352 | return 0; |
2352 | } | 2353 | } |
@@ -2358,7 +2359,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2358 | struct gem *gp = dev->priv; | 2359 | struct gem *gp = dev->priv; |
2359 | unsigned long flags; | 2360 | unsigned long flags; |
2360 | 2361 | ||
2361 | down(&gp->pm_sem); | 2362 | mutex_lock(&gp->pm_mutex); |
2362 | 2363 | ||
2363 | netif_poll_disable(dev); | 2364 | netif_poll_disable(dev); |
2364 | 2365 | ||
@@ -2391,11 +2392,11 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) | |||
2391 | /* Stop the link timer */ | 2392 | /* Stop the link timer */ |
2392 | del_timer_sync(&gp->link_timer); | 2393 | del_timer_sync(&gp->link_timer); |
2393 | 2394 | ||
2394 | /* Now we release the semaphore to not block the reset task who | 2395 | /* Now we release the mutex to not block the reset task who |
2395 | * can take it too. We are marked asleep, so there will be no | 2396 | * can take it too. We are marked asleep, so there will be no |
2396 | * conflict here | 2397 | * conflict here |
2397 | */ | 2398 | */ |
2398 | up(&gp->pm_sem); | 2399 | mutex_unlock(&gp->pm_mutex); |
2399 | 2400 | ||
2400 | /* Wait for a pending reset task to complete */ | 2401 | /* Wait for a pending reset task to complete */ |
2401 | while (gp->reset_task_pending) | 2402 | while (gp->reset_task_pending) |
@@ -2424,7 +2425,7 @@ static int gem_resume(struct pci_dev *pdev) | |||
2424 | 2425 | ||
2425 | printk(KERN_INFO "%s: resuming\n", dev->name); | 2426 | printk(KERN_INFO "%s: resuming\n", dev->name); |
2426 | 2427 | ||
2427 | down(&gp->pm_sem); | 2428 | mutex_lock(&gp->pm_mutex); |
2428 | 2429 | ||
2429 | /* Keep the cell enabled during the entire operation, no need to | 2430 | /* Keep the cell enabled during the entire operation, no need to |
2430 | * take a lock here tho since nothing else can happen while we are | 2431 | * take a lock here tho since nothing else can happen while we are |
@@ -2440,7 +2441,7 @@ static int gem_resume(struct pci_dev *pdev) | |||
2440 | * still asleep, a new sleep cycle may bring it back | 2441 | * still asleep, a new sleep cycle may bring it back |
2441 | */ | 2442 | */ |
2442 | gem_put_cell(gp); | 2443 | gem_put_cell(gp); |
2443 | up(&gp->pm_sem); | 2444 | mutex_unlock(&gp->pm_mutex); |
2444 | return 0; | 2445 | return 0; |
2445 | } | 2446 | } |
2446 | pci_set_master(gp->pdev); | 2447 | pci_set_master(gp->pdev); |
@@ -2486,7 +2487,7 @@ static int gem_resume(struct pci_dev *pdev) | |||
2486 | 2487 | ||
2487 | netif_poll_enable(dev); | 2488 | netif_poll_enable(dev); |
2488 | 2489 | ||
2489 | up(&gp->pm_sem); | 2490 | mutex_unlock(&gp->pm_mutex); |
2490 | 2491 | ||
2491 | return 0; | 2492 | return 0; |
2492 | } | 2493 | } |
@@ -2591,7 +2592,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) | |||
2591 | return 0; | 2592 | return 0; |
2592 | } | 2593 | } |
2593 | 2594 | ||
2594 | down(&gp->pm_sem); | 2595 | mutex_lock(&gp->pm_mutex); |
2595 | spin_lock_irq(&gp->lock); | 2596 | spin_lock_irq(&gp->lock); |
2596 | spin_lock(&gp->tx_lock); | 2597 | spin_lock(&gp->tx_lock); |
2597 | dev->mtu = new_mtu; | 2598 | dev->mtu = new_mtu; |
@@ -2602,7 +2603,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu) | |||
2602 | } | 2603 | } |
2603 | spin_unlock(&gp->tx_lock); | 2604 | spin_unlock(&gp->tx_lock); |
2604 | spin_unlock_irq(&gp->lock); | 2605 | spin_unlock_irq(&gp->lock); |
2605 | up(&gp->pm_sem); | 2606 | mutex_unlock(&gp->pm_mutex); |
2606 | 2607 | ||
2607 | return 0; | 2608 | return 0; |
2608 | } | 2609 | } |
@@ -2771,10 +2772,10 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2771 | int rc = -EOPNOTSUPP; | 2772 | int rc = -EOPNOTSUPP; |
2772 | unsigned long flags; | 2773 | unsigned long flags; |
2773 | 2774 | ||
2774 | /* Hold the PM semaphore while doing ioctl's or we may collide | 2775 | /* Hold the PM mutex while doing ioctl's or we may collide |
2775 | * with power management. | 2776 | * with power management. |
2776 | */ | 2777 | */ |
2777 | down(&gp->pm_sem); | 2778 | mutex_lock(&gp->pm_mutex); |
2778 | 2779 | ||
2779 | spin_lock_irqsave(&gp->lock, flags); | 2780 | spin_lock_irqsave(&gp->lock, flags); |
2780 | gem_get_cell(gp); | 2781 | gem_get_cell(gp); |
@@ -2812,7 +2813,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
2812 | gem_put_cell(gp); | 2813 | gem_put_cell(gp); |
2813 | spin_unlock_irqrestore(&gp->lock, flags); | 2814 | spin_unlock_irqrestore(&gp->lock, flags); |
2814 | 2815 | ||
2815 | up(&gp->pm_sem); | 2816 | mutex_unlock(&gp->pm_mutex); |
2816 | 2817 | ||
2817 | return rc; | 2818 | return rc; |
2818 | } | 2819 | } |
@@ -3033,7 +3034,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev, | |||
3033 | 3034 | ||
3034 | spin_lock_init(&gp->lock); | 3035 | spin_lock_init(&gp->lock); |
3035 | spin_lock_init(&gp->tx_lock); | 3036 | spin_lock_init(&gp->tx_lock); |
3036 | init_MUTEX(&gp->pm_sem); | 3037 | mutex_init(&gp->pm_mutex); |
3037 | 3038 | ||
3038 | init_timer(&gp->link_timer); | 3039 | init_timer(&gp->link_timer); |
3039 | gp->link_timer.function = gem_link_timer; | 3040 | gp->link_timer.function = gem_link_timer; |
diff --git a/drivers/net/sungem.h b/drivers/net/sungem.h index 13006d759ad8..89847215d006 100644 --- a/drivers/net/sungem.h +++ b/drivers/net/sungem.h | |||
@@ -980,15 +980,15 @@ struct gem { | |||
980 | int tx_new, tx_old; | 980 | int tx_new, tx_old; |
981 | 981 | ||
982 | unsigned int has_wol : 1; /* chip supports wake-on-lan */ | 982 | unsigned int has_wol : 1; /* chip supports wake-on-lan */ |
983 | unsigned int asleep : 1; /* chip asleep, protected by pm_sem */ | 983 | unsigned int asleep : 1; /* chip asleep, protected by pm_mutex */ |
984 | unsigned int asleep_wol : 1; /* was asleep with WOL enabled */ | 984 | unsigned int asleep_wol : 1; /* was asleep with WOL enabled */ |
985 | unsigned int opened : 1; /* driver opened, protected by pm_sem */ | 985 | unsigned int opened : 1; /* driver opened, protected by pm_mutex */ |
986 | unsigned int running : 1; /* chip running, protected by lock */ | 986 | unsigned int running : 1; /* chip running, protected by lock */ |
987 | 987 | ||
988 | /* cell enable count, protected by lock */ | 988 | /* cell enable count, protected by lock */ |
989 | int cell_enabled; | 989 | int cell_enabled; |
990 | 990 | ||
991 | struct semaphore pm_sem; | 991 | struct mutex pm_mutex; |
992 | 992 | ||
993 | u32 msg_enable; | 993 | u32 msg_enable; |
994 | u32 status; | 994 | u32 status; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 6c6c5498899f..e03d1ae50c3e 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -69,8 +69,8 @@ | |||
69 | 69 | ||
70 | #define DRV_MODULE_NAME "tg3" | 70 | #define DRV_MODULE_NAME "tg3" |
71 | #define PFX DRV_MODULE_NAME ": " | 71 | #define PFX DRV_MODULE_NAME ": " |
72 | #define DRV_MODULE_VERSION "3.49" | 72 | #define DRV_MODULE_VERSION "3.52" |
73 | #define DRV_MODULE_RELDATE "Feb 2, 2006" | 73 | #define DRV_MODULE_RELDATE "Mar 06, 2006" |
74 | 74 | ||
75 | #define TG3_DEF_MAC_MODE 0 | 75 | #define TG3_DEF_MAC_MODE 0 |
76 | #define TG3_DEF_RX_MODE 0 | 76 | #define TG3_DEF_RX_MODE 0 |
@@ -221,10 +221,22 @@ static struct pci_device_id tg3_pci_tbl[] = { | |||
221 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 221 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, |
222 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, | 222 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F, |
223 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 223 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, |
224 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754, | ||
225 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
226 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M, | ||
227 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
228 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787, | ||
229 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
230 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M, | ||
231 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
224 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714, | 232 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714, |
225 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 233 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, |
234 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S, | ||
235 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
226 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715, | 236 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715, |
227 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 237 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, |
238 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S, | ||
239 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | ||
228 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, | 240 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, |
229 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, | 241 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, |
230 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, | 242 | { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, |
@@ -534,6 +546,9 @@ static void tg3_enable_ints(struct tg3 *tp) | |||
534 | (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); | 546 | (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT)); |
535 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, | 547 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, |
536 | (tp->last_tag << 24)); | 548 | (tp->last_tag << 24)); |
549 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) | ||
550 | tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, | ||
551 | (tp->last_tag << 24)); | ||
537 | tg3_cond_int(tp); | 552 | tg3_cond_int(tp); |
538 | } | 553 | } |
539 | 554 | ||
@@ -1038,9 +1053,11 @@ static void tg3_frob_aux_power(struct tg3 *tp) | |||
1038 | struct net_device *dev_peer; | 1053 | struct net_device *dev_peer; |
1039 | 1054 | ||
1040 | dev_peer = pci_get_drvdata(tp->pdev_peer); | 1055 | dev_peer = pci_get_drvdata(tp->pdev_peer); |
1056 | /* remove_one() may have been run on the peer. */ | ||
1041 | if (!dev_peer) | 1057 | if (!dev_peer) |
1042 | BUG(); | 1058 | tp_peer = tp; |
1043 | tp_peer = netdev_priv(dev_peer); | 1059 | else |
1060 | tp_peer = netdev_priv(dev_peer); | ||
1044 | } | 1061 | } |
1045 | 1062 | ||
1046 | if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || | 1063 | if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 || |
@@ -1131,7 +1148,7 @@ static int tg3_halt_cpu(struct tg3 *, u32); | |||
1131 | static int tg3_nvram_lock(struct tg3 *); | 1148 | static int tg3_nvram_lock(struct tg3 *); |
1132 | static void tg3_nvram_unlock(struct tg3 *); | 1149 | static void tg3_nvram_unlock(struct tg3 *); |
1133 | 1150 | ||
1134 | static int tg3_set_power_state(struct tg3 *tp, int state) | 1151 | static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) |
1135 | { | 1152 | { |
1136 | u32 misc_host_ctrl; | 1153 | u32 misc_host_ctrl; |
1137 | u16 power_control, power_caps; | 1154 | u16 power_control, power_caps; |
@@ -1150,7 +1167,7 @@ static int tg3_set_power_state(struct tg3 *tp, int state) | |||
1150 | power_control |= PCI_PM_CTRL_PME_STATUS; | 1167 | power_control |= PCI_PM_CTRL_PME_STATUS; |
1151 | power_control &= ~(PCI_PM_CTRL_STATE_MASK); | 1168 | power_control &= ~(PCI_PM_CTRL_STATE_MASK); |
1152 | switch (state) { | 1169 | switch (state) { |
1153 | case 0: | 1170 | case PCI_D0: |
1154 | power_control |= 0; | 1171 | power_control |= 0; |
1155 | pci_write_config_word(tp->pdev, | 1172 | pci_write_config_word(tp->pdev, |
1156 | pm + PCI_PM_CTRL, | 1173 | pm + PCI_PM_CTRL, |
@@ -1163,15 +1180,15 @@ static int tg3_set_power_state(struct tg3 *tp, int state) | |||
1163 | 1180 | ||
1164 | return 0; | 1181 | return 0; |
1165 | 1182 | ||
1166 | case 1: | 1183 | case PCI_D1: |
1167 | power_control |= 1; | 1184 | power_control |= 1; |
1168 | break; | 1185 | break; |
1169 | 1186 | ||
1170 | case 2: | 1187 | case PCI_D2: |
1171 | power_control |= 2; | 1188 | power_control |= 2; |
1172 | break; | 1189 | break; |
1173 | 1190 | ||
1174 | case 3: | 1191 | case PCI_D3hot: |
1175 | power_control |= 3; | 1192 | power_control |= 3; |
1176 | break; | 1193 | break; |
1177 | 1194 | ||
@@ -2680,6 +2697,12 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) | |||
2680 | 2697 | ||
2681 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 2698 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); |
2682 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 2699 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); |
2700 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) { | ||
2701 | if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) | ||
2702 | bmsr |= BMSR_LSTATUS; | ||
2703 | else | ||
2704 | bmsr &= ~BMSR_LSTATUS; | ||
2705 | } | ||
2683 | 2706 | ||
2684 | err |= tg3_readphy(tp, MII_BMCR, &bmcr); | 2707 | err |= tg3_readphy(tp, MII_BMCR, &bmcr); |
2685 | 2708 | ||
@@ -2748,6 +2771,13 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset) | |||
2748 | bmcr = new_bmcr; | 2771 | bmcr = new_bmcr; |
2749 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 2772 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); |
2750 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); | 2773 | err |= tg3_readphy(tp, MII_BMSR, &bmsr); |
2774 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == | ||
2775 | ASIC_REV_5714) { | ||
2776 | if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP) | ||
2777 | bmsr |= BMSR_LSTATUS; | ||
2778 | else | ||
2779 | bmsr &= ~BMSR_LSTATUS; | ||
2780 | } | ||
2751 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; | 2781 | tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT; |
2752 | } | 2782 | } |
2753 | } | 2783 | } |
@@ -3338,6 +3368,23 @@ static inline void tg3_full_unlock(struct tg3 *tp) | |||
3338 | spin_unlock_bh(&tp->lock); | 3368 | spin_unlock_bh(&tp->lock); |
3339 | } | 3369 | } |
3340 | 3370 | ||
3371 | /* One-shot MSI handler - Chip automatically disables interrupt | ||
3372 | * after sending MSI so driver doesn't have to do it. | ||
3373 | */ | ||
3374 | static irqreturn_t tg3_msi_1shot(int irq, void *dev_id, struct pt_regs *regs) | ||
3375 | { | ||
3376 | struct net_device *dev = dev_id; | ||
3377 | struct tg3 *tp = netdev_priv(dev); | ||
3378 | |||
3379 | prefetch(tp->hw_status); | ||
3380 | prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]); | ||
3381 | |||
3382 | if (likely(!tg3_irq_sync(tp))) | ||
3383 | netif_rx_schedule(dev); /* schedule NAPI poll */ | ||
3384 | |||
3385 | return IRQ_HANDLED; | ||
3386 | } | ||
3387 | |||
3341 | /* MSI ISR - No need to check for interrupt sharing and no need to | 3388 | /* MSI ISR - No need to check for interrupt sharing and no need to |
3342 | * flush status block and interrupt mailbox. PCI ordering rules | 3389 | * flush status block and interrupt mailbox. PCI ordering rules |
3343 | * guarantee that MSI will arrive after the status block. | 3390 | * guarantee that MSI will arrive after the status block. |
@@ -3628,11 +3675,139 @@ static void tg3_set_txd(struct tg3 *tp, int entry, | |||
3628 | txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; | 3675 | txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT; |
3629 | } | 3676 | } |
3630 | 3677 | ||
3678 | /* hard_start_xmit for devices that don't have any bugs and | ||
3679 | * support TG3_FLG2_HW_TSO_2 only. | ||
3680 | */ | ||
3631 | static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) | 3681 | static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) |
3632 | { | 3682 | { |
3633 | struct tg3 *tp = netdev_priv(dev); | 3683 | struct tg3 *tp = netdev_priv(dev); |
3634 | dma_addr_t mapping; | 3684 | dma_addr_t mapping; |
3635 | u32 len, entry, base_flags, mss; | 3685 | u32 len, entry, base_flags, mss; |
3686 | |||
3687 | len = skb_headlen(skb); | ||
3688 | |||
3689 | /* No BH disabling for tx_lock here. We are running in BH disabled | ||
3690 | * context and TX reclaim runs via tp->poll inside of a software | ||
3691 | * interrupt. Furthermore, IRQ processing runs lockless so we have | ||
3692 | * no IRQ context deadlocks to worry about either. Rejoice! | ||
3693 | */ | ||
3694 | if (!spin_trylock(&tp->tx_lock)) | ||
3695 | return NETDEV_TX_LOCKED; | ||
3696 | |||
3697 | if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) { | ||
3698 | if (!netif_queue_stopped(dev)) { | ||
3699 | netif_stop_queue(dev); | ||
3700 | |||
3701 | /* This is a hard error, log it. */ | ||
3702 | printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " | ||
3703 | "queue awake!\n", dev->name); | ||
3704 | } | ||
3705 | spin_unlock(&tp->tx_lock); | ||
3706 | return NETDEV_TX_BUSY; | ||
3707 | } | ||
3708 | |||
3709 | entry = tp->tx_prod; | ||
3710 | base_flags = 0; | ||
3711 | #if TG3_TSO_SUPPORT != 0 | ||
3712 | mss = 0; | ||
3713 | if (skb->len > (tp->dev->mtu + ETH_HLEN) && | ||
3714 | (mss = skb_shinfo(skb)->tso_size) != 0) { | ||
3715 | int tcp_opt_len, ip_tcp_len; | ||
3716 | |||
3717 | if (skb_header_cloned(skb) && | ||
3718 | pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { | ||
3719 | dev_kfree_skb(skb); | ||
3720 | goto out_unlock; | ||
3721 | } | ||
3722 | |||
3723 | tcp_opt_len = ((skb->h.th->doff - 5) * 4); | ||
3724 | ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr); | ||
3725 | |||
3726 | base_flags |= (TXD_FLAG_CPU_PRE_DMA | | ||
3727 | TXD_FLAG_CPU_POST_DMA); | ||
3728 | |||
3729 | skb->nh.iph->check = 0; | ||
3730 | skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len); | ||
3731 | |||
3732 | skb->h.th->check = 0; | ||
3733 | |||
3734 | mss |= (ip_tcp_len + tcp_opt_len) << 9; | ||
3735 | } | ||
3736 | else if (skb->ip_summed == CHECKSUM_HW) | ||
3737 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
3738 | #else | ||
3739 | mss = 0; | ||
3740 | if (skb->ip_summed == CHECKSUM_HW) | ||
3741 | base_flags |= TXD_FLAG_TCPUDP_CSUM; | ||
3742 | #endif | ||
3743 | #if TG3_VLAN_TAG_USED | ||
3744 | if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) | ||
3745 | base_flags |= (TXD_FLAG_VLAN | | ||
3746 | (vlan_tx_tag_get(skb) << 16)); | ||
3747 | #endif | ||
3748 | |||
3749 | /* Queue skb data, a.k.a. the main skb fragment. */ | ||
3750 | mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); | ||
3751 | |||
3752 | tp->tx_buffers[entry].skb = skb; | ||
3753 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); | ||
3754 | |||
3755 | tg3_set_txd(tp, entry, mapping, len, base_flags, | ||
3756 | (skb_shinfo(skb)->nr_frags == 0) | (mss << 1)); | ||
3757 | |||
3758 | entry = NEXT_TX(entry); | ||
3759 | |||
3760 | /* Now loop through additional data fragments, and queue them. */ | ||
3761 | if (skb_shinfo(skb)->nr_frags > 0) { | ||
3762 | unsigned int i, last; | ||
3763 | |||
3764 | last = skb_shinfo(skb)->nr_frags - 1; | ||
3765 | for (i = 0; i <= last; i++) { | ||
3766 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
3767 | |||
3768 | len = frag->size; | ||
3769 | mapping = pci_map_page(tp->pdev, | ||
3770 | frag->page, | ||
3771 | frag->page_offset, | ||
3772 | len, PCI_DMA_TODEVICE); | ||
3773 | |||
3774 | tp->tx_buffers[entry].skb = NULL; | ||
3775 | pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping); | ||
3776 | |||
3777 | tg3_set_txd(tp, entry, mapping, len, | ||
3778 | base_flags, (i == last) | (mss << 1)); | ||
3779 | |||
3780 | entry = NEXT_TX(entry); | ||
3781 | } | ||
3782 | } | ||
3783 | |||
3784 | /* Packets are ready, update Tx producer idx local and on card. */ | ||
3785 | tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry); | ||
3786 | |||
3787 | tp->tx_prod = entry; | ||
3788 | if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1)) { | ||
3789 | netif_stop_queue(dev); | ||
3790 | if (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH) | ||
3791 | netif_wake_queue(tp->dev); | ||
3792 | } | ||
3793 | |||
3794 | out_unlock: | ||
3795 | mmiowb(); | ||
3796 | spin_unlock(&tp->tx_lock); | ||
3797 | |||
3798 | dev->trans_start = jiffies; | ||
3799 | |||
3800 | return NETDEV_TX_OK; | ||
3801 | } | ||
3802 | |||
3803 | /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and | ||
3804 | * support TG3_FLG2_HW_TSO_1 or firmware TSO only. | ||
3805 | */ | ||
3806 | static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev) | ||
3807 | { | ||
3808 | struct tg3 *tp = netdev_priv(dev); | ||
3809 | dma_addr_t mapping; | ||
3810 | u32 len, entry, base_flags, mss; | ||
3636 | int would_hit_hwbug; | 3811 | int would_hit_hwbug; |
3637 | 3812 | ||
3638 | len = skb_headlen(skb); | 3813 | len = skb_headlen(skb); |
@@ -4369,6 +4544,10 @@ static int tg3_chip_reset(struct tg3 *tp) | |||
4369 | tp->nvram_lock_cnt = 0; | 4544 | tp->nvram_lock_cnt = 0; |
4370 | } | 4545 | } |
4371 | 4546 | ||
4547 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | ||
4548 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | ||
4549 | tw32(GRC_FASTBOOT_PC, 0); | ||
4550 | |||
4372 | /* | 4551 | /* |
4373 | * We must avoid the readl() that normally takes place. | 4552 | * We must avoid the readl() that normally takes place. |
4374 | * It locks machines, causes machine checks, and other | 4553 | * It locks machines, causes machine checks, and other |
@@ -5518,6 +5697,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p) | |||
5518 | 5697 | ||
5519 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | 5698 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); |
5520 | 5699 | ||
5700 | if (!netif_running(dev)) | ||
5701 | return 0; | ||
5702 | |||
5521 | spin_lock_bh(&tp->lock); | 5703 | spin_lock_bh(&tp->lock); |
5522 | __tg3_set_mac_addr(tp); | 5704 | __tg3_set_mac_addr(tp); |
5523 | spin_unlock_bh(&tp->lock); | 5705 | spin_unlock_bh(&tp->lock); |
@@ -5585,6 +5767,9 @@ static int tg3_reset_hw(struct tg3 *tp) | |||
5585 | tg3_abort_hw(tp, 1); | 5767 | tg3_abort_hw(tp, 1); |
5586 | } | 5768 | } |
5587 | 5769 | ||
5770 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) | ||
5771 | tg3_phy_reset(tp); | ||
5772 | |||
5588 | err = tg3_chip_reset(tp); | 5773 | err = tg3_chip_reset(tp); |
5589 | if (err) | 5774 | if (err) |
5590 | return err; | 5775 | return err; |
@@ -5993,6 +6178,10 @@ static int tg3_reset_hw(struct tg3 *tp) | |||
5993 | } | 6178 | } |
5994 | } | 6179 | } |
5995 | 6180 | ||
6181 | /* Enable host coalescing bug fix */ | ||
6182 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | ||
6183 | val |= (1 << 29); | ||
6184 | |||
5996 | tw32_f(WDMAC_MODE, val); | 6185 | tw32_f(WDMAC_MODE, val); |
5997 | udelay(40); | 6186 | udelay(40); |
5998 | 6187 | ||
@@ -6097,6 +6286,17 @@ static int tg3_reset_hw(struct tg3 *tp) | |||
6097 | tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; | 6286 | tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG; |
6098 | } | 6287 | } |
6099 | 6288 | ||
6289 | if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) && | ||
6290 | (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) { | ||
6291 | u32 tmp; | ||
6292 | |||
6293 | tmp = tr32(SERDES_RX_CTRL); | ||
6294 | tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT); | ||
6295 | tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT; | ||
6296 | tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT; | ||
6297 | tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl); | ||
6298 | } | ||
6299 | |||
6100 | err = tg3_setup_phy(tp, 1); | 6300 | err = tg3_setup_phy(tp, 1); |
6101 | if (err) | 6301 | if (err) |
6102 | return err; | 6302 | return err; |
@@ -6175,7 +6375,7 @@ static int tg3_init_hw(struct tg3 *tp) | |||
6175 | int err; | 6375 | int err; |
6176 | 6376 | ||
6177 | /* Force the chip into D0. */ | 6377 | /* Force the chip into D0. */ |
6178 | err = tg3_set_power_state(tp, 0); | 6378 | err = tg3_set_power_state(tp, PCI_D0); |
6179 | if (err) | 6379 | if (err) |
6180 | goto out; | 6380 | goto out; |
6181 | 6381 | ||
@@ -6331,6 +6531,26 @@ static void tg3_timer(unsigned long __opaque) | |||
6331 | add_timer(&tp->timer); | 6531 | add_timer(&tp->timer); |
6332 | } | 6532 | } |
6333 | 6533 | ||
6534 | static int tg3_request_irq(struct tg3 *tp) | ||
6535 | { | ||
6536 | irqreturn_t (*fn)(int, void *, struct pt_regs *); | ||
6537 | unsigned long flags; | ||
6538 | struct net_device *dev = tp->dev; | ||
6539 | |||
6540 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | ||
6541 | fn = tg3_msi; | ||
6542 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) | ||
6543 | fn = tg3_msi_1shot; | ||
6544 | flags = SA_SAMPLE_RANDOM; | ||
6545 | } else { | ||
6546 | fn = tg3_interrupt; | ||
6547 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | ||
6548 | fn = tg3_interrupt_tagged; | ||
6549 | flags = SA_SHIRQ | SA_SAMPLE_RANDOM; | ||
6550 | } | ||
6551 | return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev)); | ||
6552 | } | ||
6553 | |||
6334 | static int tg3_test_interrupt(struct tg3 *tp) | 6554 | static int tg3_test_interrupt(struct tg3 *tp) |
6335 | { | 6555 | { |
6336 | struct net_device *dev = tp->dev; | 6556 | struct net_device *dev = tp->dev; |
@@ -6367,16 +6587,7 @@ static int tg3_test_interrupt(struct tg3 *tp) | |||
6367 | 6587 | ||
6368 | free_irq(tp->pdev->irq, dev); | 6588 | free_irq(tp->pdev->irq, dev); |
6369 | 6589 | ||
6370 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) | 6590 | err = tg3_request_irq(tp); |
6371 | err = request_irq(tp->pdev->irq, tg3_msi, | ||
6372 | SA_SAMPLE_RANDOM, dev->name, dev); | ||
6373 | else { | ||
6374 | irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt; | ||
6375 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | ||
6376 | fn = tg3_interrupt_tagged; | ||
6377 | err = request_irq(tp->pdev->irq, fn, | ||
6378 | SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); | ||
6379 | } | ||
6380 | 6591 | ||
6381 | if (err) | 6592 | if (err) |
6382 | return err; | 6593 | return err; |
@@ -6428,14 +6639,7 @@ static int tg3_test_msi(struct tg3 *tp) | |||
6428 | 6639 | ||
6429 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; | 6640 | tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; |
6430 | 6641 | ||
6431 | { | 6642 | err = tg3_request_irq(tp); |
6432 | irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt; | ||
6433 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | ||
6434 | fn = tg3_interrupt_tagged; | ||
6435 | |||
6436 | err = request_irq(tp->pdev->irq, fn, | ||
6437 | SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); | ||
6438 | } | ||
6439 | if (err) | 6643 | if (err) |
6440 | return err; | 6644 | return err; |
6441 | 6645 | ||
@@ -6462,6 +6666,10 @@ static int tg3_open(struct net_device *dev) | |||
6462 | 6666 | ||
6463 | tg3_full_lock(tp, 0); | 6667 | tg3_full_lock(tp, 0); |
6464 | 6668 | ||
6669 | err = tg3_set_power_state(tp, PCI_D0); | ||
6670 | if (err) | ||
6671 | return err; | ||
6672 | |||
6465 | tg3_disable_ints(tp); | 6673 | tg3_disable_ints(tp); |
6466 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; | 6674 | tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; |
6467 | 6675 | ||
@@ -6476,7 +6684,9 @@ static int tg3_open(struct net_device *dev) | |||
6476 | 6684 | ||
6477 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && | 6685 | if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && |
6478 | (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) && | 6686 | (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_AX) && |
6479 | (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX)) { | 6687 | (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5750_BX) && |
6688 | !((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) && | ||
6689 | (tp->pdev_peer == tp->pdev))) { | ||
6480 | /* All MSI supporting chips should support tagged | 6690 | /* All MSI supporting chips should support tagged |
6481 | * status. Assert that this is the case. | 6691 | * status. Assert that this is the case. |
6482 | */ | 6692 | */ |
@@ -6491,17 +6701,7 @@ static int tg3_open(struct net_device *dev) | |||
6491 | tp->tg3_flags2 |= TG3_FLG2_USING_MSI; | 6701 | tp->tg3_flags2 |= TG3_FLG2_USING_MSI; |
6492 | } | 6702 | } |
6493 | } | 6703 | } |
6494 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) | 6704 | err = tg3_request_irq(tp); |
6495 | err = request_irq(tp->pdev->irq, tg3_msi, | ||
6496 | SA_SAMPLE_RANDOM, dev->name, dev); | ||
6497 | else { | ||
6498 | irqreturn_t (*fn)(int, void *, struct pt_regs *)=tg3_interrupt; | ||
6499 | if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) | ||
6500 | fn = tg3_interrupt_tagged; | ||
6501 | |||
6502 | err = request_irq(tp->pdev->irq, fn, | ||
6503 | SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev); | ||
6504 | } | ||
6505 | 6705 | ||
6506 | if (err) { | 6706 | if (err) { |
6507 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | 6707 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { |
@@ -6566,6 +6766,14 @@ static int tg3_open(struct net_device *dev) | |||
6566 | 6766 | ||
6567 | return err; | 6767 | return err; |
6568 | } | 6768 | } |
6769 | |||
6770 | if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) { | ||
6771 | if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) { | ||
6772 | u32 val = tr32(0x7c04); | ||
6773 | |||
6774 | tw32(0x7c04, val | (1 << 29)); | ||
6775 | } | ||
6776 | } | ||
6569 | } | 6777 | } |
6570 | 6778 | ||
6571 | tg3_full_lock(tp, 0); | 6779 | tg3_full_lock(tp, 0); |
@@ -6839,7 +7047,6 @@ static int tg3_close(struct net_device *dev) | |||
6839 | tp->tg3_flags &= | 7047 | tp->tg3_flags &= |
6840 | ~(TG3_FLAG_INIT_COMPLETE | | 7048 | ~(TG3_FLAG_INIT_COMPLETE | |
6841 | TG3_FLAG_GOT_SERDES_FLOWCTL); | 7049 | TG3_FLAG_GOT_SERDES_FLOWCTL); |
6842 | netif_carrier_off(tp->dev); | ||
6843 | 7050 | ||
6844 | tg3_full_unlock(tp); | 7051 | tg3_full_unlock(tp); |
6845 | 7052 | ||
@@ -6856,6 +7063,10 @@ static int tg3_close(struct net_device *dev) | |||
6856 | 7063 | ||
6857 | tg3_free_consistent(tp); | 7064 | tg3_free_consistent(tp); |
6858 | 7065 | ||
7066 | tg3_set_power_state(tp, PCI_D3hot); | ||
7067 | |||
7068 | netif_carrier_off(tp->dev); | ||
7069 | |||
6859 | return 0; | 7070 | return 0; |
6860 | } | 7071 | } |
6861 | 7072 | ||
@@ -7150,6 +7361,9 @@ static void tg3_set_rx_mode(struct net_device *dev) | |||
7150 | { | 7361 | { |
7151 | struct tg3 *tp = netdev_priv(dev); | 7362 | struct tg3 *tp = netdev_priv(dev); |
7152 | 7363 | ||
7364 | if (!netif_running(dev)) | ||
7365 | return; | ||
7366 | |||
7153 | tg3_full_lock(tp, 0); | 7367 | tg3_full_lock(tp, 0); |
7154 | __tg3_set_rx_mode(dev); | 7368 | __tg3_set_rx_mode(dev); |
7155 | tg3_full_unlock(tp); | 7369 | tg3_full_unlock(tp); |
@@ -7174,6 +7388,9 @@ static void tg3_get_regs(struct net_device *dev, | |||
7174 | 7388 | ||
7175 | memset(p, 0, TG3_REGDUMP_LEN); | 7389 | memset(p, 0, TG3_REGDUMP_LEN); |
7176 | 7390 | ||
7391 | if (tp->link_config.phy_is_low_power) | ||
7392 | return; | ||
7393 | |||
7177 | tg3_full_lock(tp, 0); | 7394 | tg3_full_lock(tp, 0); |
7178 | 7395 | ||
7179 | #define __GET_REG32(reg) (*(p)++ = tr32(reg)) | 7396 | #define __GET_REG32(reg) (*(p)++ = tr32(reg)) |
@@ -7240,6 +7457,7 @@ static int tg3_get_eeprom_len(struct net_device *dev) | |||
7240 | } | 7457 | } |
7241 | 7458 | ||
7242 | static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val); | 7459 | static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val); |
7460 | static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val); | ||
7243 | 7461 | ||
7244 | static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) | 7462 | static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data) |
7245 | { | 7463 | { |
@@ -7248,6 +7466,9 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | |||
7248 | u8 *pd; | 7466 | u8 *pd; |
7249 | u32 i, offset, len, val, b_offset, b_count; | 7467 | u32 i, offset, len, val, b_offset, b_count; |
7250 | 7468 | ||
7469 | if (tp->link_config.phy_is_low_power) | ||
7470 | return -EAGAIN; | ||
7471 | |||
7251 | offset = eeprom->offset; | 7472 | offset = eeprom->offset; |
7252 | len = eeprom->len; | 7473 | len = eeprom->len; |
7253 | eeprom->len = 0; | 7474 | eeprom->len = 0; |
@@ -7309,6 +7530,9 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, | |||
7309 | u32 offset, len, b_offset, odd_len, start, end; | 7530 | u32 offset, len, b_offset, odd_len, start, end; |
7310 | u8 *buf; | 7531 | u8 *buf; |
7311 | 7532 | ||
7533 | if (tp->link_config.phy_is_low_power) | ||
7534 | return -EAGAIN; | ||
7535 | |||
7312 | if (eeprom->magic != TG3_EEPROM_MAGIC) | 7536 | if (eeprom->magic != TG3_EEPROM_MAGIC) |
7313 | return -EINVAL; | 7537 | return -EINVAL; |
7314 | 7538 | ||
@@ -7442,6 +7666,7 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info | |||
7442 | 7666 | ||
7443 | strcpy(info->driver, DRV_MODULE_NAME); | 7667 | strcpy(info->driver, DRV_MODULE_NAME); |
7444 | strcpy(info->version, DRV_MODULE_VERSION); | 7668 | strcpy(info->version, DRV_MODULE_VERSION); |
7669 | strcpy(info->fw_version, tp->fw_ver); | ||
7445 | strcpy(info->bus_info, pci_name(tp->pdev)); | 7670 | strcpy(info->bus_info, pci_name(tp->pdev)); |
7446 | } | 7671 | } |
7447 | 7672 | ||
@@ -7536,11 +7761,20 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam * | |||
7536 | 7761 | ||
7537 | ering->rx_max_pending = TG3_RX_RING_SIZE - 1; | 7762 | ering->rx_max_pending = TG3_RX_RING_SIZE - 1; |
7538 | ering->rx_mini_max_pending = 0; | 7763 | ering->rx_mini_max_pending = 0; |
7539 | ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; | 7764 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) |
7765 | ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1; | ||
7766 | else | ||
7767 | ering->rx_jumbo_max_pending = 0; | ||
7768 | |||
7769 | ering->tx_max_pending = TG3_TX_RING_SIZE - 1; | ||
7540 | 7770 | ||
7541 | ering->rx_pending = tp->rx_pending; | 7771 | ering->rx_pending = tp->rx_pending; |
7542 | ering->rx_mini_pending = 0; | 7772 | ering->rx_mini_pending = 0; |
7543 | ering->rx_jumbo_pending = tp->rx_jumbo_pending; | 7773 | if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) |
7774 | ering->rx_jumbo_pending = tp->rx_jumbo_pending; | ||
7775 | else | ||
7776 | ering->rx_jumbo_pending = 0; | ||
7777 | |||
7544 | ering->tx_pending = tp->tx_pending; | 7778 | ering->tx_pending = tp->tx_pending; |
7545 | } | 7779 | } |
7546 | 7780 | ||
@@ -7661,10 +7895,10 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data) | |||
7661 | return 0; | 7895 | return 0; |
7662 | } | 7896 | } |
7663 | 7897 | ||
7664 | if (data) | 7898 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
7665 | dev->features |= NETIF_F_IP_CSUM; | 7899 | ethtool_op_set_tx_hw_csum(dev, data); |
7666 | else | 7900 | else |
7667 | dev->features &= ~NETIF_F_IP_CSUM; | 7901 | ethtool_op_set_tx_csum(dev, data); |
7668 | 7902 | ||
7669 | return 0; | 7903 | return 0; |
7670 | } | 7904 | } |
@@ -7734,29 +7968,52 @@ static void tg3_get_ethtool_stats (struct net_device *dev, | |||
7734 | } | 7968 | } |
7735 | 7969 | ||
7736 | #define NVRAM_TEST_SIZE 0x100 | 7970 | #define NVRAM_TEST_SIZE 0x100 |
7971 | #define NVRAM_SELFBOOT_FORMAT1_SIZE 0x14 | ||
7737 | 7972 | ||
7738 | static int tg3_test_nvram(struct tg3 *tp) | 7973 | static int tg3_test_nvram(struct tg3 *tp) |
7739 | { | 7974 | { |
7740 | u32 *buf, csum; | 7975 | u32 *buf, csum, magic; |
7741 | int i, j, err = 0; | 7976 | int i, j, err = 0, size; |
7742 | 7977 | ||
7743 | buf = kmalloc(NVRAM_TEST_SIZE, GFP_KERNEL); | 7978 | if (tg3_nvram_read_swab(tp, 0, &magic) != 0) |
7979 | return -EIO; | ||
7980 | |||
7981 | if (magic == TG3_EEPROM_MAGIC) | ||
7982 | size = NVRAM_TEST_SIZE; | ||
7983 | else if ((magic & 0xff000000) == 0xa5000000) { | ||
7984 | if ((magic & 0xe00000) == 0x200000) | ||
7985 | size = NVRAM_SELFBOOT_FORMAT1_SIZE; | ||
7986 | else | ||
7987 | return 0; | ||
7988 | } else | ||
7989 | return -EIO; | ||
7990 | |||
7991 | buf = kmalloc(size, GFP_KERNEL); | ||
7744 | if (buf == NULL) | 7992 | if (buf == NULL) |
7745 | return -ENOMEM; | 7993 | return -ENOMEM; |
7746 | 7994 | ||
7747 | for (i = 0, j = 0; i < NVRAM_TEST_SIZE; i += 4, j++) { | 7995 | err = -EIO; |
7996 | for (i = 0, j = 0; i < size; i += 4, j++) { | ||
7748 | u32 val; | 7997 | u32 val; |
7749 | 7998 | ||
7750 | if ((err = tg3_nvram_read(tp, i, &val)) != 0) | 7999 | if ((err = tg3_nvram_read(tp, i, &val)) != 0) |
7751 | break; | 8000 | break; |
7752 | buf[j] = cpu_to_le32(val); | 8001 | buf[j] = cpu_to_le32(val); |
7753 | } | 8002 | } |
7754 | if (i < NVRAM_TEST_SIZE) | 8003 | if (i < size) |
7755 | goto out; | 8004 | goto out; |
7756 | 8005 | ||
7757 | err = -EIO; | 8006 | /* Selfboot format */ |
7758 | if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) | 8007 | if (cpu_to_be32(buf[0]) != TG3_EEPROM_MAGIC) { |
7759 | goto out; | 8008 | u8 *buf8 = (u8 *) buf, csum8 = 0; |
8009 | |||
8010 | for (i = 0; i < size; i++) | ||
8011 | csum8 += buf8[i]; | ||
8012 | |||
8013 | if (csum8 == 0) | ||
8014 | return 0; | ||
8015 | return -EIO; | ||
8016 | } | ||
7760 | 8017 | ||
7761 | /* Bootstrap checksum at offset 0x10 */ | 8018 | /* Bootstrap checksum at offset 0x10 */ |
7762 | csum = calc_crc((unsigned char *) buf, 0x10); | 8019 | csum = calc_crc((unsigned char *) buf, 0x10); |
@@ -7802,7 +8059,7 @@ static int tg3_test_link(struct tg3 *tp) | |||
7802 | } | 8059 | } |
7803 | 8060 | ||
7804 | /* Only test the commonly used registers */ | 8061 | /* Only test the commonly used registers */ |
7805 | static const int tg3_test_registers(struct tg3 *tp) | 8062 | static int tg3_test_registers(struct tg3 *tp) |
7806 | { | 8063 | { |
7807 | int i, is_5705; | 8064 | int i, is_5705; |
7808 | u32 offset, read_mask, write_mask, val, save_val, read_val; | 8065 | u32 offset, read_mask, write_mask, val, save_val, read_val; |
@@ -8050,14 +8307,24 @@ static int tg3_test_memory(struct tg3 *tp) | |||
8050 | { 0x00008000, 0x02000}, | 8307 | { 0x00008000, 0x02000}, |
8051 | { 0x00010000, 0x0e000}, | 8308 | { 0x00010000, 0x0e000}, |
8052 | { 0xffffffff, 0x00000} | 8309 | { 0xffffffff, 0x00000} |
8310 | }, mem_tbl_5755[] = { | ||
8311 | { 0x00000200, 0x00008}, | ||
8312 | { 0x00004000, 0x00800}, | ||
8313 | { 0x00006000, 0x00800}, | ||
8314 | { 0x00008000, 0x02000}, | ||
8315 | { 0x00010000, 0x0c000}, | ||
8316 | { 0xffffffff, 0x00000} | ||
8053 | }; | 8317 | }; |
8054 | struct mem_entry *mem_tbl; | 8318 | struct mem_entry *mem_tbl; |
8055 | int err = 0; | 8319 | int err = 0; |
8056 | int i; | 8320 | int i; |
8057 | 8321 | ||
8058 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 8322 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) { |
8059 | mem_tbl = mem_tbl_5705; | 8323 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
8060 | else | 8324 | mem_tbl = mem_tbl_5755; |
8325 | else | ||
8326 | mem_tbl = mem_tbl_5705; | ||
8327 | } else | ||
8061 | mem_tbl = mem_tbl_570x; | 8328 | mem_tbl = mem_tbl_570x; |
8062 | 8329 | ||
8063 | for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { | 8330 | for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) { |
@@ -8229,6 +8496,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
8229 | { | 8496 | { |
8230 | struct tg3 *tp = netdev_priv(dev); | 8497 | struct tg3 *tp = netdev_priv(dev); |
8231 | 8498 | ||
8499 | if (tp->link_config.phy_is_low_power) | ||
8500 | tg3_set_power_state(tp, PCI_D0); | ||
8501 | |||
8232 | memset(data, 0, sizeof(u64) * TG3_NUM_TEST); | 8502 | memset(data, 0, sizeof(u64) * TG3_NUM_TEST); |
8233 | 8503 | ||
8234 | if (tg3_test_nvram(tp) != 0) { | 8504 | if (tg3_test_nvram(tp) != 0) { |
@@ -8257,6 +8527,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
8257 | if (!err) | 8527 | if (!err) |
8258 | tg3_nvram_unlock(tp); | 8528 | tg3_nvram_unlock(tp); |
8259 | 8529 | ||
8530 | if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) | ||
8531 | tg3_phy_reset(tp); | ||
8532 | |||
8260 | if (tg3_test_registers(tp) != 0) { | 8533 | if (tg3_test_registers(tp) != 0) { |
8261 | etest->flags |= ETH_TEST_FL_FAILED; | 8534 | etest->flags |= ETH_TEST_FL_FAILED; |
8262 | data[2] = 1; | 8535 | data[2] = 1; |
@@ -8286,6 +8559,9 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest, | |||
8286 | 8559 | ||
8287 | tg3_full_unlock(tp); | 8560 | tg3_full_unlock(tp); |
8288 | } | 8561 | } |
8562 | if (tp->link_config.phy_is_low_power) | ||
8563 | tg3_set_power_state(tp, PCI_D3hot); | ||
8564 | |||
8289 | } | 8565 | } |
8290 | 8566 | ||
8291 | static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | 8567 | static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) |
@@ -8305,6 +8581,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
8305 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) | 8581 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) |
8306 | break; /* We have no PHY */ | 8582 | break; /* We have no PHY */ |
8307 | 8583 | ||
8584 | if (tp->link_config.phy_is_low_power) | ||
8585 | return -EAGAIN; | ||
8586 | |||
8308 | spin_lock_bh(&tp->lock); | 8587 | spin_lock_bh(&tp->lock); |
8309 | err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); | 8588 | err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval); |
8310 | spin_unlock_bh(&tp->lock); | 8589 | spin_unlock_bh(&tp->lock); |
@@ -8321,6 +8600,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
8321 | if (!capable(CAP_NET_ADMIN)) | 8600 | if (!capable(CAP_NET_ADMIN)) |
8322 | return -EPERM; | 8601 | return -EPERM; |
8323 | 8602 | ||
8603 | if (tp->link_config.phy_is_low_power) | ||
8604 | return -EAGAIN; | ||
8605 | |||
8324 | spin_lock_bh(&tp->lock); | 8606 | spin_lock_bh(&tp->lock); |
8325 | err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); | 8607 | err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in); |
8326 | spin_unlock_bh(&tp->lock); | 8608 | spin_unlock_bh(&tp->lock); |
@@ -8464,14 +8746,14 @@ static struct ethtool_ops tg3_ethtool_ops = { | |||
8464 | 8746 | ||
8465 | static void __devinit tg3_get_eeprom_size(struct tg3 *tp) | 8747 | static void __devinit tg3_get_eeprom_size(struct tg3 *tp) |
8466 | { | 8748 | { |
8467 | u32 cursize, val; | 8749 | u32 cursize, val, magic; |
8468 | 8750 | ||
8469 | tp->nvram_size = EEPROM_CHIP_SIZE; | 8751 | tp->nvram_size = EEPROM_CHIP_SIZE; |
8470 | 8752 | ||
8471 | if (tg3_nvram_read(tp, 0, &val) != 0) | 8753 | if (tg3_nvram_read_swab(tp, 0, &magic) != 0) |
8472 | return; | 8754 | return; |
8473 | 8755 | ||
8474 | if (swab32(val) != TG3_EEPROM_MAGIC) | 8756 | if ((magic != TG3_EEPROM_MAGIC) && ((magic & 0xff000000) != 0xa5000000)) |
8475 | return; | 8757 | return; |
8476 | 8758 | ||
8477 | /* | 8759 | /* |
@@ -8479,13 +8761,13 @@ static void __devinit tg3_get_eeprom_size(struct tg3 *tp) | |||
8479 | * When we encounter our validation signature, we know the addressing | 8761 | * When we encounter our validation signature, we know the addressing |
8480 | * has wrapped around, and thus have our chip size. | 8762 | * has wrapped around, and thus have our chip size. |
8481 | */ | 8763 | */ |
8482 | cursize = 0x800; | 8764 | cursize = 0x10; |
8483 | 8765 | ||
8484 | while (cursize < tp->nvram_size) { | 8766 | while (cursize < tp->nvram_size) { |
8485 | if (tg3_nvram_read(tp, cursize, &val) != 0) | 8767 | if (tg3_nvram_read_swab(tp, cursize, &val) != 0) |
8486 | return; | 8768 | return; |
8487 | 8769 | ||
8488 | if (swab32(val) == TG3_EEPROM_MAGIC) | 8770 | if (val == magic) |
8489 | break; | 8771 | break; |
8490 | 8772 | ||
8491 | cursize <<= 1; | 8773 | cursize <<= 1; |
@@ -8498,6 +8780,15 @@ static void __devinit tg3_get_nvram_size(struct tg3 *tp) | |||
8498 | { | 8780 | { |
8499 | u32 val; | 8781 | u32 val; |
8500 | 8782 | ||
8783 | if (tg3_nvram_read_swab(tp, 0, &val) != 0) | ||
8784 | return; | ||
8785 | |||
8786 | /* Selfboot format */ | ||
8787 | if (val != TG3_EEPROM_MAGIC) { | ||
8788 | tg3_get_eeprom_size(tp); | ||
8789 | return; | ||
8790 | } | ||
8791 | |||
8501 | if (tg3_nvram_read(tp, 0xf0, &val) == 0) { | 8792 | if (tg3_nvram_read(tp, 0xf0, &val) == 0) { |
8502 | if (val != 0) { | 8793 | if (val != 0) { |
8503 | tp->nvram_size = (val >> 16) * 1024; | 8794 | tp->nvram_size = (val >> 16) * 1024; |
@@ -8621,6 +8912,44 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp) | |||
8621 | } | 8912 | } |
8622 | } | 8913 | } |
8623 | 8914 | ||
8915 | static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp) | ||
8916 | { | ||
8917 | u32 nvcfg1; | ||
8918 | |||
8919 | nvcfg1 = tr32(NVRAM_CFG1); | ||
8920 | |||
8921 | switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { | ||
8922 | case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ: | ||
8923 | case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ: | ||
8924 | case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ: | ||
8925 | case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ: | ||
8926 | tp->nvram_jedecnum = JEDEC_ATMEL; | ||
8927 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | ||
8928 | tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE; | ||
8929 | |||
8930 | nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS; | ||
8931 | tw32(NVRAM_CFG1, nvcfg1); | ||
8932 | break; | ||
8933 | case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED: | ||
8934 | case FLASH_5755VENDOR_ATMEL_FLASH_1: | ||
8935 | case FLASH_5755VENDOR_ATMEL_FLASH_2: | ||
8936 | case FLASH_5755VENDOR_ATMEL_FLASH_3: | ||
8937 | tp->nvram_jedecnum = JEDEC_ATMEL; | ||
8938 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | ||
8939 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | ||
8940 | tp->nvram_pagesize = 264; | ||
8941 | break; | ||
8942 | case FLASH_5752VENDOR_ST_M45PE10: | ||
8943 | case FLASH_5752VENDOR_ST_M45PE20: | ||
8944 | case FLASH_5752VENDOR_ST_M45PE40: | ||
8945 | tp->nvram_jedecnum = JEDEC_ST; | ||
8946 | tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED; | ||
8947 | tp->tg3_flags2 |= TG3_FLG2_FLASH; | ||
8948 | tp->nvram_pagesize = 256; | ||
8949 | break; | ||
8950 | } | ||
8951 | } | ||
8952 | |||
8624 | /* Chips other than 5700/5701 use the NVRAM for fetching info. */ | 8953 | /* Chips other than 5700/5701 use the NVRAM for fetching info. */ |
8625 | static void __devinit tg3_nvram_init(struct tg3 *tp) | 8954 | static void __devinit tg3_nvram_init(struct tg3 *tp) |
8626 | { | 8955 | { |
@@ -8656,6 +8985,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp) | |||
8656 | 8985 | ||
8657 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) | 8986 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752) |
8658 | tg3_get_5752_nvram_info(tp); | 8987 | tg3_get_5752_nvram_info(tp); |
8988 | else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) | ||
8989 | tg3_get_5787_nvram_info(tp); | ||
8659 | else | 8990 | else |
8660 | tg3_get_nvram_info(tp); | 8991 | tg3_get_nvram_info(tp); |
8661 | 8992 | ||
@@ -8725,6 +9056,34 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) | |||
8725 | return 0; | 9056 | return 0; |
8726 | } | 9057 | } |
8727 | 9058 | ||
9059 | static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr) | ||
9060 | { | ||
9061 | if ((tp->tg3_flags & TG3_FLAG_NVRAM) && | ||
9062 | (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && | ||
9063 | (tp->tg3_flags2 & TG3_FLG2_FLASH) && | ||
9064 | (tp->nvram_jedecnum == JEDEC_ATMEL)) | ||
9065 | |||
9066 | addr = ((addr / tp->nvram_pagesize) << | ||
9067 | ATMEL_AT45DB0X1B_PAGE_POS) + | ||
9068 | (addr % tp->nvram_pagesize); | ||
9069 | |||
9070 | return addr; | ||
9071 | } | ||
9072 | |||
9073 | static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr) | ||
9074 | { | ||
9075 | if ((tp->tg3_flags & TG3_FLAG_NVRAM) && | ||
9076 | (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && | ||
9077 | (tp->tg3_flags2 & TG3_FLG2_FLASH) && | ||
9078 | (tp->nvram_jedecnum == JEDEC_ATMEL)) | ||
9079 | |||
9080 | addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) * | ||
9081 | tp->nvram_pagesize) + | ||
9082 | (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1)); | ||
9083 | |||
9084 | return addr; | ||
9085 | } | ||
9086 | |||
8728 | static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) | 9087 | static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) |
8729 | { | 9088 | { |
8730 | int ret; | 9089 | int ret; |
@@ -8737,14 +9096,7 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) | |||
8737 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) | 9096 | if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) |
8738 | return tg3_nvram_read_using_eeprom(tp, offset, val); | 9097 | return tg3_nvram_read_using_eeprom(tp, offset, val); |
8739 | 9098 | ||
8740 | if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) && | 9099 | offset = tg3_nvram_phys_addr(tp, offset); |
8741 | (tp->tg3_flags2 & TG3_FLG2_FLASH) && | ||
8742 | (tp->nvram_jedecnum == JEDEC_ATMEL)) { | ||
8743 | |||
8744 | offset = ((offset / tp->nvram_pagesize) << | ||
8745 | ATMEL_AT45DB0X1B_PAGE_POS) + | ||
8746 | (offset % tp->nvram_pagesize); | ||
8747 | } | ||
8748 | 9100 | ||
8749 | if (offset > NVRAM_ADDR_MSK) | 9101 | if (offset > NVRAM_ADDR_MSK) |
8750 | return -EINVAL; | 9102 | return -EINVAL; |
@@ -8769,6 +9121,16 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val) | |||
8769 | return ret; | 9121 | return ret; |
8770 | } | 9122 | } |
8771 | 9123 | ||
9124 | static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val) | ||
9125 | { | ||
9126 | int err; | ||
9127 | u32 tmp; | ||
9128 | |||
9129 | err = tg3_nvram_read(tp, offset, &tmp); | ||
9130 | *val = swab32(tmp); | ||
9131 | return err; | ||
9132 | } | ||
9133 | |||
8772 | static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, | 9134 | static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp, |
8773 | u32 offset, u32 len, u8 *buf) | 9135 | u32 offset, u32 len, u8 *buf) |
8774 | { | 9136 | { |
@@ -8921,15 +9283,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, | |||
8921 | 9283 | ||
8922 | page_off = offset % tp->nvram_pagesize; | 9284 | page_off = offset % tp->nvram_pagesize; |
8923 | 9285 | ||
8924 | if ((tp->tg3_flags2 & TG3_FLG2_FLASH) && | 9286 | phy_addr = tg3_nvram_phys_addr(tp, offset); |
8925 | (tp->nvram_jedecnum == JEDEC_ATMEL)) { | ||
8926 | |||
8927 | phy_addr = ((offset / tp->nvram_pagesize) << | ||
8928 | ATMEL_AT45DB0X1B_PAGE_POS) + page_off; | ||
8929 | } | ||
8930 | else { | ||
8931 | phy_addr = offset; | ||
8932 | } | ||
8933 | 9287 | ||
8934 | tw32(NVRAM_ADDR, phy_addr); | 9288 | tw32(NVRAM_ADDR, phy_addr); |
8935 | 9289 | ||
@@ -8944,6 +9298,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len, | |||
8944 | nvram_cmd |= NVRAM_CMD_LAST; | 9298 | nvram_cmd |= NVRAM_CMD_LAST; |
8945 | 9299 | ||
8946 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && | 9300 | if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) && |
9301 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) && | ||
8947 | (tp->nvram_jedecnum == JEDEC_ST) && | 9302 | (tp->nvram_jedecnum == JEDEC_ST) && |
8948 | (nvram_cmd & NVRAM_CMD_FIRST)) { | 9303 | (nvram_cmd & NVRAM_CMD_FIRST)) { |
8949 | 9304 | ||
@@ -9347,6 +9702,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp) | |||
9347 | { | 9702 | { |
9348 | unsigned char vpd_data[256]; | 9703 | unsigned char vpd_data[256]; |
9349 | int i; | 9704 | int i; |
9705 | u32 magic; | ||
9350 | 9706 | ||
9351 | if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { | 9707 | if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) { |
9352 | /* Sun decided not to put the necessary bits in the | 9708 | /* Sun decided not to put the necessary bits in the |
@@ -9356,16 +9712,43 @@ static void __devinit tg3_read_partno(struct tg3 *tp) | |||
9356 | return; | 9712 | return; |
9357 | } | 9713 | } |
9358 | 9714 | ||
9359 | for (i = 0; i < 256; i += 4) { | 9715 | if (tg3_nvram_read_swab(tp, 0x0, &magic)) |
9360 | u32 tmp; | 9716 | return; |
9361 | 9717 | ||
9362 | if (tg3_nvram_read(tp, 0x100 + i, &tmp)) | 9718 | if (magic == TG3_EEPROM_MAGIC) { |
9363 | goto out_not_found; | 9719 | for (i = 0; i < 256; i += 4) { |
9720 | u32 tmp; | ||
9721 | |||
9722 | if (tg3_nvram_read(tp, 0x100 + i, &tmp)) | ||
9723 | goto out_not_found; | ||
9364 | 9724 | ||
9365 | vpd_data[i + 0] = ((tmp >> 0) & 0xff); | 9725 | vpd_data[i + 0] = ((tmp >> 0) & 0xff); |
9366 | vpd_data[i + 1] = ((tmp >> 8) & 0xff); | 9726 | vpd_data[i + 1] = ((tmp >> 8) & 0xff); |
9367 | vpd_data[i + 2] = ((tmp >> 16) & 0xff); | 9727 | vpd_data[i + 2] = ((tmp >> 16) & 0xff); |
9368 | vpd_data[i + 3] = ((tmp >> 24) & 0xff); | 9728 | vpd_data[i + 3] = ((tmp >> 24) & 0xff); |
9729 | } | ||
9730 | } else { | ||
9731 | int vpd_cap; | ||
9732 | |||
9733 | vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD); | ||
9734 | for (i = 0; i < 256; i += 4) { | ||
9735 | u32 tmp, j = 0; | ||
9736 | u16 tmp16; | ||
9737 | |||
9738 | pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR, | ||
9739 | i); | ||
9740 | while (j++ < 100) { | ||
9741 | pci_read_config_word(tp->pdev, vpd_cap + | ||
9742 | PCI_VPD_ADDR, &tmp16); | ||
9743 | if (tmp16 & 0x8000) | ||
9744 | break; | ||
9745 | msleep(1); | ||
9746 | } | ||
9747 | pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA, | ||
9748 | &tmp); | ||
9749 | tmp = cpu_to_le32(tmp); | ||
9750 | memcpy(&vpd_data[i], &tmp, 4); | ||
9751 | } | ||
9369 | } | 9752 | } |
9370 | 9753 | ||
9371 | /* Now parse and find the part number. */ | 9754 | /* Now parse and find the part number. */ |
@@ -9412,6 +9795,46 @@ out_not_found: | |||
9412 | strcpy(tp->board_part_number, "none"); | 9795 | strcpy(tp->board_part_number, "none"); |
9413 | } | 9796 | } |
9414 | 9797 | ||
9798 | static void __devinit tg3_read_fw_ver(struct tg3 *tp) | ||
9799 | { | ||
9800 | u32 val, offset, start; | ||
9801 | |||
9802 | if (tg3_nvram_read_swab(tp, 0, &val)) | ||
9803 | return; | ||
9804 | |||
9805 | if (val != TG3_EEPROM_MAGIC) | ||
9806 | return; | ||
9807 | |||
9808 | if (tg3_nvram_read_swab(tp, 0xc, &offset) || | ||
9809 | tg3_nvram_read_swab(tp, 0x4, &start)) | ||
9810 | return; | ||
9811 | |||
9812 | offset = tg3_nvram_logical_addr(tp, offset); | ||
9813 | if (tg3_nvram_read_swab(tp, offset, &val)) | ||
9814 | return; | ||
9815 | |||
9816 | if ((val & 0xfc000000) == 0x0c000000) { | ||
9817 | u32 ver_offset, addr; | ||
9818 | int i; | ||
9819 | |||
9820 | if (tg3_nvram_read_swab(tp, offset + 4, &val) || | ||
9821 | tg3_nvram_read_swab(tp, offset + 8, &ver_offset)) | ||
9822 | return; | ||
9823 | |||
9824 | if (val != 0) | ||
9825 | return; | ||
9826 | |||
9827 | addr = offset + ver_offset - start; | ||
9828 | for (i = 0; i < 16; i += 4) { | ||
9829 | if (tg3_nvram_read(tp, addr + i, &val)) | ||
9830 | return; | ||
9831 | |||
9832 | val = cpu_to_le32(val); | ||
9833 | memcpy(tp->fw_ver + i, &val, 4); | ||
9834 | } | ||
9835 | } | ||
9836 | } | ||
9837 | |||
9415 | #ifdef CONFIG_SPARC64 | 9838 | #ifdef CONFIG_SPARC64 |
9416 | static int __devinit tg3_is_sun_570X(struct tg3 *tp) | 9839 | static int __devinit tg3_is_sun_570X(struct tg3 *tp) |
9417 | { | 9840 | { |
@@ -9603,6 +10026,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
9603 | 10026 | ||
9604 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || | 10027 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 || |
9605 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || | 10028 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 || |
10029 | GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 || | ||
9606 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) | 10030 | (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) |
9607 | tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; | 10031 | tp->tg3_flags2 |= TG3_FLG2_5750_PLUS; |
9608 | 10032 | ||
@@ -9610,12 +10034,18 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
9610 | (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) | 10034 | (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)) |
9611 | tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; | 10035 | tp->tg3_flags2 |= TG3_FLG2_5705_PLUS; |
9612 | 10036 | ||
9613 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) | 10037 | if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { |
9614 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO; | 10038 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) { |
10039 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2; | ||
10040 | tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; | ||
10041 | } else | ||
10042 | tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1; | ||
10043 | } | ||
9615 | 10044 | ||
9616 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && | 10045 | if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && |
9617 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && | 10046 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750 && |
9618 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) | 10047 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 && |
10048 | GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) | ||
9619 | tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; | 10049 | tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE; |
9620 | 10050 | ||
9621 | if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0) | 10051 | if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0) |
@@ -9772,7 +10202,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
9772 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; | 10202 | tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3; |
9773 | 10203 | ||
9774 | /* Force the chip into D0. */ | 10204 | /* Force the chip into D0. */ |
9775 | err = tg3_set_power_state(tp, 0); | 10205 | err = tg3_set_power_state(tp, PCI_D0); |
9776 | if (err) { | 10206 | if (err) { |
9777 | printk(KERN_ERR PFX "(%s) transition to D0 failed\n", | 10207 | printk(KERN_ERR PFX "(%s) transition to D0 failed\n", |
9778 | pci_name(tp->pdev)); | 10208 | pci_name(tp->pdev)); |
@@ -9825,7 +10255,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
9825 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) | 10255 | if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) |
9826 | tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; | 10256 | tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG; |
9827 | 10257 | ||
9828 | if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) | 10258 | if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) && |
10259 | (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787)) | ||
9829 | tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; | 10260 | tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG; |
9830 | 10261 | ||
9831 | tp->coalesce_mode = 0; | 10262 | tp->coalesce_mode = 0; |
@@ -9925,6 +10356,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
9925 | } | 10356 | } |
9926 | 10357 | ||
9927 | tg3_read_partno(tp); | 10358 | tg3_read_partno(tp); |
10359 | tg3_read_fw_ver(tp); | ||
9928 | 10360 | ||
9929 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { | 10361 | if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { |
9930 | tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; | 10362 | tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT; |
@@ -9960,10 +10392,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) | |||
9960 | else | 10392 | else |
9961 | tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; | 10393 | tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; |
9962 | 10394 | ||
9963 | /* It seems all chips can get confused if TX buffers | 10395 | /* All chips before 5787 can get confused if TX buffers |
9964 | * straddle the 4GB address boundary in some cases. | 10396 | * straddle the 4GB address boundary in some cases. |
9965 | */ | 10397 | */ |
9966 | tp->dev->hard_start_xmit = tg3_start_xmit; | 10398 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
10399 | tp->dev->hard_start_xmit = tg3_start_xmit; | ||
10400 | else | ||
10401 | tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug; | ||
9967 | 10402 | ||
9968 | tp->rx_offset = 2; | 10403 | tp->rx_offset = 2; |
9969 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && | 10404 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && |
@@ -10491,7 +10926,6 @@ static void __devinit tg3_init_link_config(struct tg3 *tp) | |||
10491 | tp->link_config.speed = SPEED_INVALID; | 10926 | tp->link_config.speed = SPEED_INVALID; |
10492 | tp->link_config.duplex = DUPLEX_INVALID; | 10927 | tp->link_config.duplex = DUPLEX_INVALID; |
10493 | tp->link_config.autoneg = AUTONEG_ENABLE; | 10928 | tp->link_config.autoneg = AUTONEG_ENABLE; |
10494 | netif_carrier_off(tp->dev); | ||
10495 | tp->link_config.active_speed = SPEED_INVALID; | 10929 | tp->link_config.active_speed = SPEED_INVALID; |
10496 | tp->link_config.active_duplex = DUPLEX_INVALID; | 10930 | tp->link_config.active_duplex = DUPLEX_INVALID; |
10497 | tp->link_config.phy_is_low_power = 0; | 10931 | tp->link_config.phy_is_low_power = 0; |
@@ -10550,6 +10984,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp) | |||
10550 | case PHY_ID_BCM5752: return "5752"; | 10984 | case PHY_ID_BCM5752: return "5752"; |
10551 | case PHY_ID_BCM5714: return "5714"; | 10985 | case PHY_ID_BCM5714: return "5714"; |
10552 | case PHY_ID_BCM5780: return "5780"; | 10986 | case PHY_ID_BCM5780: return "5780"; |
10987 | case PHY_ID_BCM5787: return "5787"; | ||
10553 | case PHY_ID_BCM8002: return "8002/serdes"; | 10988 | case PHY_ID_BCM8002: return "8002/serdes"; |
10554 | case 0: return "serdes"; | 10989 | case 0: return "serdes"; |
10555 | default: return "unknown"; | 10990 | default: return "unknown"; |
@@ -10848,11 +11283,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
10848 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; | 11283 | tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; |
10849 | } | 11284 | } |
10850 | 11285 | ||
10851 | /* TSO is off by default, user can enable using ethtool. */ | 11286 | /* TSO is on by default on chips that support hardware TSO. |
10852 | #if 0 | 11287 | * Firmware TSO on older chips gives lower performance, so it |
10853 | if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) | 11288 | * is off by default, but can be enabled using ethtool. |
11289 | */ | ||
11290 | if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) | ||
10854 | dev->features |= NETIF_F_TSO; | 11291 | dev->features |= NETIF_F_TSO; |
10855 | #endif | ||
10856 | 11292 | ||
10857 | #endif | 11293 | #endif |
10858 | 11294 | ||
@@ -10896,7 +11332,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
10896 | * checksumming. | 11332 | * checksumming. |
10897 | */ | 11333 | */ |
10898 | if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { | 11334 | if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) { |
10899 | dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM; | 11335 | if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) |
11336 | dev->features |= NETIF_F_HW_CSUM; | ||
11337 | else | ||
11338 | dev->features |= NETIF_F_IP_CSUM; | ||
11339 | dev->features |= NETIF_F_SG; | ||
10900 | tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; | 11340 | tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS; |
10901 | } else | 11341 | } else |
10902 | tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; | 11342 | tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS; |
@@ -10949,6 +11389,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev, | |||
10949 | (pdev->dma_mask == DMA_32BIT_MASK) ? 32 : | 11389 | (pdev->dma_mask == DMA_32BIT_MASK) ? 32 : |
10950 | (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64)); | 11390 | (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64)); |
10951 | 11391 | ||
11392 | netif_carrier_off(tp->dev); | ||
11393 | |||
10952 | return 0; | 11394 | return 0; |
10953 | 11395 | ||
10954 | err_out_iounmap: | 11396 | err_out_iounmap: |
@@ -11044,7 +11486,7 @@ static int tg3_resume(struct pci_dev *pdev) | |||
11044 | 11486 | ||
11045 | pci_restore_state(tp->pdev); | 11487 | pci_restore_state(tp->pdev); |
11046 | 11488 | ||
11047 | err = tg3_set_power_state(tp, 0); | 11489 | err = tg3_set_power_state(tp, PCI_D0); |
11048 | if (err) | 11490 | if (err) |
11049 | return err; | 11491 | return err; |
11050 | 11492 | ||
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h index 7e3b613afb29..baa34c4721db 100644 --- a/drivers/net/tg3.h +++ b/drivers/net/tg3.h | |||
@@ -138,6 +138,7 @@ | |||
138 | #define ASIC_REV_5752 0x06 | 138 | #define ASIC_REV_5752 0x06 |
139 | #define ASIC_REV_5780 0x08 | 139 | #define ASIC_REV_5780 0x08 |
140 | #define ASIC_REV_5714 0x09 | 140 | #define ASIC_REV_5714 0x09 |
141 | #define ASIC_REV_5787 0x0b | ||
141 | #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) | 142 | #define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8) |
142 | #define CHIPREV_5700_AX 0x70 | 143 | #define CHIPREV_5700_AX 0x70 |
143 | #define CHIPREV_5700_BX 0x71 | 144 | #define CHIPREV_5700_BX 0x71 |
@@ -1393,6 +1394,7 @@ | |||
1393 | #define GRC_MDI_CTRL 0x00006844 | 1394 | #define GRC_MDI_CTRL 0x00006844 |
1394 | #define GRC_SEEPROM_DELAY 0x00006848 | 1395 | #define GRC_SEEPROM_DELAY 0x00006848 |
1395 | /* 0x684c --> 0x6c00 unused */ | 1396 | /* 0x684c --> 0x6c00 unused */ |
1397 | #define GRC_FASTBOOT_PC 0x00006894 /* 5752, 5755, 5787 */ | ||
1396 | 1398 | ||
1397 | /* 0x6c00 --> 0x7000 unused */ | 1399 | /* 0x6c00 --> 0x7000 unused */ |
1398 | 1400 | ||
@@ -1436,6 +1438,13 @@ | |||
1436 | #define FLASH_5752VENDOR_ST_M45PE10 0x02400000 | 1438 | #define FLASH_5752VENDOR_ST_M45PE10 0x02400000 |
1437 | #define FLASH_5752VENDOR_ST_M45PE20 0x02400002 | 1439 | #define FLASH_5752VENDOR_ST_M45PE20 0x02400002 |
1438 | #define FLASH_5752VENDOR_ST_M45PE40 0x02400001 | 1440 | #define FLASH_5752VENDOR_ST_M45PE40 0x02400001 |
1441 | #define FLASH_5755VENDOR_ATMEL_FLASH_1 0x03400001 | ||
1442 | #define FLASH_5755VENDOR_ATMEL_FLASH_2 0x03400002 | ||
1443 | #define FLASH_5755VENDOR_ATMEL_FLASH_3 0x03400000 | ||
1444 | #define FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ 0x03000003 | ||
1445 | #define FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ 0x03000002 | ||
1446 | #define FLASH_5787VENDOR_MICRO_EEPROM_64KHZ 0x03000000 | ||
1447 | #define FLASH_5787VENDOR_MICRO_EEPROM_376KHZ 0x02000000 | ||
1439 | #define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 | 1448 | #define NVRAM_CFG1_5752PAGE_SIZE_MASK 0x70000000 |
1440 | #define FLASH_5752PAGE_SIZE_256 0x00000000 | 1449 | #define FLASH_5752PAGE_SIZE_256 0x00000000 |
1441 | #define FLASH_5752PAGE_SIZE_512 0x10000000 | 1450 | #define FLASH_5752PAGE_SIZE_512 0x10000000 |
@@ -2185,7 +2194,7 @@ struct tg3 { | |||
2185 | #define TG3_FLG2_PHY_SERDES 0x00002000 | 2194 | #define TG3_FLG2_PHY_SERDES 0x00002000 |
2186 | #define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000 | 2195 | #define TG3_FLG2_CAPACITIVE_COUPLING 0x00004000 |
2187 | #define TG3_FLG2_FLASH 0x00008000 | 2196 | #define TG3_FLG2_FLASH 0x00008000 |
2188 | #define TG3_FLG2_HW_TSO 0x00010000 | 2197 | #define TG3_FLG2_HW_TSO_1 0x00010000 |
2189 | #define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 | 2198 | #define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 |
2190 | #define TG3_FLG2_5705_PLUS 0x00040000 | 2199 | #define TG3_FLG2_5705_PLUS 0x00040000 |
2191 | #define TG3_FLG2_5750_PLUS 0x00080000 | 2200 | #define TG3_FLG2_5750_PLUS 0x00080000 |
@@ -2198,6 +2207,9 @@ struct tg3 { | |||
2198 | #define TG3_FLG2_PARALLEL_DETECT 0x01000000 | 2207 | #define TG3_FLG2_PARALLEL_DETECT 0x01000000 |
2199 | #define TG3_FLG2_ICH_WORKAROUND 0x02000000 | 2208 | #define TG3_FLG2_ICH_WORKAROUND 0x02000000 |
2200 | #define TG3_FLG2_5780_CLASS 0x04000000 | 2209 | #define TG3_FLG2_5780_CLASS 0x04000000 |
2210 | #define TG3_FLG2_HW_TSO_2 0x08000000 | ||
2211 | #define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) | ||
2212 | #define TG3_FLG2_1SHOT_MSI 0x10000000 | ||
2201 | 2213 | ||
2202 | u32 split_mode_max_reqs; | 2214 | u32 split_mode_max_reqs; |
2203 | #define SPLIT_MODE_5704_MAX_REQ 3 | 2215 | #define SPLIT_MODE_5704_MAX_REQ 3 |
@@ -2247,6 +2259,7 @@ struct tg3 { | |||
2247 | #define PHY_ID_BCM5752 0x60008100 | 2259 | #define PHY_ID_BCM5752 0x60008100 |
2248 | #define PHY_ID_BCM5714 0x60008340 | 2260 | #define PHY_ID_BCM5714 0x60008340 |
2249 | #define PHY_ID_BCM5780 0x60008350 | 2261 | #define PHY_ID_BCM5780 0x60008350 |
2262 | #define PHY_ID_BCM5787 0xbc050ce0 | ||
2250 | #define PHY_ID_BCM8002 0x60010140 | 2263 | #define PHY_ID_BCM8002 0x60010140 |
2251 | #define PHY_ID_INVALID 0xffffffff | 2264 | #define PHY_ID_INVALID 0xffffffff |
2252 | #define PHY_ID_REV_MASK 0x0000000f | 2265 | #define PHY_ID_REV_MASK 0x0000000f |
@@ -2258,6 +2271,7 @@ struct tg3 { | |||
2258 | u32 led_ctrl; | 2271 | u32 led_ctrl; |
2259 | 2272 | ||
2260 | char board_part_number[24]; | 2273 | char board_part_number[24]; |
2274 | char fw_ver[16]; | ||
2261 | u32 nic_sram_data_cfg; | 2275 | u32 nic_sram_data_cfg; |
2262 | u32 pci_clock_ctrl; | 2276 | u32 pci_clock_ctrl; |
2263 | struct pci_dev *pdev_peer; | 2277 | struct pci_dev *pdev_peer; |
@@ -2271,7 +2285,8 @@ struct tg3 { | |||
2271 | (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \ | 2285 | (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \ |
2272 | (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ | 2286 | (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \ |
2273 | (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \ | 2287 | (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \ |
2274 | (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM8002) | 2288 | (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ |
2289 | (X) == PHY_ID_BCM8002) | ||
2275 | 2290 | ||
2276 | struct tg3_hw_stats *hw_stats; | 2291 | struct tg3_hw_stats *hw_stats; |
2277 | dma_addr_t stats_mapping; | 2292 | dma_addr_t stats_mapping; |
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c index db2c798ba89e..175ba13bce41 100644 --- a/drivers/net/wan/sbni.c +++ b/drivers/net/wan/sbni.c | |||
@@ -1495,8 +1495,7 @@ module_param(skip_pci_probe, bool, 0); | |||
1495 | MODULE_LICENSE("GPL"); | 1495 | MODULE_LICENSE("GPL"); |
1496 | 1496 | ||
1497 | 1497 | ||
1498 | int | 1498 | int __init init_module( void ) |
1499 | init_module( void ) | ||
1500 | { | 1499 | { |
1501 | struct net_device *dev; | 1500 | struct net_device *dev; |
1502 | int err; | 1501 | int err; |