author	Ingo Molnar <mingo@elte.hu>	2009-01-18 12:15:49 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-18 12:15:49 -0500
commit	af37501c792107c2bde1524bdae38d9a247b841a (patch)
tree	b50ee90d29e72956b8b7d8d19677fe5996755d49 /drivers/net
parent	d859e29fe34cb833071b20aef860ee94fbad9bb2 (diff)
parent	99937d6455cea95405ac681c86a857d0fcd530bd (diff)
Merge branch 'core/percpu' into perfcounters/core
Conflicts:
arch/x86/include/asm/pda.h
We merge tip/core/percpu into tip/perfcounters/core because of a
semantic and contextual conflict: the former eliminates the PDA,
while the latter extends it with apic_perf_irqs field.
Resolve the conflict by moving the new field to the irq_cpustat
structure on 64-bit too.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/net')
51 files changed, 308 insertions, 364 deletions
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index c092c3929224..5b91a85fe107 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -177,6 +177,7 @@ static const struct net_device_ops el2_netdev_ops = {
 	.ndo_get_stats = eip_get_stats,
 	.ndo_set_multicast_list = eip_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = eip_poll,
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 665e7fdf27a1..cdbbb6226fc5 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -3109,6 +3109,8 @@ static void acpi_set_WOL(struct net_device *dev)
 	struct vortex_private *vp = netdev_priv(dev);
 	void __iomem *ioaddr = vp->ioaddr;
 
+	device_set_wakeup_enable(vp->gendev, vp->enable_wol);
+
 	if (vp->enable_wol) {
 		/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
 		EL3WINDOW(7);
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index dd7ac8290aec..4e19ae3ce6be 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1821,6 +1821,7 @@ static const struct net_device_ops cp_netdev_ops = {
 	.ndo_open = cp_open,
 	.ndo_stop = cp_close,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_set_multicast_list = cp_set_rx_mode,
 	.ndo_get_stats = cp_get_stats,
 	.ndo_do_ioctl = cp_ioctl,
@@ -1832,6 +1833,7 @@ static const struct net_device_ops cp_netdev_ops = {
 #ifdef BROKEN
 	.ndo_change_mtu = cp_change_mtu,
 #endif
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = cp_poll_controller,
 #endif
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index fe370f805793..a5b24202d564 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -917,6 +917,7 @@ static const struct net_device_ops rtl8139_netdev_ops = {
 	.ndo_stop = rtl8139_close,
 	.ndo_get_stats = rtl8139_get_stats,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_start_xmit = rtl8139_start_xmit,
 	.ndo_set_multicast_list = rtl8139_set_rx_mode,
 	.ndo_do_ioctl = netdev_ioctl,
@@ -924,7 +925,6 @@ static const struct net_device_ops rtl8139_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = rtl8139_poll_controller,
 #endif
-
 };
 
 static int __devinit rtl8139_init_one (struct pci_dev *pdev,
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index fbe609a51e02..ec3e22e6306f 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -63,6 +63,7 @@ const struct net_device_ops ei_netdev_ops = {
 	.ndo_get_stats = ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ei_poll,
diff --git a/drivers/net/8390p.c b/drivers/net/8390p.c
index ee70b358a816..da863c91d1d0 100644
--- a/drivers/net/8390p.c
+++ b/drivers/net/8390p.c
@@ -68,6 +68,7 @@ const struct net_device_ops eip_netdev_ops = {
 	.ndo_get_stats = eip_get_stats,
 	.ndo_set_multicast_list = eip_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = eip_poll,
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 65afda4a62d9..9fe8cb7d43ac 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1600,7 +1600,7 @@ config 8139_OLD_RX_RESET
 	  old RX-reset behavior. If unsure, say N.
 
 config R6040
-	tristate "RDC R6040 Fast Ethernet Adapter support (EXPERIMENTAL)"
+	tristate "RDC R6040 Fast Ethernet Adapter support"
 	depends on NET_PCI && PCI
 	select CRC32
 	select MII
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 5b396ff6c83f..9589d620639d 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -460,6 +460,7 @@ static const struct net_device_ops ace_netdev_ops = {
 	.ndo_get_stats = ace_get_stats,
 	.ndo_start_xmit = ace_start_xmit,
 	.ndo_set_multicast_list = ace_set_multicast_list,
+	.ndo_validate_addr = eth_validate_addr,
 	.ndo_set_mac_address = ace_set_mac_addr,
 	.ndo_change_mtu = ace_change_mtu,
 #if ACENIC_DO_VLAN
diff --git a/drivers/net/arm/etherh.c b/drivers/net/arm/etherh.c
index 6278606d1049..745ac188babe 100644
--- a/drivers/net/arm/etherh.c
+++ b/drivers/net/arm/etherh.c
@@ -646,6 +646,7 @@ static const struct net_device_ops etherh_netdev_ops = {
 	.ndo_get_stats = ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_addr = eth_set_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ei_poll,
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 9ad22d1b00fd..1cf2f949c0b4 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -1357,6 +1357,7 @@ static const struct net_device_ops ks8695_netdev_ops = {
 	.ndo_start_xmit = ks8695_start_xmit,
 	.ndo_tx_timeout = ks8695_timeout,
 	.ndo_set_mac_address = ks8695_set_mac,
+	.ndo_validate_addr = eth_validate_addr,
 	.ndo_set_multicast_list = ks8695_set_multicast,
 };
 
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 6926ebedfdc9..5ae131c147f9 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -73,8 +73,8 @@
 	 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
 #define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
 
-#define RX_PKT_OFFSET 30
-#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET + 64)
+#define RX_PKT_OFFSET (RX_HEADER_LEN + 2)
+#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET)
 
 /* minimum number of free TX descriptors required to wake up TX process */
 #define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
@@ -682,7 +682,6 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	}
 
 	rh = (struct rx_header *) skb->data;
-	skb_reserve(skb, RX_PKT_OFFSET);
 
 	rh->len = 0;
 	rh->flags = 0;
@@ -693,13 +692,13 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 	if (src_map != NULL)
 		src_map->skb = NULL;
 
-	ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
+	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
 	if (dest_idx == (B44_RX_RING_SIZE - 1))
 		ctrl |= DESC_CTRL_EOT;
 
 	dp = &bp->rx_ring[dest_idx];
 	dp->ctrl = cpu_to_le32(ctrl);
-	dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
+	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
 
 	if (bp->flags & B44_FLAG_RX_RING_HACK)
 		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
@@ -809,8 +808,8 @@ static int b44_rx(struct b44 *bp, int budget)
 			ssb_dma_unmap_single(bp->sdev, map,
 					     skb_size, DMA_FROM_DEVICE);
 			/* Leave out rx_header */
 			skb_put(skb, len + RX_PKT_OFFSET);
 			skb_pull(skb, RX_PKT_OFFSET);
 		} else {
 			struct sk_buff *copy_skb;
 
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 5b346f9eaa8b..a89d8cc51205 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -50,12 +50,17 @@ struct vlan_group;
 struct adapter;
 struct sge_qset;
 
+enum { /* rx_offload flags */
+	T3_RX_CSUM = 1 << 0,
+	T3_LRO = 1 << 1,
+};
+
 struct port_info {
 	struct adapter *adapter;
 	struct vlan_group *vlan_grp;
 	struct sge_qset *qs;
 	u8 port_id;
-	u8 rx_csum_offload;
+	u8 rx_offload;
 	u8 nqsets;
 	u8 first_qset;
 	struct cphy phy;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 2847f947499d..0089746b8d02 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -546,7 +546,7 @@ static int setup_sge_qsets(struct adapter *adap)
 		pi->qs = &adap->sge.qs[pi->first_qset];
 		for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
 		     ++j, ++qset_idx) {
-			set_qset_lro(dev, qset_idx, pi->rx_csum_offload);
+			set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
 			err = t3_sge_alloc_qset(adap, qset_idx, 1,
 				(adap->flags & USING_MSIX) ? qset_idx + 1 :
 				irq_idx,
@@ -1657,17 +1657,19 @@ static u32 get_rx_csum(struct net_device *dev)
 {
 	struct port_info *p = netdev_priv(dev);
 
-	return p->rx_csum_offload;
+	return p->rx_offload & T3_RX_CSUM;
 }
 
 static int set_rx_csum(struct net_device *dev, u32 data)
 {
 	struct port_info *p = netdev_priv(dev);
 
-	p->rx_csum_offload = data;
-	if (!data) {
+	if (data) {
+		p->rx_offload |= T3_RX_CSUM;
+	} else {
 		int i;
 
+		p->rx_offload &= ~(T3_RX_CSUM | T3_LRO);
 		for (i = p->first_qset; i < p->first_qset + p->nqsets; i++)
 			set_qset_lro(dev, i, 0);
 	}
@@ -1830,15 +1832,18 @@ static int cxgb3_set_flags(struct net_device *dev, u32 data)
 	int i;
 
 	if (data & ETH_FLAG_LRO) {
-		if (!pi->rx_csum_offload)
+		if (!(pi->rx_offload & T3_RX_CSUM))
 			return -EINVAL;
 
+		pi->rx_offload |= T3_LRO;
 		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
 			set_qset_lro(dev, i, 1);
 
-	} else
+	} else {
+		pi->rx_offload &= ~T3_LRO;
 		for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++)
 			set_qset_lro(dev, i, 0);
+	}
 
 	return 0;
 }
@@ -1926,7 +1931,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			pi = adap2pinfo(adapter, i);
 			if (t.qset_idx >= pi->first_qset &&
 			    t.qset_idx < pi->first_qset + pi->nqsets &&
-			    !pi->rx_csum_offload)
+			    !(pi->rx_offload & T3_RX_CSUM))
 				return -EINVAL;
 		}
 
@@ -2946,7 +2951,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 		adapter->port[i] = netdev;
 		pi = netdev_priv(netdev);
 		pi->adapter = adapter;
-		pi->rx_csum_offload = 1;
+		pi->rx_offload = T3_RX_CSUM | T3_LRO;
 		pi->port_id = i;
 		netif_carrier_off(netdev);
 		netif_tx_stop_all_queues(netdev);
@@ -2955,6 +2960,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 		netdev->mem_end = mmio_start + mmio_len - 1;
 		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
 		netdev->features |= NETIF_F_LLTX;
+		netdev->features |= NETIF_F_LRO;
 		if (pci_using_dac)
 			netdev->features |= NETIF_F_HIGHDMA;
 
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 6c641a889471..14f9fb3e8795 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1932,7 +1932,7 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
 	skb_pull(skb, sizeof(*p) + pad);
 	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
 	pi = netdev_priv(skb->dev);
-	if (pi->rx_csum_offload && p->csum_valid && p->csum == htons(0xffff) &&
+	if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
 	    !p->fragment) {
 		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index f2a5963b5a95..e415e81ecd3e 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -390,7 +390,8 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 }
 
 static DEFINE_MUTEX(nvm_mutex);
-static pid_t nvm_owner = -1;
+static pid_t nvm_owner_pid = -1;
+static char nvm_owner_name[TASK_COMM_LEN] = "";
 
 /**
  * e1000_acquire_swflag_ich8lan - Acquire software control flag
@@ -408,11 +409,15 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 	might_sleep();
 
 	if (!mutex_trylock(&nvm_mutex)) {
-		WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n",
-		     nvm_owner);
+		WARN(1, KERN_ERR "e1000e mutex contention. Owned by process "
+		     "%s (pid %d), required by process %s (pid %d)\n",
+		     nvm_owner_name, nvm_owner_pid,
+		     current->comm, current->pid);
+
 		mutex_lock(&nvm_mutex);
 	}
-	nvm_owner = current->pid;
+	nvm_owner_pid = current->pid;
+	strncpy(nvm_owner_name, current->comm, TASK_COMM_LEN);
 
 	while (timeout) {
 		extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -430,7 +435,8 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
 		hw_dbg(hw, "FW or HW has locked the resource for too long.\n");
 		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 		ew32(EXTCNF_CTRL, extcnf_ctrl);
-		nvm_owner = -1;
+		nvm_owner_pid = -1;
+		strcpy(nvm_owner_name, "");
 		mutex_unlock(&nvm_mutex);
 		return -E1000_ERR_CONFIG;
 	}
@@ -454,7 +460,8 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
 	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
 	ew32(EXTCNF_CTRL, extcnf_ctrl);
 
-	nvm_owner = -1;
+	nvm_owner_pid = -1;
+	strcpy(nvm_owner_name, "");
 	mutex_unlock(&nvm_mutex);
 }
 
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index 20eb05cddb83..b07ba1924de0 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -169,6 +169,7 @@ static const struct net_device_ops e21_netdev_ops = {
 	.ndo_get_stats = ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ei_poll,
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d039e16f2763..7d60551d538f 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1599,6 +1599,7 @@ static const struct net_device_ops enic_netdev_ops = {
 	.ndo_start_xmit = enic_hard_start_xmit,
 	.ndo_get_stats = enic_get_stats,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_set_multicast_list = enic_set_multicast_list,
 	.ndo_change_mtu = enic_change_mtu,
 	.ndo_vlan_rx_register = enic_vlan_rx_register,
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5b68dc20168d..5b910cf63740 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -13,7 +13,7 @@
  * Copyright (C) 2004 Andrew de Quincey (wol support)
  * Copyright (C) 2004 Carl-Daniel Hailfinger (invalid MAC handling, insane
  *	IRQ rate fixes, bigendian fixes, cleanups, verification)
- * Copyright (c) 2004,2005,2006,2007,2008 NVIDIA Corporation
+ * Copyright (c) 2004,2005,2006,2007,2008,2009 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -39,7 +39,7 @@
 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
 * superfluous timer interrupts from the nic.
 */
-#define FORCEDETH_VERSION "0.61"
+#define FORCEDETH_VERSION "0.62"
 #define DRV_NAME "forcedeth"
 
 #include <linux/module.h>
@@ -2096,14 +2096,15 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
+	spin_lock_irqsave(&np->lock, flags);
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	start_tx = put_tx = np->put_tx.orig;
 
@@ -2214,14 +2215,15 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 			((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
+	spin_lock_irqsave(&np->lock, flags);
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	start_tx = put_tx = np->put_tx.ex;
 	start_tx_ctx = np->put_tx_ctx;
@@ -3403,10 +3405,10 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
 
 #ifdef CONFIG_FORCEDETH_NAPI
 		if (events & NVREG_IRQ_RX_ALL) {
+			spin_lock(&np->lock);
 			netif_rx_schedule(&np->napi);
 
 			/* Disable furthur receive irq's */
-			spin_lock(&np->lock);
 			np->irqmask &= ~NVREG_IRQ_RX_ALL;
 
 			if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -3520,10 +3522,10 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
 
 #ifdef CONFIG_FORCEDETH_NAPI
 		if (events & NVREG_IRQ_RX_ALL) {
+			spin_lock(&np->lock);
 			netif_rx_schedule(&np->napi);
 
 			/* Disable furthur receive irq's */
-			spin_lock(&np->lock);
 			np->irqmask &= ~NVREG_IRQ_RX_ALL;
 
 			if (np->msi_flags & NV_MSI_X_ENABLED)
@@ -6167,19 +6169,19 @@ static struct pci_device_id pci_tbl[] = {
 	},
 	{ /* MCP79 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36),
-		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+		.driver_data = DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
 	},
 	{ /* MCP79 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
 	},
 	{ /* MCP79 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
 	},
 	{ /* MCP79 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V3|DEV_HAS_TEST_EXTENDED|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX|DEV_NEED_TX_LIMIT|DEV_HAS_GEAR_MODE,
 	},
 	{0,},
 };
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 1b8deca8b9f8..efcbeb6c8673 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -296,6 +296,20 @@ err_out:
 	return err;
 }
 
+/* Ioctl MII Interface */
+static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct gfar_private *priv = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EINVAL;
+
+	if (!priv->phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
+}
+
 /* Set up the ethernet device structure, private data,
  * and anything else we need before we start */
 static int gfar_probe(struct of_device *ofdev,
@@ -366,6 +380,7 @@ static int gfar_probe(struct of_device *ofdev,
 	dev->set_multicast_list = gfar_set_multi;
 
 	dev->ethtool_ops = &gfar_ethtool_ops;
+	dev->do_ioctl = gfar_ioctl;
 
 	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
 		priv->rx_csum_enable = 1;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 32200227c923..7e8b3c59a7d6 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -576,6 +576,7 @@ static const struct net_device_ops hamachi_netdev_ops = {
 	.ndo_set_multicast_list = set_rx_mode,
 	.ndo_change_mtu = eth_change_mtu,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_tx_timeout = hamachi_tx_timeout,
 	.ndo_do_ioctl = netdev_ioctl,
 };
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 50f1e172ee8f..2d4089894ec7 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -717,11 +717,12 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
 	unsigned int cmd, unsigned long arg)
 {
 	struct sixpack *sp = sp_get(tty);
-	struct net_device *dev = sp->dev;
+	struct net_device *dev;
 	unsigned int tmp, err;
 
 	if (!sp)
 		return -ENXIO;
+	dev = sp->dev;
 
 	switch(cmd) {
 	case SIOCGIFNAME:
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index b507dbc16e62..5e070f446635 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -166,6 +166,7 @@ static const struct net_device_ops hpp_netdev_ops = {
 	.ndo_get_stats = eip_get_stats,
 	.ndo_set_multicast_list = eip_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = eip_poll,
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 9cb38a8d4387..8ac0930c183c 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -103,6 +103,7 @@ static const struct net_device_ops hydra_netdev_ops = {
 	.ndo_get_stats = ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ei_poll,
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 75a1d0a86dee..941164076a2b 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -594,7 +594,7 @@ static int au1k_irda_rx(struct net_device *dev)
 		update_rx_stats(dev, flags, count);
 		skb=alloc_skb(count+1,GFP_ATOMIC);
 		if (skb == NULL) {
-			aup->stats.rx_dropped++;
+			aup->netdev->stats.rx_dropped++;
 			continue;
 		}
 		skb_reserve(skb, 1);
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 687c2d53d4d2..6f3e7f71658d 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1194,13 +1194,13 @@ toshoboe_interrupt (int irq, void *dev_id)
 		txp = txpc;
 		txpc++;
 		txpc %= TX_SLOTS;
-		self->stats.tx_packets++;
+		self->netdev->stats.tx_packets++;
 		if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS)
 			self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX;
 	}
-	self->stats.tx_packets--;
+	self->netdev->stats.tx_packets--;
 #else
-	self->stats.tx_packets++;
+	self->netdev->stats.tx_packets++;
 #endif
 	toshoboe_start_DMA(self, OBOE_CONFIG0H_ENTX);
 }
@@ -1280,7 +1280,7 @@ dumpbufs(self->rx_bufs[self->rxs],len,'<');
 		skb_put (skb, len);
 		skb_copy_to_linear_data(skb, self->rx_bufs[self->rxs],
 			len);
-		self->stats.rx_packets++;
+		self->netdev->stats.rx_packets++;
 		skb->dev = self->netdev;
 		skb_reset_mac_header(skb);
 		skb->protocol = htons (ETH_P_IRDA);
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index 57716e22660c..8e884869a05b 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -486,6 +486,7 @@ static const struct net_device_ops mac8390_netdev_ops = {
 	.ndo_get_stats = ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ei_poll,
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 15bb38d99304..9f6644a44030 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -952,6 +952,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_get_stats = mlx4_en_get_stats,
 	.ndo_set_multicast_list = mlx4_en_set_multicast,
 	.ndo_set_mac_address = mlx4_en_set_mac,
+	.ndo_validate_addr = eth_validate_addr,
 	.ndo_change_mtu = mlx4_en_change_mtu,
 	.ndo_tx_timeout = mlx4_en_tx_timeout,
 	.ndo_vlan_rx_register = mlx4_en_vlan_rx_register,
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 710c79e7a2db..6ef2490d5c3e 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -912,8 +912,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 	int i;
 
 	if (msi_x) {
-		nreq = min(dev->caps.num_eqs - dev->caps.reserved_eqs,
-			   num_possible_cpus() + 1);
+		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
+			     num_possible_cpus() + 1);
 		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
 		if (!entries)
 			goto no_msi;
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index b57239171046..7bd6662d5b04 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -202,6 +202,7 @@ static const struct net_device_ops ne_netdev_ops = {
 	.ndo_get_stats = ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ei_poll,
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 62f20ba211cb..f090d3b9ec94 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -208,6 +208,7 @@ static const struct net_device_ops ne2k_netdev_ops = {
 	.ndo_get_stats = ei_get_stats,
 	.ndo_set_multicast_list = ei_set_multicast_list,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = ei_poll,
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 42021aca1ddd..e80294d8cc19 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1956,6 +1956,7 @@ static const struct net_device_ops netdev_ops = {
 	.ndo_change_mtu = ns83820_change_mtu,
 	.ndo_set_multicast_list = ns83820_set_multicast,
 	.ndo_validate_addr = eth_validate_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_tx_timeout = ns83820_tx_timeout,
 #ifdef NS83820_VLAN_ACCEL_SUPPORT
 	.ndo_vlan_rx_register = ns83820_vlan_rx_register,
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 459663a4023d..c1dadadfab18 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -28,11 +28,11 @@
 	} while (0)
 
 #define QLGE_VENDOR_ID 0x1077
-#define QLGE_DEVICE_ID1 0x8012
-#define QLGE_DEVICE_ID 0x8000
+#define QLGE_DEVICE_ID 0x8012
 
-#define MAX_RX_RINGS 128
-#define MAX_TX_RINGS 128
+#define MAX_CPUS 8
+#define MAX_TX_RINGS MAX_CPUS
+#define MAX_RX_RINGS ((MAX_CPUS * 2) + 1)
 
 #define NUM_TX_RING_ENTRIES 256
 #define NUM_RX_RING_ENTRIES 256
@@ -45,6 +45,7 @@
 #define MAX_SPLIT_SIZE 1023
 #define QLGE_SB_PAD 32
 
+#define MAX_CQ 128
 #define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
 #define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
 #define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
@@ -961,8 +962,7 @@ struct ib_mac_iocb_rsp {
 #define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
 #define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
 	__le32 data_len; /* */
-	__le32 data_addr_lo; /* */
-	__le32 data_addr_hi; /* */
+	__le64 data_addr; /* */
 	__le32 rss; /* */
 	__le16 vlan_id; /* 12 bits */
 #define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
@@ -976,8 +976,7 @@ struct ib_mac_iocb_rsp {
 #define IB_MAC_IOCB_RSP_HS 0x40
 #define IB_MAC_IOCB_RSP_HL 0x80
 	__le32 hdr_len; /* */
-	__le32 hdr_addr_lo; /* */
-	__le32 hdr_addr_hi; /* */
+	__le64 hdr_addr; /* */
 } __attribute((packed));
 
 struct ib_ae_iocb_rsp {
@@ -1042,10 +1041,8 @@ struct wqicb {
 	__le16 cq_id_rss;
 #define Q_CQ_ID_RSS_RV 0x8000
 	__le16 rid;
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le32 cnsmr_idx_addr_lo;
-	__le32 cnsmr_idx_addr_hi;
+	__le64 addr;
+	__le64 cnsmr_idx_addr;
 } __attribute((packed));
 
 /*
@@ -1070,18 +1067,14 @@ struct cqicb {
 #define LEN_CPP_64 0x0002
 #define LEN_CPP_128 0x0003
 	__le16 rid;
-	__le32 addr_lo;
-	__le32 addr_hi;
-	__le32 prod_idx_addr_lo;
-	__le32 prod_idx_addr_hi;
+	__le64 addr;
+	__le64 prod_idx_addr;
 	__le16 pkt_delay;
 	__le16 irq_delay;
-	__le32 lbq_addr_lo;
-	__le32 lbq_addr_hi;
+	__le64 lbq_addr;
 	__le16 lbq_buf_size;
 	__le16 lbq_len; /* entry count */
-	__le32 sbq_addr_lo;
-	__le32 sbq_addr_hi;
+	__le64 sbq_addr;
 	__le16 sbq_buf_size;
 	__le16 sbq_len; /* entry count */
 } __attribute((packed));
@@ -1145,7 +1138,7 @@ struct tx_ring {
 	struct wqicb wqicb; /* structure used to inform chip of new queue */
 	void *wq_base; /* pci_alloc:virtual addr for tx */
 	dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
-	u32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
+	__le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
 	dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
 	u32 wq_size; /* size in bytes of queue area */
 	u32 wq_len; /* number of entries in queue */
@@ -1181,7 +1174,7 @@ struct rx_ring {
 	u32 cq_size;
 	u32 cq_len;
 	u16 cq_id;
-	volatile __le32 *prod_idx_sh_reg; /* Shadowed producer register. */
+	__le32 *prod_idx_sh_reg; /* Shadowed producer register. */
 	dma_addr_t prod_idx_sh_reg_dma;
 	void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
 	u32 cnsmr_idx; /* current sw idx */
@@ -1402,9 +1395,11 @@ struct ql_adapter {
 	int rx_ring_count;
 	int ring_mem_size;
 	void *ring_mem;
-	struct rx_ring *rx_ring;
+
+	struct rx_ring rx_ring[MAX_RX_RINGS];
+	struct tx_ring tx_ring[MAX_TX_RINGS];
+
 	int rx_csum;
-	struct tx_ring *tx_ring;
 	u32 default_rx_queue;
 
 	u16 rx_coalesce_usecs; /* cqicb->int_delay */
@@ -1459,6 +1454,24 @@ static inline void ql_write_db_reg(u32 val, void __iomem *addr)
 	mmiowb();
 }
 
+/*
+ * Shadow Registers:
+ * Outbound queues have a consumer index that is maintained by the chip.
+ * Inbound queues have a producer index that is maintained by the chip.
+ * For lower overhead, these registers are "shadowed" to host memory
+ * which allows the device driver to track the queue progress without
+ * PCI reads. When an entry is placed on an inbound queue, the chip will
+ * update the relevant index register and then copy the value to the
+ * shadow register in host memory.
+ */
+static inline u32 ql_read_sh_reg(__le32 *addr)
+{
+	u32 reg;
+	reg = le32_to_cpu(*addr);
+	rmb();
+	return reg;
+}
+
 extern char qlge_driver_name[];
 extern const char qlge_driver_version[];
 extern const struct ethtool_ops qlge_ethtool_ops;
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 3f5e02d2e4a9..379b895ed6e6 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -435,14 +435,10 @@ void ql_dump_wqicb(struct wqicb *wqicb)
 	printk(KERN_ERR PFX "wqicb->cq_id_rss = %d.\n",
 	       le16_to_cpu(wqicb->cq_id_rss));
 	printk(KERN_ERR PFX "wqicb->rid = 0x%x.\n", le16_to_cpu(wqicb->rid));
-	printk(KERN_ERR PFX "wqicb->wq_addr_lo = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->addr_lo));
-	printk(KERN_ERR PFX "wqicb->wq_addr_hi = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->addr_hi));
-	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_lo = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->cnsmr_idx_addr_lo));
-	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr_hi = 0x%.08x.\n",
-	       le32_to_cpu(wqicb->cnsmr_idx_addr_hi));
+	printk(KERN_ERR PFX "wqicb->wq_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(wqicb->addr));
+	printk(KERN_ERR PFX "wqicb->wq_cnsmr_idx_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
 }
 
 void ql_dump_tx_ring(struct tx_ring *tx_ring)
@@ -455,10 +451,11 @@ void ql_dump_tx_ring(struct tx_ring *tx_ring)
 	printk(KERN_ERR PFX "tx_ring->base = %p.\n", tx_ring->wq_base);
 	printk(KERN_ERR PFX "tx_ring->base_dma = 0x%llx.\n",
 	       (unsigned long long) tx_ring->wq_base_dma);
-	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg = %p.\n",
-	       tx_ring->cnsmr_idx_sh_reg);
-	printk(KERN_ERR PFX "tx_ring->cnsmr_idx_sh_reg_dma = 0x%llx.\n",
-	       (unsigned long long) tx_ring->cnsmr_idx_sh_reg_dma);
+	printk(KERN_ERR PFX
+	       "tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d.\n",
+	       tx_ring->cnsmr_idx_sh_reg,
+	       tx_ring->cnsmr_idx_sh_reg
+			? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
 	printk(KERN_ERR PFX "tx_ring->size = %d.\n", tx_ring->wq_size);
 	printk(KERN_ERR PFX "tx_ring->len = %d.\n", tx_ring->wq_len);
 	printk(KERN_ERR PFX "tx_ring->prod_idx_db_reg = %p.\n",
@@ -510,30 +507,22 @@ void ql_dump_cqicb(struct cqicb *cqicb)
 	printk(KERN_ERR PFX "cqicb->msix_vect = %d.\n", cqicb->msix_vect);
 	printk(KERN_ERR PFX "cqicb->flags = %x.\n", cqicb->flags);
 	printk(KERN_ERR PFX "cqicb->len = %d.\n", le16_to_cpu(cqicb->len));
-	printk(KERN_ERR PFX "cqicb->addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->addr_lo));
-	printk(KERN_ERR PFX "cqicb->addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->addr_hi));
-	printk(KERN_ERR PFX "cqicb->prod_idx_addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->prod_idx_addr_lo));
-	printk(KERN_ERR PFX "cqicb->prod_idx_addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->prod_idx_addr_hi));
+	printk(KERN_ERR PFX "cqicb->addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->addr));
+	printk(KERN_ERR PFX "cqicb->prod_idx_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
 	printk(KERN_ERR PFX "cqicb->pkt_delay = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->pkt_delay));
 	printk(KERN_ERR PFX "cqicb->irq_delay = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->irq_delay));
-	printk(KERN_ERR PFX "cqicb->lbq_addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->lbq_addr_lo));
-	printk(KERN_ERR PFX "cqicb->lbq_addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->lbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->lbq_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
 	printk(KERN_ERR PFX "cqicb->lbq_buf_size = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->lbq_buf_size));
 	printk(KERN_ERR PFX "cqicb->lbq_len = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->lbq_len));
-	printk(KERN_ERR PFX "cqicb->sbq_addr_lo = %x.\n",
-	       le32_to_cpu(cqicb->sbq_addr_lo));
-	printk(KERN_ERR PFX "cqicb->sbq_addr_hi = %x.\n",
-	       le32_to_cpu(cqicb->sbq_addr_hi));
+	printk(KERN_ERR PFX "cqicb->sbq_addr = 0x%llx.\n",
+	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
 	printk(KERN_ERR PFX "cqicb->sbq_buf_size = 0x%.04x.\n",
 	       le16_to_cpu(cqicb->sbq_buf_size));
 	printk(KERN_ERR PFX "cqicb->sbq_len = 0x%.04x.\n",
@@ -558,9 +547,10 @@ void ql_dump_rx_ring(struct rx_ring *rx_ring)
 	printk(KERN_ERR PFX "rx_ring->cq_size = %d.\n", rx_ring->cq_size);
 	printk(KERN_ERR PFX "rx_ring->cq_len = %d.\n", rx_ring->cq_len);
 	printk(KERN_ERR PFX
-	       "rx_ring->prod_idx_sh_reg, addr = %p, value = %d.\n",
+	       "rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d.\n",
 	       rx_ring->prod_idx_sh_reg,
-	       rx_ring->prod_idx_sh_reg ? *(rx_ring->prod_idx_sh_reg) : 0);
+	       rx_ring->prod_idx_sh_reg
+			? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
 	printk(KERN_ERR PFX "rx_ring->prod_idx_sh_reg_dma = %llx.\n",
 	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
 	printk(KERN_ERR PFX "rx_ring->cnsmr_idx_db_reg = %p.\n",
@@ -809,10 +799,8 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
 
 	printk(KERN_ERR PFX "data_len = %d\n",
 	       le32_to_cpu(ib_mac_rsp->data_len));
-	printk(KERN_ERR PFX "data_addr_hi = 0x%x\n",
-	       le32_to_cpu(ib_mac_rsp->data_addr_hi));
-	printk(KERN_ERR PFX "data_addr_lo = 0x%x\n",
-	       le32_to_cpu(ib_mac_rsp->data_addr_lo));
+	printk(KERN_ERR PFX "data_addr = 0x%llx\n",
+	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
 		printk(KERN_ERR PFX "rss = %x\n",
 		       le32_to_cpu(ib_mac_rsp->rss));
@@ -828,10 +816,8 @@ void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
829 | printk(KERN_ERR PFX "hdr length = %d.\n", | 817 | printk(KERN_ERR PFX "hdr length = %d.\n", |
830 | le32_to_cpu(ib_mac_rsp->hdr_len)); | 818 | le32_to_cpu(ib_mac_rsp->hdr_len)); |
831 | printk(KERN_ERR PFX "hdr addr_hi = 0x%x.\n", | 819 | printk(KERN_ERR PFX "hdr addr = 0x%llx.\n", |
832 | le32_to_cpu(ib_mac_rsp->hdr_addr_hi)); | 820 | (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr)); |
833 | printk(KERN_ERR PFX "hdr addr_lo = 0x%x.\n", | ||
834 | le32_to_cpu(ib_mac_rsp->hdr_addr_lo)); | ||
835 | } | 821 | } |
836 | } | 822 | } |
837 | #endif | 823 | #endif |
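The qlge_dump.c hunks above all apply one pattern: control-block fields that used to be split into addr_lo/addr_hi pairs become a single little-endian 64-bit field, printed with le64_to_cpu() and a 0x%llx format. The same pattern appears on the write side later in qlge_main.c, where cpu_to_le64() replaces the pair of cpu_to_le32() stores. A minimal sketch of both directions, using hypothetical names (struct demo_cb, demo_set_addr) rather than the real qlge.h layout, and assuming the hardware really treats the two words as one little-endian 64-bit quantity:

    #include <linux/kernel.h>   /* printk */
    #include <linux/types.h>    /* __le64, dma_addr_t */

    /* Hedged sketch: one __le64 field instead of an addr_lo/addr_hi pair. */
    struct demo_cb {
            __le64 addr;
    };

    static void demo_set_addr(struct demo_cb *cb, dma_addr_t dma)
    {
            /* was: cb->addr_lo = cpu_to_le32(dma);
             *      cb->addr_hi = cpu_to_le32((u64) dma >> 32); */
            cb->addr = cpu_to_le64((u64) dma);
    }

    static void demo_dump_addr(struct demo_cb *cb)
    {
            printk(KERN_ERR "addr = 0x%llx.\n",
                   (unsigned long long) le64_to_cpu(cb->addr));
    }

Collapsing the pair also removes the easy mistake of forgetting the high word on 64-bit DMA setups.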
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c index f4c016012f18..45421c8b6010 100644 --- a/drivers/net/qlge/qlge_main.c +++ b/drivers/net/qlge/qlge_main.c | |||
@@ -76,7 +76,6 @@ MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); | |||
76 | 76 | ||
77 | static struct pci_device_id qlge_pci_tbl[] __devinitdata = { | 77 | static struct pci_device_id qlge_pci_tbl[] __devinitdata = { |
78 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)}, | 78 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID)}, |
79 | {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID1)}, | ||
80 | /* required last entry */ | 79 | /* required last entry */ |
81 | {0,} | 80 | {0,} |
82 | }; | 81 | }; |
@@ -127,12 +126,12 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask) | |||
127 | 126 | ||
128 | int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) | 127 | int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask) |
129 | { | 128 | { |
130 | unsigned int seconds = 3; | 129 | unsigned int wait_count = 30; |
131 | do { | 130 | do { |
132 | if (!ql_sem_trylock(qdev, sem_mask)) | 131 | if (!ql_sem_trylock(qdev, sem_mask)) |
133 | return 0; | 132 | return 0; |
134 | ssleep(1); | 133 | udelay(100); |
135 | } while (--seconds); | 134 | } while (--wait_count); |
136 | return -ETIMEDOUT; | 135 | return -ETIMEDOUT; |
137 | } | 136 | } |
138 | 137 | ||
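The ql_sem_spinlock() change above alters both the timeout and the waiting primitive: the old loop slept one second per attempt for up to three attempts (a worst case of 3 seconds, using ssleep(), which is only legal in sleepable context), while the new loop busy-waits with udelay(100) for up to thirty attempts, i.e. 30 * 100 microseconds = 3 milliseconds worst case before returning -ETIMEDOUT. The trade-off is that udelay() spins the CPU, so the per-iteration delay has to stay small for the total cost to remain negligible.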
@@ -1545,7 +1544,7 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev, | |||
1545 | static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) | 1544 | static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) |
1546 | { | 1545 | { |
1547 | struct ql_adapter *qdev = rx_ring->qdev; | 1546 | struct ql_adapter *qdev = rx_ring->qdev; |
1548 | u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); | 1547 | u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); |
1549 | struct ob_mac_iocb_rsp *net_rsp = NULL; | 1548 | struct ob_mac_iocb_rsp *net_rsp = NULL; |
1550 | int count = 0; | 1549 | int count = 0; |
1551 | 1550 | ||
@@ -1571,7 +1570,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) | |||
1571 | } | 1570 | } |
1572 | count++; | 1571 | count++; |
1573 | ql_update_cq(rx_ring); | 1572 | ql_update_cq(rx_ring); |
1574 | prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); | 1573 | prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); |
1575 | } | 1574 | } |
1576 | ql_write_cq_idx(rx_ring); | 1575 | ql_write_cq_idx(rx_ring); |
1577 | if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) { | 1576 | if (netif_queue_stopped(qdev->ndev) && net_rsp != NULL) { |
@@ -1591,7 +1590,7 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) | |||
1591 | static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) | 1590 | static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) |
1592 | { | 1591 | { |
1593 | struct ql_adapter *qdev = rx_ring->qdev; | 1592 | struct ql_adapter *qdev = rx_ring->qdev; |
1594 | u32 prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); | 1593 | u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); |
1595 | struct ql_net_rsp_iocb *net_rsp; | 1594 | struct ql_net_rsp_iocb *net_rsp; |
1596 | int count = 0; | 1595 | int count = 0; |
1597 | 1596 | ||
@@ -1624,7 +1623,7 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) | |||
1624 | } | 1623 | } |
1625 | count++; | 1624 | count++; |
1626 | ql_update_cq(rx_ring); | 1625 | ql_update_cq(rx_ring); |
1627 | prod = le32_to_cpu(*rx_ring->prod_idx_sh_reg); | 1626 | prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); |
1628 | if (count == budget) | 1627 | if (count == budget) |
1629 | break; | 1628 | break; |
1630 | } | 1629 | } |
@@ -1787,7 +1786,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
1787 | * Check the default queue and wake handler if active. | 1786 | * Check the default queue and wake handler if active. |
1788 | */ | 1787 | */ |
1789 | rx_ring = &qdev->rx_ring[0]; | 1788 | rx_ring = &qdev->rx_ring[0]; |
1790 | if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) { | 1789 | if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != rx_ring->cnsmr_idx) { |
1791 | QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n"); | 1790 | QPRINTK(qdev, INTR, INFO, "Waking handler for rx_ring[0].\n"); |
1792 | ql_disable_completion_interrupt(qdev, intr_context->intr); | 1791 | ql_disable_completion_interrupt(qdev, intr_context->intr); |
1793 | queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue, | 1792 | queue_delayed_work_on(smp_processor_id(), qdev->q_workqueue, |
@@ -1801,7 +1800,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) | |||
1801 | */ | 1800 | */ |
1802 | for (i = 1; i < qdev->rx_ring_count; i++) { | 1801 | for (i = 1; i < qdev->rx_ring_count; i++) { |
1803 | rx_ring = &qdev->rx_ring[i]; | 1802 | rx_ring = &qdev->rx_ring[i]; |
1804 | if (le32_to_cpu(*rx_ring->prod_idx_sh_reg) != | 1803 | if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != |
1805 | rx_ring->cnsmr_idx) { | 1804 | rx_ring->cnsmr_idx) { |
1806 | QPRINTK(qdev, INTR, INFO, | 1805 | QPRINTK(qdev, INTR, INFO, |
1807 | "Waking handler for rx_ring[%d].\n", i); | 1806 | "Waking handler for rx_ring[%d].\n", i); |
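The qlge_isr() and ql_clean_*_rx_ring() hunks replace open-coded le32_to_cpu(*rx_ring->prod_idx_sh_reg) reads with ql_read_sh_reg(). The helper itself is not part of this excerpt; presumably it lives in qlge.h and simply centralizes the same conversion, roughly along these lines (the exact definition and argument type are assumptions):

    /* Assumed shape of the shared-memory shadow-register read helper. */
    static inline u32 demo_read_sh_reg(__le32 *reg)
    {
            return le32_to_cpu(*reg);   /* index value the chip DMAs in little-endian */
    }

Wrapping the access in one place also leaves room to add a barrier or readl()-style accessor later without touching every caller.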
@@ -2356,28 +2355,6 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev) | |||
2356 | } | 2355 | } |
2357 | } | 2356 | } |
2358 | 2357 | ||
2359 | static void ql_free_ring_cb(struct ql_adapter *qdev) | ||
2360 | { | ||
2361 | kfree(qdev->ring_mem); | ||
2362 | } | ||
2363 | |||
2364 | static int ql_alloc_ring_cb(struct ql_adapter *qdev) | ||
2365 | { | ||
2366 | /* Allocate space for tx/rx ring control blocks. */ | ||
2367 | qdev->ring_mem_size = | ||
2368 | (qdev->tx_ring_count * sizeof(struct tx_ring)) + | ||
2369 | (qdev->rx_ring_count * sizeof(struct rx_ring)); | ||
2370 | qdev->ring_mem = kmalloc(qdev->ring_mem_size, GFP_KERNEL); | ||
2371 | if (qdev->ring_mem == NULL) { | ||
2372 | return -ENOMEM; | ||
2373 | } else { | ||
2374 | qdev->rx_ring = qdev->ring_mem; | ||
2375 | qdev->tx_ring = qdev->ring_mem + | ||
2376 | (qdev->rx_ring_count * sizeof(struct rx_ring)); | ||
2377 | } | ||
2378 | return 0; | ||
2379 | } | ||
2380 | |||
2381 | static void ql_free_mem_resources(struct ql_adapter *qdev) | 2358 | static void ql_free_mem_resources(struct ql_adapter *qdev) |
2382 | { | 2359 | { |
2383 | int i; | 2360 | int i; |
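With ql_alloc_ring_cb() and ql_free_ring_cb() removed here (and their call sites dropped from qlge_open()/qlge_close() further down), the rx_ring/tx_ring control blocks are no longer carved out of a single kmalloc() at configure time. The later hunks still index qdev->rx_ring[i] and qdev->tx_ring[i], so presumably struct ql_adapter now embeds fixed-size arrays bounded by the same limit as MAX_CPUS, which removes one allocation-failure path from the open sequence. A hedged sketch of the two layouts (the bounds and names below are illustrative, not the real qlge.h definitions):

    /* Before (roughly): one runtime allocation split into two ring arrays. */
    struct demo_adapter_old {
            void *ring_mem;             /* kmalloc(rx blocks + tx blocks) */
            struct rx_ring *rx_ring;    /* points into ring_mem */
            struct tx_ring *tx_ring;    /* points into ring_mem */
    };

    /* After (assumed): arrays embedded directly in the adapter structure. */
    #define DEMO_MAX_RX_RINGS 17        /* illustrative bound only */
    #define DEMO_MAX_TX_RINGS 8         /* illustrative bound only */

    struct demo_adapter_new {
            struct rx_ring rx_ring[DEMO_MAX_RX_RINGS];
            struct tx_ring tx_ring[DEMO_MAX_TX_RINGS];
    };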
@@ -2467,12 +2444,9 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |||
2467 | bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; | 2444 | bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; |
2468 | cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); | 2445 | cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT); |
2469 | 2446 | ||
2470 | cqicb->addr_lo = cpu_to_le32(rx_ring->cq_base_dma); | 2447 | cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); |
2471 | cqicb->addr_hi = cpu_to_le32((u64) rx_ring->cq_base_dma >> 32); | ||
2472 | 2448 | ||
2473 | cqicb->prod_idx_addr_lo = cpu_to_le32(rx_ring->prod_idx_sh_reg_dma); | 2449 | cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); |
2474 | cqicb->prod_idx_addr_hi = | ||
2475 | cpu_to_le32((u64) rx_ring->prod_idx_sh_reg_dma >> 32); | ||
2476 | 2450 | ||
2477 | /* | 2451 | /* |
2478 | * Set up the control block load flags. | 2452 | * Set up the control block load flags. |
@@ -2483,10 +2457,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |||
2483 | if (rx_ring->lbq_len) { | 2457 | if (rx_ring->lbq_len) { |
2484 | cqicb->flags |= FLAGS_LL; /* Load lbq values */ | 2458 | cqicb->flags |= FLAGS_LL; /* Load lbq values */ |
2485 | *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma; | 2459 | *((u64 *) rx_ring->lbq_base_indirect) = rx_ring->lbq_base_dma; |
2486 | cqicb->lbq_addr_lo = | 2460 | cqicb->lbq_addr = |
2487 | cpu_to_le32(rx_ring->lbq_base_indirect_dma); | 2461 | cpu_to_le64(rx_ring->lbq_base_indirect_dma); |
2488 | cqicb->lbq_addr_hi = | ||
2489 | cpu_to_le32((u64) rx_ring->lbq_base_indirect_dma >> 32); | ||
2490 | bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : | 2462 | bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : |
2491 | (u16) rx_ring->lbq_buf_size; | 2463 | (u16) rx_ring->lbq_buf_size; |
2492 | cqicb->lbq_buf_size = cpu_to_le16(bq_len); | 2464 | cqicb->lbq_buf_size = cpu_to_le16(bq_len); |
@@ -2501,10 +2473,8 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) | |||
2501 | if (rx_ring->sbq_len) { | 2473 | if (rx_ring->sbq_len) { |
2502 | cqicb->flags |= FLAGS_LS; /* Load sbq values */ | 2474 | cqicb->flags |= FLAGS_LS; /* Load sbq values */ |
2503 | *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma; | 2475 | *((u64 *) rx_ring->sbq_base_indirect) = rx_ring->sbq_base_dma; |
2504 | cqicb->sbq_addr_lo = | 2476 | cqicb->sbq_addr = |
2505 | cpu_to_le32(rx_ring->sbq_base_indirect_dma); | 2477 | cpu_to_le64(rx_ring->sbq_base_indirect_dma); |
2506 | cqicb->sbq_addr_hi = | ||
2507 | cpu_to_le32((u64) rx_ring->sbq_base_indirect_dma >> 32); | ||
2508 | cqicb->sbq_buf_size = | 2478 | cqicb->sbq_buf_size = |
2509 | cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8); | 2479 | cpu_to_le16(((rx_ring->sbq_buf_size / 2) + 8) & 0xfffffff8); |
2510 | bq_len = (rx_ring->sbq_len == 65536) ? 0 : | 2480 | bq_len = (rx_ring->sbq_len == 65536) ? 0 : |
@@ -2611,12 +2581,9 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring) | |||
2611 | Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); | 2581 | Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO); |
2612 | wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); | 2582 | wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id); |
2613 | wqicb->rid = 0; | 2583 | wqicb->rid = 0; |
2614 | wqicb->addr_lo = cpu_to_le32(tx_ring->wq_base_dma); | 2584 | wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma); |
2615 | wqicb->addr_hi = cpu_to_le32((u64) tx_ring->wq_base_dma >> 32); | ||
2616 | 2585 | ||
2617 | wqicb->cnsmr_idx_addr_lo = cpu_to_le32(tx_ring->cnsmr_idx_sh_reg_dma); | 2586 | wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma); |
2618 | wqicb->cnsmr_idx_addr_hi = | ||
2619 | cpu_to_le32((u64) tx_ring->cnsmr_idx_sh_reg_dma >> 32); | ||
2620 | 2587 | ||
2621 | ql_init_tx_ring(qdev, tx_ring); | 2588 | ql_init_tx_ring(qdev, tx_ring); |
2622 | 2589 | ||
@@ -2746,14 +2713,14 @@ static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev) | |||
2746 | * Outbound queue is for outbound completions only. | 2713 | * Outbound queue is for outbound completions only. |
2747 | */ | 2714 | */ |
2748 | intr_context->handler = qlge_msix_tx_isr; | 2715 | intr_context->handler = qlge_msix_tx_isr; |
2749 | sprintf(intr_context->name, "%s-txq-%d", | 2716 | sprintf(intr_context->name, "%s-tx-%d", |
2750 | qdev->ndev->name, i); | 2717 | qdev->ndev->name, i); |
2751 | } else { | 2718 | } else { |
2752 | /* | 2719 | /* |
2753 | * Inbound queues handle unicast frames only. | 2720 | * Inbound queues handle unicast frames only. |
2754 | */ | 2721 | */ |
2755 | intr_context->handler = qlge_msix_rx_isr; | 2722 | intr_context->handler = qlge_msix_rx_isr; |
2756 | sprintf(intr_context->name, "%s-rxq-%d", | 2723 | sprintf(intr_context->name, "%s-rx-%d", |
2757 | qdev->ndev->name, i); | 2724 | qdev->ndev->name, i); |
2758 | } | 2725 | } |
2759 | } | 2726 | } |
@@ -3247,7 +3214,6 @@ static int qlge_close(struct net_device *ndev) | |||
3247 | msleep(1); | 3214 | msleep(1); |
3248 | ql_adapter_down(qdev); | 3215 | ql_adapter_down(qdev); |
3249 | ql_release_adapter_resources(qdev); | 3216 | ql_release_adapter_resources(qdev); |
3250 | ql_free_ring_cb(qdev); | ||
3251 | return 0; | 3217 | return 0; |
3252 | } | 3218 | } |
3253 | 3219 | ||
@@ -3273,8 +3239,8 @@ static int ql_configure_rings(struct ql_adapter *qdev) | |||
3273 | * This limitation can be removed when requested. | 3239 | * This limitation can be removed when requested. |
3274 | */ | 3240 | */ |
3275 | 3241 | ||
3276 | if (cpu_cnt > 8) | 3242 | if (cpu_cnt > MAX_CPUS) |
3277 | cpu_cnt = 8; | 3243 | cpu_cnt = MAX_CPUS; |
3278 | 3244 | ||
3279 | /* | 3245 | /* |
3280 | * rx_ring[0] is always the default queue. | 3246 | * rx_ring[0] is always the default queue. |
@@ -3294,9 +3260,6 @@ static int ql_configure_rings(struct ql_adapter *qdev) | |||
3294 | */ | 3260 | */ |
3295 | qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1; | 3261 | qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count + 1; |
3296 | 3262 | ||
3297 | if (ql_alloc_ring_cb(qdev)) | ||
3298 | return -ENOMEM; | ||
3299 | |||
3300 | for (i = 0; i < qdev->tx_ring_count; i++) { | 3263 | for (i = 0; i < qdev->tx_ring_count; i++) { |
3301 | tx_ring = &qdev->tx_ring[i]; | 3264 | tx_ring = &qdev->tx_ring[i]; |
3302 | memset((void *)tx_ring, 0, sizeof(tx_ring)); | 3265 | memset((void *)tx_ring, 0, sizeof(tx_ring)); |
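The ring arithmetic a few lines up is easier to see with numbers: rx_ring_count = tx_ring_count + rss_ring_count + 1, where the extra one is the default queue rx_ring[0]. Assuming the driver sizes both counts from the (now MAX_CPUS-clamped) cpu_cnt, an assignment that sits just outside this excerpt, a 4-CPU machine would end up with 4 + 4 + 1 = 9 completion queues: one default, four outbound-completion, four inbound RSS.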
@@ -3393,7 +3356,6 @@ static int qlge_open(struct net_device *ndev) | |||
3393 | 3356 | ||
3394 | error_up: | 3357 | error_up: |
3395 | ql_release_adapter_resources(qdev); | 3358 | ql_release_adapter_resources(qdev); |
3396 | ql_free_ring_cb(qdev); | ||
3397 | return err; | 3359 | return err; |
3398 | } | 3360 | } |
3399 | 3361 | ||
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index cf3a082bc89d..72fd9e97c190 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -49,8 +49,8 @@ | |||
49 | #include <asm/processor.h> | 49 | #include <asm/processor.h> |
50 | 50 | ||
51 | #define DRV_NAME "r6040" | 51 | #define DRV_NAME "r6040" |
52 | #define DRV_VERSION "0.20" | 52 | #define DRV_VERSION "0.21" |
53 | #define DRV_RELDATE "07Jan2009" | 53 | #define DRV_RELDATE "09Jan2009" |
54 | 54 | ||
55 | /* PHY CHIP Address */ | 55 | /* PHY CHIP Address */ |
56 | #define PHY1_ADDR 1 /* For MAC1 */ | 56 | #define PHY1_ADDR 1 /* For MAC1 */ |
@@ -457,22 +457,12 @@ static void r6040_down(struct net_device *dev) | |||
457 | iowrite16(adrp[0], ioaddr + MID_0L); | 457 | iowrite16(adrp[0], ioaddr + MID_0L); |
458 | iowrite16(adrp[1], ioaddr + MID_0M); | 458 | iowrite16(adrp[1], ioaddr + MID_0M); |
459 | iowrite16(adrp[2], ioaddr + MID_0H); | 459 | iowrite16(adrp[2], ioaddr + MID_0H); |
460 | free_irq(dev->irq, dev); | ||
461 | |||
462 | /* Free RX buffer */ | ||
463 | r6040_free_rxbufs(dev); | ||
464 | |||
465 | /* Free TX buffer */ | ||
466 | r6040_free_txbufs(dev); | ||
467 | |||
468 | /* Free Descriptor memory */ | ||
469 | pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); | ||
470 | pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma); | ||
471 | } | 460 | } |
472 | 461 | ||
473 | static int r6040_close(struct net_device *dev) | 462 | static int r6040_close(struct net_device *dev) |
474 | { | 463 | { |
475 | struct r6040_private *lp = netdev_priv(dev); | 464 | struct r6040_private *lp = netdev_priv(dev); |
465 | struct pci_dev *pdev = lp->pdev; | ||
476 | 466 | ||
477 | /* deleted timer */ | 467 | /* deleted timer */ |
478 | del_timer_sync(&lp->timer); | 468 | del_timer_sync(&lp->timer); |
@@ -481,8 +471,28 @@ static int r6040_close(struct net_device *dev) | |||
481 | napi_disable(&lp->napi); | 471 | napi_disable(&lp->napi); |
482 | netif_stop_queue(dev); | 472 | netif_stop_queue(dev); |
483 | r6040_down(dev); | 473 | r6040_down(dev); |
474 | |||
475 | free_irq(dev->irq, dev); | ||
476 | |||
477 | /* Free RX buffer */ | ||
478 | r6040_free_rxbufs(dev); | ||
479 | |||
480 | /* Free TX buffer */ | ||
481 | r6040_free_txbufs(dev); | ||
482 | |||
484 | spin_unlock_irq(&lp->lock); | 483 | spin_unlock_irq(&lp->lock); |
485 | 484 | ||
485 | /* Free Descriptor memory */ | ||
486 | if (lp->rx_ring) { | ||
487 | pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); | ||
488 | lp->rx_ring = 0; | ||
489 | } | ||
490 | |||
491 | if (lp->tx_ring) { | ||
492 | pci_free_consistent(pdev, TX_DESC_SIZE, lp->tx_ring, lp->tx_ring_dma); | ||
493 | lp->tx_ring = 0; | ||
494 | } | ||
495 | |||
486 | return 0; | 496 | return 0; |
487 | } | 497 | } |
488 | 498 | ||
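The reworked r6040_close() above frees the RX/TX buffers and descriptor rings itself instead of doing it inside r6040_down(), and it guards each pci_free_consistent() call and clears the ring pointer afterwards. That makes the teardown idempotent: a second close, or a close after a partially failed open, finds a NULL pointer and skips the free rather than handing a stale address back to the DMA API. The pattern in isolation (generic names, not the r6040 fields):

    #include <linux/pci.h>

    /* Guard-and-clear teardown: safe to call more than once. */
    static void demo_free_ring(struct pci_dev *pdev, size_t size,
                               void **ring, dma_addr_t *ring_dma)
    {
            if (*ring) {
                    pci_free_consistent(pdev, size, *ring, *ring_dma);
                    *ring = NULL;       /* a repeated call is now a no-op */
            }
    }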
@@ -1049,6 +1059,7 @@ static const struct net_device_ops r6040_netdev_ops = { | |||
1049 | .ndo_set_multicast_list = r6040_multicast_list, | 1059 | .ndo_set_multicast_list = r6040_multicast_list, |
1050 | .ndo_change_mtu = eth_change_mtu, | 1060 | .ndo_change_mtu = eth_change_mtu, |
1051 | .ndo_validate_addr = eth_validate_addr, | 1061 | .ndo_validate_addr = eth_validate_addr, |
1062 | .ndo_set_mac_address = eth_mac_addr, | ||
1052 | .ndo_do_ioctl = r6040_ioctl, | 1063 | .ndo_do_ioctl = r6040_ioctl, |
1053 | .ndo_tx_timeout = r6040_tx_timeout, | 1064 | .ndo_tx_timeout = r6040_tx_timeout, |
1054 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1065 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -1143,8 +1154,10 @@ static int __devinit r6040_init_one(struct pci_dev *pdev, | |||
1143 | 1154 | ||
1144 | /* Some bootloader/BIOSes do not initialize | 1155 | /* Some bootloader/BIOSes do not initialize |
1145 | * MAC address, warn about that */ | 1156 | * MAC address, warn about that */ |
1146 | if (!(adrp[0] || adrp[1] || adrp[2])) | 1157 | if (!(adrp[0] || adrp[1] || adrp[2])) { |
1147 | printk(KERN_WARNING DRV_NAME ": MAC address not initialized\n"); | 1158 | printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n"); |
1159 | random_ether_addr(dev->dev_addr); | ||
1160 | } | ||
1148 | 1161 | ||
1149 | /* Link new device into r6040_root_dev */ | 1162 | /* Link new device into r6040_root_dev */ |
1150 | lp->pdev = pdev; | 1163 | lp->pdev = pdev; |
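Generating a fallback address with random_ether_addr() keeps the interface usable when the bootloader left the MAC register as all zeroes; the helper fills dev_addr with random bytes, clears the multicast bit and sets the locally-administered bit, so the result is a valid unicast address even though it is not globally unique. The check-and-fallback shape on its own (is_valid_ether_addr(), used here for brevity, is slightly stricter than the all-zero test in the hunk above):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    /* Sketch: fall back to a random locally-administered MAC. */
    static void demo_fixup_mac(struct net_device *dev)
    {
            if (!is_valid_ether_addr(dev->dev_addr)) {
                    printk(KERN_WARNING "%s: MAC not initialized, using a random one\n",
                           dev->name);
                    random_ether_addr(dev->dev_addr);
            }
    }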
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index 42fd31276602..8b75bef4a841 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c | |||
@@ -1408,6 +1408,7 @@ static const struct net_device_ops sc92031_netdev_ops = { | |||
1408 | .ndo_set_multicast_list = sc92031_set_multicast_list, | 1408 | .ndo_set_multicast_list = sc92031_set_multicast_list, |
1409 | .ndo_change_mtu = eth_change_mtu, | 1409 | .ndo_change_mtu = eth_change_mtu, |
1410 | .ndo_validate_addr = eth_validate_addr, | 1410 | .ndo_validate_addr = eth_validate_addr, |
1411 | .ndo_set_mac_address = eth_mac_addr, | ||
1411 | .ndo_tx_timeout = sc92031_tx_timeout, | 1412 | .ndo_tx_timeout = sc92031_tx_timeout, |
1412 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1413 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1413 | .ndo_poll_controller = sc92031_poll_controller, | 1414 | .ndo_poll_controller = sc92031_poll_controller, |
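Most of the one-line hunks in this section add .ndo_set_mac_address = eth_mac_addr to a net_device_ops table; presumably these drivers lost the default handler during the recent conversion to net_device_ops and could no longer change their MAC from userspace. eth_mac_addr() is the stock implementation for Ethernet devices: roughly, it refuses to change a running interface, validates the new address, and copies it into dev->dev_addr. A hedged approximation (the real helper lives in net/ethernet/eth.c and may differ in detail):

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/errno.h>
    #include <linux/string.h>

    /* Approximate behaviour of the generic .ndo_set_mac_address handler. */
    static int demo_eth_mac_addr(struct net_device *dev, void *p)
    {
            struct sockaddr *addr = p;

            if (netif_running(dev))
                    return -EBUSY;                  /* don't change a live interface */
            if (!is_valid_ether_addr(addr->sa_data))
                    return -EADDRNOTAVAIL;          /* reject zero/multicast addresses */
            memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
            return 0;
    }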
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 7673fd92eaf5..101c00a7bb73 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
@@ -854,20 +854,27 @@ static void efx_fini_io(struct efx_nic *efx) | |||
854 | * interrupts across them. */ | 854 | * interrupts across them. */ |
855 | static int efx_wanted_rx_queues(void) | 855 | static int efx_wanted_rx_queues(void) |
856 | { | 856 | { |
857 | cpumask_t core_mask; | 857 | cpumask_var_t core_mask; |
858 | int count; | 858 | int count; |
859 | int cpu; | 859 | int cpu; |
860 | 860 | ||
861 | cpus_clear(core_mask); | 861 | if (!alloc_cpumask_var(&core_mask, GFP_KERNEL)) { |
862 | printk(KERN_WARNING | ||
863 | "efx.c: allocation failure, irq balancing hobbled\n"); | ||
864 | return 1; | ||
865 | } | ||
866 | |||
867 | cpumask_clear(core_mask); | ||
862 | count = 0; | 868 | count = 0; |
863 | for_each_online_cpu(cpu) { | 869 | for_each_online_cpu(cpu) { |
864 | if (!cpu_isset(cpu, core_mask)) { | 870 | if (!cpumask_test_cpu(cpu, core_mask)) { |
865 | ++count; | 871 | ++count; |
866 | cpus_or(core_mask, core_mask, | 872 | cpumask_or(core_mask, core_mask, |
867 | topology_core_siblings(cpu)); | 873 | topology_core_cpumask(cpu)); |
868 | } | 874 | } |
869 | } | 875 | } |
870 | 876 | ||
877 | free_cpumask_var(core_mask); | ||
871 | return count; | 878 | return count; |
872 | } | 879 | } |
873 | 880 | ||
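The efx_wanted_rx_queues() hunk is a conversion to the then-new cpumask API: an on-stack cpumask_t becomes painfully large once NR_CPUS reaches the thousands, so the mask is now allocated with alloc_cpumask_var() (a cheap no-op allocation when CONFIG_CPUMASK_OFFSTACK is disabled), released with free_cpumask_var(), and manipulated through cpumask_clear()/cpumask_test_cpu()/cpumask_or(), with topology_core_cpumask() standing in for the old topology_core_siblings(). The allocation pattern in isolation:

    #include <linux/cpumask.h>
    #include <linux/topology.h>
    #include <linux/slab.h>

    /* Count distinct core groups among online CPUs using a heap-backed mask. */
    static int demo_count_core_groups(void)
    {
            cpumask_var_t seen;
            int cpu, count = 0;

            if (!alloc_cpumask_var(&seen, GFP_KERNEL))
                    return 1;                       /* degrade gracefully on failure */

            cpumask_clear(seen);                    /* alloc_cpumask_var() does not zero */
            for_each_online_cpu(cpu) {
                    if (!cpumask_test_cpu(cpu, seen)) {
                            count++;
                            cpumask_or(seen, seen, topology_core_cpumask(cpu));
                    }
            }
            free_cpumask_var(seen);
            return count;
    }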
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index b9768760fae7..9ecb77da9545 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
@@ -636,10 +636,11 @@ static void tenxpress_phy_fini(struct efx_nic *efx) | |||
636 | { | 636 | { |
637 | int reg; | 637 | int reg; |
638 | 638 | ||
639 | if (efx->phy_type == PHY_TYPE_SFT9001B) { | 639 | if (efx->phy_type == PHY_TYPE_SFT9001B) |
640 | device_remove_file(&efx->pci_dev->dev, | 640 | device_remove_file(&efx->pci_dev->dev, |
641 | &dev_attr_phy_short_reach); | 641 | &dev_attr_phy_short_reach); |
642 | } else { | 642 | |
643 | if (efx->phy_type == PHY_TYPE_SFX7101) { | ||
643 | /* Power down the LNPGA */ | 644 | /* Power down the LNPGA */ |
644 | reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); | 645 | reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); |
645 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, | 646 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, |
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 4acd41a093ad..6cbefcae9ac2 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -389,6 +389,7 @@ static const struct net_device_ops sis900_netdev_ops = { | |||
389 | .ndo_set_multicast_list = set_rx_mode, | 389 | .ndo_set_multicast_list = set_rx_mode, |
390 | .ndo_change_mtu = eth_change_mtu, | 390 | .ndo_change_mtu = eth_change_mtu, |
391 | .ndo_validate_addr = eth_validate_addr, | 391 | .ndo_validate_addr = eth_validate_addr, |
392 | .ndo_set_mac_address = eth_mac_addr, | ||
392 | .ndo_do_ioctl = mii_ioctl, | 393 | .ndo_do_ioctl = mii_ioctl, |
393 | .ndo_tx_timeout = sis900_tx_timeout, | 394 | .ndo_tx_timeout = sis900_tx_timeout, |
394 | #ifdef CONFIG_NET_POLL_CONTROLLER | 395 | #ifdef CONFIG_NET_POLL_CONTROLLER |
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c index 404b80e5ba11..8d36d40649ef 100644 --- a/drivers/net/smc-mca.c +++ b/drivers/net/smc-mca.c | |||
@@ -192,6 +192,7 @@ static const struct net_device_ops ultramca_netdev_ops = { | |||
192 | .ndo_get_stats = ei_get_stats, | 192 | .ndo_get_stats = ei_get_stats, |
193 | .ndo_set_multicast_list = ei_set_multicast_list, | 193 | .ndo_set_multicast_list = ei_set_multicast_list, |
194 | .ndo_validate_addr = eth_validate_addr, | 194 | .ndo_validate_addr = eth_validate_addr, |
195 | .ndo_set_mac_address = eth_mac_addr, | ||
195 | .ndo_change_mtu = eth_change_mtu, | 196 | .ndo_change_mtu = eth_change_mtu, |
196 | #ifdef CONFIG_NET_POLL_CONTROLLER | 197 | #ifdef CONFIG_NET_POLL_CONTROLLER |
197 | .ndo_poll_controller = ei_poll, | 198 | .ndo_poll_controller = ei_poll, |
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c index b3866089a206..2033fee3143a 100644 --- a/drivers/net/smc-ultra.c +++ b/drivers/net/smc-ultra.c | |||
@@ -196,6 +196,7 @@ static const struct net_device_ops ultra_netdev_ops = { | |||
196 | .ndo_get_stats = ei_get_stats, | 196 | .ndo_get_stats = ei_get_stats, |
197 | .ndo_set_multicast_list = ei_set_multicast_list, | 197 | .ndo_set_multicast_list = ei_set_multicast_list, |
198 | .ndo_validate_addr = eth_validate_addr, | 198 | .ndo_validate_addr = eth_validate_addr, |
199 | .ndo_set_mac_address = eth_mac_addr, | ||
199 | .ndo_change_mtu = eth_change_mtu, | 200 | .ndo_change_mtu = eth_change_mtu, |
200 | #ifdef CONFIG_NET_POLL_CONTROLLER | 201 | #ifdef CONFIG_NET_POLL_CONTROLLER |
201 | .ndo_poll_controller = ei_poll, | 202 | .ndo_poll_controller = ei_poll, |
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index dc3f1108884d..f513bdf1c887 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
@@ -144,6 +144,7 @@ static inline u32 smsc911x_reg_read(struct smsc911x_data *pdata, u32 reg) | |||
144 | } | 144 | } |
145 | 145 | ||
146 | BUG(); | 146 | BUG(); |
147 | return 0; | ||
147 | } | 148 | } |
148 | 149 | ||
149 | static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, | 150 | static inline void smsc911x_reg_write(struct smsc911x_data *pdata, u32 reg, |
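The added return 0; after BUG() in smsc911x_reg_read() changes nothing when CONFIG_BUG is enabled, since BUG() never returns there, but it silences the compiler warning about control reaching the end of a non-void function and gives a defined result on configurations where BUG() compiles to nothing. The shape of the fix in a generic accessor:

    #include <linux/io.h>
    #include <linux/bug.h>
    #include <linux/types.h>

    /* Unreachable-in-practice return value after BUG(). */
    static inline u32 demo_reg_read(void __iomem *base, u32 reg)
    {
            if (base)
                    return readl(base + reg);

            BUG();          /* unsupported configuration */
            return 0;       /* reached only if BUG() is compiled out */
    }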
@@ -1740,6 +1741,7 @@ static const struct net_device_ops smsc911x_netdev_ops = { | |||
1740 | .ndo_set_multicast_list = smsc911x_set_multicast_list, | 1741 | .ndo_set_multicast_list = smsc911x_set_multicast_list, |
1741 | .ndo_do_ioctl = smsc911x_do_ioctl, | 1742 | .ndo_do_ioctl = smsc911x_do_ioctl, |
1742 | .ndo_validate_addr = eth_validate_addr, | 1743 | .ndo_validate_addr = eth_validate_addr, |
1744 | .ndo_set_mac_address = eth_mac_addr, | ||
1743 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1745 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1744 | .ndo_poll_controller = smsc911x_poll_controller, | 1746 | .ndo_poll_controller = smsc911x_poll_controller, |
1745 | #endif | 1747 | #endif |
@@ -1967,7 +1969,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) | |||
1967 | smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); | 1969 | smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); |
1968 | 1970 | ||
1969 | retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED, | 1971 | retval = request_irq(dev->irq, smsc911x_irqhandler, IRQF_DISABLED, |
1970 | SMSC_CHIPNAME, dev); | 1972 | dev->name, dev); |
1971 | if (retval) { | 1973 | if (retval) { |
1972 | SMSC_WARNING(PROBE, | 1974 | SMSC_WARNING(PROBE, |
1973 | "Unable to claim requested irq: %d", dev->irq); | 1975 | "Unable to claim requested irq: %d", dev->irq); |
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c index 27e017d96966..c14a4c6452c7 100644 --- a/drivers/net/smsc9420.c +++ b/drivers/net/smsc9420.c | |||
@@ -1551,6 +1551,7 @@ static const struct net_device_ops smsc9420_netdev_ops = { | |||
1551 | .ndo_set_multicast_list = smsc9420_set_multicast_list, | 1551 | .ndo_set_multicast_list = smsc9420_set_multicast_list, |
1552 | .ndo_do_ioctl = smsc9420_do_ioctl, | 1552 | .ndo_do_ioctl = smsc9420_do_ioctl, |
1553 | .ndo_validate_addr = eth_validate_addr, | 1553 | .ndo_validate_addr = eth_validate_addr, |
1554 | .ndo_set_mac_address = eth_mac_addr, | ||
1554 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1555 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1555 | .ndo_poll_controller = smsc9420_poll_controller, | 1556 | .ndo_poll_controller = smsc9420_poll_controller, |
1556 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 1557 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index 7d5a1303e30d..11441225bf41 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
@@ -442,40 +442,30 @@ static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) | |||
442 | { | 442 | { |
443 | struct ucc_fast_private *uccf; | 443 | struct ucc_fast_private *uccf; |
444 | struct ucc_geth __iomem *ug_regs; | 444 | struct ucc_geth __iomem *ug_regs; |
445 | u32 maccfg2, uccm; | ||
446 | 445 | ||
447 | uccf = ugeth->uccf; | 446 | uccf = ugeth->uccf; |
448 | ug_regs = ugeth->ug_regs; | 447 | ug_regs = ugeth->ug_regs; |
449 | 448 | ||
450 | /* Enable interrupts for magic packet detection */ | 449 | /* Enable interrupts for magic packet detection */ |
451 | uccm = in_be32(uccf->p_uccm); | 450 | setbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD); |
452 | uccm |= UCCE_MPD; | ||
453 | out_be32(uccf->p_uccm, uccm); | ||
454 | 451 | ||
455 | /* Enable magic packet detection */ | 452 | /* Enable magic packet detection */ |
456 | maccfg2 = in_be32(&ug_regs->maccfg2); | 453 | setbits32(&ug_regs->maccfg2, MACCFG2_MPE); |
457 | maccfg2 |= MACCFG2_MPE; | ||
458 | out_be32(&ug_regs->maccfg2, maccfg2); | ||
459 | } | 454 | } |
460 | 455 | ||
461 | static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) | 456 | static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) |
462 | { | 457 | { |
463 | struct ucc_fast_private *uccf; | 458 | struct ucc_fast_private *uccf; |
464 | struct ucc_geth __iomem *ug_regs; | 459 | struct ucc_geth __iomem *ug_regs; |
465 | u32 maccfg2, uccm; | ||
466 | 460 | ||
467 | uccf = ugeth->uccf; | 461 | uccf = ugeth->uccf; |
468 | ug_regs = ugeth->ug_regs; | 462 | ug_regs = ugeth->ug_regs; |
469 | 463 | ||
470 | /* Disable interrupts for magic packet detection */ | 464 | /* Disable interrupts for magic packet detection */ |
471 | uccm = in_be32(uccf->p_uccm); | 465 | clrbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD); |
472 | uccm &= ~UCCE_MPD; | ||
473 | out_be32(uccf->p_uccm, uccm); | ||
474 | 466 | ||
475 | /* Disable magic packet detection */ | 467 | /* Disable magic packet detection */ |
476 | maccfg2 = in_be32(&ug_regs->maccfg2); | 468 | clrbits32(&ug_regs->maccfg2, MACCFG2_MPE); |
477 | maccfg2 &= ~MACCFG2_MPE; | ||
478 | out_be32(&ug_regs->maccfg2, maccfg2); | ||
479 | } | 469 | } |
480 | #endif /* MAGIC_PACKET */ | 470 | #endif /* MAGIC_PACKET */ |
481 | 471 | ||
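The ucc_geth conversions above and below replace three-step read/modify/write sequences on the big-endian QE registers with the powerpc setbits32()/clrbits32() helpers, which fold the in_be32()/out_be32() pair and the mask operation into one call and drop the now-unneeded temporaries such as maccfg2 and uccm. Behaviourally the helpers amount to this sketch; the real definitions live in the powerpc I/O headers:

    #include <linux/types.h>
    #include <asm/io.h>     /* in_be32/out_be32 (powerpc) */

    /* Behavioural sketch of the helpers used throughout these hunks. */
    static inline void demo_setbits32(u32 __iomem *addr, u32 set)
    {
            out_be32(addr, in_be32(addr) | set);    /* set the given bits */
    }

    static inline void demo_clrbits32(u32 __iomem *addr, u32 clear)
    {
            out_be32(addr, in_be32(addr) & ~clear); /* clear the given bits */
    }

Note the read and write are still not atomic; like the open-coded version, callers need locking if the register is touched from more than one context.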
@@ -585,7 +575,8 @@ static void get_statistics(struct ucc_geth_private *ugeth, | |||
585 | 575 | ||
586 | /* Hardware only if user handed pointer and driver actually | 576 | /* Hardware only if user handed pointer and driver actually |
587 | gathers hardware statistics */ | 577 | gathers hardware statistics */ |
588 | if (hardware_statistics && (in_be32(&uf_regs->upsmr) & UPSMR_HSE)) { | 578 | if (hardware_statistics && |
579 | (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) { | ||
589 | hardware_statistics->tx64 = in_be32(&ug_regs->tx64); | 580 | hardware_statistics->tx64 = in_be32(&ug_regs->tx64); |
590 | hardware_statistics->tx127 = in_be32(&ug_regs->tx127); | 581 | hardware_statistics->tx127 = in_be32(&ug_regs->tx127); |
591 | hardware_statistics->tx255 = in_be32(&ug_regs->tx255); | 582 | hardware_statistics->tx255 = in_be32(&ug_regs->tx255); |
@@ -1181,9 +1172,7 @@ int init_flow_control_params(u32 automatic_flow_control_mode, | |||
1181 | out_be32(uempr_register, value); | 1172 | out_be32(uempr_register, value); |
1182 | 1173 | ||
1183 | /* Set UPSMR register */ | 1174 | /* Set UPSMR register */ |
1184 | value = in_be32(upsmr_register); | 1175 | setbits32(upsmr_register, automatic_flow_control_mode); |
1185 | value |= automatic_flow_control_mode; | ||
1186 | out_be32(upsmr_register, value); | ||
1187 | 1176 | ||
1188 | value = in_be32(maccfg1_register); | 1177 | value = in_be32(maccfg1_register); |
1189 | if (rx_flow_control_enable) | 1178 | if (rx_flow_control_enable) |
@@ -1200,14 +1189,11 @@ static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, | |||
1200 | u32 __iomem *upsmr_register, | 1189 | u32 __iomem *upsmr_register, |
1201 | u16 __iomem *uescr_register) | 1190 | u16 __iomem *uescr_register) |
1202 | { | 1191 | { |
1203 | u32 upsmr_value = 0; | ||
1204 | u16 uescr_value = 0; | 1192 | u16 uescr_value = 0; |
1193 | |||
1205 | /* Enable hardware statistics gathering if requested */ | 1194 | /* Enable hardware statistics gathering if requested */ |
1206 | if (enable_hardware_statistics) { | 1195 | if (enable_hardware_statistics) |
1207 | upsmr_value = in_be32(upsmr_register); | 1196 | setbits32(upsmr_register, UCC_GETH_UPSMR_HSE); |
1208 | upsmr_value |= UPSMR_HSE; | ||
1209 | out_be32(upsmr_register, upsmr_value); | ||
1210 | } | ||
1211 | 1197 | ||
1212 | /* Clear hardware statistics counters */ | 1198 | /* Clear hardware statistics counters */ |
1213 | uescr_value = in_be16(uescr_register); | 1199 | uescr_value = in_be16(uescr_register); |
@@ -1233,23 +1219,17 @@ static int init_firmware_statistics_gathering_mode(int | |||
1233 | { | 1219 | { |
1234 | /* Note: this function does not check if */ | 1220 | /* Note: this function does not check if */ |
1235 | /* the parameters it receives are NULL */ | 1221 | /* the parameters it receives are NULL */ |
1236 | u16 temoder_value; | ||
1237 | u32 remoder_value; | ||
1238 | 1222 | ||
1239 | if (enable_tx_firmware_statistics) { | 1223 | if (enable_tx_firmware_statistics) { |
1240 | out_be32(tx_rmon_base_ptr, | 1224 | out_be32(tx_rmon_base_ptr, |
1241 | tx_firmware_statistics_structure_address); | 1225 | tx_firmware_statistics_structure_address); |
1242 | temoder_value = in_be16(temoder_register); | 1226 | setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE); |
1243 | temoder_value |= TEMODER_TX_RMON_STATISTICS_ENABLE; | ||
1244 | out_be16(temoder_register, temoder_value); | ||
1245 | } | 1227 | } |
1246 | 1228 | ||
1247 | if (enable_rx_firmware_statistics) { | 1229 | if (enable_rx_firmware_statistics) { |
1248 | out_be32(rx_rmon_base_ptr, | 1230 | out_be32(rx_rmon_base_ptr, |
1249 | rx_firmware_statistics_structure_address); | 1231 | rx_firmware_statistics_structure_address); |
1250 | remoder_value = in_be32(remoder_register); | 1232 | setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE); |
1251 | remoder_value |= REMODER_RX_RMON_STATISTICS_ENABLE; | ||
1252 | out_be32(remoder_register, remoder_value); | ||
1253 | } | 1233 | } |
1254 | 1234 | ||
1255 | return 0; | 1235 | return 0; |
@@ -1316,15 +1296,12 @@ static int init_check_frame_length_mode(int length_check, | |||
1316 | static int init_preamble_length(u8 preamble_length, | 1296 | static int init_preamble_length(u8 preamble_length, |
1317 | u32 __iomem *maccfg2_register) | 1297 | u32 __iomem *maccfg2_register) |
1318 | { | 1298 | { |
1319 | u32 value = 0; | ||
1320 | |||
1321 | if ((preamble_length < 3) || (preamble_length > 7)) | 1299 | if ((preamble_length < 3) || (preamble_length > 7)) |
1322 | return -EINVAL; | 1300 | return -EINVAL; |
1323 | 1301 | ||
1324 | value = in_be32(maccfg2_register); | 1302 | clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, |
1325 | value &= ~MACCFG2_PREL_MASK; | 1303 | preamble_length << MACCFG2_PREL_SHIFT); |
1326 | value |= (preamble_length << MACCFG2_PREL_SHIFT); | 1304 | |
1327 | out_be32(maccfg2_register, value); | ||
1328 | return 0; | 1305 | return 0; |
1329 | } | 1306 | } |
1330 | 1307 | ||
@@ -1337,19 +1314,19 @@ static int init_rx_parameters(int reject_broadcast, | |||
1337 | value = in_be32(upsmr_register); | 1314 | value = in_be32(upsmr_register); |
1338 | 1315 | ||
1339 | if (reject_broadcast) | 1316 | if (reject_broadcast) |
1340 | value |= UPSMR_BRO; | 1317 | value |= UCC_GETH_UPSMR_BRO; |
1341 | else | 1318 | else |
1342 | value &= ~UPSMR_BRO; | 1319 | value &= ~UCC_GETH_UPSMR_BRO; |
1343 | 1320 | ||
1344 | if (receive_short_frames) | 1321 | if (receive_short_frames) |
1345 | value |= UPSMR_RSH; | 1322 | value |= UCC_GETH_UPSMR_RSH; |
1346 | else | 1323 | else |
1347 | value &= ~UPSMR_RSH; | 1324 | value &= ~UCC_GETH_UPSMR_RSH; |
1348 | 1325 | ||
1349 | if (promiscuous) | 1326 | if (promiscuous) |
1350 | value |= UPSMR_PRO; | 1327 | value |= UCC_GETH_UPSMR_PRO; |
1351 | else | 1328 | else |
1352 | value &= ~UPSMR_PRO; | 1329 | value &= ~UCC_GETH_UPSMR_PRO; |
1353 | 1330 | ||
1354 | out_be32(upsmr_register, value); | 1331 | out_be32(upsmr_register, value); |
1355 | 1332 | ||
@@ -1410,26 +1387,27 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth) | |||
1410 | 1387 | ||
1411 | /* Set UPSMR */ | 1388 | /* Set UPSMR */ |
1412 | upsmr = in_be32(&uf_regs->upsmr); | 1389 | upsmr = in_be32(&uf_regs->upsmr); |
1413 | upsmr &= ~(UPSMR_RPM | UPSMR_R10M | UPSMR_TBIM | UPSMR_RMM); | 1390 | upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | |
1391 | UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); | ||
1414 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || | 1392 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || |
1415 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || | 1393 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || |
1416 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || | 1394 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || |
1417 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || | 1395 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1418 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | 1396 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || |
1419 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | 1397 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1420 | upsmr |= UPSMR_RPM; | 1398 | upsmr |= UCC_GETH_UPSMR_RPM; |
1421 | switch (ugeth->max_speed) { | 1399 | switch (ugeth->max_speed) { |
1422 | case SPEED_10: | 1400 | case SPEED_10: |
1423 | upsmr |= UPSMR_R10M; | 1401 | upsmr |= UCC_GETH_UPSMR_R10M; |
1424 | /* FALLTHROUGH */ | 1402 | /* FALLTHROUGH */ |
1425 | case SPEED_100: | 1403 | case SPEED_100: |
1426 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) | 1404 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) |
1427 | upsmr |= UPSMR_RMM; | 1405 | upsmr |= UCC_GETH_UPSMR_RMM; |
1428 | } | 1406 | } |
1429 | } | 1407 | } |
1430 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || | 1408 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || |
1431 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | 1409 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1432 | upsmr |= UPSMR_TBIM; | 1410 | upsmr |= UCC_GETH_UPSMR_TBIM; |
1433 | } | 1411 | } |
1434 | out_be32(&uf_regs->upsmr, upsmr); | 1412 | out_be32(&uf_regs->upsmr, upsmr); |
1435 | 1413 | ||
@@ -1517,9 +1495,9 @@ static void adjust_link(struct net_device *dev) | |||
1517 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | 1495 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || |
1518 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | 1496 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1519 | if (phydev->speed == SPEED_10) | 1497 | if (phydev->speed == SPEED_10) |
1520 | upsmr |= UPSMR_R10M; | 1498 | upsmr |= UCC_GETH_UPSMR_R10M; |
1521 | else | 1499 | else |
1522 | upsmr &= ~(UPSMR_R10M); | 1500 | upsmr &= ~UCC_GETH_UPSMR_R10M; |
1523 | } | 1501 | } |
1524 | break; | 1502 | break; |
1525 | default: | 1503 | default: |
@@ -1602,10 +1580,8 @@ static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) | |||
1602 | uccf = ugeth->uccf; | 1580 | uccf = ugeth->uccf; |
1603 | 1581 | ||
1604 | /* Mask GRACEFUL STOP TX interrupt bit and clear it */ | 1582 | /* Mask GRACEFUL STOP TX interrupt bit and clear it */ |
1605 | temp = in_be32(uccf->p_uccm); | 1583 | clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); |
1606 | temp &= ~UCCE_GRA; | 1584 | out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ |
1607 | out_be32(uccf->p_uccm, temp); | ||
1608 | out_be32(uccf->p_ucce, UCCE_GRA); /* clear by writing 1 */ | ||
1609 | 1585 | ||
1610 | /* Issue host command */ | 1586 | /* Issue host command */ |
1611 | cecr_subblock = | 1587 | cecr_subblock = |
@@ -1617,7 +1593,7 @@ static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) | |||
1617 | do { | 1593 | do { |
1618 | msleep(10); | 1594 | msleep(10); |
1619 | temp = in_be32(uccf->p_ucce); | 1595 | temp = in_be32(uccf->p_ucce); |
1620 | } while (!(temp & UCCE_GRA) && --i); | 1596 | } while (!(temp & UCC_GETH_UCCE_GRA) && --i); |
1621 | 1597 | ||
1622 | uccf->stopped_tx = 1; | 1598 | uccf->stopped_tx = 1; |
1623 | 1599 | ||
@@ -1975,12 +1951,9 @@ static void ucc_geth_set_multi(struct net_device *dev) | |||
1975 | uf_regs = ugeth->uccf->uf_regs; | 1951 | uf_regs = ugeth->uccf->uf_regs; |
1976 | 1952 | ||
1977 | if (dev->flags & IFF_PROMISC) { | 1953 | if (dev->flags & IFF_PROMISC) { |
1978 | 1954 | setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); | |
1979 | out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr) | UPSMR_PRO); | ||
1980 | |||
1981 | } else { | 1955 | } else { |
1982 | 1956 | clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); | |
1983 | out_be32(&uf_regs->upsmr, in_be32(&uf_regs->upsmr)&~UPSMR_PRO); | ||
1984 | 1957 | ||
1985 | p_82xx_addr_filt = | 1958 | p_82xx_addr_filt = |
1986 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> | 1959 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> |
@@ -2020,7 +1993,6 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) | |||
2020 | { | 1993 | { |
2021 | struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; | 1994 | struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; |
2022 | struct phy_device *phydev = ugeth->phydev; | 1995 | struct phy_device *phydev = ugeth->phydev; |
2023 | u32 tempval; | ||
2024 | 1996 | ||
2025 | ugeth_vdbg("%s: IN", __func__); | 1997 | ugeth_vdbg("%s: IN", __func__); |
2026 | 1998 | ||
@@ -2037,9 +2009,7 @@ static void ucc_geth_stop(struct ucc_geth_private *ugeth) | |||
2037 | out_be32(ugeth->uccf->p_ucce, 0xffffffff); | 2009 | out_be32(ugeth->uccf->p_ucce, 0xffffffff); |
2038 | 2010 | ||
2039 | /* Disable Rx and Tx */ | 2011 | /* Disable Rx and Tx */ |
2040 | tempval = in_be32(&ug_regs->maccfg1); | 2012 | clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
2041 | tempval &= ~(MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); | ||
2042 | out_be32(&ug_regs->maccfg1, tempval); | ||
2043 | 2013 | ||
2044 | ucc_geth_memclean(ugeth); | 2014 | ucc_geth_memclean(ugeth); |
2045 | } | 2015 | } |
@@ -2153,10 +2123,10 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth) | |||
2153 | /* Generate uccm_mask for receive */ | 2123 | /* Generate uccm_mask for receive */ |
2154 | uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ | 2124 | uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ |
2155 | for (i = 0; i < ug_info->numQueuesRx; i++) | 2125 | for (i = 0; i < ug_info->numQueuesRx; i++) |
2156 | uf_info->uccm_mask |= (UCCE_RXBF_SINGLE_MASK << i); | 2126 | uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); |
2157 | 2127 | ||
2158 | for (i = 0; i < ug_info->numQueuesTx; i++) | 2128 | for (i = 0; i < ug_info->numQueuesTx; i++) |
2159 | uf_info->uccm_mask |= (UCCE_TXBF_SINGLE_MASK << i); | 2129 | uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); |
2160 | /* Initialize the general fast UCC block. */ | 2130 | /* Initialize the general fast UCC block. */ |
2161 | if (ucc_fast_init(uf_info, &ugeth->uccf)) { | 2131 | if (ucc_fast_init(uf_info, &ugeth->uccf)) { |
2162 | if (netif_msg_probe(ugeth)) | 2132 | if (netif_msg_probe(ugeth)) |
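The uccm_mask construction works because the per-queue event bits sit in contiguous runs starting at the queue-0 bit: the removed defines in the ucc_geth.h hunk below show UCCE_RXF0 = 0x00000001 and UCCE_TXB0 = 0x00010000, each subsequent queue one bit higher, and the renamed UCC_GETH_UCCE_* constants presumably keep the same positions (the one-for-one composite definitions suggest as much). Shifting the queue-0 bit by the queue index therefore selects that queue's bit; for numQueuesRx = 2 and numQueuesTx = 1 the loops accumulate (0x1 << 0) | (0x1 << 1) | (0x10000 << 0) = 0x00010003 on top of the error mask.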
@@ -2185,7 +2155,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) | |||
2185 | struct ucc_geth __iomem *ug_regs; | 2155 | struct ucc_geth __iomem *ug_regs; |
2186 | int ret_val = -EINVAL; | 2156 | int ret_val = -EINVAL; |
2187 | u32 remoder = UCC_GETH_REMODER_INIT; | 2157 | u32 remoder = UCC_GETH_REMODER_INIT; |
2188 | u32 init_enet_pram_offset, cecr_subblock, command, maccfg1; | 2158 | u32 init_enet_pram_offset, cecr_subblock, command; |
2189 | u32 ifstat, i, j, size, l2qt, l3qt, length; | 2159 | u32 ifstat, i, j, size, l2qt, l3qt, length; |
2190 | u16 temoder = UCC_GETH_TEMODER_INIT; | 2160 | u16 temoder = UCC_GETH_TEMODER_INIT; |
2191 | u16 test; | 2161 | u16 test; |
@@ -2281,10 +2251,7 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth) | |||
2281 | &uf_regs->upsmr, | 2251 | &uf_regs->upsmr, |
2282 | &ug_regs->uempr, &ug_regs->maccfg1); | 2252 | &ug_regs->uempr, &ug_regs->maccfg1); |
2283 | 2253 | ||
2284 | maccfg1 = in_be32(&ug_regs->maccfg1); | 2254 | setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
2285 | maccfg1 |= MACCFG1_ENABLE_RX; | ||
2286 | maccfg1 |= MACCFG1_ENABLE_TX; | ||
2287 | out_be32(&ug_regs->maccfg1, maccfg1); | ||
2288 | 2255 | ||
2289 | /* Set IPGIFG */ | 2256 | /* Set IPGIFG */ |
2290 | /* For more details see the hardware spec. */ | 2257 | /* For more details see the hardware spec. */ |
@@ -3274,7 +3241,6 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |||
3274 | static int ucc_geth_poll(struct napi_struct *napi, int budget) | 3241 | static int ucc_geth_poll(struct napi_struct *napi, int budget) |
3275 | { | 3242 | { |
3276 | struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); | 3243 | struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); |
3277 | struct net_device *dev = ugeth->dev; | ||
3278 | struct ucc_geth_info *ug_info; | 3244 | struct ucc_geth_info *ug_info; |
3279 | int howmany, i; | 3245 | int howmany, i; |
3280 | 3246 | ||
@@ -3285,14 +3251,8 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget) | |||
3285 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); | 3251 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); |
3286 | 3252 | ||
3287 | if (howmany < budget) { | 3253 | if (howmany < budget) { |
3288 | struct ucc_fast_private *uccf; | ||
3289 | u32 uccm; | ||
3290 | |||
3291 | netif_rx_complete(napi); | 3254 | netif_rx_complete(napi); |
3292 | uccf = ugeth->uccf; | 3255 | setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS); |
3293 | uccm = in_be32(uccf->p_uccm); | ||
3294 | uccm |= UCCE_RX_EVENTS; | ||
3295 | out_be32(uccf->p_uccm, uccm); | ||
3296 | } | 3256 | } |
3297 | 3257 | ||
3298 | return howmany; | 3258 | return howmany; |
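ucc_geth_poll() now reads as the standard NAPI pattern of this era: consume at most budget packets, and only when fewer than budget were found call netif_rx_complete() and unmask the RX event interrupts, here with a single setbits32() on the UCCM register instead of the old read/or/write triple. The generic shape (names other than the NAPI calls are placeholders):

    #include <linux/kernel.h>
    #include <linux/netdevice.h>

    struct demo_priv {
            struct napi_struct napi;
            /* ... */
    };

    int demo_clean_rx(struct demo_priv *priv, int limit);       /* placeholder */
    void demo_enable_rx_irq(struct demo_priv *priv);            /* placeholder */

    /* Canonical poll shape for the single-argument netif_rx_complete() API. */
    static int demo_poll(struct napi_struct *napi, int budget)
    {
            struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
            int done = 0;

            done += demo_clean_rx(priv, budget - done); /* process up to budget */

            if (done < budget) {
                    netif_rx_complete(napi);            /* leave polling mode */
                    demo_enable_rx_irq(priv);           /* e.g. setbits32(p_uccm, RX events) */
            }
            return done;                                /* == budget means "poll me again" */
    }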
@@ -3332,7 +3292,7 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) | |||
3332 | /* Tx event processing */ | 3292 | /* Tx event processing */ |
3333 | if (ucce & UCCE_TX_EVENTS) { | 3293 | if (ucce & UCCE_TX_EVENTS) { |
3334 | spin_lock(&ugeth->lock); | 3294 | spin_lock(&ugeth->lock); |
3335 | tx_mask = UCCE_TXBF_SINGLE_MASK; | 3295 | tx_mask = UCC_GETH_UCCE_TXB0; |
3336 | for (i = 0; i < ug_info->numQueuesTx; i++) { | 3296 | for (i = 0; i < ug_info->numQueuesTx; i++) { |
3337 | if (ucce & tx_mask) | 3297 | if (ucce & tx_mask) |
3338 | ucc_geth_tx(dev, i); | 3298 | ucc_geth_tx(dev, i); |
@@ -3344,12 +3304,10 @@ static irqreturn_t ucc_geth_irq_handler(int irq, void *info) | |||
3344 | 3304 | ||
3345 | /* Errors and other events */ | 3305 | /* Errors and other events */ |
3346 | if (ucce & UCCE_OTHER) { | 3306 | if (ucce & UCCE_OTHER) { |
3347 | if (ucce & UCCE_BSY) { | 3307 | if (ucce & UCC_GETH_UCCE_BSY) |
3348 | dev->stats.rx_errors++; | 3308 | dev->stats.rx_errors++; |
3349 | } | 3309 | if (ucce & UCC_GETH_UCCE_TXE) |
3350 | if (ucce & UCCE_TXE) { | ||
3351 | dev->stats.tx_errors++; | 3310 | dev->stats.tx_errors++; |
3352 | } | ||
3353 | } | 3311 | } |
3354 | 3312 | ||
3355 | return IRQ_HANDLED; | 3313 | return IRQ_HANDLED; |
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h index d74d2f7cb739..8f699cb773ee 100644 --- a/drivers/net/ucc_geth.h +++ b/drivers/net/ucc_geth.h | |||
@@ -162,92 +162,27 @@ struct ucc_geth { | |||
162 | boundary */ | 162 | boundary */ |
163 | 163 | ||
164 | /* UCC GETH Event Register */ | 164 | /* UCC GETH Event Register */ |
165 | #define UCCE_MPD 0x80000000 /* Magic packet | 165 | #define UCCE_TXB (UCC_GETH_UCCE_TXB7 | UCC_GETH_UCCE_TXB6 | \ |
166 | detection */ | 166 | UCC_GETH_UCCE_TXB5 | UCC_GETH_UCCE_TXB4 | \ |
167 | #define UCCE_SCAR 0x40000000 | 167 | UCC_GETH_UCCE_TXB3 | UCC_GETH_UCCE_TXB2 | \ |
168 | #define UCCE_GRA 0x20000000 /* Tx graceful | 168 | UCC_GETH_UCCE_TXB1 | UCC_GETH_UCCE_TXB0) |
169 | stop | 169 | |
170 | complete */ | 170 | #define UCCE_RXB (UCC_GETH_UCCE_RXB7 | UCC_GETH_UCCE_RXB6 | \ |
171 | #define UCCE_CBPR 0x10000000 | 171 | UCC_GETH_UCCE_RXB5 | UCC_GETH_UCCE_RXB4 | \ |
172 | #define UCCE_BSY 0x08000000 | 172 | UCC_GETH_UCCE_RXB3 | UCC_GETH_UCCE_RXB2 | \ |
173 | #define UCCE_RXC 0x04000000 | 173 | UCC_GETH_UCCE_RXB1 | UCC_GETH_UCCE_RXB0) |
174 | #define UCCE_TXC 0x02000000 | 174 | |
175 | #define UCCE_TXE 0x01000000 | 175 | #define UCCE_RXF (UCC_GETH_UCCE_RXF7 | UCC_GETH_UCCE_RXF6 | \ |
176 | #define UCCE_TXB7 0x00800000 | 176 | UCC_GETH_UCCE_RXF5 | UCC_GETH_UCCE_RXF4 | \ |
177 | #define UCCE_TXB6 0x00400000 | 177 | UCC_GETH_UCCE_RXF3 | UCC_GETH_UCCE_RXF2 | \ |
178 | #define UCCE_TXB5 0x00200000 | 178 | UCC_GETH_UCCE_RXF1 | UCC_GETH_UCCE_RXF0) |
179 | #define UCCE_TXB4 0x00100000 | 179 | |
180 | #define UCCE_TXB3 0x00080000 | 180 | #define UCCE_OTHER (UCC_GETH_UCCE_SCAR | UCC_GETH_UCCE_GRA | \ |
181 | #define UCCE_TXB2 0x00040000 | 181 | UCC_GETH_UCCE_CBPR | UCC_GETH_UCCE_BSY | \ |
182 | #define UCCE_TXB1 0x00020000 | 182 | UCC_GETH_UCCE_RXC | UCC_GETH_UCCE_TXC | UCC_GETH_UCCE_TXE) |
183 | #define UCCE_TXB0 0x00010000 | 183 | |
184 | #define UCCE_RXB7 0x00008000 | 184 | #define UCCE_RX_EVENTS (UCCE_RXF | UCC_GETH_UCCE_BSY) |
185 | #define UCCE_RXB6 0x00004000 | 185 | #define UCCE_TX_EVENTS (UCCE_TXB | UCC_GETH_UCCE_TXE) |
186 | #define UCCE_RXB5 0x00002000 | ||
187 | #define UCCE_RXB4 0x00001000 | ||
188 | #define UCCE_RXB3 0x00000800 | ||
189 | #define UCCE_RXB2 0x00000400 | ||
190 | #define UCCE_RXB1 0x00000200 | ||
191 | #define UCCE_RXB0 0x00000100 | ||
192 | #define UCCE_RXF7 0x00000080 | ||
193 | #define UCCE_RXF6 0x00000040 | ||
194 | #define UCCE_RXF5 0x00000020 | ||
195 | #define UCCE_RXF4 0x00000010 | ||
196 | #define UCCE_RXF3 0x00000008 | ||
197 | #define UCCE_RXF2 0x00000004 | ||
198 | #define UCCE_RXF1 0x00000002 | ||
199 | #define UCCE_RXF0 0x00000001 | ||
200 | |||
201 | #define UCCE_RXBF_SINGLE_MASK (UCCE_RXF0) | ||
202 | #define UCCE_TXBF_SINGLE_MASK (UCCE_TXB0) | ||
203 | |||
204 | #define UCCE_TXB (UCCE_TXB7 | UCCE_TXB6 | UCCE_TXB5 | UCCE_TXB4 |\ | ||
205 | UCCE_TXB3 | UCCE_TXB2 | UCCE_TXB1 | UCCE_TXB0) | ||
206 | #define UCCE_RXB (UCCE_RXB7 | UCCE_RXB6 | UCCE_RXB5 | UCCE_RXB4 |\ | ||
207 | UCCE_RXB3 | UCCE_RXB2 | UCCE_RXB1 | UCCE_RXB0) | ||
208 | #define UCCE_RXF (UCCE_RXF7 | UCCE_RXF6 | UCCE_RXF5 | UCCE_RXF4 |\ | ||
209 | UCCE_RXF3 | UCCE_RXF2 | UCCE_RXF1 | UCCE_RXF0) | ||
210 | #define UCCE_OTHER (UCCE_SCAR | UCCE_GRA | UCCE_CBPR | UCCE_BSY |\ | ||
211 | UCCE_RXC | UCCE_TXC | UCCE_TXE) | ||
212 | |||
213 | #define UCCE_RX_EVENTS (UCCE_RXF | UCCE_BSY) | ||
214 | #define UCCE_TX_EVENTS (UCCE_TXB | UCCE_TXE) | ||
215 | |||
216 | /* UCC GETH UPSMR (Protocol Specific Mode Register) */ | ||
217 | #define UPSMR_ECM 0x04000000 /* Enable CAM | ||
218 | Miss or | ||
219 | Enable | ||
220 | Filtering | ||
221 | Miss */ | ||
222 | #define UPSMR_HSE 0x02000000 /* Hardware | ||
223 | Statistics | ||
224 | Enable */ | ||
225 | #define UPSMR_PRO 0x00400000 /* Promiscuous*/ | ||
226 | #define UPSMR_CAP 0x00200000 /* CAM polarity | ||
227 | */ | ||
228 | #define UPSMR_RSH 0x00100000 /* Receive | ||
229 | Short Frames | ||
230 | */ | ||
231 | #define UPSMR_RPM 0x00080000 /* Reduced Pin | ||
232 | Mode | ||
233 | interfaces */ | ||
234 | #define UPSMR_R10M 0x00040000 /* RGMII/RMII | ||
235 | 10 Mode */ | ||
236 | #define UPSMR_RLPB 0x00020000 /* RMII | ||
237 | Loopback | ||
238 | Mode */ | ||
239 | #define UPSMR_TBIM 0x00010000 /* Ten-bit | ||
240 | Interface | ||
241 | Mode */ | ||
242 | #define UPSMR_RMM 0x00001000 /* RMII/RGMII | ||
243 | Mode */ | ||
244 | #define UPSMR_CAM 0x00000400 /* CAM Address | ||
245 | Matching */ | ||
246 | #define UPSMR_BRO 0x00000200 /* Broadcast | ||
247 | Address */ | ||
248 | #define UPSMR_RES1 0x00002000 /* Reserved | ||
249 | feild - must | ||
250 | be 1 */ | ||
251 | 186 | ||
252 | /* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ | 187 | /* UCC GETH MACCFG1 (MAC Configuration 1 Register) */ |
253 | #define MACCFG1_FLOW_RX 0x00000020 /* Flow Control | 188 | #define MACCFG1_FLOW_RX 0x00000020 /* Flow Control |
@@ -945,9 +880,10 @@ struct ucc_geth_hardware_statistics { | |||
945 | #define UCC_GETH_REMODER_INIT 0 /* bits that must be | 880 | #define UCC_GETH_REMODER_INIT 0 /* bits that must be |
946 | set */ | 881 | set */ |
947 | #define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must */ | 882 | #define UCC_GETH_TEMODER_INIT 0xC000 /* bits that must */ |
948 | #define UCC_GETH_UPSMR_INIT (UPSMR_RES1) /* Start value | 883 | |
949 | for this | 884 | /* Initial value for UPSMR */ |
950 | register */ | 885 | #define UCC_GETH_UPSMR_INIT UCC_GETH_UPSMR_RES1 |
886 | |||
951 | #define UCC_GETH_MACCFG1_INIT 0 | 887 | #define UCC_GETH_MACCFG1_INIT 0 |
952 | #define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) | 888 | #define UCC_GETH_MACCFG2_INIT (MACCFG2_RESERVED_1) |
953 | 889 | ||
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index ac07cc6e3cb2..3b8e63254277 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -622,6 +622,7 @@ static const struct net_device_ops rhine_netdev_ops = { | |||
622 | .ndo_get_stats = rhine_get_stats, | 622 | .ndo_get_stats = rhine_get_stats, |
623 | .ndo_set_multicast_list = rhine_set_rx_mode, | 623 | .ndo_set_multicast_list = rhine_set_rx_mode, |
624 | .ndo_validate_addr = eth_validate_addr, | 624 | .ndo_validate_addr = eth_validate_addr, |
625 | .ndo_set_mac_address = eth_mac_addr, | ||
625 | .ndo_do_ioctl = netdev_ioctl, | 626 | .ndo_do_ioctl = netdev_ioctl, |
626 | .ndo_tx_timeout = rhine_tx_timeout, | 627 | .ndo_tx_timeout = rhine_tx_timeout, |
627 | #ifdef CONFIG_NET_POLL_CONTROLLER | 628 | #ifdef CONFIG_NET_POLL_CONTROLLER |
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c index 58e25d090ae0..a75f91dc3153 100644 --- a/drivers/net/via-velocity.c +++ b/drivers/net/via-velocity.c | |||
@@ -855,6 +855,7 @@ static const struct net_device_ops velocity_netdev_ops = { | |||
855 | .ndo_start_xmit = velocity_xmit, | 855 | .ndo_start_xmit = velocity_xmit, |
856 | .ndo_get_stats = velocity_get_stats, | 856 | .ndo_get_stats = velocity_get_stats, |
857 | .ndo_validate_addr = eth_validate_addr, | 857 | .ndo_validate_addr = eth_validate_addr, |
858 | .ndo_set_mac_address = eth_mac_addr, | ||
858 | .ndo_set_multicast_list = velocity_set_multi, | 859 | .ndo_set_multicast_list = velocity_set_multi, |
859 | .ndo_change_mtu = velocity_change_mtu, | 860 | .ndo_change_mtu = velocity_change_mtu, |
860 | .ndo_do_ioctl = velocity_ioctl, | 861 | .ndo_do_ioctl = velocity_ioctl, |
diff --git a/drivers/net/wd.c b/drivers/net/wd.c index 3c1edda08d3d..d8322d2d1e29 100644 --- a/drivers/net/wd.c +++ b/drivers/net/wd.c | |||
@@ -155,6 +155,7 @@ static const struct net_device_ops wd_netdev_ops = { | |||
155 | .ndo_get_stats = ei_get_stats, | 155 | .ndo_get_stats = ei_get_stats, |
156 | .ndo_set_multicast_list = ei_set_multicast_list, | 156 | .ndo_set_multicast_list = ei_set_multicast_list, |
157 | .ndo_validate_addr = eth_validate_addr, | 157 | .ndo_validate_addr = eth_validate_addr, |
158 | .ndo_set_mac_address = eth_mac_addr, | ||
158 | .ndo_change_mtu = eth_change_mtu, | 159 | .ndo_change_mtu = eth_change_mtu, |
159 | #ifdef CONFIG_NET_POLL_CONTROLLER | 160 | #ifdef CONFIG_NET_POLL_CONTROLLER |
160 | .ndo_poll_controller = ei_poll, | 161 | .ndo_poll_controller = ei_poll, |
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index 3dba83679444..4e0007d20030 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c | |||
@@ -1369,7 +1369,7 @@ EXPORT_SYMBOL_GPL(lbs_start_card); | |||
1369 | 1369 | ||
1370 | void lbs_stop_card(struct lbs_private *priv) | 1370 | void lbs_stop_card(struct lbs_private *priv) |
1371 | { | 1371 | { |
1372 | struct net_device *dev = priv->dev; | 1372 | struct net_device *dev; |
1373 | struct cmd_ctrl_node *cmdnode; | 1373 | struct cmd_ctrl_node *cmdnode; |
1374 | unsigned long flags; | 1374 | unsigned long flags; |
1375 | 1375 | ||
@@ -1377,9 +1377,10 @@ void lbs_stop_card(struct lbs_private *priv) | |||
1377 | 1377 | ||
1378 | if (!priv) | 1378 | if (!priv) |
1379 | goto out; | 1379 | goto out; |
1380 | dev = priv->dev; | ||
1380 | 1381 | ||
1381 | netif_stop_queue(priv->dev); | 1382 | netif_stop_queue(dev); |
1382 | netif_carrier_off(priv->dev); | 1383 | netif_carrier_off(dev); |
1383 | 1384 | ||
1384 | lbs_debugfs_remove_one(priv); | 1385 | lbs_debugfs_remove_one(priv); |
1385 | if (priv->mesh_tlv) { | 1386 | if (priv->mesh_tlv) { |
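The libertas hunk above reorders lbs_stop_card() so that priv->dev is read only after the NULL check on priv; previously the initializer dereferenced priv before the check could take effect. A reduced sketch of the ordering change, using a hypothetical stand-in struct and omitting the rest of the teardown path:

#include <linux/netdevice.h>

/* Hypothetical stand-in for the driver's private struct; only the
 * ordering of the dereference versus the NULL check matters here. */
struct lbs_priv_sketch {
	struct net_device *dev;
};

/* Before (sketch): priv is dereferenced in the initializer, so the
 * later NULL check cannot protect it. */
static void stop_card_old(struct lbs_priv_sketch *priv)
{
	struct net_device *dev = priv->dev;	/* oops if priv == NULL */

	if (!priv)
		return;
	netif_stop_queue(dev);
}

/* After (sketch): the dereference happens only once priv is known
 * to be non-NULL, matching the hunk above. */
static void stop_card_new(struct lbs_priv_sketch *priv)
{
	struct net_device *dev;

	if (!priv)
		return;
	dev = priv->dev;
	netif_stop_queue(dev);
}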
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index cf9712922778..2f1645dcb8c8 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -362,6 +362,7 @@ static const struct net_device_ops netdev_ops = { | |||
362 | .ndo_set_multicast_list = set_rx_mode, | 362 | .ndo_set_multicast_list = set_rx_mode, |
363 | .ndo_change_mtu = eth_change_mtu, | 363 | .ndo_change_mtu = eth_change_mtu, |
364 | .ndo_validate_addr = eth_validate_addr, | 364 | .ndo_validate_addr = eth_validate_addr, |
365 | .ndo_set_mac_address = eth_mac_addr, | ||
365 | .ndo_do_ioctl = netdev_ioctl, | 366 | .ndo_do_ioctl = netdev_ioctl, |
366 | .ndo_tx_timeout = yellowfin_tx_timeout, | 367 | .ndo_tx_timeout = yellowfin_tx_timeout, |
367 | }; | 368 | }; |
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c index affd904deafc..37c84e3b8be0 100644 --- a/drivers/net/zorro8390.c +++ b/drivers/net/zorro8390.c | |||
@@ -147,6 +147,7 @@ static const struct net_device_ops zorro8390_netdev_ops = { | |||
147 | .ndo_get_stats = ei_get_stats, | 147 | .ndo_get_stats = ei_get_stats, |
148 | .ndo_set_multicast_list = ei_set_multicast_list, | 148 | .ndo_set_multicast_list = ei_set_multicast_list, |
149 | .ndo_validate_addr = eth_validate_addr, | 149 | .ndo_validate_addr = eth_validate_addr, |
150 | .ndo_set_mac_address = eth_mac_addr, | ||
150 | .ndo_change_mtu = eth_change_mtu, | 151 | .ndo_change_mtu = eth_change_mtu, |
151 | #ifdef CONFIG_NET_POLL_CONTROLLER | 152 | #ifdef CONFIG_NET_POLL_CONTROLLER |
152 | .ndo_poll_controller = ei_poll, | 153 | .ndo_poll_controller = ei_poll, |