author     Linus Torvalds <torvalds@woody.osdl.org>  2006-12-11 15:22:58 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2006-12-11 15:22:58 -0500
commit     116140b7f5c9182c86a4e419f81684209357aea7 (patch)
tree       596e6d1a3121c022afd1bf29637b6bc6bb06d086
parent     8d610dd52dd1da696e199e4b4545f33a2a5de5c6 (diff)
parent     8af905b4a403ce74b8d907b50bccc453a58834bc (diff)
Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6:
[PATCH] smc91x: Kill off excessive versatile hooks.
[PATCH] myri10ge: update driver version to 1.1.0
[PATCH] myri10ge: fix big_bytes in case of vlan frames
[PATCH] myri10ge: Full vlan frame in small_bytes
[PATCH] myri10ge: drop contiguous skb routines
[PATCH] myri10ge: switch to page-based skb
[PATCH] myri10ge: add page-based skb routines
[PATCH] myri10ge: indentation cleanups
[PATCH] chelsio: working NAPI
[PATCH] MACB: Use __raw register access
[PATCH] MACB: Use struct delayed_work instead of struct work_struct
[PATCH] ucc_geth: Initialize mdio_lock.
[PATCH] ucc_geth: compilation error fixes
-rw-r--r--  drivers/net/Kconfig              |   8
-rw-r--r--  drivers/net/chelsio/cxgb2.c      |  23
-rw-r--r--  drivers/net/chelsio/sge.c        | 115
-rw-r--r--  drivers/net/chelsio/sge.h        |   4
-rw-r--r--  drivers/net/macb.c               |   8
-rw-r--r--  drivers/net/macb.h               |   6
-rw-r--r--  drivers/net/myri10ge/myri10ge.c  | 498
-rw-r--r--  drivers/net/smc91x.h             |  90
-rw-r--r--  drivers/net/ucc_geth.c           |  12
9 files changed, 334 insertions, 430 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 9de0eed6755b..8aa8dd02b910 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2384,6 +2384,14 @@ config CHELSIO_T1_1G
 	  Enables support for Chelsio's gigabit Ethernet PCI cards. If you
 	  are using only 10G cards say 'N' here.
 
+config CHELSIO_T1_NAPI
+	bool "Use Rx Polling (NAPI)"
+	depends on CHELSIO_T1
+	default y
+	help
+	  NAPI is a driver API designed to reduce CPU and interrupt load
+	  when the driver is receiving lots of packets from the card.
+
 config EHEA
 	tristate "eHEA Ethernet support"
 	depends on IBMEBUS
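A bool option like this costs nothing at run time: when it is enabled, Kconfig defines CONFIG_CHELSIO_T1_NAPI for the preprocessor and the driver selects its receive path at compile time. A minimal illustration of the pattern, using the same calls the sge.c hunks below switch between:

	#ifdef CONFIG_CHELSIO_T1_NAPI
		netif_receive_skb(skb);		/* NAPI path, runs from dev->poll */
	#else
		netif_rx(skb);			/* classic interrupt-time receive */
	#endif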
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index de48eadddbc4..fd5d821f3f2a 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -220,9 +220,8 @@ static int cxgb_up(struct adapter *adapter)
 
 	t1_interrupts_clear(adapter);
 
-	adapter->params.has_msi = !disable_msi && pci_enable_msi(adapter->pdev) == 0;
-	err = request_irq(adapter->pdev->irq,
-			  t1_select_intr_handler(adapter),
+	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
+	err = request_irq(adapter->pdev->irq, t1_interrupt,
 			  adapter->params.has_msi ? 0 : IRQF_SHARED,
 			  adapter->name, adapter);
 	if (err) {
@@ -764,18 +763,7 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
 {
 	struct adapter *adapter = dev->priv;
 
-	/*
-	 * If RX coalescing is requested we use NAPI, otherwise interrupts.
-	 * This choice can be made only when all ports and the TOE are off.
-	 */
-	if (adapter->open_device_map == 0)
-		adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
-
-	if (adapter->params.sge.polling) {
-		adapter->params.sge.rx_coalesce_usecs = 0;
-	} else {
-		adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
-	}
+	adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
 	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
 	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
 	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
@@ -944,7 +932,7 @@ static void t1_netpoll(struct net_device *dev)
 	struct adapter *adapter = dev->priv;
 
 	local_irq_save(flags);
-	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter);
+	t1_interrupt(adapter->pdev->irq, adapter);
 	local_irq_restore(flags);
 }
 #endif
@@ -1165,7 +1153,10 @@ static int __devinit init_one(struct pci_dev *pdev,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 		netdev->poll_controller = t1_netpoll;
 #endif
+#ifdef CONFIG_CHELSIO_T1_NAPI
 		netdev->weight = 64;
+		netdev->poll = t1_poll;
+#endif
 
 		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
 	}
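With the receive path chosen at build time there is only one interrupt handler left, so the t1_select_intr_handler() indirection disappears and cxgb_up() registers t1_interrupt directly. The setup is the usual MSI-then-request_irq sequence; a condensed sketch (the pci_disable_msi() cleanup on failure is an assumption, it is not shown in the hunk above):

	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err && adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);	/* assumed cleanup on failure */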
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 0ca8d876e16f..659cb2252e44 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1413,16 +1413,20 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 
 	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
 		st->vlan_xtract++;
-		if (adapter->params.sge.polling)
-			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
-						 ntohs(p->vlan));
-		else
-			vlan_hwaccel_rx(skb, adapter->vlan_grp,
-					ntohs(p->vlan));
-	} else if (adapter->params.sge.polling)
+#ifdef CONFIG_CHELSIO_T1_NAPI
+		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+					 ntohs(p->vlan));
+#else
+		vlan_hwaccel_rx(skb, adapter->vlan_grp,
+				ntohs(p->vlan));
+#endif
+	} else {
+#ifdef CONFIG_CHELSIO_T1_NAPI
 		netif_receive_skb(skb);
-	else
+#else
 		netif_rx(skb);
+#endif
+	}
 	return 0;
 }
 
@@ -1572,6 +1576,7 @@ static int process_responses(struct adapter *adapter, int budget)
 	return budget;
 }
 
+#ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
  * non data-carrying) responses. Such respones are too light-weight to justify
@@ -1619,92 +1624,76 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
  * or protection from interrupts as data interrupts are off at this point and
  * other adapter interrupts do not interfere.
  */
-static int t1_poll(struct net_device *dev, int *budget)
+int t1_poll(struct net_device *dev, int *budget)
 {
 	struct adapter *adapter = dev->priv;
 	int effective_budget = min(*budget, dev->quota);
-
 	int work_done = process_responses(adapter, effective_budget);
+
 	*budget -= work_done;
 	dev->quota -= work_done;
 
 	if (work_done >= effective_budget)
 		return 1;
 
+	spin_lock_irq(&adapter->async_lock);
 	__netif_rx_complete(dev);
-
-	/*
-	 * Because we don't atomically flush the following write it is
-	 * possible that in very rare cases it can reach the device in a way
-	 * that races with a new response being written plus an error interrupt
-	 * causing the NAPI interrupt handler below to return unhandled status
-	 * to the OS. To protect against this would require flushing the write
-	 * and doing both the write and the flush with interrupts off. Way too
-	 * expensive and unjustifiable given the rarity of the race.
-	 */
 	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-	return 0;
-}
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+	       adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
 
-/*
- * Returns true if the device is already scheduled for polling.
- */
-static inline int napi_is_scheduled(struct net_device *dev)
-{
-	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+	return 0;
 }
 
 /*
  * NAPI version of the main interrupt handler.
  */
-static irqreturn_t t1_interrupt_napi(int irq, void *data)
+irqreturn_t t1_interrupt(int irq, void *data)
 {
-	int handled;
 	struct adapter *adapter = data;
+	struct net_device *dev = adapter->sge->netdev;
 	struct sge *sge = adapter->sge;
-	struct respQ *q = &adapter->sge->respQ;
+	u32 cause;
+	int handled = 0;
 
-	/*
-	 * Clear the SGE_DATA interrupt first thing. Normally the NAPI
-	 * handler has control of the response queue and the interrupt handler
-	 * can look at the queue reliably only once it knows NAPI is off.
-	 * We can't wait that long to clear the SGE_DATA interrupt because we
-	 * could race with t1_poll rearming the SGE interrupt, so we need to
-	 * clear the interrupt speculatively and really early on.
-	 */
-	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+	cause = readl(adapter->regs + A_PL_CAUSE);
+	if (cause == 0 || cause == ~0)
+		return IRQ_NONE;
 
 	spin_lock(&adapter->async_lock);
-	if (!napi_is_scheduled(sge->netdev)) {
+	if (cause & F_PL_INTR_SGE_DATA) {
+		struct respQ *q = &adapter->sge->respQ;
 		struct respQ_e *e = &q->entries[q->cidx];
 
-		if (e->GenerationBit == q->genbit) {
-			if (e->DataValid ||
-			    process_pure_responses(adapter, e)) {
-				if (likely(__netif_rx_schedule_prep(sge->netdev)))
-					__netif_rx_schedule(sge->netdev);
-				else if (net_ratelimit())
-					printk(KERN_INFO
-					       "NAPI schedule failure!\n");
-			} else
-				writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-
-			handled = 1;
-			goto unlock;
-		} else
-			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-	} else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA) {
-		printk(KERN_ERR "data interrupt while NAPI running\n");
-	}
-
-	handled = t1_slow_intr_handler(adapter);
+		handled = 1;
+		writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+		if (e->GenerationBit == q->genbit &&
+		    __netif_rx_schedule_prep(dev)) {
+			if (e->DataValid || process_pure_responses(adapter, e)) {
+				/* mask off data IRQ */
+				writel(adapter->slow_intr_mask,
+				       adapter->regs + A_PL_ENABLE);
+				__netif_rx_schedule(sge->netdev);
+				goto unlock;
+			}
+			/* no data, no NAPI needed */
+			netif_poll_enable(dev);
+
+		}
+		writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+	} else
+		handled = t1_slow_intr_handler(adapter);
+
 	if (!handled)
 		sge->stats.unhandled_irqs++;
 unlock:
 	spin_unlock(&adapter->async_lock);
 	return IRQ_RETVAL(handled != 0);
 }
 
+#else
 /*
  * Main interrupt handler, optimized assuming that we took a 'DATA'
  * interrupt.
@@ -1720,7 +1709,7 @@ static irqreturn_t t1_interrupt_napi(int irq, void *data)
  * 5. If we took an interrupt, but no valid respQ descriptors was found we
  *    let the slow_intr_handler run and do error handling.
  */
-static irqreturn_t t1_interrupt(int irq, void *cookie)
+irqreturn_t t1_interrupt(int irq, void *cookie)
 {
 	int work_done;
 	struct respQ_e *e;
@@ -1752,11 +1741,7 @@ static irqreturn_t t1_interrupt(int irq, void *cookie)
 	spin_unlock(&adapter->async_lock);
 	return IRQ_RETVAL(work_done != 0);
 }
-
-irq_handler_t t1_select_intr_handler(adapter_t *adapter)
-{
-	return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
-}
+#endif
 
 /*
  * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
@@ -2033,7 +2018,6 @@ static void sge_tx_reclaim_cb(unsigned long data)
  */
 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
 {
-	sge->netdev->poll = t1_poll;
 	sge->fixed_intrtimer = p->rx_coalesce_usecs *
 		core_ticks_per_usec(sge->adapter);
 	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
@@ -2234,7 +2218,6 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
 
 	p->coalesce_enable = 0;
 	p->sample_interval_usecs = 0;
-	p->polling = 0;
 
 	return sge;
 nomem_port:
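t1_poll() above follows the original, pre-2.6.24 NAPI contract, where the poll method hangs directly off the net_device. For reference, a minimal hedged sketch of that contract; my_poll(), my_rx() and my_enable_rx_irq() are hypothetical names, not part of this patch:

	/* Old-style NAPI: driver sets dev->poll = my_poll and dev->weight = 64.
	 * my_rx() is a hypothetical routine that handles up to 'limit' packets
	 * and returns how many it processed. */
	static int my_poll(struct net_device *dev, int *budget)
	{
		int limit = min(*budget, dev->quota);
		int done = my_rx(dev, limit);

		*budget -= done;
		dev->quota -= done;
		if (done >= limit)
			return 1;		/* more work: stay on the poll list */

		netif_rx_complete(dev);		/* leave polling mode... */
		my_enable_rx_irq(dev);		/* ...then re-arm the rx interrupt */
		return 0;
	}

Note how the patched t1_poll() takes async_lock around exactly this completion/re-arm step, which is what lets it drop the old comment about the unflushed-write race.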
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
index 7ceb0117d039..d132a0ef2a22 100644
--- a/drivers/net/chelsio/sge.h
+++ b/drivers/net/chelsio/sge.h
@@ -76,7 +76,9 @@ struct sge *t1_sge_create(struct adapter *, struct sge_params *);
 int t1_sge_configure(struct sge *, struct sge_params *);
 int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
 void t1_sge_destroy(struct sge *);
-irq_handler_t t1_select_intr_handler(adapter_t *adapter);
+irqreturn_t t1_interrupt(int irq, void *cookie);
+int t1_poll(struct net_device *, int *);
+
 int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
 void t1_set_vlan_accel(struct adapter *adapter, int on_off);
 void t1_sge_start(struct sge *);
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index bd0ce98c939c..25b559b5d5ed 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -264,12 +264,12 @@ static void macb_update_stats(struct macb *bp)
 	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
 
 	for(; p < end; p++, reg++)
-		*p += readl(reg);
+		*p += __raw_readl(reg);
 }
 
-static void macb_periodic_task(void *arg)
+static void macb_periodic_task(struct work_struct *work)
 {
-	struct macb *bp = arg;
+	struct macb *bp = container_of(work, struct macb, periodic_task.work);
 
 	macb_update_stats(bp);
 	macb_check_media(bp, 1, 0);
@@ -1088,7 +1088,7 @@ static int __devinit macb_probe(struct platform_device *pdev)
 
 	dev->base_addr = regs->start;
 
-	INIT_WORK(&bp->periodic_task, macb_periodic_task, bp);
+	INIT_DELAYED_WORK(&bp->periodic_task, macb_periodic_task);
 	mutex_init(&bp->mdio_mutex);
 	init_completion(&bp->mdio_complete);
 
diff --git a/drivers/net/macb.h b/drivers/net/macb.h
index 8c253db69881..27bf0ae0f0bb 100644
--- a/drivers/net/macb.h
+++ b/drivers/net/macb.h
@@ -250,9 +250,9 @@
 
 /* Register access macros */
 #define macb_readl(port,reg)	\
-	readl((port)->regs + MACB_##reg)
+	__raw_readl((port)->regs + MACB_##reg)
 #define macb_writel(port,reg,value)	\
-	writel((value), (port)->regs + MACB_##reg)
+	__raw_writel((value), (port)->regs + MACB_##reg)
 
 struct dma_desc {
 	u32	addr;
@@ -377,7 +377,7 @@ struct macb {
 
 	unsigned int		rx_pending, tx_pending;
 
-	struct work_struct	periodic_task;
+	struct delayed_work	periodic_task;
 
 	struct mutex		mdio_mutex;
 	struct completion	mdio_complete;
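The two macb hunks are a 2.6.20-era workqueue conversion: work handlers now receive the work_struct itself instead of a void * context, recover their object with container_of(), and periodically rescheduled work becomes a struct delayed_work. A minimal sketch of the same shape, with a hypothetical struct foo standing in for struct macb:

	#include <linux/workqueue.h>

	struct foo {
		struct delayed_work periodic_task;	/* was struct work_struct */
	};

	static void foo_periodic_task(struct work_struct *work)
	{
		/* container_of() replaces the old void *arg parameter;
		 * delayed_work embeds the work_struct as .work */
		struct foo *bp = container_of(work, struct foo,
					      periodic_task.work);
		/* ... do the periodic work using bp ... */
		schedule_delayed_work(&bp->periodic_task, HZ);	/* run again in ~1s */
	}

	static void foo_start(struct foo *bp)
	{
		/* no context pointer in the new API */
		INIT_DELAYED_WORK(&bp->periodic_task, foo_periodic_task);
		schedule_delayed_work(&bp->periodic_task, HZ);
	}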
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 81f127a78afa..94ac168be593 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -71,7 +71,7 @@
 #include "myri10ge_mcp.h"
 #include "myri10ge_mcp_gen_header.h"
 
-#define MYRI10GE_VERSION_STR "1.0.0"
+#define MYRI10GE_VERSION_STR "1.1.0"
 
 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
 MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -92,8 +92,13 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define MYRI10GE_NO_CONFIRM_DATA htonl(0xffffffff)
 #define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
 
+#define MYRI10GE_ALLOC_ORDER 0
+#define MYRI10GE_ALLOC_SIZE ((1 << MYRI10GE_ALLOC_ORDER) * PAGE_SIZE)
+#define MYRI10GE_MAX_FRAGS_PER_FRAME (MYRI10GE_MAX_ETHER_MTU/MYRI10GE_ALLOC_SIZE + 1)
+
 struct myri10ge_rx_buffer_state {
-	struct sk_buff *skb;
+	struct page *page;
+	int page_offset;
 	DECLARE_PCI_UNMAP_ADDR(bus)
 	DECLARE_PCI_UNMAP_LEN(len)
 };
@@ -116,9 +121,14 @@ struct myri10ge_rx_buf {
 	u8 __iomem *wc_fifo;	/* w/c rx dma addr fifo address */
 	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
 	struct myri10ge_rx_buffer_state *info;
+	struct page *page;
+	dma_addr_t bus;
+	int page_offset;
 	int cnt;
+	int fill_cnt;
 	int alloc_fail;
 	int mask;		/* number of rx slots -1 */
+	int watchdog_needed;
 };
 
 struct myri10ge_tx_buf {
@@ -150,6 +160,7 @@ struct myri10ge_priv {
 	struct myri10ge_rx_buf rx_big;
 	struct myri10ge_rx_done rx_done;
 	int small_bytes;
+	int big_bytes;
 	struct net_device *dev;
 	struct net_device_stats stats;
 	u8 __iomem *sram;
@@ -238,11 +249,6 @@ module_param(myri10ge_force_firmware, int, S_IRUGO);
 MODULE_PARM_DESC(myri10ge_force_firmware,
 		 "Force firmware to assume aligned completions\n");
 
-static int myri10ge_skb_cross_4k = 0;
-module_param(myri10ge_skb_cross_4k, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(myri10ge_skb_cross_4k,
-		 "Can a small skb cross a 4KB boundary?\n");
-
 static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
 module_param(myri10ge_initial_mtu, int, S_IRUGO);
 MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n");
@@ -266,6 +272,10 @@ static int myri10ge_debug = -1;	/* defaults above */
 module_param(myri10ge_debug, int, 0);
 MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
 
+static int myri10ge_fill_thresh = 256;
+module_param(myri10ge_fill_thresh, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_fill_thresh, "Number of empty rx slots allowed\n");
+
 #define MYRI10GE_FW_OFFSET 1024*1024
 #define MYRI10GE_HIGHPART_TO_U32(X) \
 (sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
@@ -273,9 +283,9 @@ MODULE_PARM_DESC(myri10ge_debug, "Debug level (0=none,...,16=all)");
 
 #define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
 
-static inline void put_be32(__be32 val, __be32 __iomem *p)
+static inline void put_be32(__be32 val, __be32 __iomem * p)
 {
-	__raw_writel((__force __u32)val, (__force void __iomem *)p);
+	__raw_writel((__force __u32) val, (__force void __iomem *)p);
 }
 
 static int
@@ -804,194 +814,179 @@ myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
 	mb();
 }
 
-/*
- * Set of routines to get a new receive buffer.  Any buffer which
- * crosses a 4KB boundary must start on a 4KB boundary due to PCIe
- * wdma restrictions.  We also try to align any smaller allocation to
- * at least a 16 byte boundary for efficiency.  We assume the linux
- * memory allocator works by powers of 2, and will not return memory
- * smaller than 2KB which crosses a 4KB boundary.  If it does, we fall
- * back to allocating 2x as much space as required.
- *
- * We intend to replace large (>4KB) skb allocations by using
- * pages directly and building a fraglist in the near future.
- */
-
-static inline struct sk_buff *myri10ge_alloc_big(struct net_device *dev,
-						 int bytes)
-{
-	struct sk_buff *skb;
-	unsigned long data, roundup;
-
-	skb = netdev_alloc_skb(dev, bytes + 4096 + MXGEFW_PAD);
-	if (skb == NULL)
-		return NULL;
-
-	/* Correct skb->truesize so that socket buffer
-	 * accounting is not confused the rounding we must
-	 * do to satisfy alignment constraints.
-	 */
-	skb->truesize -= 4096;
-
-	data = (unsigned long)(skb->data);
-	roundup = (-data) & (4095);
-	skb_reserve(skb, roundup);
-	return skb;
-}
-
-/* Allocate 2x as much space as required and use whichever portion
- * does not cross a 4KB boundary */
-static inline struct sk_buff *myri10ge_alloc_small_safe(struct net_device *dev,
-							unsigned int bytes)
-{
-	struct sk_buff *skb;
-	unsigned long data, boundary;
-
-	skb = netdev_alloc_skb(dev, 2 * (bytes + MXGEFW_PAD) - 1);
-	if (unlikely(skb == NULL))
-		return NULL;
-
-	/* Correct skb->truesize so that socket buffer
-	 * accounting is not confused the rounding we must
-	 * do to satisfy alignment constraints.
-	 */
-	skb->truesize -= bytes + MXGEFW_PAD;
-
-	data = (unsigned long)(skb->data);
-	boundary = (data + 4095UL) & ~4095UL;
-	if ((boundary - data) >= (bytes + MXGEFW_PAD))
-		return skb;
-
-	skb_reserve(skb, boundary - data);
-	return skb;
-}
-
-/* Allocate just enough space, and verify that the allocated
- * space does not cross a 4KB boundary */
-static inline struct sk_buff *myri10ge_alloc_small(struct net_device *dev,
-						   int bytes)
-{
-	struct sk_buff *skb;
-	unsigned long roundup, data, end;
-
-	skb = netdev_alloc_skb(dev, bytes + 16 + MXGEFW_PAD);
-	if (unlikely(skb == NULL))
-		return NULL;
-
-	/* Round allocated buffer to 16 byte boundary */
-	data = (unsigned long)(skb->data);
-	roundup = (-data) & 15UL;
-	skb_reserve(skb, roundup);
-	/* Verify that the data buffer does not cross a page boundary */
-	data = (unsigned long)(skb->data);
-	end = data + bytes + MXGEFW_PAD - 1;
-	if (unlikely(((end >> 12) != (data >> 12)) && (data & 4095UL))) {
-		printk(KERN_NOTICE
-		       "myri10ge_alloc_small: small skb crossed 4KB boundary\n");
-		myri10ge_skb_cross_4k = 1;
-		dev_kfree_skb_any(skb);
-		skb = myri10ge_alloc_small_safe(dev, bytes);
-	}
-	return skb;
-}
-
-static inline int
-myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct myri10ge_priv *mgp,
-		int bytes, int idx)
-{
-	struct net_device *dev = mgp->dev;
-	struct pci_dev *pdev = mgp->pdev;
-	struct sk_buff *skb;
-	dma_addr_t bus;
-	int len, retval = 0;
-
-	bytes += VLAN_HLEN;	/* account for 802.1q vlan tag */
-
-	if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ )
-		skb = myri10ge_alloc_big(dev, bytes);
-	else if (myri10ge_skb_cross_4k)
-		skb = myri10ge_alloc_small_safe(dev, bytes);
-	else
-		skb = myri10ge_alloc_small(dev, bytes);
-
-	if (unlikely(skb == NULL)) {
-		rx->alloc_fail++;
-		retval = -ENOBUFS;
-		goto done;
-	}
-
-	/* set len so that it only covers the area we
-	 * need mapped for DMA */
-	len = bytes + MXGEFW_PAD;
-
-	bus = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
-	rx->info[idx].skb = skb;
-	pci_unmap_addr_set(&rx->info[idx], bus, bus);
-	pci_unmap_len_set(&rx->info[idx], len, len);
-	rx->shadow[idx].addr_low = htonl(MYRI10GE_LOWPART_TO_U32(bus));
-	rx->shadow[idx].addr_high = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
-
-done:
-	/* copy 8 descriptors (64-bytes) to the mcp at a time */
-	if ((idx & 7) == 7) {
-		if (rx->wc_fifo == NULL)
-			myri10ge_submit_8rx(&rx->lanai[idx - 7],
-					    &rx->shadow[idx - 7]);
-		else {
-			mb();
-			myri10ge_pio_copy(rx->wc_fifo,
-					  &rx->shadow[idx - 7], 64);
-		}
-	}
-	return retval;
-}
-
 static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, __wsum hw_csum)
 {
 	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
 
 	if ((skb->protocol == htons(ETH_P_8021Q)) &&
 	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
 	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
 		skb->csum = hw_csum;
 		skb->ip_summed = CHECKSUM_COMPLETE;
 	}
 }
 
-static inline unsigned long
-myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
-		 int bytes, int len, __wsum csum)
-{
-	dma_addr_t bus;
-	struct sk_buff *skb;
-	int idx, unmap_len;
-
-	idx = rx->cnt & rx->mask;
-	rx->cnt++;
-
-	/* save a pointer to the received skb */
-	skb = rx->info[idx].skb;
-	bus = pci_unmap_addr(&rx->info[idx], bus);
-	unmap_len = pci_unmap_len(&rx->info[idx], len);
-
-	/* try to replace the received skb */
-	if (myri10ge_getbuf(rx, mgp, bytes, idx)) {
-		/* drop the frame -- the old skbuf is re-cycled */
-		mgp->stats.rx_dropped += 1;
-		return 0;
-	}
-
-	/* unmap the recvd skb */
-	pci_unmap_single(mgp->pdev, bus, unmap_len, PCI_DMA_FROMDEVICE);
-
-	/* mcp implicitly skips 1st bytes so that packet is properly
-	 * aligned */
-	skb_reserve(skb, MXGEFW_PAD);
-
-	/* set the length of the frame */
-	skb_put(skb, len);
-
-	skb->protocol = eth_type_trans(skb, mgp->dev);
+static inline void
+myri10ge_rx_skb_build(struct sk_buff *skb, u8 * va,
+		      struct skb_frag_struct *rx_frags, int len, int hlen)
+{
+	struct skb_frag_struct *skb_frags;
+
+	skb->len = skb->data_len = len;
+	skb->truesize = len + sizeof(struct sk_buff);
+	/* attach the page(s) */
+
+	skb_frags = skb_shinfo(skb)->frags;
+	while (len > 0) {
+		memcpy(skb_frags, rx_frags, sizeof(*skb_frags));
+		len -= rx_frags->size;
+		skb_frags++;
+		rx_frags++;
+		skb_shinfo(skb)->nr_frags++;
+	}
+
+	/* pskb_may_pull is not available in irq context, but
+	 * skb_pull() (for ether_pad and eth_type_trans()) requires
+	 * the beginning of the packet in skb_headlen(), move it
+	 * manually */
+	memcpy(skb->data, va, hlen);
+	skb_shinfo(skb)->frags[0].page_offset += hlen;
+	skb_shinfo(skb)->frags[0].size -= hlen;
+	skb->data_len -= hlen;
+	skb->tail += hlen;
+	skb_pull(skb, MXGEFW_PAD);
+}
+
+static void
+myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+			int bytes, int watchdog)
+{
+	struct page *page;
+	int idx;
+
+	if (unlikely(rx->watchdog_needed && !watchdog))
+		return;
+
+	/* try to refill entire ring */
+	while (rx->fill_cnt != (rx->cnt + rx->mask + 1)) {
+		idx = rx->fill_cnt & rx->mask;
+
+		if ((bytes < MYRI10GE_ALLOC_SIZE / 2) &&
+		    (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE)) {
+			/* we can use part of previous page */
+			get_page(rx->page);
+		} else {
+			/* we need a new page */
+			page =
+			    alloc_pages(GFP_ATOMIC | __GFP_COMP,
+					MYRI10GE_ALLOC_ORDER);
+			if (unlikely(page == NULL)) {
+				if (rx->fill_cnt - rx->cnt < 16)
+					rx->watchdog_needed = 1;
+				return;
+			}
+			rx->page = page;
+			rx->page_offset = 0;
+			rx->bus = pci_map_page(mgp->pdev, page, 0,
+					       MYRI10GE_ALLOC_SIZE,
+					       PCI_DMA_FROMDEVICE);
+		}
+		rx->info[idx].page = rx->page;
+		rx->info[idx].page_offset = rx->page_offset;
+		/* note that this is the address of the start of the
+		 * page */
+		pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
+		rx->shadow[idx].addr_low =
+		    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
+		rx->shadow[idx].addr_high =
+		    htonl(MYRI10GE_HIGHPART_TO_U32(rx->bus));
+
+		/* start next packet on a cacheline boundary */
+		rx->page_offset += SKB_DATA_ALIGN(bytes);
+		rx->fill_cnt++;
+
+		/* copy 8 descriptors to the firmware at a time */
+		if ((idx & 7) == 7) {
+			if (rx->wc_fifo == NULL)
+				myri10ge_submit_8rx(&rx->lanai[idx - 7],
+						    &rx->shadow[idx - 7]);
+			else {
+				mb();
+				myri10ge_pio_copy(rx->wc_fifo,
+						  &rx->shadow[idx - 7], 64);
+			}
+		}
+	}
+}
+
+static inline void
+myri10ge_unmap_rx_page(struct pci_dev *pdev,
+		       struct myri10ge_rx_buffer_state *info, int bytes)
+{
+	/* unmap the recvd page if we're the only or last user of it */
+	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
+	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
+		pci_unmap_page(pdev, (pci_unmap_addr(info, bus)
+				      & ~(MYRI10GE_ALLOC_SIZE - 1)),
+			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+	}
+}
+
+#define MYRI10GE_HLEN 64	/* The number of bytes to copy from a
+				 * page into an skb */
+
+static inline int
+myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+		 int bytes, int len, __wsum csum)
+{
+	struct sk_buff *skb;
+	struct skb_frag_struct rx_frags[MYRI10GE_MAX_FRAGS_PER_FRAME];
+	int i, idx, hlen, remainder;
+	struct pci_dev *pdev = mgp->pdev;
+	struct net_device *dev = mgp->dev;
+	u8 *va;
+
+	len += MXGEFW_PAD;
+	idx = rx->cnt & rx->mask;
+	va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
+	prefetch(va);
+	/* Fill skb_frag_struct(s) with data from our receive */
+	for (i = 0, remainder = len; remainder > 0; i++) {
+		myri10ge_unmap_rx_page(pdev, &rx->info[idx], bytes);
+		rx_frags[i].page = rx->info[idx].page;
+		rx_frags[i].page_offset = rx->info[idx].page_offset;
+		if (remainder < MYRI10GE_ALLOC_SIZE)
+			rx_frags[i].size = remainder;
+		else
+			rx_frags[i].size = MYRI10GE_ALLOC_SIZE;
+		rx->cnt++;
+		idx = rx->cnt & rx->mask;
+		remainder -= MYRI10GE_ALLOC_SIZE;
+	}
+
+	hlen = MYRI10GE_HLEN > len ? len : MYRI10GE_HLEN;
+
+	/* allocate an skb to attach the page(s) to. */
+
+	skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
+	if (unlikely(skb == NULL)) {
+		mgp->stats.rx_dropped++;
+		do {
+			i--;
+			put_page(rx_frags[i].page);
+		} while (i != 0);
+		return 0;
+	}
+
+	/* Attach the pages to the skb, and trim off any padding */
+	myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen);
+	if (skb_shinfo(skb)->frags[0].size <= 0) {
+		put_page(skb_shinfo(skb)->frags[0].page);
+		skb_shinfo(skb)->nr_frags = 0;
+	}
+	skb->protocol = eth_type_trans(skb, dev);
+	skb->dev = dev;
+
 	if (mgp->csum_flag) {
 		if ((skb->protocol == htons(ETH_P_IP)) ||
 		    (skb->protocol == htons(ETH_P_IPV6))) {
@@ -1000,9 +995,8 @@ myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
 		} else
 			myri10ge_vlan_ip_csum(skb, csum);
 	}
-
 	netif_receive_skb(skb);
-	mgp->dev->last_rx = jiffies;
+	dev->last_rx = jiffies;
 	return 1;
 }
 
@@ -1079,7 +1073,7 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
 					   length, checksum);
 		else
 			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
-						 mgp->dev->mtu + ETH_HLEN,
+						 mgp->big_bytes,
 						 length, checksum);
 		rx_packets += rx_ok;
 		rx_bytes += rx_ok * (unsigned long)length;
@@ -1094,6 +1088,14 @@ static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
 	rx_done->cnt = cnt;
 	mgp->stats.rx_packets += rx_packets;
 	mgp->stats.rx_bytes += rx_bytes;
+
+	/* restock receive rings if needed */
+	if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt < myri10ge_fill_thresh)
+		myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+					mgp->small_bytes + MXGEFW_PAD, 0);
+	if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt < myri10ge_fill_thresh)
+		myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
+
 }
 
 static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
@@ -1484,56 +1486,48 @@ static int myri10ge_allocate_rings(struct net_device *dev)
 		goto abort_with_rx_small_info;
 
 	/* Fill the receive rings */
+	mgp->rx_big.cnt = 0;
+	mgp->rx_small.cnt = 0;
+	mgp->rx_big.fill_cnt = 0;
+	mgp->rx_small.fill_cnt = 0;
+	mgp->rx_small.page_offset = MYRI10GE_ALLOC_SIZE;
+	mgp->rx_big.page_offset = MYRI10GE_ALLOC_SIZE;
+	mgp->rx_small.watchdog_needed = 0;
+	mgp->rx_big.watchdog_needed = 0;
+	myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+				mgp->small_bytes + MXGEFW_PAD, 0);
 
-	for (i = 0; i <= mgp->rx_small.mask; i++) {
-		status = myri10ge_getbuf(&mgp->rx_small, mgp,
-					 mgp->small_bytes, i);
-		if (status) {
-			printk(KERN_ERR
-			       "myri10ge: %s: alloced only %d small bufs\n",
-			       dev->name, i);
-			goto abort_with_rx_small_ring;
-		}
+	if (mgp->rx_small.fill_cnt < mgp->rx_small.mask + 1) {
+		printk(KERN_ERR "myri10ge: %s: alloced only %d small bufs\n",
+		       dev->name, mgp->rx_small.fill_cnt);
+		goto abort_with_rx_small_ring;
 	}
 
-	for (i = 0; i <= mgp->rx_big.mask; i++) {
-		status =
-		    myri10ge_getbuf(&mgp->rx_big, mgp, dev->mtu + ETH_HLEN, i);
-		if (status) {
-			printk(KERN_ERR
-			       "myri10ge: %s: alloced only %d big bufs\n",
-			       dev->name, i);
-			goto abort_with_rx_big_ring;
-		}
+	myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 0);
+	if (mgp->rx_big.fill_cnt < mgp->rx_big.mask + 1) {
+		printk(KERN_ERR "myri10ge: %s: alloced only %d big bufs\n",
+		       dev->name, mgp->rx_big.fill_cnt);
+		goto abort_with_rx_big_ring;
 	}
 
 	return 0;
 
 abort_with_rx_big_ring:
-	for (i = 0; i <= mgp->rx_big.mask; i++) {
-		if (mgp->rx_big.info[i].skb != NULL)
-			dev_kfree_skb_any(mgp->rx_big.info[i].skb);
-		if (pci_unmap_len(&mgp->rx_big.info[i], len))
-			pci_unmap_single(mgp->pdev,
-					 pci_unmap_addr(&mgp->rx_big.info[i],
-							bus),
-					 pci_unmap_len(&mgp->rx_big.info[i],
-						       len),
-					 PCI_DMA_FROMDEVICE);
+	for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
+		int idx = i & mgp->rx_big.mask;
+		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+				       mgp->big_bytes);
+		put_page(mgp->rx_big.info[idx].page);
 	}
 
 abort_with_rx_small_ring:
-	for (i = 0; i <= mgp->rx_small.mask; i++) {
-		if (mgp->rx_small.info[i].skb != NULL)
-			dev_kfree_skb_any(mgp->rx_small.info[i].skb);
-		if (pci_unmap_len(&mgp->rx_small.info[i], len))
-			pci_unmap_single(mgp->pdev,
-					 pci_unmap_addr(&mgp->rx_small.info[i],
-							bus),
-					 pci_unmap_len(&mgp->rx_small.info[i],
-						       len),
-					 PCI_DMA_FROMDEVICE);
+	for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
+		int idx = i & mgp->rx_small.mask;
+		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+				       mgp->small_bytes + MXGEFW_PAD);
+		put_page(mgp->rx_small.info[idx].page);
 	}
+
 	kfree(mgp->rx_big.info);
 
 abort_with_rx_small_info:
@@ -1566,30 +1560,24 @@ static void myri10ge_free_rings(struct net_device *dev)
 
 	mgp = netdev_priv(dev);
 
-	for (i = 0; i <= mgp->rx_big.mask; i++) {
-		if (mgp->rx_big.info[i].skb != NULL)
-			dev_kfree_skb_any(mgp->rx_big.info[i].skb);
-		if (pci_unmap_len(&mgp->rx_big.info[i], len))
-			pci_unmap_single(mgp->pdev,
-					 pci_unmap_addr(&mgp->rx_big.info[i],
-							bus),
-					 pci_unmap_len(&mgp->rx_big.info[i],
-						       len),
-					 PCI_DMA_FROMDEVICE);
-	}
-
-	for (i = 0; i <= mgp->rx_small.mask; i++) {
-		if (mgp->rx_small.info[i].skb != NULL)
-			dev_kfree_skb_any(mgp->rx_small.info[i].skb);
-		if (pci_unmap_len(&mgp->rx_small.info[i], len))
-			pci_unmap_single(mgp->pdev,
-					 pci_unmap_addr(&mgp->rx_small.info[i],
-							bus),
-					 pci_unmap_len(&mgp->rx_small.info[i],
-						       len),
-					 PCI_DMA_FROMDEVICE);
+	for (i = mgp->rx_big.cnt; i < mgp->rx_big.fill_cnt; i++) {
+		idx = i & mgp->rx_big.mask;
+		if (i == mgp->rx_big.fill_cnt - 1)
+			mgp->rx_big.info[idx].page_offset = MYRI10GE_ALLOC_SIZE;
+		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_big.info[idx],
+				       mgp->big_bytes);
+		put_page(mgp->rx_big.info[idx].page);
 	}
 
+	for (i = mgp->rx_small.cnt; i < mgp->rx_small.fill_cnt; i++) {
+		idx = i & mgp->rx_small.mask;
+		if (i == mgp->rx_small.fill_cnt - 1)
+			mgp->rx_small.info[idx].page_offset =
+			    MYRI10GE_ALLOC_SIZE;
+		myri10ge_unmap_rx_page(mgp->pdev, &mgp->rx_small.info[idx],
+				       mgp->small_bytes + MXGEFW_PAD);
+		put_page(mgp->rx_small.info[idx].page);
+	}
 	tx = &mgp->tx;
 	while (tx->done != tx->req) {
 		idx = tx->done & tx->mask;
@@ -1657,19 +1645,18 @@ static int myri10ge_open(struct net_device *dev)
 	 */
 
 	if (dev->mtu <= ETH_DATA_LEN)
-		mgp->small_bytes = 128;	/* enough for a TCP header */
+		/* enough for a TCP header */
+		mgp->small_bytes = (128 > SMP_CACHE_BYTES)
+		    ? (128 - MXGEFW_PAD)
+		    : (SMP_CACHE_BYTES - MXGEFW_PAD);
 	else
-		mgp->small_bytes = ETH_FRAME_LEN;	/* enough for an ETH_DATA_LEN frame */
+		/* enough for a vlan encapsulated ETH_DATA_LEN frame */
+		mgp->small_bytes = VLAN_ETH_FRAME_LEN;
 
 	/* Override the small buffer size? */
 	if (myri10ge_small_bytes > 0)
 		mgp->small_bytes = myri10ge_small_bytes;
 
-	/* If the user sets an obscenely small MTU, adjust the small
-	 * bytes down to nearly nothing */
-	if (mgp->small_bytes >= (dev->mtu + ETH_HLEN))
-		mgp->small_bytes = 64;
-
 	/* get the lanai pointers to the send and receive rings */
 
 	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
@@ -1705,17 +1692,23 @@ static int myri10ge_open(struct net_device *dev)
 		mgp->rx_big.wc_fifo = NULL;
 	}
 
-	status = myri10ge_allocate_rings(dev);
-	if (status != 0)
-		goto abort_with_nothing;
-
 	/* Firmware needs the big buff size as a power of 2.  Lie and
 	 * tell him the buffer is larger, because we only use 1
 	 * buffer/pkt, and the mtu will prevent overruns.
 	 */
-	big_pow2 = dev->mtu + ETH_HLEN + MXGEFW_PAD;
-	while ((big_pow2 & (big_pow2 - 1)) != 0)
-		big_pow2++;
+	big_pow2 = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
+	if (big_pow2 < MYRI10GE_ALLOC_SIZE / 2) {
+		while ((big_pow2 & (big_pow2 - 1)) != 0)
+			big_pow2++;
+		mgp->big_bytes = dev->mtu + ETH_HLEN + VLAN_HLEN + MXGEFW_PAD;
+	} else {
+		big_pow2 = MYRI10GE_ALLOC_SIZE;
+		mgp->big_bytes = big_pow2;
+	}
+
+	status = myri10ge_allocate_rings(dev);
+	if (status != 0)
+		goto abort_with_nothing;
 
 	/* now give firmware buffers sizes, and MTU */
 	cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
@@ -2206,7 +2199,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
 	struct myri10ge_cmd cmd;
 	struct myri10ge_priv *mgp;
 	struct dev_mc_list *mc_list;
-	__be32 data[2] = {0, 0};
+	__be32 data[2] = { 0, 0 };
 	int err;
 
 	mgp = netdev_priv(dev);
@@ -2625,7 +2618,7 @@ static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
 static void myri10ge_watchdog(struct work_struct *work)
 {
 	struct myri10ge_priv *mgp =
-		container_of(work, struct myri10ge_priv, watchdog_work);
+	    container_of(work, struct myri10ge_priv, watchdog_work);
 	u32 reboot;
 	int status;
 	u16 cmd, vendor;
@@ -2698,6 +2691,21 @@ static void myri10ge_watchdog_timer(unsigned long arg)
 	struct myri10ge_priv *mgp;
 
 	mgp = (struct myri10ge_priv *)arg;
+
+	if (mgp->rx_small.watchdog_needed) {
+		myri10ge_alloc_rx_pages(mgp, &mgp->rx_small,
+					mgp->small_bytes + MXGEFW_PAD, 1);
+		if (mgp->rx_small.fill_cnt - mgp->rx_small.cnt >=
+		    myri10ge_fill_thresh)
+			mgp->rx_small.watchdog_needed = 0;
+	}
+	if (mgp->rx_big.watchdog_needed) {
+		myri10ge_alloc_rx_pages(mgp, &mgp->rx_big, mgp->big_bytes, 1);
+		if (mgp->rx_big.fill_cnt - mgp->rx_big.cnt >=
+		    myri10ge_fill_thresh)
+			mgp->rx_big.watchdog_needed = 0;
+	}
+
 	if (mgp->tx.req != mgp->tx.done &&
 	    mgp->tx.done == mgp->watchdog_tx_done &&
 	    mgp->watchdog_tx_req != mgp->watchdog_tx_done)
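The core of the page-based receive scheme above: rx slots are filled from pages rather than pre-allocated skbs, one mapped page is shared across several slots via get_page(), and the skb is only built at receive time by attaching the pages as fragments (with just the first MYRI10GE_HLEN bytes copied into the linear area). A stripped-down sketch of one refill iteration, condensed from myri10ge_alloc_rx_pages() with the small-buffer size check and error handling dropped:

	/* One refill step (condensed; names are from the patch above) */
	if (rx->page_offset + bytes <= MYRI10GE_ALLOC_SIZE) {
		get_page(rx->page);		/* this slot shares the current page */
	} else {
		rx->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
				       MYRI10GE_ALLOC_ORDER);
		rx->page_offset = 0;
		rx->bus = pci_map_page(mgp->pdev, rx->page, 0,
				       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
	}
	rx->info[idx].page = rx->page;
	rx->info[idx].page_offset = rx->page_offset;
	rx->page_offset += SKB_DATA_ALIGN(bytes);	/* next packet starts cacheline aligned */
	rx->fill_cnt++;

If an allocation fails, the ring is left short and watchdog_needed is set, so the watchdog timer hunk above retries the refill later instead of dropping into an allocation loop in the hot path.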
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 9367c574477a..d2767e6584a9 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -362,96 +362,6 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
 
 #define SMC_IRQ_FLAGS		(0)
 
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
-#elif defined(CONFIG_ARCH_VERSATILE)
-
-#define SMC_CAN_USE_8BIT	1
-#define SMC_CAN_USE_16BIT	1
-#define SMC_CAN_USE_32BIT	1
-#define SMC_NOWAIT		1
-
-#define SMC_inb(a, r)		readb((a) + (r))
-#define SMC_inw(a, r)		readw((a) + (r))
-#define SMC_inl(a, r)		readl((a) + (r))
-#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
-#define SMC_outw(v, a, r)	writew(v, (a) + (r))
-#define SMC_outl(v, a, r)	writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS		(0)
-
 #else
 
 #define SMC_CAN_USE_8BIT	1
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 1f05511fa390..8243150f5b05 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -194,9 +194,9 @@ static void enqueue(struct list_head *node, struct list_head *lh)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(ugeth_lock, flags);
+	spin_lock_irqsave(&ugeth_lock, flags);
 	list_add_tail(node, lh);
-	spin_unlock_irqrestore(ugeth_lock, flags);
+	spin_unlock_irqrestore(&ugeth_lock, flags);
 }
 #endif /* CONFIG_UGETH_FILTERING */
 
@@ -204,14 +204,14 @@ static struct list_head *dequeue(struct list_head *lh)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(ugeth_lock, flags);
+	spin_lock_irqsave(&ugeth_lock, flags);
 	if (!list_empty(lh)) {
 		struct list_head *node = lh->next;
 		list_del(node);
-		spin_unlock_irqrestore(ugeth_lock, flags);
+		spin_unlock_irqrestore(&ugeth_lock, flags);
 		return node;
 	} else {
-		spin_unlock_irqrestore(ugeth_lock, flags);
+		spin_unlock_irqrestore(&ugeth_lock, flags);
 		return NULL;
 	}
 }
@@ -1852,6 +1852,8 @@ static int init_phy(struct net_device *dev)
 	mii_info->mdio_read = &read_phy_reg;
 	mii_info->mdio_write = &write_phy_reg;
 
+	spin_lock_init(&mii_info->mdio_lock);
+
 	ugeth->mii_info = mii_info;
 
 	spin_lock_irq(&ugeth->lock);
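Both ucc_geth fixes are spinlock plumbing. The lock primitives take a spinlock_t *, so a file-scope lock must be passed by address (the missing & was the compilation error), and a lock embedded in a dynamically allocated structure, like mii_info->mdio_lock here, must go through spin_lock_init() before first use. A minimal sketch of the corrected pattern; DEFINE_SPINLOCK is used for the declaration here, which the hunk itself does not show:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(ugeth_lock);	/* file-scope lock, statically initialized */

	static void enqueue_example(struct list_head *node, struct list_head *lh)
	{
		unsigned long flags;

		spin_lock_irqsave(&ugeth_lock, flags);	/* pass the lock by address */
		list_add_tail(node, lh);
		spin_unlock_irqrestore(&ugeth_lock, flags);
	}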