author     David S. Miller <davem@davemloft.net>	2012-04-12 19:41:23 -0400
committer  David S. Miller <davem@davemloft.net>	2012-04-12 19:41:23 -0400
commit     011e3c63251be832d23df9f0697626ab7b354d02 (patch)
tree       2cad5b58c274c93ae49d9b58fb15d784d4dfd78f /drivers/net/ethernet
parent     c1412fce7eccae62b4de22494f6ab3ff8a90c0c6 (diff)
parent     ecca5c3acc0d0933d89abc44e60afb0cc8170e35 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--	drivers/net/ethernet/tile/tilepro.c	77
1 file changed, 51 insertions(+), 26 deletions(-)
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 261356c2dc99..3d501ec7fad7 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -342,6 +342,21 @@ inline int __netio_fastio1(u32 fastio_index, u32 arg0)
 }
 
 
+static void tile_net_return_credit(struct tile_net_cpu *info)
+{
+	struct tile_netio_queue *queue = &info->queue;
+	netio_queue_user_impl_t *qup = &queue->__user_part;
+
+	/* Return four credits after every fourth packet. */
+	if (--qup->__receive_credit_remaining == 0) {
+		u32 interval = qup->__receive_credit_interval;
+		qup->__receive_credit_remaining = interval;
+		__netio_fastio_return_credits(qup->__fastio_index, interval);
+	}
+}
+
+
+
 /*
  * Provide a linux buffer to LIPP.
  */
@@ -433,7 +448,7 @@ static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
 	struct sk_buff **skb_ptr;
 
 	/* Request 96 extra bytes for alignment purposes. */
-	skb = netdev_alloc_skb(info->napi->dev, len + padding);
+	skb = netdev_alloc_skb(info->napi.dev, len + padding);
 	if (skb == NULL)
 		return false;
 
@@ -864,19 +879,11 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
 
 		stats->rx_packets++;
 		stats->rx_bytes += len;
-
-		if (small)
-			info->num_needed_small_buffers++;
-		else
-			info->num_needed_large_buffers++;
 	}
 
-	/* Return four credits after every fourth packet. */
-	if (--qup->__receive_credit_remaining == 0) {
-		u32 interval = qup->__receive_credit_interval;
-		qup->__receive_credit_remaining = interval;
-		__netio_fastio_return_credits(qup->__fastio_index, interval);
-	}
+	/* ISSUE: It would be nice to defer this until the packet has */
+	/* actually been processed. */
+	tile_net_return_credit(info);
 
 	/* Consume this packet. */
 	qup->__packet_receive_read = index2;
@@ -1543,7 +1550,7 @@ static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
 
 	/* Drain all the LIPP buffers. */
 	while (true) {
-		int buffer;
+		unsigned int buffer;
 
 		/* NOTE: This should never fail. */
 		if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
@@ -1707,7 +1714,7 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
 		if (!hash_default) {
 			void *va = pfn_to_kaddr(pfn) + f->page_offset;
 			BUG_ON(PageHighMem(skb_frag_page(f)));
-			finv_buffer_remote(va, f->size, 0);
+			finv_buffer_remote(va, skb_frag_size(f), 0);
 		}
 
 		cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
@@ -1735,8 +1742,8 @@ static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
  * Sometimes, if "sendfile()" requires copying, we will be called with
  * "data" containing the header and payload, with "frags" being empty.
  *
- * In theory, "sh->nr_frags" could be 3, but in practice, it seems
- * that this will never actually happen.
+ * Sometimes, for example when using NFS over TCP, a single segment can
+ * span 3 fragments, which must be handled carefully in LEPP.
  *
  * See "emulate_large_send_offload()" for some reference code, which
  * does not handle checksumming.
@@ -1844,10 +1851,8 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&priv->eq_lock, irqflags);
 
-	/*
-	 * Handle completions if needed to make room.
-	 * HACK: Spin until there is sufficient room.
-	 */
+	/* Handle completions if needed to make room. */
+	/* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
 	if (lepp_num_free_comp_slots(eq) == 0) {
 		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
 		if (nolds == 0) {
@@ -1861,6 +1866,7 @@ busy:
 	cmd_tail = eq->cmd_tail;
 
 	/* Prepare to advance, detecting full queue. */
+	/* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
 	cmd_next = cmd_tail + cmd_size;
 	if (cmd_tail < cmd_head && cmd_next >= cmd_head)
 		goto busy;
@@ -2023,10 +2029,8 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&priv->eq_lock, irqflags);
 
-	/*
-	 * Handle completions if needed to make room.
-	 * HACK: Spin until there is sufficient room.
-	 */
+	/* Handle completions if needed to make room. */
+	/* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
 	if (lepp_num_free_comp_slots(eq) == 0) {
 		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
 		if (nolds == 0) {
@@ -2040,6 +2044,7 @@ busy:
 	cmd_tail = eq->cmd_tail;
 
 	/* Copy the commands, or fail. */
+	/* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
 	for (i = 0; i < num_frags; i++) {
 
 		/* Prepare to advance, detecting full queue. */
@@ -2261,6 +2266,23 @@ static int tile_net_get_mac(struct net_device *dev)
 	return 0;
 }
 
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void tile_net_netpoll(struct net_device *dev)
+{
+	struct tile_net_priv *priv = netdev_priv(dev);
+	disable_percpu_irq(priv->intr_id);
+	tile_net_handle_ingress_interrupt(priv->intr_id, dev);
+	enable_percpu_irq(priv->intr_id, 0);
+}
+#endif
+
+
 static const struct net_device_ops tile_net_ops = {
 	.ndo_open = tile_net_open,
 	.ndo_stop = tile_net_stop,
@@ -2269,7 +2291,10 @@ static const struct net_device_ops tile_net_ops = {
 	.ndo_get_stats = tile_net_get_stats,
 	.ndo_change_mtu = tile_net_change_mtu,
 	.ndo_tx_timeout = tile_net_tx_timeout,
-	.ndo_set_mac_address = tile_net_set_mac_address
+	.ndo_set_mac_address = tile_net_set_mac_address,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = tile_net_netpoll,
+#endif
 };
 
 
@@ -2409,7 +2434,7 @@ static void tile_net_cleanup(void)
  */
 static int tile_net_init_module(void)
 {
-	pr_info("Tilera IPP Net Driver\n");
+	pr_info("Tilera Network Driver\n");
 
 	tile_net_devs[0] = tile_net_dev_init("xgbe0");
 	tile_net_devs[1] = tile_net_dev_init("xgbe1");