author	David Daney <ddaney@caviumnetworks.com>	2010-02-15 18:06:47 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2010-02-27 06:53:30 -0500
commit	4898c560103fb8075c10a8e9d70e0ca26873075e (patch)
tree	9d2122e65326bef1b8bde2ce089c4e47e9e5c2ae /drivers/staging/octeon
parent	86568dc41e8c7edcf6d014a64d143536d24b6a5d (diff)
Staging: Octeon: Free transmit SKBs in a timely manner
If we wait for the once-per-second cleanup to free transmit SKBs, sockets with
small transmit buffer sizes might spend most of their time blocked waiting for
the cleanup.

Normally we do a cleanup for each transmitted packet. We add a watchdog type
timer so that we also schedule a cleanup 150uS after a packet is transmitted.
The watchdog is reset for each transmitted packet, so for high packet rates it
never expires. At these high rates the cleanups are done for each packet, so
the extra watchdog-initiated cleanups are neither needed nor triggered.

This version has spelling and comment changes based on feedback from Eric
Dumazet.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
To: netdev@vger.kernel.org
To: gregkh@suse.de
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Patchwork: http://patchwork.linux-mips.org/patch/968/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
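The mechanism is easiest to see in isolation: every transmitted packet re-arms a one-shot hardware timer, and the timer only fires, scheduling the SKB cleanup via a tasklet, once transmission has paused for the watchdog interval. The code below is a minimal, self-contained sketch of that pattern, not the driver code itself; hw_timer_arm_oneshot(), hw_timer_stop() and free_completed_tx_skbs() are illustrative stand-ins for the CIU TIMX(1) CSR writes and cvm_oct_free_tx_skbs() used in the actual patch further down.

#include <linux/types.h>
#include <linux/interrupt.h>

/* Stand-ins for the Octeon CIU one-shot timer accesses and the per-port
 * SKB cleanup; the real patch uses cvmx_write_csr(CVMX_CIU_TIMX(1), ...)
 * and cvm_oct_free_tx_skbs(). */
static void hw_timer_arm_oneshot(u64 ticks) { /* program one-shot timer */ }
static void hw_timer_stop(void) { /* disable timer until re-armed */ }
static void free_completed_tx_skbs(void) { /* free SKBs whose tx completed */ }

static void tx_cleanup_tasklet_fn(unsigned long arg);
static DECLARE_TASKLET(tx_cleanup_tasklet, tx_cleanup_tasklet_fn, 0);

/*
 * Called after every transmitted packet: re-arm the one-shot timer so the
 * cleanup runs ~150uS after the last packet.  At high packet rates the
 * timer is re-armed before it can expire, so it effectively never fires.
 */
static void kick_tx_cleanup_watchdog(u64 ticks_150us)
{
	hw_timer_arm_oneshot(ticks_150us);
}

/* Fires only when transmission has paused for the watchdog interval. */
static irqreturn_t tx_cleanup_watchdog_irq(int irq, void *dev_id)
{
	hw_timer_stop();
	tasklet_schedule(&tx_cleanup_tasklet);	/* defer freeing out of hard-IRQ context */
	return IRQ_HANDLED;
}

static void tx_cleanup_tasklet_fn(unsigned long arg)
{
	free_completed_tx_skbs();
}

At high packet rates the per-packet cleanup does all the work and the watchdog never expires, which is exactly the behaviour the commit message describes.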
Diffstat (limited to 'drivers/staging/octeon')
-rw-r--r--	drivers/staging/octeon/Kconfig             |   1
-rw-r--r--	drivers/staging/octeon/ethernet-defines.h  |   5
-rw-r--r--	drivers/staging/octeon/ethernet-tx.c       | 137
-rw-r--r--	drivers/staging/octeon/ethernet-tx.h       |   6
-rw-r--r--	drivers/staging/octeon/ethernet.c          |  47
-rw-r--r--	drivers/staging/octeon/octeon-ethernet.h   |   9
6 files changed, 142 insertions(+), 63 deletions(-)
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 579b8f129e6..638ad6b3589 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -3,7 +3,6 @@ config OCTEON_ETHERNET
 	depends on CPU_CAVIUM_OCTEON
 	select PHYLIB
 	select MDIO_OCTEON
-	select HIGH_RES_TIMERS
 	help
 	  This driver supports the builtin ethernet ports on Cavium
 	  Networks' products in the Octeon family. This driver supports the
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index 00a8561726b..6a2cd50a17d 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -95,10 +95,11 @@
 /*#define DONT_WRITEBACK(x) 0 */
 
 /* Maximum number of SKBs to try to free per xmit packet. */
-#define MAX_SKB_TO_FREE 10
 #define MAX_OUT_QUEUE_DEPTH 1000
 
-#define FAU_NUM_PACKET_BUFFERS_TO_FREE (CVMX_FAU_REG_END - sizeof(uint32_t))
+#define FAU_TOTAL_TX_TO_CLEAN (CVMX_FAU_REG_END - sizeof(uint32_t))
+#define FAU_NUM_PACKET_BUFFERS_TO_FREE (FAU_TOTAL_TX_TO_CLEAN - sizeof(uint32_t))
+
 #define TOTAL_NUMBER_OF_PORTS (CVMX_PIP_NUM_INPUT_PORTS+1)
 
 
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 62258bd3145..5175247ce0a 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -48,6 +48,7 @@
 
 #include "cvmx-wqe.h"
 #include "cvmx-fau.h"
+#include "cvmx-pip.h"
 #include "cvmx-pko.h"
 #include "cvmx-helper.h"
 
@@ -66,6 +67,11 @@
 #define GET_SKBUFF_QOS(skb) 0
 #endif
 
+static void cvm_oct_tx_do_cleanup(unsigned long arg);
+static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
+
+/* Maximum number of SKBs to try to free per xmit packet. */
+#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
 
 static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
 {
@@ -77,10 +83,24 @@ static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
 	return skb_to_free;
 }
 
-void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv)
+static void cvm_oct_kick_tx_poll_watchdog(void)
+{
+	union cvmx_ciu_timx ciu_timx;
+	ciu_timx.u64 = 0;
+	ciu_timx.s.one_shot = 1;
+	ciu_timx.s.len = cvm_oct_tx_poll_interval;
+	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
+}
+
+void cvm_oct_free_tx_skbs(struct net_device *dev)
 {
 	int32_t skb_to_free;
 	int qos, queues_per_port;
+	int total_freed = 0;
+	int total_remaining = 0;
+	unsigned long flags;
+	struct octeon_ethernet *priv = netdev_priv(dev);
+
 	queues_per_port = cvmx_pko_get_num_queues(priv->port);
 	/* Drain any pending packets in the free list */
 	for (qos = 0; qos < queues_per_port; qos++) {
@@ -89,24 +109,31 @@ void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv)
 		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
 		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
 
-		while (skb_to_free > 0) {
-			dev_kfree_skb_any(skb_dequeue(&priv->tx_free_list[qos]));
-			skb_to_free--;
+
+		total_freed += skb_to_free;
+		if (skb_to_free > 0) {
+			struct sk_buff *to_free_list = NULL;
+			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+			while (skb_to_free > 0) {
+				struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+				t->next = to_free_list;
+				to_free_list = t;
+				skb_to_free--;
+			}
+			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+			/* Do the actual freeing outside of the lock. */
+			while (to_free_list) {
+				struct sk_buff *t = to_free_list;
+				to_free_list = to_free_list->next;
+				dev_kfree_skb_any(t);
+			}
 		}
+		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
 	}
-}
-
-enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer)
-{
-	struct octeon_ethernet *priv = container_of(timer, struct octeon_ethernet, tx_restart_timer);
-	struct net_device *dev = cvm_oct_device[priv->port];
-
-	cvm_oct_free_tx_skbs(priv);
-
-	if (netif_queue_stopped(dev))
+	if (total_freed >= 0 && netif_queue_stopped(dev))
 		netif_wake_queue(dev);
-
-	return HRTIMER_NORESTART;
-}
+	if (total_remaining)
+		cvm_oct_kick_tx_poll_watchdog();
 }
 
 /**
@@ -129,6 +156,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct sk_buff *to_free_list;
 	int32_t skb_to_free;
 	int32_t buffers_to_free;
+	u32 total_to_clean;
 	unsigned long flags;
 #if REUSE_SKBUFFS_WITHOUT_FREE
 	unsigned char *fpa_head;
@@ -232,7 +260,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	pko_command.s.subone0 = 1;
 
 	pko_command.s.dontfree = 1;
-	pko_command.s.reg0 = priv->fau + qos * 4;
 
 	/* Build the PKO buffer pointer */
 	hw_buffer.u64 = 0;
@@ -327,7 +354,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * We can use this buffer in the FPA. We don't need the FAU
 	 * update anymore
 	 */
-	pko_command.s.reg0 = 0;
 	pko_command.s.dontfree = 0;
 
 	hw_buffer.s.back = ((unsigned long)skb->data >> 7) - ((unsigned long)fpa_head >> 7);
@@ -384,15 +410,17 @@ dont_put_skbuff_in_hw:
 	 * If we're sending faster than the receive can free them then
 	 * don't do the HW free.
 	 */
-	if ((buffers_to_free < -100) && !pko_command.s.dontfree) {
+	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
 		pko_command.s.dontfree = 1;
-		pko_command.s.reg0 = priv->fau + qos * 4;
-	}
 
-	if (pko_command.s.dontfree)
+	if (pko_command.s.dontfree) {
 		queue_type = QUEUE_CORE;
-	else
+		pko_command.s.reg0 = priv->fau+qos*4;
+	} else {
 		queue_type = QUEUE_HW;
+	}
+	if (USE_ASYNC_IOBDMA)
+		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);
 
 	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
 
@@ -402,10 +430,7 @@ dont_put_skbuff_in_hw:
 		/* Drop the lock when notifying the core. */
 		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
 		netif_stop_queue(dev);
-		hrtimer_start(&priv->tx_restart_timer,
-			      priv->tx_restart_interval, HRTIMER_MODE_REL);
 		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
-
 	} else {
 		/* If not using normal queueing. */
 		queue_type = QUEUE_DROP;
@@ -460,11 +485,27 @@ skip_xmit:
 	}
 
 	if (USE_ASYNC_IOBDMA) {
+		CVMX_SYNCIOBDMA;
+		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
 		/* Restore the scratch area */
 		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
 		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
+	} else {
+		total_to_clean = cvmx_fau_fetch_and_add32(FAU_TOTAL_TX_TO_CLEAN, 1);
 	}
 
+	if (total_to_clean & 0x3ff) {
+		/*
+		 * Schedule the cleanup tasklet every 1024 packets for
+		 * the pathological case of high traffic on one port
+		 * delaying clean up of packets on a different port
+		 * that is blocked waiting for the cleanup.
+		 */
+		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
+	}
+
+	cvm_oct_kick_tx_poll_watchdog();
+
 	return NETDEV_TX_OK;
 }
 
@@ -624,7 +665,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
 	 *
 	 * @dev: Device being shutdown
 	 */
-void cvm_oct_tx_shutdown(struct net_device *dev)
+void cvm_oct_tx_shutdown_dev(struct net_device *dev)
 {
 	struct octeon_ethernet *priv = netdev_priv(dev);
 	unsigned long flags;
@@ -638,3 +679,45 @@ void cvm_oct_tx_shutdown(struct net_device *dev)
 		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
 	}
 }
+
+static void cvm_oct_tx_do_cleanup(unsigned long arg)
+{
+	int port;
+
+	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
+		if (cvm_oct_device[port]) {
+			struct net_device *dev = cvm_oct_device[port];
+			cvm_oct_free_tx_skbs(dev);
+		}
+	}
+}
+
+static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
+{
+	/* Disable the interrupt. */
+	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
+	/* Do the work in the tasklet. */
+	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
+	return IRQ_HANDLED;
+}
+
+void cvm_oct_tx_initialize(void)
+{
+	int i;
+
+	/* Disable the interrupt. */
+	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
+	/* Register an IRQ hander for to receive CIU_TIMX(1) interrupts */
+	i = request_irq(OCTEON_IRQ_TIMER1,
+			cvm_oct_tx_cleanup_watchdog, 0,
+			"Ethernet", cvm_oct_device);
+
+	if (i)
+		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
+}
+
+void cvm_oct_tx_shutdown(void)
+{
+	/* Free the interrupt handler */
+	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
+}
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index b628d8c8421..547680c6c37 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -29,6 +29,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev);
 int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
 int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
 			 int do_free, int qos);
-void cvm_oct_tx_shutdown(struct net_device *dev);
-void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv);
-enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer);
+void cvm_oct_tx_initialize(void);
+void cvm_oct_tx_shutdown(void);
+void cvm_oct_tx_shutdown_dev(struct net_device *dev);
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 1771c1035a3..5ee60ab0b23 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -140,6 +140,8 @@ atomic_t cvm_oct_poll_queue_stopping = ATOMIC_INIT(0);
  */
 struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
 
+u64 cvm_oct_tx_poll_interval;
+
 static void cvm_oct_rx_refill_worker(struct work_struct *work);
 static DECLARE_DELAYED_WORK(cvm_oct_rx_refill_work, cvm_oct_rx_refill_worker);
 
@@ -159,18 +161,19 @@ static void cvm_oct_rx_refill_worker(struct work_struct *work)
 					   &cvm_oct_rx_refill_work, HZ);
 }
 
-static void cvm_oct_tx_clean_worker(struct work_struct *work)
+static void cvm_oct_periodic_worker(struct work_struct *work)
 {
 	struct octeon_ethernet *priv = container_of(work,
 						    struct octeon_ethernet,
-						    tx_clean_work.work);
+						    port_periodic_work.work);
 
 	if (priv->poll)
 		priv->poll(cvm_oct_device[priv->port]);
-	cvm_oct_free_tx_skbs(priv);
+
 	cvm_oct_device[priv->port]->netdev_ops->ndo_get_stats(cvm_oct_device[priv->port]);
+
 	if (!atomic_read(&cvm_oct_poll_queue_stopping))
-		queue_delayed_work(cvm_oct_poll_queue, &priv->tx_clean_work, HZ);
+		queue_delayed_work(cvm_oct_poll_queue, &priv->port_periodic_work, HZ);
 }
 
 /**
@@ -662,6 +665,9 @@ static int __init cvm_oct_init_module(void)
 	 */
 	cvmx_fau_atomic_write32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
 
+	/* Initialize the FAU used for counting tx SKBs that need to be freed */
+	cvmx_fau_atomic_write32(FAU_TOTAL_TX_TO_CLEAN, 0);
+
 	if ((pow_send_group != -1)) {
 		struct net_device *dev;
 		pr_info("\tConfiguring device for POW only access\n");
@@ -670,18 +676,6 @@ static int __init cvm_oct_init_module(void)
 			/* Initialize the device private structure. */
 			struct octeon_ethernet *priv = netdev_priv(dev);
 
-			hrtimer_init(&priv->tx_restart_timer,
-				     CLOCK_MONOTONIC,
-				     HRTIMER_MODE_REL);
-			priv->tx_restart_timer.function = cvm_oct_restart_tx;
-
-			/*
-			 * Default for 10GE 5000nS enough time to
-			 * transmit about 100 64byte packtes.  1GE
-			 * interfaces will get 50000nS below.
-			 */
-			priv->tx_restart_interval = ktime_set(0, 5000);
-
 			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
 			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
 			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
@@ -725,9 +719,8 @@ static int __init cvm_oct_init_module(void)
 			/* Initialize the device private structure. */
 			priv = netdev_priv(dev);
 
-			INIT_DELAYED_WORK(&priv->tx_clean_work,
-					  cvm_oct_tx_clean_worker);
-
+			INIT_DELAYED_WORK(&priv->port_periodic_work,
+					  cvm_oct_periodic_worker);
 			priv->imode = imode;
 			priv->port = port;
 			priv->queue = cvmx_pko_get_base_queue(priv->port);
@@ -763,7 +756,6 @@ static int __init cvm_oct_init_module(void)
 
 			case CVMX_HELPER_INTERFACE_MODE_SGMII:
 				dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
-				priv->tx_restart_interval = ktime_set(0, 50000);
 				strcpy(dev->name, "eth%d");
 				break;
 
@@ -775,7 +767,6 @@ static int __init cvm_oct_init_module(void)
 			case CVMX_HELPER_INTERFACE_MODE_RGMII:
 			case CVMX_HELPER_INTERFACE_MODE_GMII:
 				dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
-				priv->tx_restart_interval = ktime_set(0, 50000);
 				strcpy(dev->name, "eth%d");
 				break;
 			}
@@ -793,13 +784,19 @@ static int __init cvm_oct_init_module(void)
 					cvmx_pko_get_num_queues(priv->port) *
 					sizeof(uint32_t);
 				queue_delayed_work(cvm_oct_poll_queue,
-						   &priv->tx_clean_work, HZ);
+						   &priv->port_periodic_work, HZ);
 			}
 		}
 	}
 
+	cvm_oct_tx_initialize();
 	cvm_oct_rx_initialize();
 
+	/*
+	 * 150 uS: about 10 1500-byte packtes at 1GE.
+	 */
+	cvm_oct_tx_poll_interval = 150 * (octeon_get_clock_rate() / 1000000);
+
 	queue_delayed_work(cvm_oct_poll_queue, &cvm_oct_rx_refill_work, HZ);
 
 	return 0;
@@ -826,6 +823,8 @@ static void __exit cvm_oct_cleanup_module(void)
 	cancel_delayed_work_sync(&cvm_oct_rx_refill_work);
 
 	cvm_oct_rx_shutdown();
+	cvm_oct_tx_shutdown();
+
 	cvmx_pko_disable();
 
 	/* Free the ethernet devices */
@@ -833,9 +832,9 @@ static void __exit cvm_oct_cleanup_module(void)
 		if (cvm_oct_device[port]) {
 			struct net_device *dev = cvm_oct_device[port];
 			struct octeon_ethernet *priv = netdev_priv(dev);
-			cancel_delayed_work_sync(&priv->tx_clean_work);
+			cancel_delayed_work_sync(&priv->port_periodic_work);
 
-			cvm_oct_tx_shutdown(dev);
+			cvm_oct_tx_shutdown_dev(dev);
 			unregister_netdev(dev);
 			kfree(dev);
 			cvm_oct_device[port] = NULL;
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 8d0921061da..db2a3cc048e 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -4,7 +4,7 @@
  * Contact: support@caviumnetworks.com
  * This file is part of the OCTEON SDK
  *
- * Copyright (c) 2003-2007 Cavium Networks
+ * Copyright (c) 2003-2010 Cavium Networks
  *
  * This file is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License, Version 2, as
@@ -31,8 +31,6 @@
 #ifndef OCTEON_ETHERNET_H
 #define OCTEON_ETHERNET_H
 
-#include <linux/hrtimer.h>
-
 /**
  * This is the definition of the Ethernet driver's private
  * driver state stored in netdev_priv(dev).
@@ -59,9 +57,7 @@ struct octeon_ethernet {
 	uint64_t link_info;
 	/* Called periodically to check link status */
 	void (*poll) (struct net_device *dev);
-	struct hrtimer tx_restart_timer;
-	ktime_t tx_restart_interval;
-	struct delayed_work tx_clean_work;
+	struct delayed_work port_periodic_work;
 	struct work_struct port_work;	/* may be unused. */
 };
 
@@ -101,6 +97,7 @@ extern char pow_send_list[];
 extern struct net_device *cvm_oct_device[];
 extern struct workqueue_struct *cvm_oct_poll_queue;
 extern atomic_t cvm_oct_poll_queue_stopping;
+extern u64 cvm_oct_tx_poll_interval;
 
 extern int max_rx_cpus;
 extern int rx_napi_weight;