author		David Daney <ddaney@caviumnetworks.com>	2010-01-07 14:05:03 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2010-02-27 06:53:07 -0500
commit		6888fc87768eaa218b6244f2e78c55416706981a (patch)
tree		9a51ac4e8ead33364e479ac2ce4e98a128b49c17 /drivers
parent		166bdaa9aad9903bf4330ef68feb37f220c9eac8 (diff)
Staging: Octeon Ethernet: Rewrite transmit code.
Stop the queue if too many packets are queued.  Restart it from a
high resolution timer.

Rearrange and simplify locking and SKB freeing code.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
To: gregkh@suse.de
Patchwork: http://patchwork.linux-mips.org/patch/843/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
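The core pattern of the rewrite is standard: when the transmit path sees
too deep a backlog it stops the queue and arms a one-shot hrtimer, and
the timer callback reaps completed skbs and wakes the queue.  A minimal
sketch of that pattern, with hypothetical my_priv/my_restart_tx/my_throttle
names standing in for the driver's octeon_ethernet/cvm_oct_restart_tx code
in the diff below:

	#include <linux/hrtimer.h>
	#include <linux/kernel.h>
	#include <linux/ktime.h>
	#include <linux/netdevice.h>

	struct my_priv {
		struct net_device *dev;
		struct hrtimer tx_restart_timer;
		ktime_t tx_restart_interval;
	};

	/* One-shot callback: wake the queue the xmit path stopped. */
	static enum hrtimer_restart my_restart_tx(struct hrtimer *timer)
	{
		struct my_priv *priv =
			container_of(timer, struct my_priv, tx_restart_timer);

		/* (The real driver also frees completed skbs here.) */
		if (netif_queue_stopped(priv->dev))
			netif_wake_queue(priv->dev);
		return HRTIMER_NORESTART;
	}

	static void my_setup(struct my_priv *priv)
	{
		hrtimer_init(&priv->tx_restart_timer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL);
		priv->tx_restart_timer.function = my_restart_tx;
		priv->tx_restart_interval = ktime_set(0, 5000); /* 5 us */
	}

	/* Called from the xmit path when the backlog is too deep. */
	static void my_throttle(struct my_priv *priv)
	{
		netif_stop_queue(priv->dev);
		hrtimer_start(&priv->tx_restart_timer,
			      priv->tx_restart_interval, HRTIMER_MODE_REL);
	}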
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/staging/octeon/Kconfig			  1
-rw-r--r--	drivers/staging/octeon/ethernet-tx.c		172
-rw-r--r--	drivers/staging/octeon/ethernet-tx.h		 27
-rw-r--r--	drivers/staging/octeon/ethernet.c		 69
-rw-r--r--	drivers/staging/octeon/octeon-ethernet.h	  4
5 files changed, 150 insertions(+), 123 deletions(-)
diff --git a/drivers/staging/octeon/Kconfig b/drivers/staging/octeon/Kconfig
index 638ad6b35891..579b8f129e6e 100644
--- a/drivers/staging/octeon/Kconfig
+++ b/drivers/staging/octeon/Kconfig
@@ -3,6 +3,7 @@ config OCTEON_ETHERNET
 	depends on CPU_CAVIUM_OCTEON
 	select PHYLIB
 	select MDIO_OCTEON
+	select HIGH_RES_TIMERS
 	help
 	  This driver supports the builtin ethernet ports on Cavium
 	  Networks' products in the Octeon family. This driver supports the
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index e5695d964d9a..05b58f8b58fd 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -64,6 +64,49 @@
 #define GET_SKBUFF_QOS(skb) 0
 #endif
 
+
+static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
+{
+	int32_t undo;
+	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
+	if (undo > 0)
+		cvmx_fau_atomic_add32(fau, -undo);
+	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -skb_to_free;
+	return skb_to_free;
+}
+
+void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv)
+{
+	int32_t skb_to_free;
+	int qos, queues_per_port;
+	queues_per_port = cvmx_pko_get_num_queues(priv->port);
+	/* Drain any pending packets in the free list */
+	for (qos = 0; qos < queues_per_port; qos++) {
+		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
+			continue;
+		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau+qos*4, MAX_SKB_TO_FREE);
+		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
+
+		while (skb_to_free > 0) {
+			dev_kfree_skb_any(skb_dequeue(&priv->tx_free_list[qos]));
+			skb_to_free--;
+		}
+	}
+}
+
+enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer)
+{
+	struct octeon_ethernet *priv = container_of(timer, struct octeon_ethernet, tx_restart_timer);
+	struct net_device *dev = cvm_oct_device[priv->port];
+
+	cvm_oct_free_tx_skbs(priv);
+
+	if (netif_queue_stopped(dev))
+		netif_wake_queue(dev);
+
+	return HRTIMER_NORESTART;
+}
+
 /**
  * Packet transmit
  *
@@ -77,13 +120,13 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	union cvmx_buf_ptr hw_buffer;
 	uint64_t old_scratch;
 	uint64_t old_scratch2;
-	int dropped;
 	int qos;
-	int queue_it_up;
+	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
 	struct octeon_ethernet *priv = netdev_priv(dev);
+	struct sk_buff *to_free_list;
 	int32_t skb_to_free;
-	int32_t undo;
 	int32_t buffers_to_free;
+	unsigned long flags;
 #if REUSE_SKBUFFS_WITHOUT_FREE
 	unsigned char *fpa_head;
 #endif
@@ -94,9 +137,6 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	prefetch(priv);
 
-	/* Start off assuming no drop */
-	dropped = 0;
-
 	/*
 	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
 	 * completely remove "qos" in the event neither interface
@@ -268,9 +308,9 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb->tc_verd = 0;
 #endif /* CONFIG_NET_CLS_ACT */
 #endif /* CONFIG_NET_SCHED */
+#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
 
 dont_put_skbuff_in_hw:
-#endif /* REUSE_SKBUFFS_WITHOUT_FREE */
 
 	/* Check if we can use the hardware checksumming */
 	if (USE_HW_TCPUDP_CHECKSUM && (skb->protocol == htons(ETH_P_IP)) &&
@@ -295,18 +335,7 @@ dont_put_skbuff_in_hw:
 		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
 	}
 
-	/*
-	 * We try to claim MAX_SKB_TO_FREE buffers.  If there were not
-	 * that many available, we have to un-claim (undo) any that
-	 * were in excess.  If skb_to_free is positive we will free
-	 * that many buffers.
-	 */
-	undo = skb_to_free > 0 ?
-		MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
-	if (undo > 0)
-		cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
-	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
-		MAX_SKB_TO_FREE : -skb_to_free;
+	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free, priv->fau+qos*4);
 
 	/*
 	 * If we're sending faster than the receive can free them then
@@ -317,60 +346,83 @@ dont_put_skbuff_in_hw:
 		pko_command.s.reg0 = priv->fau + qos * 4;
 	}
 
-	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
-				     CVMX_PKO_LOCK_CMD_QUEUE);
+	if (pko_command.s.dontfree)
+		queue_type = QUEUE_CORE;
+	else
+		queue_type = QUEUE_HW;
+
+	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
 
 	/* Drop this packet if we have too many already queued to the HW */
-	if (unlikely
-	    (skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
-		/*
-		   DEBUGPRINT("%s: Tx dropped. Too many queued\n", dev->name);
-		 */
-		dropped = 1;
+	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >= MAX_OUT_QUEUE_DEPTH)) {
+		if (dev->tx_queue_len != 0) {
+			/* Drop the lock when notifying the core.  */
+			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+			netif_stop_queue(dev);
+			hrtimer_start(&priv->tx_restart_timer,
+				      priv->tx_restart_interval, HRTIMER_MODE_REL);
+			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
+
+		} else {
+			/* If not using normal queueing.  */
+			queue_type = QUEUE_DROP;
+			goto skip_xmit;
+		}
 	}
+
+	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
+				     CVMX_PKO_LOCK_NONE);
+
 	/* Send the packet to the output queue */
-	else if (unlikely
-		 (cvmx_pko_send_packet_finish
-		  (priv->port, priv->queue + qos, pko_command, hw_buffer,
-		   CVMX_PKO_LOCK_CMD_QUEUE))) {
+	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
+						 priv->queue + qos,
+						 pko_command, hw_buffer,
+						 CVMX_PKO_LOCK_NONE))) {
 		DEBUGPRINT("%s: Failed to send the packet\n", dev->name);
-		dropped = 1;
+		queue_type = QUEUE_DROP;
 	}
+skip_xmit:
+	to_free_list = NULL;
 
-	if (USE_ASYNC_IOBDMA) {
-		/* Restore the scratch area */
-		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
-		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
+	switch (queue_type) {
+	case QUEUE_DROP:
+		skb->next = to_free_list;
+		to_free_list = skb;
+		priv->stats.tx_dropped++;
+		break;
+	case QUEUE_HW:
+		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
+		break;
+	case QUEUE_CORE:
+		__skb_queue_tail(&priv->tx_free_list[qos], skb);
+		break;
+	default:
+		BUG();
 	}
 
-	queue_it_up = 0;
-	if (unlikely(dropped)) {
-		dev_kfree_skb_any(skb);
-		priv->stats.tx_dropped++;
-	} else {
-		if (USE_SKBUFFS_IN_HW) {
-			/* Put this packet on the queue to be freed later */
-			if (pko_command.s.dontfree)
-				queue_it_up = 1;
-			else
-				cvmx_fau_atomic_add32
-				    (FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
-		} else {
-			/* Put this packet on the queue to be freed later */
-			queue_it_up = 1;
-		}
+	while (skb_to_free > 0) {
+		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+		t->next = to_free_list;
+		to_free_list = t;
+		skb_to_free--;
 	}
 
-	if (queue_it_up) {
-		spin_lock(&priv->tx_free_list[qos].lock);
-		__skb_queue_tail(&priv->tx_free_list[qos], skb);
-		cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 0);
-		spin_unlock(&priv->tx_free_list[qos].lock);
-	} else {
-		cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
-	}
+	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
+
+	/* Do the actual freeing outside of the lock. */
+	while (to_free_list) {
+		struct sk_buff *t = to_free_list;
+		to_free_list = to_free_list->next;
+		dev_kfree_skb_any(t);
+	}
 
-	return 0;
+	if (USE_ASYNC_IOBDMA) {
+		/* Restore the scratch area */
+		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
+		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
+	}
+
+	return NETDEV_TX_OK;
 }
 
 /**
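The trickiest piece of the ethernet-tx.c changes above is the
cvm_oct_adjust_skb_to_free() arithmetic.  The FAU location counts down
as the PKO hardware completes packets whose skbs the core still holds,
so its value is (minus) the number of skbs now safe to free.  The
transmit path speculatively claims MAX_SKB_TO_FREE completions with one
fetch-and-add, then returns whatever it over-claimed.  A userspace
sketch of the same arithmetic, with a C11 atomic standing in for the
hardware FAU register and an illustrative MAX_SKB_TO_FREE value (the
kernel code uses cvmx_fau_fetch_and_add32()/cvmx_fau_atomic_add32()):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MAX_SKB_TO_FREE 32		/* illustrative value */

	/* Hardware decrements this by one per completed packet. */
	static atomic_int_fast32_t fau;

	static int32_t claim_completions(void)
	{
		/* Speculatively claim MAX_SKB_TO_FREE completions. */
		int32_t old = atomic_fetch_add(&fau, MAX_SKB_TO_FREE);
		/* Give back anything claimed beyond what had completed. */
		int32_t undo = old > 0 ? MAX_SKB_TO_FREE : old + MAX_SKB_TO_FREE;
		if (undo > 0)
			atomic_fetch_add(&fau, -undo);
		/* skbs actually safe to free, clamped to the claim size. */
		return -old > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE : -old;
	}

	int main(void)
	{
		atomic_store(&fau, -5);	/* pretend 5 packets completed */
		printf("%d\n", (int)claim_completions());	/* 5 */
		printf("%d\n", (int)claim_completions());	/* 0 */
		return 0;
	}

As in the kernel code, a non-positive return value means there is
nothing to free; the caller's while (skb_to_free > 0) loop simply does
not run.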
diff --git a/drivers/staging/octeon/ethernet-tx.h b/drivers/staging/octeon/ethernet-tx.h
index c0bebf750bc0..b628d8c8421d 100644
--- a/drivers/staging/octeon/ethernet-tx.h
+++ b/drivers/staging/octeon/ethernet-tx.h
@@ -30,28 +30,5 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev);
30int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry, 30int cvm_oct_transmit_qos(struct net_device *dev, void *work_queue_entry,
31 int do_free, int qos); 31 int do_free, int qos);
32void cvm_oct_tx_shutdown(struct net_device *dev); 32void cvm_oct_tx_shutdown(struct net_device *dev);
33 33void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv);
34/** 34enum hrtimer_restart cvm_oct_restart_tx(struct hrtimer *timer);
35 * Free dead transmit skbs.
36 *
37 * @priv: The driver data
38 * @skb_to_free: The number of SKBs to free (free none if negative).
39 * @qos: The queue to free from.
40 * @take_lock: If true, acquire the skb list lock.
41 */
42static inline void cvm_oct_free_tx_skbs(struct octeon_ethernet *priv,
43 int skb_to_free,
44 int qos, int take_lock)
45{
46 /* Free skbuffs not in use by the hardware. */
47 if (skb_to_free > 0) {
48 if (take_lock)
49 spin_lock(&priv->tx_free_list[qos].lock);
50 while (skb_to_free > 0) {
51 dev_kfree_skb(__skb_dequeue(&priv->tx_free_list[qos]));
52 skb_to_free--;
53 }
54 if (take_lock)
55 spin_unlock(&priv->tx_free_list[qos].lock);
56 }
57}
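The deleted inline freed skbs with the queue spinlock held (and needed
the take_lock flag to avoid self-deadlock from the xmit path).  The
rewrite replaces it with a collect-under-the-lock, free-outside-the-lock
pattern, chaining victims through skb->next.  A minimal sketch of that
pattern, using a hypothetical helper name, where q stands in for
priv->tx_free_list[qos]:

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	static void free_outside_lock(struct sk_buff_head *q, int n)
	{
		struct sk_buff *to_free_list = NULL;
		unsigned long flags;

		spin_lock_irqsave(&q->lock, flags);
		while (n-- > 0) {
			struct sk_buff *t = __skb_dequeue(q);
			if (!t)
				break;
			t->next = to_free_list;	/* chain through skb->next */
			to_free_list = t;
		}
		spin_unlock_irqrestore(&q->lock, flags);

		/* dev_kfree_skb_any() can do real work; keep it off the lock. */
		while (to_free_list) {
			struct sk_buff *t = to_free_list;
			to_free_list = to_free_list->next;
			dev_kfree_skb_any(t);
		}
	}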
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 4e054262a005..973178a80c93 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -131,50 +131,29 @@ struct net_device *cvm_oct_device[TOTAL_NUMBER_OF_PORTS];
  */
 static void cvm_do_timer(unsigned long arg)
 {
-	int32_t skb_to_free, undo;
-	int queues_per_port;
-	int qos;
-	struct octeon_ethernet *priv;
 	static int port;
-
-	if (port >= CVMX_PIP_NUM_INPUT_PORTS) {
+	if (port < CVMX_PIP_NUM_INPUT_PORTS) {
+		if (cvm_oct_device[port]) {
+			struct octeon_ethernet *priv = netdev_priv(cvm_oct_device[port]);
+			if (priv->poll)
+				priv->poll(cvm_oct_device[port]);
+			cvm_oct_free_tx_skbs(priv);
+			cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
+		}
+		port++;
 		/*
-		 * All ports have been polled.  Start the next
-		 * iteration through the ports in one second.
+		 * Poll the next port in a 50th of a second.  This
+		 * spreads the polling of ports out a little bit.
 		 */
+		mod_timer(&cvm_oct_poll_timer, jiffies + HZ/50);
+	} else {
 		port = 0;
+		/*
+		 * All ports have been polled. Start the next iteration through
+		 * the ports in one second.
+		 */
 		mod_timer(&cvm_oct_poll_timer, jiffies + HZ);
-		return;
 	}
-	if (!cvm_oct_device[port])
-		goto out;
-
-	priv = netdev_priv(cvm_oct_device[port]);
-	if (priv->poll)
-		priv->poll(cvm_oct_device[port]);
-
-	queues_per_port = cvmx_pko_get_num_queues(port);
-	/* Drain any pending packets in the free list */
-	for (qos = 0; qos < queues_per_port; qos++) {
-		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
-			continue;
-		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
-						       MAX_SKB_TO_FREE);
-		undo = skb_to_free > 0 ?
-			MAX_SKB_TO_FREE : skb_to_free + MAX_SKB_TO_FREE;
-		if (undo > 0)
-			cvmx_fau_atomic_add32(priv->fau+qos*4, -undo);
-		skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ?
-			MAX_SKB_TO_FREE : -skb_to_free;
-		cvm_oct_free_tx_skbs(priv, skb_to_free, qos, 1);
-	}
-	cvm_oct_device[port]->netdev_ops->ndo_get_stats(cvm_oct_device[port]);
-
-out:
-	port++;
-	/* Poll the next port in a 50th of a second.
-	   This spreads the polling of ports out a little bit */
-	mod_timer(&cvm_oct_poll_timer, jiffies + HZ / 50);
 }
 
 /**
@@ -678,6 +657,18 @@ static int __init cvm_oct_init_module(void)
 			/* Initialize the device private structure. */
 			struct octeon_ethernet *priv = netdev_priv(dev);
 
+			hrtimer_init(&priv->tx_restart_timer,
+				     CLOCK_MONOTONIC,
+				     HRTIMER_MODE_REL);
+			priv->tx_restart_timer.function = cvm_oct_restart_tx;
+
+			/*
+			 * Default for 10GE 5000nS enough time to
+			 * transmit about 100 64byte packets. 1GE
+			 * interfaces will get 50000nS below.
+			 */
+			priv->tx_restart_interval = ktime_set(0, 5000);
+
 			dev->netdev_ops = &cvm_oct_pow_netdev_ops;
 			priv->imode = CVMX_HELPER_INTERFACE_MODE_DISABLED;
 			priv->port = CVMX_PIP_NUM_INPUT_PORTS;
@@ -757,6 +748,7 @@ static int __init cvm_oct_init_module(void)
 
 		case CVMX_HELPER_INTERFACE_MODE_SGMII:
 			dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
+			priv->tx_restart_interval = ktime_set(0, 50000);
 			strcpy(dev->name, "eth%d");
 			break;
 
@@ -768,6 +760,7 @@ static int __init cvm_oct_init_module(void)
 		case CVMX_HELPER_INTERFACE_MODE_RGMII:
 		case CVMX_HELPER_INTERFACE_MODE_GMII:
 			dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+			priv->tx_restart_interval = ktime_set(0, 50000);
 			strcpy(dev->name, "eth%d");
 			break;
 		}
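As a rough sanity check on those intervals: a minimum-size Ethernet
frame is 64 bytes, which occupies 84 bytes on the wire once the 8-byte
preamble and 12-byte inter-frame gap are added, i.e. 672 bit times.  At
10 Gbit/s that is about 67 ns per frame, so the 5000 ns default above
covers roughly 75 minimum-size frames (the "about 100" in the comment is
the right order of magnitude); at 1 Gbit/s it is about 672 ns per frame,
hence the 10x larger 50000 ns interval for the SGMII/RGMII/GMII cases.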
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 208da27bc02d..203c6a920af5 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -31,6 +31,8 @@
 #ifndef OCTEON_ETHERNET_H
 #define OCTEON_ETHERNET_H
 
+#include <linux/hrtimer.h>
+
 /**
  * This is the definition of the Ethernet driver's private
  * driver state stored in netdev_priv(dev).
@@ -57,6 +59,8 @@ struct octeon_ethernet {
 	uint64_t link_info;
 	/* Called periodically to check link status */
 	void (*poll) (struct net_device *dev);
+	struct hrtimer tx_restart_timer;
+	ktime_t tx_restart_interval;
 };
 
 /**