path: root/net/sched/sch_generic.c
author	Eric Dumazet <dada1@cosmosbay.com>	2009-05-17 23:55:16 -0400
committer	David S. Miller <davem@davemloft.net>	2009-05-17 23:55:16 -0400
commit	9d21493b4beb8f918ba248032fefa393074a5e2b (patch)
tree	653590f3e325da5c4c1fc7d2c00bc196a3167f9d /net/sched/sch_generic.c
parent	0a305720ee597aad41af61e6b6844321d3e24251 (diff)
net: tx scalability works : trans_start
The struct net_device trans_start field is a hot spot on SMP and high
performance devices, particularly multiqueue ones, because every
transmitter dirties it. Its main use is the tx watchdog and bonding
alive checks.

But as most devices don't use NETIF_F_LLTX, we have to lock a
netdev_queue before calling their ndo_start_xmit(). So it makes sense
to move trans_start from net_device to netdev_queue. Its update will
occur on an already present (and in exclusive state) cache line, for
free.

We can make this transition smoothly: an old driver continues to
update dev->trans_start, while an updated one updates
txq->trans_start.

Further patches could also put tx_bytes/tx_packets counters in
netdev_queue to avoid dirtying dev->stats (the vlan device comes to
mind).

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--	net/sched/sch_generic.c	40
1 file changed, 31 insertions(+), 9 deletions(-)
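The transition described in the changelog can be sketched from a driver's point of view. The following is a minimal sketch, not part of this patch: mydrv_start_xmit() and mydrv_hw_xmit() are hypothetical names, and only netdev_get_tx_queue(), skb_get_queue_mapping() and the new txq->trans_start field are taken from the kernel.

/*
 * Illustrative sketch, not from this patch: an updated multiqueue
 * driver stamps the per-queue trans_start instead of the shared
 * dev->trans_start.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

static int mydrv_hw_xmit(struct net_device *dev, struct sk_buff *skb); /* hypothetical ring code */

static int mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	if (mydrv_hw_xmit(dev, skb))
		return NETDEV_TX_BUSY;

	/*
	 * Updated driver: dirty the per-queue cache line the caller
	 * already holds exclusively (it took txq->_xmit_lock before
	 * calling ndo_start_xmit), instead of the shared
	 * dev->trans_start hot spot that old drivers keep writing.
	 */
	txq->trans_start = jiffies;

	return NETDEV_TX_OK;
}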
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5f5efe4e6072..27d03816ec3e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -196,6 +196,21 @@ void __qdisc_run(struct Qdisc *q)
 	clear_bit(__QDISC_STATE_RUNNING, &q->state);
 }
 
+unsigned long dev_trans_start(struct net_device *dev)
+{
+	unsigned long val, res = dev->trans_start;
+	unsigned int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++) {
+		val = netdev_get_tx_queue(dev, i)->trans_start;
+		if (val && time_after(val, res))
+			res = val;
+	}
+	dev->trans_start = res;
+	return res;
+}
+EXPORT_SYMBOL(dev_trans_start);
+
 static void dev_watchdog(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
@@ -205,25 +220,30 @@ static void dev_watchdog(unsigned long arg)
 		if (netif_device_present(dev) &&
 		    netif_running(dev) &&
 		    netif_carrier_ok(dev)) {
-			int some_queue_stopped = 0;
+			int some_queue_timedout = 0;
 			unsigned int i;
+			unsigned long trans_start;
 
 			for (i = 0; i < dev->num_tx_queues; i++) {
 				struct netdev_queue *txq;
 
 				txq = netdev_get_tx_queue(dev, i);
-				if (netif_tx_queue_stopped(txq)) {
-					some_queue_stopped = 1;
+				/*
+				 * old device drivers set dev->trans_start
+				 */
+				trans_start = txq->trans_start ? : dev->trans_start;
+				if (netif_tx_queue_stopped(txq) &&
+				    time_after(jiffies, (trans_start +
+							 dev->watchdog_timeo))) {
+					some_queue_timedout = 1;
 					break;
 				}
 			}
 
-			if (some_queue_stopped &&
-			    time_after(jiffies, (dev->trans_start +
-						 dev->watchdog_timeo))) {
+			if (some_queue_timedout) {
 				char drivername[64];
-				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
-				       dev->name, netdev_drivername(dev, drivername, 64));
+				WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
+				       dev->name, netdev_drivername(dev, drivername, 64), i);
 				dev->netdev_ops->ndo_tx_timeout(dev);
 			}
 			if (!mod_timer(&dev->watchdog_timer,
@@ -602,8 +622,10 @@ static void transition_one_qdisc(struct net_device *dev,
 	clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
 
 	rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
-	if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
+	if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
+		dev_queue->trans_start = 0;
 		*need_watchdog_p = 1;
+	}
 }
 
 void dev_activate(struct net_device *dev)
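For consumers outside the driver, the new dev_trans_start() helper returns the most recent transmit start time across all queues. Below is a hedged sketch of how a bonding-style alive check might use it; my_slave_alive() and the two-interval threshold are illustrative assumptions, not code from this patch.

/*
 * Illustrative only: a liveness test in the spirit of the "bonding
 * alive checks" mentioned in the changelog.  Only dev_trans_start()
 * comes from this patch.
 */
#include <linux/netdevice.h>
#include <linux/jiffies.h>

static bool my_slave_alive(struct net_device *slave_dev,
			   unsigned long delta_in_ticks)
{
	/* most recent start-of-transmission across all tx queues */
	unsigned long last_tx = dev_trans_start(slave_dev);

	/* alive if it transmitted within the last two intervals */
	return time_in_range(jiffies, last_tx,
			     last_tx + 2 * delta_in_ticks);
}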