Diffstat (limited to 'net/core/dev.c')
 net/core/dev.c | 88 +-------------------------------------------------------
 1 file changed, 1 insertion(+), 87 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 4f1ae2efe87..3156df699f0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -115,18 +115,6 @@
 #endif	/* CONFIG_NET_RADIO */
 #include <asm/current.h>
 
-/* This define, if set, will randomly drop a packet when congestion
- * is more than moderate. It helps fairness in the multi-interface
- * case when one of them is a hog, but it kills performance for the
- * single interface case so it is off now by default.
- */
-#undef RAND_LIE
-
-/* Setting this will sample the queue lengths and thus congestion
- * via a timer instead of as each packet is received.
- */
-#undef OFFLINE_SAMPLE
-
 /*
  * The list of packet types we will receive (as opposed to discard)
  * and the routines to invoke.
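
The removed RAND_LIE comment describes a small RED-style policy: above a congestion threshold, drop (or escalate) a packet with probability proportional to the averaged backlog. A minimal standalone sketch of that decision in plain C, with rand() standing in for the kernel's net_random() and a local constant for netdev_max_backlog (names here are illustrative, not kernel code):

#include <stdlib.h>

#define MAX_BACKLOG 300			/* stand-in for netdev_max_backlog */

/* Drop with probability avg_blog / MAX_BACKLOG: draw a uniform value
 * in [0, MAX_BACKLOG) and drop if it falls below the averaged backlog. */
static int rand_lie_should_drop(int avg_blog)
{
	int rq = rand() % MAX_BACKLOG;
	return rq < avg_blog;
}

The closer the smoothed backlog climbs toward the maximum, the more often the draw lands under it, so the drop rate scales linearly with sustained congestion.
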
@@ -159,11 +147,6 @@ static DEFINE_SPINLOCK(ptype_lock);
 static struct list_head ptype_base[16];	/* 16 way hashed list */
 static struct list_head ptype_all;		/* Taps */
 
-#ifdef OFFLINE_SAMPLE
-static void sample_queue(unsigned long dummy);
-static struct timer_list samp_timer = TIMER_INITIALIZER(sample_queue, 0, 0);
-#endif
-
 /*
  * The @dev_base list is protected by @dev_base_lock and the rtnl
  * semaphore.
@@ -1365,69 +1348,10 @@ out:
 
 int netdev_max_backlog = 300;
 int weight_p = 64;		/* old backlog weight */
-/* These numbers are selected based on intuition and some
- * experimentatiom, if you have more scientific way of doing this
- * please go ahead and fix things.
- */
-int no_cong_thresh = 10;
-int no_cong = 20;
-int lo_cong = 100;
-int mod_cong = 290;
 
 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
 
 
-static void get_sample_stats(int cpu)
-{
-#ifdef RAND_LIE
-	unsigned long rd;
-	int rq;
-#endif
-	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
-	int blog = sd->input_pkt_queue.qlen;
-	int avg_blog = sd->avg_blog;
-
-	avg_blog = (avg_blog >> 1) + (blog >> 1);
-
-	if (avg_blog > mod_cong) {
-		/* Above moderate congestion levels. */
-		sd->cng_level = NET_RX_CN_HIGH;
-#ifdef RAND_LIE
-		rd = net_random();
-		rq = rd % netdev_max_backlog;
-		if (rq < avg_blog) /* unlucky bastard */
-			sd->cng_level = NET_RX_DROP;
-#endif
-	} else if (avg_blog > lo_cong) {
-		sd->cng_level = NET_RX_CN_MOD;
-#ifdef RAND_LIE
-		rd = net_random();
-		rq = rd % netdev_max_backlog;
-		if (rq < avg_blog) /* unlucky bastard */
-			sd->cng_level = NET_RX_CN_HIGH;
-#endif
-	} else if (avg_blog > no_cong)
-		sd->cng_level = NET_RX_CN_LOW;
-	else /* no congestion */
-		sd->cng_level = NET_RX_SUCCESS;
-
-	sd->avg_blog = avg_blog;
-}
-
-#ifdef OFFLINE_SAMPLE
-static void sample_queue(unsigned long dummy)
-{
-/* 10 ms 0r 1ms -- i don't care -- JHS */
-	int next_tick = 1;
-	int cpu = smp_processor_id();
-
-	get_sample_stats(cpu);
-	next_tick += jiffies;
-	mod_timer(&samp_timer, next_tick);
-}
-#endif
-
-
 /**
  *	netif_rx	-	post buffer to the network code
  *	@skb: buffer to post
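
The heart of the removed sampler is a shift-based exponential moving average: avg_blog = (avg_blog >> 1) + (blog >> 1) gives the previous average and the instantaneous queue length equal weight (alpha = 1/2) without a division. For example, with avg_blog = 20 and blog = 100, the new average is 10 + 50 = 60. A compilable sketch of the average-plus-bucketing logic, with thresholds copied from the removed no_cong/lo_cong/mod_cong values and illustrative enum names in place of the kernel's NET_RX_* codes:

enum cng_level { CNG_NONE, CNG_LOW, CNG_MODERATE, CNG_HIGH };

static int avg_blog = 10;	/* the removed init used 10, "arbitrary non-zero" */

static enum cng_level classify_backlog(int blog)
{
	avg_blog = (avg_blog >> 1) + (blog >> 1);	/* EWMA, alpha = 1/2 */

	if (avg_blog > 290)	/* mod_cong */
		return CNG_HIGH;
	if (avg_blog > 100)	/* lo_cong */
		return CNG_MODERATE;
	if (avg_blog > 20)	/* no_cong */
		return CNG_LOW;
	return CNG_NONE;
}

Note that the two shifts round down independently, so the average can understate the true mean by up to one packet; the original code accepted that in exchange for avoiding a divide on the per-packet hot path.
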
@@ -1476,11 +1400,8 @@ int netif_rx(struct sk_buff *skb)
 enqueue:
 			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
-#ifndef OFFLINE_SAMPLE
-			get_sample_stats(this_cpu);
-#endif
 			local_irq_restore(flags);
-			return queue->cng_level;
+			return NET_RX_SUCCESS;
 		}
 
 	if (queue->throttle)
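
The lone insertion of the patch is the return-value change above: a successful enqueue used to report the sampled per-CPU congestion grade (queue->cng_level, one of the NET_RX_CN_* codes), and after this change it always reports NET_RX_SUCCESS, so callers see only queued-versus-dropped. A hypothetical caller, purely for illustration (not from any real driver):

/* After this patch the enqueue path yields only two outcomes. */
static void deliver(struct sk_buff *skb, unsigned long *rx_dropped)
{
	if (netif_rx(skb) == NET_RX_DROP)
		(*rx_dropped)++;	/* account it; no congestion grade to react to */
	/* NET_RX_CN_LOW/MOD/HIGH can no longer come back on this path */
}
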
@@ -3300,8 +3221,6 @@ static int __init net_dev_init(void)
 		queue = &per_cpu(softnet_data, i);
 		skb_queue_head_init(&queue->input_pkt_queue);
 		queue->throttle = 0;
-		queue->cng_level = 0;
-		queue->avg_blog = 10; /* arbitrary non-zero */
 		queue->completion_queue = NULL;
 		INIT_LIST_HEAD(&queue->poll_list);
 		set_bit(__LINK_STATE_START, &queue->backlog_dev.state);
@@ -3310,11 +3229,6 @@ static int __init net_dev_init(void)
 		atomic_set(&queue->backlog_dev.refcnt, 1);
 	}
 
-#ifdef OFFLINE_SAMPLE
-	samp_timer.expires = jiffies + (10 * HZ);
-	add_timer(&samp_timer);
-#endif
-
 	dev_boot_phase = 0;
 
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action, NULL);
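
For reference, the OFFLINE_SAMPLE variant being removed drove the sampler with a classic self-rearming timer: armed once at init for jiffies + 10*HZ, after which sample_queue() re-armed itself one jiffy ahead on every run. The pattern in miniature, using the timer API of that era (do_sampling() is a hypothetical stand-in for get_sample_stats()):

static void sample_fn(unsigned long dummy);
static struct timer_list my_timer = TIMER_INITIALIZER(sample_fn, 0, 0);

static void sample_fn(unsigned long dummy)
{
	do_sampling();				/* hypothetical work function */
	mod_timer(&my_timer, jiffies + 1);	/* re-arm one tick out */
}

static void start_sampling(void)
{
	my_timer.expires = jiffies + 10 * HZ;	/* first fire after 10 s */
	add_timer(&my_timer);			/* then every jiffy via mod_timer() */
}
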