path: root/net/core
author	Ingo Molnar <mingo@elte.hu>	2008-08-15 12:15:17 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-15 12:15:17 -0400
commit	f3efbe582b5396d134024c03a5fa253f2a85d9a6 (patch)
tree	e4e15b7567b82d24cb1e7327398286a2b88df04c /net/core
parent	05d3ed0a1fe3ea05ab9f3b8d32576a0bc2e19660 (diff)
parent	b635acec48bcaa9183fcbf4e3955616b0d4119b5 (diff)
Merge branch 'linus' into x86/gart
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/datagram.c	8
-rw-r--r--	net/core/dev.c	89
-rw-r--r--	net/core/gen_estimator.c	9
-rw-r--r--	net/core/neighbour.c	13
-rw-r--r--	net/core/netpoll.c	1
-rw-r--r--	net/core/pktgen.c	83
-rw-r--r--	net/core/request_sock.c	2
-rw-r--r--	net/core/skbuff.c	23
-rw-r--r--	net/core/sock.c	2
-rw-r--r--	net/core/stream.c	6
-rw-r--r--	net/core/sysctl_net_core.c	4
-rw-r--r--	net/core/user_dma.c	6
-rw-r--r--	net/core/utils.c	5
13 files changed, 162 insertions, 89 deletions
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 8a28fc93b724..dd61dcad6019 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -285,7 +285,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -315,7 +315,7 @@ int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
 	for (; list; list = list->next) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -366,7 +366,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -402,7 +402,7 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
 	for (; list; list=list->next) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
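The BUG_TRAP() to WARN_ON() conversions that dominate this merge are mechanical, but note the inverted sense: BUG_TRAP(cond) complained when cond was false, while WARN_ON(expr) fires when expr is true, so every call site negates its predicate. A minimal sketch of the correspondence; the BUG_TRAP definition shown is the historical one, quoted approximately:

/* Historical definition, approximately: complain unless cond holds. */
#define BUG_TRAP(cond) do { \
	if (unlikely(!(cond))) \
		printk(KERN_ERR "KERNEL: assertion (%s) failed at %s (%d)\n", \
		       #cond, __FILE__, __LINE__); \
} while (0)

/* Hence each conversion negates the predicate: */
BUG_TRAP(start <= offset + len);	/* old: assert the invariant  */
WARN_ON(start > offset + len);		/* new: warn on its violation */

The same substitution repeats below in dev.c, request_sock.c, skbuff.c, stream.c and user_dma.c.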
diff --git a/net/core/dev.c b/net/core/dev.c
index 6bf217da9d8f..600bb23c4c2e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1341,9 +1341,6 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 
 void __netif_schedule(struct Qdisc *q)
 {
-	if (WARN_ON_ONCE(q == &noop_qdisc))
-		return;
-
 	if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
 		struct softnet_data *sd;
 		unsigned long flags;
@@ -1799,7 +1796,7 @@ gso:
 	skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
 #endif
 	if (q->enqueue) {
-		spinlock_t *root_lock = qdisc_root_lock(q);
+		spinlock_t *root_lock = qdisc_lock(q);
 
 		spin_lock(root_lock);
 
@@ -1808,7 +1805,6 @@ gso:
 
 		spin_unlock(root_lock);
 
-		rc = rc == NET_XMIT_BYPASS ? NET_XMIT_SUCCESS : rc;
 		goto out;
 	}
 
@@ -1912,7 +1908,6 @@ int netif_rx(struct sk_buff *skb)
 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
 		if (queue->input_pkt_queue.qlen) {
 enqueue:
-			dev_hold(skb->dev);
 			__skb_queue_tail(&queue->input_pkt_queue, skb);
 			local_irq_restore(flags);
 			return NET_RX_SUCCESS;
@@ -1944,22 +1939,6 @@ int netif_rx_ni(struct sk_buff *skb)
 
 EXPORT_SYMBOL(netif_rx_ni);
 
-static inline struct net_device *skb_bond(struct sk_buff *skb)
-{
-	struct net_device *dev = skb->dev;
-
-	if (dev->master) {
-		if (skb_bond_should_drop(skb)) {
-			kfree_skb(skb);
-			return NULL;
-		}
-		skb->dev = dev->master;
-	}
-
-	return dev;
-}
-
-
 static void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
@@ -1976,7 +1955,7 @@ static void net_tx_action(struct softirq_action *h)
 			struct sk_buff *skb = clist;
 			clist = clist->next;
 
-			BUG_TRAP(!atomic_read(&skb->users));
+			WARN_ON(atomic_read(&skb->users));
 			__kfree_skb(skb);
 		}
 	}
@@ -1998,7 +1977,7 @@ static void net_tx_action(struct softirq_action *h)
 			smp_mb__before_clear_bit();
 			clear_bit(__QDISC_STATE_SCHED, &q->state);
 
-			root_lock = qdisc_root_lock(q);
+			root_lock = qdisc_lock(q);
 			if (spin_trylock(root_lock)) {
 				qdisc_run(q);
 				spin_unlock(root_lock);
@@ -2103,7 +2082,7 @@ static int ing_filter(struct sk_buff *skb)
 	rxq = &dev->rx_queue;
 
 	q = rxq->qdisc;
-	if (q) {
+	if (q != &noop_qdisc) {
 		spin_lock(qdisc_lock(q));
 		result = qdisc_enqueue_root(skb, q);
 		spin_unlock(qdisc_lock(q));
@@ -2116,7 +2095,7 @@ static inline struct sk_buff *handle_ing(struct sk_buff *skb,
 					 struct packet_type **pt_prev,
 					 int *ret, struct net_device *orig_dev)
 {
-	if (!skb->dev->rx_queue.qdisc)
+	if (skb->dev->rx_queue.qdisc == &noop_qdisc)
 		goto out;
 
 	if (*pt_prev) {
@@ -2186,6 +2165,7 @@ int netif_receive_skb(struct sk_buff *skb)
 {
 	struct packet_type *ptype, *pt_prev;
 	struct net_device *orig_dev;
+	struct net_device *null_or_orig;
 	int ret = NET_RX_DROP;
 	__be16 type;
 
@@ -2199,10 +2179,14 @@ int netif_receive_skb(struct sk_buff *skb)
 	if (!skb->iif)
 		skb->iif = skb->dev->ifindex;
 
-	orig_dev = skb_bond(skb);
-
-	if (!orig_dev)
-		return NET_RX_DROP;
+	null_or_orig = NULL;
+	orig_dev = skb->dev;
+	if (orig_dev->master) {
+		if (skb_bond_should_drop(skb))
+			null_or_orig = orig_dev; /* deliver only exact match */
+		else
+			skb->dev = orig_dev->master;
+	}
 
 	__get_cpu_var(netdev_rx_stat).total++;
 
@@ -2226,7 +2210,8 @@ int netif_receive_skb(struct sk_buff *skb)
 #endif
 
 	list_for_each_entry_rcu(ptype, &ptype_all, list) {
-		if (!ptype->dev || ptype->dev == skb->dev) {
+		if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		    ptype->dev == orig_dev) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2251,7 +2236,8 @@ ncls:
 	list_for_each_entry_rcu(ptype,
 			&ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
 		if (ptype->type == type &&
-		    (!ptype->dev || ptype->dev == skb->dev)) {
+		    (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+		     ptype->dev == orig_dev)) {
 			if (pt_prev)
 				ret = deliver_skb(skb, pt_prev, orig_dev);
 			pt_prev = ptype;
@@ -2273,6 +2259,20 @@ out:
 	return ret;
 }
 
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+	struct net_device *dev = arg;
+	struct softnet_data *queue = &__get_cpu_var(softnet_data);
+	struct sk_buff *skb, *tmp;
+
+	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+		if (skb->dev == dev) {
+			__skb_unlink(skb, &queue->input_pkt_queue);
+			kfree_skb(skb);
+		}
+}
+
 static int process_backlog(struct napi_struct *napi, int quota)
 {
 	int work = 0;
@@ -2282,7 +2282,6 @@ static int process_backlog(struct napi_struct *napi, int quota)
 	napi->weight = weight_p;
 	do {
 		struct sk_buff *skb;
-		struct net_device *dev;
 
 		local_irq_disable();
 		skb = __skb_dequeue(&queue->input_pkt_queue);
@@ -2291,14 +2290,9 @@ static int process_backlog(struct napi_struct *napi, int quota)
 			local_irq_enable();
 			break;
 		}
-
 		local_irq_enable();
 
-		dev = skb->dev;
-
 		netif_receive_skb(skb);
-
-		dev_put(dev);
 	} while (++work < quota && jiffies == start_time);
 
 	return work;
@@ -2398,7 +2392,7 @@ out:
 	 */
 	if (!cpus_empty(net_dma.channel_mask)) {
 		int chan_idx;
-		for_each_cpu_mask(chan_idx, net_dma.channel_mask) {
+		for_each_cpu_mask_nr(chan_idx, net_dma.channel_mask) {
 			struct dma_chan *chan = net_dma.channels[chan_idx];
 			if (chan)
 				dma_async_memcpy_issue_pending(chan);
@@ -3850,7 +3844,7 @@ static void rollback_registered(struct net_device *dev)
 		dev->uninit(dev);
 
 	/* Notifier chain MUST detach us from master device. */
-	BUG_TRAP(!dev->master);
+	WARN_ON(dev->master);
 
 	/* Remove entries from kobject tree */
 	netdev_unregister_kobject(dev);
@@ -3991,6 +3985,10 @@ int register_netdevice(struct net_device *dev)
 		}
 	}
 
+	/* Enable software GSO if SG is supported. */
+	if (dev->features & NETIF_F_SG)
+		dev->features |= NETIF_F_GSO;
+
 	netdev_initialize_kobject(dev);
 	ret = netdev_register_kobject(dev);
 	if (ret)
@@ -4168,13 +4166,15 @@ void netdev_run_todo(void)
 
 		dev->reg_state = NETREG_UNREGISTERED;
 
+		on_each_cpu(flush_backlog, dev, 1);
+
 		netdev_wait_allrefs(dev);
 
 		/* paranoia */
 		BUG_ON(atomic_read(&dev->refcnt));
-		BUG_TRAP(!dev->ip_ptr);
-		BUG_TRAP(!dev->ip6_ptr);
-		BUG_TRAP(!dev->dn_ptr);
+		WARN_ON(dev->ip_ptr);
+		WARN_ON(dev->ip6_ptr);
+		WARN_ON(dev->dn_ptr);
 
 		if (dev->destructor)
 			dev->destructor(dev);
@@ -4203,6 +4203,7 @@ static void netdev_init_queues(struct net_device *dev)
 {
 	netdev_init_one_queue(dev, &dev->rx_queue, NULL);
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+	spin_lock_init(&dev->tx_global_lock);
 }
 
 /**
@@ -4533,7 +4534,7 @@ static void net_dma_rebalance(struct net_dma *net_dma)
 	i = 0;
 	cpu = first_cpu(cpu_online_map);
 
-	for_each_cpu_mask(chan_idx, net_dma->channel_mask) {
+	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
		chan = net_dma->channels[chan_idx];
 
 		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
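Two related dev.c changes are worth a note. First, netif_rx() no longer pins the device with dev_hold() for every backlogged skb; instead, unregistration runs flush_backlog() on each CPU (see netdev_run_todo()) to purge a dying device's packets, removing two atomic operations from the receive hot path. Second, the open-coded skb_bond() is gone: netif_receive_skb() computes a null_or_orig device and delivers by comparison. A hypothetical helper, not in the patch, that restates the new delivery test:

/* Sketch only: this helper does not exist in the patch; it restates
 * the test inlined in netif_receive_skb() above.  On the normal path
 * null_or_orig is NULL, so wildcard handlers (ptype->dev == NULL)
 * match as before, plus handlers bound to the bond master (skb->dev)
 * or the original slave (orig_dev).  When skb_bond_should_drop()
 * fires, null_or_orig is set to orig_dev, which knocks out the
 * wildcard case and leaves only exact device matches, instead of the
 * old behaviour of freeing the skb outright in skb_bond(). */
static inline int ptype_deliverable(const struct packet_type *ptype,
				    const struct net_device *null_or_orig,
				    const struct net_device *skb_dev,
				    const struct net_device *orig_dev)
{
	return ptype->dev == null_or_orig || ptype->dev == skb_dev ||
	       ptype->dev == orig_dev;
}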
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index 57abe8266be1..a89f32fa94f6 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -99,7 +99,7 @@ struct gen_estimator_head
 
 static struct gen_estimator_head elist[EST_MAX_INTERVAL+1];
 
-/* Protects against NULL dereference */
+/* Protects against NULL dereference and RCU write-side */
 static DEFINE_RWLOCK(est_lock);
 
 static void est_timer(unsigned long arg)
@@ -185,6 +185,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 	est->last_packets = bstats->packets;
 	est->avpps = rate_est->pps<<10;
 
+	write_lock_bh(&est_lock);
 	if (!elist[idx].timer.function) {
 		INIT_LIST_HEAD(&elist[idx].list);
 		setup_timer(&elist[idx].timer, est_timer, idx);
@@ -194,6 +195,7 @@ int gen_new_estimator(struct gnet_stats_basic *bstats,
 		mod_timer(&elist[idx].timer, jiffies + ((HZ/4) << idx));
 
 	list_add_rcu(&est->list, &elist[idx].list);
+	write_unlock_bh(&est_lock);
 	return 0;
 }
 
@@ -212,7 +214,6 @@ static void __gen_kill_estimator(struct rcu_head *head)
  * Removes the rate estimator specified by &bstats and &rate_est
  * and deletes the timer.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic *bstats,
 			struct gnet_stats_rate_est *rate_est)
@@ -226,17 +227,17 @@ void gen_kill_estimator(struct gnet_stats_basic *bstats,
 		if (!elist[idx].timer.function)
 			continue;
 
+		write_lock_bh(&est_lock);
 		list_for_each_entry_safe(e, n, &elist[idx].list, list) {
 			if (e->rate_est != rate_est || e->bstats != bstats)
 				continue;
 
-			write_lock_bh(&est_lock);
 			e->bstats = NULL;
-			write_unlock_bh(&est_lock);
 
 			list_del_rcu(&e->list);
 			call_rcu(&e->e_rcu, __gen_kill_estimator);
 		}
+		write_unlock_bh(&est_lock);
 	}
 }
 
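The est_lock change widens a lock that previously only guarded e->bstats: the estimator lists are RCU-read from the est_timer() softirq, but concurrent writers still need mutual exclusion among themselves, and after the deleted comment gen_new_estimator()/gen_kill_estimator() no longer rely on the rtnl_mutex for that. A generic sketch of the resulting pattern, not taken from the diff; my_elem, my_list and process() are placeholder names:

/* Writers of an RCU-protected list serialize against each other;
 * readers stay lock-free.  The _bh variants matter because the
 * timer side runs in softirq context. */
static void add_elem(struct my_elem *e)
{
	write_lock_bh(&est_lock);		/* serialize writers */
	list_add_rcu(&e->list, &my_list);
	write_unlock_bh(&est_lock);
}

static void scan_elems(void)
{
	struct my_elem *e;

	rcu_read_lock();			/* readers need no lock */
	list_for_each_entry_rcu(e, &my_list, list)
		process(e);
	rcu_read_unlock();
}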
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index f62c8af85d38..9d92e41826e7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2281,6 +2281,7 @@ static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
 	struct neighbour *n = neigh_get_first(seq);
 
 	if (n) {
+		--(*pos);
 		while (*pos) {
 			n = neigh_get_next(seq, n, pos);
 			if (!n)
@@ -2341,6 +2342,7 @@ static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
 	struct pneigh_entry *pn = pneigh_get_first(seq);
 
 	if (pn) {
+		--(*pos);
 		while (*pos) {
 			pn = pneigh_get_next(seq, pn, pos);
 			if (!pn)
@@ -2354,10 +2356,11 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
 {
 	struct neigh_seq_state *state = seq->private;
 	void *rc;
+	loff_t idxpos = *pos;
 
-	rc = neigh_get_idx(seq, pos);
+	rc = neigh_get_idx(seq, &idxpos);
 	if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
-		rc = pneigh_get_idx(seq, pos);
+		rc = pneigh_get_idx(seq, &idxpos);
 
 	return rc;
 }
@@ -2366,7 +2369,6 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 	__acquires(tbl->lock)
 {
 	struct neigh_seq_state *state = seq->private;
-	loff_t pos_minus_one;
 
 	state->tbl = tbl;
 	state->bucket = 0;
@@ -2374,8 +2376,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
 
 	read_lock_bh(&tbl->lock);
 
-	pos_minus_one = *pos - 1;
-	return *pos ? neigh_get_idx_any(seq, &pos_minus_one) : SEQ_START_TOKEN;
+	return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
 }
 EXPORT_SYMBOL(neigh_seq_start);
 
@@ -2385,7 +2386,7 @@ void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	void *rc;
 
 	if (v == SEQ_START_TOKEN) {
-		rc = neigh_get_idx(seq, pos);
+		rc = neigh_get_first(seq);
 		goto out;
 	}
 
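The neighbour changes fix /proc seq_file iteration: ->start() receives the resume offset in *pos and must return the record at that index without clobbering *pos, otherwise a read spanning several buffers skips or repeats entries. Decrementing a local copy lets the neighbour table consume part of the offset and the proxy table the remainder. A condensed sketch of the repaired contract; example_start and the *_table_lookup helpers are hypothetical names standing in for neigh_get_idx_any() and its callees above:

/* Probe with a copy, never with *pos itself: after the first table
 * exhausts part of the index, the second must see the remainder,
 * and the caller's *pos must survive intact for ->next(). */
static void *example_start(struct seq_file *seq, loff_t *pos)
{
	loff_t idxpos = *pos;
	void *rc;

	rc = first_table_lookup(seq, &idxpos);
	if (!rc)
		rc = second_table_lookup(seq, &idxpos);
	return rc;
}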
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c12720895ecf..6c7af390be0a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -70,6 +70,7 @@ static void queue_process(struct work_struct *work)
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
 		if (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq) ||
 		    dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
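netif_tx_queue_frozen() arrives with the tx_global_lock work merged here (see netdev_init_queues() in dev.c above): a queue is frozen while its qdisc state is being manipulated under dev->tx_global_lock, which is distinct from the stopped state a driver sets when its ring fills. Netpoll and pktgen must treat both as do-not-transmit. The helper presumably looks like the following; quoted from the corresponding netdevice.h change from memory, so treat the exact form as an approximation:

/* Approximate form of the new netdevice.h helper: a dedicated
 * __QUEUE_STATE_FROZEN bit, separate from the stopped bit. */
static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}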
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c7d484f7e1c4..a756847e3814 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -168,7 +168,7 @@
 #include <asm/div64.h>		/* do_div */
 #include <asm/timex.h>
 
-#define VERSION "pktgen v2.69: Packet Generator for packet performance testing.\n"
+#define VERSION "pktgen v2.70: Packet Generator for packet performance testing.\n"
 
 #define IP_NAME_SZ 32
 #define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
@@ -189,6 +189,7 @@
 #define F_FLOW_SEQ (1<<11)	/* Sequential flows */
 #define F_IPSEC_ON (1<<12)	/* ipsec on for flows */
 #define F_QUEUE_MAP_RND (1<<13)	/* queue map Random */
+#define F_QUEUE_MAP_CPU (1<<14)	/* queue map mirrors smp_processor_id() */
 
 /* Thread control flag bits */
 #define T_TERMINATE (1<<0)
@@ -621,6 +622,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
 	if (pkt_dev->flags & F_QUEUE_MAP_RND)
 		seq_printf(seq, "QUEUE_MAP_RND ");
 
+	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
+		seq_printf(seq, "QUEUE_MAP_CPU ");
+
 	if (pkt_dev->cflows) {
 		if (pkt_dev->flags & F_FLOW_SEQ)
 			seq_printf(seq, "FLOW_SEQ "); /*in sequence flows*/
@@ -1134,6 +1138,12 @@ static ssize_t pktgen_if_write(struct file *file,
 
 		else if (strcmp(f, "!QUEUE_MAP_RND") == 0)
 			pkt_dev->flags &= ~F_QUEUE_MAP_RND;
+
+		else if (strcmp(f, "QUEUE_MAP_CPU") == 0)
+			pkt_dev->flags |= F_QUEUE_MAP_CPU;
+
+		else if (strcmp(f, "!QUEUE_MAP_CPU") == 0)
+			pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
 #ifdef CONFIG_XFRM
 		else if (strcmp(f, "IPSEC") == 0)
 			pkt_dev->flags |= F_IPSEC_ON;
@@ -1895,6 +1905,23 @@ static int pktgen_device_event(struct notifier_block *unused,
 	return NOTIFY_DONE;
 }
 
+static struct net_device *pktgen_dev_get_by_name(struct pktgen_dev *pkt_dev, const char *ifname)
+{
+	char b[IFNAMSIZ+5];
+	int i = 0;
+
+	for(i=0; ifname[i] != '@'; i++) {
+		if(i == IFNAMSIZ)
+			break;
+
+		b[i] = ifname[i];
+	}
+	b[i] = 0;
+
+	return dev_get_by_name(&init_net, b);
+}
+
+
 /* Associate pktgen_dev with a device. */
 
 static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
@@ -1908,7 +1935,7 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
 		pkt_dev->odev = NULL;
 	}
 
-	odev = dev_get_by_name(&init_net, ifname);
+	odev = pktgen_dev_get_by_name(pkt_dev, ifname);
 	if (!odev) {
 		printk(KERN_ERR "pktgen: no such netdevice: \"%s\"\n", ifname);
 		return -ENODEV;
@@ -1934,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
  */
 static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 {
+	int ntxq;
+
 	if (!pkt_dev->odev) {
 		printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
 		       "setup_inject.\n");
@@ -1942,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
 		return;
 	}
 
+	/* make sure that we don't pick a non-existing transmit queue */
+	ntxq = pkt_dev->odev->real_num_tx_queues;
+	if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+		printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+		       "disabled because CPU count (%d) exceeds number ",
+		       num_online_cpus());
+		printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+		       "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+		pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+	}
+	if (ntxq <= pkt_dev->queue_map_min) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_min (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_min);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_min = ntxq - 1;
+	}
+	if (ntxq <= pkt_dev->queue_map_max) {
+		printk(KERN_WARNING "pktgen: WARNING: Requested "
+		       "queue_map_max (%d) exceeds number of tx\n",
+		       pkt_dev->queue_map_max);
+		printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+		       "%s, resetting\n", ntxq, pkt_dev->odev->name);
+		pkt_dev->queue_map_max = ntxq - 1;
+	}
+
 	/* Default to the interface's mac if not explicitly set. */
 
 	if (is_zero_ether_addr(pkt_dev->src_mac))
@@ -2085,15 +2141,19 @@ static inline int f_pick(struct pktgen_dev *pkt_dev)
 		if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
 			/* reset time */
 			pkt_dev->flows[flow].count = 0;
+			pkt_dev->flows[flow].flags = 0;
 			pkt_dev->curfl += 1;
 			if (pkt_dev->curfl >= pkt_dev->cflows)
 				pkt_dev->curfl = 0; /*reset */
 		}
 	} else {
 		flow = random32() % pkt_dev->cflows;
+		pkt_dev->curfl = flow;
 
-		if (pkt_dev->flows[flow].count > pkt_dev->lflow)
+		if (pkt_dev->flows[flow].count > pkt_dev->lflow) {
 			pkt_dev->flows[flow].count = 0;
+			pkt_dev->flows[flow].flags = 0;
+		}
 	}
 
 	return pkt_dev->curfl;
@@ -2125,7 +2185,11 @@ static void get_ipsec_sa(struct pktgen_dev *pkt_dev, int flow)
 #endif
 static void set_cur_queue_map(struct pktgen_dev *pkt_dev)
 {
-	if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
+
+	if (pkt_dev->flags & F_QUEUE_MAP_CPU)
+		pkt_dev->cur_queue_map = smp_processor_id();
+
+	else if (pkt_dev->queue_map_min < pkt_dev->queue_map_max) {
 		__u16 t;
 		if (pkt_dev->flags & F_QUEUE_MAP_RND) {
 			t = random32() %
@@ -2162,7 +2226,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 			mc = random32() % pkt_dev->src_mac_count;
 		else {
 			mc = pkt_dev->cur_src_mac_offset++;
-			if (pkt_dev->cur_src_mac_offset >
+			if (pkt_dev->cur_src_mac_offset >=
 			    pkt_dev->src_mac_count)
 				pkt_dev->cur_src_mac_offset = 0;
 		}
@@ -2189,7 +2253,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
 
 		else {
 			mc = pkt_dev->cur_dst_mac_offset++;
-			if (pkt_dev->cur_dst_mac_offset >
+			if (pkt_dev->cur_dst_mac_offset >=
 			    pkt_dev->dst_mac_count) {
 				pkt_dev->cur_dst_mac_offset = 0;
 			}
@@ -3305,6 +3369,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 	txq = netdev_get_tx_queue(odev, queue_map);
 	if (netif_tx_queue_stopped(txq) ||
+	    netif_tx_queue_frozen(txq) ||
 	    need_resched()) {
 		idle_start = getCurUs();
 
@@ -3320,7 +3385,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 
 		pkt_dev->idle_acc += getCurUs() - idle_start;
 
-		if (netif_tx_queue_stopped(txq)) {
+		if (netif_tx_queue_stopped(txq) ||
+		    netif_tx_queue_frozen(txq)) {
 			pkt_dev->next_tx_us = getCurUs();	/* TODO */
 			pkt_dev->next_tx_ns = 0;
 			goto out;	/* Try the next interface */
@@ -3352,7 +3418,8 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
 	txq = netdev_get_tx_queue(odev, queue_map);
 
 	__netif_tx_lock_bh(txq);
-	if (!netif_tx_queue_stopped(txq)) {
+	if (!netif_tx_queue_stopped(txq) &&
+	    !netif_tx_queue_frozen(txq)) {
 
 		atomic_inc(&(pkt_dev->skb->users));
 	retry_now:
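The pktgen headline here is multiqueue support (v2.70): the QUEUE_MAP_CPU flag makes set_cur_queue_map() pick the transmit queue from smp_processor_id(), pktgen_setup_inject() clamps queue_map_min/max to the device's real_num_tx_queues, and pktgen_dev_get_by_name() strips an '@' suffix so several pktgen devices (say eth0@0 and eth0@1) can bind to one physical interface. A userspace sketch of enabling the flag through the usual /proc interface; the eth0@0 alias is a hypothetical example, and this assumes the standard pktgen "flag" command syntax (the usual pgset "flag QUEUE_MAP_CPU"):

/* Minimal userspace sketch: write a pktgen command to a device file
 * that has already been added to a pktgen thread. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/pktgen/eth0@0", "w"); /* hypothetical alias */

	if (!f)
		return 1;
	fputs("flag QUEUE_MAP_CPU\n", f);	/* parsed by pktgen_if_write() above */
	fclose(f);
	return 0;
}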
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 2d3035d3abd7..7552495aff7a 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -123,7 +123,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 		}
 	}
 
-	BUG_TRAP(lopt->qlen == 0);
+	WARN_ON(lopt->qlen != 0);
 	if (lopt_size > PAGE_SIZE)
 		vfree(lopt);
 	else
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e4115672b6cf..84640172d65d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -485,6 +485,9 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	C(head);
 	C(data);
 	C(truesize);
+#if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
+	C(do_not_encrypt);
+#endif
 	atomic_set(&n->users, 1);
 
 	atomic_inc(&(skb_shinfo(skb)->dataref));
@@ -1200,7 +1203,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -1229,7 +1232,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
 	for (; list; list = list->next) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -1475,7 +1478,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + frag->size;
 		if ((copy = end - offset) > 0) {
@@ -1503,7 +1506,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
 	for (; list; list = list->next) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -1552,7 +1555,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -1581,7 +1584,7 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
 	for (; list; list = list->next) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -1629,7 +1632,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -1662,7 +1665,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
 		__wsum csum2;
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
@@ -2373,7 +2376,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		if ((copy = end - offset) > 0) {
@@ -2397,7 +2400,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 	for (; list; list = list->next) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + list->len;
 		if ((copy = end - offset) > 0) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 10a64d57078c..91f8bbc93526 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -180,7 +180,7 @@ static const char *af_family_clock_key_strings[AF_MAX+1] = {
   "clock-AF_ASH"   , "clock-AF_ECONET"  , "clock-AF_ATMSVC" ,
   "clock-21"       , "clock-AF_SNA"     , "clock-AF_IRDA"   ,
   "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC"    ,
-  "clock-27"       , "clock-28"         , "clock-29"        ,
+  "clock-27"       , "clock-28"         , "clock-AF_CAN"    ,
   "clock-AF_TIPC"  , "clock-AF_BLUETOOTH", "clock-AF_IUCV"  ,
   "clock-AF_RXRPC" , "clock-AF_MAX"
 };
diff --git a/net/core/stream.c b/net/core/stream.c
index 4a0ad152c9c4..a6b3437ff082 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -192,13 +192,13 @@ void sk_stream_kill_queues(struct sock *sk)
 	__skb_queue_purge(&sk->sk_error_queue);
 
 	/* Next, the write queue. */
-	BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
+	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 
 	/* Account for returned memory. */
 	sk_mem_reclaim(sk);
 
-	BUG_TRAP(!sk->sk_wmem_queued);
-	BUG_TRAP(!sk->sk_forward_alloc);
+	WARN_ON(sk->sk_wmem_queued);
+	WARN_ON(sk->sk_forward_alloc);
 
 	/* It is _impossible_ for the backlog to contain anything
 	 * when we get here.  All user references to this socket
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index a570e2af22cb..f686467ff12b 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -67,7 +67,7 @@ static struct ctl_table net_core_table[] = {
 	{
 		.ctl_name	= NET_CORE_MSG_COST,
 		.procname	= "message_cost",
-		.data		= &net_msg_cost,
+		.data		= &net_ratelimit_state.interval,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec_jiffies,
@@ -76,7 +76,7 @@ static struct ctl_table net_core_table[] = {
 	{
 		.ctl_name	= NET_CORE_MSG_BURST,
 		.procname	= "message_burst",
-		.data		= &net_msg_burst,
+		.data		= &net_ratelimit_state.burst,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
diff --git a/net/core/user_dma.c b/net/core/user_dma.c
index c77aff9c6eb3..164b090d5ac3 100644
--- a/net/core/user_dma.c
+++ b/net/core/user_dma.c
@@ -27,13 +27,13 @@
 
 #include <linux/dmaengine.h>
 #include <linux/socket.h>
-#include <linux/rtnetlink.h> /* for BUG_TRAP */
 #include <net/tcp.h>
 #include <net/netdma.h>
 
 #define NET_DMA_DEFAULT_COPYBREAK 4096
 
 int sysctl_tcp_dma_copybreak = NET_DMA_DEFAULT_COPYBREAK;
+EXPORT_SYMBOL(sysctl_tcp_dma_copybreak);
 
 /**
  *	dma_skb_copy_datagram_iovec - Copy a datagram to an iovec.
@@ -71,7 +71,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + skb_shinfo(skb)->frags[i].size;
 		copy = end - offset;
@@ -100,7 +100,7 @@ int dma_skb_copy_datagram_iovec(struct dma_chan *chan,
 	for (; list; list = list->next) {
 		int end;
 
-		BUG_TRAP(start <= offset + len);
+		WARN_ON(start > offset + len);
 
 		end = start + list->len;
 		copy = end - offset;
diff --git a/net/core/utils.c b/net/core/utils.c
index 8031eb59054e..72e0ebe964a0 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -31,17 +31,16 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
-int net_msg_cost __read_mostly = 5*HZ;
-int net_msg_burst __read_mostly = 10;
 int net_msg_warn __read_mostly = 1;
 EXPORT_SYMBOL(net_msg_warn);
 
+DEFINE_RATELIMIT_STATE(net_ratelimit_state, 5 * HZ, 10);
 /*
  * All net warning printk()s should be guarded by this function.
  */
 int net_ratelimit(void)
 {
-	return __printk_ratelimit(net_msg_cost, net_msg_burst);
+	return __ratelimit(&net_ratelimit_state);
 }
 EXPORT_SYMBOL(net_ratelimit);
 
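Together, utils.c and sysctl_net_core.c move net_ratelimit() onto the generic ratelimit infrastructure: the old net_msg_cost/net_msg_burst integers become a DEFINE_RATELIMIT_STATE, and the message_cost/message_burst sysctls now write straight into its interval and burst fields. A minimal sketch of the generic API for any other caller; my_rs and noisy_path are hypothetical names:

/* DEFINE_RATELIMIT_STATE(name, interval, burst) declares the shared
 * state; __ratelimit() returns nonzero while the caller is still
 * within burst for the current interval. */
#include <linux/ratelimit.h>

static DEFINE_RATELIMIT_STATE(my_rs, 5 * HZ, 10);

static void noisy_path(void)
{
	if (__ratelimit(&my_rs))
		printk(KERN_WARNING "at most 10 of these every 5 seconds\n");
}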