path: root/net/core
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/Makefile    |  2
-rw-r--r--  net/core/dev.c       | 19
-rw-r--r--  net/core/neighbour.c | 14
-rw-r--r--  net/core/netevent.c  | 69
-rw-r--r--  net/core/skbuff.c    | 45
5 files changed, 114 insertions(+), 35 deletions(-)
diff --git a/net/core/Makefile b/net/core/Makefile
index e9bd2467d5a9..2645ba428d48 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
 
-obj-y += dev.o ethtool.o dev_mcast.o dst.o \
+obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
 	neighbour.o rtnetlink.o utils.o link_watch.o filter.o
 
 obj-$(CONFIG_XFRM) += flow.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 4d2b5167d7f5..d95e2626d944 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1166,11 +1166,6 @@ int skb_checksum_help(struct sk_buff *skb, int inward)
 		goto out_set_summed;
 
 	if (unlikely(skb_shinfo(skb)->gso_size)) {
-		static int warned;
-
-		WARN_ON(!warned);
-		warned = 1;
-
 		/* Let GSO fix up the checksum. */
 		goto out_set_summed;
 	}
@@ -1220,11 +1215,6 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
 	__skb_pull(skb, skb->mac_len);
 
 	if (unlikely(skb->ip_summed != CHECKSUM_HW)) {
-		static int warned;
-
-		WARN_ON(!warned);
-		warned = 1;
-
 		if (skb_header_cloned(skb) &&
 		    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
 			return ERR_PTR(err);
@@ -3429,12 +3419,9 @@ static void net_dma_rebalance(void)
 	unsigned int cpu, i, n;
 	struct dma_chan *chan;
 
-	lock_cpu_hotplug();
-
 	if (net_dma_count == 0) {
 		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data.net_dma, cpu), NULL);
-		unlock_cpu_hotplug();
+			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
 		return;
 	}
 
@@ -3447,15 +3434,13 @@ static void net_dma_rebalance(void)
 			   + (i < (num_online_cpus() % net_dma_count) ? 1 : 0));
 
 		while(n) {
-			per_cpu(softnet_data.net_dma, cpu) = chan;
+			per_cpu(softnet_data, cpu).net_dma = chan;
 			cpu = next_cpu(cpu, cpu_online_map);
 			n--;
 		}
 		i++;
 	}
 	rcu_read_unlock();
-
-	unlock_cpu_hotplug();
 }
 
 /**
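
The net_dma_rebalance() hunks above, besides dropping the cpu-hotplug locking, correct the per_cpu() usage: the macro takes the per-CPU variable and a CPU id, and the struct member is selected on the result rather than inside the macro. A minimal sketch of that accessor pattern, using a hypothetical per-CPU struct in place of softnet_data:

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU state, standing in for softnet_data. */
struct example_state {
	void *chan;
};

static DEFINE_PER_CPU(struct example_state, example_state);

static void example_assign(void *chan)
{
	unsigned int cpu;

	/*
	 * per_cpu() names the per-CPU variable and the CPU; the member
	 * is then dereferenced on the result, as in the fixed hunks.
	 */
	for_each_online_cpu(cpu)
		per_cpu(example_state, cpu).chan = chan;
}

The old form, per_cpu(softnet_data.net_dma, cpu), passed a struct member where the per-CPU symbol was expected, which is why the arguments had to be swapped.
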
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 7ad681f5e712..5130d2efdbbe 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -29,6 +29,7 @@
 #include <net/neighbour.h>
 #include <net/dst.h>
 #include <net/sock.h>
+#include <net/netevent.h>
 #include <linux/rtnetlink.h>
 #include <linux/random.h>
 #include <linux/string.h>
@@ -754,6 +755,7 @@ static void neigh_timer_handler(unsigned long arg)
 			neigh->nud_state = NUD_STALE;
 			neigh->updated = jiffies;
 			neigh_suspect(neigh);
+			notify = 1;
 		}
 	} else if (state & NUD_DELAY) {
 		if (time_before_eq(now,
@@ -762,6 +764,7 @@ static void neigh_timer_handler(unsigned long arg)
 			neigh->nud_state = NUD_REACHABLE;
 			neigh->updated = jiffies;
 			neigh_connect(neigh);
+			notify = 1;
 			next = neigh->confirmed + neigh->parms->reachable_time;
 		} else {
 			NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
@@ -819,6 +822,8 @@ static void neigh_timer_handler(unsigned long arg)
 out:
 		write_unlock(&neigh->lock);
 	}
+	if (notify)
+		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
 
 #ifdef CONFIG_ARPD
 	if (notify && neigh->parms->app_probes)
@@ -926,9 +931,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 {
 	u8 old;
 	int err;
-#ifdef CONFIG_ARPD
 	int notify = 0;
-#endif
 	struct net_device *dev;
 	int update_isrouter = 0;
 
@@ -948,9 +951,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 		neigh_suspect(neigh);
 		neigh->nud_state = new;
 		err = 0;
-#ifdef CONFIG_ARPD
 		notify = old & NUD_VALID;
-#endif
 		goto out;
 	}
 
@@ -1022,9 +1023,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
 		if (!(new & NUD_CONNECTED))
 			neigh->confirmed = jiffies -
 				      (neigh->parms->base_reachable_time << 1);
-#ifdef CONFIG_ARPD
 		notify = 1;
-#endif
 	}
 	if (new == old)
 		goto out;
@@ -1056,6 +1055,9 @@ out:
 			(neigh->flags & ~NTF_ROUTER);
 	}
 	write_unlock_bh(&neigh->lock);
+
+	if (notify)
+		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
#ifdef CONFIG_ARPD
 	if (notify && neigh->parms->app_probes)
 		neigh_app_notify(neigh);
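
The new call sites above hand the affected struct neighbour straight to the netevent chain, so a listener sees NUD state transitions (NUD_STALE, NUD_REACHABLE, and the updates made in neigh_update()) as they happen. A sketch of such a listener callback; the names sample_neigh_event and sample_neigh_nb are illustrative, not part of this patch:

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <net/netevent.h>
#include <net/neighbour.h>

/*
 * Illustrative listener: for NETEVENT_NEIGH_UPDATE the void * argument
 * is the struct neighbour that just changed state.
 */
static int sample_neigh_event(struct notifier_block *nb,
			      unsigned long event, void *ctx)
{
	struct neighbour *neigh = ctx;

	if (event != NETEVENT_NEIGH_UPDATE)
		return NOTIFY_DONE;

	/* e.g. push the new state out to offload hardware */
	printk(KERN_DEBUG "netevent: neigh %p nud_state 0x%x\n",
	       neigh, neigh->nud_state);
	return NOTIFY_DONE;
}

static struct notifier_block sample_neigh_nb = {
	.notifier_call	= sample_neigh_event,
};
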
diff --git a/net/core/netevent.c b/net/core/netevent.c
new file mode 100644
index 000000000000..35d02c38554e
--- /dev/null
+++ b/net/core/netevent.c
@@ -0,0 +1,69 @@
+/*
+ *	Network event notifiers
+ *
+ *	Authors:
+ *	Tom Tucker	<tom@opengridcomputing.com>
+ *	Steve Wise	<swise@opengridcomputing.com>
+ *
+ *	This program is free software; you can redistribute it and/or
+ *	modify it under the terms of the GNU General Public License
+ *	as published by the Free Software Foundation; either version
+ *	2 of the License, or (at your option) any later version.
+ *
+ *	Fixes:
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+
+static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
+
+/**
+ *	register_netevent_notifier - register a netevent notifier block
+ *	@nb: notifier
+ *
+ *	Register a notifier to be called when a netevent occurs.
+ *	The notifier passed is linked into the kernel structures and must
+ *	not be reused until it has been unregistered. A negative errno code
+ *	is returned on a failure.
+ */
+int register_netevent_notifier(struct notifier_block *nb)
+{
+	int err;
+
+	err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
+	return err;
+}
+
+/**
+ *	unregister_netevent_notifier - unregister a netevent notifier block
+ *	@nb: notifier
+ *
+ *	Unregister a notifier previously registered by
+ *	register_netevent_notifier(). The notifier is unlinked from the
+ *	kernel structures and may then be reused. A negative errno code
+ *	is returned on a failure.
+ */
+
+int unregister_netevent_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&netevent_notif_chain, nb);
+}
+
+/**
+ *	call_netevent_notifiers - call all netevent notifier blocks
+ *	@val: value passed unmodified to notifier function
+ *	@v: pointer passed unmodified to notifier function
+ *
+ *	Call all network event notifier blocks. Parameters and return value
+ *	are as for notifier_call_chain().
+ */
+
+int call_netevent_notifiers(unsigned long val, void *v)
+{
+	return atomic_notifier_call_chain(&netevent_notif_chain, val, v);
+}
+
+EXPORT_SYMBOL_GPL(register_netevent_notifier);
+EXPORT_SYMBOL_GPL(unregister_netevent_notifier);
+EXPORT_SYMBOL_GPL(call_netevent_notifiers);
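
Hooking into this chain follows the usual notifier-block pattern. A hedged sketch of a module registering a callback such as the one outlined after the neighbour.c hunks above; the module and symbol names are illustrative, and the declarations are assumed to come from <net/netevent.h>, which lies outside this net/core-limited diff. Note the chain is atomic, so callbacks must not sleep:

#include <linux/module.h>
#include <linux/notifier.h>
#include <net/netevent.h>

/* Assumes a notifier_block like the sample_neigh_nb sketched earlier. */
extern struct notifier_block sample_neigh_nb;

static int __init sample_netevent_init(void)
{
	/* Atomic notifier chain: the callback runs without sleeping. */
	return register_netevent_notifier(&sample_neigh_nb);
}

static void __exit sample_netevent_exit(void)
{
	unregister_netevent_notifier(&sample_neigh_nb);
}

module_init(sample_netevent_init);
module_exit(sample_netevent_exit);
MODULE_LICENSE("GPL");
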
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 476aa3978504..022d8894c11d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -71,13 +71,6 @@ static kmem_cache_t *skbuff_head_cache __read_mostly;
 static kmem_cache_t *skbuff_fclone_cache __read_mostly;
 
 /*
- *	lockdep: lock class key used by skb_queue_head_init():
- */
-struct lock_class_key skb_queue_lock_key;
-
-EXPORT_SYMBOL(skb_queue_lock_key);
-
-/*
  *	Keep out-of-line to prevent kernel bloat.
  *	__builtin_return_address is not used because it is not always
  *	reliable.
@@ -256,6 +249,29 @@ nodata:
 	goto out;
 }
 
+/**
+ *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
+ *	@dev: network device to receive on
+ *	@length: length to allocate
+ *	@gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ *	Allocate a new &sk_buff and assign it a usage count of one. The
+ *	buffer has unspecified headroom built in. Users should allocate
+ *	the headroom they think they need without accounting for the
+ *	built in space. The built in space is used for optimisations.
+ *
+ *	%NULL is returned if there is no free memory.
+ */
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+		unsigned int length, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	if (likely(skb))
+		skb_reserve(skb, NET_SKB_PAD);
+	return skb;
+}
 
 static void skb_drop_list(struct sk_buff **listp)
 {
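
__netdev_alloc_skb() reserves NET_SKB_PAD bytes of headroom up front so receive paths get cheap room for headers. A sketch of the intended driver rx-path usage; the helper name example_rx_alloc and the NET_IP_ALIGN adjustment are illustrative, not part of this patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative rx allocation for a hypothetical driver. */
static struct sk_buff *example_rx_alloc(struct net_device *dev,
					unsigned int frame_len)
{
	struct sk_buff *skb;

	/*
	 * GFP_ATOMIC because rx typically runs in softirq context;
	 * NET_SKB_PAD headroom is already reserved by the helper.
	 */
	skb = __netdev_alloc_skb(dev, frame_len + NET_IP_ALIGN, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	skb->dev = dev;
	return skb;
}
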
@@ -846,7 +862,11 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
 		return err;
 
-	for (i = 0; i < nfrags; i++) {
+	i = 0;
+	if (offset >= len)
+		goto drop_pages;
+
+	for (; i < nfrags; i++) {
 		int end = offset + skb_shinfo(skb)->frags[i].size;
 
 		if (end < len) {
@@ -854,9 +874,9 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 			continue;
 		}
 
-		if (len > offset)
-			skb_shinfo(skb)->frags[i++].size = len - offset;
+		skb_shinfo(skb)->frags[i++].size = len - offset;
 
+drop_pages:
 		skb_shinfo(skb)->nr_frags = i;
 
 		for (; i < nfrags; i++)
@@ -864,7 +884,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 
 		if (skb_shinfo(skb)->frag_list)
 			skb_drop_fraglist(skb);
-		break;
+		goto done;
 	}
 
 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
@@ -879,6 +899,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 			return -ENOMEM;
 
 		nfrag->next = frag->next;
+		kfree_skb(frag);
 		frag = nfrag;
 		*fragp = frag;
 	}
@@ -897,6 +918,7 @@ int ___pskb_trim(struct sk_buff *skb, unsigned int len)
 		break;
 	}
 
+done:
 	if (len > skb_headlen(skb)) {
 		skb->data_len -= skb->len - len;
 		skb->len = len;
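
The relabelled control flow above (drop_pages/done) only runs for nonlinear buffers; callers normally reach it through pskb_trim(), which handles purely linear skbs itself and ends up in ___pskb_trim() when paged fragments or a frag_list are present. A small illustrative caller; example_trim and hdr_len are hypothetical:

#include <linux/skbuff.h>

/*
 * Illustrative: trim a received frame down to the length its header
 * claims, which may cut into paged fragments or a frag_list.
 */
static int example_trim(struct sk_buff *skb, unsigned int hdr_len)
{
	/* Returns 0 on success, -ENOMEM if the head could not be expanded. */
	return pskb_trim(skb, hdr_len);
}
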
@@ -2042,6 +2064,7 @@ EXPORT_SYMBOL(__kfree_skb);
 EXPORT_SYMBOL(kfree_skb);
 EXPORT_SYMBOL(__pskb_pull_tail);
 EXPORT_SYMBOL(__alloc_skb);
+EXPORT_SYMBOL(__netdev_alloc_skb);
 EXPORT_SYMBOL(pskb_copy);
 EXPORT_SYMBOL(pskb_expand_head);
 EXPORT_SYMBOL(skb_checksum);