author		Ingo Molnar <mingo@elte.hu>	2009-05-11 08:44:27 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-11 08:44:31 -0400
commit		41fb454ebe6024f5c1e3b3cbc0abc0da762e7b51
tree		51c50bcb67a5039448ddfa1869d7948cab1217e9 /net/ipv6
parent		19c1a6f5764d787113fa323ffb18be7991208f82
parent		091bf7624d1c90cec9e578a18529f615213ff847
Merge commit 'v2.6.30-rc5' into core/iommu
Merge reason: core/iommu was on an .30-rc1 base,
update it to .30-rc5 to refresh.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'net/ipv6')
 -rw-r--r--  net/ipv6/ipv6_sockglue.c          4
 -rw-r--r--  net/ipv6/netfilter/ip6_tables.c  123
 -rw-r--r--  net/ipv6/udp.c                     6
 3 files changed, 43 insertions, 90 deletions
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index d31df0f4bc9a..a7fdf9a27f15 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -380,10 +380,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
 			default:
 				goto sticky_done;
 			}
-
-			if ((rthdr->hdrlen & 1) ||
-			    (rthdr->hdrlen >> 1) != rthdr->segments_left)
-				goto sticky_done;
 		}
 
 	retv = 0;
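
The four removed lines rejected any sticky IPV6_RTHDR option whose routing header did not have an even hdrlen equal to twice segments_left. For reference, RFC 2460 sizes a Type 0 routing header as one 16-octet address (two 8-octet units) per route segment, which is the invariant the check encoded. A minimal standalone sketch of that validation (struct and function names are invented here; the kernel field layout comes from its struct ipv6_rt_hdr):

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-in for the kernel's struct ipv6_rt_hdr. */
    struct rt_hdr {
            uint8_t hdrlen;        /* length in 8-octet units, excluding the first 8 octets */
            uint8_t segments_left; /* route segments still to be visited */
    };

    /* Each 16-octet IPv6 address occupies two 8-octet units, so a header
     * carrying exactly segments_left addresses has hdrlen == 2 * segments_left;
     * this mirrors the condition the removed lines tested. */
    static bool rthdr_len_matches_segments(const struct rt_hdr *rthdr)
    {
            return !(rthdr->hdrlen & 1) &&
                   (rthdr->hdrlen >> 1) == rthdr->segments_left;
    }
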
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index dfed176aed37..219e165aea10 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -365,9 +365,9 @@ ip6t_do_table(struct sk_buff *skb,
 
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
 
-	rcu_read_lock_bh();
-	private = rcu_dereference(table->private);
-	table_base = rcu_dereference(private->entries[smp_processor_id()]);
+	xt_info_rdlock_bh();
+	private = table->private;
+	table_base = private->entries[smp_processor_id()];
 
 	e = get_entry(table_base, private->hook_entry[hook]);
 
@@ -466,7 +466,7 @@ ip6t_do_table(struct sk_buff *skb,
 #ifdef CONFIG_NETFILTER_DEBUG
 	((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;
 #endif
-	rcu_read_unlock_bh();
+	xt_info_rdunlock_bh();
 
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
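
These two hunks swap the RCU read side for the xt_info_rdlock_bh()/xt_info_rdunlock_bh() helpers that v2.6.30-rc brought in with the x_tables locking rework. The helpers themselves live outside this diff; the sketch below shows one way such a per-CPU reader/writer scheme can be built (a reconstruction under stated assumptions, not necessarily the exact upstream code): each CPU owns a spinlock plus a recursion count, the packet path locks only its own CPU's lock, and counter code locks any chosen CPU remotely via xt_info_wrlock().

    #include <linux/spinlock.h>
    #include <linux/percpu.h>

    /* One lock per CPU; 'readers' lets the owning CPU re-enter its own
     * lock from nested softirq processing without deadlocking. */
    struct xt_info_lock {
            spinlock_t lock;
            unsigned char readers;
    };
    static DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);

    /* Packet path: runs with bottom halves off and only ever touches the
     * local CPU's lock, so the common case is an uncontended spinlock. */
    static inline void xt_info_rdlock_bh(void)
    {
            struct xt_info_lock *lock;

            local_bh_disable();
            lock = &__get_cpu_var(xt_info_locks);
            if (likely(!lock->readers++))
                    spin_lock(&lock->lock);
    }

    static inline void xt_info_rdunlock_bh(void)
    {
            struct xt_info_lock *lock = &__get_cpu_var(xt_info_locks);

            if (likely(!--lock->readers))
                    spin_unlock(&lock->lock);
            local_bh_enable();
    }

    /* Counter readers/updaters: lock one CPU's table copy at a time. */
    static inline void xt_info_wrlock(unsigned int cpu)
    {
            spin_lock(&per_cpu(xt_info_locks, cpu).lock);
    }

    static inline void xt_info_wrunlock(unsigned int cpu)
    {
            spin_unlock(&per_cpu(xt_info_locks, cpu).lock);
    }
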
@@ -926,9 +926,12 @@ get_counters(const struct xt_table_info *t,
 	/* Instead of clearing (by a previous call to memset())
 	 * the counters and using adds, we set the counters
 	 * with data used by 'current' CPU
-	 * We dont care about preemption here.
+	 *
+	 * Bottom half has to be disabled to prevent deadlock
+	 * if new softirq were to run and call ipt_do_table
 	 */
-	curcpu = raw_smp_processor_id();
+	local_bh_disable();
+	curcpu = smp_processor_id();
 
 	i = 0;
 	IP6T_ENTRY_ITERATE(t->entries[curcpu],
@@ -941,72 +944,22 @@ get_counters(const struct xt_table_info *t,
 		if (cpu == curcpu)
 			continue;
 		i = 0;
+		xt_info_wrlock(cpu);
 		IP6T_ENTRY_ITERATE(t->entries[cpu],
 				   t->size,
 				   add_entry_to_counter,
 				   counters,
 				   &i);
+		xt_info_wrunlock(cpu);
 	}
-}
-
-/* We're lazy, and add to the first CPU; overflow works its fey magic
- * and everything is OK. */
-static int
-add_counter_to_entry(struct ip6t_entry *e,
-		     const struct xt_counters addme[],
-		     unsigned int *i)
-{
-	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
-
-	(*i)++;
-	return 0;
-}
-
-/* Take values from counters and add them back onto the current cpu */
-static void put_counters(struct xt_table_info *t,
-			 const struct xt_counters counters[])
-{
-	unsigned int i, cpu;
-
-	local_bh_disable();
-	cpu = smp_processor_id();
-	i = 0;
-	IP6T_ENTRY_ITERATE(t->entries[cpu],
-			   t->size,
-			   add_counter_to_entry,
-			   counters,
-			   &i);
 	local_bh_enable();
 }
 
-static inline int
-zero_entry_counter(struct ip6t_entry *e, void *arg)
-{
-	e->counters.bcnt = 0;
-	e->counters.pcnt = 0;
-	return 0;
-}
-
-static void
-clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info)
-{
-	unsigned int cpu;
-	const void *loc_cpu_entry = info->entries[raw_smp_processor_id()];
-
-	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
-	for_each_possible_cpu(cpu) {
-		memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size);
-		IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size,
-				   zero_entry_counter, NULL);
-	}
-}
-
 static struct xt_counters *alloc_counters(struct xt_table *table)
 {
 	unsigned int countersize;
 	struct xt_counters *counters;
 	struct xt_table_info *private = table->private;
-	struct xt_table_info *info;
 
 	/* We need atomic snapshot of counters: rest doesn't change
 	   (other than comefrom, which userspace doesn't care
@@ -1015,28 +968,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)
 	counters = vmalloc_node(countersize, numa_node_id());
 
 	if (counters == NULL)
-		goto nomem;
-
-	info = xt_alloc_table_info(private->size);
-	if (!info)
-		goto free_counters;
-
-	clone_counters(info, private);
-
-	mutex_lock(&table->lock);
-	xt_table_entry_swap_rcu(private, info);
-	synchronize_net();	/* Wait until smoke has cleared */
+		return ERR_PTR(-ENOMEM);
 
-	get_counters(info, counters);
-	put_counters(private, counters);
-	mutex_unlock(&table->lock);
+	get_counters(private, counters);
 
-	xt_free_table_info(info);
-
- free_counters:
-	vfree(counters);
- nomem:
-	return ERR_PTR(-ENOMEM);
+	return counters;
 }
 
 static int
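
The net effect on get_counters()/alloc_counters() is that the RCU swap-and-clone dance (clone_counters(), xt_table_entry_swap_rcu(), put_counters()) disappears: userspace now reads straight out of the live per-CPU copies, summing the local CPU with bottom halves off and taking each remote CPU's xt_info lock in turn. A compilable toy model of that snapshot discipline (the names and per-CPU layout here are invented for illustration):

    #include <stdint.h>

    #define NR_CPUS  4
    #define NR_RULES 16

    struct counter { uint64_t bcnt, pcnt; };

    /* Toy stand-in for xt_table_info: one full copy of the rule
     * counters per CPU, as the kernel keeps one entries[] copy per CPU. */
    static struct counter per_cpu_counters[NR_CPUS][NR_RULES];

    /* Mirrors get_counters() above: seed the snapshot from the local
     * CPU's copy, then fold in every other CPU's copy while holding that
     * CPU's lock so its counters cannot change mid-read. */
    static void snapshot_counters(struct counter *out, unsigned int curcpu)
    {
            unsigned int cpu, r;

            /* kernel: local_bh_disable() keeps the local softirq packet
             * path off this CPU's copy while we read it */
            for (r = 0; r < NR_RULES; r++)
                    out[r] = per_cpu_counters[curcpu][r];

            for (cpu = 0; cpu < NR_CPUS; cpu++) {
                    if (cpu == curcpu)
                            continue;
                    /* kernel: xt_info_wrlock(cpu) */
                    for (r = 0; r < NR_RULES; r++) {
                            out[r].bcnt += per_cpu_counters[cpu][r].bcnt;
                            out[r].pcnt += per_cpu_counters[cpu][r].pcnt;
                    }
                    /* kernel: xt_info_wrunlock(cpu) */
            }
            /* kernel: local_bh_enable() */
    }
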
@@ -1332,8 +1268,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	    (newinfo->number <= oldinfo->initial_entries))
 		module_put(t->me);
 
-	/* Get the old counters. */
+	/* Get the old counters, and synchronize with replace */
 	get_counters(oldinfo, counters);
+
 	/* Decrease module usage counts and free resource */
 	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
 	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,
@@ -1403,11 +1340,24 @@ do_replace(struct net *net, void __user *user, unsigned int len)
 	return ret;
 }
 
+/* We're lazy, and add to the first CPU; overflow works its fey magic
+ * and everything is OK. */
+static int
+add_counter_to_entry(struct ip6t_entry *e,
+		     const struct xt_counters addme[],
+		     unsigned int *i)
+{
+	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt);
+
+	(*i)++;
+	return 0;
+}
+
 static int
 do_add_counters(struct net *net, void __user *user, unsigned int len,
 		int compat)
 {
-	unsigned int i;
+	unsigned int i, curcpu;
 	struct xt_counters_info tmp;
 	struct xt_counters *paddc;
 	unsigned int num_counters;
@@ -1463,25 +1413,28 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,
 		goto free;
 	}
 
-	mutex_lock(&t->lock);
+
+	local_bh_disable();
 	private = t->private;
 	if (private->number != num_counters) {
 		ret = -EINVAL;
 		goto unlock_up_free;
 	}
 
-	preempt_disable();
 	i = 0;
 	/* Choose the copy that is on our node */
-	loc_cpu_entry = private->entries[raw_smp_processor_id()];
+	curcpu = smp_processor_id();
+	xt_info_wrlock(curcpu);
+	loc_cpu_entry = private->entries[curcpu];
 	IP6T_ENTRY_ITERATE(loc_cpu_entry,
 			   private->size,
 			   add_counter_to_entry,
 			   paddc,
 			   &i);
-	preempt_enable();
+	xt_info_wrunlock(curcpu);
+
 unlock_up_free:
-	mutex_unlock(&t->lock);
+	local_bh_enable();
 	xt_table_unlock(t);
 	module_put(t->me);
 free:
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 6842dd2edd5b..8905712cfbb8 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -53,6 +53,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
 	const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
 	const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
+	__be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
+	__be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
 	int sk_ipv6only = ipv6_only_sock(sk);
 	int sk2_ipv6only = inet_v6_ipv6only(sk2);
 	int addr_type = ipv6_addr_type(sk_rcv_saddr6);
@@ -60,7 +62,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 
 	/* if both are mapped, treat as IPv4 */
 	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
-		return ipv4_rcv_saddr_equal(sk, sk2);
+		return (!sk2_ipv6only &&
+			(!sk_rcv_saddr || !sk2_rcv_saddr ||
+			 sk_rcv_saddr == sk2_rcv_saddr));
 
 	if (addr_type2 == IPV6_ADDR_ANY &&
 	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
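
The udp.c change open-codes the IPv4 comparison for v4-mapped addresses instead of calling ipv4_rcv_saddr_equal(): two bound addresses clash when the second socket accepts IPv4 traffic and either address is the INADDR_ANY wildcard (all-zero) or the two are identical. A standalone restatement of just that predicate (function and type names invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint32_t be32; /* stand-in for the kernel's __be32 */

    /* Mirrors the open-coded test above: for two v4-mapped sockets, the
     * receive addresses "match" (i.e. conflict for bind purposes) when
     * sk2 is not IPv6-only and either address is the 0.0.0.0 wildcard
     * or both addresses are equal. */
    static bool v4_mapped_rcv_saddr_equal(be32 sk_rcv_saddr, be32 sk2_rcv_saddr,
                                          bool sk2_ipv6only)
    {
            return !sk2_ipv6only &&
                   (!sk_rcv_saddr || !sk2_rcv_saddr ||
                    sk_rcv_saddr == sk2_rcv_saddr);
    }
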