author     Linus Torvalds <torvalds@g5.osdl.org>  2006-03-25 11:39:20 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-03-25 11:39:20 -0500
commit     b55813a2e50088ca30df33fa62aeed5d3adb1796 (patch)
tree       be50fe404e94869fe854766e190a5753dbc7dd49 /net
parent     368d17e068f691dba5a4f122c271db5ec9b2ebd6 (diff)
parent     9e19bb6d7a0959f5028d46e1ab99c50f0d36eda8 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6:
  [NETFILTER] x_table.c: sem2mutex
  [IPV4]: Aggregate route entries with different TOS values
  [TCP]: Mark tcp_*mem[] __read_mostly.
  [TCP]: Set default max buffers from memory pool size
  [SCTP]: Fix up sctp_rcv return value
  [NET]: Take RTNL when unregistering notifier
  [WIRELESS]: Fix config dependencies.
  [NET]: Fill in a 32-bit hole in struct sock on 64-bit platforms.
  [NET]: Ensure device name passed to SO_BINDTODEVICE is NULL terminated.
  [MODULES]: Don't allow statically declared exports
  [BRIDGE]: Unaligned accesses in the ethernet bridge
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_stp_bpdu.c    5
-rw-r--r--  net/core/dev.c              7
-rw-r--r--  net/core/sock.c             5
-rw-r--r--  net/ipv4/icmp.c             2
-rw-r--r--  net/ipv4/route.c           45
-rw-r--r--  net/ipv4/tcp.c             26
-rw-r--r--  net/netfilter/x_tables.c   56
-rw-r--r--  net/sctp/input.c           12
8 files changed, 80 insertions, 78 deletions
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8934a54792be..a7ba0cce0b46 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -19,6 +19,7 @@
 #include <linux/llc.h>
 #include <net/llc.h>
 #include <net/llc_pdu.h>
+#include <asm/unaligned.h>
 
 #include "br_private.h"
 #include "br_private_stp.h"
@@ -59,12 +60,12 @@ static inline void br_set_ticks(unsigned char *dest, int j)
 {
         unsigned long ticks = (STP_HZ * j)/ HZ;
 
-        *((__be16 *) dest) = htons(ticks);
+        put_unaligned(htons(ticks), (__be16 *)dest);
 }
 
 static inline int br_get_ticks(const unsigned char *src)
 {
-        unsigned long ticks = ntohs(*(__be16 *)src);
+        unsigned long ticks = ntohs(get_unaligned((__be16 *)src));
 
         return (ticks * HZ + STP_HZ - 1) / STP_HZ;
 }
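
The bridge change above replaces direct 16-bit loads and stores on BPDU time fields with put_unaligned()/get_unaligned(), because those fields are not guaranteed to sit on a 2-byte boundary and some architectures fault on misaligned access. Below is a minimal userspace sketch of the same idea; the memcpy-based helpers and the sample buffer are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htons/ntohs */

/* memcpy-based stand-ins for put_unaligned()/get_unaligned(): the compiler
 * emits byte-safe accesses, so odd addresses never trap. */
static void put_unaligned_be16(uint16_t val, unsigned char *p)
{
        uint16_t be = htons(val);
        memcpy(p, &be, sizeof(be));
}

static uint16_t get_unaligned_be16(const unsigned char *p)
{
        uint16_t be;
        memcpy(&be, p, sizeof(be));
        return ntohs(be);
}

int main(void)
{
        unsigned char pdu[8];
        unsigned char *field = pdu + 1;   /* deliberately misaligned offset */

        put_unaligned_be16(256, field);   /* e.g. a BPDU tick count */
        printf("stored 256, read back %u\n", get_unaligned_be16(field));
        return 0;
}

On x86 the old cast-and-dereference usually works anyway; the helpers matter on architectures such as sparc64, where a misaligned 16-bit load traps, which is what the bridge fix addresses.
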
diff --git a/net/core/dev.c b/net/core/dev.c
index 08dec6eb922b..e0489ca731c5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -977,7 +977,12 @@ int register_netdevice_notifier(struct notifier_block *nb)
 
 int unregister_netdevice_notifier(struct notifier_block *nb)
 {
-        return notifier_chain_unregister(&netdev_chain, nb);
+        int err;
+
+        rtnl_lock();
+        err = notifier_chain_unregister(&netdev_chain, nb);
+        rtnl_unlock();
+        return err;
 }
 
 /**
diff --git a/net/core/sock.c b/net/core/sock.c
index 1a7e6eac90b0..e110b9004147 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -404,8 +404,9 @@ set_rcvbuf:
                 if (!valbool) {
                         sk->sk_bound_dev_if = 0;
                 } else {
-                        if (optlen > IFNAMSIZ)
-                                optlen = IFNAMSIZ;
+                        if (optlen > IFNAMSIZ - 1)
+                                optlen = IFNAMSIZ - 1;
+                        memset(devname, 0, sizeof(devname));
                         if (copy_from_user(devname, optval, optlen)) {
                                 ret = -EFAULT;
                                 break;
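
The sock.c hunk caps the copied length at IFNAMSIZ - 1 and zeroes devname first, so whatever userspace passes to SO_BINDTODEVICE reaches dev_get_by_name() as a NUL-terminated string. For reference, a hedged userspace sketch of how the option is normally used; the interface name "eth0" is only an example and the call typically needs CAP_NET_RAW.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/if.h>        /* IFNAMSIZ */

int main(void)
{
        char ifname[IFNAMSIZ] = "eth0";   /* example device name */
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }
        /* Pass strlen() + 1 so the terminating NUL is included; the kernel
         * fix above additionally guarantees termination even for callers
         * that hand in a full, unterminated IFNAMSIZ-byte buffer. */
        if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                       ifname, strlen(ifname) + 1) < 0)
                perror("setsockopt(SO_BINDTODEVICE)");

        close(fd);
        return 0;
}
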
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e7bbff4340bb..9831fd2c73a0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -753,7 +753,7 @@ static void icmp_redirect(struct sk_buff *skb)
         case ICMP_REDIR_HOST:
         case ICMP_REDIR_HOSTTOS:
                 ip_rt_redirect(skb->nh.iph->saddr, ip, skb->h.icmph->un.gateway,
-                               iph->saddr, iph->tos, skb->dev);
+                               iph->saddr, skb->dev);
                 break;
         }
 out:
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fca5fe0cf94a..94fcbc5e5a1b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -55,6 +55,8 @@
  *              Robert Olsson           :       Added rt_cache statistics
  *              Arnaldo C. Melo         :       Convert proc stuff to seq_file
  *              Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
+ *              Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
+ *              Ilia Sotnikov           :       Removed TOS from hash calculations
  *
  *              This program is free software; you can redistribute it and/or
  *              modify it under the terms of the GNU General Public License
@@ -247,9 +249,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 static int rt_intern_hash(unsigned hash, struct rtable *rth,
                                 struct rtable **res);
 
-static unsigned int rt_hash_code(u32 daddr, u32 saddr, u8 tos)
+static unsigned int rt_hash_code(u32 daddr, u32 saddr)
 {
-        return (jhash_3words(daddr, saddr, (u32) tos, rt_hash_rnd)
+        return (jhash_2words(daddr, saddr, rt_hash_rnd)
                 & rt_hash_mask);
 }
 
@@ -1111,7 +1113,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
 }
 
 void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
-                    u32 saddr, u8 tos, struct net_device *dev)
+                    u32 saddr, struct net_device *dev)
 {
         int i, k;
         struct in_device *in_dev = in_dev_get(dev);
@@ -1119,8 +1121,6 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
         u32 skeys[2] = { saddr, 0 };
         int ikeys[2] = { dev->ifindex, 0 };
 
-        tos &= IPTOS_RT_MASK;
-
         if (!in_dev)
                 return;
 
@@ -1141,8 +1141,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
         for (i = 0; i < 2; i++) {
                 for (k = 0; k < 2; k++) {
                         unsigned hash = rt_hash_code(daddr,
-                                                     skeys[i] ^ (ikeys[k] << 5),
-                                                     tos);
+                                                     skeys[i] ^ (ikeys[k] << 5));
 
                         rthp=&rt_hash_table[hash].chain;
 
@@ -1152,7 +1151,6 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
 
                                 if (rth->fl.fl4_dst != daddr ||
                                     rth->fl.fl4_src != skeys[i] ||
-                                    rth->fl.fl4_tos != tos ||
                                     rth->fl.oif != ikeys[k] ||
                                     rth->fl.iif != 0) {
                                         rthp = &rth->u.rt_next;
@@ -1232,10 +1230,9 @@ reject_redirect:
         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
                 printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
                         "%u.%u.%u.%u ignored.\n"
-                        "  Advised path = %u.%u.%u.%u -> %u.%u.%u.%u, "
-                        "tos %02x\n",
+                        "  Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
                         NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
-                        NIPQUAD(saddr), NIPQUAD(daddr), tos);
+                        NIPQUAD(saddr), NIPQUAD(daddr));
 #endif
         in_dev_put(in_dev);
 }
@@ -1253,8 +1250,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
                             rt->u.dst.expires) {
                                 unsigned hash = rt_hash_code(rt->fl.fl4_dst,
                                                              rt->fl.fl4_src ^
-                                                             (rt->fl.oif << 5),
-                                                             rt->fl.fl4_tos);
+                                                             (rt->fl.oif << 5));
 #if RT_CACHE_DEBUG >= 1
                                 printk(KERN_DEBUG "ip_rt_advice: redirect to "
                                           "%u.%u.%u.%u/%02x dropped\n",
@@ -1391,14 +1387,13 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
         struct rtable *rth;
         u32  skeys[2] = { iph->saddr, 0, };
         u32  daddr = iph->daddr;
-        u8   tos = iph->tos & IPTOS_RT_MASK;
         unsigned short est_mtu = 0;
 
         if (ipv4_config.no_pmtu_disc)
                 return 0;
 
         for (i = 0; i < 2; i++) {
-                unsigned hash = rt_hash_code(daddr, skeys[i], tos);
+                unsigned hash = rt_hash_code(daddr, skeys[i]);
 
                 rcu_read_lock();
                 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -1407,7 +1402,6 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
                             rth->fl.fl4_src == skeys[i] &&
                             rth->rt_dst == daddr &&
                             rth->rt_src == iph->saddr &&
-                            rth->fl.fl4_tos == tos &&
                             rth->fl.iif == 0 &&
                             !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
                                 unsigned short mtu = new_mtu;
@@ -1658,7 +1652,7 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
         RT_CACHE_STAT_INC(in_slow_mc);
 
         in_dev_put(in_dev);
-        hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5), tos);
+        hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5));
         return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
 
 e_nobufs:
@@ -1823,7 +1817,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
                 return err;
 
         /* put it into the cache */
-        hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5), tos);
+        hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
         return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
 }
 
@@ -1864,7 +1858,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
                 return err;
 
         /* put it into the cache */
-        hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5), tos);
+        hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
         err = rt_intern_hash(hash, rth, &rtres);
         if (err)
                 return err;
@@ -2041,7 +2035,7 @@ local_input:
                 rth->rt_flags &= ~RTCF_LOCAL;
         }
         rth->rt_type = res.type;
-        hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5), tos);
+        hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5));
         err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
         goto done;
 
@@ -2088,7 +2082,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
         int iif = dev->ifindex;
 
         tos &= IPTOS_RT_MASK;
-        hash = rt_hash_code(daddr, saddr ^ (iif << 5), tos);
+        hash = rt_hash_code(daddr, saddr ^ (iif << 5));
 
         rcu_read_lock();
         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2286,10 +2280,8 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
         int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
         unsigned hash;
         if (err == 0) {
-                u32 tos = RT_FL_TOS(oldflp);
-
                 hash = rt_hash_code(oldflp->fl4_dst,
-                                    oldflp->fl4_src ^ (oldflp->oif << 5), tos);
+                                    oldflp->fl4_src ^ (oldflp->oif << 5));
                 err = rt_intern_hash(hash, rth, rp);
         }
 
@@ -2304,7 +2296,6 @@ static inline int ip_mkroute_output(struct rtable** rp,
                              unsigned flags)
 {
 #ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
-        u32 tos = RT_FL_TOS(oldflp);
         unsigned char hop;
         unsigned hash;
         int err = -EINVAL;
@@ -2334,7 +2325,7 @@ static inline int ip_mkroute_output(struct rtable** rp,
 
                 hash = rt_hash_code(oldflp->fl4_dst,
                                     oldflp->fl4_src ^
-                                    (oldflp->oif << 5), tos);
+                                    (oldflp->oif << 5));
                 err = rt_intern_hash(hash, rth, rp);
 
                 /* forward hop information to multipath impl. */
@@ -2563,7 +2554,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
         unsigned hash;
         struct rtable *rth;
 
-        hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5), flp->fl4_tos);
+        hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5));
 
         rcu_read_lock_bh();
         for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
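
Taken together, the route.c hunks stop feeding the TOS bits into the cache hash and into the redirect/PMTU lookups, so cached entries that differ only in TOS collapse into one chain entry. A small userspace sketch of the keying change; the mix function below is a toy stand-in for the kernel's jhash, and the mask, seed and addresses are made-up example values.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define RT_HASH_MASK 0xffu          /* pretend the cache has 256 buckets */
#define RT_HASH_RND  0x12345678u    /* stand-in for rt_hash_rnd */

/* Toy 3-word mixer standing in for jhash_3words(); any mixer shows the point. */
static uint32_t toy_hash_3words(uint32_t a, uint32_t b, uint32_t c, uint32_t seed)
{
        uint32_t h = seed;
        h = (h ^ a) * 0x9e3779b1u;
        h = (h ^ b) * 0x9e3779b1u;
        h = (h ^ c) * 0x9e3779b1u;
        return h ^ (h >> 16);
}

static uint32_t toy_hash_2words(uint32_t a, uint32_t b, uint32_t seed)
{
        return toy_hash_3words(a, b, 0, seed);
}

int main(void)
{
        uint32_t daddr = inet_addr("192.0.2.1");
        uint32_t skey  = inet_addr("198.51.100.7") ^ (2 << 5); /* saddr ^ (oif << 5) */

        /* Old keying: TOS was a third input, so packets differing only in
         * TOS usually landed in different buckets and were cached twice. */
        printf("old key, tos 0x00 -> bucket %u\n",
               toy_hash_3words(daddr, skey, 0x00, RT_HASH_RND) & RT_HASH_MASK);
        printf("old key, tos 0x10 -> bucket %u\n",
               toy_hash_3words(daddr, skey, 0x10, RT_HASH_RND) & RT_HASH_MASK);

        /* New keying: TOS is gone, so every TOS maps to the same bucket and
         * can share one cached route. */
        printf("new key, any tos -> bucket %u\n",
               toy_hash_2words(daddr, skey, RT_HASH_RND) & RT_HASH_MASK);
        return 0;
}
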
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 19ea5c0b094b..87f68e787d0c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -257,6 +257,7 @@
 #include <linux/fs.h>
 #include <linux/random.h>
 #include <linux/bootmem.h>
+#include <linux/cache.h>
 
 #include <net/icmp.h>
 #include <net/tcp.h>
@@ -275,9 +276,9 @@ atomic_t tcp_orphan_count = ATOMIC_INIT(0);
 
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
-int sysctl_tcp_mem[3];
-int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
-int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
+int sysctl_tcp_mem[3] __read_mostly;
+int sysctl_tcp_wmem[3] __read_mostly;
+int sysctl_tcp_rmem[3] __read_mostly;
 
 EXPORT_SYMBOL(sysctl_tcp_mem);
 EXPORT_SYMBOL(sysctl_tcp_rmem);
@@ -2081,7 +2082,8 @@ __setup("thash_entries=", set_thash_entries);
 void __init tcp_init(void)
 {
         struct sk_buff *skb = NULL;
-        int order, i;
+        unsigned long limit;
+        int order, i, max_share;
 
         if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
                 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
@@ -2155,12 +2157,16 @@ void __init tcp_init(void)
         sysctl_tcp_mem[1] = 1024 << order;
         sysctl_tcp_mem[2] = 1536 << order;
 
-        if (order < 3) {
-                sysctl_tcp_wmem[2] = 64 * 1024;
-                sysctl_tcp_rmem[0] = PAGE_SIZE;
-                sysctl_tcp_rmem[1] = 43689;
-                sysctl_tcp_rmem[2] = 2 * 43689;
-        }
+        limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
+        max_share = min(4UL*1024*1024, limit);
+
+        sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
+        sysctl_tcp_wmem[1] = 16*1024;
+        sysctl_tcp_wmem[2] = max(64*1024, max_share);
+
+        sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
+        sysctl_tcp_rmem[1] = 87380;
+        sysctl_tcp_rmem[2] = max(87380, max_share);
 
         printk(KERN_INFO "TCP: Hash tables configured "
                "(established %d bind %d)\n",
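
The tcp_init() hunk derives the default buffer ceilings from the size of the TCP memory pool rather than from fixed constants: limit is sysctl_tcp_mem[1] pages converted to bytes and divided by 128 (the << (PAGE_SHIFT - 7) shift), and the per-socket share is that value capped at 4 MB, with floors of 64 KB for tcp_wmem[2] and 87380 bytes for tcp_rmem[2]. A quick userspace sketch of that arithmetic; the page shift and tcp_mem[1] figure below are hypothetical inputs, not values read from a running kernel.

#include <stdio.h>

int main(void)
{
        unsigned long tcp_mem_pressure = 49152;   /* hypothetical tcp_mem[1], in pages */
        unsigned int  page_shift = 12;            /* 4 KB pages */

        /* limit = tcp_mem[1] * PAGE_SIZE / 128 */
        unsigned long limit = tcp_mem_pressure << (page_shift - 7);
        unsigned long max_share = limit < 4UL * 1024 * 1024
                                  ? limit : 4UL * 1024 * 1024;

        unsigned long wmem2 = max_share > 64UL * 1024 ? max_share : 64UL * 1024;
        unsigned long rmem2 = max_share > 87380UL ? max_share : 87380UL;

        printf("limit = %lu bytes, max_share = %lu bytes\n", limit, max_share);
        printf("tcp_wmem[2] = %lu, tcp_rmem[2] = %lu\n", wmem2, rmem2);
        return 0;
}

The effect is that a single socket can no longer claim an outsized slice of a small pool, while machines with plenty of memory get defaults well above the old fixed ceilings.
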
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 0a29a24d9a72..a657ab5394c3 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -21,10 +21,12 @@
 #include <linux/seq_file.h>
 #include <linux/string.h>
 #include <linux/vmalloc.h>
+#include <linux/mutex.h>
 
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_arp.h>
 
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module");
@@ -32,7 +34,7 @@ MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module");
 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
 
 struct xt_af {
-        struct semaphore mutex;
+        struct mutex mutex;
         struct list_head match;
         struct list_head target;
         struct list_head tables;
@@ -64,11 +66,11 @@ xt_register_target(struct xt_target *target)
 {
         int ret, af = target->family;
 
-        ret = down_interruptible(&xt[af].mutex);
+        ret = mutex_lock_interruptible(&xt[af].mutex);
         if (ret != 0)
                 return ret;
         list_add(&target->list, &xt[af].target);
-        up(&xt[af].mutex);
+        mutex_unlock(&xt[af].mutex);
         return ret;
 }
 EXPORT_SYMBOL(xt_register_target);
@@ -78,9 +80,9 @@ xt_unregister_target(struct xt_target *target)
 {
         int af = target->family;
 
-        down(&xt[af].mutex);
+        mutex_lock(&xt[af].mutex);
         LIST_DELETE(&xt[af].target, target);
-        up(&xt[af].mutex);
+        mutex_unlock(&xt[af].mutex);
 }
 EXPORT_SYMBOL(xt_unregister_target);
 
@@ -89,12 +91,12 @@ xt_register_match(struct xt_match *match)
 {
         int ret, af = match->family;
 
-        ret = down_interruptible(&xt[af].mutex);
+        ret = mutex_lock_interruptible(&xt[af].mutex);
         if (ret != 0)
                 return ret;
 
         list_add(&match->list, &xt[af].match);
-        up(&xt[af].mutex);
+        mutex_unlock(&xt[af].mutex);
 
         return ret;
 }
@@ -105,9 +107,9 @@ xt_unregister_match(struct xt_match *match)
 {
         int af = match->family;
 
-        down(&xt[af].mutex);
+        mutex_lock(&xt[af].mutex);
         LIST_DELETE(&xt[af].match, match);
-        up(&xt[af].mutex);
+        mutex_unlock(&xt[af].mutex);
 }
 EXPORT_SYMBOL(xt_unregister_match);
 
@@ -124,21 +126,21 @@ struct xt_match *xt_find_match(int af, const char *name, u8 revision)
         struct xt_match *m;
         int err = 0;
 
-        if (down_interruptible(&xt[af].mutex) != 0)
+        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
                 return ERR_PTR(-EINTR);
 
         list_for_each_entry(m, &xt[af].match, list) {
                 if (strcmp(m->name, name) == 0) {
                         if (m->revision == revision) {
                                 if (try_module_get(m->me)) {
-                                        up(&xt[af].mutex);
+                                        mutex_unlock(&xt[af].mutex);
                                         return m;
                                 }
                         } else
                                 err = -EPROTOTYPE; /* Found something. */
                 }
         }
-        up(&xt[af].mutex);
+        mutex_unlock(&xt[af].mutex);
         return ERR_PTR(err);
 }
 EXPORT_SYMBOL(xt_find_match);
@@ -149,21 +151,21 @@ struct xt_target *xt_find_target(int af, const char *name, u8 revision)
         struct xt_target *t;
         int err = 0;
 
-        if (down_interruptible(&xt[af].mutex) != 0)
+        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
                 return ERR_PTR(-EINTR);
 
         list_for_each_entry(t, &xt[af].target, list) {
                 if (strcmp(t->name, name) == 0) {
                         if (t->revision == revision) {
                                 if (try_module_get(t->me)) {
-                                        up(&xt[af].mutex);
+                                        mutex_unlock(&xt[af].mutex);
                                         return t;
                                 }
                         } else
                                 err = -EPROTOTYPE; /* Found something. */
                 }
         }
-        up(&xt[af].mutex);
+        mutex_unlock(&xt[af].mutex);
         return ERR_PTR(err);
 }
 EXPORT_SYMBOL(xt_find_target);
@@ -218,7 +220,7 @@ int xt_find_revision(int af, const char *name, u8 revision, int target,
 {
         int have_rev, best = -1;
 
-        if (down_interruptible(&xt[af].mutex) != 0) {
+        if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
                 *err = -EINTR;
                 return 1;
         }
@@ -226,7 +228,7 @@ int xt_find_revision(int af, const char *name, u8 revision, int target,
                 have_rev = target_revfn(af, name, revision, &best);
         else
                 have_rev = match_revfn(af, name, revision, &best);
-        up(&xt[af].mutex);
+        mutex_unlock(&xt[af].mutex);
 
         /* Nothing at all? Return 0 to try loading module. */
         if (best == -1) {
@@ -352,20 +354,20 @@ struct xt_table *xt_find_table_lock(int af, const char *name)
 {
         struct xt_table *t;
 
-        if (down_interruptible(&xt[af].mutex) != 0)
+        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
                 return ERR_PTR(-EINTR);
 
         list_for_each_entry(t, &xt[af].tables, list)
                 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
                         return t;
-        up(&xt[af].mutex);
+        mutex_unlock(&xt[af].mutex);
         return NULL;
 }
 EXPORT_SYMBOL_GPL(xt_find_table_lock);
 
 void xt_table_unlock(struct xt_table *table)
 {
-        up(&xt[table->af].mutex);
+        mutex_unlock(&xt[table->af].mutex);
 }
 EXPORT_SYMBOL_GPL(xt_table_unlock);
 
@@ -405,7 +407,7 @@ int xt_register_table(struct xt_table *table,
         int ret;
         struct xt_table_info *private;
 
-        ret = down_interruptible(&xt[table->af].mutex);
+        ret = mutex_lock_interruptible(&xt[table->af].mutex);
         if (ret != 0)
                 return ret;
 
@@ -431,7 +433,7 @@ int xt_register_table(struct xt_table *table,
 
         ret = 0;
  unlock:
-        up(&xt[table->af].mutex);
+        mutex_unlock(&xt[table->af].mutex);
         return ret;
 }
 EXPORT_SYMBOL_GPL(xt_register_table);
@@ -440,10 +442,10 @@ void *xt_unregister_table(struct xt_table *table)
 {
         struct xt_table_info *private;
 
-        down(&xt[table->af].mutex);
+        mutex_lock(&xt[table->af].mutex);
         private = table->private;
         LIST_DELETE(&xt[table->af].tables, table);
-        up(&xt[table->af].mutex);
+        mutex_unlock(&xt[table->af].mutex);
 
         return private;
 }
@@ -507,7 +509,7 @@ static void *xt_tgt_seq_start(struct seq_file *seq, loff_t *pos)
         if (!list)
                 return NULL;
 
-        if (down_interruptible(&xt[af].mutex) != 0)
+        if (mutex_lock_interruptible(&xt[af].mutex) != 0)
                 return NULL;
 
         return xt_get_idx(list, seq, *pos);
@@ -536,7 +538,7 @@ static void xt_tgt_seq_stop(struct seq_file *seq, void *v)
         struct proc_dir_entry *pde = seq->private;
         u_int16_t af = (unsigned long)pde->data & 0xffff;
 
-        up(&xt[af].mutex);
+        mutex_unlock(&xt[af].mutex);
 }
 
 static int xt_name_seq_show(struct seq_file *seq, void *v)
@@ -668,7 +670,7 @@ static int __init xt_init(void)
                 return -ENOMEM;
 
         for (i = 0; i < NPROTO; i++) {
-                init_MUTEX(&xt[i].mutex);
+                mutex_init(&xt[i].mutex);
                 INIT_LIST_HEAD(&xt[i].target);
                 INIT_LIST_HEAD(&xt[i].match);
                 INIT_LIST_HEAD(&xt[i].tables);
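
The x_tables.c hunks are a mechanical sem2mutex conversion: the semaphore in struct xt_af was only ever used as a binary lock, so struct mutex with mutex_lock_interruptible()/mutex_unlock() states the intent directly and lets the kernel's mutex debugging check for misuse. A userspace analogue of the same refactor using POSIX primitives; the registered counter stands in for the protected match/target/table lists.

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t old_lock;                   /* before: semaphore abused as a mutex */
static pthread_mutex_t new_lock = PTHREAD_MUTEX_INITIALIZER;  /* after */

static int registered;                   /* placeholder for the xt[af] lists */

static void register_with_semaphore(void)
{
        sem_wait(&old_lock);             /* kernel: down()/down_interruptible() */
        registered++;
        sem_post(&old_lock);             /* kernel: up() */
}

static void register_with_mutex(void)
{
        pthread_mutex_lock(&new_lock);   /* kernel: mutex_lock_interruptible() */
        registered++;
        pthread_mutex_unlock(&new_lock); /* kernel: mutex_unlock() */
}

int main(void)
{
        sem_init(&old_lock, 0, 1);       /* count of 1 == binary lock */
        register_with_semaphore();
        register_with_mutex();
        printf("registered %d entries\n", registered);
        sem_destroy(&old_lock);
        return 0;
}

Because only lock/unlock pairs are involved, the conversion preserves behaviour; the gain is that a mutex has a defined owner, which the debug options can verify, whereas a semaphore does not.
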
diff --git a/net/sctp/input.c b/net/sctp/input.c
index cb78b50868ee..d117ebc75cf8 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -127,7 +127,6 @@ int sctp_rcv(struct sk_buff *skb)
         union sctp_addr dest;
         int family;
         struct sctp_af *af;
-        int ret = 0;
 
         if (skb->pkt_type!=PACKET_HOST)
                 goto discard_it;
@@ -227,16 +226,13 @@ int sctp_rcv(struct sk_buff *skb)
                 goto discard_release;
         nf_reset(skb);
 
-        ret = sk_filter(sk, skb, 1);
-        if (ret)
+        if (sk_filter(sk, skb, 1))
                 goto discard_release;
 
         /* Create an SCTP packet structure. */
         chunk = sctp_chunkify(skb, asoc, sk);
-        if (!chunk) {
-                ret = -ENOMEM;
+        if (!chunk)
                 goto discard_release;
-        }
         SCTP_INPUT_CB(skb)->chunk = chunk;
 
         /* Remember what endpoint is to handle this packet. */
@@ -277,11 +273,11 @@ int sctp_rcv(struct sk_buff *skb)
         sctp_bh_unlock_sock(sk);
         sock_put(sk);
 
-        return ret;
+        return 0;
 
 discard_it:
         kfree_skb(skb);
-        return ret;
+        return 0;
 
 discard_release:
         /* Release any structures we may be holding. */