aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorPavel Emelyanov <xemul@openvz.org>2007-11-20 01:36:45 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 17:54:30 -0500
commitb673e4dfc8f29e5bfe4d342029b793e9d504f6dd (patch)
treee0282a3819176b9f28eb7e2021c55c70bdbda50e /net
parent69d6da0b0faa70249a243a14e6066c013e9294e5 (diff)
[RAW]: Introduce raw_hashinfo structure
The ipv4/raw.c and ipv6/raw.c files contain a lot of common code (most of which is the proc interface) which can be consolidated. Most of the places to consolidate deal with the raw sockets hash table, so introduce a struct raw_hashinfo which describes the raw sockets hash. Signed-off-by: Pavel Emelyanov <xemul@openvz.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/raw.c50
-rw-r--r--net/ipv6/raw.c49
2 files changed, 50 insertions, 49 deletions
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8a506618b912..dd9f00b3ab42 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -80,28 +80,27 @@
80#include <linux/netfilter.h> 80#include <linux/netfilter.h>
81#include <linux/netfilter_ipv4.h> 81#include <linux/netfilter_ipv4.h>
82 82
83#define RAWV4_HTABLE_SIZE MAX_INET_PROTOS 83static struct raw_hashinfo raw_v4_hashinfo = {
84 84 .lock = __RW_LOCK_UNLOCKED(),
85static struct hlist_head raw_v4_htable[RAWV4_HTABLE_SIZE]; 85};
86static DEFINE_RWLOCK(raw_v4_lock);
87 86
88static void raw_v4_hash(struct sock *sk) 87static void raw_v4_hash(struct sock *sk)
89{ 88{
90 struct hlist_head *head = &raw_v4_htable[inet_sk(sk)->num & 89 struct hlist_head *head = &raw_v4_hashinfo.ht[inet_sk(sk)->num &
91 (RAWV4_HTABLE_SIZE - 1)]; 90 (RAW_HTABLE_SIZE - 1)];
92 91
93 write_lock_bh(&raw_v4_lock); 92 write_lock_bh(&raw_v4_hashinfo.lock);
94 sk_add_node(sk, head); 93 sk_add_node(sk, head);
95 sock_prot_inc_use(sk->sk_prot); 94 sock_prot_inc_use(sk->sk_prot);
96 write_unlock_bh(&raw_v4_lock); 95 write_unlock_bh(&raw_v4_hashinfo.lock);
97} 96}
98 97
99static void raw_v4_unhash(struct sock *sk) 98static void raw_v4_unhash(struct sock *sk)
100{ 99{
101 write_lock_bh(&raw_v4_lock); 100 write_lock_bh(&raw_v4_hashinfo.lock);
102 if (sk_del_node_init(sk)) 101 if (sk_del_node_init(sk))
103 sock_prot_dec_use(sk->sk_prot); 102 sock_prot_dec_use(sk->sk_prot);
104 write_unlock_bh(&raw_v4_lock); 103 write_unlock_bh(&raw_v4_hashinfo.lock);
105} 104}
106 105
107static struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num, 106static struct sock *__raw_v4_lookup(struct sock *sk, unsigned short num,
@@ -158,8 +157,8 @@ static int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
158 struct hlist_head *head; 157 struct hlist_head *head;
159 int delivered = 0; 158 int delivered = 0;
160 159
161 read_lock(&raw_v4_lock); 160 read_lock(&raw_v4_hashinfo.lock);
162 head = &raw_v4_htable[hash]; 161 head = &raw_v4_hashinfo.ht[hash];
163 if (hlist_empty(head)) 162 if (hlist_empty(head))
164 goto out; 163 goto out;
165 sk = __raw_v4_lookup(__sk_head(head), iph->protocol, 164 sk = __raw_v4_lookup(__sk_head(head), iph->protocol,
@@ -180,7 +179,7 @@ static int raw_v4_input(struct sk_buff *skb, struct iphdr *iph, int hash)
180 skb->dev->ifindex); 179 skb->dev->ifindex);
181 } 180 }
182out: 181out:
183 read_unlock(&raw_v4_lock); 182 read_unlock(&raw_v4_hashinfo.lock);
184 return delivered; 183 return delivered;
185} 184}
186 185
@@ -189,8 +188,8 @@ int raw_local_deliver(struct sk_buff *skb, int protocol)
189 int hash; 188 int hash;
190 struct sock *raw_sk; 189 struct sock *raw_sk;
191 190
192 hash = protocol & (RAWV4_HTABLE_SIZE - 1); 191 hash = protocol & (RAW_HTABLE_SIZE - 1);
193 raw_sk = sk_head(&raw_v4_htable[hash]); 192 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
194 193
195 /* If there maybe a raw socket we must check - if not we 194 /* If there maybe a raw socket we must check - if not we
196 * don't care less 195 * don't care less
@@ -262,10 +261,10 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
262 struct sock *raw_sk; 261 struct sock *raw_sk;
263 struct iphdr *iph; 262 struct iphdr *iph;
264 263
265 hash = protocol & (RAWV4_HTABLE_SIZE - 1); 264 hash = protocol & (RAW_HTABLE_SIZE - 1);
266 265
267 read_lock(&raw_v4_lock); 266 read_lock(&raw_v4_hashinfo.lock);
268 raw_sk = sk_head(&raw_v4_htable[hash]); 267 raw_sk = sk_head(&raw_v4_hashinfo.ht[hash]);
269 if (raw_sk != NULL) { 268 if (raw_sk != NULL) {
270 iph = (struct iphdr *)skb->data; 269 iph = (struct iphdr *)skb->data;
271 while ((raw_sk = __raw_v4_lookup(raw_sk, protocol, iph->daddr, 270 while ((raw_sk = __raw_v4_lookup(raw_sk, protocol, iph->daddr,
@@ -276,7 +275,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
276 iph = (struct iphdr *)skb->data; 275 iph = (struct iphdr *)skb->data;
277 } 276 }
278 } 277 }
279 read_unlock(&raw_v4_lock); 278 read_unlock(&raw_v4_hashinfo.lock);
280} 279}
281 280
282static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) 281static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
@@ -844,10 +843,11 @@ static struct sock *raw_get_first(struct seq_file *seq)
844 struct sock *sk; 843 struct sock *sk;
845 struct raw_iter_state* state = raw_seq_private(seq); 844 struct raw_iter_state* state = raw_seq_private(seq);
846 845
847 for (state->bucket = 0; state->bucket < RAWV4_HTABLE_SIZE; ++state->bucket) { 846 for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
847 ++state->bucket) {
848 struct hlist_node *node; 848 struct hlist_node *node;
849 849
850 sk_for_each(sk, node, &raw_v4_htable[state->bucket]) 850 sk_for_each(sk, node, &raw_v4_hashinfo.ht[state->bucket])
851 if (sk->sk_family == PF_INET) 851 if (sk->sk_family == PF_INET)
852 goto found; 852 goto found;
853 } 853 }
@@ -866,8 +866,8 @@ try_again:
866 ; 866 ;
867 } while (sk && sk->sk_family != PF_INET); 867 } while (sk && sk->sk_family != PF_INET);
868 868
869 if (!sk && ++state->bucket < RAWV4_HTABLE_SIZE) { 869 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
870 sk = sk_head(&raw_v4_htable[state->bucket]); 870 sk = sk_head(&raw_v4_hashinfo.ht[state->bucket]);
871 goto try_again; 871 goto try_again;
872 } 872 }
873 return sk; 873 return sk;
@@ -885,7 +885,7 @@ static struct sock *raw_get_idx(struct seq_file *seq, loff_t pos)
885 885
886static void *raw_seq_start(struct seq_file *seq, loff_t *pos) 886static void *raw_seq_start(struct seq_file *seq, loff_t *pos)
887{ 887{
888 read_lock(&raw_v4_lock); 888 read_lock(&raw_v4_hashinfo.lock);
889 return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 889 return *pos ? raw_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
890} 890}
891 891
@@ -903,7 +903,7 @@ static void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos)
903 903
904static void raw_seq_stop(struct seq_file *seq, void *v) 904static void raw_seq_stop(struct seq_file *seq, void *v)
905{ 905{
906 read_unlock(&raw_v4_lock); 906 read_unlock(&raw_v4_hashinfo.lock);
907} 907}
908 908
909static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i) 909static __inline__ char *get_raw_sock(struct sock *sp, char *tmpbuf, int i)
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 53f01b4982c7..15c72a6365a6 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -54,34 +54,34 @@
54#include <net/mip6.h> 54#include <net/mip6.h>
55#endif 55#endif
56 56
57#include <net/raw.h>
57#include <net/rawv6.h> 58#include <net/rawv6.h>
58#include <net/xfrm.h> 59#include <net/xfrm.h>
59 60
60#include <linux/proc_fs.h> 61#include <linux/proc_fs.h>
61#include <linux/seq_file.h> 62#include <linux/seq_file.h>
62 63
63#define RAWV6_HTABLE_SIZE MAX_INET_PROTOS 64static struct raw_hashinfo raw_v6_hashinfo = {
64 65 .lock = __RW_LOCK_UNLOCKED(),
65static struct hlist_head raw_v6_htable[RAWV6_HTABLE_SIZE]; 66};
66static DEFINE_RWLOCK(raw_v6_lock);
67 67
68static void raw_v6_hash(struct sock *sk) 68static void raw_v6_hash(struct sock *sk)
69{ 69{
70 struct hlist_head *list = &raw_v6_htable[inet_sk(sk)->num & 70 struct hlist_head *list = &raw_v6_hashinfo.ht[inet_sk(sk)->num &
71 (RAWV6_HTABLE_SIZE - 1)]; 71 (RAW_HTABLE_SIZE - 1)];
72 72
73 write_lock_bh(&raw_v6_lock); 73 write_lock_bh(&raw_v6_hashinfo.lock);
74 sk_add_node(sk, list); 74 sk_add_node(sk, list);
75 sock_prot_inc_use(sk->sk_prot); 75 sock_prot_inc_use(sk->sk_prot);
76 write_unlock_bh(&raw_v6_lock); 76 write_unlock_bh(&raw_v6_hashinfo.lock);
77} 77}
78 78
79static void raw_v6_unhash(struct sock *sk) 79static void raw_v6_unhash(struct sock *sk)
80{ 80{
81 write_lock_bh(&raw_v6_lock); 81 write_lock_bh(&raw_v6_hashinfo.lock);
82 if (sk_del_node_init(sk)) 82 if (sk_del_node_init(sk))
83 sock_prot_dec_use(sk->sk_prot); 83 sock_prot_dec_use(sk->sk_prot);
84 write_unlock_bh(&raw_v6_lock); 84 write_unlock_bh(&raw_v6_hashinfo.lock);
85} 85}
86 86
87 87
@@ -180,8 +180,8 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
180 180
181 hash = nexthdr & (MAX_INET_PROTOS - 1); 181 hash = nexthdr & (MAX_INET_PROTOS - 1);
182 182
183 read_lock(&raw_v6_lock); 183 read_lock(&raw_v6_hashinfo.lock);
184 sk = sk_head(&raw_v6_htable[hash]); 184 sk = sk_head(&raw_v6_hashinfo.ht[hash]);
185 185
186 /* 186 /*
187 * The first socket found will be delivered after 187 * The first socket found will be delivered after
@@ -238,7 +238,7 @@ static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
238 IP6CB(skb)->iif); 238 IP6CB(skb)->iif);
239 } 239 }
240out: 240out:
241 read_unlock(&raw_v6_lock); 241 read_unlock(&raw_v6_hashinfo.lock);
242 return delivered; 242 return delivered;
243} 243}
244 244
@@ -246,7 +246,7 @@ int raw6_local_deliver(struct sk_buff *skb, int nexthdr)
246{ 246{
247 struct sock *raw_sk; 247 struct sock *raw_sk;
248 248
249 raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]); 249 raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]);
250 if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) 250 if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
251 raw_sk = NULL; 251 raw_sk = NULL;
252 252
@@ -368,10 +368,10 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
368 int hash; 368 int hash;
369 struct in6_addr *saddr, *daddr; 369 struct in6_addr *saddr, *daddr;
370 370
371 hash = nexthdr & (RAWV6_HTABLE_SIZE - 1); 371 hash = nexthdr & (RAW_HTABLE_SIZE - 1);
372 372
373 read_lock(&raw_v6_lock); 373 read_lock(&raw_v6_hashinfo.lock);
374 sk = sk_head(&raw_v6_htable[hash]); 374 sk = sk_head(&raw_v6_hashinfo.ht[hash]);
375 if (sk != NULL) { 375 if (sk != NULL) {
376 saddr = &ipv6_hdr(skb)->saddr; 376 saddr = &ipv6_hdr(skb)->saddr;
377 daddr = &ipv6_hdr(skb)->daddr; 377 daddr = &ipv6_hdr(skb)->daddr;
@@ -383,7 +383,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
383 sk = sk_next(sk); 383 sk = sk_next(sk);
384 } 384 }
385 } 385 }
386 read_unlock(&raw_v6_lock); 386 read_unlock(&raw_v6_hashinfo.lock);
387} 387}
388 388
389static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb) 389static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
@@ -1221,8 +1221,9 @@ static struct sock *raw6_get_first(struct seq_file *seq)
1221 struct hlist_node *node; 1221 struct hlist_node *node;
1222 struct raw6_iter_state* state = raw6_seq_private(seq); 1222 struct raw6_iter_state* state = raw6_seq_private(seq);
1223 1223
1224 for (state->bucket = 0; state->bucket < RAWV6_HTABLE_SIZE; ++state->bucket) 1224 for (state->bucket = 0; state->bucket < RAW_HTABLE_SIZE;
1225 sk_for_each(sk, node, &raw_v6_htable[state->bucket]) 1225 ++state->bucket)
1226 sk_for_each(sk, node, &raw_v6_hashinfo.ht[state->bucket])
1226 if (sk->sk_family == PF_INET6) 1227 if (sk->sk_family == PF_INET6)
1227 goto out; 1228 goto out;
1228 sk = NULL; 1229 sk = NULL;
@@ -1240,8 +1241,8 @@ try_again:
1240 ; 1241 ;
1241 } while (sk && sk->sk_family != PF_INET6); 1242 } while (sk && sk->sk_family != PF_INET6);
1242 1243
1243 if (!sk && ++state->bucket < RAWV6_HTABLE_SIZE) { 1244 if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
1244 sk = sk_head(&raw_v6_htable[state->bucket]); 1245 sk = sk_head(&raw_v6_hashinfo.ht[state->bucket]);
1245 goto try_again; 1246 goto try_again;
1246 } 1247 }
1247 return sk; 1248 return sk;
@@ -1258,7 +1259,7 @@ static struct sock *raw6_get_idx(struct seq_file *seq, loff_t pos)
1258 1259
1259static void *raw6_seq_start(struct seq_file *seq, loff_t *pos) 1260static void *raw6_seq_start(struct seq_file *seq, loff_t *pos)
1260{ 1261{
1261 read_lock(&raw_v6_lock); 1262 read_lock(&raw_v6_hashinfo.lock);
1262 return *pos ? raw6_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; 1263 return *pos ? raw6_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1263} 1264}
1264 1265
@@ -1276,7 +1277,7 @@ static void *raw6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1276 1277
1277static void raw6_seq_stop(struct seq_file *seq, void *v) 1278static void raw6_seq_stop(struct seq_file *seq, void *v)
1278{ 1279{
1279 read_unlock(&raw_v6_lock); 1280 read_unlock(&raw_v6_hashinfo.lock);
1280} 1281}
1281 1282
1282static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) 1283static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)