author    YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>  2008-03-25 13:26:21 -0400
committer YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>  2008-03-25 15:39:55 -0400
commit    3b1e0a655f8eba44ab1ee2a1068d169ccfb853b9 (patch)
tree      09edb35f32ebcfb1b4dad904425128a110ef16ee /net/ipv4/udp.c
parent    c346dca10840a874240c78efe3f39acf4312a1f2 (diff)
[NET] NETNS: Omit sock->sk_net without CONFIG_NET_NS.

Introduce per-sock inlines sock_net() and sock_net_set(), plus the
per-inet_timewait_sock counterparts twsk_net() and twsk_net_set().
Without CONFIG_NET_NS, no namespace other than &init_net exists, so
define these helpers explicitly for that case to help compiler
optimizations.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
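
For context, a minimal sketch of what the new accessors can look like, assuming they follow the pattern described in the changelog; the real definitions live in include/net/sock.h (and the twsk_* variants in include/net/inet_timewait_sock.h) and may differ in detail:

static inline struct net *sock_net(const struct sock *sk)
{
#ifdef CONFIG_NET_NS
	return sk->sk_net;	/* the socket's actual namespace */
#else
	return &init_net;	/* only one namespace exists; a constant helps the optimizer */
#endif
}

static inline void sock_net_set(struct sock *sk, struct net *net)
{
#ifdef CONFIG_NET_NS
	sk->sk_net = net;
#endif
	/* without CONFIG_NET_NS there is nothing to record */
}

With CONFIG_NET_NS disabled, a check such as sock_net(sk) == net compares pointers that can only ever be &init_net, which is the kind of compile-time simplification the changelog refers to.
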
Diffstat (limited to 'net/ipv4/udp.c')
-rw-r--r--  net/ipv4/udp.c  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e2cd93481359..76d52d37d6ac 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -137,7 +137,7 @@ static inline int __udp_lib_lport_inuse(struct net *net, __u16 num,
 	struct hlist_node *node;
 
 	sk_for_each(sk, node, &udptable[num & (UDP_HTABLE_SIZE - 1)])
-		if (sk->sk_net == net && sk->sk_hash == num)
+		if (sock_net(sk) == net && sk->sk_hash == num)
 			return 1;
 	return 0;
 }
@@ -158,7 +158,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
 	struct hlist_head *head;
 	struct sock *sk2;
 	int error = 1;
-	struct net *net = sk->sk_net;
+	struct net *net = sock_net(sk);
 
 	write_lock_bh(&udp_hash_lock);
 
@@ -218,7 +218,7 @@ gotit:
 		sk_for_each(sk2, node, head)
 			if (sk2->sk_hash == snum &&
 			    sk2 != sk &&
-			    sk2->sk_net == net &&
+			    sock_net(sk2) == net &&
 			    (!sk2->sk_reuse || !sk->sk_reuse) &&
 			    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if
 			     || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
@@ -269,7 +269,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
 	sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
 		struct inet_sock *inet = inet_sk(sk);
 
-		if (sk->sk_net == net && sk->sk_hash == hnum &&
+		if (sock_net(sk) == net && sk->sk_hash == hnum &&
 		    !ipv6_only_sock(sk)) {
 			int score = (sk->sk_family == PF_INET ? 1 : 0);
 			if (inet->rcv_saddr) {
@@ -607,7 +607,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 
 	ipc.oif = sk->sk_bound_dev_if;
 	if (msg->msg_controllen) {
-		err = ip_cmsg_send(sk->sk_net, msg, &ipc);
+		err = ip_cmsg_send(sock_net(sk), msg, &ipc);
 		if (err)
 			return err;
 		if (ipc.opt)
@@ -656,7 +656,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 					       { .sport = inet->sport,
 						 .dport = dport } } };
 		security_sk_classify_flow(sk, &fl);
-		err = ip_route_output_flow(sk->sk_net, &rt, &fl, sk, 1);
+		err = ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 1);
 		if (err) {
 			if (err == -ENETUNREACH)
 				IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
@@ -1511,7 +1511,7 @@ static struct sock *udp_get_first(struct seq_file *seq)
 	for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
 		struct hlist_node *node;
 		sk_for_each(sk, node, state->hashtable + state->bucket) {
-			if (sk->sk_net != net)
+			if (sock_net(sk) != net)
 				continue;
 			if (sk->sk_family == state->family)
 				goto found;
@@ -1531,7 +1531,7 @@ static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk)
 		sk = sk_next(sk);
 try_again:
 		;
-	} while (sk && (sk->sk_net != net || sk->sk_family != state->family));
+	} while (sk && (sock_net(sk) != net || sk->sk_family != state->family));
 
 	if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
 		sk = sk_head(state->hashtable + state->bucket);