about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2010-04-27 18:05:31 -0400
committerDavid S. Miller <davem@davemloft.net>2010-04-27 18:11:48 -0400
commitc58dc01babfd58ec9e71a6ce080150dc27755d88 (patch)
tree065c58b5236ea23ff0868a6bbf3c5233b990f0be /include
parentc4ee6a5348102b9cea49fb9adf88307445407911 (diff)
net: Make RFS socket operations not be inet specific.
Idea from Eric Dumazet. As for placement inside of struct sock, I tried to choose a place that otherwise has a 32-bit hole on 64-bit systems. Signed-off-by: David S. Miller <davem@davemloft.net> Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Diffstat (limited to 'include')
-rw-r--r--include/net/inet_sock.h37
-rw-r--r--include/net/sock.h38
2 files changed, 38 insertions(+), 37 deletions(-)
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index c1d42957b86b..1653de515cee 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -102,7 +102,6 @@ struct rtable;
102 * @uc_ttl - Unicast TTL 102 * @uc_ttl - Unicast TTL
103 * @inet_sport - Source port 103 * @inet_sport - Source port
104 * @inet_id - ID counter for DF pkts 104 * @inet_id - ID counter for DF pkts
105 * @rxhash - flow hash received from netif layer
106 * @tos - TOS 105 * @tos - TOS
107 * @mc_ttl - Multicasting TTL 106 * @mc_ttl - Multicasting TTL
108 * @is_icsk - is this an inet_connection_sock? 107 * @is_icsk - is this an inet_connection_sock?
@@ -126,9 +125,6 @@ struct inet_sock {
126 __u16 cmsg_flags; 125 __u16 cmsg_flags;
127 __be16 inet_sport; 126 __be16 inet_sport;
128 __u16 inet_id; 127 __u16 inet_id;
129#ifdef CONFIG_RPS
130 __u32 rxhash;
131#endif
132 128
133 struct ip_options *opt; 129 struct ip_options *opt;
134 __u8 tos; 130 __u8 tos;
@@ -224,37 +220,4 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
224 return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0; 220 return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
225} 221}
226 222
227static inline void inet_rps_record_flow(const struct sock *sk)
228{
229#ifdef CONFIG_RPS
230 struct rps_sock_flow_table *sock_flow_table;
231
232 rcu_read_lock();
233 sock_flow_table = rcu_dereference(rps_sock_flow_table);
234 rps_record_sock_flow(sock_flow_table, inet_sk(sk)->rxhash);
235 rcu_read_unlock();
236#endif
237}
238
239static inline void inet_rps_reset_flow(const struct sock *sk)
240{
241#ifdef CONFIG_RPS
242 struct rps_sock_flow_table *sock_flow_table;
243
244 rcu_read_lock();
245 sock_flow_table = rcu_dereference(rps_sock_flow_table);
246 rps_reset_sock_flow(sock_flow_table, inet_sk(sk)->rxhash);
247 rcu_read_unlock();
248#endif
249}
250
251static inline void inet_rps_save_rxhash(struct sock *sk, u32 rxhash)
252{
253#ifdef CONFIG_RPS
254 if (unlikely(inet_sk(sk)->rxhash != rxhash)) {
255 inet_rps_reset_flow(sk);
256 inet_sk(sk)->rxhash = rxhash;
257 }
258#endif
259}
260#endif /* _INET_SOCK_H */ 223#endif /* _INET_SOCK_H */
diff --git a/include/net/sock.h b/include/net/sock.h
index 4081db86a352..07822280d953 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -198,6 +198,7 @@ struct sock_common {
198 * @sk_rcvlowat: %SO_RCVLOWAT setting 198 * @sk_rcvlowat: %SO_RCVLOWAT setting
199 * @sk_rcvtimeo: %SO_RCVTIMEO setting 199 * @sk_rcvtimeo: %SO_RCVTIMEO setting
200 * @sk_sndtimeo: %SO_SNDTIMEO setting 200 * @sk_sndtimeo: %SO_SNDTIMEO setting
201 * @sk_rxhash: flow hash received from netif layer
201 * @sk_filter: socket filtering instructions 202 * @sk_filter: socket filtering instructions
202 * @sk_protinfo: private area, net family specific, when not using slab 203 * @sk_protinfo: private area, net family specific, when not using slab
203 * @sk_timer: sock cleanup timer 204 * @sk_timer: sock cleanup timer
@@ -279,6 +280,9 @@ struct sock {
279 int sk_gso_type; 280 int sk_gso_type;
280 unsigned int sk_gso_max_size; 281 unsigned int sk_gso_max_size;
281 int sk_rcvlowat; 282 int sk_rcvlowat;
283#ifdef CONFIG_RPS
284 __u32 sk_rxhash;
285#endif
282 unsigned long sk_flags; 286 unsigned long sk_flags;
283 unsigned long sk_lingertime; 287 unsigned long sk_lingertime;
284 struct sk_buff_head sk_error_queue; 288 struct sk_buff_head sk_error_queue;
@@ -620,6 +624,40 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
620 return sk->sk_backlog_rcv(sk, skb); 624 return sk->sk_backlog_rcv(sk, skb);
621} 625}
622 626
627static inline void sock_rps_record_flow(const struct sock *sk)
628{
629#ifdef CONFIG_RPS
630 struct rps_sock_flow_table *sock_flow_table;
631
632 rcu_read_lock();
633 sock_flow_table = rcu_dereference(rps_sock_flow_table);
634 rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
635 rcu_read_unlock();
636#endif
637}
638
639static inline void sock_rps_reset_flow(const struct sock *sk)
640{
641#ifdef CONFIG_RPS
642 struct rps_sock_flow_table *sock_flow_table;
643
644 rcu_read_lock();
645 sock_flow_table = rcu_dereference(rps_sock_flow_table);
646 rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
647 rcu_read_unlock();
648#endif
649}
650
651static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
652{
653#ifdef CONFIG_RPS
654 if (unlikely(sk->sk_rxhash != rxhash)) {
655 sock_rps_reset_flow(sk);
656 sk->sk_rxhash = rxhash;
657 }
658#endif
659}
660
623#define sk_wait_event(__sk, __timeo, __condition) \ 661#define sk_wait_event(__sk, __timeo, __condition) \
624 ({ int __rc; \ 662 ({ int __rc; \
625 release_sock(__sk); \ 663 release_sock(__sk); \