about summary refs log tree commit diff stats
path: root/include/net/sock.h
diff options
context:
space:
mode:
author: David S. Miller <davem@davemloft.net> 2010-04-27 18:05:31 -0400
committer: David S. Miller <davem@davemloft.net> 2010-04-27 18:11:48 -0400
commit: c58dc01babfd58ec9e71a6ce080150dc27755d88 (patch)
tree: 065c58b5236ea23ff0868a6bbf3c5233b990f0be /include/net/sock.h
parent: c4ee6a5348102b9cea49fb9adf88307445407911 (diff)
net: Make RFS socket operations not be inet specific.
Idea from Eric Dumazet. As for placement inside of struct sock, I tried to choose a place that otherwise has a 32-bit hole on 64-bit systems. Signed-off-by: David S. Miller <davem@davemloft.net> Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
Diffstat (limited to 'include/net/sock.h')
-rw-r--r-- include/net/sock.h | 38
1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 4081db86a352..07822280d953 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -198,6 +198,7 @@ struct sock_common {
198 * @sk_rcvlowat: %SO_RCVLOWAT setting 198 * @sk_rcvlowat: %SO_RCVLOWAT setting
199 * @sk_rcvtimeo: %SO_RCVTIMEO setting 199 * @sk_rcvtimeo: %SO_RCVTIMEO setting
200 * @sk_sndtimeo: %SO_SNDTIMEO setting 200 * @sk_sndtimeo: %SO_SNDTIMEO setting
201 * @sk_rxhash: flow hash received from netif layer
201 * @sk_filter: socket filtering instructions 202 * @sk_filter: socket filtering instructions
202 * @sk_protinfo: private area, net family specific, when not using slab 203 * @sk_protinfo: private area, net family specific, when not using slab
203 * @sk_timer: sock cleanup timer 204 * @sk_timer: sock cleanup timer
@@ -279,6 +280,9 @@ struct sock {
279 int sk_gso_type; 280 int sk_gso_type;
280 unsigned int sk_gso_max_size; 281 unsigned int sk_gso_max_size;
281 int sk_rcvlowat; 282 int sk_rcvlowat;
283#ifdef CONFIG_RPS
284 __u32 sk_rxhash;
285#endif
282 unsigned long sk_flags; 286 unsigned long sk_flags;
283 unsigned long sk_lingertime; 287 unsigned long sk_lingertime;
284 struct sk_buff_head sk_error_queue; 288 struct sk_buff_head sk_error_queue;
@@ -620,6 +624,40 @@ static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
620 return sk->sk_backlog_rcv(sk, skb); 624 return sk->sk_backlog_rcv(sk, skb);
621} 625}
622 626
627static inline void sock_rps_record_flow(const struct sock *sk)
628{
629#ifdef CONFIG_RPS
630 struct rps_sock_flow_table *sock_flow_table;
631
632 rcu_read_lock();
633 sock_flow_table = rcu_dereference(rps_sock_flow_table);
634 rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
635 rcu_read_unlock();
636#endif
637}
638
639static inline void sock_rps_reset_flow(const struct sock *sk)
640{
641#ifdef CONFIG_RPS
642 struct rps_sock_flow_table *sock_flow_table;
643
644 rcu_read_lock();
645 sock_flow_table = rcu_dereference(rps_sock_flow_table);
646 rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
647 rcu_read_unlock();
648#endif
649}
650
651static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
652{
653#ifdef CONFIG_RPS
654 if (unlikely(sk->sk_rxhash != rxhash)) {
655 sock_rps_reset_flow(sk);
656 sk->sk_rxhash = rxhash;
657 }
658#endif
659}
660
623#define sk_wait_event(__sk, __timeo, __condition) \ 661#define sk_wait_event(__sk, __timeo, __condition) \
624 ({ int __rc; \ 662 ({ int __rc; \
625 release_sock(__sk); \ 663 release_sock(__sk); \