diff options
author | Arnaldo Carvalho de Melo <acme@ghostprotocols.net> | 2005-08-09 22:59:44 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-08-29 18:38:32 -0400 |
commit | 0f7ff9274e72fd254fbd1ab117bbc1db6e7cdb34 (patch) | |
tree | 95736729a2f5302666604c4287a2af97ececd734 /include/net | |
parent | 304a16180fb6d2b153b45f6fbbcec1fa814496e5 (diff) |
[INET]: Just rename the TCP hashtable functions/structs to inet_
This is to break down the complexity of the series of patches,
making it very clear that this one just does:
1. renames tcp_ prefixed hashtable functions and data structures that
were already mostly generic to inet_ to share it with DCCP and
other INET transport protocols.
2. Removes unused functions (__tb_head & tb_head)
3. Removes some leftover prototypes in the headers (tcp_bucket_unlock &
tcp_v4_build_header)
Next changesets will move tcp_sk(sk)->bind_hash to inet_sock so that we can
make functions such as tcp_inherit_port, __tcp_inherit_port, tcp_v4_get_port,
__tcp_put_port, generic and get others like tcp_destroy_sock closer to generic
(tcp_orphan_count will go to sk->sk_prot to allow this).
Eventually most of these functions will be used passing the transport protocol
inet_hashinfo structure.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net')
-rw-r--r-- | include/net/tcp.h | 93 |
1 files changed, 42 insertions, 51 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h index 0c769adb0463..6c9f6f7cab5c 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -44,13 +44,13 @@ | |||
44 | * New scheme, half the table is for TIME_WAIT, the other half is | 44 | * New scheme, half the table is for TIME_WAIT, the other half is |
45 | * for the rest. I'll experiment with dynamic table growth later. | 45 | * for the rest. I'll experiment with dynamic table growth later. |
46 | */ | 46 | */ |
47 | struct tcp_ehash_bucket { | 47 | struct inet_ehash_bucket { |
48 | rwlock_t lock; | 48 | rwlock_t lock; |
49 | struct hlist_head chain; | 49 | struct hlist_head chain; |
50 | } __attribute__((__aligned__(8))); | 50 | } __attribute__((__aligned__(8))); |
51 | 51 | ||
52 | /* This is for listening sockets, thus all sockets which possess wildcards. */ | 52 | /* This is for listening sockets, thus all sockets which possess wildcards. */ |
53 | #define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */ | 53 | #define INET_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */ |
54 | 54 | ||
55 | /* There are a few simple rules, which allow for local port reuse by | 55 | /* There are a few simple rules, which allow for local port reuse by |
56 | * an application. In essence: | 56 | * an application. In essence: |
@@ -83,31 +83,22 @@ struct tcp_ehash_bucket { | |||
83 | * users logged onto your box, isn't it nice to know that new data | 83 | * users logged onto your box, isn't it nice to know that new data |
84 | * ports are created in O(1) time? I thought so. ;-) -DaveM | 84 | * ports are created in O(1) time? I thought so. ;-) -DaveM |
85 | */ | 85 | */ |
86 | struct tcp_bind_bucket { | 86 | struct inet_bind_bucket { |
87 | unsigned short port; | 87 | unsigned short port; |
88 | signed short fastreuse; | 88 | signed short fastreuse; |
89 | struct hlist_node node; | 89 | struct hlist_node node; |
90 | struct hlist_head owners; | 90 | struct hlist_head owners; |
91 | }; | 91 | }; |
92 | 92 | ||
93 | #define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node) | 93 | #define inet_bind_bucket_for_each(tb, node, head) \ |
94 | hlist_for_each_entry(tb, node, head, node) | ||
94 | 95 | ||
95 | struct tcp_bind_hashbucket { | 96 | struct inet_bind_hashbucket { |
96 | spinlock_t lock; | 97 | spinlock_t lock; |
97 | struct hlist_head chain; | 98 | struct hlist_head chain; |
98 | }; | 99 | }; |
99 | 100 | ||
100 | static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head) | 101 | struct inet_hashinfo { |
101 | { | ||
102 | return hlist_entry(head->chain.first, struct tcp_bind_bucket, node); | ||
103 | } | ||
104 | |||
105 | static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head) | ||
106 | { | ||
107 | return hlist_empty(&head->chain) ? NULL : __tb_head(head); | ||
108 | } | ||
109 | |||
110 | extern struct tcp_hashinfo { | ||
111 | /* This is for sockets with full identity only. Sockets here will | 102 | /* This is for sockets with full identity only. Sockets here will |
112 | * always be without wildcards and will have the following invariant: | 103 | * always be without wildcards and will have the following invariant: |
113 | * | 104 | * |
@@ -116,21 +107,21 @@ extern struct tcp_hashinfo { | |||
116 | * First half of the table is for sockets not in TIME_WAIT, second half | 107 | * First half of the table is for sockets not in TIME_WAIT, second half |
117 | * is for TIME_WAIT sockets only. | 108 | * is for TIME_WAIT sockets only. |
118 | */ | 109 | */ |
119 | struct tcp_ehash_bucket *__tcp_ehash; | 110 | struct inet_ehash_bucket *ehash; |
120 | 111 | ||
121 | /* Ok, let's try this, I give up, we do need a local binding | 112 | /* Ok, let's try this, I give up, we do need a local binding |
122 | * TCP hash as well as the others for fast bind/connect. | 113 | * TCP hash as well as the others for fast bind/connect. |
123 | */ | 114 | */ |
124 | struct tcp_bind_hashbucket *__tcp_bhash; | 115 | struct inet_bind_hashbucket *bhash; |
125 | 116 | ||
126 | int __tcp_bhash_size; | 117 | int bhash_size; |
127 | int __tcp_ehash_size; | 118 | int ehash_size; |
128 | 119 | ||
129 | /* All sockets in TCP_LISTEN state will be in here. This is the only | 120 | /* All sockets in TCP_LISTEN state will be in here. This is the only |
130 | * table where wildcard'd TCP sockets can exist. Hash function here | 121 | * table where wildcard'd TCP sockets can exist. Hash function here |
131 | * is just local port number. | 122 | * is just local port number. |
132 | */ | 123 | */ |
133 | struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE]; | 124 | struct hlist_head listening_hash[INET_LHTABLE_SIZE]; |
134 | 125 | ||
135 | /* All the above members are written once at bootup and | 126 | /* All the above members are written once at bootup and |
136 | * never written again _or_ are predominantly read-access. | 127 | * never written again _or_ are predominantly read-access. |
@@ -138,36 +129,39 @@ extern struct tcp_hashinfo { | |||
138 | * Now align to a new cache line as all the following members | 129 | * Now align to a new cache line as all the following members |
139 | * are often dirty. | 130 | * are often dirty. |
140 | */ | 131 | */ |
141 | rwlock_t __tcp_lhash_lock ____cacheline_aligned; | 132 | rwlock_t lhash_lock ____cacheline_aligned; |
142 | atomic_t __tcp_lhash_users; | 133 | atomic_t lhash_users; |
143 | wait_queue_head_t __tcp_lhash_wait; | 134 | wait_queue_head_t lhash_wait; |
144 | spinlock_t __tcp_portalloc_lock; | 135 | spinlock_t portalloc_lock; |
145 | } tcp_hashinfo; | 136 | }; |
146 | 137 | ||
147 | #define tcp_ehash (tcp_hashinfo.__tcp_ehash) | 138 | extern struct inet_hashinfo tcp_hashinfo; |
148 | #define tcp_bhash (tcp_hashinfo.__tcp_bhash) | 139 | #define tcp_ehash (tcp_hashinfo.ehash) |
149 | #define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size) | 140 | #define tcp_bhash (tcp_hashinfo.bhash) |
150 | #define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size) | 141 | #define tcp_ehash_size (tcp_hashinfo.ehash_size) |
151 | #define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash) | 142 | #define tcp_bhash_size (tcp_hashinfo.bhash_size) |
152 | #define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock) | 143 | #define tcp_listening_hash (tcp_hashinfo.listening_hash) |
153 | #define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users) | 144 | #define tcp_lhash_lock (tcp_hashinfo.lhash_lock) |
154 | #define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait) | 145 | #define tcp_lhash_users (tcp_hashinfo.lhash_users) |
155 | #define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock) | 146 | #define tcp_lhash_wait (tcp_hashinfo.lhash_wait) |
147 | #define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock) | ||
156 | 148 | ||
157 | extern kmem_cache_t *tcp_bucket_cachep; | 149 | extern kmem_cache_t *tcp_bucket_cachep; |
158 | extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head, | 150 | extern struct inet_bind_bucket * |
159 | unsigned short snum); | 151 | inet_bind_bucket_create(kmem_cache_t *cachep, |
160 | extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb); | 152 | struct inet_bind_hashbucket *head, |
161 | extern void tcp_bucket_unlock(struct sock *sk); | 153 | const unsigned short snum); |
154 | extern void inet_bind_bucket_destroy(kmem_cache_t *cachep, | ||
155 | struct inet_bind_bucket *tb); | ||
162 | extern int tcp_port_rover; | 156 | extern int tcp_port_rover; |
163 | 157 | ||
164 | /* These are AF independent. */ | 158 | /* These are AF independent. */ |
165 | static __inline__ int tcp_bhashfn(__u16 lport) | 159 | static inline int inet_bhashfn(const __u16 lport, const int bhash_size) |
166 | { | 160 | { |
167 | return (lport & (tcp_bhash_size - 1)); | 161 | return lport & (bhash_size - 1); |
168 | } | 162 | } |
169 | 163 | ||
170 | extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb, | 164 | extern void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb, |
171 | unsigned short snum); | 165 | unsigned short snum); |
172 | 166 | ||
173 | #if (BITS_PER_LONG == 64) | 167 | #if (BITS_PER_LONG == 64) |
@@ -212,7 +206,7 @@ struct tcp_tw_bucket { | |||
212 | __u32 tw_ts_recent; | 206 | __u32 tw_ts_recent; |
213 | long tw_ts_recent_stamp; | 207 | long tw_ts_recent_stamp; |
214 | unsigned long tw_ttd; | 208 | unsigned long tw_ttd; |
215 | struct tcp_bind_bucket *tw_tb; | 209 | struct inet_bind_bucket *tw_tb; |
216 | struct hlist_node tw_death_node; | 210 | struct hlist_node tw_death_node; |
217 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 211 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
218 | struct in6_addr tw_v6_daddr; | 212 | struct in6_addr tw_v6_daddr; |
@@ -366,14 +360,14 @@ extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw); | |||
366 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) | 360 | (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif)))) |
367 | 361 | ||
368 | /* These can have wildcards, don't try too hard. */ | 362 | /* These can have wildcards, don't try too hard. */ |
369 | static __inline__ int tcp_lhashfn(unsigned short num) | 363 | static inline int inet_lhashfn(const unsigned short num) |
370 | { | 364 | { |
371 | return num & (TCP_LHTABLE_SIZE - 1); | 365 | return num & (INET_LHTABLE_SIZE - 1); |
372 | } | 366 | } |
373 | 367 | ||
374 | static __inline__ int tcp_sk_listen_hashfn(struct sock *sk) | 368 | static inline int inet_sk_listen_hashfn(const struct sock *sk) |
375 | { | 369 | { |
376 | return tcp_lhashfn(inet_sk(sk)->num); | 370 | return inet_lhashfn(inet_sk(sk)->num); |
377 | } | 371 | } |
378 | 372 | ||
379 | #define MAX_TCP_HEADER (128 + MAX_HEADER) | 373 | #define MAX_TCP_HEADER (128 + MAX_HEADER) |
@@ -799,9 +793,6 @@ extern void tcp_parse_options(struct sk_buff *skb, | |||
799 | * TCP v4 functions exported for the inet6 API | 793 | * TCP v4 functions exported for the inet6 API |
800 | */ | 794 | */ |
801 | 795 | ||
802 | extern int tcp_v4_build_header(struct sock *sk, | ||
803 | struct sk_buff *skb); | ||
804 | |||
805 | extern void tcp_v4_send_check(struct sock *sk, | 796 | extern void tcp_v4_send_check(struct sock *sk, |
806 | struct tcphdr *th, int len, | 797 | struct tcphdr *th, int len, |
807 | struct sk_buff *skb); | 798 | struct sk_buff *skb); |