Diffstat (limited to 'include/net')
-rw-r--r--  include/net/tcp.h | 93
1 file changed, 42 insertions, 51 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0c769adb0463..6c9f6f7cab5c 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -44,13 +44,13 @@
  * New scheme, half the table is for TIME_WAIT, the other half is
  * for the rest. I'll experiment with dynamic table growth later.
  */
-struct tcp_ehash_bucket {
+struct inet_ehash_bucket {
 	rwlock_t lock;
 	struct hlist_head chain;
 } __attribute__((__aligned__(8)));
 
 /* This is for listening sockets, thus all sockets which possess wildcards. */
-#define TCP_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
+#define INET_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
 
 /* There are a few simple rules, which allow for local port reuse by
  * an application. In essence:
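The established-table bucket itself is unchanged by this hunk; only the tcp_ prefix becomes inet_, and each bucket still pairs a per-chain rwlock with an hlist head. As a rough sketch of how such a bucket is typically walked (the lookup_in_bucket() helper and its match callback are illustrative, not part of this patch):

/* Illustrative only: scan one established-hash chain under its lock. */
static struct sock *lookup_in_bucket(struct inet_ehash_bucket *head,
				     int (*match)(const struct sock *sk))
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&head->lock);
	sk_for_each(sk, node, &head->chain)
		if (match(sk))
			goto found;
	sk = NULL;
found:
	read_unlock(&head->lock);
	return sk;
}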
@@ -83,31 +83,22 @@ struct tcp_ehash_bucket {
  * users logged onto your box, isn't it nice to know that new data
  * ports are created in O(1) time? I thought so. ;-) -DaveM
  */
-struct tcp_bind_bucket {
+struct inet_bind_bucket {
 	unsigned short port;
 	signed short fastreuse;
 	struct hlist_node node;
 	struct hlist_head owners;
 };
 
-#define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)
+#define inet_bind_bucket_for_each(tb, node, head) \
+	hlist_for_each_entry(tb, node, head, node)
 
-struct tcp_bind_hashbucket {
+struct inet_bind_hashbucket {
 	spinlock_t lock;
 	struct hlist_head chain;
 };
 
-static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
-{
-	return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
-}
-
-static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
-{
-	return hlist_empty(&head->chain) ? NULL : __tb_head(head);
-}
-
-extern struct tcp_hashinfo {
+struct inet_hashinfo {
 	/* This is for sockets with full identity only. Sockets here will
 	 * always be without wildcards and will have the following invariant:
 	 *
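The new inet_bind_bucket_for_each() wrapper is the old tb_for_each() under an AF-independent name, while the __tb_head()/tb_head() helpers are removed. A minimal sketch of the chain-walk pattern the macro supports, assuming the caller already holds head->lock (the find_tb() name is illustrative, not part of this patch):

/* Illustrative only: find the bind bucket for local port 'snum'. */
static struct inet_bind_bucket *find_tb(struct inet_bind_hashbucket *head,
					unsigned short snum)
{
	struct inet_bind_bucket *tb;
	struct hlist_node *node;

	inet_bind_bucket_for_each(tb, node, &head->chain)
		if (tb->port == snum)
			return tb;
	return NULL;
}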
@@ -116,21 +107,21 @@ extern struct tcp_hashinfo {
 	 * First half of the table is for sockets not in TIME_WAIT, second half
 	 * is for TIME_WAIT sockets only.
 	 */
-	struct tcp_ehash_bucket *__tcp_ehash;
+	struct inet_ehash_bucket *ehash;
 
 	/* Ok, let's try this, I give up, we do need a local binding
 	 * TCP hash as well as the others for fast bind/connect.
 	 */
-	struct tcp_bind_hashbucket *__tcp_bhash;
+	struct inet_bind_hashbucket *bhash;
 
-	int __tcp_bhash_size;
-	int __tcp_ehash_size;
+	int bhash_size;
+	int ehash_size;
 
 	/* All sockets in TCP_LISTEN state will be in here. This is the only
 	 * table where wildcard'd TCP sockets can exist. Hash function here
 	 * is just local port number.
 	 */
-	struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];
+	struct hlist_head listening_hash[INET_LHTABLE_SIZE];
 
 	/* All the above members are written once at bootup and
 	 * never written again _or_ are predominantly read-access.
@@ -138,36 +129,39 @@ extern struct tcp_hashinfo {
 	 * Now align to a new cache line as all the following members
 	 * are often dirty.
 	 */
-	rwlock_t __tcp_lhash_lock ____cacheline_aligned;
-	atomic_t __tcp_lhash_users;
-	wait_queue_head_t __tcp_lhash_wait;
-	spinlock_t __tcp_portalloc_lock;
-} tcp_hashinfo;
+	rwlock_t lhash_lock ____cacheline_aligned;
+	atomic_t lhash_users;
+	wait_queue_head_t lhash_wait;
+	spinlock_t portalloc_lock;
+};
 
-#define tcp_ehash (tcp_hashinfo.__tcp_ehash)
-#define tcp_bhash (tcp_hashinfo.__tcp_bhash)
-#define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
-#define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
-#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
-#define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
-#define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
-#define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
-#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
+extern struct inet_hashinfo tcp_hashinfo;
+#define tcp_ehash (tcp_hashinfo.ehash)
+#define tcp_bhash (tcp_hashinfo.bhash)
+#define tcp_ehash_size (tcp_hashinfo.ehash_size)
+#define tcp_bhash_size (tcp_hashinfo.bhash_size)
+#define tcp_listening_hash (tcp_hashinfo.listening_hash)
+#define tcp_lhash_lock (tcp_hashinfo.lhash_lock)
+#define tcp_lhash_users (tcp_hashinfo.lhash_users)
+#define tcp_lhash_wait (tcp_hashinfo.lhash_wait)
+#define tcp_portalloc_lock (tcp_hashinfo.portalloc_lock)
 
 extern kmem_cache_t *tcp_bucket_cachep;
-extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
-						 unsigned short snum);
-extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
-extern void tcp_bucket_unlock(struct sock *sk);
+extern struct inet_bind_bucket *
+	inet_bind_bucket_create(kmem_cache_t *cachep,
+				struct inet_bind_hashbucket *head,
+				const unsigned short snum);
+extern void inet_bind_bucket_destroy(kmem_cache_t *cachep,
+				     struct inet_bind_bucket *tb);
 extern int tcp_port_rover;
 
 /* These are AF independent. */
-static __inline__ int tcp_bhashfn(__u16 lport)
+static inline int inet_bhashfn(const __u16 lport, const int bhash_size)
 {
-	return (lport & (tcp_bhash_size - 1));
+	return lport & (bhash_size - 1);
 }
 
-extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
+extern void tcp_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
 			  unsigned short snum);
 
 #if (BITS_PER_LONG == 64)
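With the structure renamed and its fields de-prefixed, the old tcp_* identifiers survive as compatibility macros over the single extern tcp_hashinfo instance, and the bind-hash function now takes the table size as a parameter instead of reading a global. A small usage sketch, assuming bhash_size is a power of two (which the mask in inet_bhashfn() requires); bhead_for_port() is an illustrative helper, not part of the patch:

/* Illustrative only: pick the bind-hash chain for a local port. */
static inline struct inet_bind_hashbucket *bhead_for_port(__u16 lport)
{
	/* e.g. lport 8080 with bhash_size 512 lands in chain 8080 & 511 = 400 */
	return &tcp_bhash[inet_bhashfn(lport, tcp_bhash_size)];
}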
@@ -212,7 +206,7 @@ struct tcp_tw_bucket {
 	__u32 tw_ts_recent;
 	long tw_ts_recent_stamp;
 	unsigned long tw_ttd;
-	struct tcp_bind_bucket *tw_tb;
+	struct inet_bind_bucket *tw_tb;
 	struct hlist_node tw_death_node;
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	struct in6_addr tw_v6_daddr;
@@ -366,14 +360,14 @@ extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
 	 (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
 
 /* These can have wildcards, don't try too hard. */
-static __inline__ int tcp_lhashfn(unsigned short num)
+static inline int inet_lhashfn(const unsigned short num)
 {
-	return num & (TCP_LHTABLE_SIZE - 1);
+	return num & (INET_LHTABLE_SIZE - 1);
 }
 
-static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
+static inline int inet_sk_listen_hashfn(const struct sock *sk)
 {
-	return tcp_lhashfn(inet_sk(sk)->num);
+	return inet_lhashfn(inet_sk(sk)->num);
 }
 
 #define MAX_TCP_HEADER	(128 + MAX_HEADER)
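The listening-hash helpers get the same treatment: INET_LHTABLE_SIZE stays at 32, so inet_lhashfn() simply masks the local port with 31. A one-line worked example (illustrative, not part of the patch):

/* Illustrative only: a socket listening on port 8080 hashes to
 * 8080 & (INET_LHTABLE_SIZE - 1) == 16, i.e. tcp_listening_hash[16]. */
struct hlist_head *lhead = &tcp_listening_hash[inet_lhashfn(8080)];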
@@ -799,9 +793,6 @@ extern void tcp_parse_options(struct sk_buff *skb,
  * TCP v4 functions exported for the inet6 API
  */
 
-extern int tcp_v4_build_header(struct sock *sk,
-			       struct sk_buff *skb);
-
 extern void tcp_v4_send_check(struct sock *sk,
 			      struct tcphdr *th, int len,
 			      struct sk_buff *skb);