Diffstat (limited to 'include/net')
-rw-r--r--  include/net/neighbour.h    |   7
-rw-r--r--  include/net/request_sock.h | 255
-rw-r--r--  include/net/sch_generic.h  | 122
-rw-r--r--  include/net/sock.h         |   4
-rw-r--r--  include/net/tcp.h          | 160
-rw-r--r--  include/net/tcp_ecn.h      |  13
-rw-r--r--  include/net/xfrm.h         |  24
7 files changed, 444 insertions(+), 141 deletions(-)
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 4f33bbc21e7f..89809891e5ab 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -65,11 +65,10 @@ struct neighbour;
 
 struct neigh_parms
 {
+	struct net_device *dev;
 	struct neigh_parms *next;
 	int	(*neigh_setup)(struct neighbour *);
 	struct neigh_table *tbl;
-	int	entries;
-	void	*priv;
 
 	void	*sysctl_table;
 
@@ -192,7 +191,6 @@ struct neigh_table
 	atomic_t		entries;
 	rwlock_t		lock;
 	unsigned long		last_rand;
-	struct neigh_parms	*parms_list;
 	kmem_cache_t		*kmem_cachep;
 	struct neigh_statistics	*stats;
 	struct neighbour	**hash_buckets;
@@ -252,6 +250,9 @@ extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 extern void neigh_app_ns(struct neighbour *n);
 
+extern int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb);
+extern int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+
 extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
 extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
 extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
new file mode 100644
index 000000000000..72fd6f5e86b1
--- /dev/null
+++ b/include/net/request_sock.h
@@ -0,0 +1,255 @@
+/*
+ * NET		Generic infrastructure for Network protocols.
+ *
+ *		Definitions for request_sock
+ *
+ * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ *		From code originally in include/net/tcp.h
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+#ifndef _REQUEST_SOCK_H
+#define _REQUEST_SOCK_H
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <net/sock.h>
+
+struct request_sock;
+struct sk_buff;
+struct dst_entry;
+struct proto;
+
+struct request_sock_ops {
+	int		family;
+	kmem_cache_t	*slab;
+	int		obj_size;
+	int		(*rtx_syn_ack)(struct sock *sk,
+				       struct request_sock *req,
+				       struct dst_entry *dst);
+	void		(*send_ack)(struct sk_buff *skb,
+				    struct request_sock *req);
+	void		(*send_reset)(struct sk_buff *skb);
+	void		(*destructor)(struct request_sock *req);
+};
+
+/* struct request_sock - mini sock to represent a connection request
+ */
+struct request_sock {
+	struct request_sock		*dl_next; /* Must be first member! */
+	u16				mss;
+	u8				retrans;
+	u8				__pad;
+	/* The following two fields can be easily recomputed I think -AK */
+	u32				window_clamp; /* window clamp at creation time */
+	u32				rcv_wnd;      /* rcv_wnd offered first time */
+	u32				ts_recent;
+	unsigned long			expires;
+	struct request_sock_ops		*rsk_ops;
+	struct sock			*sk;
+};
+
+static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
+{
+	struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+
+	if (req != NULL)
+		req->rsk_ops = ops;
+
+	return req;
+}
+
+static inline void __reqsk_free(struct request_sock *req)
+{
+	kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+static inline void reqsk_free(struct request_sock *req)
+{
+	req->rsk_ops->destructor(req);
+	__reqsk_free(req);
+}
+
+extern int sysctl_max_syn_backlog;
+
+/** struct listen_sock - listen state
+ *
+ * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
+ */
+struct listen_sock {
+	u8			max_qlen_log;
+	/* 3 bytes hole, try to use */
+	int			qlen;
+	int			qlen_young;
+	int			clock_hand;
+	u32			hash_rnd;
+	struct request_sock	*syn_table[0];
+};
+
+/** struct request_sock_queue - queue of request_socks
+ *
+ * @rskq_accept_head - FIFO head of established children
+ * @rskq_accept_tail - FIFO tail of established children
+ * @syn_wait_lock - serializer
+ *
+ * %syn_wait_lock is necessary only to avoid the proc interface having to grab
+ * the main sock lock while browsing the listening hash (otherwise it's
+ * deadlock prone).
+ *
+ * This lock is acquired in read mode only from listening_get_next() seq_file
+ * op and it's acquired in write mode _only_ from code that is actively
+ * changing rskq_accept_head. All readers that are holding the master sock lock
+ * don't need to grab this lock in read mode too, as rskq_accept_head writes
+ * are always protected by the main sock lock.
+ */
+struct request_sock_queue {
+	struct request_sock	*rskq_accept_head;
+	struct request_sock	*rskq_accept_tail;
+	rwlock_t		syn_wait_lock;
+	struct listen_sock	*listen_opt;
+};
+
+extern int reqsk_queue_alloc(struct request_sock_queue *queue,
+			     const int nr_table_entries);
+
+static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
+{
+	struct listen_sock *lopt;
+
+	write_lock_bh(&queue->syn_wait_lock);
+	lopt = queue->listen_opt;
+	queue->listen_opt = NULL;
+	write_unlock_bh(&queue->syn_wait_lock);
+
+	return lopt;
+}
+
+static inline void reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+	kfree(reqsk_queue_yank_listen_sk(queue));
+}
+
+static inline struct request_sock *
+	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
+{
+	struct request_sock *req = queue->rskq_accept_head;
+
+	queue->rskq_accept_head = NULL;
+	return req;
+}
+
+static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+{
+	return queue->rskq_accept_head == NULL;
+}
+
+static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
+				      struct request_sock *req,
+				      struct request_sock **prev_req)
+{
+	write_lock(&queue->syn_wait_lock);
+	*prev_req = req->dl_next;
+	write_unlock(&queue->syn_wait_lock);
+}
+
+static inline void reqsk_queue_add(struct request_sock_queue *queue,
+				   struct request_sock *req,
+				   struct sock *parent,
+				   struct sock *child)
+{
+	req->sk = child;
+	sk_acceptq_added(parent);
+
+	if (queue->rskq_accept_head == NULL)
+		queue->rskq_accept_head = req;
+	else
+		queue->rskq_accept_tail->dl_next = req;
+
+	queue->rskq_accept_tail = req;
+	req->dl_next = NULL;
+}
+
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+{
+	struct request_sock *req = queue->rskq_accept_head;
+
+	BUG_TRAP(req != NULL);
+
+	queue->rskq_accept_head = req->dl_next;
+	if (queue->rskq_accept_head == NULL)
+		queue->rskq_accept_tail = NULL;
+
+	return req;
+}
+
+static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
+						 struct sock *parent)
+{
+	struct request_sock *req = reqsk_queue_remove(queue);
+	struct sock *child = req->sk;
+
+	BUG_TRAP(child != NULL);
+
+	sk_acceptq_removed(parent);
+	__reqsk_free(req);
+	return child;
+}
+
+static inline int reqsk_queue_removed(struct request_sock_queue *queue,
+				      struct request_sock *req)
+{
+	struct listen_sock *lopt = queue->listen_opt;
+
+	if (req->retrans == 0)
+		--lopt->qlen_young;
+
+	return --lopt->qlen;
+}
+
+static inline int reqsk_queue_added(struct request_sock_queue *queue)
+{
+	struct listen_sock *lopt = queue->listen_opt;
+	const int prev_qlen = lopt->qlen;
+
+	lopt->qlen_young++;
+	lopt->qlen++;
+	return prev_qlen;
+}
+
+static inline int reqsk_queue_len(struct request_sock_queue *queue)
+{
+	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+}
+
+static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+{
+	return queue->listen_opt->qlen_young;
+}
+
+static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+{
+	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+}
+
+static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
+					u32 hash, struct request_sock *req,
+					unsigned timeout)
+{
+	struct listen_sock *lopt = queue->listen_opt;
+
+	req->expires = jiffies + timeout;
+	req->retrans = 0;
+	req->sk = NULL;
+	req->dl_next = lopt->syn_table[hash];
+
+	write_lock(&queue->syn_wait_lock);
+	lopt->syn_table[hash] = req;
+	write_unlock(&queue->syn_wait_lock);
+}
+
+#endif /* _REQUEST_SOCK_H */
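
The header above gives listeners a protocol-neutral SYN backlog plus accept FIFO. A minimal lifecycle sketch under assumed inputs follows; my_ops, my_hash and timeout are illustrative placeholders supplied by the protocol, not names from this patch:

/* Sketch only: my_ops and my_hash come from the protocol; timeout is the
 * caller's SYN-ACK timer interval (TCP, e.g., uses TCP_TIMEOUT_INIT). */
static int example_handle_syn(struct request_sock_queue *queue,
			      struct request_sock_ops *my_ops,
			      u32 my_hash, unsigned timeout)
{
	struct request_sock *req;

	if (reqsk_queue_is_full(queue))
		return -1;			/* SYN backlog exhausted */

	req = reqsk_alloc(my_ops);		/* sets req->rsk_ops */
	if (req == NULL)
		return -1;

	/* park the embryonic request in the SYN hash until the final ACK */
	reqsk_queue_hash_req(queue, my_hash, req, timeout);
	reqsk_queue_added(queue);		/* bumps qlen and qlen_young */
	return 0;
}

/* Once the handshake completes, reqsk_queue_add() moves the request onto
 * the accept FIFO; accept() then drains it: */
static struct sock *example_accept(struct request_sock_queue *queue,
				   struct sock *listener)
{
	return reqsk_queue_empty(queue) ? NULL
					: reqsk_queue_get_child(queue, listener);
}

Note that reqsk_queue_added() returns the previous qlen, which is what lets a caller arm its SYN-ACK timer only on the 0 -> 1 transition (see the tcp_synq_added() conversion in tcp.h below).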
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c57504b3b518..7b97405e2dbf 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -172,4 +172,126 @@ tcf_destroy(struct tcf_proto *tp)
 	kfree(tp);
 }
 
+static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
+				       struct sk_buff_head *list)
+{
+	__skb_queue_tail(list, skb);
+	sch->qstats.backlog += skb->len;
+	sch->bstats.bytes += skb->len;
+	sch->bstats.packets++;
+
+	return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return __qdisc_enqueue_tail(skb, sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
+						   struct sk_buff_head *list)
+{
+	struct sk_buff *skb = __skb_dequeue(list);
+
+	if (likely(skb != NULL))
+		sch->qstats.backlog -= skb->len;
+
+	return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+{
+	return __qdisc_dequeue_head(sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
+						   struct sk_buff_head *list)
+{
+	struct sk_buff *skb = __skb_dequeue_tail(list);
+
+	if (likely(skb != NULL))
+		sch->qstats.backlog -= skb->len;
+
+	return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
+{
+	return __qdisc_dequeue_tail(sch, &sch->q);
+}
+
+static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
+				  struct sk_buff_head *list)
+{
+	__skb_queue_head(list, skb);
+	sch->qstats.backlog += skb->len;
+	sch->qstats.requeues++;
+
+	return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return __qdisc_requeue(skb, sch, &sch->q);
+}
+
+static inline void __qdisc_reset_queue(struct Qdisc *sch,
+				       struct sk_buff_head *list)
+{
+	/*
+	 * We do not know the backlog in bytes of this list, it
+	 * is up to the caller to correct it
+	 */
+	skb_queue_purge(list);
+}
+
+static inline void qdisc_reset_queue(struct Qdisc *sch)
+{
+	__qdisc_reset_queue(sch, &sch->q);
+	sch->qstats.backlog = 0;
+}
+
+static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
+					      struct sk_buff_head *list)
+{
+	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
+
+	if (likely(skb != NULL)) {
+		unsigned int len = skb->len;
+		kfree_skb(skb);
+		return len;
+	}
+
+	return 0;
+}
+
+static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
+{
+	return __qdisc_queue_drop(sch, &sch->q);
+}
+
+static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
+{
+	kfree_skb(skb);
+	sch->qstats.drops++;
+
+	return NET_XMIT_DROP;
+}
+
+static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
+{
+	sch->qstats.drops++;
+
+#ifdef CONFIG_NET_CLS_POLICE
+	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
+		goto drop;
+
+	return NET_XMIT_SUCCESS;
+
+drop:
+#endif
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
+}
+
 #endif
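
These helpers centralize the backlog and byte/packet accounting that FIFO-style queueing disciplines were each duplicating. A sketch of a bare-bones qdisc written against them, assuming an illustrative hard limit of 128 packets:

/* Sketch: a trivial FIFO using the new helpers; the limit is illustrative. */
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (skb_queue_len(&sch->q) < 128)
		return qdisc_enqueue_tail(skb, sch); /* updates backlog + bstats */

	return qdisc_drop(skb, sch);		     /* frees skb, bumps drops */
}

static struct sk_buff *example_fifo_dequeue(struct Qdisc *sch)
{
	return qdisc_dequeue_head(sch);		     /* shrinks backlog */
}

static void example_fifo_reset(struct Qdisc *sch)
{
	qdisc_reset_queue(sch);			     /* purge + zero backlog */
}

With the bookkeeping factored out like this, simple qdiscs can shrink to little more than such wrappers.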
diff --git a/include/net/sock.h b/include/net/sock.h
index a9ef3a6a13f3..e593af5b1ecc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -484,6 +484,8 @@ extern void sk_stream_kill_queues(struct sock *sk);
 
 extern int sk_wait_data(struct sock *sk, long *timeo);
 
+struct request_sock_ops;
+
 /* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
@@ -547,6 +549,8 @@ struct proto {
 	kmem_cache_t		*slab;
 	unsigned int		obj_size;
 
+	struct request_sock_ops	*rsk_prot;
+
 	struct module		*owner;
 
 	char			name[32];
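
struct proto now carries the protocol's request_sock_ops, so connection-request handling can be driven generically from the socket layer. A hedged sketch of the wiring this enables; the example_* names are placeholders, and the function-pointer members are omitted:

/* Sketch: callback members (rtx_syn_ack, send_ack, send_reset, destructor)
 * omitted; a real protocol fills them with its handlers and usually uses a
 * larger per-protocol request type for obj_size. */
static struct request_sock_ops example_request_sock_ops = {
	.family   = PF_INET,
	.obj_size = sizeof(struct request_sock),
};

static struct proto example_prot = {
	.name     = "EXAMPLE",
	.rsk_prot = &example_request_sock_ops,
};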
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e71f8ba3e101..f730935b824a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -31,6 +31,7 @@
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <net/checksum.h>
+#include <net/request_sock.h>
 #include <net/sock.h>
 #include <net/snmp.h>
 #include <net/ip.h>
@@ -563,7 +564,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
 #define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */
 
 /* sysctl variables for tcp */
-extern int sysctl_max_syn_backlog;
 extern int sysctl_tcp_timestamps;
 extern int sysctl_tcp_window_scaling;
 extern int sysctl_tcp_sack;
@@ -613,74 +613,6 @@ extern atomic_t tcp_memory_allocated;
 extern atomic_t tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
-struct open_request;
-
-struct or_calltable {
-	int  family;
-	int  (*rtx_syn_ack)	(struct sock *sk, struct open_request *req, struct dst_entry*);
-	void (*send_ack)	(struct sk_buff *skb, struct open_request *req);
-	void (*destructor)	(struct open_request *req);
-	void (*send_reset)	(struct sk_buff *skb);
-};
-
-struct tcp_v4_open_req {
-	__u32			loc_addr;
-	__u32			rmt_addr;
-	struct ip_options	*opt;
-};
-
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-struct tcp_v6_open_req {
-	struct in6_addr		loc_addr;
-	struct in6_addr		rmt_addr;
-	struct sk_buff		*pktopts;
-	int			iif;
-};
-#endif
-
-/* this structure is too big */
-struct open_request {
-	struct open_request	*dl_next; /* Must be first member! */
-	__u32			rcv_isn;
-	__u32			snt_isn;
-	__u16			rmt_port;
-	__u16			mss;
-	__u8			retrans;
-	__u8			__pad;
-	__u16			snd_wscale : 4,
-				rcv_wscale : 4,
-				tstamp_ok : 1,
-				sack_ok : 1,
-				wscale_ok : 1,
-				ecn_ok : 1,
-				acked : 1;
-	/* The following two fields can be easily recomputed I think -AK */
-	__u32			window_clamp;	/* window clamp at creation time */
-	__u32			rcv_wnd;	/* rcv_wnd offered first time */
-	__u32			ts_recent;
-	unsigned long		expires;
-	struct or_calltable	*class;
-	struct sock		*sk;
-	union {
-		struct tcp_v4_open_req v4_req;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-		struct tcp_v6_open_req v6_req;
-#endif
-	} af;
-};
-
-/* SLAB cache for open requests. */
-extern kmem_cache_t *tcp_openreq_cachep;
-
-#define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
-#define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)
-
-static inline void tcp_openreq_free(struct open_request *req)
-{
-	req->class->destructor(req);
-	tcp_openreq_fastfree(req);
-}
-
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
@@ -708,7 +640,7 @@ struct tcp_func {
 
 	struct sock *		(*syn_recv_sock)	(struct sock *sk,
 							 struct sk_buff *skb,
-							 struct open_request *req,
+							 struct request_sock *req,
 							 struct dst_entry *dst);
 
 	int			(*remember_stamp)	(struct sock *sk);
@@ -852,8 +784,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
 						      unsigned len);
 
 extern struct sock *		tcp_check_req(struct sock *sk,struct sk_buff *skb,
-					      struct open_request *req,
-					      struct open_request **prev);
+					      struct request_sock *req,
+					      struct request_sock **prev);
 extern int			tcp_child_process(struct sock *parent,
 						  struct sock *child,
 						  struct sk_buff *skb);
@@ -903,12 +835,12 @@ extern int tcp_v4_conn_request(struct sock *sk,
 					       struct sk_buff *skb);
 
 extern struct sock *		tcp_create_openreq_child(struct sock *sk,
-							 struct open_request *req,
+							 struct request_sock *req,
 							 struct sk_buff *skb);
 
 extern struct sock *		tcp_v4_syn_recv_sock(struct sock *sk,
 						     struct sk_buff *skb,
-						     struct open_request *req,
+						     struct request_sock *req,
 						     struct dst_entry *dst);
 
 extern int			tcp_v4_do_rcv(struct sock *sk,
@@ -922,7 +854,7 @@ extern int tcp_connect(struct sock *sk);
 
 extern struct sk_buff *		tcp_make_synack(struct sock *sk,
 						struct dst_entry *dst,
-						struct open_request *req);
+						struct request_sock *req);
 
 extern int			tcp_disconnect(struct sock *sk, int flags);
 
@@ -1750,99 +1682,71 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
+static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
 					 struct sock *child)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	req->sk = child;
-	sk_acceptq_added(sk);
-
-	if (!tp->accept_queue_tail) {
-		tp->accept_queue = req;
-	} else {
-		tp->accept_queue_tail->dl_next = req;
-	}
-	tp->accept_queue_tail = req;
-	req->dl_next = NULL;
+	reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
 }
 
-struct tcp_listen_opt
-{
-	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
-	int			qlen;
-	int			qlen_young;
-	int			clock_hand;
-	u32			hash_rnd;
-	struct open_request	*syn_table[TCP_SYNQ_HSIZE];
-};
-
 static inline void
-tcp_synq_removed(struct sock *sk, struct open_request *req)
+tcp_synq_removed(struct sock *sk, struct request_sock *req)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (--lopt->qlen == 0)
+	if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
 		tcp_delete_keepalive_timer(sk);
-	if (req->retrans == 0)
-		lopt->qlen_young--;
 }
 
 static inline void tcp_synq_added(struct sock *sk)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (lopt->qlen++ == 0)
+	if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
 		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
-	lopt->qlen_young++;
 }
 
 static inline int tcp_synq_len(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen;
+	return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_young(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen_young;
+	return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_is_full(struct sock *sk)
 {
-	return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
+	return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
 }
 
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
-				       struct open_request **prev)
+static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
+				       struct request_sock **prev)
 {
-	write_lock(&tp->syn_wait_lock);
-	*prev = req->dl_next;
-	write_unlock(&tp->syn_wait_lock);
+	reqsk_queue_unlink(&tp->accept_queue, req, prev);
 }
 
-static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
-				 struct open_request **prev)
+static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
+				 struct request_sock **prev)
 {
 	tcp_synq_unlink(tcp_sk(sk), req, prev);
 	tcp_synq_removed(sk, req);
-	tcp_openreq_free(req);
+	reqsk_free(req);
 }
 
-static __inline__ void tcp_openreq_init(struct open_request *req,
+static __inline__ void tcp_openreq_init(struct request_sock *req,
 					struct tcp_options_received *rx_opt,
 					struct sk_buff *skb)
 {
+	struct inet_request_sock *ireq = inet_rsk(req);
+
 	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
-	req->rcv_isn = TCP_SKB_CB(skb)->seq;
+	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
 	req->mss = rx_opt->mss_clamp;
 	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
-	req->tstamp_ok = rx_opt->tstamp_ok;
-	req->sack_ok = rx_opt->sack_ok;
-	req->snd_wscale = rx_opt->snd_wscale;
-	req->wscale_ok = rx_opt->wscale_ok;
-	req->acked = 0;
-	req->ecn_ok = 0;
-	req->rmt_port = skb->h.th->source;
+	ireq->tstamp_ok = rx_opt->tstamp_ok;
+	ireq->sack_ok = rx_opt->sack_ok;
+	ireq->snd_wscale = rx_opt->snd_wscale;
+	ireq->wscale_ok = rx_opt->wscale_ok;
+	ireq->acked = 0;
+	ireq->ecn_ok = 0;
+	ireq->rmt_port = skb->h.th->source;
 }
 
 extern void tcp_enter_memory_pressure(void);
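
tcp_openreq_init() above splits request state between the generic struct request_sock (mss, ts_recent, ...) and option bits reached through inet_rsk(). Neither inet_request_sock nor the inet_rsk()/tcp_rsk() accessors are defined in this header diff; they come from elsewhere in the patch. A sketch of the embedding they imply, with an abridged, illustrative field list:

/* Sketch (abridged): per-family request state embeds the generic struct at
 * offset zero, so inet_rsk()/tcp_rsk() are simple downcasts. */
struct inet_request_sock {
	struct request_sock	req;	/* generic part, must come first */
	u16			snd_wscale : 4,
				rcv_wscale : 4,
				tstamp_ok  : 1,
				sack_ok	   : 1,
				wscale_ok  : 1,
				ecn_ok	   : 1,
				acked	   : 1;
	u16			rmt_port;
	/* addresses, IP options, ... */
};

static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
{
	return (struct inet_request_sock *)sk;
}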
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index dc1456389a97..64980ee8c92a 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -2,6 +2,7 @@
 #define _NET_TCP_ECN_H_ 1
 
 #include <net/inet_ecn.h>
+#include <net/request_sock.h>
 
 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
 
@@ -38,9 +39,9 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
 }
 
 static __inline__ void
-TCP_ECN_make_synack(struct open_request *req, struct tcphdr *th)
+TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
 {
-	if (req->ecn_ok)
+	if (inet_rsk(req)->ecn_ok)
 		th->ece = 1;
 }
 
@@ -111,16 +112,16 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
 }
 
 static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
-					 struct open_request *req)
+					 struct request_sock *req)
 {
-	tp->ecn_flags = req->ecn_ok ? TCP_ECN_OK : 0;
+	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
 }
 
 static __inline__ void
-TCP_ECN_create_request(struct open_request *req, struct tcphdr *th)
+TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
 {
 	if (sysctl_tcp_ecn && th->ece && th->cwr)
-		req->ecn_ok = 1;
+		inet_rsk(req)->ecn_ok = 1;
 }
 
 #endif
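
Together, these converted helpers cover RFC 3168 SYN-time negotiation on the request_sock: record the peer's offer, echo ECE in the SYN-ACK, and inherit the result into the child. A sketch of the call order in a listener path; the wrapper function is illustrative, and only the three TCP_ECN_* calls come from this header:

/* Sketch: where the converted helpers sit in a SYN / SYN-ACK exchange. */
static void example_syn_path(struct request_sock *req, struct tcphdr *syn_th,
			     struct tcphdr *synack_th, struct tcp_sock *child_tp)
{
	TCP_ECN_create_request(req, syn_th);	/* remember that the peer offered ECN */
	TCP_ECN_make_synack(req, synack_th);	/* echo ECE in our SYN-ACK */
	TCP_ECN_openreq_child(child_tp, req);	/* child inherits TCP_ECN_OK */
}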
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d675836ba6c3..0e65e02b7a1d 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -158,6 +158,20 @@ enum {
 	XFRM_STATE_DEAD
 };
 
+/* callback structure passed from either netlink or pfkey */
+struct km_event
+{
+	union {
+		u32 hard;
+		u32 proto;
+		u32 byid;
+	} data;
+
+	u32	seq;
+	u32	pid;
+	u32	event;
+};
+
 struct xfrm_type;
 struct xfrm_dst;
 struct xfrm_policy_afinfo {
@@ -179,6 +193,8 @@ struct xfrm_policy_afinfo {
 
 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
+extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
 
 #define XFRM_ACQ_EXPIRES	30
 
@@ -290,11 +306,11 @@ struct xfrm_mgr
 {
 	struct list_head	list;
 	char			*id;
-	int			(*notify)(struct xfrm_state *x, int event);
+	int			(*notify)(struct xfrm_state *x, struct km_event *c);
 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
 	struct xfrm_policy	*(*compile_policy)(u16 family, int opt, u8 *data, int len, int *dir);
 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
-	int			(*notify_policy)(struct xfrm_policy *x, int dir, int event);
+	int			(*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
 };
 
 extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -656,7 +672,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk)
 	return 0;
 }
 
-extern void xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
 
 static inline void xfrm_sk_free_policy(struct sock *sk)
 {
@@ -817,7 +833,7 @@ extern int xfrm_state_add(struct xfrm_state *x);
 extern int xfrm_state_update(struct xfrm_state *x);
 extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family);
 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
-extern void xfrm_state_delete(struct xfrm_state *x);
+extern int xfrm_state_delete(struct xfrm_state *x);
 extern void xfrm_state_flush(u8 proto);
 extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
 extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
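
struct km_event lets a single notification carry its trigger context (hard expiry, flushed protocol, delete-by-id) plus the originating netlink seq/pid to every registered key manager, instead of a bare int event. A sketch of a notify hook under the new signature; the XFRM_MSG_* event codes are assumed here for illustration:

/* Sketch: an xfrm_mgr notify callback under the new km_event signature. */
static int example_km_notify(struct xfrm_state *x, struct km_event *c)
{
	switch (c->event) {
	case XFRM_MSG_EXPIRE:
		/* c->data.hard distinguishes hard from soft expiry */
		break;
	case XFRM_MSG_DELSA:
		/* c->data.byid says whether deletion was requested by SA id */
		break;
	}
	return 0;
}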