Diffstat (limited to 'include')
 include/linux/ip.h         |  21
 include/linux/ipv6.h       |  13
 include/linux/netlink.h    |  24
 include/linux/rtnetlink.h  | 176
 include/linux/slab.h       |   1
 include/linux/tcp.h        |  28
 include/linux/xfrm.h       |   4
 include/net/neighbour.h    |   7
 include/net/request_sock.h | 255
 include/net/sch_generic.h  | 122
 include/net/sock.h         |   4
 include/net/tcp.h          | 160
 include/net/tcp_ecn.h      |  13
 include/net/xfrm.h         |  24
 14 files changed, 691 insertions(+), 161 deletions(-)
diff --git a/include/linux/ip.h b/include/linux/ip.h
index 8438c68591f9..31e7cedd9f84 100644
--- a/include/linux/ip.h
+++ b/include/linux/ip.h
@@ -81,6 +81,7 @@
 #ifdef __KERNEL__
 #include <linux/config.h>
 #include <linux/types.h>
+#include <net/request_sock.h>
 #include <net/sock.h>
 #include <linux/igmp.h>
 #include <net/flow.h>
@@ -107,6 +108,26 @@ struct ip_options {
 
 #define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
 
+struct inet_request_sock {
+	struct request_sock	req;
+	u32			loc_addr;
+	u32			rmt_addr;
+	u16			rmt_port;
+	u16			snd_wscale : 4,
+				rcv_wscale : 4,
+				tstamp_ok  : 1,
+				sack_ok    : 1,
+				wscale_ok  : 1,
+				ecn_ok     : 1,
+				acked      : 1;
+	struct ip_options	*opt;
+};
+
+static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
+{
+	return (struct inet_request_sock *)sk;
+}
+
 struct ipv6_pinfo;
 
 struct inet_sock {
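
The inet_rsk() accessor above is only sound because struct request_sock is the first member of struct inet_request_sock, so a pointer to either aliases the other; tcp_rsk() and tcp6_rsk() further down stack the same way (request_sock -> inet_request_sock -> tcp_request_sock -> tcp6_request_sock). A minimal user-space sketch of the invariant the accessors rely on — names here are illustrative, not part of the patch:

	#include <assert.h>
	#include <stddef.h>

	struct request_sock { int mss; };
	struct inet_request_sock {
		struct request_sock req;	/* must remain the first member */
		unsigned int rmt_addr;
	};

	/* Same shape as the kernel's inet_rsk(): a plain downcast. */
	static struct inet_request_sock *inet_rsk(struct request_sock *sk)
	{
		return (struct inet_request_sock *)sk;
	}

	int main(void)
	{
		struct inet_request_sock ireq = { { 1460 }, 0x7f000001 };

		/* The cast is valid only while 'req' sits at offset 0. */
		assert(offsetof(struct inet_request_sock, req) == 0);
		assert(inet_rsk(&ireq.req) == &ireq);
		return 0;
	}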
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index ab0d0efbf240..6fcd6a0ade24 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -193,6 +193,19 @@ struct inet6_skb_parm {
 
 #define IP6CB(skb)	((struct inet6_skb_parm*)((skb)->cb))
 
+struct tcp6_request_sock {
+	struct tcp_request_sock	req;
+	struct in6_addr		loc_addr;
+	struct in6_addr		rmt_addr;
+	struct sk_buff		*pktopts;
+	int			iif;
+};
+
+static inline struct tcp6_request_sock *tcp6_rsk(const struct request_sock *sk)
+{
+	return (struct tcp6_request_sock *)sk;
+}
+
 /**
  * struct ipv6_pinfo - ipv6 private area
  *
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index b2738ac8bc99..e38407a23d04 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -156,7 +156,7 @@ struct netlink_notify
 };
 
 static __inline__ struct nlmsghdr *
-__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
+__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
 {
 	struct nlmsghdr *nlh;
 	int size = NLMSG_LENGTH(len);
@@ -164,15 +164,31 @@ __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len)
 	nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
 	nlh->nlmsg_type = type;
 	nlh->nlmsg_len = size;
-	nlh->nlmsg_flags = 0;
+	nlh->nlmsg_flags = flags;
 	nlh->nlmsg_pid = pid;
 	nlh->nlmsg_seq = seq;
 	return nlh;
 }
 
+#define NLMSG_NEW(skb, pid, seq, type, len, flags) \
+({	if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) \
+		goto nlmsg_failure; \
+	__nlmsg_put(skb, pid, seq, type, len, flags); })
+
 #define NLMSG_PUT(skb, pid, seq, type, len) \
-({	if (skb_tailroom(skb) < (int)NLMSG_SPACE(len)) goto nlmsg_failure; \
-	__nlmsg_put(skb, pid, seq, type, len); })
+	NLMSG_NEW(skb, pid, seq, type, len, 0)
+
+#define NLMSG_NEW_ANSWER(skb, cb, type, len, flags) \
+	NLMSG_NEW(skb, NETLINK_CB((cb)->skb).pid, \
+		  (cb)->nlh->nlmsg_seq, type, len, flags)
+
+#define NLMSG_END(skb, nlh) \
+({	(nlh)->nlmsg_len = (skb)->tail - (unsigned char *) (nlh); \
+	(skb)->len; })
+
+#define NLMSG_CANCEL(skb, nlh) \
+({	skb_trim(skb, (unsigned char *) (nlh) - (skb)->data); \
+	-1; })
 
 extern int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 			      struct nlmsghdr *nlh,
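
The new macros factor the overflow check out of every message builder: NLMSG_NEW reserves a header (jumping to a caller-provided nlmsg_failure label when the skb is full), NLMSG_END back-patches the final nlmsg_len once all attributes are in, and NLMSG_CANCEL rolls the skb back to the state before the message was started. A hedged sketch of the intended calling convention — my_fill_info, MY_MSG_TYPE, MY_ATTR_FOO and struct my_msg are hypothetical, and RTA_PUT_U32 comes from the rtnetlink.h additions below:

	static int my_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int flags)
	{
		struct my_msg *m;
		struct nlmsghdr *nlh;

		nlh = NLMSG_NEW(skb, pid, seq, MY_MSG_TYPE, sizeof(*m), flags);
		m = NLMSG_DATA(nlh);
		m->some_field = 0;

		RTA_PUT_U32(skb, MY_ATTR_FOO, 42);	/* jumps to rtattr_failure on overflow */

		return NLMSG_END(skb, nlh);		/* fix up nlmsg_len, return skb->len */

	rtattr_failure:
		return NLMSG_CANCEL(skb, nlh);		/* trim the partial message, return -1 */

	nlmsg_failure:
		return -1;				/* NLMSG_NEW itself did not fit */
	}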
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 91ac97c20777..e68dbf0bf579 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -89,6 +89,13 @@ enum {
 	RTM_GETANYCAST	= 62,
 #define RTM_GETANYCAST	RTM_GETANYCAST
 
+	RTM_NEWNEIGHTBL	= 64,
+#define RTM_NEWNEIGHTBL	RTM_NEWNEIGHTBL
+	RTM_GETNEIGHTBL	= 66,
+#define RTM_GETNEIGHTBL	RTM_GETNEIGHTBL
+	RTM_SETNEIGHTBL,
+#define RTM_SETNEIGHTBL	RTM_SETNEIGHTBL
+
 	__RTM_MAX,
 #define RTM_MAX		(((__RTM_MAX + 3) & ~3) - 1)
 };
@@ -493,6 +500,106 @@ struct nda_cacheinfo
 	__u32		ndm_refcnt;
 };
 
+
+/*****************************************************************
+ * Neighbour tables specific messages.
+ *
+ * To retrieve the neighbour tables send RTM_GETNEIGHTBL with the
+ * NLM_F_DUMP flag set. Every neighbour table configuration is
+ * spread over multiple messages to avoid running into message
+ * size limits on systems with many interfaces. The first message
+ * in the sequence transports all non-device-specific data such as
+ * statistics, configuration, and the default parameter set.
+ * This message is followed by 0..n messages carrying device-
+ * specific parameter sets.
+ * Although the ordering should be sufficient, NDTA_NAME can be
+ * used to identify sequences. The initial message can be identified
+ * by checking for NDTA_CONFIG. The device-specific messages do
+ * not contain this TLV but have NDTPA_IFINDEX set to the
+ * corresponding interface index.
+ *
+ * To change neighbour table attributes, send RTM_SETNEIGHTBL
+ * with NDTA_NAME set. Changeable attributes include NDTA_THRESH[1-3],
+ * NDTA_GC_INTERVAL, and all TLVs in NDTA_PARMS unless marked
+ * otherwise. Device-specific parameter sets can be changed by
+ * setting NDTPA_IFINDEX to the interface index of the corresponding
+ * device.
+ ****/
+
+struct ndt_stats
+{
+	__u64		ndts_allocs;
+	__u64		ndts_destroys;
+	__u64		ndts_hash_grows;
+	__u64		ndts_res_failed;
+	__u64		ndts_lookups;
+	__u64		ndts_hits;
+	__u64		ndts_rcv_probes_mcast;
+	__u64		ndts_rcv_probes_ucast;
+	__u64		ndts_periodic_gc_runs;
+	__u64		ndts_forced_gc_runs;
+};
+
+enum {
+	NDTPA_UNSPEC,
+	NDTPA_IFINDEX,			/* u32, unchangeable */
+	NDTPA_REFCNT,			/* u32, read-only */
+	NDTPA_REACHABLE_TIME,		/* u64, read-only, msecs */
+	NDTPA_BASE_REACHABLE_TIME,	/* u64, msecs */
+	NDTPA_RETRANS_TIME,		/* u64, msecs */
+	NDTPA_GC_STALETIME,		/* u64, msecs */
+	NDTPA_DELAY_PROBE_TIME,		/* u64, msecs */
+	NDTPA_QUEUE_LEN,		/* u32 */
+	NDTPA_APP_PROBES,		/* u32 */
+	NDTPA_UCAST_PROBES,		/* u32 */
+	NDTPA_MCAST_PROBES,		/* u32 */
+	NDTPA_ANYCAST_DELAY,		/* u64, msecs */
+	NDTPA_PROXY_DELAY,		/* u64, msecs */
+	NDTPA_PROXY_QLEN,		/* u32 */
+	NDTPA_LOCKTIME,			/* u64, msecs */
+	__NDTPA_MAX
+};
+#define NDTPA_MAX (__NDTPA_MAX - 1)
+
+struct ndtmsg
+{
+	__u8		ndtm_family;
+	__u8		ndtm_pad1;
+	__u16		ndtm_pad2;
+};
+
+struct ndt_config
+{
+	__u16		ndtc_key_len;
+	__u16		ndtc_entry_size;
+	__u32		ndtc_entries;
+	__u32		ndtc_last_flush;	/* delta to now in msecs */
+	__u32		ndtc_last_rand;		/* delta to now in msecs */
+	__u32		ndtc_hash_rnd;
+	__u32		ndtc_hash_mask;
+	__u32		ndtc_hash_chain_gc;
+	__u32		ndtc_proxy_qlen;
+};
+
+enum {
+	NDTA_UNSPEC,
+	NDTA_NAME,		/* char *, unchangeable */
+	NDTA_THRESH1,		/* u32 */
+	NDTA_THRESH2,		/* u32 */
+	NDTA_THRESH3,		/* u32 */
+	NDTA_CONFIG,		/* struct ndt_config, read-only */
+	NDTA_PARMS,		/* nested TLV NDTPA_* */
+	NDTA_STATS,		/* struct ndt_stats, read-only */
+	NDTA_GC_INTERVAL,	/* u64, msecs */
+	__NDTA_MAX
+};
+#define NDTA_MAX (__NDTA_MAX - 1)
+
+#define NDTA_RTA(r) ((struct rtattr*)(((char*)(r)) + \
+		     NLMSG_ALIGN(sizeof(struct ndtmsg))))
+#define NDTA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ndtmsg))
+
+
 /****
  *		General form of address family dependent message.
  ****/
@@ -789,6 +896,75 @@ extern void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data);
 ({	if (unlikely(skb_tailroom(skb) < (int)(attrlen))) \
 		goto rtattr_failure; \
 	memcpy(skb_put(skb, RTA_ALIGN(attrlen)), data, attrlen); })
+
+#define RTA_PUT_U8(skb, attrtype, value) \
+({	u8 _tmp = (value); \
+	RTA_PUT(skb, attrtype, sizeof(u8), &_tmp); })
+
+#define RTA_PUT_U16(skb, attrtype, value) \
+({	u16 _tmp = (value); \
+	RTA_PUT(skb, attrtype, sizeof(u16), &_tmp); })
+
+#define RTA_PUT_U32(skb, attrtype, value) \
+({	u32 _tmp = (value); \
+	RTA_PUT(skb, attrtype, sizeof(u32), &_tmp); })
+
+#define RTA_PUT_U64(skb, attrtype, value) \
+({	u64 _tmp = (value); \
+	RTA_PUT(skb, attrtype, sizeof(u64), &_tmp); })
+
+#define RTA_PUT_SECS(skb, attrtype, value) \
+	RTA_PUT_U64(skb, attrtype, (value) / HZ)
+
+#define RTA_PUT_MSECS(skb, attrtype, value) \
+	RTA_PUT_U64(skb, attrtype, jiffies_to_msecs(value))
+
+#define RTA_PUT_STRING(skb, attrtype, value) \
+	RTA_PUT(skb, attrtype, strlen(value) + 1, value)
+
+#define RTA_PUT_FLAG(skb, attrtype) \
+	RTA_PUT(skb, attrtype, 0, NULL);
+
+#define RTA_NEST(skb, type) \
+({	struct rtattr *__start = (struct rtattr *) (skb)->tail; \
+	RTA_PUT(skb, type, 0, NULL); \
+	__start; })
+
+#define RTA_NEST_END(skb, start) \
+({	(start)->rta_len = ((skb)->tail - (unsigned char *) (start)); \
+	(skb)->len; })
+
+#define RTA_NEST_CANCEL(skb, start) \
+({	if (start) \
+		skb_trim(skb, (unsigned char *) (start) - (skb)->data); \
+	-1; })
+
+#define RTA_GET_U8(rta) \
+({	if (!rta || RTA_PAYLOAD(rta) < sizeof(u8)) \
+		goto rtattr_failure; \
+	*(u8 *) RTA_DATA(rta); })
+
+#define RTA_GET_U16(rta) \
+({	if (!rta || RTA_PAYLOAD(rta) < sizeof(u16)) \
+		goto rtattr_failure; \
+	*(u16 *) RTA_DATA(rta); })
+
+#define RTA_GET_U32(rta) \
+({	if (!rta || RTA_PAYLOAD(rta) < sizeof(u32)) \
+		goto rtattr_failure; \
+	*(u32 *) RTA_DATA(rta); })
+
+#define RTA_GET_U64(rta) \
+({	u64 _tmp; \
+	if (!rta || RTA_PAYLOAD(rta) < sizeof(u64)) \
+		goto rtattr_failure; \
+	memcpy(&_tmp, RTA_DATA(rta), sizeof(_tmp)); \
+	_tmp; })
+
+#define RTA_GET_FLAG(rta) (!!(rta))
+
+#define RTA_GET_SECS(rta) ((unsigned long) RTA_GET_U64(rta) * HZ)
+#define RTA_GET_MSECS(rta) (msecs_to_jiffies((unsigned long) RTA_GET_U64(rta)))
 
 static inline struct rtattr *
 __rta_reserve(struct sk_buff *skb, int attrtype, int attrlen)
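
RTA_NEST/RTA_NEST_END wrap the same goto convention for nested TLVs such as NDTA_PARMS: RTA_NEST opens a zero-length attribute and remembers its position, RTA_NEST_END back-patches rta_len once the children are in, and the if (start) guard in RTA_NEST_CANCEL makes it safe to reach the failure label before the nest was even opened. A hedged sketch (the attribute choice and values are illustrative):

	static int put_neightbl_parms(struct sk_buff *skb)
	{
		struct rtattr *nest = NULL;

		nest = RTA_NEST(skb, NDTA_PARMS);	/* open the nested TLV */

		RTA_PUT_U32(skb, NDTPA_QUEUE_LEN, 3);
		RTA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, 4 * HZ / 5);

		return RTA_NEST_END(skb, nest);		/* back-patch rta_len, return skb->len */

	rtattr_failure:
		return RTA_NEST_CANCEL(skb, nest);	/* trim skb back to 'nest', return -1 */
	}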
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 7d66385ae750..76cf7e60216c 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -64,6 +64,7 @@ extern int kmem_cache_shrink(kmem_cache_t *);
 extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
+extern const char *kmem_cache_name(kmem_cache_t *);
 extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);
 
 /* Size description struct for general caches. */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 14a55e3e3a50..97a7c9e03df5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -230,6 +230,17 @@ struct tcp_options_received {
 	__u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
 };
 
+struct tcp_request_sock {
+	struct inet_request_sock req;
+	__u32			 rcv_isn;
+	__u32			 snt_isn;
+};
+
+static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
+{
+	return (struct tcp_request_sock *)req;
+}
+
 struct tcp_sock {
 	/* inet_sock has to be the first member of tcp_sock */
 	struct inet_sock	inet;
@@ -368,22 +379,7 @@ struct tcp_sock {
 
 	__u32	total_retrans;	/* Total retransmits for entire connection */
 
-	/* The syn_wait_lock is necessary only to avoid proc interface having
-	 * to grab the main lock sock while browsing the listening hash
-	 * (otherwise it's deadlock prone).
-	 * This lock is acquired in read mode only from listening_get_next()
-	 * and it's acquired in write mode _only_ from code that is actively
-	 * changing the syn_wait_queue. All readers that are holding
-	 * the master sock lock don't need to grab this lock in read mode
-	 * too as the syn_wait_queue writes are always protected from
-	 * the main sock lock.
-	 */
-	rwlock_t		syn_wait_lock;
-	struct tcp_listen_opt	*listen_opt;
-
-	/* FIFO of established children */
-	struct open_request	*accept_queue;
-	struct open_request	*accept_queue_tail;
+	struct request_sock_queue accept_queue; /* FIFO of established children */
 
 	unsigned int		keepalive_time;	  /* time before keep alive takes place */
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index fd2ef742a9fd..d68391a9b9f3 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -174,6 +174,8 @@ enum xfrm_attr_type_t {
 	XFRMA_ALG_COMP,		/* struct xfrm_algo */
 	XFRMA_ENCAP,		/* struct xfrm_algo + struct xfrm_encap_tmpl */
 	XFRMA_TMPL,		/* 1 or more struct xfrm_user_tmpl */
+	XFRMA_SA,
+	XFRMA_POLICY,
 	__XFRMA_MAX
 
 #define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -257,5 +259,7 @@ struct xfrm_usersa_flush {
 
 #define XFRMGRP_ACQUIRE		1
 #define XFRMGRP_EXPIRE		2
+#define XFRMGRP_SA		4
+#define XFRMGRP_POLICY		8
 
 #endif /* _LINUX_XFRM_H */
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 4f33bbc21e7f..89809891e5ab 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -65,11 +65,10 @@ struct neighbour;
 
 struct neigh_parms
 {
+	struct net_device *dev;
 	struct neigh_parms *next;
 	int	(*neigh_setup)(struct neighbour *);
 	struct neigh_table *tbl;
-	int	entries;
-	void	*priv;
 
 	void	*sysctl_table;
 
@@ -192,7 +191,6 @@ struct neigh_table
 	atomic_t		entries;
 	rwlock_t		lock;
 	unsigned long		last_rand;
-	struct neigh_parms	*parms_list;
 	kmem_cache_t		*kmem_cachep;
 	struct neigh_statistics	*stats;
 	struct neighbour	**hash_buckets;
@@ -252,6 +250,9 @@ extern int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 extern int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
 extern void neigh_app_ns(struct neighbour *n);
 
+extern int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb);
+extern int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
+
 extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
 extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
 extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
new file mode 100644
index 000000000000..72fd6f5e86b1
--- /dev/null
+++ b/include/net/request_sock.h
@@ -0,0 +1,255 @@
+/*
+ * NET		Generic infrastructure for Network protocols.
+ *
+ * Definitions for request_sock
+ *
+ * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ *
+ * From code originally in include/net/tcp.h
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _REQUEST_SOCK_H
+#define _REQUEST_SOCK_H
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <net/sock.h>
+
+struct request_sock;
+struct sk_buff;
+struct dst_entry;
+struct proto;
+
+struct request_sock_ops {
+	int		family;
+	kmem_cache_t	*slab;
+	int		obj_size;
+	int		(*rtx_syn_ack)(struct sock *sk,
+				       struct request_sock *req,
+				       struct dst_entry *dst);
+	void		(*send_ack)(struct sk_buff *skb,
+				    struct request_sock *req);
+	void		(*send_reset)(struct sk_buff *skb);
+	void		(*destructor)(struct request_sock *req);
+};
+
+/* struct request_sock - mini sock to represent a connection request
+ */
+struct request_sock {
+	struct request_sock	*dl_next; /* Must be first member! */
+	u16			mss;
+	u8			retrans;
+	u8			__pad;
+	/* The following two fields can be easily recomputed I think -AK */
+	u32			window_clamp;	/* window clamp at creation time */
+	u32			rcv_wnd;	/* rcv_wnd offered first time */
+	u32			ts_recent;
+	unsigned long		expires;
+	struct request_sock_ops	*rsk_ops;
+	struct sock		*sk;
+};
+
+static inline struct request_sock *reqsk_alloc(struct request_sock_ops *ops)
+{
+	struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);
+
+	if (req != NULL)
+		req->rsk_ops = ops;
+
+	return req;
+}
+
+static inline void __reqsk_free(struct request_sock *req)
+{
+	kmem_cache_free(req->rsk_ops->slab, req);
+}
+
+static inline void reqsk_free(struct request_sock *req)
+{
+	req->rsk_ops->destructor(req);
+	__reqsk_free(req);
+}
+
+extern int sysctl_max_syn_backlog;
+
+/** struct listen_sock - listen state
+ *
+ * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
+ */
+struct listen_sock {
+	u8			max_qlen_log;
+	/* 3 bytes hole, try to use */
+	int			qlen;
+	int			qlen_young;
+	int			clock_hand;
+	u32			hash_rnd;
+	struct request_sock	*syn_table[0];
+};
+
+/** struct request_sock_queue - queue of request_socks
+ *
+ * @rskq_accept_head - FIFO head of established children
+ * @rskq_accept_tail - FIFO tail of established children
+ * @syn_wait_lock - serializer
+ *
+ * %syn_wait_lock is necessary only to avoid the proc interface having to
+ * grab the main sock lock while browsing the listening hash (otherwise it's
+ * deadlock prone).
+ *
+ * This lock is acquired in read mode only from listening_get_next() seq_file
+ * op and it's acquired in write mode _only_ from code that is actively
+ * changing rskq_accept_head. All readers that are holding the master sock
+ * lock don't need to grab this lock in read mode too, as rskq_accept_head
+ * writes are always protected from the main sock lock.
+ */
+struct request_sock_queue {
+	struct request_sock	*rskq_accept_head;
+	struct request_sock	*rskq_accept_tail;
+	rwlock_t		syn_wait_lock;
+	struct listen_sock	*listen_opt;
+};
+
+extern int reqsk_queue_alloc(struct request_sock_queue *queue,
+			     const int nr_table_entries);
+
+static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue)
+{
+	struct listen_sock *lopt;
+
+	write_lock_bh(&queue->syn_wait_lock);
+	lopt = queue->listen_opt;
+	queue->listen_opt = NULL;
+	write_unlock_bh(&queue->syn_wait_lock);
+
+	return lopt;
+}
+
+static inline void reqsk_queue_destroy(struct request_sock_queue *queue)
+{
+	kfree(reqsk_queue_yank_listen_sk(queue));
+}
+
+static inline struct request_sock *
+	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
+{
+	struct request_sock *req = queue->rskq_accept_head;
+
+	queue->rskq_accept_head = NULL;
+	return req;
+}
+
+static inline int reqsk_queue_empty(struct request_sock_queue *queue)
+{
+	return queue->rskq_accept_head == NULL;
+}
+
+static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
+				      struct request_sock *req,
+				      struct request_sock **prev_req)
+{
+	write_lock(&queue->syn_wait_lock);
+	*prev_req = req->dl_next;
+	write_unlock(&queue->syn_wait_lock);
+}
+
+static inline void reqsk_queue_add(struct request_sock_queue *queue,
+				   struct request_sock *req,
+				   struct sock *parent,
+				   struct sock *child)
+{
+	req->sk = child;
+	sk_acceptq_added(parent);
+
+	if (queue->rskq_accept_head == NULL)
+		queue->rskq_accept_head = req;
+	else
+		queue->rskq_accept_tail->dl_next = req;
+
+	queue->rskq_accept_tail = req;
+	req->dl_next = NULL;
+}
+
+static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue)
+{
+	struct request_sock *req = queue->rskq_accept_head;
+
+	BUG_TRAP(req != NULL);
+
+	queue->rskq_accept_head = req->dl_next;
+	if (queue->rskq_accept_head == NULL)
+		queue->rskq_accept_tail = NULL;
+
+	return req;
+}
+
+static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
+						 struct sock *parent)
+{
+	struct request_sock *req = reqsk_queue_remove(queue);
+	struct sock *child = req->sk;
+
+	BUG_TRAP(child != NULL);
+
+	sk_acceptq_removed(parent);
+	__reqsk_free(req);
+	return child;
+}
+
+static inline int reqsk_queue_removed(struct request_sock_queue *queue,
+				      struct request_sock *req)
+{
+	struct listen_sock *lopt = queue->listen_opt;
+
+	if (req->retrans == 0)
+		--lopt->qlen_young;
+
+	return --lopt->qlen;
+}
+
+static inline int reqsk_queue_added(struct request_sock_queue *queue)
+{
+	struct listen_sock *lopt = queue->listen_opt;
+	const int prev_qlen = lopt->qlen;
+
+	lopt->qlen_young++;
+	lopt->qlen++;
+	return prev_qlen;
+}
+
+static inline int reqsk_queue_len(struct request_sock_queue *queue)
+{
+	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+}
+
+static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
+{
+	return queue->listen_opt->qlen_young;
+}
+
+static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
+{
+	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+}
+
+static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
+					u32 hash, struct request_sock *req,
+					unsigned timeout)
+{
+	struct listen_sock *lopt = queue->listen_opt;
+
+	req->expires = jiffies + timeout;
+	req->retrans = 0;
+	req->sk = NULL;
+	req->dl_next = lopt->syn_table[hash];
+
+	write_lock(&queue->syn_wait_lock);
+	lopt->syn_table[hash] = req;
+	write_unlock(&queue->syn_wait_lock);
+}
+
+#endif /* _REQUEST_SOCK_H */
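
Taken together, the helpers encode the lifecycle a listener walks a request through. A hedged sketch of that flow (locking, the listener's hash function, and unlinking from the SYN table on handshake completion are elided; 'child' stands for the full sock created by the three-way handshake):

	static void example_request_flow(struct sock *parent, struct sock *child,
					 struct request_sock_ops *ops,
					 struct request_sock_queue *queue, u32 hash)
	{
		/* SYN received: allocate the minisock ... */
		struct request_sock *req = reqsk_alloc(ops);

		if (req == NULL)
			return;

		/* ... and park it in the listener's SYN table for 3 s. */
		reqsk_queue_hash_req(queue, hash, req, 3 * HZ);

		/* Handshake completed: move the child onto the accept FIFO. */
		reqsk_queue_add(queue, req, parent, child);

		/* accept(): pop the oldest child and free the minisock. */
		if (!reqsk_queue_empty(queue))
			child = reqsk_queue_get_child(queue, parent);
	}

The assumption throughout is that the queue was initialised with reqsk_queue_alloc(), so listen_opt is valid.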
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c57504b3b518..7b97405e2dbf 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -172,4 +172,126 @@ tcf_destroy(struct tcf_proto *tp)
 	kfree(tp);
 }
 
+static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
+				       struct sk_buff_head *list)
+{
+	__skb_queue_tail(list, skb);
+	sch->qstats.backlog += skb->len;
+	sch->bstats.bytes += skb->len;
+	sch->bstats.packets++;
+
+	return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return __qdisc_enqueue_tail(skb, sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
+						   struct sk_buff_head *list)
+{
+	struct sk_buff *skb = __skb_dequeue(list);
+
+	if (likely(skb != NULL))
+		sch->qstats.backlog -= skb->len;
+
+	return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+{
+	return __qdisc_dequeue_head(sch, &sch->q);
+}
+
+static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
+						   struct sk_buff_head *list)
+{
+	struct sk_buff *skb = __skb_dequeue_tail(list);
+
+	if (likely(skb != NULL))
+		sch->qstats.backlog -= skb->len;
+
+	return skb;
+}
+
+static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
+{
+	return __qdisc_dequeue_tail(sch, &sch->q);
+}
+
+static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
+				  struct sk_buff_head *list)
+{
+	__skb_queue_head(list, skb);
+	sch->qstats.backlog += skb->len;
+	sch->qstats.requeues++;
+
+	return NET_XMIT_SUCCESS;
+}
+
+static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	return __qdisc_requeue(skb, sch, &sch->q);
+}
+
+static inline void __qdisc_reset_queue(struct Qdisc *sch,
+				       struct sk_buff_head *list)
+{
+	/*
+	 * We do not know the backlog in bytes of this list, it
+	 * is up to the caller to correct it
+	 */
+	skb_queue_purge(list);
+}
+
+static inline void qdisc_reset_queue(struct Qdisc *sch)
+{
+	__qdisc_reset_queue(sch, &sch->q);
+	sch->qstats.backlog = 0;
+}
+
+static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
+					      struct sk_buff_head *list)
+{
+	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
+
+	if (likely(skb != NULL)) {
+		unsigned int len = skb->len;
+		kfree_skb(skb);
+		return len;
+	}
+
+	return 0;
+}
+
+static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
+{
+	return __qdisc_queue_drop(sch, &sch->q);
+}
+
+static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
+{
+	kfree_skb(skb);
+	sch->qstats.drops++;
+
+	return NET_XMIT_DROP;
+}
+
+static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
+{
+	sch->qstats.drops++;
+
+#ifdef CONFIG_NET_CLS_POLICE
+	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
+		goto drop;
+
+	return NET_XMIT_SUCCESS;
+
+drop:
+#endif
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
+}
+
 #endif
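
These helpers keep qstats.backlog and the bstats counters consistent so individual qdiscs no longer hand-roll the bookkeeping. A hedged sketch of a pfifo-style ->enqueue() written in terms of them ('limit' in a private struct is an assumption of this sketch, not something defined in this header):

	struct fifo_sched_data {
		unsigned int limit;
	};

	static int fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
	{
		struct fifo_sched_data *q = qdisc_priv(sch);

		if (likely(skb_queue_len(&sch->q) < q->limit))
			return qdisc_enqueue_tail(skb, sch);	/* stats updated inside */

		return qdisc_reshape_fail(skb, sch);		/* account and drop */
	}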
diff --git a/include/net/sock.h b/include/net/sock.h
index a9ef3a6a13f3..e593af5b1ecc 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -484,6 +484,8 @@ extern void sk_stream_kill_queues(struct sock *sk);
 
 extern int sk_wait_data(struct sock *sk, long *timeo);
 
+struct request_sock_ops;
+
 /* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
@@ -547,6 +549,8 @@ struct proto {
 	kmem_cache_t		*slab;
 	unsigned int		obj_size;
 
+	struct request_sock_ops	*rsk_prot;
+
 	struct module		*owner;
 
 	char			name[32];
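
A transport protocol is expected to point rsk_prot at its request_sock_ops so that generic code can allocate and destroy its minisocks from the right slab. A hedged sketch of the wiring — the v4 callback names below are illustrative, not taken from this diff:

	static struct request_sock_ops tcp_request_sock_ops = {
		.family		= PF_INET,
		.obj_size	= sizeof(struct tcp_request_sock),
		.rtx_syn_ack	= tcp_v4_send_synack,		/* illustrative names */
		.send_ack	= tcp_v4_reqsk_send_ack,
		.send_reset	= tcp_v4_send_reset,
		.destructor	= tcp_v4_reqsk_destructor,
	};

	struct proto tcp_prot = {
		.name		= "TCP",
		.obj_size	= sizeof(struct tcp_sock),
		.rsk_prot	= &tcp_request_sock_ops,
		/* ... remaining ops elided ... */
	};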
diff --git a/include/net/tcp.h b/include/net/tcp.h
index e71f8ba3e101..f730935b824a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -31,6 +31,7 @@
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <net/checksum.h>
+#include <net/request_sock.h>
 #include <net/sock.h>
 #include <net/snmp.h>
 #include <net/ip.h>
@@ -563,7 +564,6 @@ static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
 #define TCP_NAGLE_PUSH		4	/* Cork is overriden for already queued data */
 
 /* sysctl variables for tcp */
-extern int sysctl_max_syn_backlog;
 extern int sysctl_tcp_timestamps;
 extern int sysctl_tcp_window_scaling;
 extern int sysctl_tcp_sack;
@@ -613,74 +613,6 @@ extern atomic_t tcp_memory_allocated;
 extern atomic_t tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
-struct open_request;
-
-struct or_calltable {
-	int  family;
-	int  (*rtx_syn_ack)	(struct sock *sk, struct open_request *req, struct dst_entry*);
-	void (*send_ack)	(struct sk_buff *skb, struct open_request *req);
-	void (*destructor)	(struct open_request *req);
-	void (*send_reset)	(struct sk_buff *skb);
-};
-
-struct tcp_v4_open_req {
-	__u32			loc_addr;
-	__u32			rmt_addr;
-	struct ip_options	*opt;
-};
-
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-struct tcp_v6_open_req {
-	struct in6_addr		loc_addr;
-	struct in6_addr		rmt_addr;
-	struct sk_buff		*pktopts;
-	int			iif;
-};
-#endif
-
-/* this structure is too big */
-struct open_request {
-	struct open_request	*dl_next; /* Must be first member! */
-	__u32			rcv_isn;
-	__u32			snt_isn;
-	__u16			rmt_port;
-	__u16			mss;
-	__u8			retrans;
-	__u8			__pad;
-	__u16	snd_wscale : 4,
-		rcv_wscale : 4,
-		tstamp_ok : 1,
-		sack_ok : 1,
-		wscale_ok : 1,
-		ecn_ok : 1,
-		acked : 1;
-	/* The following two fields can be easily recomputed I think -AK */
-	__u32			window_clamp;	/* window clamp at creation time */
-	__u32			rcv_wnd;	/* rcv_wnd offered first time */
-	__u32			ts_recent;
-	unsigned long		expires;
-	struct or_calltable	*class;
-	struct sock		*sk;
-	union {
-		struct tcp_v4_open_req v4_req;
-#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
-		struct tcp_v6_open_req v6_req;
-#endif
-	} af;
-};
-
-/* SLAB cache for open requests. */
-extern kmem_cache_t *tcp_openreq_cachep;
-
-#define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
-#define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)
-
-static inline void tcp_openreq_free(struct open_request *req)
-{
-	req->class->destructor(req);
-	tcp_openreq_fastfree(req);
-}
-
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #define TCP_INET_FAMILY(fam)	((fam) == AF_INET)
 #else
@@ -708,7 +640,7 @@ struct tcp_func {
 
 	struct sock *	(*syn_recv_sock)	(struct sock *sk,
 						 struct sk_buff *skb,
-						 struct open_request *req,
+						 struct request_sock *req,
 						 struct dst_entry *dst);
 
 	int		(*remember_stamp)	(struct sock *sk);
@@ -852,8 +784,8 @@ extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
 						     unsigned len);
 
 extern struct sock *	tcp_check_req(struct sock *sk,struct sk_buff *skb,
-				      struct open_request *req,
-				      struct open_request **prev);
+				      struct request_sock *req,
+				      struct request_sock **prev);
 extern int		tcp_child_process(struct sock *parent,
 					  struct sock *child,
 					  struct sk_buff *skb);
@@ -903,12 +835,12 @@ extern int tcp_v4_conn_request(struct sock *sk,
 			       struct sk_buff *skb);
 
 extern struct sock *	tcp_create_openreq_child(struct sock *sk,
-						 struct open_request *req,
+						 struct request_sock *req,
 						 struct sk_buff *skb);
 
 extern struct sock *	tcp_v4_syn_recv_sock(struct sock *sk,
 					     struct sk_buff *skb,
-					     struct open_request *req,
+					     struct request_sock *req,
 					     struct dst_entry *dst);
 
 extern int		tcp_v4_do_rcv(struct sock *sk,
@@ -922,7 +854,7 @@ extern int tcp_connect(struct sock *sk);
 
 extern struct sk_buff *	tcp_make_synack(struct sock *sk,
 					struct dst_entry *dst,
-					struct open_request *req);
+					struct request_sock *req);
 
 extern int tcp_disconnect(struct sock *sk, int flags);
 
@@ -1750,99 +1682,71 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk->sk_rcvbuf);
 }
 
-static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
+static inline void tcp_acceptq_queue(struct sock *sk, struct request_sock *req,
 				     struct sock *child)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	req->sk = child;
-	sk_acceptq_added(sk);
-
-	if (!tp->accept_queue_tail) {
-		tp->accept_queue = req;
-	} else {
-		tp->accept_queue_tail->dl_next = req;
-	}
-	tp->accept_queue_tail = req;
-	req->dl_next = NULL;
+	reqsk_queue_add(&tcp_sk(sk)->accept_queue, req, sk, child);
 }
 
-struct tcp_listen_opt
-{
-	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
-	int			qlen;
-	int			qlen_young;
-	int			clock_hand;
-	u32			hash_rnd;
-	struct open_request	*syn_table[TCP_SYNQ_HSIZE];
-};
-
 static inline void
-tcp_synq_removed(struct sock *sk, struct open_request *req)
+tcp_synq_removed(struct sock *sk, struct request_sock *req)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (--lopt->qlen == 0)
+	if (reqsk_queue_removed(&tcp_sk(sk)->accept_queue, req) == 0)
 		tcp_delete_keepalive_timer(sk);
-	if (req->retrans == 0)
-		lopt->qlen_young--;
 }
 
 static inline void tcp_synq_added(struct sock *sk)
 {
-	struct tcp_listen_opt *lopt = tcp_sk(sk)->listen_opt;
-
-	if (lopt->qlen++ == 0)
+	if (reqsk_queue_added(&tcp_sk(sk)->accept_queue) == 0)
 		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
-	lopt->qlen_young++;
 }
 
 static inline int tcp_synq_len(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen;
+	return reqsk_queue_len(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_young(struct sock *sk)
 {
-	return tcp_sk(sk)->listen_opt->qlen_young;
+	return reqsk_queue_len_young(&tcp_sk(sk)->accept_queue);
 }
 
 static inline int tcp_synq_is_full(struct sock *sk)
 {
-	return tcp_synq_len(sk) >> tcp_sk(sk)->listen_opt->max_qlen_log;
+	return reqsk_queue_is_full(&tcp_sk(sk)->accept_queue);
 }
 
-static inline void tcp_synq_unlink(struct tcp_sock *tp, struct open_request *req,
-				   struct open_request **prev)
+static inline void tcp_synq_unlink(struct tcp_sock *tp, struct request_sock *req,
+				   struct request_sock **prev)
 {
-	write_lock(&tp->syn_wait_lock);
-	*prev = req->dl_next;
-	write_unlock(&tp->syn_wait_lock);
+	reqsk_queue_unlink(&tp->accept_queue, req, prev);
 }
 
-static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
-				 struct open_request **prev)
+static inline void tcp_synq_drop(struct sock *sk, struct request_sock *req,
+				 struct request_sock **prev)
 {
 	tcp_synq_unlink(tcp_sk(sk), req, prev);
 	tcp_synq_removed(sk, req);
-	tcp_openreq_free(req);
+	reqsk_free(req);
 }
 
-static __inline__ void tcp_openreq_init(struct open_request *req,
+static __inline__ void tcp_openreq_init(struct request_sock *req,
 					struct tcp_options_received *rx_opt,
 					struct sk_buff *skb)
 {
+	struct inet_request_sock *ireq = inet_rsk(req);
+
 	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
-	req->rcv_isn = TCP_SKB_CB(skb)->seq;
+	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
 	req->mss = rx_opt->mss_clamp;
 	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
-	req->tstamp_ok = rx_opt->tstamp_ok;
-	req->sack_ok = rx_opt->sack_ok;
-	req->snd_wscale = rx_opt->snd_wscale;
-	req->wscale_ok = rx_opt->wscale_ok;
-	req->acked = 0;
-	req->ecn_ok = 0;
-	req->rmt_port = skb->h.th->source;
+	ireq->tstamp_ok = rx_opt->tstamp_ok;
+	ireq->sack_ok = rx_opt->sack_ok;
+	ireq->snd_wscale = rx_opt->snd_wscale;
+	ireq->wscale_ok = rx_opt->wscale_ok;
+	ireq->acked = 0;
+	ireq->ecn_ok = 0;
+	ireq->rmt_port = skb->h.th->source;
 }
 
 extern void tcp_enter_memory_pressure(void);
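
For callers, the conversion mostly changes how per-family request fields are reached: what used to be a union member (req->af.v4_req.rmt_addr) or a flat field (req->rcv_isn) now sits behind the layered casts. A hedged before/after illustration (the helper names are hypothetical):

	static inline u32 req_rmt_addr(const struct request_sock *req)
	{
		/* was: req->af.v4_req.rmt_addr */
		return inet_rsk(req)->rmt_addr;
	}

	static inline u32 req_rcv_isn(const struct request_sock *req)
	{
		/* was: req->rcv_isn */
		return tcp_rsk(req)->rcv_isn;
	}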
diff --git a/include/net/tcp_ecn.h b/include/net/tcp_ecn.h
index dc1456389a97..64980ee8c92a 100644
--- a/include/net/tcp_ecn.h
+++ b/include/net/tcp_ecn.h
@@ -2,6 +2,7 @@
 #define _NET_TCP_ECN_H_ 1
 
 #include <net/inet_ecn.h>
+#include <net/request_sock.h>
 
 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
 
@@ -38,9 +39,9 @@ static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp,
 }
 
 static __inline__ void
-TCP_ECN_make_synack(struct open_request *req, struct tcphdr *th)
+TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
 {
-	if (req->ecn_ok)
+	if (inet_rsk(req)->ecn_ok)
 		th->ece = 1;
 }
 
@@ -111,16 +112,16 @@ static inline int TCP_ECN_rcv_ecn_echo(struct tcp_sock *tp, struct tcphdr *th)
 }
 
 static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
-					 struct open_request *req)
+					 struct request_sock *req)
 {
-	tp->ecn_flags = req->ecn_ok ? TCP_ECN_OK : 0;
+	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
 }
 
 static __inline__ void
-TCP_ECN_create_request(struct open_request *req, struct tcphdr *th)
+TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
 {
 	if (sysctl_tcp_ecn && th->ece && th->cwr)
-		req->ecn_ok = 1;
+		inet_rsk(req)->ecn_ok = 1;
 }
 
 #endif
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d675836ba6c3..0e65e02b7a1d 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -158,6 +158,20 @@ enum {
 	XFRM_STATE_DEAD
 };
 
+/* callback structure passed from either netlink or pfkey */
+struct km_event
+{
+	union {
+		u32 hard;
+		u32 proto;
+		u32 byid;
+	} data;
+
+	u32	seq;
+	u32	pid;
+	u32	event;
+};
+
 struct xfrm_type;
 struct xfrm_dst;
 struct xfrm_policy_afinfo {
@@ -179,6 +193,8 @@ struct xfrm_policy_afinfo {
 
 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
+extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
+extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
 
 #define XFRM_ACQ_EXPIRES	30
 
@@ -290,11 +306,11 @@ struct xfrm_mgr
 {
 	struct list_head	list;
 	char			*id;
-	int			(*notify)(struct xfrm_state *x, int event);
+	int			(*notify)(struct xfrm_state *x, struct km_event *c);
 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
 	struct xfrm_policy	*(*compile_policy)(u16 family, int opt, u8 *data, int len, int *dir);
 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, u16 sport);
-	int			(*notify_policy)(struct xfrm_policy *x, int dir, int event);
+	int			(*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
 };
 
 extern int xfrm_register_km(struct xfrm_mgr *km);
@@ -656,7 +672,7 @@ static inline int xfrm_sk_clone_policy(struct sock *sk)
 	return 0;
 }
 
-extern void xfrm_policy_delete(struct xfrm_policy *pol, int dir);
+extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
 
 static inline void xfrm_sk_free_policy(struct sock *sk)
 {
@@ -817,7 +833,7 @@ extern int xfrm_state_add(struct xfrm_state *x);
 extern int xfrm_state_update(struct xfrm_state *x);
 extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, u32 spi, u8 proto, unsigned short family);
 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
-extern void xfrm_state_delete(struct xfrm_state *x);
+extern int xfrm_state_delete(struct xfrm_state *x);
 extern void xfrm_state_flush(u8 proto);
 extern int xfrm_replay_check(struct xfrm_state *x, u32 seq);
 extern void xfrm_replay_advance(struct xfrm_state *x, u32 seq);
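
With km_event, state and policy notifications carry the originator's netlink pid/seq and the event code instead of a bare int, which is what lets XFRMGRP_SA/XFRMGRP_POLICY listeners correlate broadcasts with their own requests. A hedged sketch of how a caller might fill one in after deleting a state (the surrounding context and message-type choice are assumptions of this sketch):

	static void notify_state_deleted(struct xfrm_state *x,
					 const struct nlmsghdr *nlh)
	{
		struct km_event c;

		c.seq   = nlh->nlmsg_seq;	/* lets listeners match the request */
		c.pid   = nlh->nlmsg_pid;
		c.event = nlh->nlmsg_type;	/* e.g. the delete-SA message type */
		c.data.byid = 0;

		km_state_notify(x, &c);		/* fan out to all registered KMs */
	}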