author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 16:38:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-02 16:38:27 -0400
commit		aecdc33e111b2c447b622e287c6003726daa1426 (patch)
tree		3e7657eae4b785e1a1fb5dfb225dbae0b2f0cfc6 /include/net/request_sock.h
parent		a20acf99f75e49271381d65db097c9763060a1e8 (diff)
parent		a3a6cab5ea10cca64d036851fe0d932448f2fe4f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking changes from David Miller:

 1) GRE now works over ipv6, from Dmitry Kozlov.
 2) Make SCTP more network namespace aware, from Eric Biederman.
 3) TEAM driver now works with non-ethernet devices, from Jiri Pirko.
 4) Make openvswitch network namespace aware, from Pravin B Shelar.
 5) IPV6 NAT implementation, from Patrick McHardy.
 6) Server side support for TCP Fast Open, from Jerry Chu and others.
 7) Packet BPF filter supports MOD and XOR, from Eric Dumazet and Daniel Borkmann.
 8) Increase the loopback default MTU to 64K, from Eric Dumazet.
 9) Use a per-task rather than per-socket page fragment allocator for outgoing networking traffic. This benefits processes that have very many mostly idle sockets, which is quite common. From Eric Dumazet.
10) Use up to 32K for page fragment allocations, with fallbacks to smaller sizes when higher order page allocations fail. Benefits are a) less segments for driver to process b) less calls to page allocator c) less waste of space. From Eric Dumazet.
11) Allow GRO to be used on GRE tunnels, from Eric Dumazet.
12) VXLAN device driver, one way to handle VLAN issues such as the limitation of 4096 VLAN IDs yet still have some level of isolation. From Stephen Hemminger.
13) As usual there is a large boatload of driver changes, with the scale perhaps tilted towards the wireless side this time around.

Fix up various fairly trivial conflicts, mostly caused by the user namespace changes.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1012 commits)
  hyperv: Add buffer for extended info after the RNDIS response message.
  hyperv: Report actual status in receive completion packet
  hyperv: Remove extra allocated space for recv_pkt_list elements
  hyperv: Fix page buffer handling in rndis_filter_send_request()
  hyperv: Fix the missing return value in rndis_filter_set_packet_filter()
  hyperv: Fix the max_xfer_size in RNDIS initialization
  vxlan: put UDP socket in correct namespace
  vxlan: Depend on CONFIG_INET
  sfc: Fix the reported priorities of different filter types
  sfc: Remove EFX_FILTER_FLAG_RX_OVERRIDE_IP
  sfc: Fix loopback self-test with separate_tx_channels=1
  sfc: Fix MCDI structure field lookup
  sfc: Add parentheses around use of bitfield macro arguments
  sfc: Fix null function pointer in efx_sriov_channel_type
  vxlan: virtual extensible lan
  igmp: export symbol ip_mc_leave_group
  netlink: add attributes to fdb interface
  tg3: unconditionally select HWMON support when tg3 is enabled.
  Revert "net: ti cpsw ethernet: allow reading phy interface mode from DT"
  gre: fix sparse warning
  ...
Diffstat (limited to 'include/net/request_sock.h')
-rw-r--r--	include/net/request_sock.h	49
1 file changed, 36 insertions(+), 13 deletions(-)
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 4c0766e201e3..b01d8dd9ee7c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -106,6 +106,34 @@ struct listen_sock {
 	struct request_sock	*syn_table[0];
 };
 
+/*
+ * For a TCP Fast Open listener -
+ *	lock		- protects the access to all the reqsk, which is co-owned by
+ *			  the listener and the child socket.
+ *	qlen		- pending TFO requests (still in TCP_SYN_RECV).
+ *	max_qlen	- max TFO reqs allowed before TFO is disabled.
+ *
+ *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
+ *	structure above. But there is some implementation difficulty due to
+ *	listen_sock being part of request_sock_queue hence will be freed when
+ *	a listener is stopped. But TFO related fields may continue to be
+ *	accessed even after a listener is closed, until its sk_refcnt drops
+ *	to 0 implying no more outstanding TFO reqs. One solution is to keep
+ *	listen_opt around until sk_refcnt drops to 0. But there is some other
+ *	complexity that needs to be resolved. E.g., a listener can be disabled
+ *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
+ */
+struct fastopen_queue {
+	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
+	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
+						 * This is part of the defense
+						 * against spoofing attack.
+						 */
+	spinlock_t	lock;
+	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
+	int		max_qlen;	/* != 0 iff TFO is currently enabled */
+};
+
 /** struct request_sock_queue - queue of request_socks
  *
  * @rskq_accept_head - FIFO head of established children
@@ -129,6 +157,12 @@ struct request_sock_queue {
 	u8			rskq_defer_accept;
 	/* 3 bytes hole, try to pack */
 	struct listen_sock	*listen_opt;
+	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
+					     * enabled on this listener. Check
+					     * max_qlen != 0 in fastopen_queue
+					     * to determine if TFO is enabled
+					     * right at this moment.
+					     */
 };
 
 extern int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -136,6 +170,8 @@ extern int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
 extern void reqsk_queue_destroy(struct request_sock_queue *queue);
+extern void reqsk_fastopen_remove(struct sock *sk,
+				  struct request_sock *req, bool reset);
 
 static inline struct request_sock *
 	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
@@ -190,19 +226,6 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
 	return req;
 }
 
-static inline struct sock *reqsk_queue_get_child(struct request_sock_queue *queue,
-						 struct sock *parent)
-{
-	struct request_sock *req = reqsk_queue_remove(queue);
-	struct sock *child = req->sk;
-
-	WARN_ON(child == NULL);
-
-	sk_acceptq_removed(parent);
-	__reqsk_free(req);
-	return child;
-}
-
 static inline int reqsk_queue_removed(struct request_sock_queue *queue,
 				      struct request_sock *req)
 {
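
The new fields encode TFO listener state at two levels: request_sock_queue.fastopenq is non-NULL once TFO has ever been enabled on the listener, while fastopen_queue.max_qlen != 0 says whether TFO is enabled right now (per the XXX note, a listener can be disabled temporarily through shutdown()->tcp_disconnect() and re-enabled later). A minimal standalone C sketch of that two-level check, using hypothetical *_sketch stand-ins rather than the real kernel structs:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins mirroring only the fields relevant to the check. */
struct fastopen_queue_sketch {
	int qlen;	/* # of pending (TCP_SYN_RECV) TFO reqs */
	int max_qlen;	/* != 0 iff TFO is currently enabled */
};

struct request_sock_queue_sketch {
	/* non-NULL iff TFO has ever been enabled on this listener */
	struct fastopen_queue_sketch *fastopenq;
};

/* TFO is usable right now only if the queue was ever allocated AND
 * max_qlen is non-zero; zeroing max_qlen disables TFO without freeing
 * the queue, so pending reqs stay reachable. */
static bool tfo_enabled_now(const struct request_sock_queue_sketch *q)
{
	return q->fastopenq != NULL && q->fastopenq->max_qlen != 0;
}

int main(void)
{
	struct fastopen_queue_sketch fq = { .qlen = 0, .max_qlen = 16 };
	struct request_sock_queue_sketch q = { .fastopenq = &fq };

	printf("TFO enabled: %d\n", tfo_enabled_now(&q));	/* prints 1 */
	fq.max_qlen = 0;	/* temporary disable, queue kept alive */
	printf("TFO enabled: %d\n", tfo_enabled_now(&q));	/* prints 0 */
	return 0;
}

Splitting "ever enabled" from "enabled now" matches the lifetime concern in the XXX comment: the queue and any outstanding TFO reqs must survive a temporary disable, so only max_qlen is reset rather than freeing fastopenq.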