Diffstat (limited to 'net/dccp/output.c')
 -rw-r--r--  net/dccp/output.c  528
 1 file changed, 528 insertions, 0 deletions
diff --git a/net/dccp/output.c b/net/dccp/output.c
new file mode 100644
index 000000000000..28de157a4326
--- /dev/null
+++ b/net/dccp/output.c
@@ -0,0 +1,528 @@
/*
 *  net/dccp/output.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/dccp.h>
#include <linux/skbuff.h>

#include <net/sock.h>

#include "ccid.h"
#include "dccp.h"

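/*
 * An ACK of some form is about to go out, so any pending delayed-ACK
 * timer is now redundant and can be stopped.
 */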
static inline void dccp_event_ack_sent(struct sock *sk)
{
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/*
 * All SKBs seen here are completely headerless. It is our
 * job to build the DCCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 */
int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (likely(skb != NULL)) {
		const struct inet_sock *inet = inet_sk(sk);
		struct dccp_sock *dp = dccp_sk(sk);
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		struct dccp_hdr *dh;
		/* XXX For now we're using only 48 bits sequence numbers */
		const int dccp_header_size = sizeof(*dh) +
					     sizeof(struct dccp_hdr_ext) +
					  dccp_packet_hdr_len(dcb->dccpd_type);
		int err, set_ack = 1;
		u64 ackno = dp->dccps_gsr;

		dccp_inc_seqno(&dp->dccps_gss);

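		/*
		 * By default acknowledge the greatest sequence number
		 * received so far (GSR). Sync/SyncAck packets instead echo
		 * the sequence number that triggered them, and pure data
		 * packets carry no acknowledgement at all.
		 */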
		switch (dcb->dccpd_type) {
		case DCCP_PKT_DATA:
			set_ack = 0;
			break;
		case DCCP_PKT_SYNC:
		case DCCP_PKT_SYNCACK:
			ackno = dcb->dccpd_seq;
			break;
		}

		dcb->dccpd_seq = dp->dccps_gss;
		dccp_insert_options(sk, skb);

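		/*
		 * dccp_insert_options() has already pushed the options onto
		 * the skb; pushing the fixed header now places it in front
		 * of them, and dccph_doff below accounts for both.
		 */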
		skb->h.raw = skb_push(skb, dccp_header_size);
		dh = dccp_hdr(skb);
		/*
		 * Data packets are not cloned, as they are never
		 * retransmitted; cloned control packets (REQUEST, CLOSE,
		 * etc) are the ones that still need their write ownership
		 * set here.
		 */
		if (skb_cloned(skb))
			skb_set_owner_w(skb, sk);

		/* Build DCCP header and checksum it. */
		memset(dh, 0, dccp_header_size);
		dh->dccph_type = dcb->dccpd_type;
		dh->dccph_sport = inet->sport;
		dh->dccph_dport = inet->dport;
		dh->dccph_doff = (dccp_header_size + dcb->dccpd_opt_len) / 4;
		dh->dccph_ccval = dcb->dccpd_ccval;
		/* XXX For now we're using only 48 bits sequence numbers */
		dh->dccph_x = 1;

		dp->dccps_awh = dp->dccps_gss;
		dccp_hdr_set_seq(dh, dp->dccps_gss);
		if (set_ack)
			dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), ackno);

		switch (dcb->dccpd_type) {
		case DCCP_PKT_REQUEST:
			dccp_hdr_request(skb)->dccph_req_service =
							dcb->dccpd_service;
			break;
		case DCCP_PKT_RESET:
			dccp_hdr_reset(skb)->dccph_reset_code =
							dcb->dccpd_reset_code;
			break;
		}

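		/*
		 * TCP/UDP-style checksum, taken over the DCCP header and
		 * payload plus an IPv4 pseudo-header built from the source
		 * and destination addresses.
		 */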
		dh->dccph_checksum = dccp_v4_checksum(skb, inet->saddr,
						      inet->daddr);

		if (set_ack)
			dccp_event_ack_sent(sk);

		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);

		err = ip_queue_xmit(skb, 0);
		if (err <= 0)
			return err;

		/* NET_XMIT_CN is special. It does not guarantee that this
		 * packet is lost. It just tells us that the device is about
		 * to start dropping packets, or already drops some packets
		 * of the same priority, and asks us to send less
		 * aggressively.
		 */
		return err == NET_XMIT_CN ? 0 : err;
	}
	return -ENOBUFS;
}

unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct dccp_sock *dp = dccp_sk(sk);
	int mss_now;

	/*
	 * FIXME: we really should be using the af_specific thing to support
	 *	  IPv6.
	 * mss_now = pmtu - tp->af_specific->net_header_len -
	 *	     sizeof(struct dccp_hdr) - sizeof(struct dccp_hdr_ext);
	 */
	mss_now = pmtu - sizeof(struct iphdr) - sizeof(struct dccp_hdr) -
		  sizeof(struct dccp_hdr_ext);

	/* Now subtract optional transport overhead */
	mss_now -= dp->dccps_ext_header_len;

	/*
	 * FIXME: this should come from the CCID infrastructure, where, say,
	 * TFRC will say it wants TIMESTAMPS, ELAPSED time, etc, for now
	 * let's put a rough estimate for NDP + TIMESTAMP + TIMESTAMP_ECHO +
	 * ELAPSED TIME + TFRC_OPT_LOSS_EVENT_RATE + TFRC_OPT_RECEIVE_RATE +
	 * padding to make it a multiple of 4
	 */

	mss_now -= ((5 + 6 + 10 + 6 + 6 + 6 + 3) / 4) * 4;
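	/*
	 * That is 5 + 6 + 10 + 6 + 6 + 6 + 3 = 42 bytes which, with C
	 * integer division, ((42 / 4) * 4) comes out as 40 bytes: the
	 * truncation rounds the estimate down to a multiple of 4, not up.
	 */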

	/* And store cached results */
	dp->dccps_pmtu_cookie = pmtu;
	dp->dccps_mss_cache = mss_now;

	return mss_now;
}
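
/*
 * Worked example, assuming a plain Ethernet path with no IP options: for a
 * PMTU of 1500 this yields 1500 - 20 (iphdr) - 12 (dccp_hdr) -
 * 4 (dccp_hdr_ext) - 40 (option estimate above) = 1424 bytes of payload
 * per packet.
 */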

void dccp_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
	/* Should agree with poll, otherwise some programs break */
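	/* The constant 2 below selects the write-space case of
	 * sock_wake_async(), as in sock_def_write_space(). */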
	if (sock_writeable(sk))
		sk_wake_async(sk, 2, POLL_OUT);

	read_unlock(&sk->sk_callback_lock);
}

/**
 * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
 * @sk: socket to wait for
 * @skb: the packet we want to send out
 * @timeo: for how long
 */
static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
			      long *timeo)
{
	struct dccp_sock *dp = dccp_sk(sk);
	DEFINE_WAIT(wait);
	long delay;
	int rc;

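	/*
	 * ccid_hc_tx_send_packet() returns 0 when the CCID will let this
	 * packet go out right away, a positive delay in milliseconds when
	 * we have to back off, and a negative errno on error.
	 */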
	while (1) {
		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);

		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
			goto do_error;
		if (!*timeo)
			goto do_nonblock;
		if (signal_pending(current))
			goto do_interrupted;

		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					    skb->len);
		if (rc <= 0)
			break;
		delay = msecs_to_jiffies(rc);
		if (delay > *timeo || delay < 0)
			goto do_nonblock;

		sk->sk_write_pending++;
		release_sock(sk);
		*timeo -= schedule_timeout(delay);
		lock_sock(sk);
		sk->sk_write_pending--;
	}
out:
	finish_wait(sk->sk_sleep, &wait);
	return rc;

do_error:
	rc = -EPIPE;
	goto out;
do_nonblock:
	rc = -EAGAIN;
	goto out;
do_interrupted:
	rc = sock_intr_errno(*timeo);
	goto out;
}

int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
{
	const struct dccp_sock *dp = dccp_sk(sk);
	int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
					 skb->len);

	if (err > 0)
		err = dccp_wait_for_ccid(sk, skb, timeo);

	if (err == 0) {
		const struct dccp_ackpkts *ap = dp->dccps_hc_rx_ackpkts;
		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
		const int len = skb->len;

		if (sk->sk_state == DCCP_PARTOPEN) {
			/* See 8.1.5. Handshake Completion */
			inet_csk_schedule_ack(sk);
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  inet_csk(sk)->icsk_rto,
						  DCCP_RTO_MAX);
			dcb->dccpd_type = DCCP_PKT_DATAACK;
			/*
			 * FIXME: we really should have a
			 * dccps_ack_pending or use icsk.
			 */
		} else if (inet_csk_ack_scheduled(sk) ||
			   dp->dccps_timestamp_echo != 0 ||
			   (dp->dccps_options.dccpo_send_ack_vector &&
			    ap->dccpap_buf_ackno != DCCP_MAX_SEQNO + 1 &&
			    ap->dccpap_ack_seqno == DCCP_MAX_SEQNO + 1))
			dcb->dccpd_type = DCCP_PKT_DATAACK;
		else
			dcb->dccpd_type = DCCP_PKT_DATA;

		err = dccp_transmit_skb(sk, skb);
		ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
	}

	return err;
}

int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (inet_sk_rebuild_header(sk) != 0)
		return -EHOSTUNREACH; /* Routing failure or similar. */

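	/*
	 * If the skb is still cloned (i.e. shared with an earlier
	 * transmission) we need a private copy we may modify, so use
	 * pskb_copy(); otherwise a cheap clone is enough, since
	 * dccp_transmit_skb() rebuilds the header anyway.
	 */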
	return dccp_transmit_skb(sk, (skb_cloned(skb) ?
				      pskb_copy(skb, GFP_ATOMIC) :
				      skb_clone(skb, GFP_ATOMIC)));
}

struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
				   struct request_sock *req)
{
	struct dccp_hdr *dh;
	const int dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_response);
	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
					       dccp_header_size, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

	skb->dst = dst_clone(dst);
	skb->csum = 0;

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
	DCCP_SKB_CB(skb)->dccpd_seq = dccp_rsk(req)->dreq_iss;
	dccp_insert_options(sk, skb);

	skb->h.raw = skb_push(skb, dccp_header_size);

	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_header_size);

	dh->dccph_sport = inet_sk(sk)->sport;
	dh->dccph_dport = inet_rsk(req)->rmt_port;
	dh->dccph_doff = (dccp_header_size +
			  DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type = DCCP_PKT_RESPONSE;
	dh->dccph_x = 1;
	dccp_hdr_set_seq(dh, dccp_rsk(req)->dreq_iss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dccp_rsk(req)->dreq_isr);

	dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
					      inet_rsk(req)->rmt_addr);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
				const enum dccp_reset_codes code)
{
	struct dccp_hdr *dh;
	struct dccp_sock *dp = dccp_sk(sk);
	const int dccp_header_size = sizeof(struct dccp_hdr) +
				     sizeof(struct dccp_hdr_ext) +
				     sizeof(struct dccp_hdr_reset);
	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
					       dccp_header_size, 1,
					   GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);

	skb->dst = dst_clone(dst);
	skb->csum = 0;

	dccp_inc_seqno(&dp->dccps_gss);

	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESET;
	DCCP_SKB_CB(skb)->dccpd_seq = dp->dccps_gss;
	dccp_insert_options(sk, skb);

	skb->h.raw = skb_push(skb, dccp_header_size);

	dh = dccp_hdr(skb);
	memset(dh, 0, dccp_header_size);

	dh->dccph_sport = inet_sk(sk)->sport;
	dh->dccph_dport = inet_sk(sk)->dport;
	dh->dccph_doff = (dccp_header_size +
			  DCCP_SKB_CB(skb)->dccpd_opt_len) / 4;
	dh->dccph_type = DCCP_PKT_RESET;
	dh->dccph_x = 1;
	dccp_hdr_set_seq(dh, dp->dccps_gss);
	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);

	dccp_hdr_reset(skb)->dccph_reset_code = code;

	dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
					      inet_sk(sk)->daddr);

	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
	return skb;
}

/*
 * Do all connect socket setups that can be done AF independent.
 */
static inline void dccp_connect_init(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk->sk_err = 0;
	sock_reset_flag(sk, SOCK_DONE);

	dccp_sync_mss(sk, dst_mtu(dst));

	/*
	 * FIXME: set dp->{dccps_swh,dccps_swl}, with
	 * something like dccp_inc_seq
	 */

	icsk->icsk_retransmits = 0;
}

int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_connect_init(sk);

	skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_DCCP_HEADER);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
	/* FIXME: set service to something meaningful, coming
	 * from userspace */
	DCCP_SKB_CB(skb)->dccpd_service = 0;
	skb->csum = 0;
	skb_set_owner_w(skb, sk);

	BUG_TRAP(sk->sk_send_head == NULL);
	sk->sk_send_head = skb;
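	/*
	 * Keep the REQUEST itself on sk_send_head so that the retransmit
	 * timer armed below can re-send it until a RESPONSE arrives; what
	 * goes out now is a clone.
	 */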
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}

void dccp_send_ack(struct sock *sk)
{
	/* If we have been reset, we may not send again. */
	if (sk->sk_state != DCCP_CLOSED) {
		struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

		if (skb == NULL) {
			inet_csk_schedule_ack(sk);
			inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  TCP_DELACK_MAX,
						  DCCP_RTO_MAX);
			return;
		}

		/* Reserve space for headers */
		skb_reserve(skb, MAX_DCCP_HEADER);
		skb->csum = 0;
		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
		skb_set_owner_w(skb, sk);
		dccp_transmit_skb(sk, skb);
	}
}

EXPORT_SYMBOL_GPL(dccp_send_ack);

void dccp_send_delayed_ack(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	/*
	 * FIXME: tune this timer. elapsed time fixes the skew, so no problem
	 * with using 2s, and active senders also piggyback the ACK into a
	 * DATAACK packet, so this is really for quiescent senders.
	 */
	unsigned long timeout = jiffies + 2 * HZ;

	/* Use the new timeout only if there wasn't an older one already. */
	if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
		/* If delack timer was blocked or is about to expire,
		 * send ACK now.
		 *
		 * FIXME: check the "about to expire" part
		 */
		if (icsk->icsk_ack.blocked) {
			dccp_send_ack(sk);
			return;
		}

		if (!time_before(timeout, icsk->icsk_ack.timeout))
			timeout = icsk->icsk_ack.timeout;
	}
	icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
	icsk->icsk_ack.timeout = timeout;
	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}

void dccp_send_sync(struct sock *sk, const u64 seq,
		    const enum dccp_pkt_type pkt_type)
{
	/*
	 * We are not putting this on the write queue, so
	 * dccp_transmit_skb() will set the ownership to this
	 * sock.
	 */
	struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);

	if (skb == NULL)
		/* FIXME: how to make sure the sync is sent? */
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, MAX_DCCP_HEADER);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
	DCCP_SKB_CB(skb)->dccpd_seq = seq;

	skb_set_owner_w(skb, sk);
	dccp_transmit_skb(sk, skb);
}

/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

	skb_set_owner_w(skb, sk);
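	/*
	 * On an active close, keep the CLOSE/CLOSEREQ on sk_send_head so it
	 * can be retransmitted until the peer answers, and send out a
	 * clone; on a passive close the packet goes out exactly once.
	 */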
	if (active) {
		BUG_TRAP(sk->sk_send_head == NULL);
		sk->sk_send_head = skb;
		dccp_transmit_skb(sk, skb_clone(skb, prio));
	} else
		dccp_transmit_skb(sk, skb);

	ccid_hc_rx_exit(dp->dccps_hc_rx_ccid, sk);
	ccid_hc_tx_exit(dp->dccps_hc_tx_ccid, sk);
}