author		Dmitry Torokhov <dmitry.torokhov@gmail.com>	2012-01-09 02:38:23 -0500
committer	Dmitry Torokhov <dmitry.torokhov@gmail.com>	2012-01-09 02:38:23 -0500
commit		da733563be5a9da26fe81d9f007262d00b846e22 (patch)
tree		db28291df94a2043af2123911984c5c173da4e6f /include/net/tcp.h
parent		6ccbcf2cb41131f8d56ef0723bf3f7c1f8486076 (diff)
parent		dab78d7924598ea4031663dd10db814e2e324928 (diff)
Merge branch 'next' into for-linus
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--	include/net/tcp.h	103
1 file changed, 65 insertions(+), 38 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 149a415d1e0a..bb18c4d69aba 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -18,7 +18,6 @@
 #ifndef _TCP_H
 #define _TCP_H
 
-#define TCP_DEBUG 1
 #define FASTRETRANS_DEBUG 1
 
 #include <linux/list.h>
@@ -327,9 +326,9 @@ extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
			size_t size, int flags);
 extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
 extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
-				 struct tcphdr *th, unsigned len);
+				 const struct tcphdr *th, unsigned int len);
 extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
-			       struct tcphdr *th, unsigned len);
+			       const struct tcphdr *th, unsigned int len);
 extern void tcp_rcv_space_adjust(struct sock *sk);
 extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
 extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -356,6 +355,7 @@ static inline void tcp_dec_quickack_mode(struct sock *sk,
 #define TCP_ECN_OK		1
 #define TCP_ECN_QUEUE_CWR	2
 #define TCP_ECN_DEMAND_CWR	4
+#define TCP_ECN_SEEN		8
 
 static __inline__ void
 TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
@@ -400,10 +400,10 @@ extern void tcp_set_keepalive(struct sock *sk, int val);
 extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
 extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		       size_t len, int nonblock, int flags, int *addr_len);
-extern void tcp_parse_options(struct sk_buff *skb,
-			      struct tcp_options_received *opt_rx, u8 **hvpp,
+extern void tcp_parse_options(const struct sk_buff *skb,
+			      struct tcp_options_received *opt_rx, const u8 **hvpp,
			      int estab);
-extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
+extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 /*
  * TCP v4 functions exported for the inet6 API
@@ -431,17 +431,34 @@ extern int tcp_disconnect(struct sock *sk, int flags);
 extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
+#ifdef CONFIG_SYN_COOKIES
 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
+#else
+static inline __u32 cookie_v4_init_sequence(struct sock *sk,
+					    struct sk_buff *skb,
+					    __u16 *mss)
+{
+	return 0;
+}
+#endif
 
 extern __u32 cookie_init_timestamp(struct request_sock *req);
 extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
 
 /* From net/ipv6/syncookies.c */
 extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
-extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb,
+#ifdef CONFIG_SYN_COOKIES
+extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
				     __u16 *mss);
-
+#else
+static inline __u32 cookie_v6_init_sequence(struct sock *sk,
+					    struct sk_buff *skb,
+					    __u16 *mss)
+{
+	return 0;
+}
+#endif
 /* tcp_output.c */
 
 extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
@@ -460,6 +477,9 @@ extern int tcp_write_wakeup(struct sock *);
 extern void tcp_send_fin(struct sock *sk);
 extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
 extern int tcp_send_synack(struct sock *);
+extern int tcp_syn_flood_action(struct sock *sk,
+				const struct sk_buff *skb,
+				const char *proto);
 extern void tcp_push_one(struct sock *, unsigned int mss_now);
 extern void tcp_send_ack(struct sock *sk);
 extern void tcp_send_delayed_ack(struct sock *sk);
@@ -501,7 +521,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 }
 
 /* tcp.c */
-extern void tcp_get_info(struct sock *, struct tcp_info *);
+extern void tcp_get_info(const struct sock *, struct tcp_info *);
 
 /* Read 'sendfile()'-style from a TCP socket */
 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -511,8 +531,8 @@ extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
 
 extern void tcp_initialize_rcv_mss(struct sock *sk);
 
-extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
-extern int tcp_mss_to_mtu(struct sock *sk, int mss);
+extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu);
+extern int tcp_mss_to_mtu(const struct sock *sk, int mss);
 extern void tcp_mtup_init(struct sock *sk);
 extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
 
@@ -553,7 +573,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
 /* Compute the actual rto_min value */
 static inline u32 tcp_rto_min(struct sock *sk)
 {
-	struct dst_entry *dst = __sk_dst_get(sk);
+	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;
 
	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
@@ -615,13 +635,14 @@ struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number */
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen */
	__u32		when;		/* used to compute rtt's */
-	__u8		flags;		/* TCP header flags. */
+	__u8		tcp_flags;	/* TCP header flags. (tcp[13]) */
	__u8		sacked;		/* State flags for SACK/FACK. */
 #define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block */
 #define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted */
 #define TCPCB_LOST		0x04	/* SKB is lost */
 #define TCPCB_TAGBITS		0x07	/* All tag bits */
-
+	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield */
+	/* 1 byte hole */
 #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame */
 #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 
@@ -798,6 +819,7 @@ static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
 static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 {
	const struct tcp_sock *tp = tcp_sk(sk);
+
	if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
@@ -810,7 +832,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
 extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
-extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst);
+extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
 /* Slow start with delack produces 3 packets of burst, so that
  * it is safe "de facto". This will be the default - same as
@@ -839,7 +861,7 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
 
 static inline void tcp_check_probe_timer(struct sock *sk)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
 
	if (!tp->packets_out && !icsk->icsk_pending)
@@ -1162,8 +1184,9 @@ struct tcp_md5sig_pool {
 
 /* - functions */
 extern int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
-			       struct sock *sk, struct request_sock *req,
-			       struct sk_buff *skb);
+			       const struct sock *sk,
+			       const struct request_sock *req,
+			       const struct sk_buff *skb);
 extern struct tcp_md5sig_key * tcp_v4_md5_lookup(struct sock *sk,
						 struct sock *addr_sk);
 extern int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, u8 *newkey,
@@ -1180,17 +1203,17 @@ extern int tcp_v4_md5_do_del(struct sock *sk, __be32 addr);
 #define tcp_twsk_md5_key(twsk)	NULL
 #endif
 
-extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *);
+extern struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *);
 extern void tcp_free_md5sig_pool(void);
 
 extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
 extern void tcp_put_md5sig_pool(void);
 
-extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
-extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
+extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
+extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
				 unsigned header_len);
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
-			    struct tcp_md5sig_key *key);
+			    const struct tcp_md5sig_key *key);
 
 /* write queue abstraction */
 static inline void tcp_write_queue_purge(struct sock *sk)
@@ -1203,22 +1226,24 @@ static inline void tcp_write_queue_purge(struct sock *sk)
	tcp_clear_all_retrans_hints(tcp_sk(sk));
 }
 
-static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
 {
	return skb_peek(&sk->sk_write_queue);
 }
 
-static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
+static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
 {
	return skb_peek_tail(&sk->sk_write_queue);
 }
 
-static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
+						   const struct sk_buff *skb)
 {
	return skb_queue_next(&sk->sk_write_queue, skb);
 }
 
-static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb)
+static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
+						   const struct sk_buff *skb)
 {
	return skb_queue_prev(&sk->sk_write_queue, skb);
 }
@@ -1232,7 +1257,7 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
 #define tcp_for_write_queue_from_safe(skb, tmp, sk)	\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
 
-static inline struct sk_buff *tcp_send_head(struct sock *sk)
+static inline struct sk_buff *tcp_send_head(const struct sock *sk)
 {
	return sk->sk_send_head;
 }
@@ -1243,7 +1268,7 @@ static inline bool tcp_skb_is_last(const struct sock *sk,
	return skb_queue_is_last(&sk->sk_write_queue, skb);
 }
 
-static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
+static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
 {
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
@@ -1378,11 +1403,13 @@ enum tcp_seq_states {
	TCP_SEQ_STATE_TIME_WAIT,
 };
 
+int tcp_seq_open(struct inode *inode, struct file *file);
+
 struct tcp_seq_afinfo {
	char				*name;
	sa_family_t			family;
-	struct file_operations		seq_fops;
+	const struct file_operations	*seq_fops;
	struct seq_operations		seq_ops;
 };
 
 struct tcp_iter_state {
@@ -1423,9 +1450,9 @@ struct tcp_sock_af_ops {
						  struct sock *addr_sk);
	int		(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
-						  struct sock *sk,
-						  struct request_sock *req,
-						  struct sk_buff *skb);
+						  const struct sock *sk,
+						  const struct request_sock *req,
+						  const struct sk_buff *skb);
	int		(*md5_add) (struct sock *sk,
						  struct sock *addr_sk,
						  u8 *newkey,
@@ -1442,9 +1469,9 @@ struct tcp_request_sock_ops {
						 struct request_sock *req);
	int		(*calc_md5_hash) (char *location,
						  struct tcp_md5sig_key *md5,
-						  struct sock *sk,
-						  struct request_sock *req,
-						  struct sk_buff *skb);
+						  const struct sock *sk,
+						  const struct request_sock *req,
+						  const struct sk_buff *skb);
 #endif
 };
 