summary | refs | log | tree | commit | diff | stats
path: root/include/net
diff options
context:
space:
mode:
author: Eric Dumazet <eric.dumazet@gmail.com> 2011-10-21 05:22:42 -0400
committer: David S. Miller <davem@davemloft.net> 2011-10-21 05:22:42 -0400
commit: cf533ea53ebfae41be15b103d78e7ebec30b9969 (patch)
tree: 51ed3c69f4a15117fefe5cbd291a75010beb0f4b /include/net
parent: f04565ddf52e401880f8ba51de0dff8ba51c99fd (diff)
tcp: add const qualifiers where possible
Adding const qualifiers to pointers can ease code review, and spot some bugs. It might allow compiler to optimize code further. For example, is it legal to temporary write a null cksum into tcphdr in tcp_md5_hash_header() ? I am afraid a sniffer could catch the temporary null value... Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/secure_seq.h |  2
-rw-r--r--  include/net/tcp.h        | 43
2 files changed, 24 insertions, 21 deletions
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index d97f6892c019..c2e542b27a5a 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -10,7 +10,7 @@ extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
10 __be16 dport); 10 __be16 dport);
11extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, 11extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
12 __be16 sport, __be16 dport); 12 __be16 sport, __be16 dport);
13extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, 13extern __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
14 __be16 sport, __be16 dport); 14 __be16 sport, __be16 dport);
15extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, 15extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
16 __be16 sport, __be16 dport); 16 __be16 sport, __be16 dport);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0113d306fcb0..3edef0bebdd1 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -327,9 +327,9 @@ extern int tcp_sendpage(struct sock *sk, struct page *page, int offset,
327 size_t size, int flags); 327 size_t size, int flags);
328extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); 328extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
329extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 329extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
330 struct tcphdr *th, unsigned len); 330 const struct tcphdr *th, unsigned int len);
331extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, 331extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
332 struct tcphdr *th, unsigned len); 332 const struct tcphdr *th, unsigned int len);
333extern void tcp_rcv_space_adjust(struct sock *sk); 333extern void tcp_rcv_space_adjust(struct sock *sk);
334extern void tcp_cleanup_rbuf(struct sock *sk, int copied); 334extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
335extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp); 335extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -401,10 +401,10 @@ extern void tcp_set_keepalive(struct sock *sk, int val);
401extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req); 401extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
402extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 402extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
403 size_t len, int nonblock, int flags, int *addr_len); 403 size_t len, int nonblock, int flags, int *addr_len);
404extern void tcp_parse_options(struct sk_buff *skb, 404extern void tcp_parse_options(const struct sk_buff *skb,
405 struct tcp_options_received *opt_rx, u8 **hvpp, 405 struct tcp_options_received *opt_rx, const u8 **hvpp,
406 int estab); 406 int estab);
407extern u8 *tcp_parse_md5sig_option(struct tcphdr *th); 407extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
408 408
409/* 409/*
410 * TCP v4 functions exported for the inet6 API 410 * TCP v4 functions exported for the inet6 API
@@ -450,7 +450,7 @@ extern bool cookie_check_timestamp(struct tcp_options_received *opt, bool *);
450/* From net/ipv6/syncookies.c */ 450/* From net/ipv6/syncookies.c */
451extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb); 451extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
452#ifdef CONFIG_SYN_COOKIES 452#ifdef CONFIG_SYN_COOKIES
453extern __u32 cookie_v6_init_sequence(struct sock *sk, struct sk_buff *skb, 453extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
454 __u16 *mss); 454 __u16 *mss);
455#else 455#else
456static inline __u32 cookie_v6_init_sequence(struct sock *sk, 456static inline __u32 cookie_v6_init_sequence(struct sock *sk,
@@ -522,7 +522,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
522} 522}
523 523
524/* tcp.c */ 524/* tcp.c */
525extern void tcp_get_info(struct sock *, struct tcp_info *); 525extern void tcp_get_info(const struct sock *, struct tcp_info *);
526 526
527/* Read 'sendfile()'-style from a TCP socket */ 527/* Read 'sendfile()'-style from a TCP socket */
528typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *, 528typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -532,8 +532,8 @@ extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
532 532
533extern void tcp_initialize_rcv_mss(struct sock *sk); 533extern void tcp_initialize_rcv_mss(struct sock *sk);
534 534
535extern int tcp_mtu_to_mss(struct sock *sk, int pmtu); 535extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu);
536extern int tcp_mss_to_mtu(struct sock *sk, int mss); 536extern int tcp_mss_to_mtu(const struct sock *sk, int mss);
537extern void tcp_mtup_init(struct sock *sk); 537extern void tcp_mtup_init(struct sock *sk);
538extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt); 538extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
539 539
@@ -574,7 +574,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
574/* Compute the actual rto_min value */ 574/* Compute the actual rto_min value */
575static inline u32 tcp_rto_min(struct sock *sk) 575static inline u32 tcp_rto_min(struct sock *sk)
576{ 576{
577 struct dst_entry *dst = __sk_dst_get(sk); 577 const struct dst_entry *dst = __sk_dst_get(sk);
578 u32 rto_min = TCP_RTO_MIN; 578 u32 rto_min = TCP_RTO_MIN;
579 579
580 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) 580 if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
@@ -820,6 +820,7 @@ static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
820static inline __u32 tcp_current_ssthresh(const struct sock *sk) 820static inline __u32 tcp_current_ssthresh(const struct sock *sk)
821{ 821{
822 const struct tcp_sock *tp = tcp_sk(sk); 822 const struct tcp_sock *tp = tcp_sk(sk);
823
823 if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery)) 824 if ((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_CWR | TCPF_CA_Recovery))
824 return tp->snd_ssthresh; 825 return tp->snd_ssthresh;
825 else 826 else
@@ -832,7 +833,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
832#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) 833#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)
833 834
834extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); 835extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
835extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); 836extern __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
836 837
837/* Slow start with delack produces 3 packets of burst, so that 838/* Slow start with delack produces 3 packets of burst, so that
838 * it is safe "de facto". This will be the default - same as 839 * it is safe "de facto". This will be the default - same as
@@ -861,7 +862,7 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss,
861 862
862static inline void tcp_check_probe_timer(struct sock *sk) 863static inline void tcp_check_probe_timer(struct sock *sk)
863{ 864{
864 struct tcp_sock *tp = tcp_sk(sk); 865 const struct tcp_sock *tp = tcp_sk(sk);
865 const struct inet_connection_sock *icsk = inet_csk(sk); 866 const struct inet_connection_sock *icsk = inet_csk(sk);
866 867
867 if (!tp->packets_out && !icsk->icsk_pending) 868 if (!tp->packets_out && !icsk->icsk_pending)
@@ -1209,10 +1210,10 @@ extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1209extern void tcp_put_md5sig_pool(void); 1210extern void tcp_put_md5sig_pool(void);
1210 1211
1211extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); 1212extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
1212extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, 1213extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1213 unsigned header_len); 1214 unsigned header_len);
1214extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, 1215extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1215 struct tcp_md5sig_key *key); 1216 const struct tcp_md5sig_key *key);
1216 1217
1217/* write queue abstraction */ 1218/* write queue abstraction */
1218static inline void tcp_write_queue_purge(struct sock *sk) 1219static inline void tcp_write_queue_purge(struct sock *sk)
@@ -1225,22 +1226,24 @@ static inline void tcp_write_queue_purge(struct sock *sk)
1225 tcp_clear_all_retrans_hints(tcp_sk(sk)); 1226 tcp_clear_all_retrans_hints(tcp_sk(sk));
1226} 1227}
1227 1228
1228static inline struct sk_buff *tcp_write_queue_head(struct sock *sk) 1229static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1229{ 1230{
1230 return skb_peek(&sk->sk_write_queue); 1231 return skb_peek(&sk->sk_write_queue);
1231} 1232}
1232 1233
1233static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk) 1234static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1234{ 1235{
1235 return skb_peek_tail(&sk->sk_write_queue); 1236 return skb_peek_tail(&sk->sk_write_queue);
1236} 1237}
1237 1238
1238static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb) 1239static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1240 const struct sk_buff *skb)
1239{ 1241{
1240 return skb_queue_next(&sk->sk_write_queue, skb); 1242 return skb_queue_next(&sk->sk_write_queue, skb);
1241} 1243}
1242 1244
1243static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_buff *skb) 1245static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1246 const struct sk_buff *skb)
1244{ 1247{
1245 return skb_queue_prev(&sk->sk_write_queue, skb); 1248 return skb_queue_prev(&sk->sk_write_queue, skb);
1246} 1249}
@@ -1254,7 +1257,7 @@ static inline struct sk_buff *tcp_write_queue_prev(struct sock *sk, struct sk_bu
1254#define tcp_for_write_queue_from_safe(skb, tmp, sk) \ 1257#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
1255 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp) 1258 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1256 1259
1257static inline struct sk_buff *tcp_send_head(struct sock *sk) 1260static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1258{ 1261{
1259 return sk->sk_send_head; 1262 return sk->sk_send_head;
1260} 1263}
@@ -1265,7 +1268,7 @@ static inline bool tcp_skb_is_last(const struct sock *sk,
1265 return skb_queue_is_last(&sk->sk_write_queue, skb); 1268 return skb_queue_is_last(&sk->sk_write_queue, skb);
1266} 1269}
1267 1270
1268static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb) 1271static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1269{ 1272{
1270 if (tcp_skb_is_last(sk, skb)) 1273 if (tcp_skb_is_last(sk, skb))
1271 sk->sk_send_head = NULL; 1274 sk->sk_send_head = NULL;