| field | value | date |
|---|---|---|
| author | Eric Dumazet <edumazet@google.com> | 2013-12-06 01:31:30 -0500 |
| committer | David S. Miller <davem@davemloft.net> | 2013-12-06 12:51:40 -0500 |
| commit | 7b7fc97aa390dfe4770e1d19e215fa289d94b477 | |
| tree | 780f432a513cc6e8c980b061b38e0c3c01288a99 | |
| parent | 84b9cd633bc35a028b313178829ee313525f6892 | |
tcp: optimize some skb_shinfo(skb) uses
The compiler doesn't know that the skb_shinfo(skb) pointer is usually constant.
By using a temporary variable, we help it generate smaller code.
For example, tcp_init_nondata_skb() is inlined after this patch.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
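To make the effect easier to see outside the kernel tree, here is a minimal userspace sketch of the same pattern. The types and the buf_shinfo() helper are simplified stand-ins, not the real sk_buff or skb_shinfo(); the sketch only models "a pointer derived from a struct field" so the before/after difference is visible.

```c
#include <stdio.h>

struct shared_info {
	unsigned short gso_segs;
	unsigned short gso_size;
	unsigned int   gso_type;
};

struct buf {
	unsigned char *end;	/* shared info lives at the end of the buffer */
	unsigned int   csum;
};

/* Stand-in for skb_shinfo(skb): re-derives the pointer from b->end. */
static struct shared_info *buf_shinfo(const struct buf *b)
{
	return (struct shared_info *)b->end;
}

/* Before: the pointer is re-derived for every store.  Without strict
 * aliasing (the kernel builds with -fno-strict-aliasing), the compiler
 * must assume each store may have changed b->end and reload it each time.
 */
static void init_rederive(struct buf *b)
{
	buf_shinfo(b)->gso_segs = 1;
	buf_shinfo(b)->gso_size = 0;
	buf_shinfo(b)->gso_type = 0;
}

/* After: cache the pointer once in a local; nothing can change the local
 * behind the compiler's back, so it can stay in a register.
 */
static void init_cached(struct buf *b)
{
	struct shared_info *shinfo = buf_shinfo(b);

	shinfo->gso_segs = 1;
	shinfo->gso_size = 0;
	shinfo->gso_type = 0;
}

int main(void)
{
	struct shared_info tail = { 0 };
	struct buf b = { .end = (unsigned char *)&tail, .csum = 0 };

	init_rederive(&b);
	init_cached(&b);
	printf("gso_segs=%u gso_size=%u gso_type=%u\n",
	       buf_shinfo(&b)->gso_segs, buf_shinfo(&b)->gso_size,
	       buf_shinfo(&b)->gso_type);
	return 0;
}
```

The likely reason the reloads happen in the first place is that skb_shinfo() derives its pointer from fields of the skb itself, and because the kernel is built with -fno-strict-aliasing the compiler has to assume that any store through the resulting pointer could have modified those fields. The local variable removes that dependency, which is consistent with tcp_init_nondata_skb() becoming small enough to inline, as the changelog notes.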
 net/ipv4/tcp_output.c | 36 +++++++++++++++++++++++++---------------
 1 file changed, 21 insertions(+), 15 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7820f3a7dd70..993da005e087 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -363,15 +363,17 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
  */
 static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 {
+        struct skb_shared_info *shinfo = skb_shinfo(skb);
+
         skb->ip_summed = CHECKSUM_PARTIAL;
         skb->csum = 0;
 
         TCP_SKB_CB(skb)->tcp_flags = flags;
         TCP_SKB_CB(skb)->sacked = 0;
 
-        skb_shinfo(skb)->gso_segs = 1;
-        skb_shinfo(skb)->gso_size = 0;
-        skb_shinfo(skb)->gso_type = 0;
+        shinfo->gso_segs = 1;
+        shinfo->gso_size = 0;
+        shinfo->gso_type = 0;
 
         TCP_SKB_CB(skb)->seq = seq;
         if (flags & (TCPHDR_SYN | TCPHDR_FIN))
@@ -986,6 +988,8 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                                  unsigned int mss_now)
 {
+        struct skb_shared_info *shinfo = skb_shinfo(skb);
+
         /* Make sure we own this skb before messing gso_size/gso_segs */
         WARN_ON_ONCE(skb_cloned(skb));
 
@@ -993,13 +997,13 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                 /* Avoid the costly divide in the normal
                  * non-TSO case.
                  */
-                skb_shinfo(skb)->gso_segs = 1;
-                skb_shinfo(skb)->gso_size = 0;
-                skb_shinfo(skb)->gso_type = 0;
+                shinfo->gso_segs = 1;
+                shinfo->gso_size = 0;
+                shinfo->gso_type = 0;
         } else {
-                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
-                skb_shinfo(skb)->gso_size = mss_now;
-                skb_shinfo(skb)->gso_type = sk->sk_gso_type;
+                shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
+                shinfo->gso_size = mss_now;
+                shinfo->gso_type = sk->sk_gso_type;
         }
 }
 
@@ -1146,6 +1150,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
  */
 static void __pskb_trim_head(struct sk_buff *skb, int len)
 {
+        struct skb_shared_info *shinfo;
         int i, k, eat;
 
         eat = min_t(int, len, skb_headlen(skb));
@@ -1157,23 +1162,24 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
         }
         eat = len;
         k = 0;
-        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+        shinfo = skb_shinfo(skb);
+        for (i = 0; i < shinfo->nr_frags; i++) {
+                int size = skb_frag_size(&shinfo->frags[i]);
 
                 if (size <= eat) {
                         skb_frag_unref(skb, i);
                         eat -= size;
                 } else {
-                        skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+                        shinfo->frags[k] = shinfo->frags[i];
                         if (eat) {
-                                skb_shinfo(skb)->frags[k].page_offset += eat;
-                                skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
+                                shinfo->frags[k].page_offset += eat;
+                                skb_frag_size_sub(&shinfo->frags[k], eat);
                                 eat = 0;
                         }
                         k++;
                 }
         }
-        skb_shinfo(skb)->nr_frags = k;
+        shinfo->nr_frags = k;
 
         skb_reset_tail_pointer(skb);
         skb->data_len -= len;
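If you want to measure the "smaller code" claim on a local build, one way is to compare the object file built before and after the patch with the kernel's scripts/bloat-o-meter, e.g. `scripts/bloat-o-meter tcp_output.o.old tcp_output.o.new`; the object file names here are placeholders, not something referenced by this commit.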
