diff options
author | David S. Miller <davem@sunset.davemloft.net> | 2007-03-07 15:12:44 -0500 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-04-26 01:24:02 -0400 |
commit | fe067e8ab5e0dc5ca3c54634924c628da92090b4 (patch) | |
tree | 98f5a6ebbb770f16682cfc52caea2da1e7eeb73b /include/net/tcp.h | |
parent | 02ea4923b4997d7e1310c027081f46d584b9d714 (diff) |
[TCP]: Abstract out all write queue operations.
This allows the write queue implementation to be changed,
for example, to one which allows fast interval searching.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r-- | include/net/tcp.h | 114 |
1 file changed, 114 insertions(+), 0 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h index 181c0600af1c..6dacc352dcf1 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -1162,6 +1162,120 @@ static inline void tcp_put_md5sig_pool(void) | |||
1162 | put_cpu(); | 1162 | put_cpu(); |
1163 | } | 1163 | } |
1164 | 1164 | ||
1165 | /* write queue abstraction */ | ||
1166 | static inline void tcp_write_queue_purge(struct sock *sk) | ||
1167 | { | ||
1168 | struct sk_buff *skb; | ||
1169 | |||
1170 | while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) | ||
1171 | sk_stream_free_skb(sk, skb); | ||
1172 | sk_stream_mem_reclaim(sk); | ||
1173 | } | ||
1174 | |||
1175 | static inline struct sk_buff *tcp_write_queue_head(struct sock *sk) | ||
1176 | { | ||
1177 | struct sk_buff *skb = sk->sk_write_queue.next; | ||
1178 | if (skb == (struct sk_buff *) &sk->sk_write_queue) | ||
1179 | return NULL; | ||
1180 | return skb; | ||
1181 | } | ||
1182 | |||
1183 | static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk) | ||
1184 | { | ||
1185 | struct sk_buff *skb = sk->sk_write_queue.prev; | ||
1186 | if (skb == (struct sk_buff *) &sk->sk_write_queue) | ||
1187 | return NULL; | ||
1188 | return skb; | ||
1189 | } | ||
1190 | |||
1191 | static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb) | ||
1192 | { | ||
1193 | return skb->next; | ||
1194 | } | ||
1195 | |||
/* Iterate skb over every entry of (sk)'s write queue, front to back.
 * Terminates when skb reaches the queue head sentinel.  The body must
 * not unlink skb (skb->next is read after each iteration).
 */
#define tcp_for_write_queue(skb, sk)					\
	for (skb = (sk)->sk_write_queue.next;				\
	     (skb != (struct sk_buff *)&(sk)->sk_write_queue);		\
	     skb = skb->next)
1200 | |||
/* Like tcp_for_write_queue(), but starts from the caller-initialized
 * skb rather than the front of the queue.
 */
#define tcp_for_write_queue_from(skb, sk)				\
	for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);	\
	     skb = skb->next)
1204 | |||
1205 | static inline struct sk_buff *tcp_send_head(struct sock *sk) | ||
1206 | { | ||
1207 | return sk->sk_send_head; | ||
1208 | } | ||
1209 | |||
1210 | static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb) | ||
1211 | { | ||
1212 | sk->sk_send_head = skb->next; | ||
1213 | if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue) | ||
1214 | sk->sk_send_head = NULL; | ||
1215 | } | ||
1216 | |||
1217 | static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked) | ||
1218 | { | ||
1219 | if (sk->sk_send_head == skb_unlinked) | ||
1220 | sk->sk_send_head = NULL; | ||
1221 | } | ||
1222 | |||
/* Reset the send head: no segment is pending transmission. */
static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}
1227 | |||
/* Append skb to the write queue without touching the send head
 * (callers that need send-head maintenance use
 * tcp_add_write_queue_tail()).
 */
static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}
1232 | |||
1233 | static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) | ||
1234 | { | ||
1235 | __tcp_add_write_queue_tail(sk, skb); | ||
1236 | |||
1237 | /* Queue it, remembering where we must start sending. */ | ||
1238 | if (sk->sk_send_head == NULL) | ||
1239 | sk->sk_send_head = skb; | ||
1240 | } | ||
1241 | |||
/* Push skb onto the front of the write queue; the send head is left
 * untouched.
 */
static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}
1246 | |||
/* Insert buff after skb on the write queue of sk.
 * NOTE(review): relies on the __skb_append(old, new, list) argument
 * order of this kernel version — verify against linux/skbuff.h if
 * porting.
 */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_append(skb, buff, &sk->sk_write_queue);
}
1254 | |||
1255 | /* Insert skb between prev and next on the write queue of sk. */ | ||
1256 | static inline void tcp_insert_write_queue_before(struct sk_buff *new, | ||
1257 | struct sk_buff *skb, | ||
1258 | struct sock *sk) | ||
1259 | { | ||
1260 | __skb_insert(new, skb->prev, skb, &sk->sk_write_queue); | ||
1261 | } | ||
1262 | |||
/* Remove skb from the write queue.  Does NOT clear the send head;
 * callers pair this with tcp_check_send_head().
 */
static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}
1267 | |||
1268 | static inline int tcp_skb_is_last(const struct sock *sk, | ||
1269 | const struct sk_buff *skb) | ||
1270 | { | ||
1271 | return skb->next == (struct sk_buff *)&sk->sk_write_queue; | ||
1272 | } | ||
1273 | |||
1274 | static inline int tcp_write_queue_empty(struct sock *sk) | ||
1275 | { | ||
1276 | return skb_queue_empty(&sk->sk_write_queue); | ||
1277 | } | ||
1278 | |||
1165 | /* /proc */ | 1279 | /* /proc */ |
1166 | enum tcp_seq_states { | 1280 | enum tcp_seq_states { |
1167 | TCP_SEQ_STATE_LISTENING, | 1281 | TCP_SEQ_STATE_LISTENING, |