author    David S. Miller <davem@sunset.davemloft.net>  2007-03-07 15:12:44 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2007-04-26 01:24:02 -0400
commit    fe067e8ab5e0dc5ca3c54634924c628da92090b4
tree      98f5a6ebbb770f16682cfc52caea2da1e7eeb73b /net/ipv4/tcp.c
parent    02ea4923b4997d7e1310c027081f46d584b9d714
[TCP]: Abstract out all write queue operations.
This allows the write queue implementation to be changed,
for example, to one which allows fast interval searching.
Signed-off-by: David S. Miller <davem@davemloft.net>
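
The accessors this diff switches to (tcp_send_head(), tcp_write_queue_tail(), tcp_add_write_queue_tail(), and friends) are introduced by the same commit, presumably in include/net/tcp.h, which falls outside this diffstat. A minimal sketch of what they plausibly look like, inferred purely from the call sites below (the exact bodies are an assumption, and the usual <net/sock.h>/<linux/skbuff.h> context is implied):

static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
	return sk->sk_send_head;
}

static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
	return sk->sk_write_queue.prev;
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);

	/* The first skb queued becomes the new send head,
	 * matching the open-coded logic removed from skb_entail().
	 */
	if (!sk->sk_send_head)
		sk->sk_send_head = skb;
}

With all callers going through wrappers like these, the underlying list can later be swapped for a structure that supports fast interval searches without touching the call sites again.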
Diffstat (limited to 'net/ipv4/tcp.c')
 net/ipv4/tcp.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 3834b10b5115..689f9330f1b9 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -470,10 +470,8 @@ static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
 	tcb->flags = TCPCB_FLAG_ACK;
 	tcb->sacked = 0;
 	skb_header_release(skb);
-	__skb_queue_tail(&sk->sk_write_queue, skb);
+	tcp_add_write_queue_tail(sk, skb);
 	sk_charge_skb(sk, skb);
-	if (!sk->sk_send_head)
-		sk->sk_send_head = skb;
 	if (tp->nonagle & TCP_NAGLE_PUSH)
 		tp->nonagle &= ~TCP_NAGLE_PUSH;
 }
@@ -491,8 +489,8 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
 static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
 			    int mss_now, int nonagle)
 {
-	if (sk->sk_send_head) {
-		struct sk_buff *skb = sk->sk_write_queue.prev;
+	if (tcp_send_head(sk)) {
+		struct sk_buff *skb = tcp_write_queue_tail(sk);
 		if (!(flags & MSG_MORE) || forced_push(tp))
 			tcp_mark_push(tp, skb);
 		tcp_mark_urg(tp, flags, skb);
@@ -526,13 +524,13 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
 		goto do_error;
 
 	while (psize > 0) {
-		struct sk_buff *skb = sk->sk_write_queue.prev;
+		struct sk_buff *skb = tcp_write_queue_tail(sk);
 		struct page *page = pages[poffset / PAGE_SIZE];
 		int copy, i, can_coalesce;
 		int offset = poffset % PAGE_SIZE;
 		int size = min_t(size_t, psize, PAGE_SIZE - offset);
 
-		if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
+		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
 new_segment:
 			if (!sk_stream_memory_free(sk))
 				goto wait_for_sndbuf;
@@ -589,7 +587,7 @@ new_segment:
 		if (forced_push(tp)) {
 			tcp_mark_push(tp, skb);
 			__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-		} else if (skb == sk->sk_send_head)
+		} else if (skb == tcp_send_head(sk))
 			tcp_push_one(sk, mss_now);
 		continue;
 
@@ -704,9 +702,9 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		while (seglen > 0) {
 			int copy;
 
-			skb = sk->sk_write_queue.prev;
+			skb = tcp_write_queue_tail(sk);
 
-			if (!sk->sk_send_head ||
+			if (!tcp_send_head(sk) ||
 			    (copy = size_goal - skb->len) <= 0) {
 
 new_segment:
@@ -833,7 +831,7 @@ new_segment:
 			if (forced_push(tp)) {
 				tcp_mark_push(tp, skb);
 				__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
-			} else if (skb == sk->sk_send_head)
+			} else if (skb == tcp_send_head(sk))
 				tcp_push_one(sk, mss_now);
 			continue;
 
@@ -860,9 +858,11 @@ out:
 
 do_fault:
 	if (!skb->len) {
-		if (sk->sk_send_head == skb)
-			sk->sk_send_head = NULL;
-		__skb_unlink(skb, &sk->sk_write_queue);
+		tcp_unlink_write_queue(skb, sk);
+		/* It is the one place in all of TCP, except connection
+		 * reset, where we can be unlinking the send_head.
+		 */
+		tcp_check_send_head(sk, skb);
 		sk_stream_free_skb(sk, skb);
 	}
 
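
The do_fault path above leans on two more of the new helpers. A plausible sketch of their bodies, assuming they simply wrap the open-coded logic they replace:

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	/* Clear the send head if it was the skb just unlinked. */
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}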
@@ -1732,7 +1732,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 
 	tcp_clear_xmit_timers(sk);
 	__skb_queue_purge(&sk->sk_receive_queue);
-	sk_stream_writequeue_purge(sk);
+	tcp_write_queue_purge(sk);
 	__skb_queue_purge(&tp->out_of_order_queue);
 #ifdef CONFIG_NET_DMA
 	__skb_queue_purge(&sk->sk_async_wait_queue);
@@ -1758,7 +1758,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	tcp_set_ca_state(sk, TCP_CA_Open);
 	tcp_clear_retrans(tp);
 	inet_csk_delack_init(sk);
-	sk->sk_send_head = NULL;
+	tcp_init_send_head(sk);
 	tp->rx_opt.saw_tstamp = 0;
 	tcp_sack_reset(&tp->rx_opt);
 	__sk_dst_reset(sk);
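
The two tcp_disconnect() hunks swap in the last pair of wrappers. Presumably something like the following, with tcp_write_queue_purge() inheriting the body of the sk_stream_writequeue_purge() it replaces (again a sketch, not the verified definitions):

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	/* Drop every queued skb and return its memory to the socket. */
	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_stream_free_skb(sk, skb);
	sk_stream_mem_reclaim(sk);
}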