author	Gerrit Renker <gerrit@erg.abdn.ac.uk>	2010-11-10 15:21:35 -0500
committer	Gerrit Renker <gerrit@erg.abdn.ac.uk>	2010-11-10 15:21:35 -0500
commit	b3d14bff12a38ad13a174eb0cc83d2ac7169eee4 (patch)
tree	2248e4d994ce857113c34ce5f754c554e17d8d9e	/net/dccp/ackvec.c
parent	7d870936602533836bba821bd5c679c62c52a95f (diff)
dccp ccid-2: Implementation of circular Ack Vector buffer with overflow handling
This completes the implementation of a circular buffer for Ack Vectors, by
extending the current (linear array-based) implementation. The changes are:

 (a) An `overflow' flag to deal with the case of overflow. As before, dynamic
     growth of the buffer will not be supported; but code will be added to
     deal robustly with overflowing Ack Vector buffers.

 (b) A `tail_seqno' field. When naively implementing the algorithm of
     Appendix A in RFC 4340, problems arise whenever subsequent Ack Vector
     records overlap, which can bring the entire run length calculation
     completely out of synch. (This is documented on
     http://www.erg.abdn.ac.uk/users/gerrit/dccp/notes/ack_vectors/tracking_tail_ackno/ .)

 (c) The buffer length is now computed dynamically (i.e. as the current fill
     level), as the span from head to tail.

As a result, dccp_ackvec_pending() is now simpler - the #ifdef is no longer
necessary, since buf_empty is always true when IP_DCCP_ACKVEC is not
configured.

Signed-off-by: Gerrit Renker <gerrit@erg.abdn.ac.uk>
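For orientation, the following is a minimal userspace sketch of the fill-level
computation described in (c). AV_LEN and the index values are illustrative
stand-ins (the kernel constant is DCCPAV_MAX_ACKVEC_LEN, whose value may
differ); the modulo arithmetic mirrors the helpers added in the diff below.

#include <stdio.h>
#include <stdint.h>

/* Illustrative buffer size only; not the kernel's DCCPAV_MAX_ACKVEC_LEN. */
#define AV_LEN 253

/* Indices move from right to left, so head sits logically `before' tail. */
static uint16_t idx_add(uint16_t a, uint16_t b)
{
	return (a + b) % AV_LEN;
}

static uint16_t idx_sub(uint16_t a, uint16_t b)
{
	return idx_add(a, AV_LEN - b);
}

/* Fill level = modular distance from tail back down to head;
 * an overflowed buffer counts as completely full. */
static uint16_t buflen(uint16_t head, uint16_t tail, int overflow)
{
	if (overflow)
		return AV_LEN;
	return idx_sub(tail, head);
}

int main(void)
{
	printf("%u\n", buflen(AV_LEN - 1, AV_LEN - 1, 0)); /* freshly allocated: 0 */
	printf("%u\n", buflen(243, 5, 0));                 /* head wrapped past 0: 15 */
	return 0;
}

Since the indices move from right to left, the fill level is the modular
distance from tail back to head; once the overflow flag is set, the whole
buffer counts as in use.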
Diffstat (limited to 'net/dccp/ackvec.c')
-rw-r--r--	net/dccp/ackvec.c	31
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index af976fca407a..abaf241c7353 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -29,7 +29,7 @@ struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
 	struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);
 
 	if (av != NULL) {
-		av->av_buf_head = DCCPAV_MAX_ACKVEC_LEN - 1;
+		av->av_buf_head = av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
 		INIT_LIST_HEAD(&av->av_records);
 	}
 	return av;
@@ -72,6 +72,14 @@ int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
 	avr->avr_ack_nonce = nonce_sum;
 	avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
 	/*
+	 * When the buffer overflows, we keep no more than one record. This is
+	 * the simplest way of disambiguating sender-Acks dating from before the
+	 * overflow from sender-Acks which refer to after the overflow; a simple
+	 * solution is preferable here since we are handling an exception.
+	 */
+	if (av->av_overflow)
+		dccp_ackvec_purge_records(av);
+	/*
 	 * Since GSS is incremented for each packet, the list is automatically
 	 * arranged in descending order of @ack_seqno.
 	 */
@@ -85,6 +93,27 @@ int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
 }
 
 /*
+ * Buffer index and length computation using modulo-buffersize arithmetic.
+ * Note that, as pointers move from right to left, head is `before' tail.
+ */
+static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
+{
+	return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
+}
+
+static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
+{
+	return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
+}
+
+u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
+{
+	if (unlikely(av->av_overflow))
+		return DCCPAV_MAX_ACKVEC_LEN;
+	return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
+}
+
+/*
  * If several packets are missing, the HC-Receiver may prefer to enter multiple
  * bytes with run length 0, rather than a single byte with a larger run length;
  * this simplifies table updates if one of the missing packets arrives.
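A note on the overflow handling in the second hunk: all but the newest record
are purged once av_overflow is set, but the code that raises the flag is not
part of this diff. As a rough illustration only (mock names, mock size, and an
assumed condition rather than the kernel's actual logic), an overflow would
typically be flagged when moving the head index one more cell to the left
would collide with the tail:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative size only; not the kernel's DCCPAV_MAX_ACKVEC_LEN. */
#define AV_LEN 253

struct mock_ackvec {
	uint16_t head;     /* index of the most recent cell                */
	uint16_t tail;     /* index of the oldest cell covered by a record */
	bool     overflow; /* set once no further cells can be claimed     */
};

/* Move head one cell to the left (with wrap-around); flag an overflow
 * instead if that step would run into tail, i.e. the buffer is full. */
static void mock_advance_head(struct mock_ackvec *av)
{
	uint16_t next = (av->head + AV_LEN - 1) % AV_LEN;

	if (next == av->tail)
		av->overflow = true;
	else
		av->head = next;
}

int main(void)
{
	struct mock_ackvec av = { .head = AV_LEN - 1, .tail = AV_LEN - 1,
				  .overflow = false };
	int i;

	/* Claim cells until head has wrapped all the way around to tail. */
	for (i = 0; i < AV_LEN; i++)
		mock_advance_head(&av);

	return av.overflow ? 0 : 1; /* overflow is expected after AV_LEN steps */
}

Once the flag is set, dccp_ackvec_buflen() above reports the buffer as
completely full, and the record list is trimmed to a single entry the next
time dccp_ackvec_update_records() runs.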