author	Vlad Yasevich <vladislav.yasevich@hp.com>	2007-04-20 15:23:15 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 01:27:59 -0400
commit	b6e1331f3ce25a56edb956054eaf8011654686cb
tree	63995f7e1de7d717df69cb4d138bce3fa4fe77ba	/net/sctp/ulpqueue.c
parent	c95e939508e64863a1c5c73a9e1a908784e06820
[SCTP]: Implement SCTP_FRAGMENT_INTERLEAVE socket option
This option was introduced in draft-ietf-tsvwg-sctpsocket-13. It prevents head-of-line blocking in the case of a one-to-many endpoint. Applications enabling this option really must enable the SCTP_SNDRCV event so that they know where the received data belongs. Based on an earlier patch by Ivan Skytte Jørgensen.

Additionally, this functionality now permits multiple associations on the same endpoint to enter Partial Delivery. Applications should be extra careful, when using this functionality, to track EOR indicators.

Signed-off-by: Vlad Yasevich <vladislav.yasevich@hp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
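As a point of reference, below is a minimal userspace sketch of what the commit message asks of applications: turn on SCTP_FRAGMENT_INTERLEAVE and subscribe to SCTP_SNDRCV ancillary data via the SCTP_EVENTS option's sctp_data_io_event flag. This is illustrative and not part of the patch; it assumes the lksctp-tools <netinet/sctp.h> header, and error handling is reduced to perror().

/* Illustrative only -- not part of this patch.  Assumes lksctp-tools
 * headers on a kernel that has this option. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

int main(void)
{
	/* One-to-many style socket: the case this option targets. */
	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
	if (fd < 0) { perror("socket"); return 1; }

	/* Allow fragments of messages from different associations and
	 * streams to interleave on the receive queue. */
	int on = 1;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
		       &on, sizeof(on)) < 0) {
		perror("SCTP_FRAGMENT_INTERLEAVE");
		return 1;
	}

	/* Subscribe to SCTP_SNDRCV so each recvmsg() reports which
	 * association/stream the data belongs to. */
	struct sctp_event_subscribe ev;
	memset(&ev, 0, sizeof(ev));
	ev.sctp_data_io_event = 1;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev)) < 0) {
		perror("SCTP_EVENTS");
		return 1;
	}
	return 0;
}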
Diffstat (limited to 'net/sctp/ulpqueue.c')
-rw-r--r--	net/sctp/ulpqueue.c	103
1 file changed, 74 insertions(+), 29 deletions(-)
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index b29e3e4b72c9..ac80c34f6c2c 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -138,18 +138,42 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 /* Clear the partial delivery mode for this socket.   Note: This
  * assumes that no association is currently in partial delivery mode.
  */
-int sctp_clear_pd(struct sock *sk)
+int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 {
 	struct sctp_sock *sp = sctp_sk(sk);
 
-	sp->pd_mode = 0;
-	if (!skb_queue_empty(&sp->pd_lobby)) {
-		struct list_head *list;
-		sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
-		list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
-		INIT_LIST_HEAD(list);
-		return 1;
+	if (atomic_dec_and_test(&sp->pd_mode)) {
+		/* This means there are no other associations in PD, so
+		 * we can go ahead and clear out the lobby in one shot.
+		 */
+		if (!skb_queue_empty(&sp->pd_lobby)) {
+			struct list_head *list;
+			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
+			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
+			INIT_LIST_HEAD(list);
+			return 1;
+		}
+	} else {
+		/* There are other associations in PD, so we only need to
+		 * pull stuff out of the lobby that belongs to the
+		 * association that is exiting PD (all of its notifications
+		 * are posted here).
+		 */
+		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
+			struct sk_buff *skb, *tmp;
+			struct sctp_ulpevent *event;
+
+			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
+				event = sctp_skb2event(skb);
+				if (event->asoc == asoc) {
+					__skb_unlink(skb, &sp->pd_lobby);
+					__skb_queue_tail(&sk->sk_receive_queue,
+							 skb);
+				}
+			}
+		}
 	}
+
 	return 0;
 }
 
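The heart of the hunk above is the change of sp->pd_mode from a flag to a count of associations currently in partial delivery: atomic_dec_and_test() tells the exiting association whether it was the last one, in which case the whole lobby can be flushed at once. A standalone sketch of that last-one-out pattern, written with C11 atomics in place of the kernel's atomic_t (the names here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

/* pd_users stands in for sctp_sock.pd_mode after this patch. */
static atomic_int pd_users;

/* An association entering partial delivery bumps the count
 * (the kernel side uses atomic_inc()). */
void enter_pd(void)
{
	atomic_fetch_add(&pd_users, 1);
}

/* An association leaving partial delivery drops the count.
 * Returns true only for the last one out -- the case where
 * sctp_clear_pd() may flush the whole lobby in one shot;
 * otherwise only that association's lobbied events are pulled. */
bool exit_pd(void)
{
	/* fetch_sub returns the old value, so old == 1 means
	 * the count just hit zero, like atomic_dec_and_test(). */
	return atomic_fetch_sub(&pd_users, 1) == 1;
}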
@@ -157,7 +181,7 @@ int sctp_clear_pd(struct sock *sk)
 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 {
 	ulpq->pd_mode = 0;
-	return sctp_clear_pd(ulpq->asoc->base.sk);
+	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 }
 
 /* If the SKB of 'event' is on a list, it is the first such member
@@ -187,25 +211,35 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	 * the association the cause of the partial delivery.
 	 */
 
-	if (!sctp_sk(sk)->pd_mode) {
+	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
 		queue = &sk->sk_receive_queue;
-	} else if (ulpq->pd_mode) {
-		/* If the association is in partial delivery, we
-		 * need to finish delivering the partially processed
-		 * packet before passing any other data.  This is
-		 * because we don't truly support stream interleaving.
-		 */
-		if ((event->msg_flags & MSG_NOTIFICATION) ||
-		    (SCTP_DATA_NOT_FRAG ==
-			    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
-			queue = &sctp_sk(sk)->pd_lobby;
-		else {
-			clear_pd = event->msg_flags & MSG_EOR;
-			queue = &sk->sk_receive_queue;
+	} else {
+		if (ulpq->pd_mode) {
+			/* If the association is in partial delivery, we
+			 * need to finish delivering the partially processed
+			 * packet before passing any other data.  This is
+			 * because we don't truly support stream interleaving.
+			 */
+			if ((event->msg_flags & MSG_NOTIFICATION) ||
+			    (SCTP_DATA_NOT_FRAG ==
+				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
+				queue = &sctp_sk(sk)->pd_lobby;
+			else {
+				clear_pd = event->msg_flags & MSG_EOR;
+				queue = &sk->sk_receive_queue;
+			}
+		} else {
+			/*
+			 * If fragment interleave is enabled, we
+			 * can queue this to the receive queue instead
+			 * of the lobby.
+			 */
+			if (sctp_sk(sk)->frag_interleave)
+				queue = &sk->sk_receive_queue;
+			else
+				queue = &sctp_sk(sk)->pd_lobby;
 		}
-	} else
-		queue = &sctp_sk(sk)->pd_lobby;
-
+	}
 
 	/* If we are harvesting multiple skbs they will be
 	 * collected on a list.
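The hunk above reads more easily as a decision table: with nobody in partial delivery, events go straight to the receive queue; while this association is in partial delivery, only continuation fragments bypass the lobby; while some other association is in partial delivery, the new frag_interleave flag decides. A hypothetical restatement as a pure function, with invented names purely for illustration:

#include <stdbool.h>

enum dest { RECEIVE_QUEUE, PD_LOBBY };

/* pd_count mirrors atomic_read(&sctp_sk(sk)->pd_mode); the booleans
 * mirror ulpq->pd_mode, sctp_sk(sk)->frag_interleave, and the
 * MSG_NOTIFICATION / SCTP_DATA_NOT_FRAG test on event->msg_flags. */
enum dest choose_queue(int pd_count, bool assoc_in_pd,
		       bool frag_interleave, bool whole_msg_or_notification)
{
	if (pd_count == 0)
		return RECEIVE_QUEUE;	/* nobody in PD: deliver directly */

	if (assoc_in_pd)
		/* Finish the in-flight message first; park complete
		 * messages and notifications in the lobby. */
		return whole_msg_or_notification ? PD_LOBBY : RECEIVE_QUEUE;

	/* Another association is in PD: interleaving decides. */
	return frag_interleave ? RECEIVE_QUEUE : PD_LOBBY;
}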
@@ -826,18 +860,29 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 {
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
+	struct sctp_sock *sp;
 
 	asoc = ulpq->asoc;
+	sp = sctp_sk(asoc->base.sk);
 
-	/* Are we already in partial delivery mode?  */
-	if (!sctp_sk(asoc->base.sk)->pd_mode) {
+	/* If the association is already in Partial Delivery mode
+	 * we have nothing to do.
+	 */
+	if (ulpq->pd_mode)
+		return;
 
+	/* If the user enabled the fragment interleave socket option,
+	 * multiple associations can enter partial delivery.
+	 * Otherwise, we can only enter partial delivery if the
+	 * socket is not in partial delivery mode.
+	 */
+	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
 		/* Is partial delivery possible?  */
 		event = sctp_ulpq_retrieve_first(ulpq);
 		/* Send event to the ULP. */
 		if (event) {
 			sctp_ulpq_tail_event(ulpq, event);
-			sctp_sk(asoc->base.sk)->pd_mode = 1;
+			atomic_inc(&sp->pd_mode);
 			ulpq->pd_mode = 1;
 			return;
 		}
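The commit message's caution about EOR indicators follows directly from this last hunk: once several associations may be in partial delivery at once, consecutive reads on a one-to-many socket can interleave fragments from different associations, so reassembly state must be keyed by association (and stream) rather than assumed to continue the previous read. A hedged receive-side sketch, where append_fragment() is a hypothetical application helper and error handling is omitted:

#include <stdbool.h>
#include <stddef.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Hypothetical application helper: buffers 'len' bytes for the given
 * association/stream and finalizes the message when eor is set. */
void append_fragment(sctp_assoc_t assoc, unsigned int stream,
		     const char *buf, size_t len, bool eor);

void read_one(int fd)
{
	char data[8192];
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};

	ssize_t n = recvmsg(fd, &msg, 0);
	if (n <= 0)
		return;

	/* Find the SCTP_SNDRCV ancillary data enabled earlier. */
	struct sctp_sndrcvinfo *si = NULL;
	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c;
	     c = CMSG_NXTHDR(&msg, c))
		if (c->cmsg_level == IPPROTO_SCTP && c->cmsg_type == SCTP_SNDRCV)
			si = (struct sctp_sndrcvinfo *)CMSG_DATA(c);
	if (!si)
		return;

	/* MSG_EOR marks the last fragment of a message; without it,
	 * more reads for the same (association, stream) will follow,
	 * possibly interleaved with other associations' fragments. */
	append_fragment(si->sinfo_assoc_id, si->sinfo_stream,
			data, (size_t)n, msg.msg_flags & MSG_EOR);
}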