author	Stephen Hemminger <stephen.hemminger@vyatta.com>	2008-01-20 20:20:56 -0500
committer	David S. Miller <davem@davemloft.net>	2008-01-28 18:08:16 -0500
commit	6f9e98f7a96fdf4d621b8241d5a8a55c692de373 (patch)
tree	c19eb84f7af9c0be589b7349020cf9faa519fdf8
parent	d46f8dd87d9e7d5356891cbe97b8472e74db1413 (diff)
[PKT_SCHED] SFQ: whitespace cleanup
Add whitespace around operators, and add a few blank lines to improve readability.

Signed-off-by: Stephen Hemminger <stephen.hemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
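The convention being applied is the usual kernel one (Documentation/CodingStyle): one space on each side of binary operators, including inside the clauses of a for loop. A minimal before/after sketch, illustrative only and not taken from this patch (the identifiers are made up):

	/* before: operators and loop clauses run together */
	for (i=0; i<n; i++)
		sum += val[i]*weight;

	/* after: spaces around binary operators */
	for (i = 0; i < n; i++)
		sum += val[i] * weight;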
-rw-r--r--	net/sched/sch_sfq.c	42
1 file changed, 25 insertions(+), 17 deletions(-)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 4179758450f5..25afe0f1d83a 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -122,7 +122,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 	{
 		const struct iphdr *iph = ip_hdr(skb);
 		h = iph->daddr;
-		h2 = iph->saddr^iph->protocol;
+		h2 = iph->saddr ^ iph->protocol;
 		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
 		    (iph->protocol == IPPROTO_TCP ||
 		     iph->protocol == IPPROTO_UDP ||
@@ -137,7 +137,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 	{
 		struct ipv6hdr *iph = ipv6_hdr(skb);
 		h = iph->daddr.s6_addr32[3];
-		h2 = iph->saddr.s6_addr32[3]^iph->nexthdr;
+		h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr;
 		if (iph->nexthdr == IPPROTO_TCP ||
 		    iph->nexthdr == IPPROTO_UDP ||
 		    iph->nexthdr == IPPROTO_UDPLITE ||
@@ -148,9 +148,10 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
 		break;
 	}
 	default:
-		h = (u32)(unsigned long)skb->dst^skb->protocol;
-		h2 = (u32)(unsigned long)skb->sk;
+		h = (unsigned long)skb->dst ^ skb->protocol;
+		h2 = (unsigned long)skb->sk;
 	}
+
 	return sfq_fold_hash(q, h, h2);
 }
 
@@ -208,7 +209,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 	   drop a packet from it */
 
 	if (d > 1) {
-		sfq_index x = q->dep[d+SFQ_DEPTH].next;
+		sfq_index x = q->dep[d + SFQ_DEPTH].next;
 		skb = q->qs[x].prev;
 		len = skb->len;
 		__skb_unlink(skb, &q->qs[x]);
@@ -241,7 +242,7 @@ static unsigned int sfq_drop(struct Qdisc *sch)
 }
 
 static int
-sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	unsigned hash = sfq_hash(q, skb);
@@ -252,6 +253,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
 		q->hash[x] = hash;
 	}
+
 	/* If selected queue has length q->limit, this means that
 	 * all another queues are empty and that we do simple tail drop,
 	 * i.e. drop _this_ packet.
@@ -284,7 +286,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 }
 
 static int
-sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
+sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	unsigned hash = sfq_hash(q, skb);
@@ -295,6 +297,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
 		q->hash[x] = hash;
 	}
+
 	sch->qstats.backlog += skb->len;
 	__skb_queue_head(&q->qs[x], skb);
 	/* If selected queue has length q->limit+1, this means that
@@ -310,6 +313,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
 		kfree_skb(skb);
 		return NET_XMIT_CN;
 	}
+
 	sfq_inc(q, x);
 	if (q->qs[x].qlen == 1) {	/* The flow is new */
 		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
@@ -322,6 +326,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
 			q->tail = x;
 		}
 	}
+
 	if (++sch->q.qlen <= q->limit) {
 		sch->qstats.requeues++;
 		return 0;
@@ -336,7 +341,7 @@ sfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
 
 
 static struct sk_buff *
-sfq_dequeue(struct Qdisc* sch)
+sfq_dequeue(struct Qdisc *sch)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	struct sk_buff *skb;
@@ -373,7 +378,7 @@ sfq_dequeue(struct Qdisc* sch)
 }
 
 static void
-sfq_reset(struct Qdisc* sch)
+sfq_reset(struct Qdisc *sch)
 {
 	struct sk_buff *skb;
 
@@ -383,7 +388,7 @@ sfq_reset(struct Qdisc* sch)
 
 static void sfq_perturbation(unsigned long arg)
 {
-	struct Qdisc *sch = (struct Qdisc*)arg;
+	struct Qdisc *sch = (struct Qdisc *)arg;
 	struct sfq_sched_data *q = qdisc_priv(sch);
 
 	q->perturbation = net_random();
@@ -403,7 +408,7 @@ static int sfq_change(struct Qdisc *sch, struct rtattr *opt)
 
 	sch_tree_lock(sch);
 	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
-	q->perturb_period = ctl->perturb_period*HZ;
+	q->perturb_period = ctl->perturb_period * HZ;
 	if (ctl->limit)
 		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);
 
@@ -430,13 +435,15 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
 	q->perturb_timer.data = (unsigned long)sch;;
 	init_timer_deferrable(&q->perturb_timer);
 
-	for (i=0; i<SFQ_HASH_DIVISOR; i++)
+	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
 		q->ht[i] = SFQ_DEPTH;
-	for (i=0; i<SFQ_DEPTH; i++) {
+
+	for (i = 0; i < SFQ_DEPTH; i++) {
 		skb_queue_head_init(&q->qs[i]);
-		q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
-		q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
+		q->dep[i + SFQ_DEPTH].next = i + SFQ_DEPTH;
+		q->dep[i + SFQ_DEPTH].prev = i + SFQ_DEPTH;
 	}
+
 	q->limit = SFQ_DEPTH - 1;
 	q->max_depth = 0;
 	q->tail = SFQ_DEPTH;
@@ -449,7 +456,8 @@ static int sfq_init(struct Qdisc *sch, struct rtattr *opt)
 		if (err)
 			return err;
 	}
-	for (i=0; i<SFQ_DEPTH; i++)
+
+	for (i = 0; i < SFQ_DEPTH; i++)
 		sfq_link(q, i);
 	return 0;
 }
@@ -467,7 +475,7 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct tc_sfq_qopt opt;
 
 	opt.quantum = q->quantum;
-	opt.perturb_period = q->perturb_period/HZ;
+	opt.perturb_period = q->perturb_period / HZ;
 
 	opt.limit = q->limit;
 	opt.divisor = SFQ_HASH_DIVISOR;