author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/net/shaper.c
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/net/shaper.c')
-rw-r--r--  drivers/net/shaper.c  755
1 file changed, 755 insertions(+), 0 deletions(-)
diff --git a/drivers/net/shaper.c b/drivers/net/shaper.c
new file mode 100644
index 000000000000..e68cf5fb4920
--- /dev/null
+++ b/drivers/net/shaper.c
@@ -0,0 +1,755 @@
1/*
2 * Simple traffic shaper for Linux NET3.
3 *
4 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
5 * http://www.redhat.com
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
13 * warranty for any of this software. This material is provided
14 * "AS-IS" and at no charge.
15 *
16 *
17 * Algorithm:
18 *
19 * Queue Frame:
20 * Compute time length of frame at regulated speed
21 * Add frame to queue at appropriate point
22 * Adjust time length computation for followup frames
23 * Any frame that falls outside of its boundaries is freed
24 *
25 * We work to the following constants
26 *
27 * SHAPER_QLEN Maximum queued frames
28 * SHAPER_LATENCY Bounding latency on a frame. A frame that slips
29 * outside this latency window is dropped. This stops us queueing
30 * frames for a long time and confusing a remote
31 * host.
32 * SHAPER_MAXSLIP Maximum time a priority frame may jump forward.
33 * That bounds the penalty we will inflict on low
34 * priority traffic.
35 * SHAPER_BURST Time range we treat as "now" in order to reduce
36 * system load. The larger this is, the burstier the
37 * behaviour: local performance improves through packet
38 * clustering on routers, but the remote end becomes
39 * worse at judging RTTs.
40 *
41 * This is designed to handle lower speed links ( < 200K/second or so). We
42 * run off a 100-150Hz base clock typically. This gives us a resolution at
43 * 200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
44 * resolution may start to cause much more burstiness in the traffic. We
45 * could avoid a lot of that by calling shaper_kick() at the end of the
46 * tied device's transmissions. If you run above about 100K/second you
47 * may need to tune the configured speed to get the right values.
48 *
49 * BUGS:
50 * Downing the interface underneath the shaper before the shaper
51 * itself will render your machine defunct. For now, therefore,
52 * don't shape over PPP or SLIP!
53 * This will be fixed in BETA4
54 *
55 * Update History :
56 *
57 * Fixed bh_atomic() SMP races and rewrote the locking code to
58 * be SMP safe and irq-mask friendly.
59 * NOTE: we can't use start_bh_atomic() in kick_shaper()
60 * because it is going to be called again from an irq handler,
61 * and synchronize_bh() is a no-no if called from irq context.
62 * 1999 Andrea Arcangeli
63 *
64 * Device statistics (tx_packets, tx_bytes,
65 * tx_drops: queue_over_time and collisions: max_queue_exceeded)
66 * 1999/06/18 Jordi Murgo <savage@apostols.org>
67 *
68 * Use skb->cb for private data.
69 * 2000/03 Andi Kleen
70 */
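/*
 * A worked check of the resolution figures above (illustrative only; this
 * assumes HZ = 100, the low end of the base clock quoted). The arithmetic
 * mirrors shaper_setspeed() below:
 *
 *	bitspersec   = 200000			a 200Kbit/second link
 *	bytespertick = (bitspersec / HZ) / 8	= 250 bytes per clock tick
 *
 * 250 bytes is 2000 bits, i.e. roughly the "2Kbit or 256 bytes" per-tick
 * resolution described above.
 */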
71
72#include <linux/config.h>
73#include <linux/module.h>
74#include <linux/kernel.h>
75#include <linux/fcntl.h>
76#include <linux/mm.h>
77#include <linux/slab.h>
78#include <linux/string.h>
79#include <linux/errno.h>
80#include <linux/netdevice.h>
81#include <linux/etherdevice.h>
82#include <linux/skbuff.h>
83#include <linux/if_arp.h>
84#include <linux/init.h>
85#include <linux/if_shaper.h>
86
87#include <net/dst.h>
88#include <net/arp.h>
89
90struct shaper_cb {
91 unsigned long shapeclock; /* Time it should go out */
92 unsigned long shapestamp; /* Stamp for shaper */
93 __u32 shapelatency; /* Latency on frame */
94 __u32 shapelen; /* Frame length in clocks */
95 __u16 shapepend; /* Pending */
96};
97#define SHAPERCB(skb) ((struct shaper_cb *) ((skb)->cb))
98
99static int sh_debug; /* Debug flag */
100
101#define SHAPER_BANNER "CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"
102
103/*
104 * Locking
105 */
106
107static int shaper_lock(struct shaper *sh)
108{
109 /*
110 * Lock in an interrupt must fail
111 */
112 while (test_and_set_bit(0, &sh->locked))
113 {
114 if (!in_interrupt())
115 sleep_on(&sh->wait_queue);
116 else
117 return 0;
118
119 }
120 return 1;
121}
122
123static void shaper_kick(struct shaper *sh);
124
125static void shaper_unlock(struct shaper *sh)
126{
127 clear_bit(0, &sh->locked);
128 wake_up(&sh->wait_queue);
129 shaper_kick(sh);
130}
131
132/*
133 * Compute clocks on a buffer
134 */
135
136static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
137{
138 int t=skb->len/shaper->bytespertick;
139 return t;
140}
141
142/*
143 * Set the speed of a shaper. We compute this in bytes per tick since
144 * that's how the machine wants to run. Quoted input is in bits per second,
145 * as is traditional (note: not baud). We assume 8-bit bytes.
146 */
147
148static void shaper_setspeed(struct shaper *shaper, int bitspersec)
149{
150 shaper->bitspersec=bitspersec;
151 shaper->bytespertick=(bitspersec/HZ)/8;
152 if(!shaper->bytespertick)
153 shaper->bytespertick++;
154}
155
156/*
157 * Throw a frame at a shaper.
158 */
159
160static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
161{
162 struct sk_buff *ptr;
163
164 /*
165 * Get ready to work on this shaper. The lock may fail if we are
166 * in an interrupt and the shaper is already locked.
167 */
168
169 if(!shaper_lock(shaper))
170 return -1;
171 ptr=shaper->sendq.prev;
172
173 /*
174 * Set up our packet details
175 */
176
177 SHAPERCB(skb)->shapelatency=0;
178 SHAPERCB(skb)->shapeclock=shaper->recovery;
179 if(time_before(SHAPERCB(skb)->shapeclock, jiffies))
180 SHAPERCB(skb)->shapeclock=jiffies;
181 skb->priority=0; /* short term bug fix */
182 SHAPERCB(skb)->shapestamp=jiffies;
183
184 /*
185 * Time slots for this packet.
186 */
187
188 SHAPERCB(skb)->shapelen= shaper_clocks(shaper,skb);
189
190#ifdef SHAPER_COMPLEX /* and broken.. */
191
192 while(ptr && ptr!=(struct sk_buff *)&shaper->sendq)
193 {
194 if(ptr->priority<skb->priority
195 && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
196 {
197 struct sk_buff *tmp=ptr->prev;
198
199 /*
200 * It goes before us therefore we slip the length
201 * of the new frame.
202 */
203
204 SHAPERCB(ptr)->shapeclock+=SHAPERCB(skb)->shapelen;
205 SHAPERCB(ptr)->shapelatency+=SHAPERCB(skb)->shapelen;
206
207 /*
208 * The packet may have slipped so far back it
209 * fell off.
210 */
211 if(SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
212 {
213 skb_unlink(ptr);
214 dev_kfree_skb(ptr);
215 }
216 ptr=tmp;
217 }
218 else
219 break;
220 }
221 if(ptr==NULL || ptr==(struct sk_buff *)&shaper->sendq)
222 skb_queue_head(&shaper->sendq,skb);
223 else
224 {
225 struct sk_buff *tmp;
226 /*
227 * Set the packet clock out time according to the
228 * frames ahead. I'm sure a bit of thought could drop
229 * this loop.
230 */
231 for(tmp=skb_peek(&shaper->sendq); tmp!=NULL && tmp!=ptr; tmp=tmp->next)
232 SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;
233 skb_append(ptr,skb);
234 }
235#else
236 {
237 struct sk_buff *tmp;
238 /*
239 * Up our shape clock by the time pending on the queue
240 * (Should keep this in the shaper as a variable..)
241 */
242 for(tmp=skb_peek(&shaper->sendq); tmp!=NULL &&
243 tmp!=(struct sk_buff *)&shaper->sendq; tmp=tmp->next)
244 SHAPERCB(skb)->shapeclock+=SHAPERCB(tmp)->shapelen;
245 /*
246 * Queue over time. Spill packet.
247 */
248 if(SHAPERCB(skb)->shapeclock-jiffies > SHAPER_LATENCY) {
249 dev_kfree_skb(skb);
250 shaper->stats.tx_dropped++;
251 } else
252 skb_queue_tail(&shaper->sendq, skb);
253 }
254#endif
255 if(sh_debug)
256 printk("Frame queued.\n");
257 if(skb_queue_len(&shaper->sendq)>SHAPER_QLEN)
258 {
259 ptr=skb_dequeue(&shaper->sendq);
260 dev_kfree_skb(ptr);
261 shaper->stats.collisions++;
262 }
263 shaper_unlock(shaper);
264 return 0;
265}
266
267/*
268 * Transmit from a shaper
269 */
270
271static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
272{
273 struct sk_buff *newskb=skb_clone(skb, GFP_ATOMIC);
274 if(sh_debug)
275 printk("Kick frame on %p\n",newskb);
276 if(newskb)
277 {
278 newskb->dev=shaper->dev;
279 newskb->priority=2;
280 if(sh_debug)
281 printk("Kick new frame to %s, %d\n",
282 shaper->dev->name,newskb->priority);
283 dev_queue_xmit(newskb);
284
285 shaper->stats.tx_bytes += skb->len;
286 shaper->stats.tx_packets++;
287
288 if(sh_debug)
289 printk("Kicked new frame out.\n");
290 dev_kfree_skb(skb);
291 }
292}
293
294/*
295 * Timer handler for shaping clock
296 */
297
298static void shaper_timer(unsigned long data)
299{
300 struct shaper *sh=(struct shaper *)data;
301 shaper_kick(sh);
302}
303
304/*
305 * Kick a shaper queue and try to do something sensible with the
306 * queue.
307 */
308
309static void shaper_kick(struct shaper *shaper)
310{
311 struct sk_buff *skb;
312
313 /*
314 * Shaper unlock will kick
315 */
316
317 if (test_and_set_bit(0, &shaper->locked))
318 {
319 if(sh_debug)
320 printk("Shaper locked.\n");
321 mod_timer(&shaper->timer, jiffies);
322 return;
323 }
324
325
326 /*
327 * Walk the list (may be empty)
328 */
329
330 while((skb=skb_peek(&shaper->sendq))!=NULL)
331 {
332 /*
333 * Each packet due to go out by now (within an error
334 * of SHAPER_BURST) gets kicked onto the link
335 */
336
337 if(sh_debug)
338 printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
339 if(time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
340 {
341 /*
342 * Pull the frame and get interrupts back on.
343 */
344
345 skb_unlink(skb);
346 if (shaper->recovery <
347 SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
348 shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
349 /*
350 * Pass on to the physical target device via
351 * our low level packet thrower.
352 */
353
354 SHAPERCB(skb)->shapepend=0;
355 shaper_queue_xmit(shaper, skb); /* Fire */
356 }
357 else
358 break;
359 }
360
361 /*
362 * Next kick.
363 */
364
365 if(skb!=NULL)
366 mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
367
368 clear_bit(0, &shaper->locked);
369}
370
371
372/*
373 * Flush the shaper queues on a closedown
374 */
375
376static void shaper_flush(struct shaper *shaper)
377{
378 struct sk_buff *skb;
379 if(!shaper_lock(shaper))
380 {
381 printk(KERN_ERR "shaper: shaper_flush() called by an irq!\n");
382 return;
383 }
384 while((skb=skb_dequeue(&shaper->sendq))!=NULL)
385 dev_kfree_skb(skb);
386 shaper_unlock(shaper);
387}
388
389/*
390 * Bring the interface up. We just disallow this until a
391 * bind.
392 */
393
394static int shaper_open(struct net_device *dev)
395{
396 struct shaper *shaper=dev->priv;
397
398 /*
399 * Can't open until attached.
400 * Also can't open until speed is set, or we'll get
401 * a division by zero.
402 */
403
404 if(shaper->dev==NULL)
405 return -ENODEV;
406 if(shaper->bitspersec==0)
407 return -EINVAL;
408 return 0;
409}
410
411/*
412 * Closing a shaper flushes the queues.
413 */
414
415static int shaper_close(struct net_device *dev)
416{
417 struct shaper *shaper=dev->priv;
418 shaper_flush(shaper);
419 del_timer_sync(&shaper->timer);
420 return 0;
421}
422
423/*
424 * Revectored calls. We alter the parameters and call the functions
425 * for our attached device. This enables us to allocate bandwidth after
426 * ARP and other resolutions, not before.
427 */
428
429
430static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
431{
432 struct shaper *sh=dev->priv;
433 return shaper_qframe(sh, skb);
434}
435
436static struct net_device_stats *shaper_get_stats(struct net_device *dev)
437{
438 struct shaper *sh=dev->priv;
439 return &sh->stats;
440}
441
442static int shaper_header(struct sk_buff *skb, struct net_device *dev,
443 unsigned short type, void *daddr, void *saddr, unsigned len)
444{
445 struct shaper *sh=dev->priv;
446 int v;
447 if(sh_debug)
448 printk("Shaper header\n");
449 skb->dev=sh->dev;
450 v=sh->hard_header(skb,sh->dev,type,daddr,saddr,len);
451 skb->dev=dev;
452 return v;
453}
454
455static int shaper_rebuild_header(struct sk_buff *skb)
456{
457 struct shaper *sh=skb->dev->priv;
458 struct net_device *dev=skb->dev;
459 int v;
460 if(sh_debug)
461 printk("Shaper rebuild header\n");
462 skb->dev=sh->dev;
463 v=sh->rebuild_header(skb);
464 skb->dev=dev;
465 return v;
466}
467
468#if 0
469static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
470{
471 struct shaper *sh=neigh->dev->priv;
472 struct net_device *tmp;
473 int ret;
474 if(sh_debug)
475 printk("Shaper header cache bind\n");
476 tmp=neigh->dev;
477 neigh->dev=sh->dev;
478 ret=sh->hard_header_cache(neigh,hh);
479 neigh->dev=tmp;
480 return ret;
481}
482
483static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
484 unsigned char *haddr)
485{
486 struct shaper *sh=dev->priv;
487 if(sh_debug)
488 printk("Shaper cache update\n");
489 sh->header_cache_update(hh, sh->dev, haddr);
490}
491#endif
492
493#ifdef CONFIG_INET
494
495static int shaper_neigh_setup(struct neighbour *n)
496{
497#ifdef CONFIG_INET
498 if (n->nud_state == NUD_NONE) {
499 n->ops = &arp_broken_ops;
500 n->output = n->ops->output;
501 }
502#endif
503 return 0;
504}
505
506static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
507{
508#ifdef CONFIG_INET
509 if (p->tbl->family == AF_INET) {
510 p->neigh_setup = shaper_neigh_setup;
511 p->ucast_probes = 0;
512 p->mcast_probes = 0;
513 }
514#endif
515 return 0;
516}
517
518#else /* !(CONFIG_INET) */
519
520static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
521{
522 return 0;
523}
524
525#endif
526
527static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
528{
529 sh->dev = dev;
530 sh->hard_start_xmit=dev->hard_start_xmit;
531 sh->get_stats=dev->get_stats;
532 if(dev->hard_header)
533 {
534 sh->hard_header=dev->hard_header;
535 shdev->hard_header = shaper_header;
536 }
537 else
538 shdev->hard_header = NULL;
539
540 if(dev->rebuild_header)
541 {
542 sh->rebuild_header = dev->rebuild_header;
543 shdev->rebuild_header = shaper_rebuild_header;
544 }
545 else
546 shdev->rebuild_header = NULL;
547
548#if 0
549 if(dev->hard_header_cache)
550 {
551 sh->hard_header_cache = dev->hard_header_cache;
552 shdev->hard_header_cache= shaper_cache;
553 }
554 else
555 {
556 shdev->hard_header_cache= NULL;
557 }
558
559 if(dev->header_cache_update)
560 {
561 sh->header_cache_update = dev->header_cache_update;
562 shdev->header_cache_update = shaper_cache_update;
563 }
564 else
565 shdev->header_cache_update= NULL;
566#else
567 shdev->header_cache_update = NULL;
568 shdev->hard_header_cache = NULL;
569#endif
570 shdev->neigh_setup = shaper_neigh_setup_dev;
571
572 shdev->hard_header_len=dev->hard_header_len;
573 shdev->type=dev->type;
574 shdev->addr_len=dev->addr_len;
575 shdev->mtu=dev->mtu;
576 sh->bitspersec=0;
577 return 0;
578}
579
580static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
581{
582 struct shaperconf *ss= (struct shaperconf *)&ifr->ifr_ifru;
583 struct shaper *sh=dev->priv;
584
585 if(ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED)
586 {
587 if(!capable(CAP_NET_ADMIN))
588 return -EPERM;
589 }
590
591 switch(ss->ss_cmd)
592 {
593 case SHAPER_SET_DEV:
594 {
595 struct net_device *them=__dev_get_by_name(ss->ss_name);
596 if(them==NULL)
597 return -ENODEV;
598 if(sh->dev)
599 return -EBUSY;
600 return shaper_attach(dev,dev->priv, them);
601 }
602 case SHAPER_GET_DEV:
603 if(sh->dev==NULL)
604 return -ENODEV;
605 strcpy(ss->ss_name, sh->dev->name);
606 return 0;
607 case SHAPER_SET_SPEED:
608 shaper_setspeed(sh,ss->ss_speed);
609 return 0;
610 case SHAPER_GET_SPEED:
611 ss->ss_speed=sh->bitspersec;
612 return 0;
613 default:
614 return -EINVAL;
615 }
616}
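/*
 * Userspace sketch (not part of the driver) of how the commands above are
 * typically driven: attach shaper0 to eth0, then cap it at 64Kbit/second.
 * struct shaperconf and the SHAPER_* commands come from <linux/if_shaper.h>;
 * using SIOCDEVPRIVATE as the ioctl request number here is an assumption for
 * illustration, the historical shapecfg utility being the reference for the
 * real calling convention.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <linux/if_shaper.h>
 *	#include <linux/sockios.h>
 *
 *	int configure_shaper(void)
 *	{
 *		struct ifreq ifr;
 *		struct shaperconf *sc = (struct shaperconf *)&ifr.ifr_ifru;
 *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *		int err;
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&ifr, 0, sizeof(ifr));
 *		strcpy(ifr.ifr_name, "shaper0");
 *
 *		sc->ss_cmd = SHAPER_SET_DEV;
 *		strcpy(sc->ss_name, "eth0");
 *		err = ioctl(fd, SIOCDEVPRIVATE, &ifr);
 *
 *		if (!err) {
 *			sc->ss_cmd = SHAPER_SET_SPEED;
 *			sc->ss_speed = 64000;
 *			err = ioctl(fd, SIOCDEVPRIVATE, &ifr);
 *		}
 *		close(fd);
 *		return err;
 *	}
 */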
617
618static void shaper_init_priv(struct net_device *dev)
619{
620 struct shaper *sh = dev->priv;
621
622 skb_queue_head_init(&sh->sendq);
623 init_timer(&sh->timer);
624 sh->timer.function=shaper_timer;
625 sh->timer.data=(unsigned long)sh;
626 init_waitqueue_head(&sh->wait_queue);
627}
628
629/*
630 * Add a shaper device to the system
631 */
632
633static void __init shaper_setup(struct net_device *dev)
634{
635 /*
636 * Set up the shaper.
637 */
638
639 SET_MODULE_OWNER(dev);
640
641 shaper_init_priv(dev);
642
643 dev->open = shaper_open;
644 dev->stop = shaper_close;
645 dev->hard_start_xmit = shaper_start_xmit;
646 dev->get_stats = shaper_get_stats;
647 dev->set_multicast_list = NULL;
648
649 /*
650 * Initialise the packet queues
651 */
652
653 /*
654 * Handlers for when we attach to a device.
655 */
656
657 dev->hard_header = shaper_header;
658 dev->rebuild_header = shaper_rebuild_header;
659#if 0
660 dev->hard_header_cache = shaper_cache;
661 dev->header_cache_update= shaper_cache_update;
662#endif
663 dev->neigh_setup = shaper_neigh_setup_dev;
664 dev->do_ioctl = shaper_ioctl;
665 dev->hard_header_len = 0;
666 dev->type = ARPHRD_ETHER; /* initially */
667 dev->set_mac_address = NULL;
668 dev->mtu = 1500;
669 dev->addr_len = 0;
670 dev->tx_queue_len = 10;
671 dev->flags = 0;
672}
673
674static int shapers = 1;
675#ifdef MODULE
676
677module_param(shapers, int, 0);
678MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");
679
680#else /* MODULE */
681
682static int __init set_num_shapers(char *str)
683{
684 shapers = simple_strtol(str, NULL, 0);
685 return 1;
686}
687
688__setup("shapers=", set_num_shapers);
689
690#endif /* MODULE */
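/*
 * Usage sketch: built as a module, something like "modprobe shaper shapers=4"
 * creates shaper0..shaper3; built in, the equivalent is the "shapers=4" boot
 * parameter. The default of 1 gives a single shaper0 device.
 */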
691
692static struct net_device **devs;
693
694static unsigned int shapers_registered = 0;
695
696static int __init shaper_init(void)
697{
698 int i;
699 size_t alloc_size;
700 struct net_device *dev;
701 char name[IFNAMSIZ];
702
703 if (shapers < 1)
704 return -ENODEV;
705
706 alloc_size = sizeof(*devs) * shapers;
707 devs = kmalloc(alloc_size, GFP_KERNEL);
708 if (!devs)
709 return -ENOMEM;
710 memset(devs, 0, alloc_size);
711
712 for (i = 0; i < shapers; i++) {
713
714 snprintf(name, IFNAMSIZ, "shaper%d", i);
715 dev = alloc_netdev(sizeof(struct shaper), name,
716 shaper_setup);
717 if (!dev)
718 break;
719
720 if (register_netdev(dev)) {
721 free_netdev(dev);
722 break;
723 }
724
725 devs[i] = dev;
726 shapers_registered++;
727 }
728
729 if (!shapers_registered) {
730 kfree(devs);
731 devs = NULL;
732 }
733
734 return (shapers_registered ? 0 : -ENODEV);
735}
736
737static void __exit shaper_exit (void)
738{
739 int i;
740
741 for (i = 0; i < shapers_registered; i++) {
742 if (devs[i]) {
743 unregister_netdev(devs[i]);
744 free_netdev(devs[i]);
745 }
746 }
747
748 kfree(devs);
749 devs = NULL;
750}
751
752module_init(shaper_init);
753module_exit(shaper_exit);
754MODULE_LICENSE("GPL");
755