Diffstat (limited to 'net')
-rw-r--r--  net/core/neighbour.c                        5
-rw-r--r--  net/core/pktgen.c                         506
-rw-r--r--  net/core/skbuff.c                           2
-rw-r--r--  net/decnet/af_decnet.c                     13
-rw-r--r--  net/ipv4/devinet.c                          3
-rw-r--r--  net/ipv4/fib_trie.c                         2
-rw-r--r--  net/ipv4/icmp.c                             5
-rw-r--r--  net/ipv4/ip_output.c                        5
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_core.c    132
-rw-r--r--  net/ipv4/proc.c                             4
-rw-r--r--  net/ipv6/icmp.c                             9
-rw-r--r--  net/ipv6/proc.c                             4
-rw-r--r--  net/netlink/af_netlink.c                    3
-rw-r--r--  net/rose/rose_route.c                       2
-rw-r--r--  net/sctp/proc.c                             4
-rw-r--r--  net/xfrm/xfrm_policy.c                     43
-rw-r--r--  net/xfrm/xfrm_state.c                       6
17 files changed, 344 insertions, 404 deletions
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1dcf7fa1f0fe..e68700f950a5 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1625,12 +1625,9 @@ static int neightbl_fill_info(struct neigh_table *tbl, struct sk_buff *skb,
1625 1625
1626 memset(&ndst, 0, sizeof(ndst)); 1626 memset(&ndst, 0, sizeof(ndst));
1627 1627
1628 for (cpu = 0; cpu < NR_CPUS; cpu++) { 1628 for_each_cpu(cpu) {
1629 struct neigh_statistics *st; 1629 struct neigh_statistics *st;
1630 1630
1631 if (!cpu_possible(cpu))
1632 continue;
1633
1634 st = per_cpu_ptr(tbl->stats, cpu); 1631 st = per_cpu_ptr(tbl->stats, cpu);
1635 ndst.ndts_allocs += st->allocs; 1632 ndst.ndts_allocs += st->allocs;
1636 ndst.ndts_destroys += st->destroys; 1633 ndst.ndts_destroys += st->destroys;
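The neighbour.c hunk above swaps the open-coded NR_CPUS loop plus cpu_possible() check for for_each_cpu(), which in this kernel generation already iterates only over possible CPUs. A minimal sketch of the same per-CPU aggregation pattern; sum_allocs() is a hypothetical helper, not part of the patch:

/* Hypothetical helper: fold one per-CPU neighbour counter the same way
 * neightbl_fill_info() now does.  for_each_cpu() walks every possible
 * CPU, so the old explicit cpu_possible() test becomes redundant.
 */
static unsigned long sum_allocs(struct neigh_table *tbl)
{
	int cpu;
	unsigned long total = 0;

	for_each_cpu(cpu) {
		struct neigh_statistics *st = per_cpu_ptr(tbl->stats, cpu);

		total += st->allocs;
	}
	return total;
}

The same conversion is applied further down to icmp_init(), icmpv6_init()/icmpv6_cleanup() and the fold_field() helpers in the IPv4, IPv6 and SCTP proc code.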
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 5f043d346694..7fc3e9e28c34 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -75,7 +75,7 @@
75 * By design there should only be *one* "controlling" process. In practice 75 * By design there should only be *one* "controlling" process. In practice
76 * multiple write accesses gives unpredictable result. Understood by "write" 76 * multiple write accesses gives unpredictable result. Understood by "write"
77 * to /proc gives result code thats should be read be the "writer". 77 * to /proc gives result code thats should be read be the "writer".
78 * For pratical use this should be no problem. 78 * For practical use this should be no problem.
79 * 79 *
80 * Note when adding devices to a specific CPU there good idea to also assign 80 * Note when adding devices to a specific CPU there good idea to also assign
81 * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU. 81 * /proc/irq/XX/smp_affinity so TX-interrupts gets bound to the same CPU.
@@ -96,7 +96,7 @@
96 * New xmit() return, do_div and misc clean up by Stephen Hemminger 96 * New xmit() return, do_div and misc clean up by Stephen Hemminger
97 * <shemminger@osdl.org> 040923 97 * <shemminger@osdl.org> 040923
98 * 98 *
99 * Rany Dunlap fixed u64 printk compiler waring 99 * Randy Dunlap fixed u64 printk compiler waring
100 * 100 *
101 * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org> 101 * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org>
102 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213 102 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
@@ -137,6 +137,7 @@
137#include <linux/ipv6.h> 137#include <linux/ipv6.h>
138#include <linux/udp.h> 138#include <linux/udp.h>
139#include <linux/proc_fs.h> 139#include <linux/proc_fs.h>
140#include <linux/seq_file.h>
140#include <linux/wait.h> 141#include <linux/wait.h>
141#include <net/checksum.h> 142#include <net/checksum.h>
142#include <net/ipv6.h> 143#include <net/ipv6.h>
@@ -151,7 +152,7 @@
151#include <asm/timex.h> 152#include <asm/timex.h>
152 153
153 154
154#define VERSION "pktgen v2.62: Packet Generator for packet performance testing.\n" 155#define VERSION "pktgen v2.63: Packet Generator for packet performance testing.\n"
155 156
156/* #define PG_DEBUG(a) a */ 157/* #define PG_DEBUG(a) a */
157#define PG_DEBUG(a) 158#define PG_DEBUG(a)
@@ -177,8 +178,8 @@
177#define T_REMDEV (1<<3) /* Remove all devs */ 178#define T_REMDEV (1<<3) /* Remove all devs */
178 179
179/* Locks */ 180/* Locks */
180#define thread_lock() spin_lock(&_thread_lock) 181#define thread_lock() down(&pktgen_sem)
181#define thread_unlock() spin_unlock(&_thread_lock) 182#define thread_unlock() up(&pktgen_sem)
182 183
183/* If lock -- can be removed after some work */ 184/* If lock -- can be removed after some work */
184#define if_lock(t) spin_lock(&(t->if_lock)); 185#define if_lock(t) spin_lock(&(t->if_lock));
@@ -186,7 +187,9 @@
186 187
187/* Used to help with determining the pkts on receive */ 188/* Used to help with determining the pkts on receive */
188#define PKTGEN_MAGIC 0xbe9be955 189#define PKTGEN_MAGIC 0xbe9be955
189#define PG_PROC_DIR "net/pktgen" 190#define PG_PROC_DIR "pktgen"
191#define PGCTRL "pgctrl"
192static struct proc_dir_entry *pg_proc_dir = NULL;
190 193
191#define MAX_CFLOWS 65536 194#define MAX_CFLOWS 65536
192 195
@@ -202,11 +205,8 @@ struct pktgen_dev {
202 * Try to keep frequent/infrequent used vars. separated. 205 * Try to keep frequent/infrequent used vars. separated.
203 */ 206 */
204 207
205 char ifname[32]; 208 char ifname[IFNAMSIZ];
206 struct proc_dir_entry *proc_ent;
207 char result[512]; 209 char result[512];
208 /* proc file names */
209 char fname[80];
210 210
211 struct pktgen_thread* pg_thread; /* the owner */ 211 struct pktgen_thread* pg_thread; /* the owner */
212 struct pktgen_dev *next; /* Used for chaining in the thread's run-queue */ 212 struct pktgen_dev *next; /* Used for chaining in the thread's run-queue */
@@ -244,7 +244,7 @@ struct pktgen_dev {
244 __u32 seq_num; 244 __u32 seq_num;
245 245
246 int clone_skb; /* Use multiple SKBs during packet gen. If this number 246 int clone_skb; /* Use multiple SKBs during packet gen. If this number
247 * is greater than 1, then that many coppies of the same 247 * is greater than 1, then that many copies of the same
248 * packet will be sent before a new packet is allocated. 248 * packet will be sent before a new packet is allocated.
249 * For instance, if you want to send 1024 identical packets 249 * For instance, if you want to send 1024 identical packets
250 * before creating a new packet, set clone_skb to 1024. 250 * before creating a new packet, set clone_skb to 1024.
@@ -330,8 +330,6 @@ struct pktgen_thread {
330 struct pktgen_dev *if_list; /* All device here */ 330 struct pktgen_dev *if_list; /* All device here */
331 struct pktgen_thread* next; 331 struct pktgen_thread* next;
332 char name[32]; 332 char name[32];
333 char fname[128]; /* name of proc file */
334 struct proc_dir_entry *proc_ent;
335 char result[512]; 333 char result[512];
336 u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */ 334 u32 max_before_softirq; /* We'll call do_softirq to prevent starvation. */
337 335
@@ -396,7 +394,7 @@ static inline s64 divremdi3(s64 x, s64 y, int type)
396 394
397/* End of hacks to deal with 64-bit math on x86 */ 395/* End of hacks to deal with 64-bit math on x86 */
398 396
399/** Convert to miliseconds */ 397/** Convert to milliseconds */
400static inline __u64 tv_to_ms(const struct timeval* tv) 398static inline __u64 tv_to_ms(const struct timeval* tv)
401{ 399{
402 __u64 ms = tv->tv_usec / 1000; 400 __u64 ms = tv->tv_usec / 1000;
@@ -425,7 +423,7 @@ static inline __u64 pg_div64(__u64 n, __u64 base)
425{ 423{
426 __u64 tmp = n; 424 __u64 tmp = n;
427/* 425/*
428 * How do we know if the architectrure we are running on 426 * How do we know if the architecture we are running on
429 * supports division with 64 bit base? 427 * supports division with 64 bit base?
430 * 428 *
431 */ 429 */
@@ -473,16 +471,6 @@ static inline __u64 tv_diff(const struct timeval* a, const struct timeval* b)
473 471
474static char version[] __initdata = VERSION; 472static char version[] __initdata = VERSION;
475 473
476static ssize_t proc_pgctrl_read(struct file* file, char __user * buf, size_t count, loff_t *ppos);
477static ssize_t proc_pgctrl_write(struct file* file, const char __user * buf, size_t count, loff_t *ppos);
478static int proc_if_read(char *buf , char **start, off_t offset, int len, int *eof, void *data);
479
480static int proc_thread_read(char *buf , char **start, off_t offset, int len, int *eof, void *data);
481static int proc_if_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data);
482static int proc_thread_write(struct file *file, const char __user *user_buffer, unsigned long count, void *data);
483static int create_proc_dir(void);
484static int remove_proc_dir(void);
485
486static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i); 474static int pktgen_remove_device(struct pktgen_thread* t, struct pktgen_dev *i);
487static int pktgen_add_device(struct pktgen_thread* t, const char* ifname); 475static int pktgen_add_device(struct pktgen_thread* t, const char* ifname);
488static struct pktgen_thread* pktgen_find_thread(const char* name); 476static struct pktgen_thread* pktgen_find_thread(const char* name);
@@ -503,83 +491,41 @@ static int pg_delay_d = 0;
503static int pg_clone_skb_d = 0; 491static int pg_clone_skb_d = 0;
504static int debug = 0; 492static int debug = 0;
505 493
506static DEFINE_SPINLOCK(_thread_lock); 494static DECLARE_MUTEX(pktgen_sem);
507static struct pktgen_thread *pktgen_threads = NULL; 495static struct pktgen_thread *pktgen_threads = NULL;
508 496
509static char module_fname[128];
510static struct proc_dir_entry *module_proc_ent = NULL;
511
512static struct notifier_block pktgen_notifier_block = { 497static struct notifier_block pktgen_notifier_block = {
513 .notifier_call = pktgen_device_event, 498 .notifier_call = pktgen_device_event,
514}; 499};
515 500
516static struct file_operations pktgen_fops = {
517 .read = proc_pgctrl_read,
518 .write = proc_pgctrl_write,
519 /* .ioctl = pktgen_ioctl, later maybe */
520};
521
522/* 501/*
523 * /proc handling functions 502 * /proc handling functions
524 * 503 *
525 */ 504 */
526 505
527static struct proc_dir_entry *pg_proc_dir = NULL; 506static int pgctrl_show(struct seq_file *seq, void *v)
528static int proc_pgctrl_read_eof=0;
529
530static ssize_t proc_pgctrl_read(struct file* file, char __user * buf,
531 size_t count, loff_t *ppos)
532{ 507{
533 char data[200]; 508 seq_puts(seq, VERSION);
534 int len = 0; 509 return 0;
535
536 if(proc_pgctrl_read_eof) {
537 proc_pgctrl_read_eof=0;
538 len = 0;
539 goto out;
540 }
541
542 sprintf(data, "%s", VERSION);
543
544 len = strlen(data);
545
546 if(len > count) {
547 len =-EFAULT;
548 goto out;
549 }
550
551 if (copy_to_user(buf, data, len)) {
552 len =-EFAULT;
553 goto out;
554 }
555
556 *ppos += len;
557 proc_pgctrl_read_eof=1; /* EOF next call */
558
559 out:
560 return len;
561} 510}
562 511
563static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf, 512static ssize_t pgctrl_write(struct file* file,const char __user * buf,
564 size_t count, loff_t *ppos) 513 size_t count, loff_t *ppos)
565{ 514{
566 char *data = NULL;
567 int err = 0; 515 int err = 0;
516 char data[128];
568 517
569 if (!capable(CAP_NET_ADMIN)){ 518 if (!capable(CAP_NET_ADMIN)){
570 err = -EPERM; 519 err = -EPERM;
571 goto out; 520 goto out;
572 } 521 }
573 522
574 data = (void*)vmalloc ((unsigned int)count); 523 if (count > sizeof(data))
524 count = sizeof(data);
575 525
576 if(!data) {
577 err = -ENOMEM;
578 goto out;
579 }
580 if (copy_from_user(data, buf, count)) { 526 if (copy_from_user(data, buf, count)) {
581 err =-EFAULT; 527 err = -EFAULT;
582 goto out_free; 528 goto out;
583 } 529 }
584 data[count-1] = 0; /* Make string */ 530 data[count-1] = 0; /* Make string */
585 531
@@ -594,31 +540,40 @@ static ssize_t proc_pgctrl_write(struct file* file,const char __user * buf,
594 540
595 err = count; 541 err = count;
596 542
597 out_free:
598 vfree (data);
599 out: 543 out:
600 return err; 544 return err;
601} 545}
602 546
603static int proc_if_read(char *buf , char **start, off_t offset, 547static int pgctrl_open(struct inode *inode, struct file *file)
604 int len, int *eof, void *data) 548{
549 return single_open(file, pgctrl_show, PDE(inode)->data);
550}
551
552static struct file_operations pktgen_fops = {
553 .owner = THIS_MODULE,
554 .open = pgctrl_open,
555 .read = seq_read,
556 .llseek = seq_lseek,
557 .write = pgctrl_write,
558 .release = single_release,
559};
560
561static int pktgen_if_show(struct seq_file *seq, void *v)
605{ 562{
606 char *p;
607 int i; 563 int i;
608 struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data); 564 struct pktgen_dev *pkt_dev = seq->private;
609 __u64 sa; 565 __u64 sa;
610 __u64 stopped; 566 __u64 stopped;
611 __u64 now = getCurUs(); 567 __u64 now = getCurUs();
612 568
613 p = buf; 569 seq_printf(seq, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n",
614 p += sprintf(p, "Params: count %llu min_pkt_size: %u max_pkt_size: %u\n", 570 (unsigned long long) pkt_dev->count,
615 (unsigned long long) pkt_dev->count, 571 pkt_dev->min_pkt_size, pkt_dev->max_pkt_size);
616 pkt_dev->min_pkt_size, pkt_dev->max_pkt_size);
617 572
618 p += sprintf(p, " frags: %d delay: %u clone_skb: %d ifname: %s\n", 573 seq_printf(seq, " frags: %d delay: %u clone_skb: %d ifname: %s\n",
619 pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname); 574 pkt_dev->nfrags, 1000*pkt_dev->delay_us+pkt_dev->delay_ns, pkt_dev->clone_skb, pkt_dev->ifname);
620 575
621 p += sprintf(p, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow); 576 seq_printf(seq, " flows: %u flowlen: %u\n", pkt_dev->cflows, pkt_dev->lflow);
622 577
623 578
624 if(pkt_dev->flags & F_IPV6) { 579 if(pkt_dev->flags & F_IPV6) {
@@ -626,19 +581,19 @@ static int proc_if_read(char *buf , char **start, off_t offset,
626 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr); 581 fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
627 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr); 582 fmt_ip6(b2, pkt_dev->min_in6_saddr.s6_addr);
628 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr); 583 fmt_ip6(b3, pkt_dev->max_in6_saddr.s6_addr);
629 p += sprintf(p, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3); 584 seq_printf(seq, " saddr: %s min_saddr: %s max_saddr: %s\n", b1, b2, b3);
630 585
631 fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr); 586 fmt_ip6(b1, pkt_dev->in6_daddr.s6_addr);
632 fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr); 587 fmt_ip6(b2, pkt_dev->min_in6_daddr.s6_addr);
633 fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr); 588 fmt_ip6(b3, pkt_dev->max_in6_daddr.s6_addr);
634 p += sprintf(p, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3); 589 seq_printf(seq, " daddr: %s min_daddr: %s max_daddr: %s\n", b1, b2, b3);
635 590
636 } 591 }
637 else 592 else
638 p += sprintf(p, " dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n", 593 seq_printf(seq," dst_min: %s dst_max: %s\n src_min: %s src_max: %s\n",
639 pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max); 594 pkt_dev->dst_min, pkt_dev->dst_max, pkt_dev->src_min, pkt_dev->src_max);
640 595
641 p += sprintf(p, " src_mac: "); 596 seq_puts(seq, " src_mac: ");
642 597
643 if ((pkt_dev->src_mac[0] == 0) && 598 if ((pkt_dev->src_mac[0] == 0) &&
644 (pkt_dev->src_mac[1] == 0) && 599 (pkt_dev->src_mac[1] == 0) &&
@@ -648,89 +603,89 @@ static int proc_if_read(char *buf , char **start, off_t offset,
648 (pkt_dev->src_mac[5] == 0)) 603 (pkt_dev->src_mac[5] == 0))
649 604
650 for (i = 0; i < 6; i++) 605 for (i = 0; i < 6; i++)
651 p += sprintf(p, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":"); 606 seq_printf(seq, "%02X%s", pkt_dev->odev->dev_addr[i], i == 5 ? " " : ":");
652 607
653 else 608 else
654 for (i = 0; i < 6; i++) 609 for (i = 0; i < 6; i++)
655 p += sprintf(p, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":"); 610 seq_printf(seq, "%02X%s", pkt_dev->src_mac[i], i == 5 ? " " : ":");
656 611
657 p += sprintf(p, "dst_mac: "); 612 seq_printf(seq, "dst_mac: ");
658 for (i = 0; i < 6; i++) 613 for (i = 0; i < 6; i++)
659 p += sprintf(p, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":"); 614 seq_printf(seq, "%02X%s", pkt_dev->dst_mac[i], i == 5 ? "\n" : ":");
660 615
661 p += sprintf(p, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n", 616 seq_printf(seq, " udp_src_min: %d udp_src_max: %d udp_dst_min: %d udp_dst_max: %d\n",
662 pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min, 617 pkt_dev->udp_src_min, pkt_dev->udp_src_max, pkt_dev->udp_dst_min,
663 pkt_dev->udp_dst_max); 618 pkt_dev->udp_dst_max);
664 619
665 p += sprintf(p, " src_mac_count: %d dst_mac_count: %d \n Flags: ", 620 seq_printf(seq, " src_mac_count: %d dst_mac_count: %d \n Flags: ",
666 pkt_dev->src_mac_count, pkt_dev->dst_mac_count); 621 pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
667 622
668 623
669 if (pkt_dev->flags & F_IPV6) 624 if (pkt_dev->flags & F_IPV6)
670 p += sprintf(p, "IPV6 "); 625 seq_printf(seq, "IPV6 ");
671 626
672 if (pkt_dev->flags & F_IPSRC_RND) 627 if (pkt_dev->flags & F_IPSRC_RND)
673 p += sprintf(p, "IPSRC_RND "); 628 seq_printf(seq, "IPSRC_RND ");
674 629
675 if (pkt_dev->flags & F_IPDST_RND) 630 if (pkt_dev->flags & F_IPDST_RND)
676 p += sprintf(p, "IPDST_RND "); 631 seq_printf(seq, "IPDST_RND ");
677 632
678 if (pkt_dev->flags & F_TXSIZE_RND) 633 if (pkt_dev->flags & F_TXSIZE_RND)
679 p += sprintf(p, "TXSIZE_RND "); 634 seq_printf(seq, "TXSIZE_RND ");
680 635
681 if (pkt_dev->flags & F_UDPSRC_RND) 636 if (pkt_dev->flags & F_UDPSRC_RND)
682 p += sprintf(p, "UDPSRC_RND "); 637 seq_printf(seq, "UDPSRC_RND ");
683 638
684 if (pkt_dev->flags & F_UDPDST_RND) 639 if (pkt_dev->flags & F_UDPDST_RND)
685 p += sprintf(p, "UDPDST_RND "); 640 seq_printf(seq, "UDPDST_RND ");
686 641
687 if (pkt_dev->flags & F_MACSRC_RND) 642 if (pkt_dev->flags & F_MACSRC_RND)
688 p += sprintf(p, "MACSRC_RND "); 643 seq_printf(seq, "MACSRC_RND ");
689 644
690 if (pkt_dev->flags & F_MACDST_RND) 645 if (pkt_dev->flags & F_MACDST_RND)
691 p += sprintf(p, "MACDST_RND "); 646 seq_printf(seq, "MACDST_RND ");
692 647
693 648
694 p += sprintf(p, "\n"); 649 seq_puts(seq, "\n");
695 650
696 sa = pkt_dev->started_at; 651 sa = pkt_dev->started_at;
697 stopped = pkt_dev->stopped_at; 652 stopped = pkt_dev->stopped_at;
698 if (pkt_dev->running) 653 if (pkt_dev->running)
699 stopped = now; /* not really stopped, more like last-running-at */ 654 stopped = now; /* not really stopped, more like last-running-at */
700 655
701 p += sprintf(p, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n", 656 seq_printf(seq, "Current:\n pkts-sofar: %llu errors: %llu\n started: %lluus stopped: %lluus idle: %lluus\n",
702 (unsigned long long) pkt_dev->sofar, 657 (unsigned long long) pkt_dev->sofar,
703 (unsigned long long) pkt_dev->errors, 658 (unsigned long long) pkt_dev->errors,
704 (unsigned long long) sa, 659 (unsigned long long) sa,
705 (unsigned long long) stopped, 660 (unsigned long long) stopped,
706 (unsigned long long) pkt_dev->idle_acc); 661 (unsigned long long) pkt_dev->idle_acc);
707 662
708 p += sprintf(p, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n", 663 seq_printf(seq, " seq_num: %d cur_dst_mac_offset: %d cur_src_mac_offset: %d\n",
709 pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset, pkt_dev->cur_src_mac_offset); 664 pkt_dev->seq_num, pkt_dev->cur_dst_mac_offset,
665 pkt_dev->cur_src_mac_offset);
710 666
711 if(pkt_dev->flags & F_IPV6) { 667 if(pkt_dev->flags & F_IPV6) {
712 char b1[128], b2[128]; 668 char b1[128], b2[128];
713 fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr); 669 fmt_ip6(b1, pkt_dev->cur_in6_daddr.s6_addr);
714 fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr); 670 fmt_ip6(b2, pkt_dev->cur_in6_saddr.s6_addr);
715 p += sprintf(p, " cur_saddr: %s cur_daddr: %s\n", b2, b1); 671 seq_printf(seq, " cur_saddr: %s cur_daddr: %s\n", b2, b1);
716 } 672 }
717 else 673 else
718 p += sprintf(p, " cur_saddr: 0x%x cur_daddr: 0x%x\n", 674 seq_printf(seq, " cur_saddr: 0x%x cur_daddr: 0x%x\n",
719 pkt_dev->cur_saddr, pkt_dev->cur_daddr); 675 pkt_dev->cur_saddr, pkt_dev->cur_daddr);
720 676
721 677
722 p += sprintf(p, " cur_udp_dst: %d cur_udp_src: %d\n", 678 seq_printf(seq, " cur_udp_dst: %d cur_udp_src: %d\n",
723 pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src); 679 pkt_dev->cur_udp_dst, pkt_dev->cur_udp_src);
724 680
725 p += sprintf(p, " flows: %u\n", pkt_dev->nflows); 681 seq_printf(seq, " flows: %u\n", pkt_dev->nflows);
726 682
727 if (pkt_dev->result[0]) 683 if (pkt_dev->result[0])
728 p += sprintf(p, "Result: %s\n", pkt_dev->result); 684 seq_printf(seq, "Result: %s\n", pkt_dev->result);
729 else 685 else
730 p += sprintf(p, "Result: Idle\n"); 686 seq_printf(seq, "Result: Idle\n");
731 *eof = 1;
732 687
733 return p - buf; 688 return 0;
734} 689}
735 690
736 691
@@ -802,13 +757,14 @@ done_str:
802 return i; 757 return i;
803} 758}
804 759
805static int proc_if_write(struct file *file, const char __user *user_buffer, 760static ssize_t pktgen_if_write(struct file *file, const char __user *user_buffer,
806 unsigned long count, void *data) 761 size_t count, loff_t *offset)
807{ 762{
763 struct seq_file *seq = (struct seq_file *) file->private_data;
764 struct pktgen_dev *pkt_dev = seq->private;
808 int i = 0, max, len; 765 int i = 0, max, len;
809 char name[16], valstr[32]; 766 char name[16], valstr[32];
810 unsigned long value = 0; 767 unsigned long value = 0;
811 struct pktgen_dev *pkt_dev = (struct pktgen_dev*)(data);
812 char* pg_result = NULL; 768 char* pg_result = NULL;
813 int tmp = 0; 769 int tmp = 0;
814 char buf[128]; 770 char buf[128];
@@ -849,7 +805,8 @@ static int proc_if_write(struct file *file, const char __user *user_buffer,
849 if (copy_from_user(tb, user_buffer, count)) 805 if (copy_from_user(tb, user_buffer, count))
850 return -EFAULT; 806 return -EFAULT;
851 tb[count] = 0; 807 tb[count] = 0;
852 printk("pktgen: %s,%lu buffer -:%s:-\n", name, count, tb); 808 printk("pktgen: %s,%lu buffer -:%s:-\n", name,
809 (unsigned long) count, tb);
853 } 810 }
854 811
855 if (!strcmp(name, "min_pkt_size")) { 812 if (!strcmp(name, "min_pkt_size")) {
@@ -1335,92 +1292,98 @@ static int proc_if_write(struct file *file, const char __user *user_buffer,
1335 return -EINVAL; 1292 return -EINVAL;
1336} 1293}
1337 1294
1338static int proc_thread_read(char *buf , char **start, off_t offset, 1295static int pktgen_if_open(struct inode *inode, struct file *file)
1339 int len, int *eof, void *data)
1340{ 1296{
1341 char *p; 1297 return single_open(file, pktgen_if_show, PDE(inode)->data);
1342 struct pktgen_thread *t = (struct pktgen_thread*)(data); 1298}
1343 struct pktgen_dev *pkt_dev = NULL;
1344 1299
1300static struct file_operations pktgen_if_fops = {
1301 .owner = THIS_MODULE,
1302 .open = pktgen_if_open,
1303 .read = seq_read,
1304 .llseek = seq_lseek,
1305 .write = pktgen_if_write,
1306 .release = single_release,
1307};
1345 1308
1346 if (!t) { 1309static int pktgen_thread_show(struct seq_file *seq, void *v)
1347 printk("pktgen: ERROR: could not find thread in proc_thread_read\n"); 1310{
1348 return -EINVAL; 1311 struct pktgen_thread *t = seq->private;
1349 } 1312 struct pktgen_dev *pkt_dev = NULL;
1313
1314 BUG_ON(!t);
1350 1315
1351 p = buf; 1316 seq_printf(seq, "Name: %s max_before_softirq: %d\n",
1352 p += sprintf(p, "Name: %s max_before_softirq: %d\n",
1353 t->name, t->max_before_softirq); 1317 t->name, t->max_before_softirq);
1354 1318
1355 p += sprintf(p, "Running: "); 1319 seq_printf(seq, "Running: ");
1356 1320
1357 if_lock(t); 1321 if_lock(t);
1358 for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) 1322 for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next)
1359 if(pkt_dev->running) 1323 if(pkt_dev->running)
1360 p += sprintf(p, "%s ", pkt_dev->ifname); 1324 seq_printf(seq, "%s ", pkt_dev->ifname);
1361 1325
1362 p += sprintf(p, "\nStopped: "); 1326 seq_printf(seq, "\nStopped: ");
1363 1327
1364 for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next) 1328 for(pkt_dev = t->if_list;pkt_dev; pkt_dev = pkt_dev->next)
1365 if(!pkt_dev->running) 1329 if(!pkt_dev->running)
1366 p += sprintf(p, "%s ", pkt_dev->ifname); 1330 seq_printf(seq, "%s ", pkt_dev->ifname);
1367 1331
1368 if (t->result[0]) 1332 if (t->result[0])
1369 p += sprintf(p, "\nResult: %s\n", t->result); 1333 seq_printf(seq, "\nResult: %s\n", t->result);
1370 else 1334 else
1371 p += sprintf(p, "\nResult: NA\n"); 1335 seq_printf(seq, "\nResult: NA\n");
1372
1373 *eof = 1;
1374 1336
1375 if_unlock(t); 1337 if_unlock(t);
1376 1338
1377 return p - buf; 1339 return 0;
1378} 1340}
1379 1341
1380static int proc_thread_write(struct file *file, const char __user *user_buffer, 1342static ssize_t pktgen_thread_write(struct file *file,
1381 unsigned long count, void *data) 1343 const char __user *user_buffer,
1344 size_t count, loff_t *offset)
1382{ 1345{
1346 struct seq_file *seq = (struct seq_file *) file->private_data;
1347 struct pktgen_thread *t = seq->private;
1383 int i = 0, max, len, ret; 1348 int i = 0, max, len, ret;
1384 char name[40]; 1349 char name[40];
1385 struct pktgen_thread *t;
1386 char *pg_result; 1350 char *pg_result;
1387 unsigned long value = 0; 1351 unsigned long value = 0;
1388 1352
1389 if (count < 1) { 1353 if (count < 1) {
1390 // sprintf(pg_result, "Wrong command format"); 1354 // sprintf(pg_result, "Wrong command format");
1391 return -EINVAL; 1355 return -EINVAL;
1392 } 1356 }
1393 1357
1394 max = count - i; 1358 max = count - i;
1395 len = count_trail_chars(&user_buffer[i], max); 1359 len = count_trail_chars(&user_buffer[i], max);
1396 if (len < 0) 1360 if (len < 0)
1397 return len; 1361 return len;
1398 1362
1399 i += len; 1363 i += len;
1400 1364
1401 /* Read variable name */ 1365 /* Read variable name */
1402 1366
1403 len = strn_len(&user_buffer[i], sizeof(name) - 1); 1367 len = strn_len(&user_buffer[i], sizeof(name) - 1);
1404 if (len < 0) 1368 if (len < 0)
1405 return len; 1369 return len;
1406 1370
1407 memset(name, 0, sizeof(name)); 1371 memset(name, 0, sizeof(name));
1408 if (copy_from_user(name, &user_buffer[i], len)) 1372 if (copy_from_user(name, &user_buffer[i], len))
1409 return -EFAULT; 1373 return -EFAULT;
1410 i += len; 1374 i += len;
1411 1375
1412 max = count -i; 1376 max = count -i;
1413 len = count_trail_chars(&user_buffer[i], max); 1377 len = count_trail_chars(&user_buffer[i], max);
1414 if (len < 0) 1378 if (len < 0)
1415 return len; 1379 return len;
1416 1380
1417 i += len; 1381 i += len;
1418 1382
1419 if (debug) 1383 if (debug)
1420 printk("pktgen: t=%s, count=%lu\n", name, count); 1384 printk("pktgen: t=%s, count=%lu\n", name,
1421 1385 (unsigned long) count);
1422 1386
1423 t = (struct pktgen_thread*)(data);
1424 if(!t) { 1387 if(!t) {
1425 printk("pktgen: ERROR: No thread\n"); 1388 printk("pktgen: ERROR: No thread\n");
1426 ret = -EINVAL; 1389 ret = -EINVAL;
@@ -1474,21 +1437,19 @@ static int proc_thread_write(struct file *file, const char __user *user_buffer,
1474 return ret; 1437 return ret;
1475} 1438}
1476 1439
1477static int create_proc_dir(void) 1440static int pktgen_thread_open(struct inode *inode, struct file *file)
1478{ 1441{
1479 pg_proc_dir = proc_mkdir(PG_PROC_DIR, NULL); 1442 return single_open(file, pktgen_thread_show, PDE(inode)->data);
1480
1481 if (!pg_proc_dir)
1482 return -ENODEV;
1483
1484 return 0;
1485} 1443}
1486 1444
1487static int remove_proc_dir(void) 1445static struct file_operations pktgen_thread_fops = {
1488{ 1446 .owner = THIS_MODULE,
1489 remove_proc_entry(PG_PROC_DIR, NULL); 1447 .open = pktgen_thread_open,
1490 return 0; 1448 .read = seq_read,
1491} 1449 .llseek = seq_lseek,
1450 .write = pktgen_thread_write,
1451 .release = single_release,
1452};
1492 1453
1493/* Think find or remove for NN */ 1454/* Think find or remove for NN */
1494static struct pktgen_dev *__pktgen_NN_threads(const char* ifname, int remove) 1455static struct pktgen_dev *__pktgen_NN_threads(const char* ifname, int remove)
@@ -1702,7 +1663,7 @@ static void spin(struct pktgen_dev *pkt_dev, __u64 spin_until_us)
1702 start = now = getCurUs(); 1663 start = now = getCurUs();
1703 printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now)); 1664 printk(KERN_INFO "sleeping for %d\n", (int)(spin_until_us - now));
1704 while (now < spin_until_us) { 1665 while (now < spin_until_us) {
1705 /* TODO: optimise sleeping behavior */ 1666 /* TODO: optimize sleeping behavior */
1706 if (spin_until_us - now > jiffies_to_usecs(1)+1) 1667 if (spin_until_us - now > jiffies_to_usecs(1)+1)
1707 schedule_timeout_interruptible(1); 1668 schedule_timeout_interruptible(1);
1708 else if (spin_until_us - now > 100) { 1669 else if (spin_until_us - now > 100) {
@@ -2361,7 +2322,7 @@ static void pktgen_stop_all_threads_ifs(void)
2361 pktgen_stop(t); 2322 pktgen_stop(t);
2362 t = t->next; 2323 t = t->next;
2363 } 2324 }
2364 thread_unlock(); 2325 thread_unlock();
2365} 2326}
2366 2327
2367static int thread_is_running(struct pktgen_thread *t ) 2328static int thread_is_running(struct pktgen_thread *t )
@@ -2552,10 +2513,9 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
2552 2513
2553 struct pktgen_thread *tmp = pktgen_threads; 2514 struct pktgen_thread *tmp = pktgen_threads;
2554 2515
2555 if (strlen(t->fname)) 2516 remove_proc_entry(t->name, pg_proc_dir);
2556 remove_proc_entry(t->fname, NULL);
2557 2517
2558 thread_lock(); 2518 thread_lock();
2559 2519
2560 if (tmp == t) 2520 if (tmp == t)
2561 pktgen_threads = tmp->next; 2521 pktgen_threads = tmp->next;
@@ -2825,7 +2785,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t, const char* i
2825 if_lock(t); 2785 if_lock(t);
2826 2786
2827 for(pkt_dev=t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) { 2787 for(pkt_dev=t->if_list; pkt_dev; pkt_dev = pkt_dev->next ) {
2828 if (strcmp(pkt_dev->ifname, ifname) == 0) { 2788 if (strncmp(pkt_dev->ifname, ifname, IFNAMSIZ) == 0) {
2829 break; 2789 break;
2830 } 2790 }
2831 } 2791 }
@@ -2864,74 +2824,70 @@ static int add_dev_to_thread(struct pktgen_thread *t, struct pktgen_dev *pkt_dev
2864static int pktgen_add_device(struct pktgen_thread *t, const char* ifname) 2824static int pktgen_add_device(struct pktgen_thread *t, const char* ifname)
2865{ 2825{
2866 struct pktgen_dev *pkt_dev; 2826 struct pktgen_dev *pkt_dev;
2827 struct proc_dir_entry *pe;
2867 2828
2868 /* We don't allow a device to be on several threads */ 2829 /* We don't allow a device to be on several threads */
2869 2830
2870 if( (pkt_dev = __pktgen_NN_threads(ifname, FIND)) == NULL) { 2831 pkt_dev = __pktgen_NN_threads(ifname, FIND);
2871 2832 if (pkt_dev) {
2872 pkt_dev = kmalloc(sizeof(struct pktgen_dev), GFP_KERNEL); 2833 printk("pktgen: ERROR: interface already used.\n");
2873 if (!pkt_dev) 2834 return -EBUSY;
2874 return -ENOMEM; 2835 }
2875 2836
2876 memset(pkt_dev, 0, sizeof(struct pktgen_dev)); 2837 pkt_dev = kzalloc(sizeof(struct pktgen_dev), GFP_KERNEL);
2838 if (!pkt_dev)
2839 return -ENOMEM;
2877 2840
2878 pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state)); 2841 pkt_dev->flows = vmalloc(MAX_CFLOWS*sizeof(struct flow_state));
2879 if (pkt_dev->flows == NULL) { 2842 if (pkt_dev->flows == NULL) {
2880 kfree(pkt_dev); 2843 kfree(pkt_dev);
2881 return -ENOMEM; 2844 return -ENOMEM;
2882 } 2845 }
2883 memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state)); 2846 memset(pkt_dev->flows, 0, MAX_CFLOWS*sizeof(struct flow_state));
2884
2885 pkt_dev->min_pkt_size = ETH_ZLEN;
2886 pkt_dev->max_pkt_size = ETH_ZLEN;
2887 pkt_dev->nfrags = 0;
2888 pkt_dev->clone_skb = pg_clone_skb_d;
2889 pkt_dev->delay_us = pg_delay_d / 1000;
2890 pkt_dev->delay_ns = pg_delay_d % 1000;
2891 pkt_dev->count = pg_count_d;
2892 pkt_dev->sofar = 0;
2893 pkt_dev->udp_src_min = 9; /* sink port */
2894 pkt_dev->udp_src_max = 9;
2895 pkt_dev->udp_dst_min = 9;
2896 pkt_dev->udp_dst_max = 9;
2897
2898 strncpy(pkt_dev->ifname, ifname, 31);
2899 sprintf(pkt_dev->fname, "%s/%s", PG_PROC_DIR, ifname);
2900
2901 if (! pktgen_setup_dev(pkt_dev)) {
2902 printk("pktgen: ERROR: pktgen_setup_dev failed.\n");
2903 if (pkt_dev->flows)
2904 vfree(pkt_dev->flows);
2905 kfree(pkt_dev);
2906 return -ENODEV;
2907 }
2908 2847
2909 pkt_dev->proc_ent = create_proc_entry(pkt_dev->fname, 0600, NULL); 2848 pkt_dev->min_pkt_size = ETH_ZLEN;
2910 if (!pkt_dev->proc_ent) { 2849 pkt_dev->max_pkt_size = ETH_ZLEN;
2911 printk("pktgen: cannot create %s procfs entry.\n", pkt_dev->fname); 2850 pkt_dev->nfrags = 0;
2912 if (pkt_dev->flows) 2851 pkt_dev->clone_skb = pg_clone_skb_d;
2913 vfree(pkt_dev->flows); 2852 pkt_dev->delay_us = pg_delay_d / 1000;
2914 kfree(pkt_dev); 2853 pkt_dev->delay_ns = pg_delay_d % 1000;
2915 return -EINVAL; 2854 pkt_dev->count = pg_count_d;
2916 } 2855 pkt_dev->sofar = 0;
2917 pkt_dev->proc_ent->read_proc = proc_if_read; 2856 pkt_dev->udp_src_min = 9; /* sink port */
2918 pkt_dev->proc_ent->write_proc = proc_if_write; 2857 pkt_dev->udp_src_max = 9;
2919 pkt_dev->proc_ent->data = (void*)(pkt_dev); 2858 pkt_dev->udp_dst_min = 9;
2920 pkt_dev->proc_ent->owner = THIS_MODULE; 2859 pkt_dev->udp_dst_max = 9;
2860
2861 strncpy(pkt_dev->ifname, ifname, IFNAMSIZ);
2862
2863 if (! pktgen_setup_dev(pkt_dev)) {
2864 printk("pktgen: ERROR: pktgen_setup_dev failed.\n");
2865 if (pkt_dev->flows)
2866 vfree(pkt_dev->flows);
2867 kfree(pkt_dev);
2868 return -ENODEV;
2869 }
2870
2871 pe = create_proc_entry(ifname, 0600, pg_proc_dir);
2872 if (!pe) {
2873 printk("pktgen: cannot create %s/%s procfs entry.\n",
2874 PG_PROC_DIR, ifname);
2875 if (pkt_dev->flows)
2876 vfree(pkt_dev->flows);
2877 kfree(pkt_dev);
2878 return -EINVAL;
2879 }
2880 pe->proc_fops = &pktgen_if_fops;
2881 pe->data = pkt_dev;
2921 2882
2922 return add_dev_to_thread(t, pkt_dev); 2883 return add_dev_to_thread(t, pkt_dev);
2923 }
2924 else {
2925 printk("pktgen: ERROR: interface already used.\n");
2926 return -EBUSY;
2927 }
2928} 2884}
2929 2885
2930static struct pktgen_thread *pktgen_find_thread(const char* name) 2886static struct pktgen_thread *pktgen_find_thread(const char* name)
2931{ 2887{
2932 struct pktgen_thread *t = NULL; 2888 struct pktgen_thread *t = NULL;
2933 2889
2934 thread_lock(); 2890 thread_lock();
2935 2891
2936 t = pktgen_threads; 2892 t = pktgen_threads;
2937 while (t) { 2893 while (t) {
@@ -2947,6 +2903,7 @@ static struct pktgen_thread *pktgen_find_thread(const char* name)
2947static int pktgen_create_thread(const char* name, int cpu) 2903static int pktgen_create_thread(const char* name, int cpu)
2948{ 2904{
2949 struct pktgen_thread *t = NULL; 2905 struct pktgen_thread *t = NULL;
2906 struct proc_dir_entry *pe;
2950 2907
2951 if (strlen(name) > 31) { 2908 if (strlen(name) > 31) {
2952 printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n"); 2909 printk("pktgen: ERROR: Thread name cannot be more than 31 characters.\n");
@@ -2958,28 +2915,26 @@ static int pktgen_create_thread(const char* name, int cpu)
2958 return -EINVAL; 2915 return -EINVAL;
2959 } 2916 }
2960 2917
2961 t = (struct pktgen_thread*)(kmalloc(sizeof(struct pktgen_thread), GFP_KERNEL)); 2918 t = kzalloc(sizeof(struct pktgen_thread), GFP_KERNEL);
2962 if (!t) { 2919 if (!t) {
2963 printk("pktgen: ERROR: out of memory, can't create new thread.\n"); 2920 printk("pktgen: ERROR: out of memory, can't create new thread.\n");
2964 return -ENOMEM; 2921 return -ENOMEM;
2965 } 2922 }
2966 2923
2967 memset(t, 0, sizeof(struct pktgen_thread));
2968 strcpy(t->name, name); 2924 strcpy(t->name, name);
2969 spin_lock_init(&t->if_lock); 2925 spin_lock_init(&t->if_lock);
2970 t->cpu = cpu; 2926 t->cpu = cpu;
2971 2927
2972 sprintf(t->fname, "%s/%s", PG_PROC_DIR, t->name); 2928 pe = create_proc_entry(t->name, 0600, pg_proc_dir);
2973 t->proc_ent = create_proc_entry(t->fname, 0600, NULL); 2929 if (!pe) {
2974 if (!t->proc_ent) { 2930 printk("pktgen: cannot create %s/%s procfs entry.\n",
2975 printk("pktgen: cannot create %s procfs entry.\n", t->fname); 2931 PG_PROC_DIR, t->name);
2976 kfree(t); 2932 kfree(t);
2977 return -EINVAL; 2933 return -EINVAL;
2978 } 2934 }
2979 t->proc_ent->read_proc = proc_thread_read; 2935
2980 t->proc_ent->write_proc = proc_thread_write; 2936 pe->proc_fops = &pktgen_thread_fops;
2981 t->proc_ent->data = (void*)(t); 2937 pe->data = t;
2982 t->proc_ent->owner = THIS_MODULE;
2983 2938
2984 t->next = pktgen_threads; 2939 t->next = pktgen_threads;
2985 pktgen_threads = t; 2940 pktgen_threads = t;
@@ -3034,8 +2989,7 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_
3034 2989
3035 /* Clean up proc file system */ 2990 /* Clean up proc file system */
3036 2991
3037 if (strlen(pkt_dev->fname)) 2992 remove_proc_entry(pkt_dev->ifname, pg_proc_dir);
3038 remove_proc_entry(pkt_dev->fname, NULL);
3039 2993
3040 if (pkt_dev->flows) 2994 if (pkt_dev->flows)
3041 vfree(pkt_dev->flows); 2995 vfree(pkt_dev->flows);
@@ -3046,31 +3000,31 @@ static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *pkt_
3046static int __init pg_init(void) 3000static int __init pg_init(void)
3047{ 3001{
3048 int cpu; 3002 int cpu;
3049 printk(version); 3003 struct proc_dir_entry *pe;
3050 3004
3051 module_fname[0] = 0; 3005 printk(version);
3052 3006
3053 create_proc_dir(); 3007 pg_proc_dir = proc_mkdir(PG_PROC_DIR, proc_net);
3008 if (!pg_proc_dir)
3009 return -ENODEV;
3010 pg_proc_dir->owner = THIS_MODULE;
3054 3011
3055 sprintf(module_fname, "%s/pgctrl", PG_PROC_DIR); 3012 pe = create_proc_entry(PGCTRL, 0600, pg_proc_dir);
3056 module_proc_ent = create_proc_entry(module_fname, 0600, NULL); 3013 if (pe == NULL) {
3057 if (!module_proc_ent) { 3014 printk("pktgen: ERROR: cannot create %s procfs entry.\n", PGCTRL);
3058 printk("pktgen: ERROR: cannot create %s procfs entry.\n", module_fname); 3015 proc_net_remove(PG_PROC_DIR);
3059 return -EINVAL; 3016 return -EINVAL;
3060 } 3017 }
3061 3018
3062 module_proc_ent->proc_fops = &pktgen_fops; 3019 pe->proc_fops = &pktgen_fops;
3063 module_proc_ent->data = NULL; 3020 pe->data = NULL;
3064 3021
3065 /* Register us to receive netdevice events */ 3022 /* Register us to receive netdevice events */
3066 register_netdevice_notifier(&pktgen_notifier_block); 3023 register_netdevice_notifier(&pktgen_notifier_block);
3067 3024
3068 for (cpu = 0; cpu < NR_CPUS ; cpu++) { 3025 for_each_online_cpu(cpu) {
3069 char buf[30]; 3026 char buf[30];
3070 3027
3071 if (!cpu_online(cpu))
3072 continue;
3073
3074 sprintf(buf, "kpktgend_%i", cpu); 3028 sprintf(buf, "kpktgend_%i", cpu);
3075 pktgen_create_thread(buf, cpu); 3029 pktgen_create_thread(buf, cpu);
3076 } 3030 }
@@ -3095,10 +3049,8 @@ static void __exit pg_cleanup(void)
3095 unregister_netdevice_notifier(&pktgen_notifier_block); 3049 unregister_netdevice_notifier(&pktgen_notifier_block);
3096 3050
3097 /* Clean up proc file system */ 3051 /* Clean up proc file system */
3098 3052 remove_proc_entry(PGCTRL, pg_proc_dir);
3099 remove_proc_entry(module_fname, NULL); 3053 proc_net_remove(PG_PROC_DIR);
3100
3101 remove_proc_dir();
3102} 3054}
3103 3055
3104 3056
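The pktgen.c changes above replace hand-rolled read_proc/write_proc handlers (manual sprintf into the caller's buffer, *eof bookkeeping, a vmalloc'd scratch buffer) with the seq_file interface: each proc file gets a *_show() callback, open() wires it up through single_open() with the proc entry's ->data as seq->private, and seq_read()/seq_lseek()/single_release() do the buffering; write handlers recover the seq_file from file->private_data. A minimal sketch of that pattern for a hypothetical read-only entry, using the 2.6-era proc API seen in the patch (the foo_* names and struct foo_state are illustrative, not from pktgen):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct foo_state {			/* hypothetical private state */
	unsigned long counter;
};

static int foo_show(struct seq_file *seq, void *v)
{
	struct foo_state *st = seq->private;	/* set by single_open() */

	seq_printf(seq, "counter: %lu\n", st->counter);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is whatever was stored in pe->data below. */
	return single_open(file, foo_show, PDE(inode)->data);
}

static struct file_operations foo_fops = {
	.owner   = THIS_MODULE,
	.open    = foo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* Registration, as pg_init() does for pgctrl and the per-device files: */
static int foo_register(struct proc_dir_entry *parent, struct foo_state *st)
{
	struct proc_dir_entry *pe = create_proc_entry("foo", 0400, parent);

	if (!pe)
		return -ENOMEM;
	pe->proc_fops = &foo_fops;
	pe->data = st;
	return 0;
}

A writable entry additionally sets .write and, like pktgen_if_write(), casts file->private_data to a struct seq_file * to reach its private pointer.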
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 02cd4cde2112..ef9d46b91eb9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -122,6 +122,8 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
122 * __alloc_skb - allocate a network buffer 122 * __alloc_skb - allocate a network buffer
123 * @size: size to allocate 123 * @size: size to allocate
124 * @gfp_mask: allocation mask 124 * @gfp_mask: allocation mask
125 * @fclone: allocate from fclone cache instead of head cache
126 * and allocate a cloned (child) skb
125 * 127 *
126 * Allocate a new &sk_buff. The returned buffer has no headroom and a 128 * Allocate a new &sk_buff. The returned buffer has no headroom and a
127 * tail room of size bytes. The object has a reference count of one. 129 * tail room of size bytes. The object has a reference count of one.
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 1186dc44cdff..3f25cadccddd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -719,22 +719,9 @@ static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
719 if (saddr->sdn_flags & ~SDF_WILD) 719 if (saddr->sdn_flags & ~SDF_WILD)
720 return -EINVAL; 720 return -EINVAL;
721 721
722#if 1
723 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum || 722 if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
724 (saddr->sdn_flags & SDF_WILD))) 723 (saddr->sdn_flags & SDF_WILD)))
725 return -EACCES; 724 return -EACCES;
726#else
727 /*
728 * Maybe put the default actions in the default security ops for
729 * dn_prot_sock ? Would be nice if the capable call would go there
730 * too.
731 */
732 if (security_dn_prot_sock(saddr) &&
733 !capable(CAP_NET_BIND_SERVICE) ||
734 saddr->sdn_objnum || (saddr->sdn_flags & SDF_WILD))
735 return -EACCES;
736#endif
737
738 725
739 if (!(saddr->sdn_flags & SDF_WILD)) { 726 if (!(saddr->sdn_flags & SDF_WILD)) {
740 if (dn_ntohs(saddr->sdn_nodeaddrl)) { 727 if (dn_ntohs(saddr->sdn_nodeaddrl)) {
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 74f2207e131a..4ec4b2ca6ab1 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -715,6 +715,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
715 break; 715 break;
716 ret = 0; 716 ret = 0;
717 if (ifa->ifa_mask != sin->sin_addr.s_addr) { 717 if (ifa->ifa_mask != sin->sin_addr.s_addr) {
718 u32 old_mask = ifa->ifa_mask;
718 inet_del_ifa(in_dev, ifap, 0); 719 inet_del_ifa(in_dev, ifap, 0);
719 ifa->ifa_mask = sin->sin_addr.s_addr; 720 ifa->ifa_mask = sin->sin_addr.s_addr;
720 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask); 721 ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
@@ -728,7 +729,7 @@ int devinet_ioctl(unsigned int cmd, void __user *arg)
728 if ((dev->flags & IFF_BROADCAST) && 729 if ((dev->flags & IFF_BROADCAST) &&
729 (ifa->ifa_prefixlen < 31) && 730 (ifa->ifa_prefixlen < 31) &&
730 (ifa->ifa_broadcast == 731 (ifa->ifa_broadcast ==
731 (ifa->ifa_local|~ifa->ifa_mask))) { 732 (ifa->ifa_local|~old_mask))) {
732 ifa->ifa_broadcast = (ifa->ifa_local | 733 ifa->ifa_broadcast = (ifa->ifa_local |
733 ~sin->sin_addr.s_addr); 734 ~sin->sin_addr.s_addr);
734 } 735 }
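The devinet.c hunk fixes an ordering bug in the SIOCSIFNETMASK path: the test that decides whether the broadcast address was still the default one must use the mask being replaced, but the old code compared against ifa_mask after it had already been overwritten. A sketch of the corrected logic with the ioctl plumbing omitted; update_mask_and_broadcast() is a hypothetical wrapper and new_mask stands in for sin->sin_addr.s_addr:

/* Hypothetical wrapper around the corrected SIOCSIFNETMASK logic. */
static void update_mask_and_broadcast(struct in_ifaddr *ifa,
				      struct net_device *dev, u32 new_mask)
{
	u32 old_mask = ifa->ifa_mask;		/* remember before overwriting */

	ifa->ifa_mask      = new_mask;
	ifa->ifa_prefixlen = inet_mask_len(new_mask);

	/* Only auto-update a broadcast address that still matched the
	 * default derived from the old mask, i.e. was never customized. */
	if ((dev->flags & IFF_BROADCAST) &&
	    ifa->ifa_prefixlen < 31 &&
	    ifa->ifa_broadcast == (ifa->ifa_local | ~old_mask))
		ifa->ifa_broadcast = ifa->ifa_local | ~new_mask;
}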
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 0093ea08c7f5..66247f38b371 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2404,7 +2404,7 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
2404 prefix = htonl(l->key); 2404 prefix = htonl(l->key);
2405 2405
2406 list_for_each_entry_rcu(fa, &li->falh, fa_list) { 2406 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2407 const struct fib_info *fi = rcu_dereference(fa->fa_info); 2407 const struct fib_info *fi = fa->fa_info;
2408 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi); 2408 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2409 2409
2410 if (fa->fa_type == RTN_BROADCAST 2410 if (fa->fa_type == RTN_BROADCAST
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 90dca711ac9f..175e093ec564 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -1108,12 +1108,9 @@ void __init icmp_init(struct net_proto_family *ops)
1108 struct inet_sock *inet; 1108 struct inet_sock *inet;
1109 int i; 1109 int i;
1110 1110
1111 for (i = 0; i < NR_CPUS; i++) { 1111 for_each_cpu(i) {
1112 int err; 1112 int err;
1113 1113
1114 if (!cpu_possible(i))
1115 continue;
1116
1117 err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP, 1114 err = sock_create_kern(PF_INET, SOCK_RAW, IPPROTO_ICMP,
1118 &per_cpu(__icmp_socket, i)); 1115 &per_cpu(__icmp_socket, i));
1119 1116
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 1ad5202e556b..87e350069abb 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1023,10 +1023,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page,
1023 int alloclen; 1023 int alloclen;
1024 1024
1025 skb_prev = skb; 1025 skb_prev = skb;
1026 if (skb_prev) 1026 fraggap = skb_prev->len - maxfraglen;
1027 fraggap = skb_prev->len - maxfraglen;
1028 else
1029 fraggap = 0;
1030 1027
1031 alloclen = fragheaderlen + hh_len + fraggap + 15; 1028 alloclen = fragheaderlen + hh_len + fraggap + 15;
1032 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); 1029 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c
index 07a80b56e8dc..422ab68ee7fb 100644
--- a/net/ipv4/netfilter/ip_conntrack_core.c
+++ b/net/ipv4/netfilter/ip_conntrack_core.c
@@ -50,7 +50,7 @@
50#include <linux/netfilter_ipv4/ip_conntrack_core.h> 50#include <linux/netfilter_ipv4/ip_conntrack_core.h>
51#include <linux/netfilter_ipv4/listhelp.h> 51#include <linux/netfilter_ipv4/listhelp.h>
52 52
53#define IP_CONNTRACK_VERSION "2.3" 53#define IP_CONNTRACK_VERSION "2.4"
54 54
55#if 0 55#if 0
56#define DEBUGP printk 56#define DEBUGP printk
@@ -148,16 +148,20 @@ DEFINE_PER_CPU(struct ip_conntrack_stat, ip_conntrack_stat);
148static int ip_conntrack_hash_rnd_initted; 148static int ip_conntrack_hash_rnd_initted;
149static unsigned int ip_conntrack_hash_rnd; 149static unsigned int ip_conntrack_hash_rnd;
150 150
151static u_int32_t 151static u_int32_t __hash_conntrack(const struct ip_conntrack_tuple *tuple,
152hash_conntrack(const struct ip_conntrack_tuple *tuple) 152 unsigned int size, unsigned int rnd)
153{ 153{
154#if 0
155 dump_tuple(tuple);
156#endif
157 return (jhash_3words(tuple->src.ip, 154 return (jhash_3words(tuple->src.ip,
158 (tuple->dst.ip ^ tuple->dst.protonum), 155 (tuple->dst.ip ^ tuple->dst.protonum),
159 (tuple->src.u.all | (tuple->dst.u.all << 16)), 156 (tuple->src.u.all | (tuple->dst.u.all << 16)),
160 ip_conntrack_hash_rnd) % ip_conntrack_htable_size); 157 rnd) % size);
158}
159
160static u_int32_t
161hash_conntrack(const struct ip_conntrack_tuple *tuple)
162{
163 return __hash_conntrack(tuple, ip_conntrack_htable_size,
164 ip_conntrack_hash_rnd);
161} 165}
162 166
163int 167int
@@ -1341,14 +1345,13 @@ static int kill_all(struct ip_conntrack *i, void *data)
1341 return 1; 1345 return 1;
1342} 1346}
1343 1347
1344static void free_conntrack_hash(void) 1348static void free_conntrack_hash(struct list_head *hash, int vmalloced,int size)
1345{ 1349{
1346 if (ip_conntrack_vmalloc) 1350 if (vmalloced)
1347 vfree(ip_conntrack_hash); 1351 vfree(hash);
1348 else 1352 else
1349 free_pages((unsigned long)ip_conntrack_hash, 1353 free_pages((unsigned long)hash,
1350 get_order(sizeof(struct list_head) 1354 get_order(sizeof(struct list_head) * size));
1351 * ip_conntrack_htable_size));
1352} 1355}
1353 1356
1354void ip_conntrack_flush() 1357void ip_conntrack_flush()
@@ -1378,12 +1381,83 @@ void ip_conntrack_cleanup(void)
1378 ip_conntrack_flush(); 1381 ip_conntrack_flush();
1379 kmem_cache_destroy(ip_conntrack_cachep); 1382 kmem_cache_destroy(ip_conntrack_cachep);
1380 kmem_cache_destroy(ip_conntrack_expect_cachep); 1383 kmem_cache_destroy(ip_conntrack_expect_cachep);
1381 free_conntrack_hash(); 1384 free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
1385 ip_conntrack_htable_size);
1382 nf_unregister_sockopt(&so_getorigdst); 1386 nf_unregister_sockopt(&so_getorigdst);
1383} 1387}
1384 1388
1385static int hashsize; 1389static struct list_head *alloc_hashtable(int size, int *vmalloced)
1386module_param(hashsize, int, 0400); 1390{
1391 struct list_head *hash;
1392 unsigned int i;
1393
1394 *vmalloced = 0;
1395 hash = (void*)__get_free_pages(GFP_KERNEL,
1396 get_order(sizeof(struct list_head)
1397 * size));
1398 if (!hash) {
1399 *vmalloced = 1;
1400 printk(KERN_WARNING"ip_conntrack: falling back to vmalloc.\n");
1401 hash = vmalloc(sizeof(struct list_head) * size);
1402 }
1403
1404 if (hash)
1405 for (i = 0; i < size; i++)
1406 INIT_LIST_HEAD(&hash[i]);
1407
1408 return hash;
1409}
1410
1411int set_hashsize(const char *val, struct kernel_param *kp)
1412{
1413 int i, bucket, hashsize, vmalloced;
1414 int old_vmalloced, old_size;
1415 int rnd;
1416 struct list_head *hash, *old_hash;
1417 struct ip_conntrack_tuple_hash *h;
1418
1419 /* On boot, we can set this without any fancy locking. */
1420 if (!ip_conntrack_htable_size)
1421 return param_set_int(val, kp);
1422
1423 hashsize = simple_strtol(val, NULL, 0);
1424 if (!hashsize)
1425 return -EINVAL;
1426
1427 hash = alloc_hashtable(hashsize, &vmalloced);
1428 if (!hash)
1429 return -ENOMEM;
1430
1431 /* We have to rehash for the new table anyway, so we also can
1432 * use a new random seed */
1433 get_random_bytes(&rnd, 4);
1434
1435 write_lock_bh(&ip_conntrack_lock);
1436 for (i = 0; i < ip_conntrack_htable_size; i++) {
1437 while (!list_empty(&ip_conntrack_hash[i])) {
1438 h = list_entry(ip_conntrack_hash[i].next,
1439 struct ip_conntrack_tuple_hash, list);
1440 list_del(&h->list);
1441 bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
1442 list_add_tail(&h->list, &hash[bucket]);
1443 }
1444 }
1445 old_size = ip_conntrack_htable_size;
1446 old_vmalloced = ip_conntrack_vmalloc;
1447 old_hash = ip_conntrack_hash;
1448
1449 ip_conntrack_htable_size = hashsize;
1450 ip_conntrack_vmalloc = vmalloced;
1451 ip_conntrack_hash = hash;
1452 ip_conntrack_hash_rnd = rnd;
1453 write_unlock_bh(&ip_conntrack_lock);
1454
1455 free_conntrack_hash(old_hash, old_vmalloced, old_size);
1456 return 0;
1457}
1458
1459module_param_call(hashsize, set_hashsize, param_get_uint,
1460 &ip_conntrack_htable_size, 0600);
1387 1461
1388int __init ip_conntrack_init(void) 1462int __init ip_conntrack_init(void)
1389{ 1463{
@@ -1392,9 +1466,7 @@ int __init ip_conntrack_init(void)
1392 1466
1393 /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB 1467 /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
1394 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */ 1468 * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
1395 if (hashsize) { 1469 if (!ip_conntrack_htable_size) {
1396 ip_conntrack_htable_size = hashsize;
1397 } else {
1398 ip_conntrack_htable_size 1470 ip_conntrack_htable_size
1399 = (((num_physpages << PAGE_SHIFT) / 16384) 1471 = (((num_physpages << PAGE_SHIFT) / 16384)
1400 / sizeof(struct list_head)); 1472 / sizeof(struct list_head));
@@ -1416,20 +1488,8 @@ int __init ip_conntrack_init(void)
1416 return ret; 1488 return ret;
1417 } 1489 }
1418 1490
1419 /* AK: the hash table is twice as big than needed because it 1491 ip_conntrack_hash = alloc_hashtable(ip_conntrack_htable_size,
1420 uses list_head. it would be much nicer to caches to use a 1492 &ip_conntrack_vmalloc);
1421 single pointer list head here. */
1422 ip_conntrack_vmalloc = 0;
1423 ip_conntrack_hash
1424 =(void*)__get_free_pages(GFP_KERNEL,
1425 get_order(sizeof(struct list_head)
1426 *ip_conntrack_htable_size));
1427 if (!ip_conntrack_hash) {
1428 ip_conntrack_vmalloc = 1;
1429 printk(KERN_WARNING "ip_conntrack: falling back to vmalloc.\n");
1430 ip_conntrack_hash = vmalloc(sizeof(struct list_head)
1431 * ip_conntrack_htable_size);
1432 }
1433 if (!ip_conntrack_hash) { 1493 if (!ip_conntrack_hash) {
1434 printk(KERN_ERR "Unable to create ip_conntrack_hash\n"); 1494 printk(KERN_ERR "Unable to create ip_conntrack_hash\n");
1435 goto err_unreg_sockopt; 1495 goto err_unreg_sockopt;
@@ -1461,9 +1521,6 @@ int __init ip_conntrack_init(void)
1461 ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp; 1521 ip_ct_protos[IPPROTO_ICMP] = &ip_conntrack_protocol_icmp;
1462 write_unlock_bh(&ip_conntrack_lock); 1522 write_unlock_bh(&ip_conntrack_lock);
1463 1523
1464 for (i = 0; i < ip_conntrack_htable_size; i++)
1465 INIT_LIST_HEAD(&ip_conntrack_hash[i]);
1466
1467 /* For use by ipt_REJECT */ 1524 /* For use by ipt_REJECT */
1468 ip_ct_attach = ip_conntrack_attach; 1525 ip_ct_attach = ip_conntrack_attach;
1469 1526
@@ -1478,7 +1535,8 @@ int __init ip_conntrack_init(void)
1478err_free_conntrack_slab: 1535err_free_conntrack_slab:
1479 kmem_cache_destroy(ip_conntrack_cachep); 1536 kmem_cache_destroy(ip_conntrack_cachep);
1480err_free_hash: 1537err_free_hash:
1481 free_conntrack_hash(); 1538 free_conntrack_hash(ip_conntrack_hash, ip_conntrack_vmalloc,
1539 ip_conntrack_htable_size);
1482err_unreg_sockopt: 1540err_unreg_sockopt:
1483 nf_unregister_sockopt(&so_getorigdst); 1541 nf_unregister_sockopt(&so_getorigdst);
1484 1542
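The ip_conntrack_core.c changes above make the conntrack hash table resizable at runtime: hashsize becomes a module_param_call() backed by set_hashsize(), which allocates a new table with alloc_hashtable(), draws a fresh hash seed, moves every entry to its new bucket under write_lock_bh(&ip_conntrack_lock), swaps the global table/size/seed, and frees the old table. A sketch that isolates just the rehash loop; rehash_all() is a hypothetical refactoring of the code inside set_hashsize(), not something the patch adds:

/* Hypothetical helper: move every conntrack entry from the old buckets
 * into a freshly allocated table, hashing with the new size and seed.
 * Must run with ip_conntrack_lock held for writing, as set_hashsize() does.
 */
static void rehash_all(struct list_head *old_hash, int old_size,
		       struct list_head *new_hash, int new_size, int rnd)
{
	struct ip_conntrack_tuple_hash *h;
	int i, bucket;

	for (i = 0; i < old_size; i++) {
		while (!list_empty(&old_hash[i])) {
			h = list_entry(old_hash[i].next,
				       struct ip_conntrack_tuple_hash, list);
			list_del(&h->list);
			bucket = __hash_conntrack(&h->tuple, new_size, rnd);
			list_add_tail(&h->list, &new_hash[bucket]);
		}
	}
}

Draining each old bucket with list_del()/list_add_tail() means every entry is touched exactly once and entries sharing a new bucket keep their relative order.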
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index f7943ba1f43c..a65e508fbd40 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -90,9 +90,7 @@ fold_field(void *mib[], int offt)
90 unsigned long res = 0; 90 unsigned long res = 0;
91 int i; 91 int i;
92 92
93 for (i = 0; i < NR_CPUS; i++) { 93 for_each_cpu(i) {
94 if (!cpu_possible(i))
95 continue;
96 res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt); 94 res += *(((unsigned long *) per_cpu_ptr(mib[0], i)) + offt);
97 res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt); 95 res += *(((unsigned long *) per_cpu_ptr(mib[1], i)) + offt);
98 } 96 }
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index b7185fb3377c..23e540365a14 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -700,10 +700,7 @@ int __init icmpv6_init(struct net_proto_family *ops)
700 struct sock *sk; 700 struct sock *sk;
701 int err, i, j; 701 int err, i, j;
702 702
703 for (i = 0; i < NR_CPUS; i++) { 703 for_each_cpu(i) {
704 if (!cpu_possible(i))
705 continue;
706
707 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6, 704 err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
708 &per_cpu(__icmpv6_socket, i)); 705 &per_cpu(__icmpv6_socket, i));
709 if (err < 0) { 706 if (err < 0) {
@@ -749,9 +746,7 @@ void icmpv6_cleanup(void)
749{ 746{
750 int i; 747 int i;
751 748
752 for (i = 0; i < NR_CPUS; i++) { 749 for_each_cpu(i) {
753 if (!cpu_possible(i))
754 continue;
755 sock_release(per_cpu(__icmpv6_socket, i)); 750 sock_release(per_cpu(__icmpv6_socket, i));
756 } 751 }
757 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6); 752 inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 334a5967831e..50a13e75d70e 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -140,9 +140,7 @@ fold_field(void *mib[], int offt)
140 unsigned long res = 0; 140 unsigned long res = 0;
141 int i; 141 int i;
142 142
143 for (i = 0; i < NR_CPUS; i++) { 143 for_each_cpu(i) {
144 if (!cpu_possible(i))
145 continue;
146 res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt); 144 res += *(((unsigned long *)per_cpu_ptr(mib[0], i)) + offt);
147 res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt); 145 res += *(((unsigned long *)per_cpu_ptr(mib[1], i)) + offt);
148 } 146 }
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 291df2e4c492..5ca283537bc6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -740,11 +740,8 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t
740 740
741int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol) 741int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
742{ 742{
743 struct netlink_sock *nlk;
744 int len = skb->len; 743 int len = skb->len;
745 744
746 nlk = nlk_sk(sk);
747
748 skb_queue_tail(&sk->sk_receive_queue, skb); 745 skb_queue_tail(&sk->sk_receive_queue, skb);
749 sk->sk_data_ready(sk, len); 746 sk->sk_data_ready(sk, len);
750 sock_put(sk); 747 sock_put(sk);
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index e556d92c0bc4..b18fe5043019 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -727,7 +727,7 @@ int rose_rt_ioctl(unsigned int cmd, void __user *arg)
727 } 727 }
728 if (rose_route.mask > 10) /* Mask can't be more than 10 digits */ 728 if (rose_route.mask > 10) /* Mask can't be more than 10 digits */
729 return -EINVAL; 729 return -EINVAL;
730 if (rose_route.ndigis > 8) /* No more than 8 digipeats */ 730 if (rose_route.ndigis > AX25_MAX_DIGIS)
731 return -EINVAL; 731 return -EINVAL;
732 err = rose_add_node(&rose_route, dev); 732 err = rose_add_node(&rose_route, dev);
733 dev_put(dev); 733 dev_put(dev);
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index b74f7772b576..6e4dc28874d7 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -69,9 +69,7 @@ fold_field(void *mib[], int nr)
69 unsigned long res = 0; 69 unsigned long res = 0;
70 int i; 70 int i;
71 71
72 for (i = 0; i < NR_CPUS; i++) { 72 for_each_cpu(i) {
73 if (!cpu_possible(i))
74 continue;
75 res += 73 res +=
76 *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + 74 *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) +
77 sizeof (unsigned long) * nr)); 75 sizeof (unsigned long) * nr));
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cbb0ba34a600..0db9e57013fd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1192,46 +1192,6 @@ int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
1192 1192
1193EXPORT_SYMBOL(xfrm_bundle_ok); 1193EXPORT_SYMBOL(xfrm_bundle_ok);
1194 1194
1195/* Well... that's _TASK_. We need to scan through transformation
1196 * list and figure out what mss tcp should generate in order to
1197 * final datagram fit to mtu. Mama mia... :-)
1198 *
1199 * Apparently, some easy way exists, but we used to choose the most
1200 * bizarre ones. :-) So, raising Kalashnikov... tra-ta-ta.
1201 *
1202 * Consider this function as something like dark humour. :-)
1203 */
1204static int xfrm_get_mss(struct dst_entry *dst, u32 mtu)
1205{
1206 int res = mtu - dst->header_len;
1207
1208 for (;;) {
1209 struct dst_entry *d = dst;
1210 int m = res;
1211
1212 do {
1213 struct xfrm_state *x = d->xfrm;
1214 if (x) {
1215 spin_lock_bh(&x->lock);
1216 if (x->km.state == XFRM_STATE_VALID &&
1217 x->type && x->type->get_max_size)
1218 m = x->type->get_max_size(d->xfrm, m);
1219 else
1220 m += x->props.header_len;
1221 spin_unlock_bh(&x->lock);
1222 }
1223 } while ((d = d->child) != NULL);
1224
1225 if (m <= mtu)
1226 break;
1227 res -= (m - mtu);
1228 if (res < 88)
1229 return mtu;
1230 }
1231
1232 return res + dst->header_len;
1233}
1234
1235int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 1195int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
1236{ 1196{
1237 int err = 0; 1197 int err = 0;
@@ -1252,8 +1212,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
1252 dst_ops->negative_advice = xfrm_negative_advice; 1212 dst_ops->negative_advice = xfrm_negative_advice;
1253 if (likely(dst_ops->link_failure == NULL)) 1213 if (likely(dst_ops->link_failure == NULL))
1254 dst_ops->link_failure = xfrm_link_failure; 1214 dst_ops->link_failure = xfrm_link_failure;
1255 if (likely(dst_ops->get_mss == NULL))
1256 dst_ops->get_mss = xfrm_get_mss;
1257 if (likely(afinfo->garbage_collect == NULL)) 1215 if (likely(afinfo->garbage_collect == NULL))
1258 afinfo->garbage_collect = __xfrm_garbage_collect; 1216 afinfo->garbage_collect = __xfrm_garbage_collect;
1259 xfrm_policy_afinfo[afinfo->family] = afinfo; 1217 xfrm_policy_afinfo[afinfo->family] = afinfo;
@@ -1281,7 +1239,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
1281 dst_ops->check = NULL; 1239 dst_ops->check = NULL;
1282 dst_ops->negative_advice = NULL; 1240 dst_ops->negative_advice = NULL;
1283 dst_ops->link_failure = NULL; 1241 dst_ops->link_failure = NULL;
1284 dst_ops->get_mss = NULL;
1285 afinfo->garbage_collect = NULL; 1242 afinfo->garbage_collect = NULL;
1286 } 1243 }
1287 } 1244 }
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 9d206c282cf1..8b9a4747417d 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1026,6 +1026,12 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x)
1026} 1026}
1027EXPORT_SYMBOL(xfrm_state_delete_tunnel); 1027EXPORT_SYMBOL(xfrm_state_delete_tunnel);
1028 1028
1029/*
1030 * This function is NOT optimal. For example, with ESP it will give an
1031 * MTU that's usually two bytes short of being optimal. However, it will
1032 * usually give an answer that's a multiple of 4 provided the input is
1033 * also a multiple of 4.
1034 */
1029int xfrm_state_mtu(struct xfrm_state *x, int mtu) 1035int xfrm_state_mtu(struct xfrm_state *x, int mtu)
1030{ 1036{
1031 int res = mtu; 1037 int res = mtu;