Diffstat (limited to 'net/sched/sch_fq.c')
 net/sched/sch_fq.c | 36 +++++++++++++++++++++++++++++-------
 1 file changed, 29 insertions(+), 7 deletions(-)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 95d843961907..08ef7a42c0e4 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -47,6 +47,7 @@
 #include <linux/rbtree.h>
 #include <linux/hash.h>
 #include <linux/prefetch.h>
+#include <linux/vmalloc.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
@@ -225,7 +226,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 		/* By forcing low order bit to 1, we make sure to not
 		 * collide with a local flow (socket pointers are word aligned)
 		 */
-		sk = (struct sock *)(skb_get_rxhash(skb) | 1L);
+		sk = (struct sock *)(skb_get_hash(skb) | 1L);
 	}
 
 	root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];
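The hunk above relies on two facts: struct sock pointers are at least word aligned, so their low bit is always zero, and OR-ing 1 into the packet hash therefore yields a synthetic key that can never collide with a real socket pointer; both kinds of key are then folded into a tree index by hash_32(). A minimal user-space sketch of that bucketing scheme, assuming the multiplicative hash_32() of linux/hash.h from this era (golden-ratio prime 0x9e370001); names and values are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Multiplicative hash as in linux/hash.h of this era: multiply by a
 * golden-ratio prime and keep the top 'bits' bits.
 */
static uint32_t demo_hash_32(uint32_t val, unsigned int bits)
{
	return (uint32_t)(val * 0x9e370001UL) >> (32 - bits);
}

int main(void)
{
	unsigned int fq_trees_log = 10;	/* 1024 buckets, fq's default */
	uint32_t rxhash = 0xdeadbeef;	/* stand-in for skb_get_hash() */
	uintptr_t key = rxhash | 1UL;	/* low bit forced: not a valid socket ptr */

	printf("bucket index: %u\n", demo_hash_32((uint32_t)key, fq_trees_log));
	return 0;
}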
@@ -578,15 +579,36 @@ static void fq_rehash(struct fq_sched_data *q,
 	q->stat_gc_flows += fcnt;
 }
 
-static int fq_resize(struct fq_sched_data *q, u32 log)
+static void *fq_alloc_node(size_t sz, int node)
+{
+	void *ptr;
+
+	ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN, node);
+	if (!ptr)
+		ptr = vmalloc_node(sz, node);
+	return ptr;
+}
+
+static void fq_free(void *addr)
+{
+	if (addr && is_vmalloc_addr(addr))
+		vfree(addr);
+	else
+		kfree(addr);
+}
+
+static int fq_resize(struct Qdisc *sch, u32 log)
 {
+	struct fq_sched_data *q = qdisc_priv(sch);
 	struct rb_root *array;
 	u32 idx;
 
 	if (q->fq_root && log == q->fq_trees_log)
 		return 0;
 
-	array = kmalloc(sizeof(struct rb_root) << log, GFP_KERNEL);
+	/* If XPS was setup, we can allocate memory on right NUMA node */
+	array = fq_alloc_node(sizeof(struct rb_root) << log,
+			      netdev_queue_numa_node_read(sch->dev_queue));
 	if (!array)
 		return -ENOMEM;
 
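The pair of helpers added above is the classic "kmalloc first, vmalloc fallback" idiom: a physically contiguous array is preferred, but once sizeof(struct rb_root) << log spans many pages the allocation can fail under fragmentation while vmalloc_node() still succeeds, and is_vmalloc_addr() lets a single free routine pick the matching deallocator. A sketch of the same idiom as a self-contained module, with illustrative names only (current kernels ship this pattern ready-made as kvmalloc_node()/kvfree()):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/numa.h>

/* Prefer a physically contiguous allocation on the given node,
 * fall back to vmalloc space when the page allocator cannot help.
 */
static void *demo_alloc_node(size_t sz, int node)
{
	void *ptr = kmalloc_node(sz, GFP_KERNEL | __GFP_NOWARN, node);

	return ptr ? ptr : vmalloc_node(sz, node);
}

/* One free path for both cases: is_vmalloc_addr() tells them apart. */
static void demo_free(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

static int __init demo_init(void)
{
	void *buf = demo_alloc_node(1 << 20, NUMA_NO_NODE);

	if (!buf)
		return -ENOMEM;
	demo_free(buf);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");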
@@ -595,7 +617,7 @@ static int fq_resize(struct fq_sched_data *q, u32 log)
 
 	if (q->fq_root) {
 		fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
-		kfree(q->fq_root);
+		fq_free(q->fq_root);
 	}
 	q->fq_root = array;
 	q->fq_trees_log = log;
@@ -676,7 +698,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
 	}
 
 	if (!err)
-		err = fq_resize(q, fq_log);
+		err = fq_resize(sch, fq_log);
 
 	while (sch->q.qlen > sch->limit) {
 		struct sk_buff *skb = fq_dequeue(sch);
@@ -697,7 +719,7 @@ static void fq_destroy(struct Qdisc *sch)
 	struct fq_sched_data *q = qdisc_priv(sch);
 
 	fq_reset(sch);
-	kfree(q->fq_root);
+	fq_free(q->fq_root);
 	qdisc_watchdog_cancel(&q->watchdog);
 }
 
@@ -723,7 +745,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt)
 	if (opt)
 		err = fq_change(sch, opt);
 	else
-		err = fq_resize(q, q->fq_trees_log);
+		err = fq_resize(sch, q->fq_trees_log);
 
 	return err;
 }
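Passing the Qdisc rather than its private data through fq_change() and fq_init() is what enables the NUMA hint: from sch the resize path can reach dev_queue and ask netdev_queue_numa_node_read() which node the tx queue was bound to (NUMA_NO_NODE when nothing such as an XPS setup recorded one, in which case kmalloc_node() behaves like a plain kmalloc()). A small fragment of that call shape, illustrative name only:

#include <linux/netdevice.h>
#include <net/sch_generic.h>

/* Where the NUMA hint comes from: the tx queue a qdisc is attached to. */
static int demo_qdisc_node(struct Qdisc *sch)
{
	/* NUMA_NO_NODE unless something (e.g. XPS setup) recorded a node. */
	return netdev_queue_numa_node_read(sch->dev_queue);
}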