From bbd8a0d3a3b65d341437f8b99c828fa5cc29c739 Mon Sep 17 00:00:00 2001
From: Krishna Kumar
Date: Thu, 6 Aug 2009 01:44:21 +0000
Subject: net: Avoid enqueuing skb for default qdiscs

dev_queue_xmit enqueues an skb and calls qdisc_run, which dequeues the
skb and xmits it. In most cases, the skb that is enqueued is the same
one that is dequeued (unless the queue gets stopped, or multiple CPUs
write to the same queue and end up in a race with qdisc_run). For
default qdiscs, we can remove the redundant enqueue/dequeue and simply
xmit the skb, since the default qdisc is work-conserving.

The patch uses a new flag - TCQ_F_CAN_BYPASS - to identify the default
fast queue. The controversial part of the patch is incrementing qlen
when an skb is requeued - this is to avoid checks like the second line
below:

+	} else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
>>		   !q->gso_skb &&
+		   !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {

Results of two hours of testing with multiple netperf sessions (1, 2,
4, 8 and 12 sessions on a 4-CPU system-X). The BW numbers are aggregate
Mb/s across iterations, tested with this version on system-X boxes with
Chelsio 10gbps cards:

	----------------------------------
	Size |   ORG BW        NEW BW   |
	----------------------------------
	128K |   156964        159381   |
	256K |   158650        162042   |
	----------------------------------

Changes from ver1:

1. Move sch_direct_xmit declaration from sch_generic.h to pkt_sched.h
2. Update qdisc basic statistics for direct xmit path.
3. Set qlen to zero in qdisc_reset.
4. Changed some function names to more meaningful ones.

Signed-off-by: Krishna Kumar
Signed-off-by: David S. Miller
---
 net/sched/sch_generic.c | 93 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 57 insertions(+), 36 deletions(-)

(limited to 'net/sched/sch_generic.c')

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 27d03816ec3e..693df7ae33d8 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -37,15 +37,11 @@
  * - updates to tree and tree walking are only done under the rtnl mutex.
  */
 
-static inline int qdisc_qlen(struct Qdisc *q)
-{
-	return q->q.qlen;
-}
-
 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 {
 	q->gso_skb = skb;
 	q->qstats.requeues++;
+	q->q.qlen++;	/* it's still part of the queue */
 	__netif_schedule(q);
 
 	return 0;
@@ -61,9 +57,11 @@ static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+		if (!netif_tx_queue_stopped(txq) &&
+		    !netif_tx_queue_frozen(txq)) {
 			q->gso_skb = NULL;
-		else
+			q->q.qlen--;
+		} else
 			skb = NULL;
 	} else {
 		skb = q->dequeue(q);
@@ -103,44 +101,23 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 }
 
 /*
- * NOTE: Called under qdisc_lock(q) with locally disabled BH.
- *
- * __QDISC_STATE_RUNNING guarantees only one CPU can process
- * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
- * this queue.
- *
- * netif_tx_lock serializes accesses to device driver.
- *
- * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
- * if one is grabbed, another must be free.
- *
- * Note, that this procedure can be called by a watchdog timer
+ * Transmit one skb, and handle the return status as required. Holding the
+ * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * function.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
- *
 */
-static inline int qdisc_restart(struct Qdisc *q)
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+		    struct net_device *dev, struct netdev_queue *txq,
+		    spinlock_t *root_lock)
 {
-	struct netdev_queue *txq;
 	int ret = NETDEV_TX_BUSY;
-	struct net_device *dev;
-	spinlock_t *root_lock;
-	struct sk_buff *skb;
-
-	/* Dequeue packet */
-	if (unlikely((skb = dequeue_skb(q)) == NULL))
-		return 0;
-
-	root_lock = qdisc_lock(q);
 
 	/* And release qdisc */
 	spin_unlock(root_lock);
 
-	dev = qdisc_dev(q);
-	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
 	if (!netif_tx_queue_stopped(txq) &&
 	    !netif_tx_queue_frozen(txq))
@@ -177,6 +154,44 @@ static inline int qdisc_restart(struct Qdisc *q)
 	return ret;
 }
 
+/*
+ * NOTE: Called under qdisc_lock(q) with locally disabled BH.
+ *
+ * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
+ * this queue.
+ *
+ * netif_tx_lock serializes accesses to device driver.
+ *
+ * qdisc_lock(q) and netif_tx_lock are mutually exclusive,
+ * if one is grabbed, another must be free.
+ *
+ * Note, that this procedure can be called by a watchdog timer
+ *
+ * Returns to the caller:
+ *				0  - queue is empty or throttled.
+ *				>0 - queue is not empty.
+ *
+ */
+static inline int qdisc_restart(struct Qdisc *q)
+{
+	struct netdev_queue *txq;
+	struct net_device *dev;
+	spinlock_t *root_lock;
+	struct sk_buff *skb;
+
+	/* Dequeue packet */
+	skb = dequeue_skb(q);
+	if (unlikely(!skb))
+		return 0;
+
+	root_lock = qdisc_lock(q);
+	dev = qdisc_dev(q);
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+
+	return sch_direct_xmit(skb, q, dev, txq, root_lock);
+}
+
 void __qdisc_run(struct Qdisc *q)
 {
 	unsigned long start_time = jiffies;
@@ -547,8 +562,11 @@ void qdisc_reset(struct Qdisc *qdisc)
 	if (ops->reset)
 		ops->reset(qdisc);
 
-	kfree_skb(qdisc->gso_skb);
-	qdisc->gso_skb = NULL;
+	if (qdisc->gso_skb) {
+		kfree_skb(qdisc->gso_skb);
+		qdisc->gso_skb = NULL;
+		qdisc->q.qlen = 0;
+	}
 }
 EXPORT_SYMBOL(qdisc_reset);
 
@@ -605,6 +623,9 @@ static void attach_one_default_qdisc(struct net_device *dev,
 			printk(KERN_INFO "%s: activation failed\n", dev->name);
 			return;
 		}
+
+		/* Can by-pass the queue discipline for default qdisc */
+		qdisc->flags |= TCQ_F_CAN_BYPASS;
 	} else {
 		qdisc = &noqueue_qdisc;
 	}
--
cgit v1.2.2
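[ Context for the patch above: the code that tests TCQ_F_CAN_BYPASS sits
  on the dev_queue_xmit() side and is therefore not part of this
  sch_generic.c-only diff. The sketch below is a simplified
  reconstruction of that caller, not the verbatim kernel function;
  xmit_one_skb() is a made-up name, statistics and drop accounting are
  omitted, and root_lock is assumed held on entry, matching the
  contract of sch_direct_xmit(). ]

	static int xmit_one_skb(struct sk_buff *skb, struct Qdisc *q,
				struct net_device *dev,
				struct netdev_queue *txq,
				spinlock_t *root_lock)
	{
		int rc;

		if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
		    !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
			/* Empty work-conserving qdisc: transmit directly,
			 * skipping the enqueue()/dequeue() round trip. */
			if (sch_direct_xmit(skb, q, dev, txq, root_lock))
				__qdisc_run(q);	/* drain any requeued skb */
			else
				clear_bit(__QDISC_STATE_RUNNING, &q->state);
			rc = NET_XMIT_SUCCESS;
		} else {
			/* Classic path: enqueue, then kick the qdisc. */
			rc = q->enqueue(skb, q);
			qdisc_run(q);
		}
		return rc;
	}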
From fd3ae5e8fc5e947a9f151e80a65763a24b6368a9 Mon Sep 17 00:00:00 2001
From: Krishna Kumar
Date: Tue, 18 Aug 2009 21:55:59 +0000
Subject: Speed-up pfifo_fast lookup using a private bitmap

Maintain a per-qdisc bitmap for pfifo_fast giving the availability of
skbs for each band. This allows a faster lookup for an skb when there
are no high-priority skbs. It also helps in the (rare) case when there
are no skbs on the list at all, where an immediate lookup is faster
than iterating through the three bands.

Signed-off-by: Krishna Kumar
Signed-off-by: David S. Miller
---
 net/sched/sch_generic.c | 70 +++++++++++++++++++++++++++++++++----------------
 1 file changed, 48 insertions(+), 22 deletions(-)

(limited to 'net/sched/sch_generic.c')

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 693df7ae33d8..6f7aebd14072 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -406,18 +406,38 @@ static const u8 prio2band[TC_PRIO_MAX+1] =
 
 #define PFIFO_FAST_BANDS 3
 
-static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
-					     struct Qdisc *qdisc)
+/*
+ * Private data for a pfifo_fast scheduler containing:
+ *	- queues for the three band
+ *	- bitmap indicating which of the bands contain skbs
+ */
+struct pfifo_fast_priv {
+	u32 bitmap;
+	struct sk_buff_head q[PFIFO_FAST_BANDS];
+};
+
+/*
+ * Convert a bitmap to the first band number where an skb is queued, where:
+ *	bitmap=0 means there are no skbs on any band.
+ *	bitmap=1 means there is an skb on band 0.
+ *	bitmap=7 means there are skbs on all 3 bands, etc.
+ */
+static const int bitmap2band[] = {-1, 0, 1, 0, 2, 0, 1, 0};
+
+static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
+					     int band)
 {
-	struct sk_buff_head *list = qdisc_priv(qdisc);
-	return list + prio2band[skb->priority & TC_PRIO_MAX];
+	return priv->q + band;
 }
 
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	struct sk_buff_head *list = prio2list(skb, qdisc);
+	int band = prio2band[skb->priority & TC_PRIO_MAX];
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	struct sk_buff_head *list = band2list(priv, band);
 
 	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
+		priv->bitmap |= (1 << band);
 		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
 	}
@@ -427,14 +447,18 @@ static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 
 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 {
-	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int band = bitmap2band[priv->bitmap];
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		if (!skb_queue_empty(list + prio)) {
-			qdisc->q.qlen--;
-			return __qdisc_dequeue_head(qdisc, list + prio);
-		}
+	if (likely(band >= 0)) {
+		struct sk_buff_head *list = band2list(priv, band);
+		struct sk_buff *skb = __qdisc_dequeue_head(qdisc, list);
+
+		qdisc->q.qlen--;
+		if (skb_queue_empty(list))
+			priv->bitmap &= ~(1 << band);
+
+		return skb;
 	}
 
 	return NULL;
@@ -442,12 +466,13 @@ static struct sk_buff *pfifo_fast_dequeue(struct Qdisc* qdisc)
 
 static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 {
-	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+	int band = bitmap2band[priv->bitmap];
+
+	if (band >= 0) {
+		struct sk_buff_head *list = band2list(priv, band);
 
-	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
-		if (!skb_queue_empty(list + prio))
-			return skb_peek(list + prio);
+		return skb_peek(list);
 	}
 
 	return NULL;
@@ -456,11 +481,12 @@ static struct sk_buff *pfifo_fast_peek(struct Qdisc* qdisc)
 static void pfifo_fast_reset(struct Qdisc* qdisc)
 {
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		__qdisc_reset_queue(qdisc, list + prio);
+		__qdisc_reset_queue(qdisc, band2list(priv, prio));
 
+	priv->bitmap = 0;
 	qdisc->qstats.backlog = 0;
 	qdisc->q.qlen = 0;
 }
@@ -480,17 +506,17 @@ nla_put_failure:
 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
 {
 	int prio;
-	struct sk_buff_head *list = qdisc_priv(qdisc);
+	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
 
 	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
-		skb_queue_head_init(list + prio);
+		skb_queue_head_init(band2list(priv, prio));
 
 	return 0;
 }
 
 static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 	.id		=	"pfifo_fast",
-	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
+	.priv_size	=	sizeof(struct pfifo_fast_priv),
 	.enqueue	=	pfifo_fast_enqueue,
 	.dequeue	=	pfifo_fast_dequeue,
 	.peek		=	pfifo_fast_peek,
--
cgit v1.2.2
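[ Aside on bitmap2band[]: the table is an eight-entry lookup of the
  lowest set bit, i.e. the highest-priority non-empty band (band 0 uses
  bit 0), with -1 meaning "all bands empty". A hypothetical equivalent,
  shown only to explain the table; the array version saves a branch and
  a bit-scan on the hot path: ]

	static int lowest_band(u32 bitmap)
	{
		/* Same result as bitmap2band[bitmap] for 0..7: the first
		 * set bit is the first band holding an skb. */
		return bitmap ? __ffs(bitmap) : -1;
	}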
From a453e0689a3ccf85c08cb89753d7685046248c5c Mon Sep 17 00:00:00 2001
From: Krishna Kumar
Date: Sun, 30 Aug 2009 22:20:28 -0700
Subject: pkt_sched: Fix resource limiting in pfifo_fast

pfifo_fast_enqueue has this check:

	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {

which allows each band to enqueue up to tx_queue_len skbs, for a total
of 3 * tx_queue_len skbs. I am not sure if this was the intended
limiting behaviour in the qdisc.

The patch compiled, and 32 simultaneous netperf sessions ran fine.
Also:

# tc -s qdisc show dev eth2
qdisc pfifo_fast 0: root bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1
 Sent 16835026752 bytes 373116 pkt (dropped 0, overlimits 0 requeues 25)
 rate 0bit 0pps backlog 0b 0p requeues 25

Signed-off-by: Krishna Kumar
Signed-off-by: David S. Miller
---
 net/sched/sch_generic.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'net/sched/sch_generic.c')

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6f7aebd14072..6128e6f24589 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -432,11 +432,11 @@ static inline struct sk_buff_head *band2list(struct pfifo_fast_priv *priv,
 
 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc* qdisc)
 {
-	int band = prio2band[skb->priority & TC_PRIO_MAX];
-	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
-	struct sk_buff_head *list = band2list(priv, band);
+	if (skb_queue_len(&qdisc->q) < qdisc_dev(qdisc)->tx_queue_len) {
+		int band = prio2band[skb->priority & TC_PRIO_MAX];
+		struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
+		struct sk_buff_head *list = band2list(priv, band);
 
-	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
 		priv->bitmap |= (1 << band);
 		qdisc->q.qlen++;
 		return __qdisc_enqueue_tail(skb, qdisc, list);
--
cgit v1.2.2
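[ The arithmetic behind the fix, assuming a hypothetical tx_queue_len
  of 1000:

	old check: skb_queue_len(list)      < 1000, per band -> up to 3000 skbs
	new check: skb_queue_len(&qdisc->q) < 1000, in total -> up to 1000 skbs

  qdisc->q.qlen is the aggregate counter that enqueue/dequeue already
  maintain, so the limit now covers all three bands together with no
  extra bookkeeping. ]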
From af356afa010f3cd2c8b8fcc3bce90f7a7b7ec02a Mon Sep 17 00:00:00 2001
From: Patrick McHardy
Date: Fri, 4 Sep 2009 06:41:18 +0000
Subject: net_sched: reintroduce dev->qdisc for use by sch_api

Currently the multiqueue integration with the qdisc API suffers from
a few problems:

- with multiple queues, all root qdiscs use the same handle. This means
  they can't be exposed to userspace in a backwards compatible fashion.

- all API operations always refer to queue number 0. Newly created
  qdiscs are automatically shared between all queues; it's not possible
  to address individual queues or restore multiqueue behaviour once a
  shared qdisc has been attached.

- dumps only contain the root qdisc of queue 0; in the case of
  non-shared qdiscs this means the statistics are incomplete.

This patch reintroduces dev->qdisc, which points to the (single) root
qdisc from userspace's point of view. Currently it either points to the
first (non-shared) default qdisc, or a qdisc shared between all queues.
The following patches will introduce a classful dummy qdisc, which will
be used as root qdisc and contain the per-queue qdiscs as children.

Signed-off-by: Patrick McHardy
Signed-off-by: David S. Miller
---
 net/sched/sch_generic.c | 25 +++++++++++--------------
 1 file changed, 11 insertions(+), 14 deletions(-)

(limited to 'net/sched/sch_generic.c')

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 6128e6f24589..a91f079fb47a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -623,19 +623,6 @@ void qdisc_destroy(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_destroy);
 
-static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
-{
-	unsigned int i;
-
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-
-		if (txq->qdisc_sleeping != &noop_qdisc)
-			return false;
-	}
-	return true;
-}
-
 static void attach_one_default_qdisc(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_unused)
@@ -677,6 +664,7 @@ static void transition_one_qdisc(struct net_device *dev,
 
 void dev_activate(struct net_device *dev)
 {
+	struct netdev_queue *txq;
 	int need_watchdog;
 
 	/* No queueing discipline is attached to device;
@@ -685,9 +673,14 @@ void dev_activate(struct net_device *dev)
 	   virtual interfaces
 	 */
 
-	if (dev_all_qdisc_sleeping_noop(dev))
+	if (dev->qdisc == &noop_qdisc) {
 		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
 
+		txq = netdev_get_tx_queue(dev, 0);
+		dev->qdisc = txq->qdisc_sleeping;
+		atomic_inc(&dev->qdisc->refcnt);
+	}
+
 	if (!netif_carrier_ok(dev))
 		/* Delay activation until next carrier-on event */
 		return;
@@ -777,6 +770,7 @@ static void dev_init_scheduler_queue(struct net_device *dev,
 
 void dev_init_scheduler(struct net_device *dev)
 {
+	dev->qdisc = &noop_qdisc;
 	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
 	dev_init_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
 
@@ -802,5 +796,8 @@ void dev_shutdown(struct net_device *dev)
 {
 	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
 	shutdown_scheduler_queue(dev, &dev->rx_queue, &noop_qdisc);
+	qdisc_destroy(dev->qdisc);
+	dev->qdisc = &noop_qdisc;
+
 	WARN_ON(timer_pending(&dev->watchdog_timer));
 }
--
cgit v1.2.2
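[ How the three entry points touched by this patch fit together over a
  device's lifetime; a minimal sketch that simply calls the functions
  shown above in their natural order: ]

	static void qdisc_lifecycle_sketch(struct net_device *dev)
	{
		dev_init_scheduler(dev);	/* dev->qdisc = &noop_qdisc */

		dev_activate(dev);		/* attach per-queue defaults,
						 * point dev->qdisc at queue 0's
						 * sleeping qdisc, take a ref */

		dev_shutdown(dev);		/* qdisc_destroy(dev->qdisc)
						 * drops that ref; dev->qdisc
						 * reverts to &noop_qdisc */
	}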
From 589983cd21f4a2e4ed74a958805a90fa676845c5 Mon Sep 17 00:00:00 2001
From: Patrick McHardy
Date: Fri, 4 Sep 2009 06:41:20 +0000
Subject: net_sched: move dev_graft_qdisc() to sch_generic.c

It will be used in a following patch by the multiqueue qdisc.

Signed-off-by: Patrick McHardy
Signed-off-by: David S. Miller
---
 net/sched/sch_generic.c | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

(limited to 'net/sched/sch_generic.c')

diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index a91f079fb47a..e7c47ceb0098 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -623,6 +623,31 @@ void qdisc_destroy(struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_destroy);
 
+/* Attach toplevel qdisc to device queue. */
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+			      struct Qdisc *qdisc)
+{
+	struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
+	spinlock_t *root_lock;
+
+	root_lock = qdisc_lock(oqdisc);
+	spin_lock_bh(root_lock);
+
+	/* Prune old scheduler */
+	if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
+		qdisc_reset(oqdisc);
+
+	/* ... and graft new one */
+	if (qdisc == NULL)
+		qdisc = &noop_qdisc;
+	dev_queue->qdisc_sleeping = qdisc;
+	rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
+
+	spin_unlock_bh(root_lock);
+
+	return oqdisc;
+}
+
 static void attach_one_default_qdisc(struct net_device *dev,
 				     struct netdev_queue *dev_queue,
 				     void *_unused)
--
cgit v1.2.2
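[ A hypothetical caller, to make the contract explicit: dev_graft_qdisc()
  swaps the sleeping qdisc under the root lock and returns the old one,
  which the caller then owns and must dispose of: ]

	static void replace_root_qdisc(struct netdev_queue *dev_queue,
				       struct Qdisc *new_qdisc)
	{
		struct Qdisc *old = dev_graft_qdisc(dev_queue, new_qdisc);

		/* old was reset inside dev_graft_qdisc() if nothing else
		 * held it; dropping the last reference frees it (builtin
		 * qdiscs such as noop_qdisc are never freed). */
		qdisc_destroy(old);
	}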
Miller" Date: Sun, 6 Sep 2009 01:58:51 -0700 Subject: net_sched: add classful multiqueue dummy scheduler This patch adds a classful dummy scheduler which can be used as root qdisc for multiqueue devices and exposes each device queue as a child class. This allows to address queues individually and graft them similar to regular classes. Additionally it presents an accumulated view of the statistics of all real root qdiscs in the dummy root. Two new callbacks are added to the qdisc_ops and qdisc_class_ops: - cl_ops->select_queue selects the tx queue number for new child classes. - qdisc_ops->attach() overrides root qdisc device grafting to attach non-shared qdiscs to the queues. Signed-off-by: Patrick McHardy Signed-off-by: David S. Miller --- net/sched/sch_generic.c | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) (limited to 'net/sched/sch_generic.c') diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index e7c47ceb0098..4ae6aa562f2b 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -514,7 +514,7 @@ static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt) return 0; } -static struct Qdisc_ops pfifo_fast_ops __read_mostly = { +struct Qdisc_ops pfifo_fast_ops __read_mostly = { .id = "pfifo_fast", .priv_size = sizeof(struct pfifo_fast_priv), .enqueue = pfifo_fast_enqueue, @@ -670,6 +670,26 @@ static void attach_one_default_qdisc(struct net_device *dev, dev_queue->qdisc_sleeping = qdisc; } +static void attach_default_qdiscs(struct net_device *dev) +{ + struct netdev_queue *txq; + struct Qdisc *qdisc; + + txq = netdev_get_tx_queue(dev, 0); + + if (!netif_is_multiqueue(dev) || dev->tx_queue_len == 0) { + netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); + dev->qdisc = txq->qdisc_sleeping; + atomic_inc(&dev->qdisc->refcnt); + } else { + qdisc = qdisc_create_dflt(dev, txq, &mq_qdisc_ops, TC_H_ROOT); + if (qdisc) { + qdisc->ops->attach(qdisc); + dev->qdisc = qdisc; + } + } +} + static void transition_one_qdisc(struct net_device *dev, struct netdev_queue *dev_queue, void *_need_watchdog) @@ -689,7 +709,6 @@ static void transition_one_qdisc(struct net_device *dev, void dev_activate(struct net_device *dev) { - struct netdev_queue *txq; int need_watchdog; /* No queueing discipline is attached to device; @@ -698,13 +717,8 @@ void dev_activate(struct net_device *dev) virtual interfaces */ - if (dev->qdisc == &noop_qdisc) { - netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL); - - txq = netdev_get_tx_queue(dev, 0); - dev->qdisc = txq->qdisc_sleeping; - atomic_inc(&dev->qdisc->refcnt); - } + if (dev->qdisc == &noop_qdisc) + attach_default_qdiscs(dev); if (!netif_carrier_ok(dev)) /* Delay activation until next carrier-on event */ -- cgit v1.2.2