path: root/net/sched
author     Linus Torvalds <torvalds@linux-foundation.org>  2009-09-17 23:53:52 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-09-17 23:53:52 -0400
commit     f205ce83a766c08965ec78342f138cdc00631fba (patch)
tree       7a9d2db6c16594ef7c730ca93a87131cf0abca41 /net/sched
parent     3dc95666df0e1ae5b7381a8ec97a583bb3ce4306 (diff)
parent     b31c50a7f9e93a61d14740dedcbbf2c376998bc7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (66 commits)
  be2net: fix some cmds to use mccq instead of mbox
  atl1e: fix 2.6.31-git4 -- ATL1E 0000:03:00.0: DMA-API: device driver frees DMA
  pkt_sched: Fix qstats.qlen updating in dump_stats
  ipv6: Log the affected address when DAD failure occurs
  wl12xx: Fix print_mac() conversion.
  af_iucv: fix race when queueing skbs on the backlog queue
  af_iucv: do not call iucv_sock_kill() twice
  af_iucv: handle non-accepted sockets after resuming from suspend
  af_iucv: fix race in __iucv_sock_wait()
  iucv: use correct output register in iucv_query_maxconn()
  iucv: fix iucv_buffer_cpumask check when calling IUCV functions
  iucv: suspend/resume error msg for left over pathes
  wl12xx: switch to %pM to print the mac address
  b44: the poll handler b44_poll must not enable IRQ unconditionally
  ipv6: Ignore route option with ROUTER_PREF_INVALID
  bonding: make ab_arp select active slaves as other modes
  cfg80211: fix SME connect
  rc80211_minstrel: fix contention window calculation
  ssb/sdio: fix printk format warnings
  p54usb: add Zcomax XG-705A usbid
  ...
Diffstat (limited to 'net/sched')
 net/sched/sch_api.c    | 29
 net/sched/sch_drr.c    |  4
 net/sched/sch_mq.c     | 14
 net/sched/sch_multiq.c |  1
 net/sched/sch_prio.c   |  1
 5 files changed, 34 insertions(+), 15 deletions(-)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 692d9a41cd23..903e4188b6ca 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -693,13 +693,18 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
 		if (new && i > 0)
 			atomic_inc(&new->refcnt);
 
-		qdisc_destroy(old);
+		if (!ingress)
+			qdisc_destroy(old);
 	}
 
-	notify_and_destroy(skb, n, classid, dev->qdisc, new);
-	if (new && !new->ops->attach)
-		atomic_inc(&new->refcnt);
-	dev->qdisc = new ? : &noop_qdisc;
+	if (!ingress) {
+		notify_and_destroy(skb, n, classid, dev->qdisc, new);
+		if (new && !new->ops->attach)
+			atomic_inc(&new->refcnt);
+		dev->qdisc = new ? : &noop_qdisc;
+	} else {
+		notify_and_destroy(skb, n, classid, old, new);
+	}
 
 	if (dev->flags & IFF_UP)
 		dev_activate(dev);
@@ -804,7 +809,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 		stab = qdisc_get_stab(tca[TCA_STAB]);
 		if (IS_ERR(stab)) {
 			err = PTR_ERR(stab);
-			goto err_out3;
+			goto err_out4;
 		}
 		sch->stab = stab;
 	}
@@ -833,7 +838,6 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 	return sch;
 }
 err_out3:
-	qdisc_put_stab(sch->stab);
 	dev_put(dev);
 	kfree((char *) sch - sch->padded);
 err_out2:
@@ -847,6 +851,7 @@ err_out4:
 	 * Any broken qdiscs that would require a ops->reset() here?
 	 * The qdisc was never in action so it shouldn't be necessary.
 	 */
+	qdisc_put_stab(sch->stab);
 	if (ops->destroy)
 		ops->destroy(sch);
 	goto err_out3;
@@ -1111,12 +1116,16 @@ create_n_graft:
 				 tcm->tcm_parent, tcm->tcm_parent,
 				 tca, &err);
 	else {
-		unsigned int ntx = 0;
+		struct netdev_queue *dev_queue;
 
 		if (p && p->ops->cl_ops && p->ops->cl_ops->select_queue)
-			ntx = p->ops->cl_ops->select_queue(p, tcm);
+			dev_queue = p->ops->cl_ops->select_queue(p, tcm);
+		else if (p)
+			dev_queue = p->dev_queue;
+		else
+			dev_queue = netdev_get_tx_queue(dev, 0);
 
-		q = qdisc_create(dev, netdev_get_tx_queue(dev, ntx), p,
+		q = qdisc_create(dev, dev_queue, p,
 				 tcm->tcm_parent, tcm->tcm_handle,
 				 tca, &err);
 	}
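The sch_api.c hunks above change cl_ops->select_queue from returning a queue index to returning the struct netdev_queue itself, with qdisc_graft falling back to the parent's dev_queue or TX queue 0. A minimal sketch of what that contract looks like from a classful parent's side, assuming a hypothetical qdisc "foo" (foo_select_queue and the band-to-queue mapping are illustrative only; the hook signature is the one mq_select_queue adopts below):

/* Sketch only: hypothetical select_queue hook under the new contract,
 * returning the TX queue a child qdisc should be created on instead of
 * a numeric index. */
static struct netdev_queue *foo_select_queue(struct Qdisc *sch,
					     struct tcmsg *tcm)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long band = TC_H_MIN(tcm->tcm_parent);

	/* Fall back to TX queue 0 when the requested class does not
	 * name a valid band, mirroring the fallback in qdisc_graft. */
	if (band == 0 || band > dev->real_num_tx_queues)
		return netdev_get_tx_queue(dev, 0);
	return netdev_get_tx_queue(dev, band - 1);
}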
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 12b2fb04b29b..5a888af7e5da 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -274,8 +274,10 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	struct tc_drr_stats xstats;
 
 	memset(&xstats, 0, sizeof(xstats));
-	if (cl->qdisc->q.qlen)
+	if (cl->qdisc->q.qlen) {
 		xstats.deficit = cl->deficit;
+		cl->qdisc->qstats.qlen = cl->qdisc->q.qlen;
+	}
 
 	if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index dd5ee022f1f7..d1dea3d5dc92 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -125,13 +125,18 @@ static struct netdev_queue *mq_queue_get(struct Qdisc *sch, unsigned long cl)
 	return netdev_get_tx_queue(dev, ntx);
 }
 
-static unsigned int mq_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
+static struct netdev_queue *mq_select_queue(struct Qdisc *sch,
+					    struct tcmsg *tcm)
 {
 	unsigned int ntx = TC_H_MIN(tcm->tcm_parent);
+	struct netdev_queue *dev_queue = mq_queue_get(sch, ntx);
 
-	if (!mq_queue_get(sch, ntx))
-		return 0;
-	return ntx - 1;
+	if (!dev_queue) {
+		struct net_device *dev = qdisc_dev(sch);
+
+		return netdev_get_tx_queue(dev, 0);
+	}
+	return dev_queue;
 }
 
 static int mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
@@ -188,6 +193,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	struct netdev_queue *dev_queue = mq_queue_get(sch, cl);
 
 	sch = dev_queue->qdisc_sleeping;
+	sch->qstats.qlen = sch->q.qlen;
 	if (gnet_stats_copy_basic(d, &sch->bstats) < 0 ||
 	    gnet_stats_copy_queue(d, &sch->qstats) < 0)
 		return -1;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 069f81c97277..7db2c88ce585 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -359,6 +359,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	struct Qdisc *cl_q;
 
 	cl_q = q->queues[cl - 1];
+	cl_q->qstats.qlen = cl_q->q.qlen;
 	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
 	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
 		return -1;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 0f73c412d04b..93285cecb246 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -322,6 +322,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	struct Qdisc *cl_q;
 
 	cl_q = q->queues[cl - 1];
+	cl_q->qstats.qlen = cl_q->q.qlen;
 	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
 	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
 		return -1;
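The sch_drr, sch_mq, sch_multiq, and sch_prio hunks all apply the same "qstats.qlen updating" fix from the merge summary: gnet_stats_copy_queue() copies the gnet_stats_queue struct as-is, so qstats.qlen must be refreshed from the live q.qlen right before the dump or user space sees a stale (typically zero) queue length. A condensed sketch of that pattern, with a hypothetical per-band qdisc "foo" and its private struct standing in for the four schedulers touched here:

/* Sketch only: the qlen-sync pattern used by the hunks above, shown for a
 * hypothetical per-band qdisc where cl indexes a band, as in prio/multiq. */
static int foo_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct foo_sched_data *q = qdisc_priv(sch);	/* hypothetical priv data */
	struct Qdisc *cl_q = q->queues[cl - 1];

	/* qstats.qlen is only a cached snapshot; refresh it from the live
	 * queue length so gnet_stats_copy_queue() reports current occupancy. */
	cl_q->qstats.qlen = cl_q->q.qlen;

	if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &cl_q->qstats) < 0)
		return -1;
	return 0;
}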