path: root/net/sched/sch_multiq.c
author    Linus Torvalds <torvalds@linux-foundation.org>    2008-12-28 15:49:40 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2008-12-28 15:49:40 -0500
commit    0191b625ca5a46206d2fb862bb08f36f2fcb3b31 (patch)
tree      454d1842b1833d976da62abcbd5c47521ebe9bd7 /net/sched/sch_multiq.c
parent    54a696bd07c14d3b1192d03ce7269bc59b45209a (diff)
parent    eb56092fc168bf5af199d47af50c0d84a96db898 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6: (1429 commits)
  net: Allow dependancies of FDDI & Tokenring to be modular.
  igb: Fix build warning when DCA is disabled.
  net: Fix warning fallout from recent NAPI interface changes.
  gro: Fix potential use after free
  sfc: If AN is enabled, always read speed/duplex from the AN advertising bits
  sfc: When disabling the NIC, close the device rather than unregistering it
  sfc: SFT9001: Add cable diagnostics
  sfc: Add support for multiple PHY self-tests
  sfc: Merge top-level functions for self-tests
  sfc: Clean up PHY mode management in loopback self-test
  sfc: Fix unreliable link detection in some loopback modes
  sfc: Generate unique names for per-NIC workqueues
  802.3ad: use standard ethhdr instead of ad_header
  802.3ad: generalize out mac address initializer
  802.3ad: initialize ports LACPDU from const initializer
  802.3ad: remove typedef around ad_system
  802.3ad: turn ports is_individual into a bool
  802.3ad: turn ports is_enabled into a bool
  802.3ad: make ntt bool
  ixgbe: Fix set_ringparam in ixgbe to use the same memory pools.
  ...

Fixed trivial IPv4/6 address printing conflicts in fs/cifs/connect.c due to
the conversion to %pI (in this networking merge) and the addition of doing
IPv6 addresses (from the earlier merge of CIFS).
Diffstat (limited to 'net/sched/sch_multiq.c')
-rw-r--r--  net/sched/sch_multiq.c  |  82
1 file changed, 39 insertions(+), 43 deletions(-)
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 915f3149dde2..7e151861794b 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -92,40 +92,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 }
 
-
-static int
-multiq_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct Qdisc *qdisc;
-	struct multiq_sched_data *q = qdisc_priv(sch);
-	int ret;
-
-	qdisc = multiq_classify(skb, sch, &ret);
-#ifdef CONFIG_NET_CLS_ACT
-	if (qdisc == NULL) {
-		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
-		kfree_skb(skb);
-		return ret;
-	}
-#endif
-
-	ret = qdisc->ops->requeue(skb, qdisc);
-	if (ret == NET_XMIT_SUCCESS) {
-		sch->q.qlen++;
-		sch->qstats.requeues++;
-		if (q->curband)
-			q->curband--;
-		else
-			q->curband = q->bands - 1;
-		return NET_XMIT_SUCCESS;
-	}
-	if (net_xmit_drop_count(ret))
-		sch->qstats.drops++;
-	return ret;
-}
-
-
 static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
@@ -140,7 +106,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 			q->curband = 0;
 
 		/* Check that target subqueue is available before
-		 * pulling an skb to avoid excessive requeues
+		 * pulling an skb to avoid head-of-line blocking.
 		 */
 		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
 			qdisc = q->queues[q->curband];
@@ -155,6 +121,34 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 
 }
 
+static struct sk_buff *multiq_peek(struct Qdisc *sch)
+{
+	struct multiq_sched_data *q = qdisc_priv(sch);
+	unsigned int curband = q->curband;
+	struct Qdisc *qdisc;
+	struct sk_buff *skb;
+	int band;
+
+	for (band = 0; band < q->bands; band++) {
+		/* cycle through bands to ensure fairness */
+		curband++;
+		if (curband >= q->bands)
+			curband = 0;
+
+		/* Check that target subqueue is available before
+		 * pulling an skb to avoid head-of-line blocking.
+		 */
+		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
+			qdisc = q->queues[curband];
+			skb = qdisc->ops->peek(qdisc);
+			if (skb)
+				return skb;
+		}
+	}
+	return NULL;
+
+}
+
 static unsigned int multiq_drop(struct Qdisc *sch)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
@@ -220,7 +214,8 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
 	q->bands = qopt->bands;
 	for (i = q->bands; i < q->max_bands; i++) {
 		if (q->queues[i] != &noop_qdisc) {
-			struct Qdisc *child = xchg(&q->queues[i], &noop_qdisc);
+			struct Qdisc *child = q->queues[i];
+			q->queues[i] = &noop_qdisc;
 			qdisc_tree_decrease_qlen(child, child->q.qlen);
 			qdisc_destroy(child);
 		}
@@ -230,7 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
 
 	for (i = 0; i < q->bands; i++) {
 		if (q->queues[i] == &noop_qdisc) {
-			struct Qdisc *child;
+			struct Qdisc *child, *old;
 			child = qdisc_create_dflt(qdisc_dev(sch),
 						  sch->dev_queue,
 						  &pfifo_qdisc_ops,
@@ -238,12 +233,13 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt)
 							    i + 1));
 			if (child) {
 				sch_tree_lock(sch);
-				child = xchg(&q->queues[i], child);
+				old = q->queues[i];
+				q->queues[i] = child;
 
-				if (child != &noop_qdisc) {
-					qdisc_tree_decrease_qlen(child,
-								 child->q.qlen);
-					qdisc_destroy(child);
+				if (old != &noop_qdisc) {
+					qdisc_tree_decrease_qlen(old,
+								 old->q.qlen);
+					qdisc_destroy(old);
 				}
 				sch_tree_unlock(sch);
 			}
@@ -451,7 +447,7 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
 	.priv_size	=	sizeof(struct multiq_sched_data),
 	.enqueue	=	multiq_enqueue,
 	.dequeue	=	multiq_dequeue,
-	.requeue	=	multiq_requeue,
+	.peek		=	multiq_peek,
 	.drop		=	multiq_drop,
 	.init		=	multiq_init,
 	.reset		=	multiq_reset,
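For context, this change drops multiq's ->requeue() in favour of a ->peek() operation: instead of dequeuing a packet, failing to send it and pushing it back (bumping qstats.requeues and rewinding curband), a caller can now look at the head packet without removing it and only dequeue once transmission is known to be possible. Below is a minimal sketch of that caller-side peek-then-dequeue pattern; it is not code from this commit, and try_transmit/can_send are hypothetical names — only ops->peek and ops->dequeue are taken from the diff above.

/* Hypothetical helper illustrating the peek-then-dequeue pattern that
 * replaces requeueing: the skb is removed from the child qdisc only
 * once the caller knows it can actually be sent.
 */
static struct sk_buff *try_transmit(struct Qdisc *child,
				    bool (*can_send)(const struct sk_buff *))
{
	struct sk_buff *skb;

	skb = child->ops->peek(child);		/* look at the head packet, leave it queued */
	if (!skb || !can_send(skb))
		return NULL;			/* packet stays queued; no requeue needed */

	return child->ops->dequeue(child);	/* now it is safe to remove it */
}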