author	Jarek Poplawski <jarkao2@gmail.com>	2008-11-14 01:56:30 -0500
committer	David S. Miller <davem@davemloft.net>	2008-11-14 01:56:30 -0500
commit	f30ab418a1d3c5a8b83493e7d70d6876a74aa0ce (patch)
tree	271f0d093d2436b0d0ebdff151fc4f5b1fb15f21 /net/sched/sch_multiq.c
parent	38a7ddffa4b79d7b1fbc9bf2fa82b21b72622858 (diff)
pkt_sched: Remove qdisc->ops->requeue() etc.
After implementing qdisc->ops->peek() and changing sch_netem into a classless qdisc, there are no more qdisc->ops->requeue() users. This patch removes the method together with its wrappers (qdisc_requeue()), and also the now-unused qdisc->requeue structure. There are a few minor fixes of warnings (htb_enqueue()) and comments along the way.

The idea to kill ->requeue() and a similar patch were first developed by David S. Miller.

Signed-off-by: Jarek Poplawski <jarkao2@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
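For context on why ->requeue() becomes unnecessary: a non-work-conserving qdisc can emulate peek() by dequeuing once and parking the skb until the next real dequeue. A minimal sketch of that pattern, modeled on the qdisc_peek_dequeued() helper introduced in the same patch series (the helper itself is not part of this commit's diff, so details here are an approximation):

/* Pseudo-peek for a non-work-conserving qdisc: dequeue the skb once
 * and park it in sch->gso_skb.  peek() returns it without consuming
 * it; the next real dequeue hands out the parked skb first.  With
 * this, callers never need to push an skb back via ->requeue().
 */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* gso_skb can be reused here because peek() isn't called for
	 * root qdiscs, the only other users of the GSO stash.
	 */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			sch->q.qlen++;	/* still accounted as queued */
	}
	return sch->gso_skb;
}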
Diffstat (limited to 'net/sched/sch_multiq.c')
-rw-r--r--	net/sched/sch_multiq.c	39
1 file changed, 2 insertions(+), 37 deletions(-)
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 155648d23b7c..f645ac55a1a1 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -92,40 +92,6 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	return ret;
 }
 
-
-static int
-multiq_requeue(struct sk_buff *skb, struct Qdisc *sch)
-{
-	struct Qdisc *qdisc;
-	struct multiq_sched_data *q = qdisc_priv(sch);
-	int ret;
-
-	qdisc = multiq_classify(skb, sch, &ret);
-#ifdef CONFIG_NET_CLS_ACT
-	if (qdisc == NULL) {
-		if (ret & __NET_XMIT_BYPASS)
-			sch->qstats.drops++;
-		kfree_skb(skb);
-		return ret;
-	}
-#endif
-
-	ret = qdisc->ops->requeue(skb, qdisc);
-	if (ret == NET_XMIT_SUCCESS) {
-		sch->q.qlen++;
-		sch->qstats.requeues++;
-		if (q->curband)
-			q->curband--;
-		else
-			q->curband = q->bands - 1;
-		return NET_XMIT_SUCCESS;
-	}
-	if (net_xmit_drop_count(ret))
-		sch->qstats.drops++;
-	return ret;
-}
-
-
 static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
@@ -140,7 +106,7 @@ static struct sk_buff *multiq_dequeue(struct Qdisc *sch)
 		q->curband = 0;
 
 		/* Check that target subqueue is available before
-		 * pulling an skb to avoid excessive requeues
+		 * pulling an skb to avoid head-of-line blocking.
 		 */
 		if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) {
 			qdisc = q->queues[q->curband];
@@ -170,7 +136,7 @@ static struct sk_buff *multiq_peek(struct Qdisc *sch)
 		curband = 0;
 
 		/* Check that target subqueue is available before
-		 * pulling an skb to avoid excessive requeues
+		 * pulling an skb to avoid head-of-line blocking.
 		 */
 		if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) {
 			qdisc = q->queues[curband];
@@ -480,7 +446,6 @@ static struct Qdisc_ops multiq_qdisc_ops __read_mostly = {
 	.enqueue	=	multiq_enqueue,
 	.dequeue	=	multiq_dequeue,
 	.peek		=	multiq_peek,
-	.requeue	=	multiq_requeue,
 	.drop		=	multiq_drop,
 	.init		=	multiq_init,
 	.reset		=	multiq_reset,
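Note: with ->requeue() gone, a packet that the driver cannot accept is handled entirely by the core. A rough sketch of that path, modeled on dev_requeue_skb() in net/sched/sch_generic.c of this era (an approximation from the surrounding patch series, not part of this diff):

/* The rejected skb is parked in q->gso_skb and the qdisc is simply
 * rescheduled; the next dequeue pass returns the parked skb before
 * asking the qdisc for more, so no per-qdisc ->requeue() is needed.
 */
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	q->gso_skb = skb;
	__netif_schedule(q);

	return 0;
}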