about summary refs log tree commit diff stats
path: root/net/sched
diff options
context:
space:
mode:
Diffstat (limited to 'net/sched')
-rw-r--r--net/sched/cls_cgroup.c6
-rw-r--r--net/sched/cls_flow.c8
-rw-r--r--net/sched/cls_route.c2
-rw-r--r--net/sched/em_meta.c8
-rw-r--r--net/sched/sch_api.c4
-rw-r--r--net/sched/sch_cbq.c4
-rw-r--r--net/sched/sch_generic.c40
-rw-r--r--net/sched/sch_hfsc.c8
-rw-r--r--net/sched/sch_sfq.c2
-rw-r--r--net/sched/sch_teql.c26
10 files changed, 60 insertions, 48 deletions
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index e5becb92b3e7..e4877ca6727c 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -62,13 +62,7 @@ static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
62 62
63static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value) 63static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
64{ 64{
65 if (!cgroup_lock_live_group(cgrp))
66 return -ENODEV;
67
68 cgrp_cls_state(cgrp)->classid = (u32) value; 65 cgrp_cls_state(cgrp)->classid = (u32) value;
69
70 cgroup_unlock();
71
72 return 0; 66 return 0;
73} 67}
74 68
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 0ef4e3065bcd..9402a7fd3785 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -84,7 +84,7 @@ static u32 flow_get_dst(const struct sk_buff *skb)
84 case htons(ETH_P_IPV6): 84 case htons(ETH_P_IPV6):
85 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]); 85 return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
86 default: 86 default:
87 return addr_fold(skb->dst) ^ (__force u16)skb->protocol; 87 return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
88 } 88 }
89} 89}
90 90
@@ -163,7 +163,7 @@ static u32 flow_get_proto_dst(const struct sk_buff *skb)
163 break; 163 break;
164 } 164 }
165 default: 165 default:
166 res = addr_fold(skb->dst) ^ (__force u16)skb->protocol; 166 res = addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
167 } 167 }
168 168
169 return res; 169 return res;
@@ -251,8 +251,8 @@ fallback:
251static u32 flow_get_rtclassid(const struct sk_buff *skb) 251static u32 flow_get_rtclassid(const struct sk_buff *skb)
252{ 252{
253#ifdef CONFIG_NET_CLS_ROUTE 253#ifdef CONFIG_NET_CLS_ROUTE
254 if (skb->dst) 254 if (skb_dst(skb))
255 return skb->dst->tclassid; 255 return skb_dst(skb)->tclassid;
256#endif 256#endif
257 return 0; 257 return 0;
258} 258}
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index bdf1f4172eef..dd872d5383ef 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -137,7 +137,7 @@ static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
137 u32 id, h; 137 u32 id, h;
138 int iif, dont_cache = 0; 138 int iif, dont_cache = 0;
139 139
140 if ((dst = skb->dst) == NULL) 140 if ((dst = skb_dst(skb)) == NULL)
141 goto failure; 141 goto failure;
142 142
143 id = dst->tclassid; 143 id = dst->tclassid;
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index fad596bf32d7..266151ae85a3 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -246,11 +246,11 @@ META_COLLECTOR(int_tcindex)
246 246
247META_COLLECTOR(int_rtclassid) 247META_COLLECTOR(int_rtclassid)
248{ 248{
249 if (unlikely(skb->dst == NULL)) 249 if (unlikely(skb_dst(skb) == NULL))
250 *err = -1; 250 *err = -1;
251 else 251 else
252#ifdef CONFIG_NET_CLS_ROUTE 252#ifdef CONFIG_NET_CLS_ROUTE
253 dst->value = skb->dst->tclassid; 253 dst->value = skb_dst(skb)->tclassid;
254#else 254#else
255 dst->value = 0; 255 dst->value = 0;
256#endif 256#endif
@@ -258,10 +258,10 @@ META_COLLECTOR(int_rtclassid)
258 258
259META_COLLECTOR(int_rtiif) 259META_COLLECTOR(int_rtiif)
260{ 260{
261 if (unlikely(skb->rtable == NULL)) 261 if (unlikely(skb_rtable(skb) == NULL))
262 *err = -1; 262 *err = -1;
263 else 263 else
264 dst->value = skb->rtable->fl.iif; 264 dst->value = skb_rtable(skb)->fl.iif;
265} 265}
266 266
267/************************************************************************** 267/**************************************************************************
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 32009793307b..24d17ce9c294 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -484,7 +484,7 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
484 484
485 wd->qdisc->flags |= TCQ_F_THROTTLED; 485 wd->qdisc->flags |= TCQ_F_THROTTLED;
486 time = ktime_set(0, 0); 486 time = ktime_set(0, 0);
487 time = ktime_add_ns(time, PSCHED_US2NS(expires)); 487 time = ktime_add_ns(time, PSCHED_TICKS2NS(expires));
488 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); 488 hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS);
489} 489}
490EXPORT_SYMBOL(qdisc_watchdog_schedule); 490EXPORT_SYMBOL(qdisc_watchdog_schedule);
@@ -1680,7 +1680,7 @@ static int psched_show(struct seq_file *seq, void *v)
1680 1680
1681 hrtimer_get_res(CLOCK_MONOTONIC, &ts); 1681 hrtimer_get_res(CLOCK_MONOTONIC, &ts);
1682 seq_printf(seq, "%08x %08x %08x %08x\n", 1682 seq_printf(seq, "%08x %08x %08x %08x\n",
1683 (u32)NSEC_PER_USEC, (u32)PSCHED_US2NS(1), 1683 (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
1684 1000000, 1684 1000000,
1685 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts))); 1685 (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
1686 1686
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index d728d8111732..23a167670fd5 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -509,7 +509,7 @@ static void cbq_ovl_delay(struct cbq_class *cl)
509 q->pmask |= (1<<TC_CBQ_MAXPRIO); 509 q->pmask |= (1<<TC_CBQ_MAXPRIO);
510 510
511 expires = ktime_set(0, 0); 511 expires = ktime_set(0, 0);
512 expires = ktime_add_ns(expires, PSCHED_US2NS(sched)); 512 expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched));
513 if (hrtimer_try_to_cancel(&q->delay_timer) && 513 if (hrtimer_try_to_cancel(&q->delay_timer) &&
514 ktime_to_ns(ktime_sub( 514 ktime_to_ns(ktime_sub(
515 hrtimer_get_expires(&q->delay_timer), 515 hrtimer_get_expires(&q->delay_timer),
@@ -620,7 +620,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
620 ktime_t time; 620 ktime_t time;
621 621
622 time = ktime_set(0, 0); 622 time = ktime_set(0, 0);
623 time = ktime_add_ns(time, PSCHED_US2NS(now + delay)); 623 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
624 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); 624 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS);
625 } 625 }
626 626
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5f5efe4e6072..27d03816ec3e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -196,6 +196,21 @@ void __qdisc_run(struct Qdisc *q)
196 clear_bit(__QDISC_STATE_RUNNING, &q->state); 196 clear_bit(__QDISC_STATE_RUNNING, &q->state);
197} 197}
198 198
199unsigned long dev_trans_start(struct net_device *dev)
200{
201 unsigned long val, res = dev->trans_start;
202 unsigned int i;
203
204 for (i = 0; i < dev->num_tx_queues; i++) {
205 val = netdev_get_tx_queue(dev, i)->trans_start;
206 if (val && time_after(val, res))
207 res = val;
208 }
209 dev->trans_start = res;
210 return res;
211}
212EXPORT_SYMBOL(dev_trans_start);
213
199static void dev_watchdog(unsigned long arg) 214static void dev_watchdog(unsigned long arg)
200{ 215{
201 struct net_device *dev = (struct net_device *)arg; 216 struct net_device *dev = (struct net_device *)arg;
@@ -205,25 +220,30 @@ static void dev_watchdog(unsigned long arg)
205 if (netif_device_present(dev) && 220 if (netif_device_present(dev) &&
206 netif_running(dev) && 221 netif_running(dev) &&
207 netif_carrier_ok(dev)) { 222 netif_carrier_ok(dev)) {
208 int some_queue_stopped = 0; 223 int some_queue_timedout = 0;
209 unsigned int i; 224 unsigned int i;
225 unsigned long trans_start;
210 226
211 for (i = 0; i < dev->num_tx_queues; i++) { 227 for (i = 0; i < dev->num_tx_queues; i++) {
212 struct netdev_queue *txq; 228 struct netdev_queue *txq;
213 229
214 txq = netdev_get_tx_queue(dev, i); 230 txq = netdev_get_tx_queue(dev, i);
215 if (netif_tx_queue_stopped(txq)) { 231 /*
216 some_queue_stopped = 1; 232 * old device drivers set dev->trans_start
233 */
234 trans_start = txq->trans_start ? : dev->trans_start;
235 if (netif_tx_queue_stopped(txq) &&
236 time_after(jiffies, (trans_start +
237 dev->watchdog_timeo))) {
238 some_queue_timedout = 1;
217 break; 239 break;
218 } 240 }
219 } 241 }
220 242
221 if (some_queue_stopped && 243 if (some_queue_timedout) {
222 time_after(jiffies, (dev->trans_start +
223 dev->watchdog_timeo))) {
224 char drivername[64]; 244 char drivername[64];
225 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n", 245 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
226 dev->name, netdev_drivername(dev, drivername, 64)); 246 dev->name, netdev_drivername(dev, drivername, 64), i);
227 dev->netdev_ops->ndo_tx_timeout(dev); 247 dev->netdev_ops->ndo_tx_timeout(dev);
228 } 248 }
229 if (!mod_timer(&dev->watchdog_timer, 249 if (!mod_timer(&dev->watchdog_timer,
@@ -602,8 +622,10 @@ static void transition_one_qdisc(struct net_device *dev,
602 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state); 622 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
603 623
604 rcu_assign_pointer(dev_queue->qdisc, new_qdisc); 624 rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
605 if (need_watchdog_p && new_qdisc != &noqueue_qdisc) 625 if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
626 dev_queue->trans_start = 0;
606 *need_watchdog_p = 1; 627 *need_watchdog_p = 1;
628 }
607} 629}
608 630
609void dev_activate(struct net_device *dev) 631void dev_activate(struct net_device *dev)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 5022f9c1f34b..362c2811b2df 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -372,7 +372,7 @@ cftree_update(struct hfsc_class *cl)
372 * ism: (psched_us/byte) << ISM_SHIFT 372 * ism: (psched_us/byte) << ISM_SHIFT
373 * dx: psched_us 373 * dx: psched_us
374 * 374 *
375 * The clock source resolution with ktime is 1.024us. 375 * The clock source resolution with ktime and PSCHED_SHIFT 10 is 1.024us.
376 * 376 *
377 * sm and ism are scaled in order to keep effective digits. 377 * sm and ism are scaled in order to keep effective digits.
378 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective 378 * SM_SHIFT and ISM_SHIFT are selected to keep at least 4 effective
@@ -383,9 +383,11 @@ cftree_update(struct hfsc_class *cl)
383 * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3 383 * bytes/1.024us 12.8e-3 128e-3 1280e-3 12800e-3 128000e-3
384 * 384 *
385 * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125 385 * 1.024us/byte 78.125 7.8125 0.78125 0.078125 0.0078125
386 *
387 * So, for PSCHED_SHIFT 10 we need: SM_SHIFT 20, ISM_SHIFT 18.
386 */ 388 */
387#define SM_SHIFT 20 389#define SM_SHIFT (30 - PSCHED_SHIFT)
388#define ISM_SHIFT 18 390#define ISM_SHIFT (8 + PSCHED_SHIFT)
389 391
390#define SM_MASK ((1ULL << SM_SHIFT) - 1) 392#define SM_MASK ((1ULL << SM_SHIFT) - 1)
391#define ISM_MASK ((1ULL << ISM_SHIFT) - 1) 393#define ISM_MASK ((1ULL << ISM_SHIFT) - 1)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 33133d27b539..8706920a6d45 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -149,7 +149,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
149 break; 149 break;
150 } 150 }
151 default: 151 default:
152 h = (unsigned long)skb->dst ^ skb->protocol; 152 h = (unsigned long)skb_dst(skb) ^ skb->protocol;
153 h2 = (unsigned long)skb->sk; 153 h2 = (unsigned long)skb->sk;
154 } 154 }
155 155
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 3b6418297231..9c002b6e0533 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -58,7 +58,6 @@ struct teql_master
58 struct net_device *dev; 58 struct net_device *dev;
59 struct Qdisc *slaves; 59 struct Qdisc *slaves;
60 struct list_head master_list; 60 struct list_head master_list;
61 struct net_device_stats stats;
62}; 61};
63 62
64struct teql_sched_data 63struct teql_sched_data
@@ -223,7 +222,7 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
223{ 222{
224 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0); 223 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
225 struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc); 224 struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
226 struct neighbour *mn = skb->dst->neighbour; 225 struct neighbour *mn = skb_dst(skb)->neighbour;
227 struct neighbour *n = q->ncache; 226 struct neighbour *n = q->ncache;
228 227
229 if (mn->tbl == NULL) 228 if (mn->tbl == NULL)
@@ -263,8 +262,8 @@ static inline int teql_resolve(struct sk_buff *skb,
263 return -ENODEV; 262 return -ENODEV;
264 263
265 if (dev->header_ops == NULL || 264 if (dev->header_ops == NULL ||
266 skb->dst == NULL || 265 skb_dst(skb) == NULL ||
267 skb->dst->neighbour == NULL) 266 skb_dst(skb)->neighbour == NULL)
268 return 0; 267 return 0;
269 return __teql_resolve(skb, skb_res, dev); 268 return __teql_resolve(skb, skb_res, dev);
270} 269}
@@ -272,6 +271,7 @@ static inline int teql_resolve(struct sk_buff *skb,
272static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev) 271static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
273{ 272{
274 struct teql_master *master = netdev_priv(dev); 273 struct teql_master *master = netdev_priv(dev);
274 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
275 struct Qdisc *start, *q; 275 struct Qdisc *start, *q;
276 int busy; 276 int busy;
277 int nores; 277 int nores;
@@ -308,11 +308,12 @@ restart:
308 if (!netif_tx_queue_stopped(slave_txq) && 308 if (!netif_tx_queue_stopped(slave_txq) &&
309 !netif_tx_queue_frozen(slave_txq) && 309 !netif_tx_queue_frozen(slave_txq) &&
310 slave_ops->ndo_start_xmit(skb, slave) == 0) { 310 slave_ops->ndo_start_xmit(skb, slave) == 0) {
311 txq_trans_update(slave_txq);
311 __netif_tx_unlock(slave_txq); 312 __netif_tx_unlock(slave_txq);
312 master->slaves = NEXT_SLAVE(q); 313 master->slaves = NEXT_SLAVE(q);
313 netif_wake_queue(dev); 314 netif_wake_queue(dev);
314 master->stats.tx_packets++; 315 txq->tx_packets++;
315 master->stats.tx_bytes += length; 316 txq->tx_bytes += length;
316 return 0; 317 return 0;
317 } 318 }
318 __netif_tx_unlock(slave_txq); 319 __netif_tx_unlock(slave_txq);
@@ -337,12 +338,12 @@ restart:
337 338
338 if (busy) { 339 if (busy) {
339 netif_stop_queue(dev); 340 netif_stop_queue(dev);
340 return 1; 341 return NETDEV_TX_BUSY;
341 } 342 }
342 master->stats.tx_errors++; 343 dev->stats.tx_errors++;
343 344
344drop: 345drop:
345 master->stats.tx_dropped++; 346 txq->tx_dropped++;
346 dev_kfree_skb(skb); 347 dev_kfree_skb(skb);
347 return 0; 348 return 0;
348} 349}
@@ -395,12 +396,6 @@ static int teql_master_close(struct net_device *dev)
395 return 0; 396 return 0;
396} 397}
397 398
398static struct net_device_stats *teql_master_stats(struct net_device *dev)
399{
400 struct teql_master *m = netdev_priv(dev);
401 return &m->stats;
402}
403
404static int teql_master_mtu(struct net_device *dev, int new_mtu) 399static int teql_master_mtu(struct net_device *dev, int new_mtu)
405{ 400{
406 struct teql_master *m = netdev_priv(dev); 401 struct teql_master *m = netdev_priv(dev);
@@ -425,7 +420,6 @@ static const struct net_device_ops teql_netdev_ops = {
425 .ndo_open = teql_master_open, 420 .ndo_open = teql_master_open,
426 .ndo_stop = teql_master_close, 421 .ndo_stop = teql_master_close,
427 .ndo_start_xmit = teql_master_xmit, 422 .ndo_start_xmit = teql_master_xmit,
428 .ndo_get_stats = teql_master_stats,
429 .ndo_change_mtu = teql_master_mtu, 423 .ndo_change_mtu = teql_master_mtu,
430}; 424};
431 425