author     Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer  Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit     8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree       a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /net/sched/sch_generic.c
parent     406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'net/sched/sch_generic.c')
-rw-r--r--   net/sched/sch_generic.c   61
1 file changed, 35 insertions(+), 26 deletions(-)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5d81a447851..69fca279880 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -53,19 +53,20 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
 static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
 {
 	struct sk_buff *skb = q->gso_skb;
-	const struct netdev_queue *txq = q->dev_queue;
 
 	if (unlikely(skb)) {
+		struct net_device *dev = qdisc_dev(q);
+		struct netdev_queue *txq;
+
 		/* check the reason of requeuing without tx lock first */
-		txq = netdev_get_tx_queue(txq->dev, skb_get_queue_mapping(skb));
-		if (!netif_xmit_frozen_or_stopped(txq)) {
+		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+		if (!netif_tx_queue_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
 		} else
 			skb = NULL;
 	} else {
-		if (!(q->flags & TCQ_F_ONETXQUEUE) || !netif_xmit_frozen_or_stopped(txq))
-			skb = q->dequeue(q);
+		skb = q->dequeue(q);
 	}
 
 	return skb;
@@ -85,8 +86,9 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 		 * deadloop is detected. Return OK to try the next skb.
 		 */
 		kfree_skb(skb);
-		net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
-				     dev_queue->dev->name);
+		if (net_ratelimit())
+			pr_warning("Dead loop on netdevice %s, fix it urgently!\n",
+				   dev_queue->dev->name);
 		ret = qdisc_qlen(q);
 	} else {
 		/*
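
Note on this hunk: the two logging forms are functionally equivalent, since net_warn_ratelimited() is the later helper and the right-hand side open-codes the same net_ratelimit() guard. As a sketch, the newer helper expands roughly as below (paraphrased; the exact definition in <linux/net.h> varies by kernel version):

	/* Paraphrase: the same net_ratelimit()-guarded printk that the
	 * right-hand side of this hunk spells out by hand. */
	#define net_warn_ratelimited(fmt, ...)			\
		do {						\
			if (net_ratelimit())			\
				pr_warn(fmt, ##__VA_ARGS__);	\
		} while (0)
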
@@ -119,7 +121,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_xmit_frozen_or_stopped(txq))
+	if (!netif_tx_queue_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -134,14 +136,14 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 		ret = handle_dev_cpu_collision(skb, txq, q);
 	} else {
 		/* Driver returned NETDEV_TX_BUSY - requeue skb */
-		if (unlikely(ret != NETDEV_TX_BUSY))
-			net_warn_ratelimited("BUG %s code %d qlen %d\n",
-					     dev->name, ret, q->q.qlen);
+		if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
+			pr_warning("BUG %s code %d qlen %d\n",
+				   dev->name, ret, q->q.qlen);
 
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && netif_xmit_frozen_or_stopped(txq))
+	if (ret && netif_tx_queue_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
@@ -240,11 +242,10 @@ static void dev_watchdog(unsigned long arg)
 			 * old device drivers set dev->trans_start
 			 */
 			trans_start = txq->trans_start ? : dev->trans_start;
-			if (netif_xmit_stopped(txq) &&
+			if (netif_tx_queue_stopped(txq) &&
 			    time_after(jiffies, (trans_start +
 						 dev->watchdog_timeo))) {
 				some_queue_timedout = 1;
-				txq->trans_timeout++;
 				break;
 			}
 		}
@@ -323,6 +324,24 @@ void netif_carrier_off(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_carrier_off);
 
+/**
+ * netif_notify_peers - notify network peers about existence of @dev
+ * @dev: network device
+ *
+ * Generate traffic such that interested network peers are aware of
+ * @dev, such as by generating a gratuitous ARP. This may be used when
+ * a device wants to inform the rest of the network about some sort of
+ * reconfiguration such as a failover event or virtual machine
+ * migration.
+ */
+void netif_notify_peers(struct net_device *dev)
+{
+	rtnl_lock();
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
+	rtnl_unlock();
+}
+EXPORT_SYMBOL(netif_notify_peers);
+
 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
    under all circumstances. It is difficult to invent anything faster or
    cheaper.
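
Note: the kernel-doc block added in this hunk describes when netif_notify_peers() is meant to be called. A minimal usage sketch follows; the failover-completion handler is hypothetical, for illustration only:

	#include <linux/netdevice.h>

	/* Hypothetical failover-completion hook: once traffic has moved to
	 * this device, trigger NETDEV_NOTIFY_PEERS so a gratuitous ARP
	 * tells switches and neighbors where the MAC address now lives.
	 * netif_notify_peers() takes the rtnl lock itself, so it must be
	 * called from process context without rtnl already held. */
	static void example_failover_complete(struct net_device *dev)
	{
		netif_carrier_on(dev);
		netif_notify_peers(dev);
	}
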
@@ -492,8 +511,7 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
 	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
 
 	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
-	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
-		goto nla_put_failure;
+	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
 	return skb->len;
 
 nla_put_failure:
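
Note: the right-hand side uses the older NLA_PUT() macro, which hides the error branch that the left-hand side writes out. It relies on a local nla_put_failure label in the enclosing function, which is why the label survives even though no explicit goto remains visible. The macro was defined in <net/netlink.h> roughly as:

	/* Older netlink helper: on failure, jumps to an nla_put_failure
	 * label that the calling function is required to provide. */
	#define NLA_PUT(skb, attrtype, attrlen, data)			\
		do {							\
			if (unlikely(nla_put(skb, attrtype,		\
					     attrlen, data) < 0))	\
				goto nla_put_failure;			\
		} while (0)
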
@@ -526,8 +544,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
-static struct lock_class_key qdisc_tx_busylock;
-
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 			  struct Qdisc_ops *ops)
 {
@@ -535,7 +551,6 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	struct Qdisc *sch;
 	unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
 	int err = -ENOBUFS;
-	struct net_device *dev = dev_queue->dev;
 
 	p = kzalloc_node(size, GFP_KERNEL,
 			 netdev_queue_numa_node_read(dev_queue));
@@ -555,16 +570,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	}
 	INIT_LIST_HEAD(&sch->list);
 	skb_queue_head_init(&sch->q);
-
 	spin_lock_init(&sch->busylock);
-	lockdep_set_class(&sch->busylock,
-			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
 	sch->dequeue = ops->dequeue;
 	sch->dev_queue = dev_queue;
-	dev_hold(dev);
+	dev_hold(qdisc_dev(sch));
 	atomic_set(&sch->refcnt, 1);
 
 	return sch;
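
Note: both sides take a reference on the underlying device here; the right-hand side simply reaches it through the qdisc_dev() accessor instead of the local variable this patch removes. That accessor is a one-liner in <net/sch_generic.h>:

	/* A qdisc's device is reached through the tx queue it is
	 * attached to. */
	static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
	{
		return qdisc->dev_queue->dev;
	}
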
@@ -685,8 +696,6 @@ static void attach_one_default_qdisc(struct net_device *dev,
 			netdev_info(dev, "activation failed\n");
 			return;
 		}
-		if (!netif_is_multiqueue(dev))
-			qdisc->flags |= TCQ_F_ONETXQUEUE;
 	}
 	dev_queue->qdisc_sleeping = qdisc;
 }