Diffstat (limited to 'net/sched/sch_teql.c')
-rw-r--r--  net/sched/sch_teql.c | 77
1 file changed, 46 insertions(+), 31 deletions(-)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index feaabc103ce6..45cd30098e34 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -53,38 +53,38 @@
    which will not break load balancing, though native slave
    traffic will have the highest priority. */
 
-struct teql_master
-{
+struct teql_master {
 	struct Qdisc_ops qops;
 	struct net_device *dev;
 	struct Qdisc *slaves;
 	struct list_head master_list;
+	unsigned long tx_bytes;
+	unsigned long tx_packets;
+	unsigned long tx_errors;
+	unsigned long tx_dropped;
 };
 
-struct teql_sched_data
-{
+struct teql_sched_data {
 	struct Qdisc *next;
 	struct teql_master *m;
 	struct neighbour *ncache;
 	struct sk_buff_head q;
 };
 
-#define NEXT_SLAVE(q) (((struct teql_sched_data*)qdisc_priv(q))->next)
+#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)
 
-#define FMASK (IFF_BROADCAST|IFF_POINTOPOINT)
+#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
 
 /* "teql*" qdisc routines */
 
 static int
-teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct net_device *dev = qdisc_dev(sch);
 	struct teql_sched_data *q = qdisc_priv(sch);
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
 		return NET_XMIT_SUCCESS;
 	}
 
@@ -94,7 +94,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 }
 
 static struct sk_buff *
-teql_dequeue(struct Qdisc* sch)
+teql_dequeue(struct Qdisc *sch)
 {
 	struct teql_sched_data *dat = qdisc_priv(sch);
 	struct netdev_queue *dat_queue;
@@ -108,19 +108,21 @@ teql_dequeue(struct Qdisc* sch)
 			dat->m->slaves = sch;
 			netif_wake_queue(m);
 		}
+	} else {
+		qdisc_bstats_update(sch, skb);
 	}
 	sch->q.qlen = dat->q.qlen + dat_queue->qdisc->q.qlen;
 	return skb;
 }
 
 static struct sk_buff *
-teql_peek(struct Qdisc* sch)
+teql_peek(struct Qdisc *sch)
 {
 	/* teql is meant to be used as root qdisc */
 	return NULL;
 }
 
-static __inline__ void
+static inline void
 teql_neigh_release(struct neighbour *n)
 {
 	if (n)
@@ -128,7 +130,7 @@ teql_neigh_release(struct neighbour *n)
 }
 
 static void
-teql_reset(struct Qdisc* sch)
+teql_reset(struct Qdisc *sch)
 {
 	struct teql_sched_data *dat = qdisc_priv(sch);
 
@@ -138,13 +140,14 @@ teql_reset(struct Qdisc* sch)
 }
 
 static void
-teql_destroy(struct Qdisc* sch)
+teql_destroy(struct Qdisc *sch)
 {
 	struct Qdisc *q, *prev;
 	struct teql_sched_data *dat = qdisc_priv(sch);
 	struct teql_master *master = dat->m;
 
-	if ((prev = master->slaves) != NULL) {
+	prev = master->slaves;
+	if (prev) {
 		do {
 			q = NEXT_SLAVE(prev);
 			if (q == sch) {
@@ -176,7 +179,7 @@ teql_destroy(struct Qdisc* sch)
 static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
 {
 	struct net_device *dev = qdisc_dev(sch);
-	struct teql_master *m = (struct teql_master*)sch->ops;
+	struct teql_master *m = (struct teql_master *)sch->ops;
 	struct teql_sched_data *q = qdisc_priv(sch);
 
 	if (dev->hard_header_len > m->dev->hard_header_len)
@@ -241,11 +244,11 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
 	}
 	if (neigh_event_send(n, skb_res) == 0) {
 		int err;
+		char haddr[MAX_ADDR_LEN];
 
-		read_lock(&n->lock);
-		err = dev_hard_header(skb, dev, ntohs(skb->protocol),
-				      n->ha, NULL, skb->len);
-		read_unlock(&n->lock);
+		neigh_ha_snapshot(haddr, n, dev);
+		err = dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
+				      NULL, skb->len);
 
 		if (err < 0) {
 			neigh_release(n);
@@ -275,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb,
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	struct Qdisc *start, *q;
 	int busy;
 	int nores;
@@ -288,7 +290,8 @@ restart:
 	nores = 0;
 	busy = 0;
 
-	if ((q = start) == NULL)
+	q = start;
+	if (!q)
 		goto drop;
 
 	do {
@@ -309,15 +312,14 @@ restart:
 			if (__netif_tx_trylock(slave_txq)) {
 				unsigned int length = qdisc_pkt_len(skb);
 
-				if (!netif_tx_queue_stopped(slave_txq) &&
-				    !netif_tx_queue_frozen(slave_txq) &&
+				if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
 				    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
 					txq_trans_update(slave_txq);
 					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
-					txq->tx_packets++;
-					txq->tx_bytes += length;
+					master->tx_packets++;
+					master->tx_bytes += length;
 					return NETDEV_TX_OK;
 				}
 				__netif_tx_unlock(slave_txq);
@@ -344,20 +346,20 @@ restart:
 		netif_stop_queue(dev);
 		return NETDEV_TX_BUSY;
 	}
-	dev->stats.tx_errors++;
+	master->tx_errors++;
 
 drop:
-	txq->tx_dropped++;
+	master->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
 
 static int teql_master_open(struct net_device *dev)
 {
-	struct Qdisc * q;
+	struct Qdisc *q;
 	struct teql_master *m = netdev_priv(dev);
 	int mtu = 0xFFFE;
-	unsigned flags = IFF_NOARP|IFF_MULTICAST;
+	unsigned int flags = IFF_NOARP | IFF_MULTICAST;
 
 	if (m->slaves == NULL)
 		return -EUNATCH;
@@ -400,6 +402,18 @@ static int teql_master_close(struct net_device *dev)
 	return 0;
 }
 
+static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+						     struct rtnl_link_stats64 *stats)
+{
+	struct teql_master *m = netdev_priv(dev);
+
+	stats->tx_packets = m->tx_packets;
+	stats->tx_bytes = m->tx_bytes;
+	stats->tx_errors = m->tx_errors;
+	stats->tx_dropped = m->tx_dropped;
+	return stats;
+}
+
 static int teql_master_mtu(struct net_device *dev, int new_mtu)
 {
 	struct teql_master *m = netdev_priv(dev);
@@ -413,7 +427,7 @@ static int teql_master_mtu(struct net_device *dev, int new_mtu)
 		do {
 			if (new_mtu > qdisc_dev(q)->mtu)
 				return -EINVAL;
-		} while ((q=NEXT_SLAVE(q)) != m->slaves);
+		} while ((q = NEXT_SLAVE(q)) != m->slaves);
 	}
 
 	dev->mtu = new_mtu;
@@ -424,6 +438,7 @@ static const struct net_device_ops teql_netdev_ops = {
 	.ndo_open	= teql_master_open,
 	.ndo_stop	= teql_master_close,
 	.ndo_start_xmit	= teql_master_xmit,
+	.ndo_get_stats64 = teql_master_stats64,
 	.ndo_change_mtu	= teql_master_mtu,
 };
 
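The statistics side of the change above follows the kernel's ndo_get_stats64 pattern: the driver keeps private counters and copies them into the rtnl_link_stats64 structure the core passes in, instead of writing into txq->tx_* or dev->stats. Below is a minimal self-contained sketch of that pattern, assuming only the APIs visible in the diff; the stats_demo_* names are hypothetical and not part of the patch.

#include <linux/netdevice.h>

/* Hypothetical private state, mirroring the counters teql_master grew. */
struct stats_demo_priv {
	unsigned long tx_bytes;
	unsigned long tx_packets;
	unsigned long tx_errors;
	unsigned long tx_dropped;
};

static struct rtnl_link_stats64 *stats_demo_get_stats64(struct net_device *dev,
							struct rtnl_link_stats64 *stats)
{
	struct stats_demo_priv *p = netdev_priv(dev);

	/* Fill in only the fields this device tracks; dev_get_stats()
	 * zeroes the structure before calling, so the rest stay 0. */
	stats->tx_packets = p->tx_packets;
	stats->tx_bytes = p->tx_bytes;
	stats->tx_errors = p->tx_errors;
	stats->tx_dropped = p->tx_dropped;
	return stats;
}

static const struct net_device_ops stats_demo_ops = {
	.ndo_get_stats64 = stats_demo_get_stats64,
};

Once wired into net_device_ops as above, these totals are what dev_get_stats() returns and what tools such as `ip -s link show` display for the device.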
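The __teql_resolve hunk replaces an open-coded read_lock(&n->lock) around n->ha with neigh_ha_snapshot(), which copies the neighbour's hardware address into a caller-supplied buffer under the neighbour's own synchronization. A hedged sketch of that caller pattern, using the same kernel APIs the patch itself calls; resolve_demo_header() is a hypothetical name:

#include <linux/netdevice.h>
#include <net/neighbour.h>

static int resolve_demo_header(struct sk_buff *skb, struct net_device *dev,
			       struct neighbour *n)
{
	char haddr[MAX_ADDR_LEN];

	/* Take a consistent local copy of n->ha so we need not hold
	 * any neighbour lock across the header construction. */
	neigh_ha_snapshot(haddr, n, dev);

	/* Build the link-layer header from the stable copy. */
	return dev_hard_header(skb, dev, ntohs(skb->protocol), haddr,
			       NULL, skb->len);
}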