Diffstat (limited to 'drivers/net/sunqe.c')
 drivers/net/sunqe.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++++++--------------------------------------------------------
 1 file changed, 49 insertions(+), 56 deletions(-)
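
Commit note: this patch converts sunqe from a private statistics copy (qep->net_stats inside struct sunqe, exported through a get_stats hook) to the stats member embedded in struct net_device, deleting the now-redundant qe_get_stats(). A minimal before/after sketch of the two patterns (my_priv, my_get_stats, and my_count_tx_error are hypothetical names, not from this driver):

	/* Old pattern (sketch): the driver keeps its own counter copy
	 * and returns it through a get_stats callback. */
	struct my_priv {
		struct net_device_stats net_stats;	/* private copy */
	};

	static struct net_device_stats *my_get_stats(struct net_device *dev)
	{
		struct my_priv *p = dev->priv;

		return &p->net_stats;
	}

	/* New pattern (sketch): count straight into the net_device's
	 * own structure; no getter is needed because the core's
	 * default get_stats hook already returns &dev->stats. */
	static void my_count_tx_error(struct net_device *dev)
	{
		dev->stats.tx_errors++;
	}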
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index b5c2974fd625..ff23c6489efd 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -260,31 +260,31 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 
 	if (qe_status & CREG_STAT_EDEFER) {
 		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 	}
 
 	if (qe_status & CREG_STAT_CLOSS) {
 		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_carrier_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_carrier_errors++;
 	}
 
 	if (qe_status & CREG_STAT_ERETRIES) {
 		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_LCOLL) {
 		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.collisions++;
+		dev->stats.tx_errors++;
+		dev->stats.collisions++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_FUFLOW) {
 		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
@@ -297,104 +297,104 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 	}
 
 	if (qe_status & CREG_STAT_CCOFLOW) {
-		qep->net_stats.tx_errors += 256;
-		qep->net_stats.collisions += 256;
+		dev->stats.tx_errors += 256;
+		dev->stats.collisions += 256;
 	}
 
 	if (qe_status & CREG_STAT_TXDERROR) {
 		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_aborted_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_TXLERR) {
 		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
-		qep->net_stats.tx_errors++;
+		dev->stats.tx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_TXPERR) {
 		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_aborted_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_TXSERR) {
 		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
-		qep->net_stats.tx_errors++;
-		qep->net_stats.tx_aborted_errors++;
+		dev->stats.tx_errors++;
+		dev->stats.tx_aborted_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_RCCOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.collisions += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.collisions += 256;
 	}
 
 	if (qe_status & CREG_STAT_RUOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_over_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_over_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_MCOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_missed_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_missed_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_RXFOFLOW) {
 		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_over_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_over_errors++;
 	}
 
 	if (qe_status & CREG_STAT_RLCOLL) {
 		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.collisions++;
+		dev->stats.rx_errors++;
+		dev->stats.collisions++;
 	}
 
 	if (qe_status & CREG_STAT_FCOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_frame_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_frame_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_CECOFLOW) {
-		qep->net_stats.rx_errors += 256;
-		qep->net_stats.rx_crc_errors += 256;
+		dev->stats.rx_errors += 256;
+		dev->stats.rx_crc_errors += 256;
 	}
 
 	if (qe_status & CREG_STAT_RXDROP) {
 		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_dropped++;
-		qep->net_stats.rx_missed_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_dropped++;
+		dev->stats.rx_missed_errors++;
 	}
 
 	if (qe_status & CREG_STAT_RXSMALL) {
 		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_length_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_length_errors++;
 	}
 
 	if (qe_status & CREG_STAT_RXLERR) {
 		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
-		qep->net_stats.rx_errors++;
+		dev->stats.rx_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_RXPERR) {
 		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_missed_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_missed_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
 	if (qe_status & CREG_STAT_RXSERR) {
 		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
-		qep->net_stats.rx_errors++;
-		qep->net_stats.rx_missed_errors++;
+		dev->stats.rx_errors++;
+		dev->stats.rx_missed_errors++;
 		mace_hwbug_workaround = 1;
 	}
 
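The bare += 256 bumps in this hunk are overflow accounting: the *OFLOW status bits read as overflow interrupts from 8-bit on-chip event counters, so each one represents 256 events since the last service (an inference from the code, not from a datasheet). Folding one such bit into the software statistics looks roughly like this sketch (qe_count_collision_wrap is a hypothetical helper; the driver does the same thing inline):

	/* Sketch: fold one wrapped 8-bit hardware counter into the
	 * software statistics; 256 events elapsed per overflow bit. */
	static void qe_count_collision_wrap(struct net_device *dev, u32 qe_status)
	{
		if (qe_status & CREG_STAT_CCOFLOW) {
			dev->stats.tx_errors += 256;
			dev->stats.collisions += 256;
		}
	}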
@@ -409,6 +409,7 @@ static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
 static void qe_rx(struct sunqe *qep)
 {
 	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
+	struct net_device *dev = qep->dev;
 	struct qe_rxd *this;
 	struct sunqe_buffers *qbufs = qep->buffers;
 	__u32 qbufs_dvma = qep->buffers_dvma;
@@ -428,14 +429,14 @@ static void qe_rx(struct sunqe *qep)
 
 		/* Check for errors. */
 		if (len < ETH_ZLEN) {
-			qep->net_stats.rx_errors++;
-			qep->net_stats.rx_length_errors++;
-			qep->net_stats.rx_dropped++;
+			dev->stats.rx_errors++;
+			dev->stats.rx_length_errors++;
+			dev->stats.rx_dropped++;
 		} else {
 			skb = dev_alloc_skb(len + 2);
 			if (skb == NULL) {
 				drops++;
-				qep->net_stats.rx_dropped++;
+				dev->stats.rx_dropped++;
 			} else {
 				skb_reserve(skb, 2);
 				skb_put(skb, len);
@@ -444,8 +445,8 @@ static void qe_rx(struct sunqe *qep)
 				skb->protocol = eth_type_trans(skb, qep->dev);
 				netif_rx(skb);
 				qep->dev->last_rx = jiffies;
-				qep->net_stats.rx_packets++;
-				qep->net_stats.rx_bytes += len;
+				dev->stats.rx_packets++;
+				dev->stats.rx_bytes += len;
 			}
 		}
 		end_rxd->rx_addr = this_qbuf_dvma;
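Besides the stats conversion, these qe_rx() hunks show why the patch adds the dev local at the top of the function: the hot receive loop can then write dev->stats without chasing qep->dev on every packet. The surrounding code is the standard receive handoff of this era; a self-contained sketch, where rx_one is a hypothetical helper and skb_copy_to_linear_data stands in for the copy the driver performs between skb_put() and eth_type_trans():

	/* Sketch (kernel context assumed): hand one received frame to
	 * the stack. The 2-byte reserve shifts the 14-byte Ethernet
	 * header so the IP header lands on a 4-byte boundary. */
	static void rx_one(struct net_device *dev, const void *buf, int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len + 2);

		if (!skb) {
			dev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 2);			/* align IP header */
		skb_put(skb, len);			/* set frame length */
		skb_copy_to_linear_data(skb, buf, len);
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);				/* queue for softirq */
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}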
@@ -603,8 +604,8 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
 
-	qep->net_stats.tx_packets++;
-	qep->net_stats.tx_bytes += len;
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += len;
 
 	if (TX_BUFFS_AVAIL(qep) <= 0) {
 		/* Halt the net queue and enable tx interrupts.
@@ -622,13 +623,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return 0;
 }
 
-static struct net_device_stats *qe_get_stats(struct net_device *dev)
-{
-	struct sunqe *qep = (struct sunqe *) dev->priv;
-
-	return &qep->net_stats;
-}
-
 static void qe_set_multicast(struct net_device *dev)
 {
 	struct sunqe *qep = (struct sunqe *) dev->priv;
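Deleting qe_get_stats() is safe because drivers that leave the get_stats hook unset receive the core's default, which simply returns the embedded structure. From memory (a hedged sketch, not a verbatim copy), the fallback in net/core/dev.c of this era amounts to:

	/* Sketch of the core default installed by alloc_netdev()
	 * when a driver provides no get_stats of its own. */
	static struct net_device_stats *internal_stats(struct net_device *dev)
	{
		return &dev->stats;
	}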
@@ -903,7 +897,6 @@ static int __init qec_ether_init(struct sbus_dev *sdev)
 	dev->open = qe_open;
 	dev->stop = qe_close;
 	dev->hard_start_xmit = qe_start_xmit;
-	dev->get_stats = qe_get_stats;
 	dev->set_multicast_list = qe_set_multicast;
 	dev->tx_timeout = qe_tx_timeout;
 	dev->watchdog_timeo = 5*HZ;
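The remaining direct method assignments (dev->open, dev->hard_start_xmit, ...) are the pre-net_device_ops style. For contrast only — this is a later kernel convention and not part of this commit — the same wiring under the ops-table style would look roughly like:

	/* Hypothetical later-era equivalent; not part of this patch. */
	static const struct net_device_ops qec_netdev_ops = {
		.ndo_open		= qe_open,
		.ndo_stop		= qe_close,
		.ndo_start_xmit		= qe_start_xmit,
		.ndo_set_multicast_list	= qe_set_multicast,
		.ndo_tx_timeout		= qe_tx_timeout,
	};

	/* ...and in qec_ether_init(): dev->netdev_ops = &qec_netdev_ops; */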