| author | David S. Miller <davem@davemloft.net> | 2008-05-23 03:05:14 -0400 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2008-05-23 03:05:14 -0400 |
| commit | 7bece8155be133cd67c41eed2b31c60a310609de (patch) | |
| tree | 906a62dcf10c1f2e6c1188a2b6e386668fcd9fe3 | |
| parent | b9a2f2e450b0f770bb4347ae8d48eb2dea701e24 (diff) | |
| parent | bdefff1f54cb76a19700663f211350de2f65cc91 (diff) | |
Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6
48 files changed, 853 insertions, 576 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index e6c545fe5f58..fe6d84105e55 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1063,7 +1063,6 @@ el3_rx(struct net_device *dev)
             struct sk_buff *skb;
 
             skb = dev_alloc_skb(pkt_len+5);
-            dev->stats.rx_bytes += pkt_len;
             if (el3_debug > 4)
                 printk("Receiving packet size %d status %4.4x.\n",
                        pkt_len, rx_status);
@@ -1078,6 +1077,7 @@ el3_rx(struct net_device *dev)
                 skb->protocol = eth_type_trans(skb,dev);
                 netif_rx(skb);
                 dev->last_rx = jiffies;
+                dev->stats.rx_bytes += pkt_len;
                 dev->stats.rx_packets++;
                 continue;
             }
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 3634b5fd7919..7023d77bf380 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -1239,12 +1239,7 @@ static int au1000_rx(struct net_device *dev)
  */
 static irqreturn_t au1000_interrupt(int irq, void *dev_id)
 {
-    struct net_device *dev = (struct net_device *) dev_id;
-
-    if (dev == NULL) {
-        printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
-        return IRQ_RETVAL(1);
-    }
+    struct net_device *dev = dev_id;
 
     /* Handle RX interrupts first to minimize chance of overrun */
 
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 89c0018132ec..41443435ab1c 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -22,7 +22,6 @@
 #include <linux/crc32.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
-#include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 2b5740b3d182..7f3f62e1b113 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -38,6 +38,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <asm/gpio.h>
+#include <asm/atomic.h>
 
 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
@@ -187,6 +188,7 @@ struct cpmac_desc {
 #define CPMAC_EOQ 0x1000
     struct sk_buff *skb;
     struct cpmac_desc *next;
+    struct cpmac_desc *prev;
     dma_addr_t mapping;
     dma_addr_t data_mapping;
 };
@@ -208,6 +210,7 @@ struct cpmac_priv {
     struct work_struct reset_work;
     struct platform_device *pdev;
     struct napi_struct napi;
+    atomic_t reset_pending;
 };
 
 static irqreturn_t cpmac_irq(int, void *);
@@ -241,6 +244,16 @@ static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
     printk("\n");
 }
 
+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+    struct cpmac_priv *priv = netdev_priv(dev);
+    struct cpmac_desc *dump = priv->rx_head;
+    do {
+        cpmac_dump_desc(dev, dump);
+        dump = dump->next;
+    } while (dump != priv->rx_head);
+}
+
 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
 {
     int i;
@@ -412,21 +425,42 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
 static int cpmac_poll(struct napi_struct *napi, int budget)
 {
     struct sk_buff *skb;
-    struct cpmac_desc *desc;
-    int received = 0;
+    struct cpmac_desc *desc, *restart;
     struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+    int received = 0, processed = 0;
 
     spin_lock(&priv->rx_lock);
     if (unlikely(!priv->rx_head)) {
         if (netif_msg_rx_err(priv) && net_ratelimit())
             printk(KERN_WARNING "%s: rx: polling, but no queue\n",
                    priv->dev->name);
+        spin_unlock(&priv->rx_lock);
         netif_rx_complete(priv->dev, napi);
         return 0;
     }
 
     desc = priv->rx_head;
+    restart = NULL;
     while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+        processed++;
+
+        if ((desc->dataflags & CPMAC_EOQ) != 0) {
+            /* The last update to eoq->hw_next didn't happen
+             * soon enough, and the receiver stopped here.
+             * Remember this descriptor so we can restart
+             * the receiver after freeing some space.
+             */
+            if (unlikely(restart)) {
+                if (netif_msg_rx_err(priv))
+                    printk(KERN_ERR "%s: poll found a"
+                           " duplicate EOQ: %p and %p\n",
+                           priv->dev->name, restart, desc);
+                goto fatal_error;
+            }
+
+            restart = desc->next;
+        }
+
         skb = cpmac_rx_one(priv, desc);
         if (likely(skb)) {
             netif_receive_skb(skb);
@@ -435,19 +469,90 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
         desc = desc->next;
     }
 
+    if (desc != priv->rx_head) {
+        /* We freed some buffers, but not the whole ring,
+         * add what we did free to the rx list */
+        desc->prev->hw_next = (u32)0;
+        priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+    }
+
+    /* Optimization: If we did not actually process an EOQ (perhaps because
+     * of quota limits), check to see if the tail of the queue has EOQ set.
+     * We should immediately restart in that case so that the receiver can
+     * restart and run in parallel with more packet processing.
+     * This lets us handle slightly larger bursts before running
+     * out of ring space (assuming dev->weight < ring_size) */
+
+    if (!restart &&
+        (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
+        == CPMAC_EOQ &&
+        (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+        /* reset EOQ so the poll loop (above) doesn't try to
+         * restart this when it eventually gets to this descriptor.
+         */
+        priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+        restart = priv->rx_head;
+    }
+
+    if (restart) {
+        priv->dev->stats.rx_errors++;
+        priv->dev->stats.rx_fifo_errors++;
+        if (netif_msg_rx_err(priv) && net_ratelimit())
+            printk(KERN_WARNING "%s: rx dma ring overrun\n",
+                   priv->dev->name);
+
+        if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+            if (netif_msg_drv(priv))
+                printk(KERN_ERR "%s: cpmac_poll is trying to "
+                       "restart rx from a descriptor that's "
+                       "not free: %p\n",
+                       priv->dev->name, restart);
+            goto fatal_error;
+        }
+
+        cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+    }
+
     priv->rx_head = desc;
     spin_unlock(&priv->rx_lock);
     if (unlikely(netif_msg_rx_status(priv)))
         printk(KERN_DEBUG "%s: poll processed %d packets\n",
                priv->dev->name, received);
-    if (desc->dataflags & CPMAC_OWN) {
+    if (processed == 0) {
+        /* we ran out of packets to read,
+         * revert to interrupt-driven mode */
         netif_rx_complete(priv->dev, napi);
-        cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
         cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
         return 0;
     }
 
     return 1;
+
+fatal_error:
+    /* Something went horribly wrong.
+     * Reset hardware to try to recover rather than wedging. */
+
+    if (netif_msg_drv(priv)) {
+        printk(KERN_ERR "%s: cpmac_poll is confused. "
+               "Resetting hardware\n", priv->dev->name);
+        cpmac_dump_all_desc(priv->dev);
+        printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+               priv->dev->name,
+               cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+               cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+    }
+
+    spin_unlock(&priv->rx_lock);
+    netif_rx_complete(priv->dev, napi);
+    netif_stop_queue(priv->dev);
+    napi_disable(&priv->napi);
+
+    atomic_inc(&priv->reset_pending);
+    cpmac_hw_stop(priv->dev);
+    if (!schedule_work(&priv->reset_work))
+        atomic_dec(&priv->reset_pending);
+    return 0;
+
 }
 
 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -456,6 +561,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
     struct cpmac_desc *desc;
     struct cpmac_priv *priv = netdev_priv(dev);
 
+    if (unlikely(atomic_read(&priv->reset_pending)))
+        return NETDEV_TX_BUSY;
+
     if (unlikely(skb_padto(skb, ETH_ZLEN)))
         return NETDEV_TX_OK;
 
@@ -621,8 +729,10 @@ static void cpmac_clear_rx(struct net_device *dev)
             desc->dataflags = CPMAC_OWN;
             dev->stats.rx_dropped++;
         }
+        desc->hw_next = desc->next->mapping;
         desc = desc->next;
     }
+    priv->rx_head->prev->hw_next = 0;
 }
 
 static void cpmac_clear_tx(struct net_device *dev)
@@ -635,14 +745,14 @@ static void cpmac_clear_tx(struct net_device *dev)
         priv->desc_ring[i].dataflags = 0;
         if (priv->desc_ring[i].skb) {
             dev_kfree_skb_any(priv->desc_ring[i].skb);
-            if (netif_subqueue_stopped(dev, i))
-                netif_wake_subqueue(dev, i);
+            priv->desc_ring[i].skb = NULL;
         }
     }
 }
 
 static void cpmac_hw_error(struct work_struct *work)
 {
+    int i;
     struct cpmac_priv *priv =
         container_of(work, struct cpmac_priv, reset_work);
 
@@ -651,8 +761,48 @@ static void cpmac_hw_error(struct work_struct *work)
     spin_unlock(&priv->rx_lock);
     cpmac_clear_tx(priv->dev);
     cpmac_hw_start(priv->dev);
-    napi_enable(&priv->napi);
-    netif_start_queue(priv->dev);
+    barrier();
+    atomic_dec(&priv->reset_pending);
+
+    for (i = 0; i < CPMAC_QUEUES; i++)
+        netif_wake_subqueue(priv->dev, i);
+    netif_wake_queue(priv->dev);
+    cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+}
+
+static void cpmac_check_status(struct net_device *dev)
+{
+    struct cpmac_priv *priv = netdev_priv(dev);
+
+    u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
+    int rx_channel = (macstatus >> 8) & 7;
+    int rx_code = (macstatus >> 12) & 15;
+    int tx_channel = (macstatus >> 16) & 7;
+    int tx_code = (macstatus >> 20) & 15;
+
+    if (rx_code || tx_code) {
+        if (netif_msg_drv(priv) && net_ratelimit()) {
+            /* Can't find any documentation on what these
+             * error codes actually are. So just log them and hope...
+             */
+            if (rx_code)
+                printk(KERN_WARNING "%s: host error %d on rx "
+                       "channel %d (macstatus %08x), resetting\n",
+                       dev->name, rx_code, rx_channel, macstatus);
+            if (tx_code)
+                printk(KERN_WARNING "%s: host error %d on tx "
+                       "channel %d (macstatus %08x), resetting\n",
+                       dev->name, tx_code, tx_channel, macstatus);
+        }
+
+        netif_stop_queue(dev);
+        cpmac_hw_stop(dev);
+        if (schedule_work(&priv->reset_work))
+            atomic_inc(&priv->reset_pending);
+        if (unlikely(netif_msg_hw(priv)))
+            cpmac_dump_regs(dev);
+    }
+    cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
 }
 
 static irqreturn_t cpmac_irq(int irq, void *dev_id)
@@ -683,49 +833,32 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
 
     cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
 
-    if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) {
-        if (netif_msg_drv(priv) && net_ratelimit())
-            printk(KERN_ERR "%s: hw error, resetting...\n",
-                   dev->name);
-        netif_stop_queue(dev);
-        napi_disable(&priv->napi);
-        cpmac_hw_stop(dev);
-        schedule_work(&priv->reset_work);
-        if (unlikely(netif_msg_hw(priv)))
-            cpmac_dump_regs(dev);
-    }
+    if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
+        cpmac_check_status(dev);
 
     return IRQ_HANDLED;
 }
 
 static void cpmac_tx_timeout(struct net_device *dev)
 {
-    struct cpmac_priv *priv = netdev_priv(dev);
     int i;
+    struct cpmac_priv *priv = netdev_priv(dev);
 
     spin_lock(&priv->lock);
     dev->stats.tx_errors++;
     spin_unlock(&priv->lock);
     if (netif_msg_tx_err(priv) && net_ratelimit())
         printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
-    /*
-     * FIXME: waking up random queue is not the best thing to
-     * do... on the other hand why we got here at all?
-     */
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+
+    atomic_inc(&priv->reset_pending);
+    barrier();
+    cpmac_clear_tx(dev);
+    barrier();
+    atomic_dec(&priv->reset_pending);
+
+    netif_wake_queue(priv->dev);
     for (i = 0; i < CPMAC_QUEUES; i++)
-        if (priv->desc_ring[i].skb) {
-            priv->desc_ring[i].dataflags = 0;
-            dev_kfree_skb_any(priv->desc_ring[i].skb);
-            netif_wake_subqueue(dev, i);
-            break;
-        }
-#else
-    priv->desc_ring[0].dataflags = 0;
-    if (priv->desc_ring[0].skb)
-        dev_kfree_skb_any(priv->desc_ring[0].skb);
-    netif_wake_queue(dev);
-#endif
+        netif_wake_subqueue(dev, i);
 }
 
 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -901,9 +1034,12 @@ static int cpmac_open(struct net_device *dev)
         desc->buflen = CPMAC_SKB_SIZE;
         desc->dataflags = CPMAC_OWN;
         desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+        desc->next->prev = desc;
         desc->hw_next = (u32)desc->next->mapping;
     }
 
+    priv->rx_head->prev->hw_next = (u32)0;
+
     if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
                            dev->name, dev))) {
         if (netif_msg_drv(priv))
@@ -912,6 +1048,7 @@ static int cpmac_open(struct net_device *dev)
         goto fail_irq;
     }
 
+    atomic_set(&priv->reset_pending, 0);
     INIT_WORK(&priv->reset_work, cpmac_hw_error);
     cpmac_hw_start(dev);
 
@@ -1007,21 +1144,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 
     if (phy_id == PHY_MAX_ADDR) {
         if (external_switch || dumb_switch) {
-            struct fixed_phy_status status = {};
-
-            /*
-             * FIXME: this should be in the platform code!
-             * Since there is not platform code at all (that is,
-             * no mainline users of that driver), place it here
-             * for now.
-             */
-            phy_id = 0;
-            status.link = 1;
-            status.duplex = 1;
-            status.speed = 100;
-            fixed_phy_add(PHY_POLL, phy_id, &status);
+            mdio_bus_id = 0; /* fixed phys bus */
+            phy_id = pdev->id;
         } else {
-            printk(KERN_ERR "cpmac: no PHY present\n");
+            dev_err(&pdev->dev, "no PHY present\n");
             return -ENODEV;
         }
     }
@@ -1064,10 +1190,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
     priv->msg_enable = netif_msg_init(debug_level, 0xff);
     memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
 
-    snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
-
-    priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
-                            PHY_INTERFACE_MODE_MII);
+    priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id,
+                            &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
     if (IS_ERR(priv->phy)) {
         if (netif_msg_drv(priv))
             printk(KERN_ERR "%s: Could not attach to PHY\n",
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index d45bcd2660af..864295e081b6 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -903,7 +903,7 @@ dm9000_stop(struct net_device *ndev)
     if (netif_msg_ifdown(db))
         dev_dbg(db->dev, "shutting down %s\n", ndev->name);
 
-    cancel_delayed_work(&db->phy_poll);
+    cancel_delayed_work_sync(&db->phy_poll);
 
     netif_stop_queue(ndev);
     netif_carrier_off(ndev);
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 8cbb40f3a506..cab1835173cd 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4201,8 +4201,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
     struct e1000_adapter *adapter;
     struct e1000_hw *hw;
     const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
-    unsigned long mmio_start, mmio_len;
-    unsigned long flash_start, flash_len;
+    resource_size_t mmio_start, mmio_len;
+    resource_size_t flash_start, flash_len;
 
     static int cards_found;
     int i, err, pci_using_dac;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index d1b6d4e7495d..287a61918739 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2213,8 +2213,6 @@ static void ehea_vlan_rx_register(struct net_device *dev,
         goto out;
     }
 
-    memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
-
     hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                    H_PORT_CB1, H_PORT_CB1_ALL, cb1);
     if (hret != H_SUCCESS)
@@ -3178,11 +3176,12 @@ out_err:
 
 static void ehea_shutdown_single_port(struct ehea_port *port)
 {
+    struct ehea_adapter *adapter = port->adapter;
     unregister_netdev(port->netdev);
     ehea_unregister_port(port);
     kfree(port->mc_list);
     free_netdev(port->netdev);
-    port->adapter->active_ports--;
+    adapter->active_ports--;
 }
 
 static int ehea_setup_ports(struct ehea_adapter *adapter)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 35f66d4a4595..9eca97fb0a54 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5823,6 +5823,7 @@ static int nv_resume(struct pci_dev *pdev)
     writel(txreg, base + NvRegTransmitPoll);
 
     rc = nv_open(dev);
+    nv_set_multicast(dev);
 out:
     return rc;
 }
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 67b4b0728fce..a5baaf59ff66 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1093,7 +1093,7 @@ err:
     if (registered)
         unregister_netdev(ndev);
 
-    if (fep != NULL) {
+    if (fep && fep->ops) {
         (*fep->ops->free_bd)(ndev);
         (*fep->ops->cleanup_data)(ndev);
     }
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index f90515935833..45ae9d1191d7 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1340,9 +1340,10 @@ static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, uns
     case PARAM_RTS:
         if ( !(scc->wreg[R5] & RTS) )
         {
-            if (arg != TX_OFF)
+            if (arg != TX_OFF) {
                 scc_key_trx(scc, TX_ON);
                 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
+            }
         } else {
             if (arg == TX_OFF)
             {
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index c91b12ea26ad..36be6efc6398 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -631,7 +631,7 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
     return status;
 }
 
-int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
+static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
 {
     struct myri10ge_cmd cmd;
     int status;
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 8f328a03847b..a550c9bd126f 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -391,7 +391,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
         cardtype = CONTEC;
         break;
     case MANFID_FUJITSU:
-        if (link->card_id == PRODID_FUJITSU_MBH10302)
+        if (link->conf.ConfigBase == 0x0fe0)
+            cardtype = MBH10302;
+        else if (link->card_id == PRODID_FUJITSU_MBH10302)
             /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
                but these are MBH10304 based card. */
             cardtype = MBH10304;
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index d041f831a18d..f6c4698ce738 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1461,22 +1461,25 @@ static void
 set_multicast_list(struct net_device *dev)
 {
     unsigned int ioaddr = dev->base_addr;
+    unsigned value;
 
     SelectPage(0x42);
+    value = GetByte(XIRCREG42_SWC1) & 0xC0;
+
     if (dev->flags & IFF_PROMISC) { /* snoop */
-        PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */
+        PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
     } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
-        PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */
+        PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
     } else if (dev->mc_count) {
         /* the chip can filter 9 addresses perfectly */
-        PutByte(XIRCREG42_SWC1, 0x01);
+        PutByte(XIRCREG42_SWC1, value | 0x01);
         SelectPage(0x40);
         PutByte(XIRCREG40_CMD0, Offline);
         set_addresses(dev);
         SelectPage(0x40);
         PutByte(XIRCREG40_CMD0, EnableRecv | Online);
     } else { /* standard usage */
-        PutByte(XIRCREG42_SWC1, 0x00);
+        PutByte(XIRCREG42_SWC1, value | 0x00);
     }
     SelectPage(0);
 }
@@ -1722,6 +1725,7 @@ do_reset(struct net_device *dev, int full)
 
     /* enable receiver and put the mac online */
     if (full) {
+        set_multicast_list(dev);
         SelectPage(0x40);
         PutByte(XIRCREG40_CMD0, EnableRecv | Online);
     }
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index a1c454dbc164..1c89b97f4e09 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -325,7 +325,7 @@ static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                              void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
-static int pcnet32_alloc_ring(struct net_device *dev, char *name);
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
 static void pcnet32_free_ring(struct net_device *dev);
 static void pcnet32_check_media(struct net_device *dev, int verbose);
 
@@ -1983,7 +1983,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 }
 
 /* if any allocation fails, caller must also call pcnet32_free_ring */
-static int pcnet32_alloc_ring(struct net_device *dev, char *name)
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
 {
     struct pcnet32_private *lp = netdev_priv(dev);
 
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 6bf9e76b0a00..6eb2d31d1e34 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -5,7 +5,7 @@
 menuconfig PHYLIB
     tristate "PHY Device support and infrastructure"
     depends on !S390
-    depends on NET_ETHERNET && (BROKEN || !S390)
+    depends on NET_ETHERNET
     help
       Ethernet controllers are usually attached to PHY
       devices. This option provides infrastructure for
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index ac3c01d28fdf..16a0e7de5888 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -207,6 +207,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
 
     return 0;
 }
+EXPORT_SYMBOL(get_phy_id);
 
 /**
  * get_phy_device - reads the specified PHY device and returns its @phy_device struct
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 2109508c047a..f8274f8941ea 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -250,7 +250,7 @@ struct XENA_dev_config {
     u64 tx_mat0_n[0x8];
 #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8)
 
-    u8 unused_1[0x8];
+    u64 xmsi_mask_reg;
     u64 stat_byte_cnt;
 #define STAT_BC(n) vBIT(n,4,12)
 
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 523478ebfd69..a20693e09ae8 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -86,7 +86,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.26.23"
+#define DRV_VERSION "2.0.26.24"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -1113,9 +1113,10 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
     struct pci_dev *tdev = NULL;
     while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
         if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
-            if (tdev->bus == s2io_pdev->bus->parent)
+            if (tdev->bus == s2io_pdev->bus->parent) {
                 pci_dev_put(tdev);
                 return 1;
+            }
         }
     }
     return 0;
@@ -1219,15 +1220,33 @@ static int init_tti(struct s2io_nic *nic, int link)
             TTI_DATA1_MEM_TX_URNG_B(0x10) |
             TTI_DATA1_MEM_TX_URNG_C(0x30) |
             TTI_DATA1_MEM_TX_TIMER_AC_EN;
-
+        if (i == 0)
             if (use_continuous_tx_intrs && (link == LINK_UP))
                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
         writeq(val64, &bar0->tti_data1_mem);
 
-        val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
-            TTI_DATA2_MEM_TX_UFC_B(0x20) |
-            TTI_DATA2_MEM_TX_UFC_C(0x40) |
-            TTI_DATA2_MEM_TX_UFC_D(0x80);
+        if (nic->config.intr_type == MSI_X) {
+            val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+                TTI_DATA2_MEM_TX_UFC_B(0x100) |
+                TTI_DATA2_MEM_TX_UFC_C(0x200) |
+                TTI_DATA2_MEM_TX_UFC_D(0x300);
+        } else {
+            if ((nic->config.tx_steering_type ==
+                 TX_DEFAULT_STEERING) &&
+                (config->tx_fifo_num > 1) &&
+                (i >= nic->udp_fifo_idx) &&
+                (i < (nic->udp_fifo_idx +
+                      nic->total_udp_fifos)))
+                val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
+                    TTI_DATA2_MEM_TX_UFC_B(0x80) |
+                    TTI_DATA2_MEM_TX_UFC_C(0x100) |
+                    TTI_DATA2_MEM_TX_UFC_D(0x120);
+            else
+                val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+                    TTI_DATA2_MEM_TX_UFC_B(0x20) |
+                    TTI_DATA2_MEM_TX_UFC_C(0x40) |
+                    TTI_DATA2_MEM_TX_UFC_D(0x80);
+        }
 
         writeq(val64, &bar0->tti_data2_mem);
 
@@ -2813,6 +2832,15 @@ static void free_rx_buffers(struct s2io_nic *sp)
     }
 }
 
+static int s2io_chk_rx_buffers(struct ring_info *ring)
+{
+    if (fill_rx_buffers(ring) == -ENOMEM) {
+        DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
+        DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
+    }
+    return 0;
+}
+
 /**
  * s2io_poll - Rx interrupt handler for NAPI support
  * @napi : pointer to the napi structure.
@@ -2826,57 +2854,72 @@ static void free_rx_buffers(struct s2io_nic *sp)
  * 0 on success and 1 if there are No Rx packets to be processed.
  */
 
-static int s2io_poll(struct napi_struct *napi, int budget)
+static int s2io_poll_msix(struct napi_struct *napi, int budget)
 {
-    struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
-    struct net_device *dev = nic->dev;
-    int pkt_cnt = 0, org_pkts_to_process;
-    struct mac_info *mac_control;
+    struct ring_info *ring = container_of(napi, struct ring_info, napi);
+    struct net_device *dev = ring->dev;
     struct config_param *config;
+    struct mac_info *mac_control;
+    int pkts_processed = 0;
+    u8 *addr = NULL, val8 = 0;
+    struct s2io_nic *nic = dev->priv;
     struct XENA_dev_config __iomem *bar0 = nic->bar0;
-    int i;
+    int budget_org = budget;
 
-    mac_control = &nic->mac_control;
     config = &nic->config;
+    mac_control = &nic->mac_control;
 
-    nic->pkts_to_process = budget;
-    org_pkts_to_process = nic->pkts_to_process;
+    if (unlikely(!is_s2io_card_up(nic)))
+        return 0;
 
-    writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
-    readl(&bar0->rx_traffic_int);
+    pkts_processed = rx_intr_handler(ring, budget);
+    s2io_chk_rx_buffers(ring);
 
-    for (i = 0; i < config->rx_ring_num; i++) {
-        rx_intr_handler(&mac_control->rings[i]);
-        pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
-        if (!nic->pkts_to_process) {
-            /* Quota for the current iteration has been met */
-            goto no_rx;
-        }
+    if (pkts_processed < budget_org) {
+        netif_rx_complete(dev, napi);
+        /*Re Enable MSI-Rx Vector*/
+        addr = (u8 *)&bar0->xmsi_mask_reg;
+        addr += 7 - ring->ring_no;
+        val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
+        writeb(val8, addr);
+        val8 = readb(addr);
     }
+    return pkts_processed;
+}
+static int s2io_poll_inta(struct napi_struct *napi, int budget)
+{
+    struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
+    struct ring_info *ring;
+    struct net_device *dev = nic->dev;
+    struct config_param *config;
+    struct mac_info *mac_control;
+    int pkts_processed = 0;
+    int ring_pkts_processed, i;
+    struct XENA_dev_config __iomem *bar0 = nic->bar0;
+    int budget_org = budget;
 
-    netif_rx_complete(dev, napi);
+    config = &nic->config;
+    mac_control = &nic->mac_control;
 
-    for (i = 0; i < config->rx_ring_num; i++) {
-        if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
-            DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
-            DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
-            break;
-        }
-    }
-    /* Re enable the Rx interrupts. */
-    writeq(0x0, &bar0->rx_traffic_mask);
-    readl(&bar0->rx_traffic_mask);
-    return pkt_cnt;
+    if (unlikely(!is_s2io_card_up(nic)))
+        return 0;
 
-no_rx:
     for (i = 0; i < config->rx_ring_num; i++) {
-        if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
-            DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
-            DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
+        ring = &mac_control->rings[i];
+        ring_pkts_processed = rx_intr_handler(ring, budget);
+        s2io_chk_rx_buffers(ring);
+        pkts_processed += ring_pkts_processed;
+        budget -= ring_pkts_processed;
+        if (budget <= 0)
             break;
-        }
     }
-    return pkt_cnt;
+    if (pkts_processed < budget_org) {
+        netif_rx_complete(dev, napi);
+        /* Re enable the Rx interrupts for the ring */
+        writeq(0, &bar0->rx_traffic_mask);
+        readl(&bar0->rx_traffic_mask);
+    }
+    return pkts_processed;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2918,7 +2961,7 @@ static void s2io_netpoll(struct net_device *dev)
 
     /* check for received packet and indicate up to network */
     for (i = 0; i < config->rx_ring_num; i++)
-        rx_intr_handler(&mac_control->rings[i]);
+        rx_intr_handler(&mac_control->rings[i], 0);
 
     for (i = 0; i < config->rx_ring_num; i++) {
         if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
@@ -2934,7 +2977,8 @@ static void s2io_netpoll(struct net_device *dev)
 
 /**
  * rx_intr_handler - Rx interrupt handler
- * @nic: device private variable.
+ * @ring_info: per ring structure.
+ * @budget: budget for napi processing.
  * Description:
  *  If the interrupt is because of a received frame or if the
  *  receive ring contains fresh as yet un-processed frames,this function is
@@ -2942,15 +2986,15 @@ static void s2io_netpoll(struct net_device *dev)
  *  stopped and sends the skb to the OSM's Rx handler and then increments
  *  the offset.
  *  Return Value:
- *  NONE.
+ *  No. of napi packets processed.
  */
-static void rx_intr_handler(struct ring_info *ring_data)
+static int rx_intr_handler(struct ring_info *ring_data, int budget)
 {
     int get_block, put_block;
     struct rx_curr_get_info get_info, put_info;
     struct RxD_t *rxdp;
     struct sk_buff *skb;
-    int pkt_cnt = 0;
+    int pkt_cnt = 0, napi_pkts = 0;
     int i;
     struct RxD1* rxdp1;
     struct RxD3* rxdp3;
@@ -2977,7 +3021,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
             DBG_PRINT(ERR_DBG, "%s: The skb is ",
                       ring_data->dev->name);
             DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
-            return;
+            return 0;
         }
         if (ring_data->rxd_mode == RXD_MODE_1) {
             rxdp1 = (struct RxD1*)rxdp;
@@ -3014,9 +3058,10 @@ static void rx_intr_handler(struct ring_info *ring_data)
             rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
         }
 
-        if(ring_data->nic->config.napi){
-            ring_data->nic->pkts_to_process -= 1;
-            if (!ring_data->nic->pkts_to_process)
+        if (ring_data->nic->config.napi) {
+            budget--;
+            napi_pkts++;
+            if (!budget)
                 break;
         }
         pkt_cnt++;
@@ -3034,6 +3079,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
             }
         }
     }
+    return(napi_pkts);
 }
 
 /**
@@ -3730,14 +3776,19 @@ static void restore_xmsi_data(struct s2io_nic *nic)
 {
     struct XENA_dev_config __iomem *bar0 = nic->bar0;
     u64 val64;
-    int i;
+    int i, msix_index;
+
+
+    if (nic->device_type == XFRAME_I_DEVICE)
+        return;
 
     for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
+        msix_index = (i) ? ((i-1) * 8 + 1): 0;
         writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
         writeq(nic->msix_info[i].data, &bar0->xmsi_data);
-        val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
+        val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
         writeq(val64, &bar0->xmsi_access);
-        if (wait_for_msix_trans(nic, i)) {
+        if (wait_for_msix_trans(nic, msix_index)) {
             DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
             continue;
         }
@@ -3748,13 +3799,17 @@ static void store_xmsi_data(struct s2io_nic *nic)
 {
     struct XENA_dev_config __iomem *bar0 = nic->bar0;
     u64 val64, addr, data;
-    int i;
+    int i, msix_index;
+
+    if (nic->device_type == XFRAME_I_DEVICE)
+        return;
 
     /* Store and display */
     for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
-        val64 = (s2BIT(15) | vBIT(i, 26, 6));
+        msix_index = (i) ? ((i-1) * 8 + 1): 0;
+        val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
         writeq(val64, &bar0->xmsi_access);
-        if (wait_for_msix_trans(nic, i)) {
+        if (wait_for_msix_trans(nic, msix_index)) {
             DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
             continue;
         }
@@ -3770,11 +3825,11 @@ static void store_xmsi_data(struct s2io_nic *nic)
 static int s2io_enable_msi_x(struct s2io_nic *nic)
 {
     struct XENA_dev_config __iomem *bar0 = nic->bar0;
-    u64 tx_mat, rx_mat;
+    u64 rx_mat;
     u16 msi_control; /* Temp variable */
     int ret, i, j, msix_indx = 1;
 
-    nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
+    nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
                            GFP_KERNEL);
     if (!nic->entries) {
         DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
@@ -3783,10 +3838,12 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
         return -ENOMEM;
     }
     nic->mac_control.stats_info->sw_stat.mem_allocated
-        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+        += (nic->num_entries * sizeof(struct msix_entry));
+
+    memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
 
     nic->s2io_entries =
-        kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
+        kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
             GFP_KERNEL);
     if (!nic->s2io_entries) {
         DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
| @@ -3794,60 +3851,52 @@ static int s2io_enable_msi_x(struct s2io_nic *nic) | |||
| 3794 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; | 3851 | nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; |
| 3795 | kfree(nic->entries); | 3852 | kfree(nic->entries); |
| 3796 | nic->mac_control.stats_info->sw_stat.mem_freed | 3853 | nic->mac_control.stats_info->sw_stat.mem_freed |
| 3797 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 3854 | += (nic->num_entries * sizeof(struct msix_entry)); |
| 3798 | return -ENOMEM; | 3855 | return -ENOMEM; |
| 3799 | } | 3856 | } |
| 3800 | nic->mac_control.stats_info->sw_stat.mem_allocated | 3857 | nic->mac_control.stats_info->sw_stat.mem_allocated |
| 3801 | += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); | 3858 | += (nic->num_entries * sizeof(struct s2io_msix_entry)); |
| 3802 | 3859 | memset(nic->s2io_entries, 0, | |
| 3803 | for (i=0; i< MAX_REQUESTED_MSI_X; i++) { | 3860 | nic->num_entries * sizeof(struct s2io_msix_entry)); |
| 3804 | nic->entries[i].entry = i; | 3861 | |
| 3805 | nic->s2io_entries[i].entry = i; | 3862 | nic->entries[0].entry = 0; |
| 3863 | nic->s2io_entries[0].entry = 0; | ||
| 3864 | nic->s2io_entries[0].in_use = MSIX_FLG; | ||
| 3865 | nic->s2io_entries[0].type = MSIX_ALARM_TYPE; | ||
| 3866 | nic->s2io_entries[0].arg = &nic->mac_control.fifos; | ||
| 3867 | |||
| 3868 | for (i = 1; i < nic->num_entries; i++) { | ||
| 3869 | nic->entries[i].entry = ((i - 1) * 8) + 1; | ||
| 3870 | nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; | ||
| 3806 | nic->s2io_entries[i].arg = NULL; | 3871 | nic->s2io_entries[i].arg = NULL; |
| 3807 | nic->s2io_entries[i].in_use = 0; | 3872 | nic->s2io_entries[i].in_use = 0; |
| 3808 | } | 3873 | } |
| 3809 | 3874 | ||
| 3810 | tx_mat = readq(&bar0->tx_mat0_n[0]); | ||
| 3811 | for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) { | ||
| 3812 | tx_mat |= TX_MAT_SET(i, msix_indx); | ||
| 3813 | nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i]; | ||
| 3814 | nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE; | ||
| 3815 | nic->s2io_entries[msix_indx].in_use = MSIX_FLG; | ||
| 3816 | } | ||
| 3817 | writeq(tx_mat, &bar0->tx_mat0_n[0]); | ||
| 3818 | |||
| 3819 | rx_mat = readq(&bar0->rx_mat); | 3875 | rx_mat = readq(&bar0->rx_mat); |
| 3820 | for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { | 3876 | for (j = 0; j < nic->config.rx_ring_num; j++) { |
| 3821 | rx_mat |= RX_MAT_SET(j, msix_indx); | 3877 | rx_mat |= RX_MAT_SET(j, msix_indx); |
| 3822 | nic->s2io_entries[msix_indx].arg | 3878 | nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; |
| 3823 | = &nic->mac_control.rings[j]; | 3879 | nic->s2io_entries[j+1].type = MSIX_RING_TYPE; |
| 3824 | nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; | 3880 | nic->s2io_entries[j+1].in_use = MSIX_FLG; |
| 3825 | nic->s2io_entries[msix_indx].in_use = MSIX_FLG; | 3881 | msix_indx += 8; |
| 3826 | } | 3882 | } |
| 3827 | writeq(rx_mat, &bar0->rx_mat); | 3883 | writeq(rx_mat, &bar0->rx_mat); |
| 3884 | readq(&bar0->rx_mat); | ||
| 3828 | 3885 | ||
| 3829 | nic->avail_msix_vectors = 0; | 3886 | ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); |
| 3830 | ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); | ||
| 3831 | /* We fail init if error or we get less vectors than min required */ | 3887 | /* We fail init if error or we get less vectors than min required */ |
| 3832 | if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) { | ||
| 3833 | nic->avail_msix_vectors = ret; | ||
| 3834 | ret = pci_enable_msix(nic->pdev, nic->entries, ret); | ||
| 3835 | } | ||
| 3836 | if (ret) { | 3888 | if (ret) { |
| 3837 | DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); | 3889 | DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); |
| 3838 | kfree(nic->entries); | 3890 | kfree(nic->entries); |
| 3839 | nic->mac_control.stats_info->sw_stat.mem_freed | 3891 | nic->mac_control.stats_info->sw_stat.mem_freed |
| 3840 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 3892 | += (nic->num_entries * sizeof(struct msix_entry)); |
| 3841 | kfree(nic->s2io_entries); | 3893 | kfree(nic->s2io_entries); |
| 3842 | nic->mac_control.stats_info->sw_stat.mem_freed | 3894 | nic->mac_control.stats_info->sw_stat.mem_freed |
| 3843 | += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); | 3895 | += (nic->num_entries * sizeof(struct s2io_msix_entry)); |
| 3844 | nic->entries = NULL; | 3896 | nic->entries = NULL; |
| 3845 | nic->s2io_entries = NULL; | 3897 | nic->s2io_entries = NULL; |
| 3846 | nic->avail_msix_vectors = 0; | ||
| 3847 | return -ENOMEM; | 3898 | return -ENOMEM; |
| 3848 | } | 3899 | } |
| 3849 | if (!nic->avail_msix_vectors) | ||
| 3850 | nic->avail_msix_vectors = MAX_REQUESTED_MSI_X; | ||
| 3851 | 3900 | ||
| 3852 | /* | 3901 | /* |
| 3853 | * To enable MSI-X, MSI also needs to be enabled, due to a bug | 3902 | * To enable MSI-X, MSI also needs to be enabled, due to a bug |
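Note on the hunk above: the rewritten s2io_enable_msi_x() pins MSI-X table entry 0 to the alarm vector and spaces the ring vectors eight table slots apart (`entry = (i - 1) * 8 + 1`, matching `msix_indx += 8` in the rx_mat loop). A minimal userspace sketch of that numbering; the ring count is an example value, the arithmetic mirrors the diff:

```c
#include <stdio.h>

/* Sketch: reproduce the MSI-X table numbering the new code sets up.
 * Entry 0 is the alarm (TX/error) vector; ring i (1-based s2io_entries
 * index) lands in table slot (i - 1) * 8 + 1. */
int main(void)
{
	int rx_ring_num = 8;               /* example config, max per the new header */
	int num_entries = rx_ring_num + 1; /* rings + one alarm vector */
	int i;

	printf("entry 0 -> alarm vector (table slot 0)\n");
	for (i = 1; i < num_entries; i++)
		printf("entry %d -> ring %d (table slot %d)\n",
		       i, i - 1, (i - 1) * 8 + 1);
	return 0;
}
```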
| @@ -3919,7 +3968,7 @@ static void remove_msix_isr(struct s2io_nic *sp) | |||
| 3919 | int i; | 3968 | int i; |
| 3920 | u16 msi_control; | 3969 | u16 msi_control; |
| 3921 | 3970 | ||
| 3922 | for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { | 3971 | for (i = 0; i < sp->num_entries; i++) { |
| 3923 | if (sp->s2io_entries[i].in_use == | 3972 | if (sp->s2io_entries[i].in_use == |
| 3924 | MSIX_REGISTERED_SUCCESS) { | 3973 | MSIX_REGISTERED_SUCCESS) { |
| 3925 | int vector = sp->entries[i].vector; | 3974 | int vector = sp->entries[i].vector; |
| @@ -3975,29 +4024,6 @@ static int s2io_open(struct net_device *dev) | |||
| 3975 | netif_carrier_off(dev); | 4024 | netif_carrier_off(dev); |
| 3976 | sp->last_link_state = 0; | 4025 | sp->last_link_state = 0; |
| 3977 | 4026 | ||
| 3978 | if (sp->config.intr_type == MSI_X) { | ||
| 3979 | int ret = s2io_enable_msi_x(sp); | ||
| 3980 | |||
| 3981 | if (!ret) { | ||
| 3982 | ret = s2io_test_msi(sp); | ||
| 3983 | /* rollback MSI-X, will re-enable during add_isr() */ | ||
| 3984 | remove_msix_isr(sp); | ||
| 3985 | } | ||
| 3986 | if (ret) { | ||
| 3987 | |||
| 3988 | DBG_PRINT(ERR_DBG, | ||
| 3989 | "%s: MSI-X requested but failed to enable\n", | ||
| 3990 | dev->name); | ||
| 3991 | sp->config.intr_type = INTA; | ||
| 3992 | } | ||
| 3993 | } | ||
| 3994 | |||
| 3995 | /* NAPI doesn't work well with MSI(X) */ | ||
| 3996 | if (sp->config.intr_type != INTA) { | ||
| 3997 | if(sp->config.napi) | ||
| 3998 | sp->config.napi = 0; | ||
| 3999 | } | ||
| 4000 | |||
| 4001 | /* Initialize H/W and enable interrupts */ | 4027 | /* Initialize H/W and enable interrupts */ |
| 4002 | err = s2io_card_up(sp); | 4028 | err = s2io_card_up(sp); |
| 4003 | if (err) { | 4029 | if (err) { |
| @@ -4020,12 +4046,12 @@ hw_init_failed: | |||
| 4020 | if (sp->entries) { | 4046 | if (sp->entries) { |
| 4021 | kfree(sp->entries); | 4047 | kfree(sp->entries); |
| 4022 | sp->mac_control.stats_info->sw_stat.mem_freed | 4048 | sp->mac_control.stats_info->sw_stat.mem_freed |
| 4023 | += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); | 4049 | += (sp->num_entries * sizeof(struct msix_entry)); |
| 4024 | } | 4050 | } |
| 4025 | if (sp->s2io_entries) { | 4051 | if (sp->s2io_entries) { |
| 4026 | kfree(sp->s2io_entries); | 4052 | kfree(sp->s2io_entries); |
| 4027 | sp->mac_control.stats_info->sw_stat.mem_freed | 4053 | sp->mac_control.stats_info->sw_stat.mem_freed |
| 4028 | += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); | 4054 | += (sp->num_entries * sizeof(struct s2io_msix_entry)); |
| 4029 | } | 4055 | } |
| 4030 | } | 4056 | } |
| 4031 | return err; | 4057 | return err; |
| @@ -4327,40 +4353,64 @@ s2io_alarm_handle(unsigned long data) | |||
| 4327 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); | 4353 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); |
| 4328 | } | 4354 | } |
| 4329 | 4355 | ||
| 4330 | static int s2io_chk_rx_buffers(struct ring_info *ring) | ||
| 4331 | { | ||
| 4332 | if (fill_rx_buffers(ring) == -ENOMEM) { | ||
| 4333 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); | ||
| 4334 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | ||
| 4335 | } | ||
| 4336 | return 0; | ||
| 4337 | } | ||
| 4338 | |||
| 4339 | static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) | 4356 | static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) |
| 4340 | { | 4357 | { |
| 4341 | struct ring_info *ring = (struct ring_info *)dev_id; | 4358 | struct ring_info *ring = (struct ring_info *)dev_id; |
| 4342 | struct s2io_nic *sp = ring->nic; | 4359 | struct s2io_nic *sp = ring->nic; |
| 4360 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | ||
| 4361 | struct net_device *dev = sp->dev; | ||
| 4343 | 4362 | ||
| 4344 | if (!is_s2io_card_up(sp)) | 4363 | if (unlikely(!is_s2io_card_up(sp))) |
| 4345 | return IRQ_HANDLED; | 4364 | return IRQ_HANDLED; |
| 4346 | 4365 | ||
| 4347 | rx_intr_handler(ring); | 4366 | if (sp->config.napi) { |
| 4348 | s2io_chk_rx_buffers(ring); | 4367 | u8 *addr = NULL, val8 = 0; |
| 4368 | |||
| 4369 | addr = (u8 *)&bar0->xmsi_mask_reg; | ||
| 4370 | addr += (7 - ring->ring_no); | ||
| 4371 | val8 = (ring->ring_no == 0) ? 0x7f : 0xff; | ||
| 4372 | writeb(val8, addr); | ||
| 4373 | val8 = readb(addr); | ||
| 4374 | netif_rx_schedule(dev, &ring->napi); | ||
| 4375 | } else { | ||
| 4376 | rx_intr_handler(ring, 0); | ||
| 4377 | s2io_chk_rx_buffers(ring); | ||
| 4378 | } | ||
| 4349 | 4379 | ||
| 4350 | return IRQ_HANDLED; | 4380 | return IRQ_HANDLED; |
| 4351 | } | 4381 | } |
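The NAPI branch above masks a single ring's vector by poking one byte of the 64-bit xmsi_mask_reg before scheduling the poll. A userspace simulation of just the byte-lane arithmetic (the real code uses writeb()/readb() on an iomapped register, and the read-back flushes the posted write):

```c
#include <stdio.h>
#include <stdint.h>

/* Sketch: which byte of xmsi_mask_reg the ring handler writes, and with
 * what value. Offsets and values are taken from the diff; the register
 * here is only a local variable. */
int main(void)
{
	uint64_t xmsi_mask_reg = 0;
	uint8_t *base = (uint8_t *)&xmsi_mask_reg;
	int ring_no;

	for (ring_no = 0; ring_no < 8; ring_no++) {
		uint8_t val8 = (ring_no == 0) ? 0x7f : 0xff;

		base[7 - ring_no] = val8;   /* writeb(val8, addr) analogue */
		printf("ring %d -> byte offset %d, value 0x%02x\n",
		       ring_no, 7 - ring_no, val8);
	}
	return 0;
}
```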
| 4352 | 4382 | ||
| 4353 | static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) | 4383 | static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) |
| 4354 | { | 4384 | { |
| 4355 | struct fifo_info *fifo = (struct fifo_info *)dev_id; | 4385 | int i; |
| 4356 | struct s2io_nic *sp = fifo->nic; | 4386 | struct fifo_info *fifos = (struct fifo_info *)dev_id; |
| 4387 | struct s2io_nic *sp = fifos->nic; | ||
| 4388 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | ||
| 4389 | struct config_param *config = &sp->config; | ||
| 4390 | u64 reason; | ||
| 4357 | 4391 | ||
| 4358 | if (!is_s2io_card_up(sp)) | 4392 | if (unlikely(!is_s2io_card_up(sp))) |
| 4393 | return IRQ_NONE; | ||
| 4394 | |||
| 4395 | reason = readq(&bar0->general_int_status); | ||
| 4396 | if (unlikely(reason == S2IO_MINUS_ONE)) | ||
| 4397 | /* Nothing much can be done. Get out */ | ||
| 4359 | return IRQ_HANDLED; | 4398 | return IRQ_HANDLED; |
| 4360 | 4399 | ||
| 4361 | tx_intr_handler(fifo); | 4400 | writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); |
| 4401 | |||
| 4402 | if (reason & GEN_INTR_TXTRAFFIC) | ||
| 4403 | writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); | ||
| 4404 | |||
| 4405 | for (i = 0; i < config->tx_fifo_num; i++) | ||
| 4406 | tx_intr_handler(&fifos[i]); | ||
| 4407 | |||
| 4408 | writeq(sp->general_int_mask, &bar0->general_int_mask); | ||
| 4409 | readl(&bar0->general_int_status); | ||
| 4410 | |||
| 4362 | return IRQ_HANDLED; | 4411 | return IRQ_HANDLED; |
| 4363 | } | 4412 | } |
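The fifo handler now behaves as a shared alarm vector: read the cause register, bail out if the card has gone away (all-ones read), mask everything, ack TX traffic, service every fifo, then restore the saved mask. A compilable userspace sketch of that flow; register accesses are simulated with plain variables, and the GEN_INTR_TXTRAFFIC bit value below is illustrative only, not the hardware's:

```c
#include <stdio.h>
#include <stdint.h>

#define S2IO_MINUS_ONE     0xFFFFFFFFFFFFFFFFULL
#define GEN_INTR_TXTRAFFIC 0x0000000000000100ULL /* illustrative bit */

struct fake_bar0 {
	uint64_t general_int_status;
	uint64_t general_int_mask;
	uint64_t tx_traffic_int;
};

static void tx_intr_handler(int fifo) { printf("serviced fifo %d\n", fifo); }

int main(void)
{
	struct fake_bar0 bar0 = { .general_int_status = GEN_INTR_TXTRAFFIC };
	uint64_t saved_mask = 0;            /* sp->general_int_mask analogue */
	int tx_fifo_num = 3, i;
	uint64_t reason = bar0.general_int_status;

	if (reason == S2IO_MINUS_ONE)
		return 0;                         /* card gone: get out */
	bar0.general_int_mask = S2IO_MINUS_ONE;   /* mask all */
	if (reason & GEN_INTR_TXTRAFFIC)
		bar0.tx_traffic_int = S2IO_MINUS_ONE; /* ack TX */
	for (i = 0; i < tx_fifo_num; i++)
		tx_intr_handler(i);               /* one vector, every fifo */
	bar0.general_int_mask = saved_mask;       /* restore mask */
	return 0;
}
```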
| 4413 | |||
| 4364 | static void s2io_txpic_intr_handle(struct s2io_nic *sp) | 4414 | static void s2io_txpic_intr_handle(struct s2io_nic *sp) |
| 4365 | { | 4415 | { |
| 4366 | struct XENA_dev_config __iomem *bar0 = sp->bar0; | 4416 | struct XENA_dev_config __iomem *bar0 = sp->bar0; |
| @@ -4762,14 +4812,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
| 4762 | 4812 | ||
| 4763 | if (config->napi) { | 4813 | if (config->napi) { |
| 4764 | if (reason & GEN_INTR_RXTRAFFIC) { | 4814 | if (reason & GEN_INTR_RXTRAFFIC) { |
| 4765 | if (likely(netif_rx_schedule_prep(dev, | 4815 | netif_rx_schedule(dev, &sp->napi); |
| 4766 | &sp->napi))) { | 4816 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); |
| 4767 | __netif_rx_schedule(dev, &sp->napi); | 4817 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); |
| 4768 | writeq(S2IO_MINUS_ONE, | 4818 | readl(&bar0->rx_traffic_int); |
| 4769 | &bar0->rx_traffic_mask); | ||
| 4770 | } else | ||
| 4771 | writeq(S2IO_MINUS_ONE, | ||
| 4772 | &bar0->rx_traffic_int); | ||
| 4773 | } | 4819 | } |
| 4774 | } else { | 4820 | } else { |
| 4775 | /* | 4821 | /* |
| @@ -4781,7 +4827,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
| 4781 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); | 4827 | writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); |
| 4782 | 4828 | ||
| 4783 | for (i = 0; i < config->rx_ring_num; i++) | 4829 | for (i = 0; i < config->rx_ring_num; i++) |
| 4784 | rx_intr_handler(&mac_control->rings[i]); | 4830 | rx_intr_handler(&mac_control->rings[i], 0); |
| 4785 | } | 4831 | } |
| 4786 | 4832 | ||
| 4787 | /* | 4833 | /* |
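The simplification above works because netif_rx_schedule() (the 2.6.25-era two-argument API) already performs the netif_rx_schedule_prep() check internally and is a no-op when the NAPI context is busy, so the prep/else split collapses. The new body again, annotated; all identifiers are the driver's own:

```c
/* Kernel-style fragment, comments added for review only. */
if (reason & GEN_INTR_RXTRAFFIC) {
	netif_rx_schedule(dev, &sp->napi);              /* prep + schedule */
	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); /* mask RX intrs  */
	writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);  /* ack the cause  */
	readl(&bar0->rx_traffic_int);                   /* flush posted write */
}
```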
| @@ -6984,62 +7030,62 @@ static int s2io_add_isr(struct s2io_nic * sp) | |||
| 6984 | 7030 | ||
| 6985 | /* After proper initialization of H/W, register ISR */ | 7031 | /* After proper initialization of H/W, register ISR */ |
| 6986 | if (sp->config.intr_type == MSI_X) { | 7032 | if (sp->config.intr_type == MSI_X) { |
| 6987 | int i, msix_tx_cnt=0,msix_rx_cnt=0; | 7033 | int i, msix_rx_cnt = 0; |
| 6988 | 7034 | ||
| 6989 | for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { | 7035 | for (i = 0; i < sp->num_entries; i++) { |
| 6990 | if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { | 7036 | if (sp->s2io_entries[i].in_use == MSIX_FLG) { |
| 6991 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | 7037 | if (sp->s2io_entries[i].type == |
| 7038 | MSIX_RING_TYPE) { | ||
| 7039 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | ||
| 7040 | dev->name, i); | ||
| 7041 | err = request_irq(sp->entries[i].vector, | ||
| 7042 | s2io_msix_ring_handle, 0, | ||
| 7043 | sp->desc[i], | ||
| 7044 | sp->s2io_entries[i].arg); | ||
| 7045 | } else if (sp->s2io_entries[i].type == | ||
| 7046 | MSIX_ALARM_TYPE) { | ||
| 7047 | sprintf(sp->desc[i], "%s:MSI-X-%d-TX", | ||
| 6992 | dev->name, i); | 7048 | dev->name, i); |
| 6993 | err = request_irq(sp->entries[i].vector, | 7049 | err = request_irq(sp->entries[i].vector, |
| 6994 | s2io_msix_fifo_handle, 0, sp->desc[i], | 7050 | s2io_msix_fifo_handle, 0, |
| 6995 | sp->s2io_entries[i].arg); | 7051 | sp->desc[i], |
| 6996 | /* If either data or addr is zero print it */ | 7052 | sp->s2io_entries[i].arg); |
| 6997 | if(!(sp->msix_info[i].addr && | 7053 | |
| 6998 | sp->msix_info[i].data)) { | ||
| 6999 | DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " | ||
| 7000 | "Data:0x%llx\n",sp->desc[i], | ||
| 7001 | (unsigned long long) | ||
| 7002 | sp->msix_info[i].addr, | ||
| 7003 | (unsigned long long) | ||
| 7004 | sp->msix_info[i].data); | ||
| 7005 | } else { | ||
| 7006 | msix_tx_cnt++; | ||
| 7007 | } | 7054 | } |
| 7008 | } else { | 7055 | /* if either data or addr is zero print it. */ |
| 7009 | sprintf(sp->desc[i], "%s:MSI-X-%d-RX", | 7056 | if (!(sp->msix_info[i].addr && |
| 7010 | dev->name, i); | ||
| 7011 | err = request_irq(sp->entries[i].vector, | ||
| 7012 | s2io_msix_ring_handle, 0, sp->desc[i], | ||
| 7013 | sp->s2io_entries[i].arg); | ||
| 7014 | /* If either data or addr is zero print it */ | ||
| 7015 | if(!(sp->msix_info[i].addr && | ||
| 7016 | sp->msix_info[i].data)) { | 7057 | sp->msix_info[i].data)) { |
| 7017 | DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " | 7058 | DBG_PRINT(ERR_DBG, |
| 7018 | "Data:0x%llx\n",sp->desc[i], | 7059 | "%s @Addr:0x%llx Data:0x%llx\n", |
| 7060 | sp->desc[i], | ||
| 7019 | (unsigned long long) | 7061 | (unsigned long long) |
| 7020 | sp->msix_info[i].addr, | 7062 | sp->msix_info[i].addr, |
| 7021 | (unsigned long long) | 7063 | (unsigned long long) |
| 7022 | sp->msix_info[i].data); | 7064 | ntohl(sp->msix_info[i].data)); |
| 7023 | } else { | 7065 | } else |
| 7024 | msix_rx_cnt++; | 7066 | msix_rx_cnt++; |
| 7067 | if (err) { | ||
| 7068 | remove_msix_isr(sp); | ||
| 7069 | |||
| 7070 | DBG_PRINT(ERR_DBG, | ||
| 7071 | "%s:MSI-X-%d registration " | ||
| 7072 | "failed\n", dev->name, i); | ||
| 7073 | |||
| 7074 | DBG_PRINT(ERR_DBG, | ||
| 7075 | "%s: Defaulting to INTA\n", | ||
| 7076 | dev->name); | ||
| 7077 | sp->config.intr_type = INTA; | ||
| 7078 | break; | ||
| 7025 | } | 7079 | } |
| 7080 | sp->s2io_entries[i].in_use = | ||
| 7081 | MSIX_REGISTERED_SUCCESS; | ||
| 7026 | } | 7082 | } |
| 7027 | if (err) { | ||
| 7028 | remove_msix_isr(sp); | ||
| 7029 | DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " | ||
| 7030 | "failed\n", dev->name, i); | ||
| 7031 | DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n", | ||
| 7032 | dev->name); | ||
| 7033 | sp->config.intr_type = INTA; | ||
| 7034 | break; | ||
| 7035 | } | ||
| 7036 | sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; | ||
| 7037 | } | 7083 | } |
| 7038 | if (!err) { | 7084 | if (!err) { |
| 7039 | printk(KERN_INFO "MSI-X-TX %d entries enabled\n", | ||
| 7040 | msix_tx_cnt); | ||
| 7041 | printk(KERN_INFO "MSI-X-RX %d entries enabled\n", | 7085 | printk(KERN_INFO "MSI-X-RX %d entries enabled\n", |
| 7042 | msix_rx_cnt); | 7086 | --msix_rx_cnt); |
| 7087 | DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled" | ||
| 7088 | " through alarm vector\n"); | ||
| 7043 | } | 7089 | } |
| 7044 | } | 7090 | } |
| 7045 | if (sp->config.intr_type == INTA) { | 7091 | if (sp->config.intr_type == INTA) { |
| @@ -7080,8 +7126,15 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io) | |||
| 7080 | clear_bit(__S2IO_STATE_CARD_UP, &sp->state); | 7126 | clear_bit(__S2IO_STATE_CARD_UP, &sp->state); |
| 7081 | 7127 | ||
| 7082 | /* Disable napi */ | 7128 | /* Disable napi */ |
| 7083 | if (config->napi) | 7129 | if (sp->config.napi) { |
| 7084 | napi_disable(&sp->napi); | 7130 | int off = 0; |
| 7131 | if (config->intr_type == MSI_X) { | ||
| 7132 | for (; off < sp->config.rx_ring_num; off++) | ||
| 7133 | napi_disable(&sp->mac_control.rings[off].napi); | ||
| 7134 | } | ||
| 7135 | else | ||
| 7136 | napi_disable(&sp->napi); | ||
| 7137 | } | ||
| 7085 | 7138 | ||
| 7086 | /* disable Tx and Rx traffic on the NIC */ | 7139 | /* disable Tx and Rx traffic on the NIC */ |
| 7087 | if (do_io) | 7140 | if (do_io) |
| @@ -7173,8 +7226,15 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
| 7173 | } | 7226 | } |
| 7174 | 7227 | ||
| 7175 | /* Initialise napi */ | 7228 | /* Initialise napi */ |
| 7176 | if (config->napi) | 7229 | if (config->napi) { |
| 7177 | napi_enable(&sp->napi); | 7230 | int i; |
| 7231 | if (config->intr_type == MSI_X) { | ||
| 7232 | for (i = 0; i < sp->config.rx_ring_num; i++) | ||
| 7233 | napi_enable(&sp->mac_control.rings[i].napi); | ||
| 7234 | } else { | ||
| 7235 | napi_enable(&sp->napi); | ||
| 7236 | } | ||
| 7237 | } | ||
| 7178 | 7238 | ||
| 7179 | /* Maintain the state prior to the open */ | 7239 | /* Maintain the state prior to the open */ |
| 7180 | if (sp->promisc_flg) | 7240 | if (sp->promisc_flg) |
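The card-down and card-up hunks above are symmetric: one napi_struct per RX ring under MSI-X, a single device-wide one otherwise. Folded into one hypothetical helper for clarity; this is a kernel-style sketch using only field names visible in the diff, not code from the driver:

```c
/* Hypothetical helper: enable/disable the right set of NAPI contexts. */
static void s2io_napi_set(struct s2io_nic *sp, int on)
{
	int i;

	if (!sp->config.napi)
		return;
	if (sp->config.intr_type == MSI_X) {
		for (i = 0; i < sp->config.rx_ring_num; i++) {
			if (on)
				napi_enable(&sp->mac_control.rings[i].napi);
			else
				napi_disable(&sp->mac_control.rings[i].napi);
		}
	} else {
		if (on)
			napi_enable(&sp->napi);
		else
			napi_disable(&sp->napi);
	}
}
```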
| @@ -7217,7 +7277,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
| 7217 | /* Enable select interrupts */ | 7277 | /* Enable select interrupts */ |
| 7218 | en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); | 7278 | en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); |
| 7219 | if (sp->config.intr_type != INTA) | 7279 | if (sp->config.intr_type != INTA) |
| 7220 | en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); | 7280 | en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS); |
| 7221 | else { | 7281 | else { |
| 7222 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; | 7282 | interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; |
| 7223 | interruptible |= TX_PIC_INTR; | 7283 | interruptible |= TX_PIC_INTR; |
| @@ -7615,9 +7675,6 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, | |||
| 7615 | rx_ring_num = MAX_RX_RINGS; | 7675 | rx_ring_num = MAX_RX_RINGS; |
| 7616 | } | 7676 | } |
| 7617 | 7677 | ||
| 7618 | if (*dev_intr_type != INTA) | ||
| 7619 | napi = 0; | ||
| 7620 | |||
| 7621 | if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { | 7678 | if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { |
| 7622 | DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " | 7679 | DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " |
| 7623 | "Defaulting to INTA\n"); | 7680 | "Defaulting to INTA\n"); |
| @@ -7918,8 +7975,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 7918 | * will use eth_mac_addr() for dev->set_mac_address | 7975 | * will use eth_mac_addr() for dev->set_mac_address |
| 7919 | * mac address will be set every time dev->open() is called | 7976 | * mac address will be set every time dev->open() is called |
| 7920 | */ | 7977 | */ |
| 7921 | netif_napi_add(dev, &sp->napi, s2io_poll, 32); | ||
| 7922 | |||
| 7923 | #ifdef CONFIG_NET_POLL_CONTROLLER | 7978 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 7924 | dev->poll_controller = s2io_netpoll; | 7979 | dev->poll_controller = s2io_netpoll; |
| 7925 | #endif | 7980 | #endif |
| @@ -7963,6 +8018,32 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 7963 | } | 8018 | } |
| 7964 | } | 8019 | } |
| 7965 | 8020 | ||
| 8021 | if (sp->config.intr_type == MSI_X) { | ||
| 8022 | sp->num_entries = config->rx_ring_num + 1; | ||
| 8023 | ret = s2io_enable_msi_x(sp); | ||
| 8024 | |||
| 8025 | if (!ret) { | ||
| 8026 | ret = s2io_test_msi(sp); | ||
| 8027 | /* rollback MSI-X, will re-enable during add_isr() */ | ||
| 8028 | remove_msix_isr(sp); | ||
| 8029 | } | ||
| 8030 | if (ret) { | ||
| 8031 | |||
| 8032 | DBG_PRINT(ERR_DBG, | ||
| 8033 | "%s: MSI-X requested but failed to enable\n", | ||
| 8034 | dev->name); | ||
| 8035 | sp->config.intr_type = INTA; | ||
| 8036 | } | ||
| 8037 | } | ||
| 8038 | |||
| 8039 | if (config->intr_type == MSI_X) { | ||
| 8040 | for (i = 0; i < config->rx_ring_num ; i++) | ||
| 8041 | netif_napi_add(dev, &mac_control->rings[i].napi, | ||
| 8042 | s2io_poll_msix, 64); | ||
| 8043 | } else { | ||
| 8044 | netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64); | ||
| 8045 | } | ||
| 8046 | |||
| 7966 | /* Not needed for Herc */ | 8047 | /* Not needed for Herc */ |
| 7967 | if (sp->device_type & XFRAME_I_DEVICE) { | 8048 | if (sp->device_type & XFRAME_I_DEVICE) { |
| 7968 | /* | 8049 | /* |
| @@ -8013,6 +8094,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 8013 | /* store mac addresses from CAM to s2io_nic structure */ | 8094 | /* store mac addresses from CAM to s2io_nic structure */ |
| 8014 | do_s2io_store_unicast_mc(sp); | 8095 | do_s2io_store_unicast_mc(sp); |
| 8015 | 8096 | ||
| 8097 | /* Configure MSIX vector for number of rings configured plus one */ | ||
| 8098 | if ((sp->device_type == XFRAME_II_DEVICE) && | ||
| 8099 | (config->intr_type == MSI_X)) | ||
| 8100 | sp->num_entries = config->rx_ring_num + 1; | ||
| 8101 | |||
| 8016 | /* Store the values of the MSIX table in the s2io_nic structure */ | 8102 | /* Store the values of the MSIX table in the s2io_nic structure */ |
| 8017 | store_xmsi_data(sp); | 8103 | store_xmsi_data(sp); |
| 8018 | /* reset Nic and bring it to known state */ | 8104 | /* reset Nic and bring it to known state */ |
| @@ -8078,8 +8164,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 8078 | break; | 8164 | break; |
| 8079 | } | 8165 | } |
| 8080 | 8166 | ||
| 8081 | if (napi) | 8167 | switch (sp->config.napi) { |
| 8168 | case 0: | ||
| 8169 | DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name); | ||
| 8170 | break; | ||
| 8171 | case 1: | ||
| 8082 | DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); | 8172 | DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); |
| 8173 | break; | ||
| 8174 | } | ||
| 8083 | 8175 | ||
| 8084 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, | 8176 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, |
| 8085 | sp->config.tx_fifo_num); | 8177 | sp->config.tx_fifo_num); |
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index 0709ebae9139..4706f7f9acb6 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
| @@ -706,7 +706,7 @@ struct ring_info { | |||
| 706 | /* per-ring buffer counter */ | 706 | /* per-ring buffer counter */ |
| 707 | u32 rx_bufs_left; | 707 | u32 rx_bufs_left; |
| 708 | 708 | ||
| 709 | #define MAX_LRO_SESSIONS 32 | 709 | #define MAX_LRO_SESSIONS 32 |
| 710 | struct lro lro0_n[MAX_LRO_SESSIONS]; | 710 | struct lro lro0_n[MAX_LRO_SESSIONS]; |
| 711 | u8 lro; | 711 | u8 lro; |
| 712 | 712 | ||
| @@ -725,6 +725,11 @@ struct ring_info { | |||
| 725 | /* copy of sp->pdev pointer */ | 725 | /* copy of sp->pdev pointer */ |
| 726 | struct pci_dev *pdev; | 726 | struct pci_dev *pdev; |
| 727 | 727 | ||
| 728 | /* Per ring napi struct */ | ||
| 729 | struct napi_struct napi; | ||
| 730 | |||
| 731 | unsigned long interrupt_count; | ||
| 732 | |||
| 728 | /* | 733 | /* |
| 729 | * Place holders for the virtual and physical addresses of | 734 | * Place holders for the virtual and physical addresses of |
| 730 | * all the Rx Blocks | 735 | * all the Rx Blocks |
| @@ -841,7 +846,7 @@ struct usr_addr { | |||
| 841 | * Structure to keep track of the MSI-X vectors and the corresponding | 846 | * Structure to keep track of the MSI-X vectors and the corresponding |
| 842 | * argument registered against each vector | 847 | * argument registered against each vector |
| 843 | */ | 848 | */ |
| 844 | #define MAX_REQUESTED_MSI_X 17 | 849 | #define MAX_REQUESTED_MSI_X 9 |
| 845 | struct s2io_msix_entry | 850 | struct s2io_msix_entry |
| 846 | { | 851 | { |
| 847 | u16 vector; | 852 | u16 vector; |
| @@ -849,8 +854,8 @@ struct s2io_msix_entry | |||
| 849 | void *arg; | 854 | void *arg; |
| 850 | 855 | ||
| 851 | u8 type; | 856 | u8 type; |
| 852 | #define MSIX_FIFO_TYPE 1 | 857 | #define MSIX_ALARM_TYPE 1 |
| 853 | #define MSIX_RING_TYPE 2 | 858 | #define MSIX_RING_TYPE 2 |
| 854 | 859 | ||
| 855 | u8 in_use; | 860 | u8 in_use; |
| 856 | #define MSIX_REGISTERED_SUCCESS 0xAA | 861 | #define MSIX_REGISTERED_SUCCESS 0xAA |
| @@ -877,7 +882,6 @@ struct s2io_nic { | |||
| 877 | */ | 882 | */ |
| 878 | int pkts_to_process; | 883 | int pkts_to_process; |
| 879 | struct net_device *dev; | 884 | struct net_device *dev; |
| 880 | struct napi_struct napi; | ||
| 881 | struct mac_info mac_control; | 885 | struct mac_info mac_control; |
| 882 | struct config_param config; | 886 | struct config_param config; |
| 883 | struct pci_dev *pdev; | 887 | struct pci_dev *pdev; |
| @@ -948,6 +952,7 @@ struct s2io_nic { | |||
| 948 | */ | 952 | */ |
| 949 | u8 other_fifo_idx; | 953 | u8 other_fifo_idx; |
| 950 | 954 | ||
| 955 | struct napi_struct napi; | ||
| 951 | /* after blink, the adapter must be restored with original | 956 | /* after blink, the adapter must be restored with original |
| 952 | * values. | 957 | * values. |
| 953 | */ | 958 | */ |
| @@ -962,6 +967,7 @@ struct s2io_nic { | |||
| 962 | unsigned long long start_time; | 967 | unsigned long long start_time; |
| 963 | struct vlan_group *vlgrp; | 968 | struct vlan_group *vlgrp; |
| 964 | #define MSIX_FLG 0xA5 | 969 | #define MSIX_FLG 0xA5 |
| 970 | int num_entries; | ||
| 965 | struct msix_entry *entries; | 971 | struct msix_entry *entries; |
| 966 | int msi_detected; | 972 | int msi_detected; |
| 967 | wait_queue_head_t msi_wait; | 973 | wait_queue_head_t msi_wait; |
| @@ -982,6 +988,7 @@ struct s2io_nic { | |||
| 982 | u16 lro_max_aggr_per_sess; | 988 | u16 lro_max_aggr_per_sess; |
| 983 | volatile unsigned long state; | 989 | volatile unsigned long state; |
| 984 | u64 general_int_mask; | 990 | u64 general_int_mask; |
| 991 | |||
| 985 | #define VPD_STRING_LEN 80 | 992 | #define VPD_STRING_LEN 80 |
| 986 | u8 product_name[VPD_STRING_LEN]; | 993 | u8 product_name[VPD_STRING_LEN]; |
| 987 | u8 serial_num[VPD_STRING_LEN]; | 994 | u8 serial_num[VPD_STRING_LEN]; |
| @@ -1103,7 +1110,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev); | |||
| 1103 | static int init_shared_mem(struct s2io_nic *sp); | 1110 | static int init_shared_mem(struct s2io_nic *sp); |
| 1104 | static void free_shared_mem(struct s2io_nic *sp); | 1111 | static void free_shared_mem(struct s2io_nic *sp); |
| 1105 | static int init_nic(struct s2io_nic *nic); | 1112 | static int init_nic(struct s2io_nic *nic); |
| 1106 | static void rx_intr_handler(struct ring_info *ring_data); | 1113 | static int rx_intr_handler(struct ring_info *ring_data, int budget); |
| 1107 | static void tx_intr_handler(struct fifo_info *fifo_data); | 1114 | static void tx_intr_handler(struct fifo_info *fifo_data); |
| 1108 | static void s2io_handle_errors(void * dev_id); | 1115 | static void s2io_handle_errors(void * dev_id); |
| 1109 | 1116 | ||
| @@ -1114,7 +1121,8 @@ static void s2io_set_multicast(struct net_device *dev); | |||
| 1114 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); | 1121 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); |
| 1115 | static void s2io_link(struct s2io_nic * sp, int link); | 1122 | static void s2io_link(struct s2io_nic * sp, int link); |
| 1116 | static void s2io_reset(struct s2io_nic * sp); | 1123 | static void s2io_reset(struct s2io_nic * sp); |
| 1117 | static int s2io_poll(struct napi_struct *napi, int budget); | 1124 | static int s2io_poll_msix(struct napi_struct *napi, int budget); |
| 1125 | static int s2io_poll_inta(struct napi_struct *napi, int budget); | ||
| 1118 | static void s2io_init_pci(struct s2io_nic * sp); | 1126 | static void s2io_init_pci(struct s2io_nic * sp); |
| 1119 | static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); | 1127 | static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); |
| 1120 | static void s2io_alarm_handle(unsigned long data); | 1128 | static void s2io_alarm_handle(unsigned long data); |
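The prototype change to rx_intr_handler() (it now takes and honours a budget) implies a per-ring poll routine of the usual NAPI shape. s2io_poll_msix() itself is not visible in these hunks, so the fragment below is a hypothetical reconstruction from names in the diff; treat it as a sketch of the pattern, not the driver's code:

```c
/* Hypothetical per-ring poll (kernel-style fragment). rx_intr_handler(),
 * s2io_chk_rx_buffers() and ring->napi are from the diff; the
 * completion step is the standard NAPI pattern of the era. */
static int s2io_poll_msix_sketch(struct napi_struct *napi, int budget)
{
	struct ring_info *ring = container_of(napi, struct ring_info, napi);
	int pkts_processed;

	pkts_processed = rx_intr_handler(ring, budget);  /* honour budget */
	s2io_chk_rx_buffers(ring);                       /* refill RxDs */

	if (pkts_processed < budget) {
		/* ring drained: stop polling, re-enable this ring's vector */
		netif_rx_complete(ring->dev, napi);
	}
	return pkts_processed;
}
```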
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c index 888b7dec9866..33bb18f810fb 100644 --- a/drivers/net/sb1250-mac.c +++ b/drivers/net/sb1250-mac.c | |||
| @@ -179,8 +179,7 @@ enum sbmac_state { | |||
| 179 | #define SBMAC_MAX_TXDESCR 256 | 179 | #define SBMAC_MAX_TXDESCR 256 |
| 180 | #define SBMAC_MAX_RXDESCR 256 | 180 | #define SBMAC_MAX_RXDESCR 256 |
| 181 | 181 | ||
| 182 | #define ETHER_ALIGN 2 | 182 | #define ETHER_ADDR_LEN 6 |
| 183 | #define ETHER_ADDR_LEN 6 | ||
| 184 | #define ENET_PACKET_SIZE 1518 | 183 | #define ENET_PACKET_SIZE 1518 |
| 185 | /*#define ENET_PACKET_SIZE 9216 */ | 184 | /*#define ENET_PACKET_SIZE 9216 */ |
| 186 | 185 | ||
| @@ -262,8 +261,6 @@ struct sbmac_softc { | |||
| 262 | spinlock_t sbm_lock; /* spin lock */ | 261 | spinlock_t sbm_lock; /* spin lock */ |
| 263 | int sbm_devflags; /* current device flags */ | 262 | int sbm_devflags; /* current device flags */ |
| 264 | 263 | ||
| 265 | int sbm_buffersize; | ||
| 266 | |||
| 267 | /* | 264 | /* |
| 268 | * Controller-specific things | 265 | * Controller-specific things |
| 269 | */ | 266 | */ |
| @@ -305,10 +302,11 @@ struct sbmac_softc { | |||
| 305 | static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, | 302 | static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, |
| 306 | int txrx, int maxdescr); | 303 | int txrx, int maxdescr); |
| 307 | static void sbdma_channel_start(struct sbmacdma *d, int rxtx); | 304 | static void sbdma_channel_start(struct sbmacdma *d, int rxtx); |
| 308 | static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); | 305 | static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, |
| 306 | struct sk_buff *m); | ||
| 309 | static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); | 307 | static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); |
| 310 | static void sbdma_emptyring(struct sbmacdma *d); | 308 | static void sbdma_emptyring(struct sbmacdma *d); |
| 311 | static void sbdma_fillring(struct sbmacdma *d); | 309 | static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); |
| 312 | static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, | 310 | static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, |
| 313 | int work_to_do, int poll); | 311 | int work_to_do, int poll); |
| 314 | static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, | 312 | static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, |
| @@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d) | |||
| 777 | d->sbdma_remptr = NULL; | 775 | d->sbdma_remptr = NULL; |
| 778 | } | 776 | } |
| 779 | 777 | ||
| 780 | static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) | 778 | static inline void sbdma_align_skb(struct sk_buff *skb, |
| 779 | unsigned int power2, unsigned int offset) | ||
| 781 | { | 780 | { |
| 782 | unsigned long addr; | 781 | unsigned char *addr = skb->data; |
| 783 | unsigned long newaddr; | 782 | unsigned char *newaddr = PTR_ALIGN(addr, power2); |
| 784 | |||
| 785 | addr = (unsigned long) skb->data; | ||
| 786 | |||
| 787 | newaddr = (addr + power2 - 1) & ~(power2 - 1); | ||
| 788 | 783 | ||
| 789 | skb_reserve(skb,newaddr-addr+offset); | 784 | skb_reserve(skb, newaddr - addr + offset); |
| 790 | } | 785 | } |
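PTR_ALIGN(p, a) rounds a pointer up to the next a-byte boundary, which is exactly the arithmetic the removed open-coded version performed: `(addr + a - 1) & ~(a - 1)`. A self-contained demo of the equivalent computation (64 stands in for SMP_CACHE_BYTES, 2 for NET_IP_ALIGN on most arches):

```c
#include <stdio.h>
#include <stdint.h>

/* Userspace analogue of PTR_ALIGN on an integer address. */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

int main(void)
{
	char buf[256];
	uintptr_t addr    = (uintptr_t)buf + 3;     /* misalign on purpose */
	uintptr_t aligned = ALIGN_UP(addr, 64);     /* cache-line boundary */

	/* skb_reserve(skb, newaddr - addr + offset) analogue: */
	printf("reserve %zu + 2 (NET_IP_ALIGN) bytes\n",
	       (size_t)(aligned - addr));
	return 0;
}
```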
| 791 | 786 | ||
| 792 | 787 | ||
| @@ -797,7 +792,8 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) | |||
| 797 | * this queues a buffer for inbound packets. | 792 | * this queues a buffer for inbound packets. |
| 798 | * | 793 | * |
| 799 | * Input parameters: | 794 | * Input parameters: |
| 800 | * d - DMA channel descriptor | 795 | * sc - softc structure |
| 796 | * d - DMA channel descriptor | ||
| 801 | * sb - sk_buff to add, or NULL if we should allocate one | 797 | * sb - sk_buff to add, or NULL if we should allocate one |
| 802 | * | 798 | * |
| 803 | * Return value: | 799 | * Return value: |
| @@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) | |||
| 806 | ********************************************************************* */ | 802 | ********************************************************************* */ |
| 807 | 803 | ||
| 808 | 804 | ||
| 809 | static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) | 805 | static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, |
| 806 | struct sk_buff *sb) | ||
| 810 | { | 807 | { |
| 808 | struct net_device *dev = sc->sbm_dev; | ||
| 811 | struct sbdmadscr *dsc; | 809 | struct sbdmadscr *dsc; |
| 812 | struct sbdmadscr *nextdsc; | 810 | struct sbdmadscr *nextdsc; |
| 813 | struct sk_buff *sb_new = NULL; | 811 | struct sk_buff *sb_new = NULL; |
| @@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) | |||
| 848 | */ | 846 | */ |
| 849 | 847 | ||
| 850 | if (sb == NULL) { | 848 | if (sb == NULL) { |
| 851 | sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); | 849 | sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + |
| 850 | SMP_CACHE_BYTES * 2 + | ||
| 851 | NET_IP_ALIGN); | ||
| 852 | if (sb_new == NULL) { | 852 | if (sb_new == NULL) { |
| 853 | pr_info("%s: sk_buff allocation failed\n", | 853 | pr_info("%s: sk_buff allocation failed\n", |
| 854 | d->sbdma_eth->sbm_dev->name); | 854 | d->sbdma_eth->sbm_dev->name); |
| 855 | return -ENOBUFS; | 855 | return -ENOBUFS; |
| 856 | } | 856 | } |
| 857 | 857 | ||
| 858 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); | 858 | sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); |
| 859 | } | 859 | } |
| 860 | else { | 860 | else { |
| 861 | sb_new = sb; | 861 | sb_new = sb; |
| @@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) | |||
| 874 | * Do not interrupt per DMA transfer. | 874 | * Do not interrupt per DMA transfer. |
| 875 | */ | 875 | */ |
| 876 | dsc->dscr_a = virt_to_phys(sb_new->data) | | 876 | dsc->dscr_a = virt_to_phys(sb_new->data) | |
| 877 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0; | 877 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; |
| 878 | #else | 878 | #else |
| 879 | dsc->dscr_a = virt_to_phys(sb_new->data) | | 879 | dsc->dscr_a = virt_to_phys(sb_new->data) | |
| 880 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | | 880 | V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | |
| 881 | M_DMA_DSCRA_INTERRUPT; | 881 | M_DMA_DSCRA_INTERRUPT; |
| 882 | #endif | 882 | #endif |
| 883 | 883 | ||
| @@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d) | |||
| 1032 | * with sk_buffs | 1032 | * with sk_buffs |
| 1033 | * | 1033 | * |
| 1034 | * Input parameters: | 1034 | * Input parameters: |
| 1035 | * d - DMA channel | 1035 | * sc - softc structure |
| 1036 | * d - DMA channel | ||
| 1036 | * | 1037 | * |
| 1037 | * Return value: | 1038 | * Return value: |
| 1038 | * nothing | 1039 | * nothing |
| 1039 | ********************************************************************* */ | 1040 | ********************************************************************* */ |
| 1040 | 1041 | ||
| 1041 | static void sbdma_fillring(struct sbmacdma *d) | 1042 | static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) |
| 1042 | { | 1043 | { |
| 1043 | int idx; | 1044 | int idx; |
| 1044 | 1045 | ||
| 1045 | for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { | 1046 | for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { |
| 1046 | if (sbdma_add_rcvbuffer(d,NULL) != 0) | 1047 | if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) |
| 1047 | break; | 1048 | break; |
| 1048 | } | 1049 | } |
| 1049 | } | 1050 | } |
| @@ -1159,10 +1160,11 @@ again: | |||
| 1159 | * packet and put it right back on the receive ring. | 1160 | * packet and put it right back on the receive ring. |
| 1160 | */ | 1161 | */ |
| 1161 | 1162 | ||
| 1162 | if (unlikely (sbdma_add_rcvbuffer(d,NULL) == | 1163 | if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == |
| 1163 | -ENOBUFS)) { | 1164 | -ENOBUFS)) { |
| 1164 | dev->stats.rx_dropped++; | 1165 | dev->stats.rx_dropped++; |
| 1165 | sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ | 1166 | /* Re-add old buffer */ |
| 1167 | sbdma_add_rcvbuffer(sc, d, sb); | ||
| 1166 | /* No point in continuing at the moment */ | 1168 | /* No point in continuing at the moment */ |
| 1167 | printk(KERN_ERR "dropped packet (1)\n"); | 1169 | printk(KERN_ERR "dropped packet (1)\n"); |
| 1168 | d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); | 1170 | d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); |
| @@ -1212,7 +1214,7 @@ again: | |||
| 1212 | * put it back on the receive ring. | 1214 | * put it back on the receive ring. |
| 1213 | */ | 1215 | */ |
| 1214 | dev->stats.rx_errors++; | 1216 | dev->stats.rx_errors++; |
| 1215 | sbdma_add_rcvbuffer(d,sb); | 1217 | sbdma_add_rcvbuffer(sc, d, sb); |
| 1216 | } | 1218 | } |
| 1217 | 1219 | ||
| 1218 | 1220 | ||
| @@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s) | |||
| 1570 | * Fill the receive ring | 1572 | * Fill the receive ring |
| 1571 | */ | 1573 | */ |
| 1572 | 1574 | ||
| 1573 | sbdma_fillring(&(s->sbm_rxdma)); | 1575 | sbdma_fillring(s, &(s->sbm_rxdma)); |
| 1574 | 1576 | ||
| 1575 | /* | 1577 | /* |
| 1576 | * Turn on the rest of the bits in the enable register | 1578 | * Turn on the rest of the bits in the enable register |
| @@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base) | |||
| 2312 | dev->dev_addr[i] = eaddr[i]; | 2314 | dev->dev_addr[i] = eaddr[i]; |
| 2313 | } | 2315 | } |
| 2314 | 2316 | ||
| 2315 | |||
| 2316 | /* | ||
| 2317 | * Init packet size | ||
| 2318 | */ | ||
| 2319 | |||
| 2320 | sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN; | ||
| 2321 | |||
| 2322 | /* | 2317 | /* |
| 2323 | * Initialize context (get pointers to registers and stuff), then | 2318 | * Initialize context (get pointers to registers and stuff), then |
| 2324 | * allocate the memory for the descriptor tables. | 2319 | * allocate the memory for the descriptor tables. |
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c index f64a860029b7..b4b63805ee8f 100644 --- a/drivers/net/sc92031.c +++ b/drivers/net/sc92031.c | |||
| @@ -953,9 +953,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 953 | unsigned entry; | 953 | unsigned entry; |
| 954 | u32 tx_status; | 954 | u32 tx_status; |
| 955 | 955 | ||
| 956 | if (skb_padto(skb, ETH_ZLEN)) | ||
| 957 | return NETDEV_TX_OK; | ||
| 958 | |||
| 959 | if (unlikely(skb->len > TX_BUF_SIZE)) { | 956 | if (unlikely(skb->len > TX_BUF_SIZE)) { |
| 960 | dev->stats.tx_dropped++; | 957 | dev->stats.tx_dropped++; |
| 961 | goto out; | 958 | goto out; |
| @@ -975,6 +972,11 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 975 | skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); | 972 | skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); |
| 976 | 973 | ||
| 977 | len = skb->len; | 974 | len = skb->len; |
| 975 | if (unlikely(len < ETH_ZLEN)) { | ||
| 976 | memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, | ||
| 977 | 0, ETH_ZLEN - len); | ||
| 978 | len = ETH_ZLEN; | ||
| 979 | } | ||
| 978 | 980 | ||
| 979 | wmb(); | 981 | wmb(); |
| 980 | 982 | ||
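The sc92031 change above replaces skb_padto() (which may have to reallocate the skb) with zero-padding the tail of the already-copied frame in the driver's own TX buffer. A compilable sketch of just the padding step, with the buffer handling simulated:

```c
#include <stdio.h>
#include <string.h>

#define ETH_ZLEN    60   /* minimum Ethernet frame length, excluding FCS */
#define TX_BUF_SIZE 1536

int main(void)
{
	unsigned char tx_buf[TX_BUF_SIZE];
	unsigned char frame[20] = { 0xde, 0xad };  /* runt frame */
	size_t len = sizeof(frame);

	memcpy(tx_buf, frame, len);                /* skb_copy_and_csum_dev analogue */
	if (len < ETH_ZLEN) {
		memset(tx_buf + len, 0, ETH_ZLEN - len);  /* zero the tail */
		len = ETH_ZLEN;
	}
	printf("transmit length: %zu\n", len);
	return 0;
}
```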
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h index 2806201644cc..2c79d27404e0 100644 --- a/drivers/net/sfc/bitfield.h +++ b/drivers/net/sfc/bitfield.h | |||
| @@ -483,7 +483,7 @@ typedef union efx_oword { | |||
| 483 | #endif | 483 | #endif |
| 484 | 484 | ||
| 485 | #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ | 485 | #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ |
| 486 | if (FALCON_REV(efx) >= FALCON_REV_B0) { \ | 486 | if (falcon_rev(efx) >= FALCON_REV_B0) { \ |
| 487 | EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ | 487 | EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ |
| 488 | } else { \ | 488 | } else { \ |
| 489 | EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ | 489 | EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ |
| @@ -491,7 +491,7 @@ typedef union efx_oword { | |||
| 491 | } while (0) | 491 | } while (0) |
| 492 | 492 | ||
| 493 | #define EFX_QWORD_FIELD_VER(efx, qword, field) \ | 493 | #define EFX_QWORD_FIELD_VER(efx, qword, field) \ |
| 494 | (FALCON_REV(efx) >= FALCON_REV_B0 ? \ | 494 | (falcon_rev(efx) >= FALCON_REV_B0 ? \ |
| 495 | EFX_QWORD_FIELD((qword), field##_B0) : \ | 495 | EFX_QWORD_FIELD((qword), field##_B0) : \ |
| 496 | EFX_QWORD_FIELD((qword), field##_A1)) | 496 | EFX_QWORD_FIELD((qword), field##_A1)) |
| 497 | 497 | ||
| @@ -501,8 +501,5 @@ typedef union efx_oword { | |||
| 501 | #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) | 501 | #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) |
| 502 | #define EFX_DMA_TYPE_WIDTH(width) \ | 502 | #define EFX_DMA_TYPE_WIDTH(width) \ |
| 503 | (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) | 503 | (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) |
| 504 | #define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \ | ||
| 505 | ~((u64) 0) : ~((u32) 0)) | ||
| 506 | #define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK) | ||
| 507 | 504 | ||
| 508 | #endif /* EFX_BITFIELD_H */ | 505 | #endif /* EFX_BITFIELD_H */ |
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c index eecaa6d58584..7fc0328dc055 100644 --- a/drivers/net/sfc/boards.c +++ b/drivers/net/sfc/boards.c | |||
| @@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context) | |||
| 27 | struct efx_blinker *bl = &efx->board_info.blinker; | 27 | struct efx_blinker *bl = &efx->board_info.blinker; |
| 28 | efx->board_info.set_fault_led(efx, bl->state); | 28 | efx->board_info.set_fault_led(efx, bl->state); |
| 29 | bl->state = !bl->state; | 29 | bl->state = !bl->state; |
| 30 | if (bl->resubmit) { | 30 | if (bl->resubmit) |
| 31 | bl->timer.expires = jiffies + BLINK_INTERVAL; | 31 | mod_timer(&bl->timer, jiffies + BLINK_INTERVAL); |
| 32 | add_timer(&bl->timer); | ||
| 33 | } | ||
| 34 | } | 32 | } |
| 35 | 33 | ||
| 36 | static void board_blink(struct efx_nic *efx, int blink) | 34 | static void board_blink(struct efx_nic *efx, int blink) |
| @@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink) | |||
| 44 | blinker->state = 0; | 42 | blinker->state = 0; |
| 45 | setup_timer(&blinker->timer, blink_led_timer, | 43 | setup_timer(&blinker->timer, blink_led_timer, |
| 46 | (unsigned long)efx); | 44 | (unsigned long)efx); |
| 47 | blinker->timer.expires = jiffies + BLINK_INTERVAL; | 45 | mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL); |
| 48 | add_timer(&blinker->timer); | ||
| 49 | } else { | 46 | } else { |
| 50 | blinker->resubmit = 0; | 47 | blinker->resubmit = 0; |
| 51 | if (blinker->timer.function) | 48 | if (blinker->timer.function) |
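mod_timer() sets the new expiry and (re)arms the timer in one call, whether or not it is currently pending, so the two-step "timer.expires = ...; add_timer(...)" sequence collapses as above. A kernel-style sketch of the resulting pattern; BLINK_INTERVAL here is a stand-in value and the callback body is elided:

```c
#include <linux/timer.h>
#include <linux/jiffies.h>

#define BLINK_INTERVAL (HZ / 3)    /* stand-in; the driver defines its own */

static struct timer_list blink_timer;

static void blink_cb(unsigned long data)
{
	/* ... toggle the LED ... */
	mod_timer(&blink_timer, jiffies + BLINK_INTERVAL);  /* re-arm */
}

static void blink_start(unsigned long ctx)
{
	setup_timer(&blink_timer, blink_cb, ctx);
	mod_timer(&blink_timer, jiffies + BLINK_INTERVAL);  /* arm first shot */
}
```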
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 418f2e53a95b..449760642e31 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c | |||
| @@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota) | |||
| 199 | */ | 199 | */ |
| 200 | static inline void efx_channel_processed(struct efx_channel *channel) | 200 | static inline void efx_channel_processed(struct efx_channel *channel) |
| 201 | { | 201 | { |
| 202 | /* Write to EVQ_RPTR_REG. If a new event arrived in a race | 202 | /* The interrupt handler for this channel may set work_pending |
| 203 | * with finishing processing, a new interrupt will be raised. | 203 | * as soon as we acknowledge the events we've seen. Make sure |
| 204 | */ | 204 | * it's cleared before then. */ |
| 205 | channel->work_pending = 0; | 205 | channel->work_pending = 0; |
| 206 | smp_wmb(); /* Ensure channel updated before any new interrupt. */ | 206 | smp_wmb(); |
| 207 | |||
| 207 | falcon_eventq_read_ack(channel); | 208 | falcon_eventq_read_ack(channel); |
| 208 | } | 209 | } |
| 209 | 210 | ||
| @@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel) | |||
| 265 | napi_disable(&channel->napi_str); | 266 | napi_disable(&channel->napi_str); |
| 266 | 267 | ||
| 267 | /* Poll the channel */ | 268 | /* Poll the channel */ |
| 268 | (void) efx_process_channel(channel, efx->type->evq_size); | 269 | efx_process_channel(channel, efx->type->evq_size); |
| 269 | 270 | ||
| 270 | /* Ack the eventq. This may cause an interrupt to be generated | 271 | /* Ack the eventq. This may cause an interrupt to be generated |
| 271 | * when they are reenabled */ | 272 | * when they are reenabled */ |
| @@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel) | |||
| 317 | * | 318 | * |
| 318 | *************************************************************************/ | 319 | *************************************************************************/ |
| 319 | 320 | ||
| 320 | /* Setup per-NIC RX buffer parameters. | ||
| 321 | * Calculate the rx buffer allocation parameters required to support | ||
| 322 | * the current MTU, including padding for header alignment and overruns. | ||
| 323 | */ | ||
| 324 | static void efx_calc_rx_buffer_params(struct efx_nic *efx) | ||
| 325 | { | ||
| 326 | unsigned int order, len; | ||
| 327 | |||
| 328 | len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + | ||
| 329 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | ||
| 330 | efx->type->rx_buffer_padding); | ||
| 331 | |||
| 332 | /* Calculate page-order */ | ||
| 333 | for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order) | ||
| 334 | ; | ||
| 335 | |||
| 336 | efx->rx_buffer_len = len; | ||
| 337 | efx->rx_buffer_order = order; | ||
| 338 | } | ||
| 339 | |||
| 340 | static int efx_probe_channel(struct efx_channel *channel) | 321 | static int efx_probe_channel(struct efx_channel *channel) |
| 341 | { | 322 | { |
| 342 | struct efx_tx_queue *tx_queue; | 323 | struct efx_tx_queue *tx_queue; |
| @@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx) | |||
| 387 | struct efx_channel *channel; | 368 | struct efx_channel *channel; |
| 388 | int rc = 0; | 369 | int rc = 0; |
| 389 | 370 | ||
| 390 | efx_calc_rx_buffer_params(efx); | 371 | /* Calculate the rx buffer allocation parameters required to |
| 372 | * support the current MTU, including padding for header | ||
| 373 | * alignment and overruns. | ||
| 374 | */ | ||
| 375 | efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + | ||
| 376 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | ||
| 377 | efx->type->rx_buffer_padding); | ||
| 378 | efx->rx_buffer_order = get_order(efx->rx_buffer_len); | ||
| 391 | 379 | ||
| 392 | /* Initialise the channels */ | 380 | /* Initialise the channels */ |
| 393 | efx_for_each_channel(channel, efx) { | 381 | efx_for_each_channel(channel, efx) { |
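get_order(len), used above in place of the removed hand-rolled loop, returns the smallest order such that `PAGE_SIZE << order >= len`. A userspace demo of the equivalent computation (4 KiB pages assumed; the align/pad figures are illustrative, not EFX's exact constants):

```c
#include <stdio.h>

#define PAGE_SIZE 4096u   /* assumption: 4 KiB pages */

/* Userspace equivalent of the kernel's get_order() for len > 0. */
static unsigned int my_get_order(unsigned int len)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < len)
		order++;
	return order;
}

int main(void)
{
	unsigned int mtu = 9000;
	unsigned int len = 2 + mtu + 16;   /* align + frame + pad, illustrative */

	printf("len=%u -> order %u (%u bytes)\n",
	       len, my_get_order(len), PAGE_SIZE << my_get_order(len));
	return 0;
}
```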
| @@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel) | |||
| 440 | netif_napi_add(channel->napi_dev, &channel->napi_str, | 428 | netif_napi_add(channel->napi_dev, &channel->napi_str, |
| 441 | efx_poll, napi_weight); | 429 | efx_poll, napi_weight); |
| 442 | 430 | ||
| 431 | /* The interrupt handler for this channel may set work_pending | ||
| 432 | * as soon as we enable it. Make sure it's cleared before | ||
| 433 | * then. Similarly, make sure it sees the enabled flag set. */ | ||
| 443 | channel->work_pending = 0; | 434 | channel->work_pending = 0; |
| 444 | channel->enabled = 1; | 435 | channel->enabled = 1; |
| 445 | smp_wmb(); /* ensure channel updated before first interrupt */ | 436 | smp_wmb(); |
| 446 | 437 | ||
| 447 | napi_enable(&channel->napi_str); | 438 | napi_enable(&channel->napi_str); |
| 448 | 439 | ||
| @@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx) | |||
| 704 | mutex_unlock(&efx->mac_lock); | 695 | mutex_unlock(&efx->mac_lock); |
| 705 | 696 | ||
| 706 | /* Serialise against efx_set_multicast_list() */ | 697 | /* Serialise against efx_set_multicast_list() */ |
| 707 | if (NET_DEV_REGISTERED(efx)) { | 698 | if (efx_dev_registered(efx)) { |
| 708 | netif_tx_lock_bh(efx->net_dev); | 699 | netif_tx_lock_bh(efx->net_dev); |
| 709 | netif_tx_unlock_bh(efx->net_dev); | 700 | netif_tx_unlock_bh(efx->net_dev); |
| 710 | } | 701 | } |
| @@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx) | |||
| 791 | efx->membase = ioremap_nocache(efx->membase_phys, | 782 | efx->membase = ioremap_nocache(efx->membase_phys, |
| 792 | efx->type->mem_map_size); | 783 | efx->type->mem_map_size); |
| 793 | if (!efx->membase) { | 784 | if (!efx->membase) { |
| 794 | EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", | 785 | EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", |
| 795 | efx->type->mem_bar, efx->membase_phys, | 786 | efx->type->mem_bar, |
| 787 | (unsigned long long)efx->membase_phys, | ||
| 796 | efx->type->mem_map_size); | 788 | efx->type->mem_map_size); |
| 797 | rc = -ENOMEM; | 789 | rc = -ENOMEM; |
| 798 | goto fail4; | 790 | goto fail4; |
| 799 | } | 791 | } |
| 800 | EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", | 792 | EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", |
| 801 | efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, | 793 | efx->type->mem_bar, (unsigned long long)efx->membase_phys, |
| 802 | efx->membase); | 794 | efx->type->mem_map_size, efx->membase); |
| 803 | 795 | ||
| 804 | return 0; | 796 | return 0; |
| 805 | 797 | ||
| 806 | fail4: | 798 | fail4: |
| 807 | release_mem_region(efx->membase_phys, efx->type->mem_map_size); | 799 | release_mem_region(efx->membase_phys, efx->type->mem_map_size); |
| 808 | fail3: | 800 | fail3: |
| 809 | efx->membase_phys = 0UL; | 801 | efx->membase_phys = 0; |
| 810 | fail2: | 802 | fail2: |
| 811 | pci_disable_device(efx->pci_dev); | 803 | pci_disable_device(efx->pci_dev); |
| 812 | fail1: | 804 | fail1: |
| @@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx) | |||
| 824 | 816 | ||
| 825 | if (efx->membase_phys) { | 817 | if (efx->membase_phys) { |
| 826 | pci_release_region(efx->pci_dev, efx->type->mem_bar); | 818 | pci_release_region(efx->pci_dev, efx->type->mem_bar); |
| 827 | efx->membase_phys = 0UL; | 819 | efx->membase_phys = 0; |
| 828 | } | 820 | } |
| 829 | 821 | ||
| 830 | pci_disable_device(efx->pci_dev); | 822 | pci_disable_device(efx->pci_dev); |
| @@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx) | |||
| 1043 | return; | 1035 | return; |
| 1044 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) | 1036 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) |
| 1045 | return; | 1037 | return; |
| 1046 | if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) | 1038 | if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) |
| 1047 | return; | 1039 | return; |
| 1048 | 1040 | ||
| 1049 | /* Mark the port as enabled so port reconfigurations can start, then | 1041 | /* Mark the port as enabled so port reconfigurations can start, then |
| @@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx) | |||
| 1073 | cancel_delayed_work_sync(&efx->monitor_work); | 1065 | cancel_delayed_work_sync(&efx->monitor_work); |
| 1074 | 1066 | ||
| 1075 | /* Ensure that all RX slow refills are complete. */ | 1067 | /* Ensure that all RX slow refills are complete. */ |
| 1076 | efx_for_each_rx_queue(rx_queue, efx) { | 1068 | efx_for_each_rx_queue(rx_queue, efx) |
| 1077 | cancel_delayed_work_sync(&rx_queue->work); | 1069 | cancel_delayed_work_sync(&rx_queue->work); |
| 1078 | } | ||
| 1079 | 1070 | ||
| 1080 | /* Stop scheduled port reconfigurations */ | 1071 | /* Stop scheduled port reconfigurations */ |
| 1081 | cancel_work_sync(&efx->reconfigure_work); | 1072 | cancel_work_sync(&efx->reconfigure_work); |
| @@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx) | |||
| 1101 | falcon_disable_interrupts(efx); | 1092 | falcon_disable_interrupts(efx); |
| 1102 | if (efx->legacy_irq) | 1093 | if (efx->legacy_irq) |
| 1103 | synchronize_irq(efx->legacy_irq); | 1094 | synchronize_irq(efx->legacy_irq); |
| 1104 | efx_for_each_channel_with_interrupt(channel, efx) | 1095 | efx_for_each_channel_with_interrupt(channel, efx) { |
| 1105 | if (channel->irq) | 1096 | if (channel->irq) |
| 1106 | synchronize_irq(channel->irq); | 1097 | synchronize_irq(channel->irq); |
| 1098 | } | ||
| 1107 | 1099 | ||
| 1108 | /* Stop all NAPI processing and synchronous rx refills */ | 1100 | /* Stop all NAPI processing and synchronous rx refills */ |
| 1109 | efx_for_each_channel(channel, efx) | 1101 | efx_for_each_channel(channel, efx) |
| @@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx) | |||
| 1125 | /* Stop the kernel transmit interface late, so the watchdog | 1117 | /* Stop the kernel transmit interface late, so the watchdog |
| 1126 | * timer isn't ticking over the flush */ | 1118 | * timer isn't ticking over the flush */ |
| 1127 | efx_stop_queue(efx); | 1119 | efx_stop_queue(efx); |
| 1128 | if (NET_DEV_REGISTERED(efx)) { | 1120 | if (efx_dev_registered(efx)) { |
| 1129 | netif_tx_lock_bh(efx->net_dev); | 1121 | netif_tx_lock_bh(efx->net_dev); |
| 1130 | netif_tx_unlock_bh(efx->net_dev); | 1122 | netif_tx_unlock_bh(efx->net_dev); |
| 1131 | } | 1123 | } |
| @@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev) | |||
| 1344 | return 0; | 1336 | return 0; |
| 1345 | } | 1337 | } |
| 1346 | 1338 | ||
| 1347 | /* Context: process, dev_base_lock held, non-blocking. */ | 1339 | /* Context: process, dev_base_lock or RTNL held, non-blocking. */ |
| 1348 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) | 1340 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) |
| 1349 | { | 1341 | { |
| 1350 | struct efx_nic *efx = net_dev->priv; | 1342 | struct efx_nic *efx = net_dev->priv; |
| 1351 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | 1343 | struct efx_mac_stats *mac_stats = &efx->mac_stats; |
| 1352 | struct net_device_stats *stats = &net_dev->stats; | 1344 | struct net_device_stats *stats = &net_dev->stats; |
| 1353 | 1345 | ||
| 1346 | /* Update stats if possible, but do not wait if another thread | ||
| 1347 | * is updating them (or resetting the NIC); slightly stale | ||
| 1348 | * stats are acceptable. | ||
| 1349 | */ | ||
| 1354 | if (!spin_trylock(&efx->stats_lock)) | 1350 | if (!spin_trylock(&efx->stats_lock)) |
| 1355 | return stats; | 1351 | return stats; |
| 1356 | if (efx->state == STATE_RUNNING) { | 1352 | if (efx->state == STATE_RUNNING) { |
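The new comment spells out the trylock contract: the stats path would rather return slightly stale counters than block behind an updater or a reset. A userspace sketch of the same pattern, with a POSIX mutex standing in for the driver's spinlock:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long rx_packets;

/* Reader that never sleeps: on contention, hand back the last snapshot. */
static unsigned long read_stats(void)
{
	unsigned long snapshot;

	if (pthread_mutex_trylock(&stats_lock) != 0)
		return rx_packets;        /* contended: stale value is fine */
	/* ... refresh counters from hardware here ... */
	snapshot = rx_packets;
	pthread_mutex_unlock(&stats_lock);
	return snapshot;
}

int main(void)
{
	printf("rx_packets=%lu\n", read_stats());
	return 0;
}
```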
| @@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev) | |||
| 1494 | static int efx_netdev_event(struct notifier_block *this, | 1490 | static int efx_netdev_event(struct notifier_block *this, |
| 1495 | unsigned long event, void *ptr) | 1491 | unsigned long event, void *ptr) |
| 1496 | { | 1492 | { |
| 1497 | struct net_device *net_dev = (struct net_device *)ptr; | 1493 | struct net_device *net_dev = ptr; |
| 1498 | 1494 | ||
| 1499 | if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { | 1495 | if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { |
| 1500 | struct efx_nic *efx = net_dev->priv; | 1496 | struct efx_nic *efx = net_dev->priv; |
| @@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx) | |||
| 1563 | efx_for_each_tx_queue(tx_queue, efx) | 1559 | efx_for_each_tx_queue(tx_queue, efx) |
| 1564 | efx_release_tx_buffers(tx_queue); | 1560 | efx_release_tx_buffers(tx_queue); |
| 1565 | 1561 | ||
| 1566 | if (NET_DEV_REGISTERED(efx)) { | 1562 | if (efx_dev_registered(efx)) { |
| 1567 | strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); | 1563 | strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); |
| 1568 | unregister_netdev(efx->net_dev); | 1564 | unregister_netdev(efx->net_dev); |
| 1569 | } | 1565 | } |
| @@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx) | |||
| 1688 | if (method == RESET_TYPE_DISABLE) { | 1684 | if (method == RESET_TYPE_DISABLE) { |
| 1689 | /* Reinitialise the device anyway so the driver unload sequence | 1685 | /* Reinitialise the device anyway so the driver unload sequence |
| 1690 | * can talk to the external SRAM */ | 1686 | * can talk to the external SRAM */ |
| 1691 | (void) falcon_init_nic(efx); | 1687 | falcon_init_nic(efx); |
| 1692 | rc = -EIO; | 1688 | rc = -EIO; |
| 1693 | goto fail4; | 1689 | goto fail4; |
| 1694 | } | 1690 | } |
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index b57cc68058c0..d3f749c72d41 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c | |||
| @@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | |||
| 116 | ************************************************************************** | 116 | ************************************************************************** |
| 117 | */ | 117 | */ |
| 118 | 118 | ||
| 119 | /* DMA address mask (up to 46-bit, avoiding compiler warnings) | 119 | /* DMA address mask */ |
| 120 | * | 120 | #define FALCON_DMA_MASK DMA_BIT_MASK(46) |
| 121 | * Note that it is possible to have a platform with 64-bit longs and | ||
| 122 | * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the | ||
| 123 | * platform DMA mask. | ||
| 124 | */ | ||
| 125 | #if BITS_PER_LONG == 64 | ||
| 126 | #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL) | ||
| 127 | #else | ||
| 128 | #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL) | ||
| 129 | #endif | ||
| 130 | 121 | ||
| 131 | /* TX DMA length mask (13-bit) */ | 122 | /* TX DMA length mask (13-bit) */ |
| 132 | #define FALCON_TX_DMA_MASK (4096 - 1) | 123 | #define FALCON_TX_DMA_MASK (4096 - 1) |
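DMA_BIT_MASK(46) is simply the low 46 bits set, so it matches the magic constant the removed BITS_PER_LONG-conditional definition spelled out, without the UL/ULL suffix games. A quick check (the macro below replicates the kernel's dma-mapping.h definition for userspace):

```c
#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t mask = DMA_BIT_MASK(46);

	printf("mask = %#llx\n", (unsigned long long)mask);
	printf("matches old constant: %s\n",
	       mask == 0x00003fffffffffffULL ? "yes" : "no");
	return 0;
}
```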
| @@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | |||
| 145 | #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 | 136 | #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 |
| 146 | 137 | ||
| 147 | #define FALCON_IS_DUAL_FUNC(efx) \ | 138 | #define FALCON_IS_DUAL_FUNC(efx) \ |
| 148 | (FALCON_REV(efx) < FALCON_REV_B0) | 139 | (falcon_rev(efx) < FALCON_REV_B0) |
| 149 | 140 | ||
| 150 | /************************************************************************** | 141 | /************************************************************************** |
| 151 | * | 142 | * |
| @@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
| 465 | TX_DESCQ_TYPE, 0, | 456 | TX_DESCQ_TYPE, 0, |
| 466 | TX_NON_IP_DROP_DIS_B0, 1); | 457 | TX_NON_IP_DROP_DIS_B0, 1); |
| 467 | 458 | ||
| 468 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 459 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
| 469 | int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); | 460 | int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); |
| 470 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); | 461 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); |
| 471 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); | 462 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); |
| @@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue) | |||
| 474 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | 465 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, |
| 475 | tx_queue->queue); | 466 | tx_queue->queue); |
| 476 | 467 | ||
| 477 | if (FALCON_REV(efx) < FALCON_REV_B0) { | 468 | if (falcon_rev(efx) < FALCON_REV_B0) { |
| 478 | efx_oword_t reg; | 469 | efx_oword_t reg; |
| 479 | 470 | ||
| 480 | BUG_ON(tx_queue->queue >= 128); /* HW limit */ | 471 | BUG_ON(tx_queue->queue >= 128); /* HW limit */ |
| @@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue) | |||
| 635 | efx_oword_t rx_desc_ptr; | 626 | efx_oword_t rx_desc_ptr; |
| 636 | struct efx_nic *efx = rx_queue->efx; | 627 | struct efx_nic *efx = rx_queue->efx; |
| 637 | int rc; | 628 | int rc; |
| 638 | int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; | 629 | int is_b0 = falcon_rev(efx) >= FALCON_REV_B0; |
| 639 | int iscsi_digest_en = is_b0; | 630 | int iscsi_digest_en = is_b0; |
| 640 | 631 | ||
| 641 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", | 632 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", |
| @@ -822,10 +813,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel, | |||
| 822 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); | 813 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); |
| 823 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | 814 | tx_queue = &efx->tx_queue[tx_ev_q_label]; |
| 824 | 815 | ||
| 825 | if (NET_DEV_REGISTERED(efx)) | 816 | if (efx_dev_registered(efx)) |
| 826 | netif_tx_lock(efx->net_dev); | 817 | netif_tx_lock(efx->net_dev); |
| 827 | falcon_notify_tx_desc(tx_queue); | 818 | falcon_notify_tx_desc(tx_queue); |
| 828 | if (NET_DEV_REGISTERED(efx)) | 819 | if (efx_dev_registered(efx)) |
| 829 | netif_tx_unlock(efx->net_dev); | 820 | netif_tx_unlock(efx->net_dev); |
| 830 | } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && | 821 | } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && |
| 831 | EFX_WORKAROUND_10727(efx)) { | 822 | EFX_WORKAROUND_10727(efx)) { |
| @@ -884,7 +875,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | |||
| 884 | RX_EV_TCP_UDP_CHKSUM_ERR); | 875 | RX_EV_TCP_UDP_CHKSUM_ERR); |
| 885 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); | 876 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); |
| 886 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); | 877 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); |
| 887 | rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? | 878 | rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? |
| 888 | 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); | 879 | 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); |
| 889 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); | 880 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); |
| 890 | 881 | ||
| @@ -1065,7 +1056,7 @@ static void falcon_handle_global_event(struct efx_channel *channel, | |||
| 1065 | EFX_QWORD_FIELD(*event, XG_PHY_INTR)) | 1056 | EFX_QWORD_FIELD(*event, XG_PHY_INTR)) |
| 1066 | is_phy_event = 1; | 1057 | is_phy_event = 1; |
| 1067 | 1058 | ||
| 1068 | if ((FALCON_REV(efx) >= FALCON_REV_B0) && | 1059 | if ((falcon_rev(efx) >= FALCON_REV_B0) && |
| 1069 | EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) | 1060 | EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) |
| 1070 | is_phy_event = 1; | 1061 | is_phy_event = 1; |
| 1071 | 1062 | ||
| @@ -1405,7 +1396,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx) | |||
| 1405 | static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) | 1396 | static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) |
| 1406 | { | 1397 | { |
| 1407 | struct falcon_nic_data *nic_data = efx->nic_data; | 1398 | struct falcon_nic_data *nic_data = efx->nic_data; |
| 1408 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1399 | efx_oword_t *int_ker = efx->irq_status.addr; |
| 1409 | efx_oword_t fatal_intr; | 1400 | efx_oword_t fatal_intr; |
| 1410 | int error, mem_perr; | 1401 | int error, mem_perr; |
| 1411 | static int n_int_errors; | 1402 | static int n_int_errors; |
| @@ -1451,8 +1442,8 @@ out: | |||
| 1451 | */ | 1442 | */ |
| 1452 | static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) | 1443 | static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) |
| 1453 | { | 1444 | { |
| 1454 | struct efx_nic *efx = (struct efx_nic *)dev_id; | 1445 | struct efx_nic *efx = dev_id; |
| 1455 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1446 | efx_oword_t *int_ker = efx->irq_status.addr; |
| 1456 | struct efx_channel *channel; | 1447 | struct efx_channel *channel; |
| 1457 | efx_dword_t reg; | 1448 | efx_dword_t reg; |
| 1458 | u32 queues; | 1449 | u32 queues; |
| @@ -1489,8 +1480,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) | |||
| 1489 | 1480 | ||
| 1490 | static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | 1481 | static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) |
| 1491 | { | 1482 | { |
| 1492 | struct efx_nic *efx = (struct efx_nic *)dev_id; | 1483 | struct efx_nic *efx = dev_id; |
| 1493 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1484 | efx_oword_t *int_ker = efx->irq_status.addr; |
| 1494 | struct efx_channel *channel; | 1485 | struct efx_channel *channel; |
| 1495 | int syserr; | 1486 | int syserr; |
| 1496 | int queues; | 1487 | int queues; |
| @@ -1542,9 +1533,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | |||
| 1542 | */ | 1533 | */ |
| 1543 | static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) | 1534 | static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) |
| 1544 | { | 1535 | { |
| 1545 | struct efx_channel *channel = (struct efx_channel *)dev_id; | 1536 | struct efx_channel *channel = dev_id; |
| 1546 | struct efx_nic *efx = channel->efx; | 1537 | struct efx_nic *efx = channel->efx; |
| 1547 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | 1538 | efx_oword_t *int_ker = efx->irq_status.addr; |
| 1548 | int syserr; | 1539 | int syserr; |
| 1549 | 1540 | ||
| 1550 | efx->last_irq_cpu = raw_smp_processor_id(); | 1541 | efx->last_irq_cpu = raw_smp_processor_id(); |
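A cleanup repeated through the interrupt handlers above: the assignment from dev_id loses its cast, as do the (efx_oword_t *) casts on irq_status.addr. In C (unlike C++) a void * converts implicitly to any object pointer type, so the casts added nothing and could silently mask a future type change. A self-contained illustration -- the struct and handler below are stand-ins, not the driver's:

    #include <stdio.h>

    struct efx_nic { int irq_count; };

    /* Mirrors the kernel pattern: whoever registered the handler passed
     * a struct efx_nic * as the opaque cookie, so the handler assigns
     * it straight back without a cast. */
    static void example_isr(int irq, void *dev_id)
    {
        struct efx_nic *efx = dev_id;   /* implicit void * conversion */

        efx->irq_count++;
        (void)irq;
    }

    int main(void)
    {
        struct efx_nic nic = { 0 };

        example_isr(42, &nic);
        printf("%d\n", nic.irq_count);  /* prints 1 */
        return 0;
    }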
| @@ -1572,7 +1563,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx) | |||
| 1572 | unsigned long offset; | 1563 | unsigned long offset; |
| 1573 | efx_dword_t dword; | 1564 | efx_dword_t dword; |
| 1574 | 1565 | ||
| 1575 | if (FALCON_REV(efx) < FALCON_REV_B0) | 1566 | if (falcon_rev(efx) < FALCON_REV_B0) |
| 1576 | return; | 1567 | return; |
| 1577 | 1568 | ||
| 1578 | for (offset = RX_RSS_INDIR_TBL_B0; | 1569 | for (offset = RX_RSS_INDIR_TBL_B0; |
| @@ -1595,7 +1586,7 @@ int falcon_init_interrupt(struct efx_nic *efx) | |||
| 1595 | 1586 | ||
| 1596 | if (!EFX_INT_MODE_USE_MSI(efx)) { | 1587 | if (!EFX_INT_MODE_USE_MSI(efx)) { |
| 1597 | irq_handler_t handler; | 1588 | irq_handler_t handler; |
| 1598 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 1589 | if (falcon_rev(efx) >= FALCON_REV_B0) |
| 1599 | handler = falcon_legacy_interrupt_b0; | 1590 | handler = falcon_legacy_interrupt_b0; |
| 1600 | else | 1591 | else |
| 1601 | handler = falcon_legacy_interrupt_a1; | 1592 | handler = falcon_legacy_interrupt_a1; |
| @@ -1636,12 +1627,13 @@ void falcon_fini_interrupt(struct efx_nic *efx) | |||
| 1636 | efx_oword_t reg; | 1627 | efx_oword_t reg; |
| 1637 | 1628 | ||
| 1638 | /* Disable MSI/MSI-X interrupts */ | 1629 | /* Disable MSI/MSI-X interrupts */ |
| 1639 | efx_for_each_channel_with_interrupt(channel, efx) | 1630 | efx_for_each_channel_with_interrupt(channel, efx) { |
| 1640 | if (channel->irq) | 1631 | if (channel->irq) |
| 1641 | free_irq(channel->irq, channel); | 1632 | free_irq(channel->irq, channel); |
| 1633 | } | ||
| 1642 | 1634 | ||
| 1643 | /* ACK legacy interrupt */ | 1635 | /* ACK legacy interrupt */ |
| 1644 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 1636 | if (falcon_rev(efx) >= FALCON_REV_B0) |
| 1645 | falcon_read(efx, &reg, INT_ISR0_B0); | 1637 | falcon_read(efx, &reg, INT_ISR0_B0); |
| 1646 | else | 1638 | else |
| 1647 | falcon_irq_ack_a1(efx); | 1639 | falcon_irq_ack_a1(efx); |
| @@ -1732,7 +1724,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx) | |||
| 1732 | efx_oword_t temp; | 1724 | efx_oword_t temp; |
| 1733 | int count; | 1725 | int count; |
| 1734 | 1726 | ||
| 1735 | if ((FALCON_REV(efx) < FALCON_REV_B0) || | 1727 | if ((falcon_rev(efx) < FALCON_REV_B0) || |
| 1736 | (efx->loopback_mode != LOOPBACK_NONE)) | 1728 | (efx->loopback_mode != LOOPBACK_NONE)) |
| 1737 | return; | 1729 | return; |
| 1738 | 1730 | ||
| @@ -1785,7 +1777,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) | |||
| 1785 | { | 1777 | { |
| 1786 | efx_oword_t temp; | 1778 | efx_oword_t temp; |
| 1787 | 1779 | ||
| 1788 | if (FALCON_REV(efx) < FALCON_REV_B0) | 1780 | if (falcon_rev(efx) < FALCON_REV_B0) |
| 1789 | return; | 1781 | return; |
| 1790 | 1782 | ||
| 1791 | /* Isolate the MAC -> RX */ | 1783 | /* Isolate the MAC -> RX */ |
| @@ -1823,7 +1815,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
| 1823 | MAC_SPEED, link_speed); | 1815 | MAC_SPEED, link_speed); |
| 1824 | /* On B0, MAC backpressure can be disabled and packets get | 1816 | /* On B0, MAC backpressure can be disabled and packets get |
| 1825 | * discarded. */ | 1817 | * discarded. */ |
| 1826 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 1818 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
| 1827 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, | 1819 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, |
| 1828 | !efx->link_up); | 1820 | !efx->link_up); |
| 1829 | } | 1821 | } |
| @@ -1841,7 +1833,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | |||
| 1841 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); | 1833 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); |
| 1842 | 1834 | ||
| 1843 | /* Unisolate the MAC -> RX */ | 1835 | /* Unisolate the MAC -> RX */ |
| 1844 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 1836 | if (falcon_rev(efx) >= FALCON_REV_B0) |
| 1845 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); | 1837 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); |
| 1846 | falcon_write(efx, &reg, RX_CFG_REG_KER); | 1838 | falcon_write(efx, &reg, RX_CFG_REG_KER); |
| 1847 | } | 1839 | } |
| @@ -1856,7 +1848,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) | |||
| 1856 | return 0; | 1848 | return 0; |
| 1857 | 1849 | ||
| 1858 | /* Statistics fetch will fail if the MAC is in TX drain */ | 1850 | /* Statistics fetch will fail if the MAC is in TX drain */ |
| 1859 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 1851 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
| 1860 | efx_oword_t temp; | 1852 | efx_oword_t temp; |
| 1861 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); | 1853 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); |
| 1862 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) | 1854 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) |
| @@ -1940,7 +1932,7 @@ static int falcon_gmii_wait(struct efx_nic *efx) | |||
| 1940 | static void falcon_mdio_write(struct net_device *net_dev, int phy_id, | 1932 | static void falcon_mdio_write(struct net_device *net_dev, int phy_id, |
| 1941 | int addr, int value) | 1933 | int addr, int value) |
| 1942 | { | 1934 | { |
| 1943 | struct efx_nic *efx = (struct efx_nic *)net_dev->priv; | 1935 | struct efx_nic *efx = net_dev->priv; |
| 1944 | unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; | 1936 | unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; |
| 1945 | efx_oword_t reg; | 1937 | efx_oword_t reg; |
| 1946 | 1938 | ||
| @@ -2008,7 +2000,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id, | |||
| 2008 | * could be read, -1 will be returned. */ | 2000 | * could be read, -1 will be returned. */ |
| 2009 | static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) | 2001 | static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) |
| 2010 | { | 2002 | { |
| 2011 | struct efx_nic *efx = (struct efx_nic *)net_dev->priv; | 2003 | struct efx_nic *efx = net_dev->priv; |
| 2012 | unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; | 2004 | unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; |
| 2013 | efx_oword_t reg; | 2005 | efx_oword_t reg; |
| 2014 | int value = -1; | 2006 | int value = -1; |
| @@ -2113,7 +2105,7 @@ int falcon_probe_port(struct efx_nic *efx) | |||
| 2113 | falcon_init_mdio(&efx->mii); | 2105 | falcon_init_mdio(&efx->mii); |
| 2114 | 2106 | ||
| 2115 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ | 2107 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ |
| 2116 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 2108 | if (falcon_rev(efx) >= FALCON_REV_B0) |
| 2117 | efx->flow_control = EFX_FC_RX | EFX_FC_TX; | 2109 | efx->flow_control = EFX_FC_RX | EFX_FC_TX; |
| 2118 | else | 2110 | else |
| 2119 | efx->flow_control = EFX_FC_RX; | 2111 | efx->flow_control = EFX_FC_RX; |
| @@ -2373,7 +2365,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
| 2373 | return -ENODEV; | 2365 | return -ENODEV; |
| 2374 | } | 2366 | } |
| 2375 | 2367 | ||
| 2376 | switch (FALCON_REV(efx)) { | 2368 | switch (falcon_rev(efx)) { |
| 2377 | case FALCON_REV_A0: | 2369 | case FALCON_REV_A0: |
| 2378 | case 0xff: | 2370 | case 0xff: |
| 2379 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); | 2371 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); |
| @@ -2399,7 +2391,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx) | |||
| 2399 | break; | 2391 | break; |
| 2400 | 2392 | ||
| 2401 | default: | 2393 | default: |
| 2402 | EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); | 2394 | EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx)); |
| 2403 | return -ENODEV; | 2395 | return -ENODEV; |
| 2404 | } | 2396 | } |
| 2405 | 2397 | ||
| @@ -2419,7 +2411,7 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
| 2419 | 2411 | ||
| 2420 | /* Allocate storage for hardware specific data */ | 2412 | /* Allocate storage for hardware specific data */ |
| 2421 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); | 2413 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); |
| 2422 | efx->nic_data = (void *) nic_data; | 2414 | efx->nic_data = nic_data; |
| 2423 | 2415 | ||
| 2424 | /* Determine number of ports etc. */ | 2416 | /* Determine number of ports etc. */ |
| 2425 | rc = falcon_probe_nic_variant(efx); | 2417 | rc = falcon_probe_nic_variant(efx); |
| @@ -2489,13 +2481,10 @@ int falcon_probe_nic(struct efx_nic *efx) | |||
| 2489 | */ | 2481 | */ |
| 2490 | int falcon_init_nic(struct efx_nic *efx) | 2482 | int falcon_init_nic(struct efx_nic *efx) |
| 2491 | { | 2483 | { |
| 2492 | struct falcon_nic_data *data; | ||
| 2493 | efx_oword_t temp; | 2484 | efx_oword_t temp; |
| 2494 | unsigned thresh; | 2485 | unsigned thresh; |
| 2495 | int rc; | 2486 | int rc; |
| 2496 | 2487 | ||
| 2497 | data = (struct falcon_nic_data *)efx->nic_data; | ||
| 2498 | |||
| 2499 | /* Set up the address region register. This is only needed | 2488 | /* Set up the address region register. This is only needed |
| 2500 | * for the B0 FPGA, but since we are just pushing in the | 2489 | * for the B0 FPGA, but since we are just pushing in the |
| 2501 | * reset defaults this may as well be unconditional. */ | 2490 | * reset defaults this may as well be unconditional. */ |
| @@ -2562,7 +2551,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
| 2562 | 2551 | ||
| 2563 | /* Set number of RSS queues for receive path. */ | 2552 | /* Set number of RSS queues for receive path. */ |
| 2564 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); | 2553 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); |
| 2565 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 2554 | if (falcon_rev(efx) >= FALCON_REV_B0) |
| 2566 | EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); | 2555 | EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); |
| 2567 | else | 2556 | else |
| 2568 | EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); | 2557 | EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); |
| @@ -2600,7 +2589,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
| 2600 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | 2589 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ |
| 2601 | EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); | 2590 | EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); |
| 2602 | /* Squash TX of packets of 16 bytes or less */ | 2591 | /* Squash TX of packets of 16 bytes or less */ |
| 2603 | if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) | 2592 | if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) |
| 2604 | EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); | 2593 | EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); |
| 2605 | falcon_write(efx, &temp, TX_CFG2_REG_KER); | 2594 | falcon_write(efx, &temp, TX_CFG2_REG_KER); |
| 2606 | 2595 | ||
| @@ -2617,7 +2606,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
| 2617 | if (EFX_WORKAROUND_7575(efx)) | 2606 | if (EFX_WORKAROUND_7575(efx)) |
| 2618 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, | 2607 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, |
| 2619 | (3 * 4096) / 32); | 2608 | (3 * 4096) / 32); |
| 2620 | if (FALCON_REV(efx) >= FALCON_REV_B0) | 2609 | if (falcon_rev(efx) >= FALCON_REV_B0) |
| 2621 | EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); | 2610 | EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); |
| 2622 | 2611 | ||
| 2623 | /* RX FIFO flow control thresholds */ | 2612 | /* RX FIFO flow control thresholds */ |
| @@ -2633,7 +2622,7 @@ int falcon_init_nic(struct efx_nic *efx) | |||
| 2633 | falcon_write(efx, &temp, RX_CFG_REG_KER); | 2622 | falcon_write(efx, &temp, RX_CFG_REG_KER); |
| 2634 | 2623 | ||
| 2635 | /* Set destination of both TX and RX Flush events */ | 2624 | /* Set destination of both TX and RX Flush events */ |
| 2636 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 2625 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
| 2637 | EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); | 2626 | EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); |
| 2638 | falcon_write(efx, &temp, DP_CTRL_REG); | 2627 | falcon_write(efx, &temp, DP_CTRL_REG); |
| 2639 | } | 2628 | } |
| @@ -2647,7 +2636,7 @@ void falcon_remove_nic(struct efx_nic *efx) | |||
| 2647 | 2636 | ||
| 2648 | falcon_free_buffer(efx, &efx->irq_status); | 2637 | falcon_free_buffer(efx, &efx->irq_status); |
| 2649 | 2638 | ||
| 2650 | (void) falcon_reset_hw(efx, RESET_TYPE_ALL); | 2639 | falcon_reset_hw(efx, RESET_TYPE_ALL); |
| 2651 | 2640 | ||
| 2652 | /* Release the second function after the reset */ | 2641 | /* Release the second function after the reset */ |
| 2653 | if (nic_data->pci_dev2) { | 2642 | if (nic_data->pci_dev2) { |
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h index 6117403b0c03..492f9bc28840 100644 --- a/drivers/net/sfc/falcon.h +++ b/drivers/net/sfc/falcon.h | |||
| @@ -23,7 +23,10 @@ enum falcon_revision { | |||
| 23 | FALCON_REV_B0 = 2, | 23 | FALCON_REV_B0 = 2, |
| 24 | }; | 24 | }; |
| 25 | 25 | ||
| 26 | #define FALCON_REV(efx) ((efx)->pci_dev->revision) | 26 | static inline int falcon_rev(struct efx_nic *efx) |
| 27 | { | ||
| 28 | return efx->pci_dev->revision; | ||
| 29 | } | ||
| 27 | 30 | ||
| 28 | extern struct efx_nic_type falcon_a_nic_type; | 31 | extern struct efx_nic_type falcon_a_nic_type; |
| 29 | extern struct efx_nic_type falcon_b_nic_type; | 32 | extern struct efx_nic_type falcon_b_nic_type; |
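FALCON_REV() becoming falcon_rev() is the classic macro-to-inline conversion: the compiler now checks that the argument really is a struct efx_nic *, the argument is evaluated exactly once, and call sites read as ordinary C. A toy comparison, with cut-down struct definitions standing in for the real ones:

    #include <stdio.h>

    struct pci_dev { int revision; };
    struct efx_nic { struct pci_dev *pci_dev; };

    /* Old style: accepts any expression with a ->pci_dev member and
     * performs no type checking at all. */
    #define FALCON_REV_MACRO(efx) ((efx)->pci_dev->revision)

    /* New style: a wrong argument type is a compile error. */
    static inline int falcon_rev(struct efx_nic *efx)
    {
        return efx->pci_dev->revision;
    }

    int main(void)
    {
        struct pci_dev pdev = { .revision = 2 };
        struct efx_nic nic = { .pci_dev = &pdev };

        printf("%d %d\n", FALCON_REV_MACRO(&nic), falcon_rev(&nic));
        return 0;
    }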
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h index 06e2d68fc3d1..6d003114eeab 100644 --- a/drivers/net/sfc/falcon_hwdefs.h +++ b/drivers/net/sfc/falcon_hwdefs.h | |||
| @@ -1125,7 +1125,7 @@ struct falcon_nvconfig_board_v2 { | |||
| 1125 | u8 port1_phy_type; | 1125 | u8 port1_phy_type; |
| 1126 | __le16 asic_sub_revision; | 1126 | __le16 asic_sub_revision; |
| 1127 | __le16 board_revision; | 1127 | __le16 board_revision; |
| 1128 | } __attribute__ ((packed)); | 1128 | } __packed; |
| 1129 | 1129 | ||
| 1130 | #define NVCONFIG_BASE 0x300 | 1130 | #define NVCONFIG_BASE 0x300 |
| 1131 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C | 1131 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C |
| @@ -1144,6 +1144,6 @@ struct falcon_nvconfig { | |||
| 1144 | __le16 board_struct_ver; | 1144 | __le16 board_struct_ver; |
| 1145 | __le16 board_checksum; | 1145 | __le16 board_checksum; |
| 1146 | struct falcon_nvconfig_board_v2 board_v2; | 1146 | struct falcon_nvconfig_board_v2 board_v2; |
| 1147 | } __attribute__ ((packed)); | 1147 | } __packed; |
| 1148 | 1148 | ||
| 1149 | #endif /* EFX_FALCON_HWDEFS_H */ | 1149 | #endif /* EFX_FALCON_HWDEFS_H */ |
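__packed is the kernel's shorthand (from compiler-gcc.h) for the spelled-out GCC attribute, so the two hunks above change notation only. Packing matters here because struct falcon_nvconfig mirrors a byte-exact NVRAM layout. A toy showing what the attribute actually buys -- struct names and fields below are invented for the demo:

    #include <stdio.h>
    #include <stdint.h>

    #define __packed __attribute__((packed))

    struct nv_packed {
        uint8_t  phy_type;
        uint16_t board_revision;
    } __packed;

    struct nv_default {
        uint8_t  phy_type;
        uint16_t board_revision;   /* compiler pads 1 byte before this */
    };

    int main(void)
    {
        /* prints "3 4": packing removes the alignment padding, keeping
         * the in-memory layout identical to the on-flash one. */
        printf("%zu %zu\n", sizeof(struct nv_packed),
               sizeof(struct nv_default));
        return 0;
    }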
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h index ea08184ddfa9..6670cdfc41ab 100644 --- a/drivers/net/sfc/falcon_io.h +++ b/drivers/net/sfc/falcon_io.h | |||
| @@ -56,14 +56,27 @@ | |||
| 56 | #define FALCON_USE_QWORD_IO 1 | 56 | #define FALCON_USE_QWORD_IO 1 |
| 57 | #endif | 57 | #endif |
| 58 | 58 | ||
| 59 | #define _falcon_writeq(efx, value, reg) \ | 59 | #ifdef FALCON_USE_QWORD_IO |
| 60 | __raw_writeq((__force u64) (value), (efx)->membase + (reg)) | 60 | static inline void _falcon_writeq(struct efx_nic *efx, __le64 value, |
| 61 | #define _falcon_writel(efx, value, reg) \ | 61 | unsigned int reg) |
| 62 | __raw_writel((__force u32) (value), (efx)->membase + (reg)) | 62 | { |
| 63 | #define _falcon_readq(efx, reg) \ | 63 | __raw_writeq((__force u64)value, efx->membase + reg); |
| 64 | ((__force __le64) __raw_readq((efx)->membase + (reg))) | 64 | } |
| 65 | #define _falcon_readl(efx, reg) \ | 65 | static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg) |
| 66 | ((__force __le32) __raw_readl((efx)->membase + (reg))) | 66 | { |
| 67 | return (__force __le64)__raw_readq(efx->membase + reg); | ||
| 68 | } | ||
| 69 | #endif | ||
| 70 | |||
| 71 | static inline void _falcon_writel(struct efx_nic *efx, __le32 value, | ||
| 72 | unsigned int reg) | ||
| 73 | { | ||
| 74 | __raw_writel((__force u32)value, efx->membase + reg); | ||
| 75 | } | ||
| 76 | static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg) | ||
| 77 | { | ||
| 78 | return (__force __le32)__raw_readl(efx->membase + reg); | ||
| 79 | } | ||
| 67 | 80 | ||
| 68 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ | 81 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ |
| 69 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, | 82 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, |
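Turning the raw accessor macros into static inlines gives each one a typed prototype: the write side now demands an explicit __le64/__le32, the qword pair exists only when FALCON_USE_QWORD_IO is defined, and the single __force cast that reinterprets the endian-annotated value as raw bits lives inside the helper rather than at every call site, where the kernel's sparse checker can police it. A userspace stand-in -- note that a plain typedef cannot reproduce sparse's checking, only the documentation value:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t le32;  /* stand-in for the kernel's __le32 */

    /* The accessor, not the caller, owns the one spot where the typed
     * value becomes raw bits (in-tree, the __force cast). */
    static inline void fake_writel(le32 value, volatile uint32_t *reg)
    {
        *reg = (uint32_t)value;
    }

    int main(void)
    {
        uint32_t mmio = 0;

        fake_writel((le32)0xc0ffee, &mmio);
        printf("%#x\n", (unsigned)mmio);
        return 0;
    }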
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c index a74b7931a3c4..dbdcee4b0f8d 100644 --- a/drivers/net/sfc/falcon_xmac.c +++ b/drivers/net/sfc/falcon_xmac.c | |||
| @@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx) | |||
| 221 | { | 221 | { |
| 222 | efx_dword_t reg; | 222 | efx_dword_t reg; |
| 223 | 223 | ||
| 224 | if (FALCON_REV(efx) < FALCON_REV_B0) | 224 | if (falcon_rev(efx) < FALCON_REV_B0) |
| 225 | return 1; | 225 | return 1; |
| 226 | 226 | ||
| 227 | /* The ISR latches, so clear it and re-read */ | 227 | /* The ISR latches, so clear it and re-read */ |
| @@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable) | |||
| 241 | { | 241 | { |
| 242 | efx_dword_t reg; | 242 | efx_dword_t reg; |
| 243 | 243 | ||
| 244 | if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) | 244 | if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) |
| 245 | return; | 245 | return; |
| 246 | 246 | ||
| 247 | /* Flush the ISR */ | 247 | /* Flush the ISR */ |
| @@ -454,7 +454,7 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx) | |||
| 454 | 454 | ||
| 455 | EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", | 455 | EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", |
| 456 | __func__, tries); | 456 | __func__, tries); |
| 457 | (void) falcon_reset_xaui(efx); | 457 | falcon_reset_xaui(efx); |
| 458 | udelay(200); | 458 | udelay(200); |
| 459 | tries--; | 459 | tries--; |
| 460 | } | 460 | } |
| @@ -572,7 +572,7 @@ int falcon_check_xmac(struct efx_nic *efx) | |||
| 572 | xaui_link_ok = falcon_xaui_link_ok(efx); | 572 | xaui_link_ok = falcon_xaui_link_ok(efx); |
| 573 | 573 | ||
| 574 | if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) | 574 | if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) |
| 575 | (void) falcon_reset_xaui(efx); | 575 | falcon_reset_xaui(efx); |
| 576 | 576 | ||
| 577 | /* Call the PHY check_hw routine */ | 577 | /* Call the PHY check_hw routine */ |
| 578 | rc = efx->phy_op->check_hw(efx); | 578 | rc = efx->phy_op->check_hw(efx); |
| @@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control) | |||
| 639 | reset = ((flow_control & EFX_FC_TX) && | 639 | reset = ((flow_control & EFX_FC_TX) && |
| 640 | !(efx->flow_control & EFX_FC_TX)); | 640 | !(efx->flow_control & EFX_FC_TX)); |
| 641 | if (EFX_WORKAROUND_11482(efx) && reset) { | 641 | if (EFX_WORKAROUND_11482(efx) && reset) { |
| 642 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | 642 | if (falcon_rev(efx) >= FALCON_REV_B0) { |
| 643 | /* Recover by resetting the EM block */ | 643 | /* Recover by resetting the EM block */ |
| 644 | if (efx->link_up) | 644 | if (efx->link_up) |
| 645 | falcon_drain_tx_fifo(efx); | 645 | falcon_drain_tx_fifo(efx); |
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 59f261b4171f..5e20e7551dae 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h | |||
| @@ -42,7 +42,7 @@ | |||
| 42 | #ifndef EFX_DRIVER_NAME | 42 | #ifndef EFX_DRIVER_NAME |
| 43 | #define EFX_DRIVER_NAME "sfc" | 43 | #define EFX_DRIVER_NAME "sfc" |
| 44 | #endif | 44 | #endif |
| 45 | #define EFX_DRIVER_VERSION "2.2.0136" | 45 | #define EFX_DRIVER_VERSION "2.2" |
| 46 | 46 | ||
| 47 | #ifdef EFX_ENABLE_DEBUG | 47 | #ifdef EFX_ENABLE_DEBUG |
| 48 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) | 48 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) |
| @@ -52,28 +52,19 @@ | |||
| 52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) | 52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) |
| 53 | #endif | 53 | #endif |
| 54 | 54 | ||
| 55 | #define NET_DEV_REGISTERED(efx) \ | ||
| 56 | ((efx)->net_dev->reg_state == NETREG_REGISTERED) | ||
| 57 | |||
| 58 | /* Include net device name in log messages if it has been registered. | ||
| 59 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
| 60 | * are harmless. | ||
| 61 | */ | ||
| 62 | #define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "") | ||
| 63 | |||
| 64 | /* Un-rate-limited logging */ | 55 | /* Un-rate-limited logging */ |
| 65 | #define EFX_ERR(efx, fmt, args...) \ | 56 | #define EFX_ERR(efx, fmt, args...) \ |
| 66 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) | 57 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args) |
| 67 | 58 | ||
| 68 | #define EFX_INFO(efx, fmt, args...) \ | 59 | #define EFX_INFO(efx, fmt, args...) \ |
| 69 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) | 60 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args) |
| 70 | 61 | ||
| 71 | #ifdef EFX_ENABLE_DEBUG | 62 | #ifdef EFX_ENABLE_DEBUG |
| 72 | #define EFX_LOG(efx, fmt, args...) \ | 63 | #define EFX_LOG(efx, fmt, args...) \ |
| 73 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | 64 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) |
| 74 | #else | 65 | #else |
| 75 | #define EFX_LOG(efx, fmt, args...) \ | 66 | #define EFX_LOG(efx, fmt, args...) \ |
| 76 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | 67 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) |
| 77 | #endif | 68 | #endif |
| 78 | 69 | ||
| 79 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) | 70 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) |
| @@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0) | |||
| 90 | #define EFX_LOG_RL(efx, fmt, args...) \ | 81 | #define EFX_LOG_RL(efx, fmt, args...) \ |
| 91 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) | 82 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) |
| 92 | 83 | ||
| 93 | /* Kernel headers may redefine inline anyway */ | ||
| 94 | #ifndef inline | ||
| 95 | #define inline inline __attribute__ ((always_inline)) | ||
| 96 | #endif | ||
| 97 | |||
| 98 | /************************************************************************** | 84 | /************************************************************************** |
| 99 | * | 85 | * |
| 100 | * Efx data structures | 86 | * Efx data structures |
| @@ -695,7 +681,7 @@ struct efx_nic { | |||
| 695 | struct workqueue_struct *workqueue; | 681 | struct workqueue_struct *workqueue; |
| 696 | struct work_struct reset_work; | 682 | struct work_struct reset_work; |
| 697 | struct delayed_work monitor_work; | 683 | struct delayed_work monitor_work; |
| 698 | unsigned long membase_phys; | 684 | resource_size_t membase_phys; |
| 699 | void __iomem *membase; | 685 | void __iomem *membase; |
| 700 | spinlock_t biu_lock; | 686 | spinlock_t biu_lock; |
| 701 | enum efx_int_mode interrupt_mode; | 687 | enum efx_int_mode interrupt_mode; |
| @@ -719,7 +705,7 @@ struct efx_nic { | |||
| 719 | 705 | ||
| 720 | unsigned n_rx_nodesc_drop_cnt; | 706 | unsigned n_rx_nodesc_drop_cnt; |
| 721 | 707 | ||
| 722 | void *nic_data; | 708 | struct falcon_nic_data *nic_data; |
| 723 | 709 | ||
| 724 | struct mutex mac_lock; | 710 | struct mutex mac_lock; |
| 725 | int port_enabled; | 711 | int port_enabled; |
| @@ -760,6 +746,20 @@ struct efx_nic { | |||
| 760 | void *loopback_selftest; | 746 | void *loopback_selftest; |
| 761 | }; | 747 | }; |
| 762 | 748 | ||
| 749 | static inline int efx_dev_registered(struct efx_nic *efx) | ||
| 750 | { | ||
| 751 | return efx->net_dev->reg_state == NETREG_REGISTERED; | ||
| 752 | } | ||
| 753 | |||
| 754 | /* Net device name, for inclusion in log messages if it has been registered. | ||
| 755 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
| 756 | * are harmless. | ||
| 757 | */ | ||
| 758 | static inline const char *efx_dev_name(struct efx_nic *efx) | ||
| 759 | { | ||
| 760 | return efx_dev_registered(efx) ? efx->name : ""; | ||
| 761 | } | ||
| 762 | |||
| 763 | /** | 763 | /** |
| 764 | * struct efx_nic_type - Efx device type definition | 764 | * struct efx_nic_type - Efx device type definition |
| 765 | * @mem_bar: Memory BAR number | 765 | * @mem_bar: Memory BAR number |
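The NET_DEV_REGISTERED/NET_DEV_NAME macros deleted earlier in this file reappear here as efx_dev_registered()/efx_dev_name(). The logic is unchanged; note the comment's point that logging uses efx->name, the driver's own stable copy, so a message racing (un)registration prints an empty string rather than a torn one. A compilable toy of that pattern, with heavily trimmed stand-in structs:

    #include <stdio.h>
    #include <string.h>

    enum reg_state { NETREG_UNINITIALIZED, NETREG_REGISTERED };

    struct net_device { enum reg_state reg_state; char name[16]; };
    struct efx_nic { struct net_device *net_dev; char name[16]; };

    static inline int efx_dev_registered(struct efx_nic *efx)
    {
        return efx->net_dev->reg_state == NETREG_REGISTERED;
    }

    static inline const char *efx_dev_name(struct efx_nic *efx)
    {
        /* the driver's own copy, never the live net_dev->name */
        return efx_dev_registered(efx) ? efx->name : "";
    }

    int main(void)
    {
        struct net_device nd = { NETREG_UNINITIALIZED, "eth0" };
        struct efx_nic efx = { &nd, "" };

        printf("[%s]\n", efx_dev_name(&efx));   /* [] */
        strcpy(efx.name, "eth0");
        nd.reg_state = NETREG_REGISTERED;
        printf("[%s]\n", efx_dev_name(&efx));   /* [eth0] */
        return 0;
    }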
| @@ -795,7 +795,7 @@ struct efx_nic_type { | |||
| 795 | unsigned int txd_ring_mask; | 795 | unsigned int txd_ring_mask; |
| 796 | unsigned int rxd_ring_mask; | 796 | unsigned int rxd_ring_mask; |
| 797 | unsigned int evq_size; | 797 | unsigned int evq_size; |
| 798 | dma_addr_t max_dma_mask; | 798 | u64 max_dma_mask; |
| 799 | unsigned int tx_dma_mask; | 799 | unsigned int tx_dma_mask; |
| 800 | unsigned bug5391_mask; | 800 | unsigned bug5391_mask; |
| 801 | 801 | ||
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index 670622373ddf..601b001437c0 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c | |||
| @@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95; | |||
| 86 | */ | 86 | */ |
| 87 | #define EFX_RXD_HEAD_ROOM 2 | 87 | #define EFX_RXD_HEAD_ROOM 2 |
| 88 | 88 | ||
| 89 | /* Macros for zero-order pages (potentially) containing multiple RX buffers */ | 89 | static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) |
| 90 | #define RX_DATA_OFFSET(_data) \ | 90 | { |
| 91 | (((unsigned long) (_data)) & (PAGE_SIZE-1)) | 91 | /* Offset is always within one page, so we don't need to consider |
| 92 | #define RX_BUF_OFFSET(_rx_buf) \ | 92 | * the page order. |
| 93 | RX_DATA_OFFSET((_rx_buf)->data) | 93 | */ |
| 94 | 94 | return (__force unsigned long) buf->data & (PAGE_SIZE - 1); | |
| 95 | #define RX_PAGE_SIZE(_efx) \ | 95 | } |
| 96 | (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) | 96 | static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) |
| 97 | { | ||
| 98 | return PAGE_SIZE << efx->rx_buffer_order; | ||
| 99 | } | ||
| 97 | 100 | ||
| 98 | 101 | ||
| 99 | /************************************************************************** | 102 | /************************************************************************** |
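The two new helpers above replace the RX_DATA_OFFSET/RX_BUF_OFFSET/RX_PAGE_SIZE macro trio. The arithmetic is untouched: a buffer's offset is the low bits of its address (valid whatever the page order, since a buffer never spans a page boundary), and the size of an order-n allocation is PAGE_SIZE shifted left n times. A quick demonstration with a hard-coded 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    /* PAGE_SIZE << order == PAGE_SIZE * (1u << order); the shift is
     * the more idiomatic spelling, now used by efx_rx_buf_size(). */
    static unsigned int rx_buf_size(unsigned int order)
    {
        return PAGE_SIZE << order;
    }

    /* Low address bits = offset within the page, as in
     * efx_rx_buf_offset(). */
    static unsigned int rx_buf_offset(unsigned long addr)
    {
        return addr & (PAGE_SIZE - 1);
    }

    int main(void)
    {
        /* prints "16384 1656" */
        printf("%u %u\n", rx_buf_size(2), rx_buf_offset(0x12345678ul));
        return 0;
    }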
| @@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95; | |||
| 106 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, | 109 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, |
| 107 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) | 110 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) |
| 108 | { | 111 | { |
| 109 | struct efx_channel *channel = (struct efx_channel *)priv; | 112 | struct efx_channel *channel = priv; |
| 110 | struct iphdr *iph; | 113 | struct iphdr *iph; |
| 111 | struct tcphdr *th; | 114 | struct tcphdr *th; |
| 112 | 115 | ||
| @@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr, | |||
| 131 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, | 134 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, |
| 132 | void *priv) | 135 | void *priv) |
| 133 | { | 136 | { |
| 134 | struct efx_channel *channel = (struct efx_channel *)priv; | 137 | struct efx_channel *channel = priv; |
| 135 | struct ethhdr *eh; | 138 | struct ethhdr *eh; |
| 136 | struct iphdr *iph; | 139 | struct iphdr *iph; |
| 137 | 140 | ||
| 138 | /* We support EtherII and VLAN encapsulated IPv4 */ | 141 | /* We support EtherII and VLAN encapsulated IPv4 */ |
| 139 | eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); | 142 | eh = page_address(frag->page) + frag->page_offset; |
| 140 | *mac_hdr = eh; | 143 | *mac_hdr = eh; |
| 141 | 144 | ||
| 142 | if (eh->h_proto == htons(ETH_P_IP)) { | 145 | if (eh->h_proto == htons(ETH_P_IP)) { |
| @@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
| 269 | return -ENOMEM; | 272 | return -ENOMEM; |
| 270 | 273 | ||
| 271 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, | 274 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, |
| 272 | 0, RX_PAGE_SIZE(efx), | 275 | 0, efx_rx_buf_size(efx), |
| 273 | PCI_DMA_FROMDEVICE); | 276 | PCI_DMA_FROMDEVICE); |
| 274 | 277 | ||
| 275 | if (unlikely(pci_dma_mapping_error(dma_addr))) { | 278 | if (unlikely(pci_dma_mapping_error(dma_addr))) { |
| @@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
| 280 | 283 | ||
| 281 | rx_queue->buf_page = rx_buf->page; | 284 | rx_queue->buf_page = rx_buf->page; |
| 282 | rx_queue->buf_dma_addr = dma_addr; | 285 | rx_queue->buf_dma_addr = dma_addr; |
| 283 | rx_queue->buf_data = ((char *) page_address(rx_buf->page) + | 286 | rx_queue->buf_data = (page_address(rx_buf->page) + |
| 284 | EFX_PAGE_IP_ALIGN); | 287 | EFX_PAGE_IP_ALIGN); |
| 285 | } | 288 | } |
| 286 | 289 | ||
| 287 | offset = RX_DATA_OFFSET(rx_queue->buf_data); | ||
| 288 | rx_buf->len = bytes; | 290 | rx_buf->len = bytes; |
| 289 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | ||
| 290 | rx_buf->data = rx_queue->buf_data; | 291 | rx_buf->data = rx_queue->buf_data; |
| 292 | offset = efx_rx_buf_offset(rx_buf); | ||
| 293 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | ||
| 291 | 294 | ||
| 292 | /* Try to pack multiple buffers per page */ | 295 | /* Try to pack multiple buffers per page */ |
| 293 | if (efx->rx_buffer_order == 0) { | 296 | if (efx->rx_buffer_order == 0) { |
| @@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | |||
| 295 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); | 298 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); |
| 296 | offset += ((bytes + 0x1ff) & ~0x1ff); | 299 | offset += ((bytes + 0x1ff) & ~0x1ff); |
| 297 | 300 | ||
| 298 | space = RX_PAGE_SIZE(efx) - offset; | 301 | space = efx_rx_buf_size(efx) - offset; |
| 299 | if (space >= bytes) { | 302 | if (space >= bytes) { |
| 300 | /* Refs dropped on kernel releasing each skb */ | 303 | /* Refs dropped on kernel releasing each skb */ |
| 301 | get_page(rx_queue->buf_page); | 304 | get_page(rx_queue->buf_page); |
| @@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx, | |||
| 344 | EFX_BUG_ON_PARANOID(rx_buf->skb); | 347 | EFX_BUG_ON_PARANOID(rx_buf->skb); |
| 345 | if (rx_buf->unmap_addr) { | 348 | if (rx_buf->unmap_addr) { |
| 346 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, | 349 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, |
| 347 | RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); | 350 | efx_rx_buf_size(efx), |
| 351 | PCI_DMA_FROMDEVICE); | ||
| 348 | rx_buf->unmap_addr = 0; | 352 | rx_buf->unmap_addr = 0; |
| 349 | } | 353 | } |
| 350 | } else if (likely(rx_buf->skb)) { | 354 | } else if (likely(rx_buf->skb)) { |
| @@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | |||
| 400 | return 0; | 404 | return 0; |
| 401 | 405 | ||
| 402 | /* Record minimum fill level */ | 406 | /* Record minimum fill level */ |
| 403 | if (unlikely(fill_level < rx_queue->min_fill)) | 407 | if (unlikely(fill_level < rx_queue->min_fill)) { |
| 404 | if (fill_level) | 408 | if (fill_level) |
| 405 | rx_queue->min_fill = fill_level; | 409 | rx_queue->min_fill = fill_level; |
| 410 | } | ||
| 406 | 411 | ||
| 407 | /* Acquire RX add lock. If this lock is contended, then a fast | 412 | /* Acquire RX add lock. If this lock is contended, then a fast |
| 408 | * fill must already be in progress (e.g. in the refill | 413 | * fill must already be in progress (e.g. in the refill |
| @@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel, | |||
| 552 | struct skb_frag_struct frags; | 557 | struct skb_frag_struct frags; |
| 553 | 558 | ||
| 554 | frags.page = rx_buf->page; | 559 | frags.page = rx_buf->page; |
| 555 | frags.page_offset = RX_BUF_OFFSET(rx_buf); | 560 | frags.page_offset = efx_rx_buf_offset(rx_buf); |
| 556 | frags.size = rx_buf->len; | 561 | frags.size = rx_buf->len; |
| 557 | 562 | ||
| 558 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, | 563 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, |
| @@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, | |||
| 597 | if (unlikely(rx_buf->len > hdr_len)) { | 602 | if (unlikely(rx_buf->len > hdr_len)) { |
| 598 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; | 603 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; |
| 599 | frag->page = rx_buf->page; | 604 | frag->page = rx_buf->page; |
| 600 | frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; | 605 | frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len; |
| 601 | frag->size = skb->len - hdr_len; | 606 | frag->size = skb->len - hdr_len; |
| 602 | skb_shinfo(skb)->nr_frags = 1; | 607 | skb_shinfo(skb)->nr_frags = 1; |
| 603 | skb->data_len = frag->size; | 608 | skb->data_len = frag->size; |
| @@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | |||
| 851 | /* For a page that is part-way through splitting into RX buffers */ | 856 | /* For a page that is part-way through splitting into RX buffers */ |
| 852 | if (rx_queue->buf_page != NULL) { | 857 | if (rx_queue->buf_page != NULL) { |
| 853 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, | 858 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, |
| 854 | RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); | 859 | efx_rx_buf_size(rx_queue->efx), |
| 860 | PCI_DMA_FROMDEVICE); | ||
| 855 | __free_pages(rx_queue->buf_page, | 861 | __free_pages(rx_queue->buf_page, |
| 856 | rx_queue->efx->rx_buffer_order); | 862 | rx_queue->efx->rx_buffer_order); |
| 857 | rx_queue->buf_page = NULL; | 863 | rx_queue->buf_page = NULL; |
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c index cbda15946e8f..3b2de9fe7f27 100644 --- a/drivers/net/sfc/selftest.c +++ b/drivers/net/sfc/selftest.c | |||
| @@ -290,7 +290,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx, | |||
| 290 | 290 | ||
| 291 | payload = &state->payload; | 291 | payload = &state->payload; |
| 292 | 292 | ||
| 293 | received = (struct efx_loopback_payload *)(char *) buf_ptr; | 293 | received = (struct efx_loopback_payload *) buf_ptr; |
| 294 | received->ip.saddr = payload->ip.saddr; | 294 | received->ip.saddr = payload->ip.saddr; |
| 295 | received->ip.check = payload->ip.check; | 295 | received->ip.check = payload->ip.check; |
| 296 | 296 | ||
| @@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue) | |||
| 424 | * interrupt handler. */ | 424 | * interrupt handler. */ |
| 425 | smp_wmb(); | 425 | smp_wmb(); |
| 426 | 426 | ||
| 427 | if (NET_DEV_REGISTERED(efx)) | 427 | if (efx_dev_registered(efx)) |
| 428 | netif_tx_lock_bh(efx->net_dev); | 428 | netif_tx_lock_bh(efx->net_dev); |
| 429 | rc = efx_xmit(efx, tx_queue, skb); | 429 | rc = efx_xmit(efx, tx_queue, skb); |
| 430 | if (NET_DEV_REGISTERED(efx)) | 430 | if (efx_dev_registered(efx)) |
| 431 | netif_tx_unlock_bh(efx->net_dev); | 431 | netif_tx_unlock_bh(efx->net_dev); |
| 432 | 432 | ||
| 433 | if (rc != NETDEV_TX_OK) { | 433 | if (rc != NETDEV_TX_OK) { |
| @@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue, | |||
| 453 | int tx_done = 0, rx_good, rx_bad; | 453 | int tx_done = 0, rx_good, rx_bad; |
| 454 | int i, rc = 0; | 454 | int i, rc = 0; |
| 455 | 455 | ||
| 456 | if (NET_DEV_REGISTERED(efx)) | 456 | if (efx_dev_registered(efx)) |
| 457 | netif_tx_lock_bh(efx->net_dev); | 457 | netif_tx_lock_bh(efx->net_dev); |
| 458 | 458 | ||
| 459 | /* Count the number of tx completions, and decrement the refcnt. Any | 459 | /* Count the number of tx completions, and decrement the refcnt. Any |
| @@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue, | |||
| 465 | dev_kfree_skb_any(skb); | 465 | dev_kfree_skb_any(skb); |
| 466 | } | 466 | } |
| 467 | 467 | ||
| 468 | if (NET_DEV_REGISTERED(efx)) | 468 | if (efx_dev_registered(efx)) |
| 469 | netif_tx_unlock_bh(efx->net_dev); | 469 | netif_tx_unlock_bh(efx->net_dev); |
| 470 | 470 | ||
| 471 | /* Check TX completion and received packet counts */ | 471 | /* Check TX completion and received packet counts */ |
| @@ -517,6 +517,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue, | |||
| 517 | state->packet_count = min(1 << (i << 2), state->packet_count); | 517 | state->packet_count = min(1 << (i << 2), state->packet_count); |
| 518 | state->skbs = kzalloc(sizeof(state->skbs[0]) * | 518 | state->skbs = kzalloc(sizeof(state->skbs[0]) * |
| 519 | state->packet_count, GFP_KERNEL); | 519 | state->packet_count, GFP_KERNEL); |
| 520 | if (!state->skbs) | ||
| 521 | return -ENOMEM; | ||
| 520 | state->flush = 0; | 522 | state->flush = 0; |
| 521 | 523 | ||
| 522 | EFX_LOG(efx, "TX queue %d testing %s loopback with %d " | 524 | EFX_LOG(efx, "TX queue %d testing %s loopback with %d " |
| @@ -700,7 +702,7 @@ int efx_offline_test(struct efx_nic *efx, | |||
| 700 | * "flushing" so all inflight packets are dropped */ | 702 | * "flushing" so all inflight packets are dropped */ |
| 701 | BUG_ON(efx->loopback_selftest); | 703 | BUG_ON(efx->loopback_selftest); |
| 702 | state->flush = 1; | 704 | state->flush = 1; |
| 703 | efx->loopback_selftest = (void *)state; | 705 | efx->loopback_selftest = state; |
| 704 | 706 | ||
| 705 | rc = efx_test_loopbacks(efx, tests, loopback_modes); | 707 | rc = efx_test_loopbacks(efx, tests, loopback_modes); |
| 706 | 708 | ||
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c index 725d1a539c49..66a0d1442aba 100644 --- a/drivers/net/sfc/sfe4001.c +++ b/drivers/net/sfc/sfe4001.c | |||
| @@ -116,18 +116,18 @@ void sfe4001_poweroff(struct efx_nic *efx) | |||
| 116 | 116 | ||
| 117 | /* Turn off all power rails */ | 117 | /* Turn off all power rails */ |
| 118 | out = 0xff; | 118 | out = 0xff; |
| 119 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 119 | efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
| 120 | 120 | ||
| 121 | /* Disable port 1 outputs on IO expander */ | 121 | /* Disable port 1 outputs on IO expander */ |
| 122 | cfg = 0xff; | 122 | cfg = 0xff; |
| 123 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); | 123 | efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); |
| 124 | 124 | ||
| 125 | /* Disable port 0 outputs on IO expander */ | 125 | /* Disable port 0 outputs on IO expander */ |
| 126 | cfg = 0xff; | 126 | cfg = 0xff; |
| 127 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); | 127 | efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); |
| 128 | 128 | ||
| 129 | /* Clear any over-temperature alert */ | 129 | /* Clear any over-temperature alert */ |
| 130 | (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); | 130 | efx_i2c_read(i2c, MAX6647, RSL, &in, 1); |
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected | 133 | /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected |
| @@ -253,14 +253,14 @@ done: | |||
| 253 | fail3: | 253 | fail3: |
| 254 | /* Turn off all power rails */ | 254 | /* Turn off all power rails */ |
| 255 | out = 0xff; | 255 | out = 0xff; |
| 256 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | 256 | efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); |
| 257 | /* Disable port 1 outputs on IO expander */ | 257 | /* Disable port 1 outputs on IO expander */ |
| 258 | out = 0xff; | 258 | out = 0xff; |
| 259 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); | 259 | efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); |
| 260 | fail2: | 260 | fail2: |
| 261 | /* Disable port 0 outputs on IO expander */ | 261 | /* Disable port 0 outputs on IO expander */ |
| 262 | out = 0xff; | 262 | out = 0xff; |
| 263 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); | 263 | efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); |
| 264 | fail1: | 264 | fail1: |
| 265 | return rc; | 265 | return rc; |
| 266 | } | 266 | } |
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c index b1cd6deec01f..c0146061c326 100644 --- a/drivers/net/sfc/tenxpress.c +++ b/drivers/net/sfc/tenxpress.c | |||
| @@ -211,6 +211,8 @@ static int tenxpress_phy_init(struct efx_nic *efx) | |||
| 211 | int rc = 0; | 211 | int rc = 0; |
| 212 | 212 | ||
| 213 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | 213 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); |
| 214 | if (!phy_data) | ||
| 215 | return -ENOMEM; | ||
| 214 | efx->phy_data = phy_data; | 216 | efx->phy_data = phy_data; |
| 215 | 217 | ||
| 216 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); | 218 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); |
| @@ -376,7 +378,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx) | |||
| 376 | * perform a special software reset */ | 378 | * perform a special software reset */ |
| 377 | if ((phy_data->tx_disabled && !efx->tx_disabled) || | 379 | if ((phy_data->tx_disabled && !efx->tx_disabled) || |
| 378 | loop_change) { | 380 | loop_change) { |
| 379 | (void) tenxpress_special_reset(efx); | 381 | tenxpress_special_reset(efx); |
| 380 | falcon_reset_xaui(efx); | 382 | falcon_reset_xaui(efx); |
| 381 | } | 383 | } |
| 382 | 384 | ||
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c index 9b436f5b4888..5cdd082ab8f6 100644 --- a/drivers/net/sfc/tx.c +++ b/drivers/net/sfc/tx.c | |||
| @@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | |||
| 387 | if (unlikely(tx_queue->stopped)) { | 387 | if (unlikely(tx_queue->stopped)) { |
| 388 | fill_level = tx_queue->insert_count - tx_queue->read_count; | 388 | fill_level = tx_queue->insert_count - tx_queue->read_count; |
| 389 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { | 389 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { |
| 390 | EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); | 390 | EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); |
| 391 | 391 | ||
| 392 | /* Do this under netif_tx_lock(), to avoid racing | 392 | /* Do this under netif_tx_lock(), to avoid racing |
| 393 | * with efx_xmit(). */ | 393 | * with efx_xmit(). */ |
| @@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue, | |||
| 639 | base_dma = tsoh->dma_addr & PAGE_MASK; | 639 | base_dma = tsoh->dma_addr & PAGE_MASK; |
| 640 | 640 | ||
| 641 | p = &tx_queue->tso_headers_free; | 641 | p = &tx_queue->tso_headers_free; |
| 642 | while (*p != NULL) | 642 | while (*p != NULL) { |
| 643 | if (((unsigned long)*p & PAGE_MASK) == base_kva) | 643 | if (((unsigned long)*p & PAGE_MASK) == base_kva) |
| 644 | *p = (*p)->next; | 644 | *p = (*p)->next; |
| 645 | else | 645 | else |
| 646 | p = &(*p)->next; | 646 | p = &(*p)->next; |
| 647 | } | ||
| 647 | 648 | ||
| 648 | pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); | 649 | pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); |
| 649 | } | 650 | } |
| @@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue, | |||
| 939 | 940 | ||
| 940 | /* Allocate a DMA-mapped header buffer. */ | 941 | /* Allocate a DMA-mapped header buffer. */ |
| 941 | if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { | 942 | if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { |
| 942 | if (tx_queue->tso_headers_free == NULL) | 943 | if (tx_queue->tso_headers_free == NULL) { |
| 943 | if (efx_tsoh_block_alloc(tx_queue)) | 944 | if (efx_tsoh_block_alloc(tx_queue)) |
| 944 | return -1; | 945 | return -1; |
| 946 | } | ||
| 945 | EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); | 947 | EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); |
| 946 | tsoh = tx_queue->tso_headers_free; | 948 | tsoh = tx_queue->tso_headers_free; |
| 947 | tx_queue->tso_headers_free = tsoh->next; | 949 | tx_queue->tso_headers_free = tsoh->next; |
| @@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue) | |||
| 1106 | { | 1108 | { |
| 1107 | unsigned i; | 1109 | unsigned i; |
| 1108 | 1110 | ||
| 1109 | if (tx_queue->buffer) | 1111 | if (tx_queue->buffer) { |
| 1110 | for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) | 1112 | for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) |
| 1111 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); | 1113 | efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); |
| 1114 | } | ||
| 1112 | 1115 | ||
| 1113 | while (tx_queue->tso_headers_free != NULL) | 1116 | while (tx_queue->tso_headers_free != NULL) |
| 1114 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, | 1117 | efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, |
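The tx.c hunks above only add braces, but around exactly the shape of code that invites the dangling-else bug: a loop or if whose body is itself an if/else. Here the original parsed as intended; the braces make that explicit and silence the compiler's ambiguous-else warnings. The classic hazard the style rule guards against:

    #include <stdio.h>

    int main(void)
    {
        int a = 1, b = 0;

        if (a)
            if (b)
                puts("a && b");
        else    /* the indentation lies: this pairs with if (b) */
            puts("runs when a && !b, not when !a");

        return 0;
    }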
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h index dca62f190198..35ab19c27f8d 100644 --- a/drivers/net/sfc/workarounds.h +++ b/drivers/net/sfc/workarounds.h | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | */ | 16 | */ |
| 17 | 17 | ||
| 18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 | 18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 |
| 19 | #define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) | 19 | #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) |
| 20 | 20 | ||
| 21 | /* XAUI resets if link not detected */ | 21 | /* XAUI resets if link not detected */ |
| 22 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS | 22 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS |
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c index 3b9f9ddbc372..f3684ad28887 100644 --- a/drivers/net/sfc/xfp_phy.c +++ b/drivers/net/sfc/xfp_phy.c | |||
| @@ -85,7 +85,9 @@ static int xfp_phy_init(struct efx_nic *efx) | |||
| 85 | int rc; | 85 | int rc; |
| 86 | 86 | ||
| 87 | phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); | 87 | phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); |
| 88 | efx->phy_data = (void *) phy_data; | 88 | if (!phy_data) |
| 89 | return -ENOMEM; | ||
| 90 | efx->phy_data = phy_data; | ||
| 89 | 91 | ||
| 90 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" | 92 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" |
| 91 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), | 93 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index f226bcac7d17..3bb60530d4d7 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
| @@ -1159,17 +1159,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
| 1159 | } | 1159 | } |
| 1160 | 1160 | ||
| 1161 | #ifdef SKY2_VLAN_TAG_USED | 1161 | #ifdef SKY2_VLAN_TAG_USED |
| 1162 | static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | 1162 | static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) |
| 1163 | { | 1163 | { |
| 1164 | struct sky2_port *sky2 = netdev_priv(dev); | 1164 | if (onoff) { |
| 1165 | struct sky2_hw *hw = sky2->hw; | ||
| 1166 | u16 port = sky2->port; | ||
| 1167 | |||
| 1168 | netif_tx_lock_bh(dev); | ||
| 1169 | napi_disable(&hw->napi); | ||
| 1170 | |||
| 1171 | sky2->vlgrp = grp; | ||
| 1172 | if (grp) { | ||
| 1173 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), | 1165 | sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), |
| 1174 | RX_VLAN_STRIP_ON); | 1166 | RX_VLAN_STRIP_ON); |
| 1175 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 1167 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
| @@ -1180,6 +1172,19 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp | |||
| 1180 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), | 1172 | sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), |
| 1181 | TX_VLAN_TAG_OFF); | 1173 | TX_VLAN_TAG_OFF); |
| 1182 | } | 1174 | } |
| 1175 | } | ||
| 1176 | |||
| 1177 | static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) | ||
| 1178 | { | ||
| 1179 | struct sky2_port *sky2 = netdev_priv(dev); | ||
| 1180 | struct sky2_hw *hw = sky2->hw; | ||
| 1181 | u16 port = sky2->port; | ||
| 1182 | |||
| 1183 | netif_tx_lock_bh(dev); | ||
| 1184 | napi_disable(&hw->napi); | ||
| 1185 | |||
| 1186 | sky2->vlgrp = grp; | ||
| 1187 | sky2_set_vlan_mode(hw, port, grp != NULL); | ||
| 1183 | 1188 | ||
| 1184 | sky2_read32(hw, B0_Y2_SP_LISR); | 1189 | sky2_read32(hw, B0_Y2_SP_LISR); |
| 1185 | napi_enable(&hw->napi); | 1190 | napi_enable(&hw->napi); |
| @@ -1418,6 +1423,10 @@ static int sky2_up(struct net_device *dev) | |||
| 1418 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, | 1423 | sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, |
| 1419 | TX_RING_SIZE - 1); | 1424 | TX_RING_SIZE - 1); |
| 1420 | 1425 | ||
| 1426 | #ifdef SKY2_VLAN_TAG_USED | ||
| 1427 | sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); | ||
| 1428 | #endif | ||
| 1429 | |||
| 1421 | err = sky2_rx_start(sky2); | 1430 | err = sky2_rx_start(sky2); |
| 1422 | if (err) | 1431 | if (err) |
| 1423 | goto err_out; | 1432 | goto err_out; |
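The sky2 change is a bug fix dressed as a refactor: the VLAN stripping/insertion register writes move into sky2_set_vlan_mode(), and sky2_up() gains a call to it, so a device restart no longer silently falls back to the chip's reset-default VLAN handling. A toy model of the idea -- keep desired state in software, make one idempotent helper push it to hardware, and call that helper from every path that (re)initialises the chip. Names echo the driver; the bodies are invented:

    #include <stdbool.h>
    #include <stdio.h>

    struct port {
        void *vlgrp;        /* non-NULL once a VLAN group is registered */
        bool hw_vlan_on;    /* stands in for the RX/TX GMF_CTRL bits */
    };

    static void set_vlan_mode(struct port *p, bool onoff)
    {
        p->hw_vlan_on = onoff;
    }

    static void port_up(struct port *p)
    {
        p->hw_vlan_on = false;              /* chip comes up at defaults */
        set_vlan_mode(p, p->vlgrp != NULL); /* the fix: re-apply state */
    }

    int main(void)
    {
        static int group;
        struct port p = { .vlgrp = &group };

        port_up(&p);
        printf("vlan offload %s\n", p.hw_vlan_on ? "on" : "off"); /* on */
        return 0;
    }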
diff --git a/drivers/net/tokenring/3c359.h b/drivers/net/tokenring/3c359.h index b880cba0f6fd..74cf8e1a181b 100644 --- a/drivers/net/tokenring/3c359.h +++ b/drivers/net/tokenring/3c359.h | |||
| @@ -264,7 +264,7 @@ struct xl_private { | |||
| 264 | u16 asb; | 264 | u16 asb; |
| 265 | 265 | ||
| 266 | u8 __iomem *xl_mmio; | 266 | u8 __iomem *xl_mmio; |
| 267 | char *xl_card_name; | 267 | const char *xl_card_name; |
| 268 | struct pci_dev *pdev ; | 268 | struct pci_dev *pdev ; |
| 269 | 269 | ||
| 270 | spinlock_t xl_lock ; | 270 | spinlock_t xl_lock ; |
diff --git a/drivers/net/tokenring/olympic.h b/drivers/net/tokenring/olympic.h index c91956310fb2..10fbba08978f 100644 --- a/drivers/net/tokenring/olympic.h +++ b/drivers/net/tokenring/olympic.h | |||
| @@ -254,7 +254,7 @@ struct olympic_private { | |||
| 254 | u8 __iomem *olympic_mmio; | 254 | u8 __iomem *olympic_mmio; |
| 255 | u8 __iomem *olympic_lap; | 255 | u8 __iomem *olympic_lap; |
| 256 | struct pci_dev *pdev ; | 256 | struct pci_dev *pdev ; |
| 257 | char *olympic_card_name ; | 257 | const char *olympic_card_name; |
| 258 | 258 | ||
| 259 | spinlock_t olympic_lock ; | 259 | spinlock_t olympic_lock ; |
| 260 | 260 | ||
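Both token-ring headers make the card-name pointer const char *. The field only ever points at driver-owned string literals, and literals are not legally writable, so the const moves a potential runtime fault to compile time. In miniature:

    #include <stdio.h>

    struct card { const char *name; };

    int main(void)
    {
        struct card c = { .name = "Olympic PCI" };

        printf("%s\n", c.name);
        /* c.name[0] = 'x';  -- now rejected by the compiler instead of
         * faulting at runtime on a read-only string */
        return 0;
    }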
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c index 2511ca7a12aa..e9e628621639 100644 --- a/drivers/net/tulip/uli526x.c +++ b/drivers/net/tulip/uli526x.c | |||
| @@ -225,6 +225,9 @@ static void uli526x_set_filter_mode(struct net_device *); | |||
| 225 | static const struct ethtool_ops netdev_ethtool_ops; | 225 | static const struct ethtool_ops netdev_ethtool_ops; |
| 226 | static u16 read_srom_word(long, int); | 226 | static u16 read_srom_word(long, int); |
| 227 | static irqreturn_t uli526x_interrupt(int, void *); | 227 | static irqreturn_t uli526x_interrupt(int, void *); |
| 228 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 229 | static void uli526x_poll(struct net_device *dev); | ||
| 230 | #endif | ||
| 228 | static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); | 231 | static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); |
| 229 | static void allocate_rx_buffer(struct uli526x_board_info *); | 232 | static void allocate_rx_buffer(struct uli526x_board_info *); |
| 230 | static void update_cr6(u32, unsigned long); | 233 | static void update_cr6(u32, unsigned long); |
| @@ -339,6 +342,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev, | |||
| 339 | dev->get_stats = &uli526x_get_stats; | 342 | dev->get_stats = &uli526x_get_stats; |
| 340 | dev->set_multicast_list = &uli526x_set_filter_mode; | 343 | dev->set_multicast_list = &uli526x_set_filter_mode; |
| 341 | dev->ethtool_ops = &netdev_ethtool_ops; | 344 | dev->ethtool_ops = &netdev_ethtool_ops; |
| 345 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 346 | dev->poll_controller = &uli526x_poll; | ||
| 347 | #endif | ||
| 342 | spin_lock_init(&db->lock); | 348 | spin_lock_init(&db->lock); |
| 343 | 349 | ||
| 344 | 350 | ||
| @@ -681,8 +687,9 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) | |||
| 681 | db->cr5_data = inl(ioaddr + DCR5); | 687 | db->cr5_data = inl(ioaddr + DCR5); |
| 682 | outl(db->cr5_data, ioaddr + DCR5); | 688 | outl(db->cr5_data, ioaddr + DCR5); |
| 683 | if ( !(db->cr5_data & 0x180c1) ) { | 689 | if ( !(db->cr5_data & 0x180c1) ) { |
| 684 | spin_unlock_irqrestore(&db->lock, flags); | 690 | /* Restore CR7 to enable interrupt mask */ |
| 685 | outl(db->cr7_data, ioaddr + DCR7); | 691 | outl(db->cr7_data, ioaddr + DCR7); |
| 692 | spin_unlock_irqrestore(&db->lock, flags); | ||
| 686 | return IRQ_HANDLED; | 693 | return IRQ_HANDLED; |
| 687 | } | 694 | } |
| 688 | 695 | ||
| @@ -715,6 +722,13 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id) | |||
| 715 | return IRQ_HANDLED; | 722 | return IRQ_HANDLED; |
| 716 | } | 723 | } |
| 717 | 724 | ||
| 725 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 726 | static void uli526x_poll(struct net_device *dev) | ||
| 727 | { | ||
| 728 | /* ISR grabs the irqsave lock, so this should be safe */ | ||
| 729 | uli526x_interrupt(dev->irq, dev); | ||
| 730 | } | ||
| 731 | #endif | ||
| 718 | 732 | ||
| 719 | /* | 733 | /* |
| 720 | * Free TX resource after TX complete | 734 | * Free TX resource after TX complete |
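Note on the uli526x hunks: two related fixes. First, the interrupt handler now restores the chip's interrupt mask (CR7) while db->lock is still held, so a concurrent invocation (e.g. via the new netpoll path) cannot interleave between dropping the lock and re-enabling the mask. Second, a CONFIG_NET_POLL_CONTROLLER hook is added so netconsole/netpoll can drive the NIC with interrupts off; it simply calls the ISR directly, which is safe here only because uli526x_interrupt() takes its lock with spin_lock_irqsave(). A common variant of the idiom (the disable_irq()/enable_irq() bracketing is part of the generic pattern, not of this driver's version):

    #ifdef CONFIG_NET_POLL_CONTROLLER
    /* Generic netpoll hook: invoke the ISR by hand.  Many drivers also
     * bracket the call with disable_irq()/enable_irq() to avoid racing
     * a real interrupt on the same line. */
    static void example_poll(struct net_device *dev)
    {
        disable_irq(dev->irq);
        example_interrupt(dev->irq, dev);
        enable_irq(dev->irq);
    }
    #endif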
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c index ca0bdac07a78..fb0b918e5ccb 100644 --- a/drivers/net/ucc_geth.c +++ b/drivers/net/ucc_geth.c | |||
| @@ -237,7 +237,7 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, | |||
| 237 | skb->dev = ugeth->dev; | 237 | skb->dev = ugeth->dev; |
| 238 | 238 | ||
| 239 | out_be32(&((struct qe_bd __iomem *)bd)->buf, | 239 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
| 240 | dma_map_single(NULL, | 240 | dma_map_single(&ugeth->dev->dev, |
| 241 | skb->data, | 241 | skb->data, |
| 242 | ugeth->ug_info->uf_info.max_rx_buf_length + | 242 | ugeth->ug_info->uf_info.max_rx_buf_length + |
| 243 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | 243 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, |
| @@ -2158,7 +2158,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) | |||
| 2158 | continue; | 2158 | continue; |
| 2159 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { | 2159 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { |
| 2160 | if (ugeth->tx_skbuff[i][j]) { | 2160 | if (ugeth->tx_skbuff[i][j]) { |
| 2161 | dma_unmap_single(NULL, | 2161 | dma_unmap_single(&ugeth->dev->dev, |
| 2162 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | 2162 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
| 2163 | (in_be32((u32 __iomem *)bd) & | 2163 | (in_be32((u32 __iomem *)bd) & |
| 2164 | BD_LENGTH_MASK), | 2164 | BD_LENGTH_MASK), |
| @@ -2186,7 +2186,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth) | |||
| 2186 | bd = ugeth->p_rx_bd_ring[i]; | 2186 | bd = ugeth->p_rx_bd_ring[i]; |
| 2187 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { | 2187 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { |
| 2188 | if (ugeth->rx_skbuff[i][j]) { | 2188 | if (ugeth->rx_skbuff[i][j]) { |
| 2189 | dma_unmap_single(NULL, | 2189 | dma_unmap_single(&ugeth->dev->dev, |
| 2190 | in_be32(&((struct qe_bd __iomem *)bd)->buf), | 2190 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
| 2191 | ugeth->ug_info-> | 2191 | ugeth->ug_info-> |
| 2192 | uf_info.max_rx_buf_length + | 2192 | uf_info.max_rx_buf_length + |
| @@ -3406,7 +3406,8 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 3406 | 3406 | ||
| 3407 | /* set up the buffer descriptor */ | 3407 | /* set up the buffer descriptor */ |
| 3408 | out_be32(&((struct qe_bd __iomem *)bd)->buf, | 3408 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
| 3409 | dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); | 3409 | dma_map_single(&ugeth->dev->dev, skb->data, |
| 3410 | skb->len, DMA_TO_DEVICE)); | ||
| 3410 | 3411 | ||
| 3411 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ | 3412 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ |
| 3412 | 3413 | ||
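Note: all four ucc_geth hunks replace dma_map_single(NULL, ...) with the net_device's embedded struct device. Passing a NULL device to the DMA API is invalid: the mapping code needs the device's DMA mask and bus-specific DMA ops, and NULL oopses or silently misbehaves on several architectures. The essential pairing, sketched:

    /* Sketch: map and unmap must use the same struct device so the DMA
     * API can honour its mask and bus ops.  dev stands here for
     * &ugeth->dev->dev as used in the hunks above. */
    dma_addr_t busaddr = dma_map_single(dev, skb->data, skb->len,
                                        DMA_TO_DEVICE);
    /* ... hardware consumes the buffer ... */
    dma_unmap_single(dev, busaddr, skb->len, DMA_TO_DEVICE);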
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index dc6f097062df..37ecf845edfe 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c | |||
| @@ -1440,6 +1440,10 @@ static const struct usb_device_id products [] = { | |||
| 1440 | // Belkin F5D5055 | 1440 | // Belkin F5D5055 |
| 1441 | USB_DEVICE(0x050d, 0x5055), | 1441 | USB_DEVICE(0x050d, 0x5055), |
| 1442 | .driver_info = (unsigned long) &ax88178_info, | 1442 | .driver_info = (unsigned long) &ax88178_info, |
| 1443 | }, { | ||
| 1444 | // Apple USB Ethernet Adapter | ||
| 1445 | USB_DEVICE(0x05ac, 0x1402), | ||
| 1446 | .driver_info = (unsigned long) &ax88772_info, | ||
| 1443 | }, | 1447 | }, |
| 1444 | { }, // END | 1448 | { }, // END |
| 1445 | }; | 1449 | }; |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index 21a7785cb8b6..e1177cca8a76 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
| @@ -194,7 +194,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf) | |||
| 194 | dev_dbg(&info->control->dev, | 194 | dev_dbg(&info->control->dev, |
| 195 | "rndis response error, code %d\n", retval); | 195 | "rndis response error, code %d\n", retval); |
| 196 | } | 196 | } |
| 197 | msleep(2); | 197 | msleep(20); |
| 198 | } | 198 | } |
| 199 | dev_dbg(&info->control->dev, "rndis response timeout\n"); | 199 | dev_dbg(&info->control->dev, "rndis response timeout\n"); |
| 200 | return -ETIMEDOUT; | 200 | return -ETIMEDOUT; |
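Note: the rndis_host change only raises the per-iteration sleep in the control-response polling loop from 2 ms to 20 ms; with the short interval, slow devices could exhaust the fixed retry budget and the command failed with -ETIMEDOUT. Simplified shape of such a loop (the retry bound and read helper are assumptions for illustration, not this driver's code):

    /* Sketch of a bounded poll: total patience = retries * sleep, so
     * lengthening the sleep is the cheap way to tolerate slow firmware. */
    for (count = 0; count < RNDIS_RETRIES; count++) {   /* bound assumed */
        retval = read_rndis_response(dev, buf);         /* hypothetical */
        if (retval >= 0)
            return retval;
        msleep(20);                                     /* was msleep(2) */
    }
    return -ETIMEDOUT;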
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f926b5ab3d09..fe7cdf2a2a23 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -470,8 +470,7 @@ static void virtnet_remove(struct virtio_device *vdev) | |||
| 470 | kfree_skb(skb); | 470 | kfree_skb(skb); |
| 471 | vi->num--; | 471 | vi->num--; |
| 472 | } | 472 | } |
| 473 | while ((skb = __skb_dequeue(&vi->send)) != NULL) | 473 | __skb_queue_purge(&vi->send); |
| 474 | kfree_skb(skb); | ||
| 475 | 474 | ||
| 476 | BUG_ON(vi->num != 0); | 475 | BUG_ON(vi->num != 0); |
| 477 | 476 | ||
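Note: virtio_net here (and xen-netfront further below) replace an open-coded dequeue-and-free loop with __skb_queue_purge(), the stock skbuff helper for draining a queue without taking its lock. It is essentially:

    /* What __skb_queue_purge() amounts to -- the caller must already
     * hold whatever protects the list, or own it exclusively: */
    static inline void __skb_queue_purge(struct sk_buff_head *list)
    {
        struct sk_buff *skb;
        while ((skb = __skb_dequeue(list)) != NULL)
            kfree_skb(skb);
    }

so the substitution is behaviour-preserving; dev_kfree_skb() in the xen-netfront variant is an alias for kfree_skb() in this tree.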
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c index 9a83c9d5b8cf..7f984895b0d5 100644 --- a/drivers/net/wan/hdlc.c +++ b/drivers/net/wan/hdlc.c | |||
| @@ -43,8 +43,7 @@ static const char* version = "HDLC support module revision 1.22"; | |||
| 43 | 43 | ||
| 44 | #undef DEBUG_LINK | 44 | #undef DEBUG_LINK |
| 45 | 45 | ||
| 46 | static struct hdlc_proto *first_proto = NULL; | 46 | static struct hdlc_proto *first_proto; |
| 47 | |||
| 48 | 47 | ||
| 49 | static int hdlc_change_mtu(struct net_device *dev, int new_mtu) | 48 | static int hdlc_change_mtu(struct net_device *dev, int new_mtu) |
| 50 | { | 49 | { |
| @@ -314,21 +313,25 @@ void detach_hdlc_protocol(struct net_device *dev) | |||
| 314 | 313 | ||
| 315 | void register_hdlc_protocol(struct hdlc_proto *proto) | 314 | void register_hdlc_protocol(struct hdlc_proto *proto) |
| 316 | { | 315 | { |
| 316 | rtnl_lock(); | ||
| 317 | proto->next = first_proto; | 317 | proto->next = first_proto; |
| 318 | first_proto = proto; | 318 | first_proto = proto; |
| 319 | rtnl_unlock(); | ||
| 319 | } | 320 | } |
| 320 | 321 | ||
| 321 | 322 | ||
| 322 | void unregister_hdlc_protocol(struct hdlc_proto *proto) | 323 | void unregister_hdlc_protocol(struct hdlc_proto *proto) |
| 323 | { | 324 | { |
| 324 | struct hdlc_proto **p = &first_proto; | 325 | struct hdlc_proto **p; |
| 325 | while (*p) { | 326 | |
| 326 | if (*p == proto) { | 327 | rtnl_lock(); |
| 327 | *p = proto->next; | 328 | p = &first_proto; |
| 328 | return; | 329 | while (*p != proto) { |
| 329 | } | 330 | BUG_ON(!*p); |
| 330 | p = &((*p)->next); | 331 | p = &((*p)->next); |
| 331 | } | 332 | } |
| 333 | *p = proto->next; | ||
| 334 | rtnl_unlock(); | ||
| 332 | } | 335 | } |
| 333 | 336 | ||
| 334 | 337 | ||
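Note: the hdlc core now serializes protocol (un)registration under rtnl_lock(), since first_proto is a bare singly linked list that was previously mutated with no locking. The unregister path is also rewritten with the pointer-to-pointer idiom and BUG()s if asked to remove a protocol that is not on the list:

    /* The unlink idiom used above: walking the link fields themselves
     * removes the need for a separate 'prev' pointer. */
    struct hdlc_proto **p;

    rtnl_lock();
    for (p = &first_proto; *p != proto; p = &(*p)->next)
        BUG_ON(!*p);            /* hit end of list: proto not registered */
    *p = proto->next;           /* unlink in O(1) once found */
    rtnl_unlock();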
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 7133c688cf20..762d21c1c703 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c | |||
| @@ -56,6 +56,7 @@ struct cisco_state { | |||
| 56 | cisco_proto settings; | 56 | cisco_proto settings; |
| 57 | 57 | ||
| 58 | struct timer_list timer; | 58 | struct timer_list timer; |
| 59 | spinlock_t lock; | ||
| 59 | unsigned long last_poll; | 60 | unsigned long last_poll; |
| 60 | int up; | 61 | int up; |
| 61 | int request_sent; | 62 | int request_sent; |
| @@ -158,6 +159,7 @@ static int cisco_rx(struct sk_buff *skb) | |||
| 158 | { | 159 | { |
| 159 | struct net_device *dev = skb->dev; | 160 | struct net_device *dev = skb->dev; |
| 160 | hdlc_device *hdlc = dev_to_hdlc(dev); | 161 | hdlc_device *hdlc = dev_to_hdlc(dev); |
| 162 | struct cisco_state *st = state(hdlc); | ||
| 161 | struct hdlc_header *data = (struct hdlc_header*)skb->data; | 163 | struct hdlc_header *data = (struct hdlc_header*)skb->data; |
| 162 | struct cisco_packet *cisco_data; | 164 | struct cisco_packet *cisco_data; |
| 163 | struct in_device *in_dev; | 165 | struct in_device *in_dev; |
| @@ -220,11 +222,12 @@ static int cisco_rx(struct sk_buff *skb) | |||
| 220 | goto rx_error; | 222 | goto rx_error; |
| 221 | 223 | ||
| 222 | case CISCO_KEEPALIVE_REQ: | 224 | case CISCO_KEEPALIVE_REQ: |
| 223 | state(hdlc)->rxseq = ntohl(cisco_data->par1); | 225 | spin_lock(&st->lock); |
| 224 | if (state(hdlc)->request_sent && | 226 | st->rxseq = ntohl(cisco_data->par1); |
| 225 | ntohl(cisco_data->par2) == state(hdlc)->txseq) { | 227 | if (st->request_sent && |
| 226 | state(hdlc)->last_poll = jiffies; | 228 | ntohl(cisco_data->par2) == st->txseq) { |
| 227 | if (!state(hdlc)->up) { | 229 | st->last_poll = jiffies; |
| 230 | if (!st->up) { | ||
| 228 | u32 sec, min, hrs, days; | 231 | u32 sec, min, hrs, days; |
| 229 | sec = ntohl(cisco_data->time) / 1000; | 232 | sec = ntohl(cisco_data->time) / 1000; |
| 230 | min = sec / 60; sec -= min * 60; | 233 | min = sec / 60; sec -= min * 60; |
| @@ -232,12 +235,12 @@ static int cisco_rx(struct sk_buff *skb) | |||
| 232 | days = hrs / 24; hrs -= days * 24; | 235 | days = hrs / 24; hrs -= days * 24; |
| 233 | printk(KERN_INFO "%s: Link up (peer " | 236 | printk(KERN_INFO "%s: Link up (peer " |
| 234 | "uptime %ud%uh%um%us)\n", | 237 | "uptime %ud%uh%um%us)\n", |
| 235 | dev->name, days, hrs, | 238 | dev->name, days, hrs, min, sec); |
| 236 | min, sec); | ||
| 237 | netif_dormant_off(dev); | 239 | netif_dormant_off(dev); |
| 238 | state(hdlc)->up = 1; | 240 | st->up = 1; |
| 239 | } | 241 | } |
| 240 | } | 242 | } |
| 243 | spin_unlock(&st->lock); | ||
| 241 | 244 | ||
| 242 | dev_kfree_skb_any(skb); | 245 | dev_kfree_skb_any(skb); |
| 243 | return NET_RX_SUCCESS; | 246 | return NET_RX_SUCCESS; |
| @@ -261,24 +264,25 @@ static void cisco_timer(unsigned long arg) | |||
| 261 | { | 264 | { |
| 262 | struct net_device *dev = (struct net_device *)arg; | 265 | struct net_device *dev = (struct net_device *)arg; |
| 263 | hdlc_device *hdlc = dev_to_hdlc(dev); | 266 | hdlc_device *hdlc = dev_to_hdlc(dev); |
| 267 | struct cisco_state *st = state(hdlc); | ||
| 264 | 268 | ||
| 265 | if (state(hdlc)->up && | 269 | spin_lock(&st->lock); |
| 266 | time_after(jiffies, state(hdlc)->last_poll + | 270 | if (st->up && |
| 267 | state(hdlc)->settings.timeout * HZ)) { | 271 | time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) { |
| 268 | state(hdlc)->up = 0; | 272 | st->up = 0; |
| 269 | printk(KERN_INFO "%s: Link down\n", dev->name); | 273 | printk(KERN_INFO "%s: Link down\n", dev->name); |
| 270 | netif_dormant_on(dev); | 274 | netif_dormant_on(dev); |
| 271 | } | 275 | } |
| 272 | 276 | ||
| 273 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, | 277 | cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), |
| 274 | htonl(++state(hdlc)->txseq), | 278 | htonl(st->rxseq)); |
| 275 | htonl(state(hdlc)->rxseq)); | 279 | st->request_sent = 1; |
| 276 | state(hdlc)->request_sent = 1; | 280 | spin_unlock(&st->lock); |
| 277 | state(hdlc)->timer.expires = jiffies + | 281 | |
| 278 | state(hdlc)->settings.interval * HZ; | 282 | st->timer.expires = jiffies + st->settings.interval * HZ; |
| 279 | state(hdlc)->timer.function = cisco_timer; | 283 | st->timer.function = cisco_timer; |
| 280 | state(hdlc)->timer.data = arg; | 284 | st->timer.data = arg; |
| 281 | add_timer(&state(hdlc)->timer); | 285 | add_timer(&st->timer); |
| 282 | } | 286 | } |
| 283 | 287 | ||
| 284 | 288 | ||
| @@ -286,15 +290,20 @@ static void cisco_timer(unsigned long arg) | |||
| 286 | static void cisco_start(struct net_device *dev) | 290 | static void cisco_start(struct net_device *dev) |
| 287 | { | 291 | { |
| 288 | hdlc_device *hdlc = dev_to_hdlc(dev); | 292 | hdlc_device *hdlc = dev_to_hdlc(dev); |
| 289 | state(hdlc)->up = 0; | 293 | struct cisco_state *st = state(hdlc); |
| 290 | state(hdlc)->request_sent = 0; | 294 | unsigned long flags; |
| 291 | state(hdlc)->txseq = state(hdlc)->rxseq = 0; | 295 | |
| 292 | 296 | spin_lock_irqsave(&st->lock, flags); | |
| 293 | init_timer(&state(hdlc)->timer); | 297 | st->up = 0; |
| 294 | state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/ | 298 | st->request_sent = 0; |
| 295 | state(hdlc)->timer.function = cisco_timer; | 299 | st->txseq = st->rxseq = 0; |
| 296 | state(hdlc)->timer.data = (unsigned long)dev; | 300 | spin_unlock_irqrestore(&st->lock, flags); |
| 297 | add_timer(&state(hdlc)->timer); | 301 | |
| 302 | init_timer(&st->timer); | ||
| 303 | st->timer.expires = jiffies + HZ; /* First poll after 1 s */ | ||
| 304 | st->timer.function = cisco_timer; | ||
| 305 | st->timer.data = (unsigned long)dev; | ||
| 306 | add_timer(&st->timer); | ||
| 298 | } | 307 | } |
| 299 | 308 | ||
| 300 | 309 | ||
| @@ -302,10 +311,16 @@ static void cisco_start(struct net_device *dev) | |||
| 302 | static void cisco_stop(struct net_device *dev) | 311 | static void cisco_stop(struct net_device *dev) |
| 303 | { | 312 | { |
| 304 | hdlc_device *hdlc = dev_to_hdlc(dev); | 313 | hdlc_device *hdlc = dev_to_hdlc(dev); |
| 305 | del_timer_sync(&state(hdlc)->timer); | 314 | struct cisco_state *st = state(hdlc); |
| 315 | unsigned long flags; | ||
| 316 | |||
| 317 | del_timer_sync(&st->timer); | ||
| 318 | |||
| 319 | spin_lock_irqsave(&st->lock, flags); | ||
| 306 | netif_dormant_on(dev); | 320 | netif_dormant_on(dev); |
| 307 | state(hdlc)->up = 0; | 321 | st->up = 0; |
| 308 | state(hdlc)->request_sent = 0; | 322 | st->request_sent = 0; |
| 323 | spin_unlock_irqrestore(&st->lock, flags); | ||
| 309 | } | 324 | } |
| 310 | 325 | ||
| 311 | 326 | ||
| @@ -367,6 +382,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
| 367 | return result; | 382 | return result; |
| 368 | 383 | ||
| 369 | memcpy(&state(hdlc)->settings, &new_settings, size); | 384 | memcpy(&state(hdlc)->settings, &new_settings, size); |
| 385 | spin_lock_init(&state(hdlc)->lock); | ||
| 370 | dev->hard_start_xmit = hdlc->xmit; | 386 | dev->hard_start_xmit = hdlc->xmit; |
| 371 | dev->header_ops = &cisco_header_ops; | 387 | dev->header_ops = &cisco_header_ops; |
| 372 | dev->type = ARPHRD_CISCO; | 388 | dev->type = ARPHRD_CISCO; |
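Note: hdlc_cisco gains a per-device spinlock around the keepalive state (txseq/rxseq, up, last_poll, request_sent), which was previously raced between cisco_rx() in softirq context and cisco_timer(). Those two paths take the plain spin_lock() form, while cisco_start()/cisco_stop(), running in process context, use the irqsave form; the lock itself is initialized in cisco_ioctl() when the protocol is attached. The pattern, condensed from the hunks above:

    /* Same lock, context-appropriate acquisition. */
    spin_lock(&st->lock);                   /* softirq: cisco_rx, cisco_timer */
    st->rxseq = ntohl(cisco_data->par1);
    spin_unlock(&st->lock);

    spin_lock_irqsave(&st->lock, flags);    /* process context: start/stop */
    st->up = 0;
    st->request_sent = 0;
    spin_unlock_irqrestore(&st->lock, flags);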
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 8bddff150c70..d26f69b0184f 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
| @@ -946,8 +946,7 @@ err: | |||
| 946 | work_done++; | 946 | work_done++; |
| 947 | } | 947 | } |
| 948 | 948 | ||
| 949 | while ((skb = __skb_dequeue(&errq))) | 949 | __skb_queue_purge(&errq); |
| 950 | kfree_skb(skb); | ||
| 951 | 950 | ||
| 952 | work_done -= handle_incoming_queue(dev, &rxq); | 951 | work_done -= handle_incoming_queue(dev, &rxq); |
| 953 | 952 | ||
| @@ -1079,8 +1078,7 @@ static void xennet_release_rx_bufs(struct netfront_info *np) | |||
| 1079 | } | 1078 | } |
| 1080 | } | 1079 | } |
| 1081 | 1080 | ||
| 1082 | while ((skb = __skb_dequeue(&free_list)) != NULL) | 1081 | __skb_queue_purge(&free_list); |
| 1083 | dev_kfree_skb(skb); | ||
| 1084 | 1082 | ||
| 1085 | spin_unlock_bh(&np->rx_lock); | 1083 | spin_unlock_bh(&np->rx_lock); |
| 1086 | } | 1084 | } |
