diff options
author | Stephen Hemminger <shemminger@linux-foundation.org> | 2007-10-03 19:41:36 -0400 |
---|---|---|
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-10 19:47:45 -0400 |
commit | bea3348eef27e6044b6161fd04c3152215f96411 (patch) | |
tree | f0990b263e5ce42505d290a4c346fe990bcd4c33 /drivers/net/ibmveth.c | |
parent | dde4e47e8fe333a5649a3fa0e7db1fa7c08d6158 (diff) |
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independent RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independent from the net
device itself.
The signature of the ->poll() callback goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in its ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ibmveth.c')
-rw-r--r-- | drivers/net/ibmveth.c | 117 |
1 file changed, 58 insertions, 59 deletions
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index acba90f1638e..78e28ada1e21 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -83,7 +83,7 @@ | |||
83 | static int ibmveth_open(struct net_device *dev); | 83 | static int ibmveth_open(struct net_device *dev); |
84 | static int ibmveth_close(struct net_device *dev); | 84 | static int ibmveth_close(struct net_device *dev); |
85 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); | 85 | static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); |
86 | static int ibmveth_poll(struct net_device *dev, int *budget); | 86 | static int ibmveth_poll(struct napi_struct *napi, int budget); |
87 | static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); | 87 | static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev); |
88 | static struct net_device_stats *ibmveth_get_stats(struct net_device *dev); | 88 | static struct net_device_stats *ibmveth_get_stats(struct net_device *dev); |
89 | static void ibmveth_set_multicast_list(struct net_device *dev); | 89 | static void ibmveth_set_multicast_list(struct net_device *dev); |
@@ -480,6 +480,8 @@ static int ibmveth_open(struct net_device *netdev) | |||
480 | 480 | ||
481 | ibmveth_debug_printk("open starting\n"); | 481 | ibmveth_debug_printk("open starting\n"); |
482 | 482 | ||
483 | napi_enable(&adapter->napi); | ||
484 | |||
483 | for(i = 0; i<IbmVethNumBufferPools; i++) | 485 | for(i = 0; i<IbmVethNumBufferPools; i++) |
484 | rxq_entries += adapter->rx_buff_pool[i].size; | 486 | rxq_entries += adapter->rx_buff_pool[i].size; |
485 | 487 | ||
@@ -489,6 +491,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
489 | if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { | 491 | if(!adapter->buffer_list_addr || !adapter->filter_list_addr) { |
490 | ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); | 492 | ibmveth_error_printk("unable to allocate filter or buffer list pages\n"); |
491 | ibmveth_cleanup(adapter); | 493 | ibmveth_cleanup(adapter); |
494 | napi_disable(&adapter->napi); | ||
492 | return -ENOMEM; | 495 | return -ENOMEM; |
493 | } | 496 | } |
494 | 497 | ||
@@ -498,6 +501,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
498 | if(!adapter->rx_queue.queue_addr) { | 501 | if(!adapter->rx_queue.queue_addr) { |
499 | ibmveth_error_printk("unable to allocate rx queue pages\n"); | 502 | ibmveth_error_printk("unable to allocate rx queue pages\n"); |
500 | ibmveth_cleanup(adapter); | 503 | ibmveth_cleanup(adapter); |
504 | napi_disable(&adapter->napi); | ||
501 | return -ENOMEM; | 505 | return -ENOMEM; |
502 | } | 506 | } |
503 | 507 | ||
@@ -514,6 +518,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
514 | (dma_mapping_error(adapter->rx_queue.queue_dma))) { | 518 | (dma_mapping_error(adapter->rx_queue.queue_dma))) { |
515 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); | 519 | ibmveth_error_printk("unable to map filter or buffer list pages\n"); |
516 | ibmveth_cleanup(adapter); | 520 | ibmveth_cleanup(adapter); |
521 | napi_disable(&adapter->napi); | ||
517 | return -ENOMEM; | 522 | return -ENOMEM; |
518 | } | 523 | } |
519 | 524 | ||
@@ -545,6 +550,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
545 | rxq_desc.desc, | 550 | rxq_desc.desc, |
546 | mac_address); | 551 | mac_address); |
547 | ibmveth_cleanup(adapter); | 552 | ibmveth_cleanup(adapter); |
553 | napi_disable(&adapter->napi); | ||
548 | return -ENONET; | 554 | return -ENONET; |
549 | } | 555 | } |
550 | 556 | ||
@@ -555,6 +561,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
555 | ibmveth_error_printk("unable to alloc pool\n"); | 561 | ibmveth_error_printk("unable to alloc pool\n"); |
556 | adapter->rx_buff_pool[i].active = 0; | 562 | adapter->rx_buff_pool[i].active = 0; |
557 | ibmveth_cleanup(adapter); | 563 | ibmveth_cleanup(adapter); |
564 | napi_disable(&adapter->napi); | ||
558 | return -ENOMEM ; | 565 | return -ENOMEM ; |
559 | } | 566 | } |
560 | } | 567 | } |
@@ -567,6 +574,7 @@ static int ibmveth_open(struct net_device *netdev) | |||
567 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | 574 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); |
568 | 575 | ||
569 | ibmveth_cleanup(adapter); | 576 | ibmveth_cleanup(adapter); |
577 | napi_disable(&adapter->napi); | ||
570 | return rc; | 578 | return rc; |
571 | } | 579 | } |
572 | 580 | ||
@@ -587,6 +595,8 @@ static int ibmveth_close(struct net_device *netdev) | |||
587 | 595 | ||
588 | ibmveth_debug_printk("close starting\n"); | 596 | ibmveth_debug_printk("close starting\n"); |
589 | 597 | ||
598 | napi_disable(&adapter->napi); | ||
599 | |||
590 | if (!adapter->pool_config) | 600 | if (!adapter->pool_config) |
591 | netif_stop_queue(netdev); | 601 | netif_stop_queue(netdev); |
592 | 602 | ||
@@ -767,80 +777,68 @@ out: spin_lock_irqsave(&adapter->stats_lock, flags); | |||
767 | return 0; | 777 | return 0; |
768 | } | 778 | } |
769 | 779 | ||
770 | static int ibmveth_poll(struct net_device *netdev, int *budget) | 780 | static int ibmveth_poll(struct napi_struct *napi, int budget) |
771 | { | 781 | { |
772 | struct ibmveth_adapter *adapter = netdev->priv; | 782 | struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi); |
773 | int max_frames_to_process = netdev->quota; | 783 | struct net_device *netdev = adapter->netdev; |
774 | int frames_processed = 0; | 784 | int frames_processed = 0; |
775 | int more_work = 1; | ||
776 | unsigned long lpar_rc; | 785 | unsigned long lpar_rc; |
777 | 786 | ||
778 | restart_poll: | 787 | restart_poll: |
779 | do { | 788 | do { |
780 | struct net_device *netdev = adapter->netdev; | 789 | struct sk_buff *skb; |
781 | |||
782 | if(ibmveth_rxq_pending_buffer(adapter)) { | ||
783 | struct sk_buff *skb; | ||
784 | 790 | ||
785 | rmb(); | 791 | if (!ibmveth_rxq_pending_buffer(adapter)) |
792 | break; | ||
786 | 793 | ||
787 | if(!ibmveth_rxq_buffer_valid(adapter)) { | 794 | rmb(); |
788 | wmb(); /* suggested by larson1 */ | 795 | if (!ibmveth_rxq_buffer_valid(adapter)) { |
789 | adapter->rx_invalid_buffer++; | 796 | wmb(); /* suggested by larson1 */ |
790 | ibmveth_debug_printk("recycling invalid buffer\n"); | 797 | adapter->rx_invalid_buffer++; |
791 | ibmveth_rxq_recycle_buffer(adapter); | 798 | ibmveth_debug_printk("recycling invalid buffer\n"); |
792 | } else { | 799 | ibmveth_rxq_recycle_buffer(adapter); |
793 | int length = ibmveth_rxq_frame_length(adapter); | 800 | } else { |
794 | int offset = ibmveth_rxq_frame_offset(adapter); | 801 | int length = ibmveth_rxq_frame_length(adapter); |
795 | skb = ibmveth_rxq_get_buffer(adapter); | 802 | int offset = ibmveth_rxq_frame_offset(adapter); |
803 | skb = ibmveth_rxq_get_buffer(adapter); | ||
796 | 804 | ||
797 | ibmveth_rxq_harvest_buffer(adapter); | 805 | ibmveth_rxq_harvest_buffer(adapter); |
798 | 806 | ||
799 | skb_reserve(skb, offset); | 807 | skb_reserve(skb, offset); |
800 | skb_put(skb, length); | 808 | skb_put(skb, length); |
801 | skb->protocol = eth_type_trans(skb, netdev); | 809 | skb->protocol = eth_type_trans(skb, netdev); |
802 | 810 | ||
803 | netif_receive_skb(skb); /* send it up */ | 811 | netif_receive_skb(skb); /* send it up */ |
804 | 812 | ||
805 | adapter->stats.rx_packets++; | 813 | adapter->stats.rx_packets++; |
806 | adapter->stats.rx_bytes += length; | 814 | adapter->stats.rx_bytes += length; |
807 | frames_processed++; | 815 | frames_processed++; |
808 | netdev->last_rx = jiffies; | 816 | netdev->last_rx = jiffies; |
809 | } | ||
810 | } else { | ||
811 | more_work = 0; | ||
812 | } | 817 | } |
813 | } while(more_work && (frames_processed < max_frames_to_process)); | 818 | } while (frames_processed < budget); |
814 | 819 | ||
815 | ibmveth_replenish_task(adapter); | 820 | ibmveth_replenish_task(adapter); |
816 | 821 | ||
817 | if(more_work) { | 822 | if (frames_processed < budget) { |
818 | /* more work to do - return that we are not done yet */ | 823 | /* We think we are done - reenable interrupts, |
819 | netdev->quota -= frames_processed; | 824 | * then check once more to make sure we are done. |
820 | *budget -= frames_processed; | 825 | */ |
821 | return 1; | 826 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
822 | } | 827 | VIO_IRQ_ENABLE); |
823 | |||
824 | /* we think we are done - reenable interrupts, then check once more to make sure we are done */ | ||
825 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_ENABLE); | ||
826 | 828 | ||
827 | ibmveth_assert(lpar_rc == H_SUCCESS); | 829 | ibmveth_assert(lpar_rc == H_SUCCESS); |
828 | 830 | ||
829 | netif_rx_complete(netdev); | 831 | netif_rx_complete(netdev, napi); |
830 | 832 | ||
831 | if(ibmveth_rxq_pending_buffer(adapter) && netif_rx_reschedule(netdev, frames_processed)) | 833 | if (ibmveth_rxq_pending_buffer(adapter) && |
832 | { | 834 | netif_rx_reschedule(netdev, napi)) { |
833 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); | 835 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
834 | ibmveth_assert(lpar_rc == H_SUCCESS); | 836 | VIO_IRQ_DISABLE); |
835 | more_work = 1; | 837 | goto restart_poll; |
836 | goto restart_poll; | 838 | } |
837 | } | 839 | } |
838 | 840 | ||
839 | netdev->quota -= frames_processed; | 841 | return frames_processed; |
840 | *budget -= frames_processed; | ||
841 | |||
842 | /* we really are done */ | ||
843 | return 0; | ||
844 | } | 842 | } |
845 | 843 | ||
846 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) | 844 | static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) |
@@ -849,10 +847,11 @@ static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance) | |||
849 | struct ibmveth_adapter *adapter = netdev->priv; | 847 | struct ibmveth_adapter *adapter = netdev->priv; |
850 | unsigned long lpar_rc; | 848 | unsigned long lpar_rc; |
851 | 849 | ||
852 | if(netif_rx_schedule_prep(netdev)) { | 850 | if (netif_rx_schedule_prep(netdev, &adapter->napi)) { |
853 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE); | 851 | lpar_rc = h_vio_signal(adapter->vdev->unit_address, |
852 | VIO_IRQ_DISABLE); | ||
854 | ibmveth_assert(lpar_rc == H_SUCCESS); | 853 | ibmveth_assert(lpar_rc == H_SUCCESS); |
855 | __netif_rx_schedule(netdev); | 854 | __netif_rx_schedule(netdev, &adapter->napi); |
856 | } | 855 | } |
857 | return IRQ_HANDLED; | 856 | return IRQ_HANDLED; |
858 | } | 857 | } |
@@ -1004,6 +1003,8 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
1004 | adapter->mcastFilterSize= *mcastFilterSize_p; | 1003 | adapter->mcastFilterSize= *mcastFilterSize_p; |
1005 | adapter->pool_config = 0; | 1004 | adapter->pool_config = 0; |
1006 | 1005 | ||
1006 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); | ||
1007 | |||
1007 | /* Some older boxes running PHYP non-natively have an OF that | 1008 | /* Some older boxes running PHYP non-natively have an OF that |
1008 | returns a 8-byte local-mac-address field (and the first | 1009 | returns a 8-byte local-mac-address field (and the first |
1009 | 2 bytes have to be ignored) while newer boxes' OF return | 1010 | 2 bytes have to be ignored) while newer boxes' OF return |
@@ -1020,8 +1021,6 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_ | |||
1020 | 1021 | ||
1021 | netdev->irq = dev->irq; | 1022 | netdev->irq = dev->irq; |
1022 | netdev->open = ibmveth_open; | 1023 | netdev->open = ibmveth_open; |
1023 | netdev->poll = ibmveth_poll; | ||
1024 | netdev->weight = 16; | ||
1025 | netdev->stop = ibmveth_close; | 1024 | netdev->stop = ibmveth_close; |
1026 | netdev->hard_start_xmit = ibmveth_start_xmit; | 1025 | netdev->hard_start_xmit = ibmveth_start_xmit; |
1027 | netdev->get_stats = ibmveth_get_stats; | 1026 | netdev->get_stats = ibmveth_get_stats; |