diff options
author | Amit S. Kale <amitkale@netxen.com> | 2006-12-04 12:23:25 -0500 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2006-12-04 18:36:03 -0500 |
commit | ed25ffa16434724f5ed825aa48734c7f3aefa203 (patch) | |
tree | 71cff36d0b2f43adf20e67ac6cc3ba3020f94ff2 /drivers/net/netxen/netxen_nic_init.c | |
parent | 80922fbcb6f00127e91580e7565bb665947ac5d3 (diff) |
[PATCH] NetXen: multiport firmware support, ioctl interface
NetXen: 1G/10G Ethernet driver updates
- Multiport and newer firmware support
- ioctl interface for user level tools
- Cast error fix for multiport
Signed-off-by: Amit S. Kale <amitkale@netxen.com>
netxen_nic.h | 281 +++++++++++++++++++++++++-------
netxen_nic_ethtool.c | 12 -
netxen_nic_hw.c | 429 +++++++++++++++++++++++++++++++++++++++++---------
netxen_nic_init.c | 301 ++++++++++++++++++++++++++++++-----
netxen_nic_ioctl.h | 2
netxen_nic_isr.c | 3
netxen_nic_main.c | 260 ++++++++++++++++++------------
netxen_nic_niu.c | 22 +-
netxen_nic_phan_reg.h | 228 ++++++++++++++++----------
9 files changed, 1161 insertions(+), 377 deletions(-)
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/netxen/netxen_nic_init.c')
-rw-r--r-- | drivers/net/netxen/netxen_nic_init.c | 301 |
1 file changed, 261 insertions, 40 deletions
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index deac1a3ae275..f78668030ec6 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -137,6 +137,8 @@ int netxen_init_firmware(struct netxen_adapter *adapter) | |||
137 | return err; | 137 | return err; |
138 | } | 138 | } |
139 | /* Window 1 call */ | 139 | /* Window 1 call */ |
140 | writel(MPORT_SINGLE_FUNCTION_MODE, | ||
141 | NETXEN_CRB_NORMALIZE(adapter, CRB_MPORT_MODE)); | ||
140 | writel(PHAN_INITIALIZE_ACK, | 142 | writel(PHAN_INITIALIZE_ACK, |
141 | NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); | 143 | NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE)); |
142 | 144 | ||
@@ -184,15 +186,12 @@ void netxen_initialize_adapter_sw(struct netxen_adapter *adapter) | |||
184 | for (i = 0; i < num_rx_bufs; i++) { | 186 | for (i = 0; i < num_rx_bufs; i++) { |
185 | rx_buf->ref_handle = i; | 187 | rx_buf->ref_handle = i; |
186 | rx_buf->state = NETXEN_BUFFER_FREE; | 188 | rx_buf->state = NETXEN_BUFFER_FREE; |
187 | |||
188 | DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:" | 189 | DPRINTK(INFO, "Rx buf:ctx%d i(%d) rx_buf:" |
189 | "%p\n", ctxid, i, rx_buf); | 190 | "%p\n", ctxid, i, rx_buf); |
190 | rx_buf++; | 191 | rx_buf++; |
191 | } | 192 | } |
192 | } | 193 | } |
193 | } | 194 | } |
194 | DPRINTK(INFO, "initialized buffers for %s and %s\n", | ||
195 | "adapter->free_cmd_buf_list", "adapter->free_rxbuf"); | ||
196 | } | 195 | } |
197 | 196 | ||
198 | void netxen_initialize_adapter_hw(struct netxen_adapter *adapter) | 197 | void netxen_initialize_adapter_hw(struct netxen_adapter *adapter) |
@@ -621,6 +620,43 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) | |||
621 | return 0; | 620 | return 0; |
622 | } | 621 | } |
623 | 622 | ||
623 | int netxen_initialize_adapter_offload(struct netxen_adapter *adapter) | ||
624 | { | ||
625 | uint64_t addr; | ||
626 | uint32_t hi; | ||
627 | uint32_t lo; | ||
628 | |||
629 | adapter->dummy_dma.addr = | ||
630 | pci_alloc_consistent(adapter->ahw.pdev, | ||
631 | NETXEN_HOST_DUMMY_DMA_SIZE, | ||
632 | &adapter->dummy_dma.phys_addr); | ||
633 | if (adapter->dummy_dma.addr == NULL) { | ||
634 | printk("%s: ERROR: Could not allocate dummy DMA memory\n", | ||
635 | __FUNCTION__); | ||
636 | return -ENOMEM; | ||
637 | } | ||
638 | |||
639 | addr = (uint64_t) adapter->dummy_dma.phys_addr; | ||
640 | hi = (addr >> 32) & 0xffffffff; | ||
641 | lo = addr & 0xffffffff; | ||
642 | |||
643 | writel(hi, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_HI)); | ||
644 | writel(lo, NETXEN_CRB_NORMALIZE(adapter, CRB_HOST_DUMMY_BUF_ADDR_LO)); | ||
645 | |||
646 | return 0; | ||
647 | } | ||
648 | |||
649 | void netxen_free_adapter_offload(struct netxen_adapter *adapter) | ||
650 | { | ||
651 | if (adapter->dummy_dma.addr) { | ||
652 | pci_free_consistent(adapter->ahw.pdev, | ||
653 | NETXEN_HOST_DUMMY_DMA_SIZE, | ||
654 | adapter->dummy_dma.addr, | ||
655 | adapter->dummy_dma.phys_addr); | ||
656 | adapter->dummy_dma.addr = NULL; | ||
657 | } | ||
658 | } | ||
659 | |||
624 | void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) | 660 | void netxen_phantom_init(struct netxen_adapter *adapter, int pegtune_val) |
625 | { | 661 | { |
626 | u32 val = 0; | 662 | u32 val = 0; |
@@ -655,7 +691,8 @@ int netxen_nic_rx_has_work(struct netxen_adapter *adapter) | |||
655 | desc_head = recv_ctx->rcv_status_desc_head; | 691 | desc_head = recv_ctx->rcv_status_desc_head; |
656 | desc = &desc_head[consumer]; | 692 | desc = &desc_head[consumer]; |
657 | 693 | ||
658 | if (((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST)) | 694 | if (((le16_to_cpu(netxen_get_sts_owner(desc))) |
695 | & STATUS_OWNER_HOST)) | ||
659 | return 1; | 696 | return 1; |
660 | } | 697 | } |
661 | 698 | ||
@@ -747,19 +784,19 @@ void | |||
747 | netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | 784 | netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, |
748 | struct status_desc *desc) | 785 | struct status_desc *desc) |
749 | { | 786 | { |
750 | struct netxen_port *port = adapter->port[STATUS_DESC_PORT(desc)]; | 787 | struct netxen_port *port = adapter->port[netxen_get_sts_port(desc)]; |
751 | struct pci_dev *pdev = port->pdev; | 788 | struct pci_dev *pdev = port->pdev; |
752 | struct net_device *netdev = port->netdev; | 789 | struct net_device *netdev = port->netdev; |
753 | int index = le16_to_cpu(desc->reference_handle); | 790 | int index = le16_to_cpu(netxen_get_sts_refhandle(desc)); |
754 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]); | 791 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctxid]); |
755 | struct netxen_rx_buffer *buffer; | 792 | struct netxen_rx_buffer *buffer; |
756 | struct sk_buff *skb; | 793 | struct sk_buff *skb; |
757 | u32 length = le16_to_cpu(desc->total_length); | 794 | u32 length = le16_to_cpu(netxen_get_sts_totallength(desc)); |
758 | u32 desc_ctx; | 795 | u32 desc_ctx; |
759 | struct netxen_rcv_desc_ctx *rcv_desc; | 796 | struct netxen_rcv_desc_ctx *rcv_desc; |
760 | int ret; | 797 | int ret; |
761 | 798 | ||
762 | desc_ctx = STATUS_DESC_TYPE(desc); | 799 | desc_ctx = netxen_get_sts_type(desc); |
763 | if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) { | 800 | if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) { |
764 | printk("%s: %s Bad Rcv descriptor ring\n", | 801 | printk("%s: %s Bad Rcv descriptor ring\n", |
765 | netxen_nic_driver_name, netdev->name); | 802 | netxen_nic_driver_name, netdev->name); |
@@ -767,20 +804,49 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | |||
767 | } | 804 | } |
768 | 805 | ||
769 | rcv_desc = &recv_ctx->rcv_desc[desc_ctx]; | 806 | rcv_desc = &recv_ctx->rcv_desc[desc_ctx]; |
807 | if (unlikely(index > rcv_desc->max_rx_desc_count)) { | ||
808 | DPRINTK(ERR, "Got a buffer index:%x Max is %x\n", | ||
809 | index, rcv_desc->max_rx_desc_count); | ||
810 | return; | ||
811 | } | ||
770 | buffer = &rcv_desc->rx_buf_arr[index]; | 812 | buffer = &rcv_desc->rx_buf_arr[index]; |
813 | if (desc_ctx == RCV_DESC_LRO_CTXID) { | ||
814 | buffer->lro_current_frags++; | ||
815 | if (netxen_get_sts_desc_lro_last_frag(desc)) { | ||
816 | buffer->lro_expected_frags = | ||
817 | netxen_get_sts_desc_lro_cnt(desc); | ||
818 | buffer->lro_length = length; | ||
819 | } | ||
820 | if (buffer->lro_current_frags != buffer->lro_expected_frags) { | ||
821 | if (buffer->lro_expected_frags != 0) { | ||
822 | printk("LRO: (refhandle:%x) recv frag." | ||
823 | "wait for last. flags: %x expected:%d" | ||
824 | "have:%d\n", index, | ||
825 | netxen_get_sts_desc_lro_last_frag(desc), | ||
826 | buffer->lro_expected_frags, | ||
827 | buffer->lro_current_frags); | ||
828 | } | ||
829 | return; | ||
830 | } | ||
831 | } | ||
771 | 832 | ||
772 | pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size, | 833 | pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size, |
773 | PCI_DMA_FROMDEVICE); | 834 | PCI_DMA_FROMDEVICE); |
774 | 835 | ||
775 | skb = (struct sk_buff *)buffer->skb; | 836 | skb = (struct sk_buff *)buffer->skb; |
776 | 837 | ||
777 | if (likely(STATUS_DESC_STATUS(desc) == STATUS_CKSUM_OK)) { | 838 | if (likely(netxen_get_sts_status(desc) == STATUS_CKSUM_OK)) { |
778 | port->stats.csummed++; | 839 | port->stats.csummed++; |
779 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 840 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
780 | } else | 841 | } |
781 | skb->ip_summed = CHECKSUM_NONE; | ||
782 | skb->dev = netdev; | 842 | skb->dev = netdev; |
783 | skb_put(skb, length); | 843 | if (desc_ctx == RCV_DESC_LRO_CTXID) { |
844 | /* True length was only available on the last pkt */ | ||
845 | skb_put(skb, buffer->lro_length); | ||
846 | } else { | ||
847 | skb_put(skb, length); | ||
848 | } | ||
849 | |||
784 | skb->protocol = eth_type_trans(skb, netdev); | 850 | skb->protocol = eth_type_trans(skb, netdev); |
785 | 851 | ||
786 | ret = netif_receive_skb(skb); | 852 | ret = netif_receive_skb(skb); |
@@ -826,6 +892,8 @@ netxen_process_rcv(struct netxen_adapter *adapter, int ctxid, | |||
826 | adapter->stats.post_called++; | 892 | adapter->stats.post_called++; |
827 | buffer->skb = NULL; | 893 | buffer->skb = NULL; |
828 | buffer->state = NETXEN_BUFFER_FREE; | 894 | buffer->state = NETXEN_BUFFER_FREE; |
895 | buffer->lro_current_frags = 0; | ||
896 | buffer->lro_expected_frags = 0; | ||
829 | 897 | ||
830 | port->stats.no_rcv++; | 898 | port->stats.no_rcv++; |
831 | port->stats.rxbytes += length; | 899 | port->stats.rxbytes += length; |
@@ -838,6 +906,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max) | |||
838 | struct status_desc *desc_head = recv_ctx->rcv_status_desc_head; | 906 | struct status_desc *desc_head = recv_ctx->rcv_status_desc_head; |
839 | struct status_desc *desc; /* used to read status desc here */ | 907 | struct status_desc *desc; /* used to read status desc here */ |
840 | u32 consumer = recv_ctx->status_rx_consumer; | 908 | u32 consumer = recv_ctx->status_rx_consumer; |
909 | u32 producer = 0; | ||
841 | int count = 0, ring; | 910 | int count = 0, ring; |
842 | 911 | ||
843 | DPRINTK(INFO, "procesing receive\n"); | 912 | DPRINTK(INFO, "procesing receive\n"); |
@@ -849,18 +918,22 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max) | |||
849 | */ | 918 | */ |
850 | while (count < max) { | 919 | while (count < max) { |
851 | desc = &desc_head[consumer]; | 920 | desc = &desc_head[consumer]; |
852 | if (!((le16_to_cpu(desc->owner)) & STATUS_OWNER_HOST)) { | 921 | if (! |
853 | DPRINTK(ERR, "desc %p ownedby %x\n", desc, desc->owner); | 922 | (le16_to_cpu(netxen_get_sts_owner(desc)) & |
923 | STATUS_OWNER_HOST)) { | ||
924 | DPRINTK(ERR, "desc %p ownedby %x\n", desc, | ||
925 | netxen_get_sts_owner(desc)); | ||
854 | break; | 926 | break; |
855 | } | 927 | } |
856 | netxen_process_rcv(adapter, ctxid, desc); | 928 | netxen_process_rcv(adapter, ctxid, desc); |
857 | desc->owner = STATUS_OWNER_PHANTOM; | 929 | netxen_clear_sts_owner(desc); |
930 | netxen_set_sts_owner(desc, STATUS_OWNER_PHANTOM); | ||
858 | consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1); | 931 | consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1); |
859 | count++; | 932 | count++; |
860 | } | 933 | } |
861 | if (count) { | 934 | if (count) { |
862 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { | 935 | for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) { |
863 | netxen_post_rx_buffers(adapter, ctxid, ring); | 936 | netxen_post_rx_buffers_nodb(adapter, ctxid, ring); |
864 | } | 937 | } |
865 | } | 938 | } |
866 | 939 | ||
@@ -868,6 +941,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max) | |||
868 | if (count) { | 941 | if (count) { |
869 | adapter->stats.process_rcv++; | 942 | adapter->stats.process_rcv++; |
870 | recv_ctx->status_rx_consumer = consumer; | 943 | recv_ctx->status_rx_consumer = consumer; |
944 | recv_ctx->status_rx_producer = producer; | ||
871 | 945 | ||
872 | /* Window = 1 */ | 946 | /* Window = 1 */ |
873 | writel(consumer, | 947 | writel(consumer, |
@@ -880,12 +954,13 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max) | |||
880 | } | 954 | } |
881 | 955 | ||
882 | /* Process Command status ring */ | 956 | /* Process Command status ring */ |
883 | void netxen_process_cmd_ring(unsigned long data) | 957 | int netxen_process_cmd_ring(unsigned long data) |
884 | { | 958 | { |
885 | u32 last_consumer; | 959 | u32 last_consumer; |
886 | u32 consumer; | 960 | u32 consumer; |
887 | struct netxen_adapter *adapter = (struct netxen_adapter *)data; | 961 | struct netxen_adapter *adapter = (struct netxen_adapter *)data; |
888 | int count = 0; | 962 | int count1 = 0; |
963 | int count2 = 0; | ||
889 | struct netxen_cmd_buffer *buffer; | 964 | struct netxen_cmd_buffer *buffer; |
890 | struct netxen_port *port; /* port #1 */ | 965 | struct netxen_port *port; /* port #1 */ |
891 | struct netxen_port *nport; | 966 | struct netxen_port *nport; |
@@ -894,6 +969,7 @@ void netxen_process_cmd_ring(unsigned long data) | |||
894 | u32 i; | 969 | u32 i; |
895 | struct sk_buff *skb = NULL; | 970 | struct sk_buff *skb = NULL; |
896 | int p; | 971 | int p; |
972 | int done; | ||
897 | 973 | ||
898 | spin_lock(&adapter->tx_lock); | 974 | spin_lock(&adapter->tx_lock); |
899 | last_consumer = adapter->last_cmd_consumer; | 975 | last_consumer = adapter->last_cmd_consumer; |
@@ -903,14 +979,13 @@ void netxen_process_cmd_ring(unsigned long data) | |||
903 | * number as part of the descriptor. This way we will be able to get | 979 | * number as part of the descriptor. This way we will be able to get |
904 | * the netdev which is associated with that device. | 980 | * the netdev which is associated with that device. |
905 | */ | 981 | */ |
906 | consumer = | ||
907 | readl(NETXEN_CRB_NORMALIZE(adapter, CRB_CMD_CONSUMER_OFFSET)); | ||
908 | 982 | ||
983 | consumer = *(adapter->cmd_consumer); | ||
909 | if (last_consumer == consumer) { /* Ring is empty */ | 984 | if (last_consumer == consumer) { /* Ring is empty */ |
910 | DPRINTK(INFO, "last_consumer %d == consumer %d\n", | 985 | DPRINTK(INFO, "last_consumer %d == consumer %d\n", |
911 | last_consumer, consumer); | 986 | last_consumer, consumer); |
912 | spin_unlock(&adapter->tx_lock); | 987 | spin_unlock(&adapter->tx_lock); |
913 | return; | 988 | return 1; |
914 | } | 989 | } |
915 | 990 | ||
916 | adapter->proc_cmd_buf_counter++; | 991 | adapter->proc_cmd_buf_counter++; |
@@ -921,7 +996,7 @@ void netxen_process_cmd_ring(unsigned long data) | |||
921 | */ | 996 | */ |
922 | spin_unlock(&adapter->tx_lock); | 997 | spin_unlock(&adapter->tx_lock); |
923 | 998 | ||
924 | while ((last_consumer != consumer) && (count < MAX_STATUS_HANDLE)) { | 999 | while ((last_consumer != consumer) && (count1 < MAX_STATUS_HANDLE)) { |
925 | buffer = &adapter->cmd_buf_arr[last_consumer]; | 1000 | buffer = &adapter->cmd_buf_arr[last_consumer]; |
926 | port = adapter->port[buffer->port]; | 1001 | port = adapter->port[buffer->port]; |
927 | pdev = port->pdev; | 1002 | pdev = port->pdev; |
@@ -947,24 +1022,25 @@ void netxen_process_cmd_ring(unsigned long data) | |||
947 | && netif_carrier_ok(port->netdev)) | 1022 | && netif_carrier_ok(port->netdev)) |
948 | && ((jiffies - port->netdev->trans_start) > | 1023 | && ((jiffies - port->netdev->trans_start) > |
949 | port->netdev->watchdog_timeo)) { | 1024 | port->netdev->watchdog_timeo)) { |
950 | schedule_work(&port->adapter->tx_timeout_task); | 1025 | SCHEDULE_WORK(port->adapter->tx_timeout_task |
1026 | + port->portnum); | ||
951 | } | 1027 | } |
952 | 1028 | ||
953 | last_consumer = get_next_index(last_consumer, | 1029 | last_consumer = get_next_index(last_consumer, |
954 | adapter->max_tx_desc_count); | 1030 | adapter->max_tx_desc_count); |
955 | count++; | 1031 | count1++; |
956 | } | 1032 | } |
957 | adapter->stats.noxmitdone += count; | 1033 | adapter->stats.noxmitdone += count1; |
958 | 1034 | ||
959 | count = 0; | 1035 | count2 = 0; |
960 | spin_lock(&adapter->tx_lock); | 1036 | spin_lock(&adapter->tx_lock); |
961 | if ((--adapter->proc_cmd_buf_counter) == 0) { | 1037 | if ((--adapter->proc_cmd_buf_counter) == 0) { |
962 | adapter->last_cmd_consumer = last_consumer; | 1038 | adapter->last_cmd_consumer = last_consumer; |
963 | while ((adapter->last_cmd_consumer != consumer) | 1039 | while ((adapter->last_cmd_consumer != consumer) |
964 | && (count < MAX_STATUS_HANDLE)) { | 1040 | && (count2 < MAX_STATUS_HANDLE)) { |
965 | buffer = | 1041 | buffer = |
966 | &adapter->cmd_buf_arr[adapter->last_cmd_consumer]; | 1042 | &adapter->cmd_buf_arr[adapter->last_cmd_consumer]; |
967 | count++; | 1043 | count2++; |
968 | if (buffer->skb) | 1044 | if (buffer->skb) |
969 | break; | 1045 | break; |
970 | else | 1046 | else |
@@ -973,7 +1049,7 @@ void netxen_process_cmd_ring(unsigned long data) | |||
973 | adapter->max_tx_desc_count); | 1049 | adapter->max_tx_desc_count); |
974 | } | 1050 | } |
975 | } | 1051 | } |
976 | if (count) { | 1052 | if (count1 || count2) { |
977 | for (p = 0; p < adapter->ahw.max_ports; p++) { | 1053 | for (p = 0; p < adapter->ahw.max_ports; p++) { |
978 | nport = adapter->port[p]; | 1054 | nport = adapter->port[p]; |
979 | if (netif_queue_stopped(nport->netdev) | 1055 | if (netif_queue_stopped(nport->netdev) |
@@ -983,10 +1059,30 @@ void netxen_process_cmd_ring(unsigned long data) | |||
983 | } | 1059 | } |
984 | } | 1060 | } |
985 | } | 1061 | } |
1062 | /* | ||
1063 | * If everything is freed up to consumer then check if the ring is full | ||
1064 | * If the ring is full then check if more needs to be freed and | ||
1065 | * schedule the call back again. | ||
1066 | * | ||
1067 | * This happens when there are 2 CPUs. One could be freeing and the | ||
1068 | * other filling it. If the ring is full when we get out of here and | ||
1069 | * the card has already interrupted the host then the host can miss the | ||
1070 | * interrupt. | ||
1071 | * | ||
1072 | * There is still a possible race condition and the host could miss an | ||
1073 | * interrupt. The card has to take care of this. | ||
1074 | */ | ||
1075 | if (adapter->last_cmd_consumer == consumer && | ||
1076 | (((adapter->cmd_producer + 1) % | ||
1077 | adapter->max_tx_desc_count) == adapter->last_cmd_consumer)) { | ||
1078 | consumer = *(adapter->cmd_consumer); | ||
1079 | } | ||
1080 | done = (adapter->last_cmd_consumer == consumer); | ||
986 | 1081 | ||
987 | spin_unlock(&adapter->tx_lock); | 1082 | spin_unlock(&adapter->tx_lock); |
988 | DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer, | 1083 | DPRINTK(INFO, "last consumer is %d in %s\n", last_consumer, |
989 | __FUNCTION__); | 1084 | __FUNCTION__); |
1085 | return (done); | ||
990 | } | 1086 | } |
991 | 1087 | ||
992 | /* | 1088 | /* |
@@ -998,17 +1094,16 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
998 | struct sk_buff *skb; | 1094 | struct sk_buff *skb; |
999 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]); | 1095 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]); |
1000 | struct netxen_rcv_desc_ctx *rcv_desc = NULL; | 1096 | struct netxen_rcv_desc_ctx *rcv_desc = NULL; |
1001 | struct netxen_recv_crb *crbarea = &recv_crb_registers[ctx]; | 1097 | uint producer; |
1002 | struct netxen_rcv_desc_crb *rcv_desc_crb = NULL; | ||
1003 | u32 producer; | ||
1004 | struct rcv_desc *pdesc; | 1098 | struct rcv_desc *pdesc; |
1005 | struct netxen_rx_buffer *buffer; | 1099 | struct netxen_rx_buffer *buffer; |
1006 | int count = 0; | 1100 | int count = 0; |
1007 | int index = 0; | 1101 | int index = 0; |
1102 | netxen_ctx_msg msg = 0; | ||
1103 | dma_addr_t dma; | ||
1008 | 1104 | ||
1009 | adapter->stats.post_called++; | 1105 | adapter->stats.post_called++; |
1010 | rcv_desc = &recv_ctx->rcv_desc[ringid]; | 1106 | rcv_desc = &recv_ctx->rcv_desc[ringid]; |
1011 | rcv_desc_crb = &crbarea->rcv_desc_crb[ringid]; | ||
1012 | 1107 | ||
1013 | producer = rcv_desc->producer; | 1108 | producer = rcv_desc->producer; |
1014 | index = rcv_desc->begin_alloc; | 1109 | index = rcv_desc->begin_alloc; |
@@ -1018,6 +1113,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1018 | skb = dev_alloc_skb(rcv_desc->skb_size); | 1113 | skb = dev_alloc_skb(rcv_desc->skb_size); |
1019 | if (unlikely(!skb)) { | 1114 | if (unlikely(!skb)) { |
1020 | /* | 1115 | /* |
1116 | * TODO | ||
1021 | * We need to schedule the posting of buffers to the pegs. | 1117 | * We need to schedule the posting of buffers to the pegs. |
1022 | */ | 1118 | */ |
1023 | rcv_desc->begin_alloc = index; | 1119 | rcv_desc->begin_alloc = index; |
@@ -1025,9 +1121,105 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1025 | " allocated only %d buffers\n", count); | 1121 | " allocated only %d buffers\n", count); |
1026 | break; | 1122 | break; |
1027 | } | 1123 | } |
1124 | |||
1125 | count++; /* now there should be no failure */ | ||
1126 | pdesc = &rcv_desc->desc_head[producer]; | ||
1127 | |||
1128 | #if defined(XGB_DEBUG) | ||
1129 | *(unsigned long *)(skb->head) = 0xc0debabe; | ||
1130 | if (skb_is_nonlinear(skb)) { | ||
1131 | printk("Allocated SKB @%p is nonlinear\n"); | ||
1132 | } | ||
1133 | #endif | ||
1134 | skb_reserve(skb, 2); | ||
1135 | /* This will be setup when we receive the | ||
1136 | * buffer after it has been filled FSL TBD TBD | ||
1137 | * skb->dev = netdev; | ||
1138 | */ | ||
1139 | dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size, | ||
1140 | PCI_DMA_FROMDEVICE); | ||
1141 | pdesc->addr_buffer = dma; | ||
1142 | buffer->skb = skb; | ||
1143 | buffer->state = NETXEN_BUFFER_BUSY; | ||
1144 | buffer->dma = dma; | ||
1145 | /* make a rcv descriptor */ | ||
1146 | pdesc->reference_handle = buffer->ref_handle; | ||
1147 | pdesc->buffer_length = rcv_desc->dma_size; | ||
1148 | DPRINTK(INFO, "done writing descripter\n"); | ||
1149 | producer = | ||
1150 | get_next_index(producer, rcv_desc->max_rx_desc_count); | ||
1151 | index = get_next_index(index, rcv_desc->max_rx_desc_count); | ||
1152 | buffer = &rcv_desc->rx_buf_arr[index]; | ||
1153 | } | ||
1154 | /* if we did allocate buffers, then write the count to Phantom */ | ||
1155 | if (count) { | ||
1156 | rcv_desc->begin_alloc = index; | ||
1157 | rcv_desc->rcv_pending += count; | ||
1158 | adapter->stats.lastposted = count; | ||
1159 | adapter->stats.posted += count; | ||
1160 | rcv_desc->producer = producer; | ||
1161 | if (rcv_desc->rcv_free >= 32) { | ||
1162 | rcv_desc->rcv_free = 0; | ||
1163 | /* Window = 1 */ | ||
1164 | writel((producer - 1) & | ||
1165 | (rcv_desc->max_rx_desc_count - 1), | ||
1166 | NETXEN_CRB_NORMALIZE(adapter, | ||
1167 | recv_crb_registers[0]. | ||
1168 | rcv_desc_crb[ringid]. | ||
1169 | crb_rcv_producer_offset)); | ||
1170 | /* | ||
1171 | * Write a doorbell msg to tell phanmon of change in | ||
1172 | * receive ring producer | ||
1173 | */ | ||
1174 | netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); | ||
1175 | netxen_set_msg_privid(msg); | ||
1176 | netxen_set_msg_count(msg, | ||
1177 | ((producer - | ||
1178 | 1) & (rcv_desc-> | ||
1179 | max_rx_desc_count - 1))); | ||
1180 | netxen_set_msg_ctxid(msg, 0); | ||
1181 | netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); | ||
1182 | writel(msg, | ||
1183 | DB_NORMALIZE(adapter, | ||
1184 | NETXEN_RCV_PRODUCER_OFFSET)); | ||
1185 | } | ||
1186 | } | ||
1187 | } | ||
1188 | |||
1189 | void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ctx, | ||
1190 | uint32_t ringid) | ||
1191 | { | ||
1192 | struct pci_dev *pdev = adapter->ahw.pdev; | ||
1193 | struct sk_buff *skb; | ||
1194 | struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]); | ||
1195 | struct netxen_rcv_desc_ctx *rcv_desc = NULL; | ||
1196 | u32 producer; | ||
1197 | struct rcv_desc *pdesc; | ||
1198 | struct netxen_rx_buffer *buffer; | ||
1199 | int count = 0; | ||
1200 | int index = 0; | ||
1201 | |||
1202 | adapter->stats.post_called++; | ||
1203 | rcv_desc = &recv_ctx->rcv_desc[ringid]; | ||
1204 | |||
1205 | producer = rcv_desc->producer; | ||
1206 | index = rcv_desc->begin_alloc; | ||
1207 | buffer = &rcv_desc->rx_buf_arr[index]; | ||
1208 | /* We can start writing rx descriptors into the phantom memory. */ | ||
1209 | while (buffer->state == NETXEN_BUFFER_FREE) { | ||
1210 | skb = dev_alloc_skb(rcv_desc->skb_size); | ||
1211 | if (unlikely(!skb)) { | ||
1212 | /* | ||
1213 | * We need to schedule the posting of buffers to the pegs. | ||
1214 | */ | ||
1215 | rcv_desc->begin_alloc = index; | ||
1216 | DPRINTK(ERR, "netxen_post_rx_buffers_nodb: " | ||
1217 | " allocated only %d buffers\n", count); | ||
1218 | break; | ||
1219 | } | ||
1028 | count++; /* now there should be no failure */ | 1220 | count++; /* now there should be no failure */ |
1029 | pdesc = &rcv_desc->desc_head[producer]; | 1221 | pdesc = &rcv_desc->desc_head[producer]; |
1030 | skb_reserve(skb, NET_IP_ALIGN); | 1222 | skb_reserve(skb, 2); |
1031 | /* | 1223 | /* |
1032 | * This will be setup when we receive the | 1224 | * This will be setup when we receive the |
1033 | * buffer after it has been filled | 1225 | * buffer after it has been filled |
@@ -1038,6 +1230,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1038 | buffer->dma = pci_map_single(pdev, skb->data, | 1230 | buffer->dma = pci_map_single(pdev, skb->data, |
1039 | rcv_desc->dma_size, | 1231 | rcv_desc->dma_size, |
1040 | PCI_DMA_FROMDEVICE); | 1232 | PCI_DMA_FROMDEVICE); |
1233 | |||
1041 | /* make a rcv descriptor */ | 1234 | /* make a rcv descriptor */ |
1042 | pdesc->reference_handle = le16_to_cpu(buffer->ref_handle); | 1235 | pdesc->reference_handle = le16_to_cpu(buffer->ref_handle); |
1043 | pdesc->buffer_length = le16_to_cpu(rcv_desc->dma_size); | 1236 | pdesc->buffer_length = le16_to_cpu(rcv_desc->dma_size); |
@@ -1062,7 +1255,8 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1062 | writel((producer - 1) & | 1255 | writel((producer - 1) & |
1063 | (rcv_desc->max_rx_desc_count - 1), | 1256 | (rcv_desc->max_rx_desc_count - 1), |
1064 | NETXEN_CRB_NORMALIZE(adapter, | 1257 | NETXEN_CRB_NORMALIZE(adapter, |
1065 | rcv_desc_crb-> | 1258 | recv_crb_registers[0]. |
1259 | rcv_desc_crb[ringid]. | ||
1066 | crb_rcv_producer_offset)); | 1260 | crb_rcv_producer_offset)); |
1067 | wmb(); | 1261 | wmb(); |
1068 | } | 1262 | } |
@@ -1195,8 +1389,8 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data, | |||
1195 | 1389 | ||
1196 | switch (data.cmd) { | 1390 | switch (data.cmd) { |
1197 | case netxen_nic_cmd_pci_read: | 1391 | case netxen_nic_cmd_pci_read: |
1198 | if ((retval = netxen_nic_hw_read_wx(adapter, data.off, | 1392 | if ((retval = netxen_nic_hw_read_ioctl(adapter, data.off, |
1199 | &(data.u), data.size))) | 1393 | &(data.u), data.size))) |
1200 | goto error_out; | 1394 | goto error_out; |
1201 | if (copy_to_user | 1395 | if (copy_to_user |
1202 | ((void __user *)&(up_data->u), &(data.u), data.size)) { | 1396 | ((void __user *)&(up_data->u), &(data.u), data.size)) { |
@@ -1209,8 +1403,35 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data, | |||
1209 | break; | 1403 | break; |
1210 | 1404 | ||
1211 | case netxen_nic_cmd_pci_write: | 1405 | case netxen_nic_cmd_pci_write: |
1212 | data.rv = netxen_nic_hw_write_wx(adapter, data.off, &(data.u), | 1406 | if ((retval = netxen_nic_hw_write_ioctl(adapter, data.off, |
1213 | data.size); | 1407 | &(data.u), data.size))) |
1408 | goto error_out; | ||
1409 | data.rv = 0; | ||
1410 | break; | ||
1411 | |||
1412 | case netxen_nic_cmd_pci_mem_read: | ||
1413 | if (netxen_nic_pci_mem_read_ioctl(adapter, data.off, &(data.u), | ||
1414 | data.size)) { | ||
1415 | DPRINTK(ERR, "Failed to read the data.\n"); | ||
1416 | retval = -EFAULT; | ||
1417 | goto error_out; | ||
1418 | } | ||
1419 | if (copy_to_user | ||
1420 | ((void __user *)&(up_data->u), &(data.u), data.size)) { | ||
1421 | DPRINTK(ERR, "bad copy to userland: %d\n", | ||
1422 | (int)sizeof(data)); | ||
1423 | retval = -EFAULT; | ||
1424 | goto error_out; | ||
1425 | } | ||
1426 | data.rv = 0; | ||
1427 | break; | ||
1428 | |||
1429 | case netxen_nic_cmd_pci_mem_write: | ||
1430 | if ((retval = netxen_nic_pci_mem_write_ioctl(adapter, data.off, | ||
1431 | &(data.u), | ||
1432 | data.size))) | ||
1433 | goto error_out; | ||
1434 | data.rv = 0; | ||
1214 | break; | 1435 | break; |
1215 | 1436 | ||
1216 | case netxen_nic_cmd_pci_config_read: | 1437 | case netxen_nic_cmd_pci_config_read: |
@@ -1295,7 +1516,7 @@ netxen_nic_do_ioctl(struct netxen_adapter *adapter, void *u_data, | |||
1295 | retval = -EOPNOTSUPP; | 1516 | retval = -EOPNOTSUPP; |
1296 | goto error_out; | 1517 | goto error_out; |
1297 | } | 1518 | } |
1298 | put_user(data.rv, (u16 __user *) (&(up_data->rv))); | 1519 | put_user(data.rv, (&(up_data->rv))); |
1299 | DPRINTK(INFO, "done ioctl for %p well.\n", adapter); | 1520 | DPRINTK(INFO, "done ioctl for %p well.\n", adapter); |
1300 | 1521 | ||
1301 | error_out: | 1522 | error_out: |