author     Dhananjay Phadke <dhananjay@netxen.com>   2009-03-13 10:52:05 -0400
committer  David S. Miller <davem@davemloft.net>     2009-03-14 17:00:33 -0400
commit     d8b100c5da003b6f8c410453e1e6e74ced8d1cc1
tree       75d895254bb70859ef5c3b4cab2d5da313af8e87  /drivers/net/netxen/netxen_nic_init.c
parent     9b3ef55c6ddbe8c7b76707eae9a77d874fe2cec0
netxen: add receive side scaling (rss) support
This patch enables the load-balancing capability of the firmware
and hardware to spray traffic onto different CPUs through
separate rx MSI-X interrupts.
The feature is enabled for NX3031; NX2031 (the older chip) will be
enabled later. It depends on MSI-X; compatibility with MSI and
legacy interrupts is maintained by falling back to a single rx ring.
Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/netxen/netxen_nic_init.c')
-rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 201
1 file changed, 124 insertions(+), 77 deletions(-)
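The shape of the RSS plumbing is easy to miss in the hunks below: each status (sds) ring gets its own MSI-X vector (sds_ring->irq is filled from msix_entries[] in netxen_alloc_sw_resources()), and each vector drives its own NAPI context, so each ring can be drained on a different CPU. A minimal sketch of that wiring follows, assuming a per-ring napi_struct embedded in nx_host_sds_ring; the real handler lives in netxen_nic_main.c, which this diffstat excludes, and the function names here are illustrative, not the driver's:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Illustrative only: one interrupt handler instance per sds ring. */
static irqreturn_t netxen_sds_msix_intr(int irq, void *data)
{
        struct nx_host_sds_ring *sds_ring = data;

        /* Hand this ring to NAPI; netxen_process_rcv_ring() runs in poll. */
        napi_schedule(&sds_ring->napi); /* assumes a per-ring napi_struct */
        return IRQ_HANDLED;
}

/* Illustrative only: request one vector per sds ring at open time. */
static int netxen_request_sds_irqs(struct netxen_adapter *adapter)
{
        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
        int ring, err;

        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
                struct nx_host_sds_ring *sds_ring = &recv_ctx->sds_rings[ring];

                /* sds_ring->irq was set from msix_entries[ring].vector */
                err = request_irq(sds_ring->irq, netxen_sds_msix_intr, 0,
                                adapter->netdev->name, sds_ring);
                if (err)
                        return err;
        }
        return 0;
}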
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 1b8f79f7f8ce..0759c35f16ac 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -50,7 +50,8 @@ static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
 #define NETXEN_NIC_XDMA_RESET   0x8000ff
 
 static void
-netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid);
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+                struct nx_host_rds_ring *rds_ring);
 
 static void crb_addr_transform_setup(void)
 {
@@ -222,19 +223,21 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 {
         struct netxen_recv_context *recv_ctx;
         struct nx_host_rds_ring *rds_ring;
+        struct nx_host_sds_ring *sds_ring;
         struct netxen_rx_buffer *rx_buf;
         int ring, i, num_rx_bufs;
 
         struct netxen_cmd_buffer *cmd_buf_arr;
         struct net_device *netdev = adapter->netdev;
 
-        cmd_buf_arr = (struct netxen_cmd_buffer *)vmalloc(TX_RINGSIZE);
+        cmd_buf_arr =
+                (struct netxen_cmd_buffer *)vmalloc(TX_BUFF_RINGSIZE(adapter));
         if (cmd_buf_arr == NULL) {
                 printk(KERN_ERR "%s: Failed to allocate cmd buffer ring\n",
                        netdev->name);
                 return -ENOMEM;
         }
-        memset(cmd_buf_arr, 0, TX_RINGSIZE);
+        memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(adapter));
         adapter->cmd_buf_arr = cmd_buf_arr;
 
         recv_ctx = &adapter->recv_ctx;
@@ -275,7 +278,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 
                 }
                 rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
-                        vmalloc(RCV_BUFFSIZE);
+                        vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
                 if (rds_ring->rx_buf_arr == NULL) {
                         printk(KERN_ERR "%s: Failed to allocate "
                                 "rx buffer ring %d\n",
@@ -283,7 +286,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
                         /* free whatever was already allocated */
                         goto err_out;
                 }
-                memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
+                memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
                 INIT_LIST_HEAD(&rds_ring->free_list);
                 /*
                  * Now go through all of them, set reference handles
@@ -298,6 +301,19 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
                         rx_buf->state = NETXEN_BUFFER_FREE;
                         rx_buf++;
                 }
+                spin_lock_init(&rds_ring->lock);
+        }
+
+        for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+                sds_ring = &recv_ctx->sds_rings[ring];
+                sds_ring->irq = adapter->msix_entries[ring].vector;
+                sds_ring->clean_tx = (ring == 0);
+                sds_ring->post_rxd = (ring == 0);
+                sds_ring->adapter = adapter;
+                sds_ring->num_desc = adapter->num_rxd;
+
+                for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+                        INIT_LIST_HEAD(&sds_ring->free_list[i]);
         }
 
         return 0;
@@ -793,6 +809,40 @@ int netxen_receive_peg_ready(struct netxen_adapter *adapter)
         return 0;
 }
 
+static int
+netxen_alloc_rx_skb(struct netxen_adapter *adapter,
+                struct nx_host_rds_ring *rds_ring,
+                struct netxen_rx_buffer *buffer)
+{
+        struct sk_buff *skb;
+        dma_addr_t dma;
+        struct pci_dev *pdev = adapter->pdev;
+
+        buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+        if (!buffer->skb)
+                return 1;
+
+        skb = buffer->skb;
+
+        if (!adapter->ahw.cut_through)
+                skb_reserve(skb, 2);
+
+        dma = pci_map_single(pdev, skb->data,
+                        rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+        if (pci_dma_mapping_error(pdev, dma)) {
+                dev_kfree_skb_any(skb);
+                buffer->skb = NULL;
+                return 1;
+        }
+
+        buffer->skb = skb;
+        buffer->dma = dma;
+        buffer->state = NETXEN_BUFFER_BUSY;
+
+        return 0;
+}
+
 static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
                 struct nx_host_rds_ring *rds_ring, u16 index, u16 cksum)
 {
@@ -817,14 +867,12 @@ static struct sk_buff *netxen_process_rxbuf(struct netxen_adapter *adapter,
         skb->dev = adapter->netdev;
 
         buffer->skb = NULL;
-
 no_skb:
         buffer->state = NETXEN_BUFFER_FREE;
-        list_add_tail(&buffer->list, &rds_ring->free_list);
         return skb;
 }
 
-static void
+static struct netxen_rx_buffer *
 netxen_process_rcv(struct netxen_adapter *adapter,
                 int ring, int index, int length, int cksum, int pkt_offset)
 {
@@ -835,13 +883,13 @@ netxen_process_rcv(struct netxen_adapter *adapter,
         struct nx_host_rds_ring *rds_ring = &recv_ctx->rds_rings[ring];
 
         if (unlikely(index > rds_ring->num_desc))
-                return;
+                return NULL;
 
         buffer = &rds_ring->rx_buf_arr[index];
 
         skb = netxen_process_rxbuf(adapter, rds_ring, index, cksum);
         if (!skb)
-                return;
+                return buffer;
 
         if (length > rds_ring->skb_size)
                 skb_put(skb, rds_ring->skb_size);
@@ -858,21 +906,31 @@ netxen_process_rcv(struct netxen_adapter *adapter,
 
         adapter->stats.no_rcv++;
         adapter->stats.rxbytes += length;
+
+        return buffer;
 }
 
+#define netxen_merge_rx_buffers(list, head) \
+        do { list_splice_tail_init(list, head); } while (0);
+
 int
-netxen_process_rcv_ring(struct netxen_adapter *adapter, int max)
+netxen_process_rcv_ring(struct nx_host_sds_ring *sds_ring, int max)
 {
-        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-        struct status_desc *desc_head = recv_ctx->rcv_status_desc_head;
+        struct netxen_adapter *adapter = sds_ring->adapter;
+
+        struct list_head *cur;
+
         struct status_desc *desc;
-        u32 consumer = recv_ctx->status_rx_consumer;
+        struct netxen_rx_buffer *rxbuf;
+
+        u32 consumer = sds_ring->consumer;
+
         int count = 0;
         u64 sts_data;
         int opcode, ring, index, length, cksum, pkt_offset;
 
         while (count < max) {
-                desc = &desc_head[consumer];
+                desc = &sds_ring->desc_head[consumer];
                 sts_data = le64_to_cpu(desc->status_desc_data);
 
                 if (!(sts_data & STATUS_OWNER_HOST))
@@ -889,22 +947,41 @@ netxen_process_rcv_ring(struct netxen_adapter *adapter, int max)
                 cksum = netxen_get_sts_status(sts_data);
                 pkt_offset = netxen_get_sts_pkt_offset(sts_data);
 
-                netxen_process_rcv(adapter, ring, index,
+                rxbuf = netxen_process_rcv(adapter, ring, index,
                                 length, cksum, pkt_offset);
 
+                if (rxbuf)
+                        list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
                 desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM);
 
-                consumer = get_next_index(consumer, adapter->num_rxd);
+                consumer = get_next_index(consumer, sds_ring->num_desc);
                 count++;
         }
 
-        for (ring = 0; ring < adapter->max_rds_rings; ring++)
-                netxen_post_rx_buffers_nodb(adapter, ring);
+        for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+                struct nx_host_rds_ring *rds_ring =
+                        &adapter->recv_ctx.rds_rings[ring];
+
+                if (!list_empty(&sds_ring->free_list[ring])) {
+                        list_for_each(cur, &sds_ring->free_list[ring]) {
+                                rxbuf = list_entry(cur,
+                                                struct netxen_rx_buffer, list);
+                                netxen_alloc_rx_skb(adapter, rds_ring, rxbuf);
+                        }
+                        spin_lock(&rds_ring->lock);
+                        netxen_merge_rx_buffers(&sds_ring->free_list[ring],
+                                                &rds_ring->free_list);
+                        spin_unlock(&rds_ring->lock);
+                }
+
+                netxen_post_rx_buffers_nodb(adapter, rds_ring);
+        }
 
         if (count) {
-                recv_ctx->status_rx_consumer = consumer;
+                sds_ring->consumer = consumer;
                 adapter->pci_write_normalize(adapter,
-                        recv_ctx->crb_sts_consumer, consumer);
+                        sds_ring->crb_sts_consumer, consumer);
         }
 
         return count;
@@ -921,6 +998,9 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
         struct netxen_skb_frag *frag;
         int done = 0;
 
+        if (!spin_trylock(&adapter->tx_clean_lock))
+                return 1;
+
         last_consumer = adapter->last_cmd_consumer;
         barrier(); /* cmd_consumer can change underneath */
         consumer = le32_to_cpu(*(adapter->cmd_consumer));
@@ -976,63 +1056,46 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
         barrier(); /* cmd_consumer can change underneath */
         consumer = le32_to_cpu(*(adapter->cmd_consumer));
         done = (last_consumer == consumer);
+        spin_unlock(&adapter->tx_clean_lock);
 
         return (done);
 }
 
 void
-netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid)
+netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
+                struct nx_host_rds_ring *rds_ring)
 {
-        struct pci_dev *pdev = adapter->pdev;
-        struct sk_buff *skb;
-        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-        struct nx_host_rds_ring *rds_ring = NULL;
-        uint producer;
         struct rcv_desc *pdesc;
         struct netxen_rx_buffer *buffer;
-        int count = 0;
+        int producer, count = 0;
         netxen_ctx_msg msg = 0;
-        dma_addr_t dma;
         struct list_head *head;
 
-        rds_ring = &recv_ctx->rds_rings[ringid];
-
         producer = rds_ring->producer;
-        head = &rds_ring->free_list;
 
+        spin_lock(&rds_ring->lock);
+        head = &rds_ring->free_list;
         while (!list_empty(head)) {
 
-                skb = dev_alloc_skb(rds_ring->skb_size);
-                if (unlikely(!skb)) {
-                        break;
-                }
-
-                if (!adapter->ahw.cut_through)
-                        skb_reserve(skb, 2);
+                buffer = list_entry(head->next, struct netxen_rx_buffer, list);
 
-                dma = pci_map_single(pdev, skb->data,
-                                rds_ring->dma_size, PCI_DMA_FROMDEVICE);
-                if (pci_dma_mapping_error(pdev, dma)) {
-                        dev_kfree_skb_any(skb);
-                        break;
+                if (!buffer->skb) {
+                        if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+                                break;
                 }
 
                 count++;
-                buffer = list_entry(head->next, struct netxen_rx_buffer, list);
                 list_del(&buffer->list);
 
-                buffer->skb = skb;
-                buffer->state = NETXEN_BUFFER_BUSY;
-                buffer->dma = dma;
-
                 /* make a rcv descriptor */
                 pdesc = &rds_ring->desc_head[producer];
-                pdesc->addr_buffer = cpu_to_le64(dma);
+                pdesc->addr_buffer = cpu_to_le64(buffer->dma);
                 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
                 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
 
                 producer = get_next_index(producer, rds_ring->num_desc);
         }
+        spin_unlock(&rds_ring->lock);
 
         if (count) {
                 rds_ring->producer = producer;
@@ -1061,48 +1124,31 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid)
 }
 
 static void
-netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid)
+netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
+                struct nx_host_rds_ring *rds_ring)
 {
-        struct pci_dev *pdev = adapter->pdev;
-        struct sk_buff *skb;
-        struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
-        struct nx_host_rds_ring *rds_ring = NULL;
-        u32 producer;
         struct rcv_desc *pdesc;
         struct netxen_rx_buffer *buffer;
-        int count = 0;
+        int producer, count = 0;
         struct list_head *head;
-        dma_addr_t dma;
-
-        rds_ring = &recv_ctx->rds_rings[ringid];
 
         producer = rds_ring->producer;
+        if (!spin_trylock(&rds_ring->lock))
+                return;
+
         head = &rds_ring->free_list;
         while (!list_empty(head)) {
 
-                skb = dev_alloc_skb(rds_ring->skb_size);
-                if (unlikely(!skb)) {
-                        break;
-                }
-
-                if (!adapter->ahw.cut_through)
-                        skb_reserve(skb, 2);
+                buffer = list_entry(head->next, struct netxen_rx_buffer, list);
 
-                dma = pci_map_single(pdev, skb->data,
-                                rds_ring->dma_size, PCI_DMA_FROMDEVICE);
-                if (pci_dma_mapping_error(pdev, dma)) {
-                        dev_kfree_skb_any(skb);
-                        break;
+                if (!buffer->skb) {
+                        if (netxen_alloc_rx_skb(adapter, rds_ring, buffer))
+                                break;
                 }
 
                 count++;
-                buffer = list_entry(head->next, struct netxen_rx_buffer, list);
                 list_del(&buffer->list);
 
-                buffer->skb = skb;
-                buffer->state = NETXEN_BUFFER_BUSY;
-                buffer->dma = dma;
-
                 /* make a rcv descriptor */
                 pdesc = &rds_ring->desc_head[producer];
                 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
@@ -1119,6 +1165,7 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid)
                         (producer - 1) & (rds_ring->num_desc - 1));
                 wmb();
         }
+        spin_unlock(&rds_ring->lock);
 }
 
 void netxen_nic_clear_stats(struct netxen_adapter *adapter)