diff options
author | Dhananjay Phadke <dhananjay@netxen.com> | 2009-03-13 10:52:03 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-03-14 17:00:32 -0400 |
commit | 438627c77b877e445a4b918a50ff910a5ea2a12d (patch) | |
tree | 254abcb0c57f6c57947428f9c8ec38b6904801fc /drivers/net/netxen/netxen_nic_init.c | |
parent | 0b72e659a10ec50acbef90756bf04177b66c8266 (diff) |
netxen: sanitize variable names
o remove max_ prefix from ring sizes, since they don't really
represent max possible sizes.
o cleanup naming of rx ring types (normal, jumbo, lro).
o simplify logic to choose rx ring size: gig ports get half
the rx ring size of 10 gig ports.
Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/netxen/netxen_nic_init.c')
-rw-r--r-- | drivers/net/netxen/netxen_nic_init.c | 54 |
1 file changed, 22 insertions, 32 deletions
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index 120b480c1e82..d722589b1ce9 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -153,7 +153,7 @@ void netxen_release_rx_buffers(struct netxen_adapter *adapter) | |||
153 | recv_ctx = &adapter->recv_ctx; | 153 | recv_ctx = &adapter->recv_ctx; |
154 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | 154 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { |
155 | rds_ring = &recv_ctx->rds_rings[ring]; | 155 | rds_ring = &recv_ctx->rds_rings[ring]; |
156 | for (i = 0; i < rds_ring->max_rx_desc_count; ++i) { | 156 | for (i = 0; i < rds_ring->num_desc; ++i) { |
157 | rx_buf = &(rds_ring->rx_buf_arr[i]); | 157 | rx_buf = &(rds_ring->rx_buf_arr[i]); |
158 | if (rx_buf->state == NETXEN_BUFFER_FREE) | 158 | if (rx_buf->state == NETXEN_BUFFER_FREE) |
159 | continue; | 159 | continue; |
@@ -174,7 +174,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
174 | int i, j; | 174 | int i, j; |
175 | 175 | ||
176 | cmd_buf = adapter->cmd_buf_arr; | 176 | cmd_buf = adapter->cmd_buf_arr; |
177 | for (i = 0; i < adapter->max_tx_desc_count; i++) { | 177 | for (i = 0; i < adapter->num_txd; i++) { |
178 | buffrag = cmd_buf->frag_array; | 178 | buffrag = cmd_buf->frag_array; |
179 | if (buffrag->dma) { | 179 | if (buffrag->dma) { |
180 | pci_unmap_single(adapter->pdev, buffrag->dma, | 180 | pci_unmap_single(adapter->pdev, buffrag->dma, |
@@ -190,7 +190,6 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter) | |||
190 | buffrag->dma = 0ULL; | 190 | buffrag->dma = 0ULL; |
191 | } | 191 | } |
192 | } | 192 | } |
193 | /* Free the skb we received in netxen_nic_xmit_frame */ | ||
194 | if (cmd_buf->skb) { | 193 | if (cmd_buf->skb) { |
195 | dev_kfree_skb_any(cmd_buf->skb); | 194 | dev_kfree_skb_any(cmd_buf->skb); |
196 | cmd_buf->skb = NULL; | 195 | cmd_buf->skb = NULL; |
@@ -241,11 +240,9 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) | |||
241 | recv_ctx = &adapter->recv_ctx; | 240 | recv_ctx = &adapter->recv_ctx; |
242 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { | 241 | for (ring = 0; ring < adapter->max_rds_rings; ring++) { |
243 | rds_ring = &recv_ctx->rds_rings[ring]; | 242 | rds_ring = &recv_ctx->rds_rings[ring]; |
244 | switch (RCV_DESC_TYPE(ring)) { | 243 | switch (ring) { |
245 | case RCV_DESC_NORMAL: | 244 | case RCV_RING_NORMAL: |
246 | rds_ring->max_rx_desc_count = | 245 | rds_ring->num_desc = adapter->num_rxd; |
247 | adapter->max_rx_desc_count; | ||
248 | rds_ring->flags = RCV_DESC_NORMAL; | ||
249 | if (adapter->ahw.cut_through) { | 246 | if (adapter->ahw.cut_through) { |
250 | rds_ring->dma_size = | 247 | rds_ring->dma_size = |
251 | NX_CT_DEFAULT_RX_BUF_LEN; | 248 | NX_CT_DEFAULT_RX_BUF_LEN; |
@@ -258,10 +255,8 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) | |||
258 | } | 255 | } |
259 | break; | 256 | break; |
260 | 257 | ||
261 | case RCV_DESC_JUMBO: | 258 | case RCV_RING_JUMBO: |
262 | rds_ring->max_rx_desc_count = | 259 | rds_ring->num_desc = adapter->num_jumbo_rxd; |
263 | adapter->max_jumbo_rx_desc_count; | ||
264 | rds_ring->flags = RCV_DESC_JUMBO; | ||
265 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) | 260 | if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) |
266 | rds_ring->dma_size = | 261 | rds_ring->dma_size = |
267 | NX_P3_RX_JUMBO_BUF_MAX_LEN; | 262 | NX_P3_RX_JUMBO_BUF_MAX_LEN; |
@@ -273,9 +268,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) | |||
273 | break; | 268 | break; |
274 | 269 | ||
275 | case RCV_RING_LRO: | 270 | case RCV_RING_LRO: |
276 | rds_ring->max_rx_desc_count = | 271 | rds_ring->num_desc = adapter->num_lro_rxd; |
277 | adapter->max_lro_rx_desc_count; | ||
278 | rds_ring->flags = RCV_DESC_LRO; | ||
279 | rds_ring->dma_size = RX_LRO_DMA_MAP_LEN; | 272 | rds_ring->dma_size = RX_LRO_DMA_MAP_LEN; |
280 | rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH; | 273 | rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH; |
281 | break; | 274 | break; |
@@ -296,7 +289,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) | |||
296 | * Now go through all of them, set reference handles | 289 | * Now go through all of them, set reference handles |
297 | * and put them in the queues. | 290 | * and put them in the queues. |
298 | */ | 291 | */ |
299 | num_rx_bufs = rds_ring->max_rx_desc_count; | 292 | num_rx_bufs = rds_ring->num_desc; |
300 | rx_buf = rds_ring->rx_buf_arr; | 293 | rx_buf = rds_ring->rx_buf_arr; |
301 | for (i = 0; i < num_rx_bufs; i++) { | 294 | for (i = 0; i < num_rx_bufs; i++) { |
302 | list_add_tail(&rx_buf->list, | 295 | list_add_tail(&rx_buf->list, |
@@ -848,16 +841,15 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, | |||
848 | struct nx_host_rds_ring *rds_ring; | 841 | struct nx_host_rds_ring *rds_ring; |
849 | 842 | ||
850 | desc_ctx = netxen_get_sts_type(sts_data); | 843 | desc_ctx = netxen_get_sts_type(sts_data); |
851 | if (unlikely(desc_ctx >= NUM_RCV_DESC_RINGS)) { | 844 | if (unlikely(desc_ctx >= adapter->max_rds_rings)) |
852 | return; | 845 | return; |
853 | } | ||
854 | 846 | ||
855 | rds_ring = &recv_ctx->rds_rings[desc_ctx]; | 847 | rds_ring = &recv_ctx->rds_rings[desc_ctx]; |
856 | if (unlikely(index > rds_ring->max_rx_desc_count)) { | 848 | if (unlikely(index > rds_ring->num_desc)) |
857 | return; | 849 | return; |
858 | } | 850 | |
859 | buffer = &rds_ring->rx_buf_arr[index]; | 851 | buffer = &rds_ring->rx_buf_arr[index]; |
860 | if (desc_ctx == RCV_DESC_LRO_CTXID) { | 852 | if (desc_ctx == RCV_RING_LRO) { |
861 | buffer->lro_current_frags++; | 853 | buffer->lro_current_frags++; |
862 | if (netxen_get_sts_desc_lro_last_frag(desc)) { | 854 | if (netxen_get_sts_desc_lro_last_frag(desc)) { |
863 | buffer->lro_expected_frags = | 855 | buffer->lro_expected_frags = |
@@ -875,7 +867,7 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, | |||
875 | if (!skb) | 867 | if (!skb) |
876 | return; | 868 | return; |
877 | 869 | ||
878 | if (desc_ctx == RCV_DESC_LRO_CTXID) { | 870 | if (desc_ctx == RCV_RING_LRO) { |
879 | /* True length was only available on the last pkt */ | 871 | /* True length was only available on the last pkt */ |
880 | skb_put(skb, buffer->lro_length); | 872 | skb_put(skb, buffer->lro_length); |
881 | } else { | 873 | } else { |
@@ -921,8 +913,7 @@ netxen_process_rcv_ring(struct netxen_adapter *adapter, int max) | |||
921 | 913 | ||
922 | desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM); | 914 | desc->status_desc_data = cpu_to_le64(STATUS_OWNER_PHANTOM); |
923 | 915 | ||
924 | consumer = get_next_index(consumer, | 916 | consumer = get_next_index(consumer, adapter->num_rxd); |
925 | adapter->max_rx_desc_count); | ||
926 | count++; | 917 | count++; |
927 | } | 918 | } |
928 | 919 | ||
@@ -973,7 +964,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter) | |||
973 | } | 964 | } |
974 | 965 | ||
975 | last_consumer = get_next_index(last_consumer, | 966 | last_consumer = get_next_index(last_consumer, |
976 | adapter->max_tx_desc_count); | 967 | adapter->num_txd); |
977 | if (++count >= MAX_STATUS_HANDLE) | 968 | if (++count >= MAX_STATUS_HANDLE) |
978 | break; | 969 | break; |
979 | } | 970 | } |
@@ -1060,7 +1051,7 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid) | |||
1060 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); | 1051 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
1061 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); | 1052 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); |
1062 | 1053 | ||
1063 | producer = get_next_index(producer, rds_ring->max_rx_desc_count); | 1054 | producer = get_next_index(producer, rds_ring->num_desc); |
1064 | } | 1055 | } |
1065 | /* if we did allocate buffers, then write the count to Phantom */ | 1056 | /* if we did allocate buffers, then write the count to Phantom */ |
1066 | if (count) { | 1057 | if (count) { |
@@ -1068,7 +1059,7 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid) | |||
1068 | /* Window = 1 */ | 1059 | /* Window = 1 */ |
1069 | adapter->pci_write_normalize(adapter, | 1060 | adapter->pci_write_normalize(adapter, |
1070 | rds_ring->crb_rcv_producer, | 1061 | rds_ring->crb_rcv_producer, |
1071 | (producer-1) & (rds_ring->max_rx_desc_count-1)); | 1062 | (producer-1) & (rds_ring->num_desc-1)); |
1072 | 1063 | ||
1073 | if (adapter->fw_major < 4) { | 1064 | if (adapter->fw_major < 4) { |
1074 | /* | 1065 | /* |
@@ -1079,9 +1070,8 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid) | |||
1079 | netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); | 1070 | netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID); |
1080 | netxen_set_msg_privid(msg); | 1071 | netxen_set_msg_privid(msg); |
1081 | netxen_set_msg_count(msg, | 1072 | netxen_set_msg_count(msg, |
1082 | ((producer - | 1073 | ((producer - 1) & |
1083 | 1) & (rds_ring-> | 1074 | (rds_ring->num_desc - 1))); |
1084 | max_rx_desc_count - 1))); | ||
1085 | netxen_set_msg_ctxid(msg, adapter->portnum); | 1075 | netxen_set_msg_ctxid(msg, adapter->portnum); |
1086 | netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); | 1076 | netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); |
1087 | writel(msg, | 1077 | writel(msg, |
@@ -1141,7 +1131,7 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid) | |||
1141 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); | 1131 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); |
1142 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); | 1132 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); |
1143 | 1133 | ||
1144 | producer = get_next_index(producer, rds_ring->max_rx_desc_count); | 1134 | producer = get_next_index(producer, rds_ring->num_desc); |
1145 | } | 1135 | } |
1146 | 1136 | ||
1147 | /* if we did allocate buffers, then write the count to Phantom */ | 1137 | /* if we did allocate buffers, then write the count to Phantom */ |
@@ -1150,7 +1140,7 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, uint32_t ringid) | |||
1150 | /* Window = 1 */ | 1140 | /* Window = 1 */ |
1151 | adapter->pci_write_normalize(adapter, | 1141 | adapter->pci_write_normalize(adapter, |
1152 | rds_ring->crb_rcv_producer, | 1142 | rds_ring->crb_rcv_producer, |
1153 | (producer-1) & (rds_ring->max_rx_desc_count-1)); | 1143 | (producer - 1) & (rds_ring->num_desc - 1)); |
1154 | wmb(); | 1144 | wmb(); |
1155 | } | 1145 | } |
1156 | } | 1146 | } |