Diffstat (limited to 'drivers/net/netxen/netxen_nic_init.c')
 -rw-r--r--  drivers/net/netxen/netxen_nic_init.c | 154
 1 file changed, 79 insertions(+), 75 deletions(-)
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 7c3fbc4a5723..d222436bd5bd 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -158,21 +158,21 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
 void netxen_release_rx_buffers(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
+	struct nx_host_rds_ring *rds_ring;
 	struct netxen_rx_buffer *rx_buf;
 	int i, ctxid, ring;
 
 	for (ctxid = 0; ctxid < MAX_RCV_CTX; ++ctxid) {
 		recv_ctx = &adapter->recv_ctx[ctxid];
-		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-			rcv_desc = &recv_ctx->rcv_desc[ring];
-			for (i = 0; i < rcv_desc->max_rx_desc_count; ++i) {
-				rx_buf = &(rcv_desc->rx_buf_arr[i]);
+		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+			rds_ring = &recv_ctx->rds_rings[ring];
+			for (i = 0; i < rds_ring->max_rx_desc_count; ++i) {
+				rx_buf = &(rds_ring->rx_buf_arr[i]);
 				if (rx_buf->state == NETXEN_BUFFER_FREE)
 					continue;
 				pci_unmap_single(adapter->pdev,
 						rx_buf->dma,
-						rcv_desc->dma_size,
+						rds_ring->dma_size,
 						PCI_DMA_FROMDEVICE);
 				if (rx_buf->skb != NULL)
 					dev_kfree_skb_any(rx_buf->skb);
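
The substantive change here is a rename: struct netxen_rcv_desc_ctx becomes struct nx_host_rds_ring, and the loop bound moves from the compile-time NUM_RCV_DESC_RINGS to the per-adapter max_rds_rings. A minimal sketch of what the renamed structure must carry, inferred only from the fields this diff touches (the authoritative definition lives in the driver headers and may differ):

    /* Hypothetical reconstruction of the RDS (receive descriptor store)
     * ring, based solely on fields referenced in this patch. */
    struct nx_host_rds_ring {
    	u32 flags;			/* RCV_DESC_NORMAL / _JUMBO / _LRO */
    	u32 producer;			/* next hardware slot to fill */
    	u32 begin_alloc;		/* next rx_buf_arr slot to repost */
    	u32 max_rx_desc_count;		/* ring depth, assumed power of two */
    	u32 dma_size;			/* bytes DMA-mapped per buffer */
    	u32 skb_size;			/* skb allocation size per buffer */
    	u32 crb_rcv_producer;		/* CRB offset of the producer index */
    	struct rcv_desc *desc_head;	/* hardware descriptor ring */
    	struct netxen_rx_buffer *rx_buf_arr;	/* per-slot software state */
    };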
@@ -216,16 +216,16 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
 void netxen_free_sw_resources(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
+	struct nx_host_rds_ring *rds_ring;
 	int ctx, ring;
 
 	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
 		recv_ctx = &adapter->recv_ctx[ctx];
-		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-			rcv_desc = &recv_ctx->rcv_desc[ring];
-			if (rcv_desc->rx_buf_arr) {
-				vfree(rcv_desc->rx_buf_arr);
-				rcv_desc->rx_buf_arr = NULL;
+		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+			rds_ring = &recv_ctx->rds_rings[ring];
+			if (rds_ring->rx_buf_arr) {
+				vfree(rds_ring->rx_buf_arr);
+				rds_ring->rx_buf_arr = NULL;
 			}
 		}
 	}
@@ -237,7 +237,7 @@ void netxen_free_sw_resources(struct netxen_adapter *adapter)
 int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 {
 	struct netxen_recv_context *recv_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
+	struct nx_host_rds_ring *rds_ring;
 	struct netxen_rx_buffer *rx_buf;
 	int ctx, ring, i, num_rx_bufs;
 
@@ -255,52 +255,52 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 
 	for (ctx = 0; ctx < MAX_RCV_CTX; ctx++) {
 		recv_ctx = &adapter->recv_ctx[ctx];
-		for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
-			rcv_desc = &recv_ctx->rcv_desc[ring];
+		for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+			rds_ring = &recv_ctx->rds_rings[ring];
 			switch (RCV_DESC_TYPE(ring)) {
 			case RCV_DESC_NORMAL:
-				rcv_desc->max_rx_desc_count =
+				rds_ring->max_rx_desc_count =
 					adapter->max_rx_desc_count;
-				rcv_desc->flags = RCV_DESC_NORMAL;
-				rcv_desc->dma_size = RX_DMA_MAP_LEN;
-				rcv_desc->skb_size = MAX_RX_BUFFER_LENGTH;
+				rds_ring->flags = RCV_DESC_NORMAL;
+				rds_ring->dma_size = RX_DMA_MAP_LEN;
+				rds_ring->skb_size = MAX_RX_BUFFER_LENGTH;
 				break;
 
 			case RCV_DESC_JUMBO:
-				rcv_desc->max_rx_desc_count =
+				rds_ring->max_rx_desc_count =
 					adapter->max_jumbo_rx_desc_count;
-				rcv_desc->flags = RCV_DESC_JUMBO;
-				rcv_desc->dma_size = RX_JUMBO_DMA_MAP_LEN;
-				rcv_desc->skb_size =
+				rds_ring->flags = RCV_DESC_JUMBO;
+				rds_ring->dma_size = RX_JUMBO_DMA_MAP_LEN;
+				rds_ring->skb_size =
 					MAX_RX_JUMBO_BUFFER_LENGTH;
 				break;
 
 			case RCV_RING_LRO:
-				rcv_desc->max_rx_desc_count =
+				rds_ring->max_rx_desc_count =
 					adapter->max_lro_rx_desc_count;
-				rcv_desc->flags = RCV_DESC_LRO;
-				rcv_desc->dma_size = RX_LRO_DMA_MAP_LEN;
-				rcv_desc->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
+				rds_ring->flags = RCV_DESC_LRO;
+				rds_ring->dma_size = RX_LRO_DMA_MAP_LEN;
+				rds_ring->skb_size = MAX_RX_LRO_BUFFER_LENGTH;
 				break;
 
 			}
-			rcv_desc->rx_buf_arr = (struct netxen_rx_buffer *)
+			rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
 				vmalloc(RCV_BUFFSIZE);
-			if (rcv_desc->rx_buf_arr == NULL) {
+			if (rds_ring->rx_buf_arr == NULL) {
 				printk(KERN_ERR "%s: Failed to allocate "
 					"rx buffer ring %d\n",
 					netdev->name, ring);
 				/* free whatever was already allocated */
 				goto err_out;
 			}
-			memset(rcv_desc->rx_buf_arr, 0, RCV_BUFFSIZE);
-			rcv_desc->begin_alloc = 0;
+			memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
+			rds_ring->begin_alloc = 0;
 			/*
 			 * Now go through all of them, set reference handles
 			 * and put them in the queues.
 			 */
-			num_rx_bufs = rcv_desc->max_rx_desc_count;
-			rx_buf = rcv_desc->rx_buf_arr;
+			num_rx_bufs = rds_ring->max_rx_desc_count;
+			rx_buf = rds_ring->rx_buf_arr;
 			for (i = 0; i < num_rx_bufs; i++) {
 				rx_buf->ref_handle = i;
 				rx_buf->state = NETXEN_BUFFER_FREE;
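
One cleanup this patch leaves on the table: the vmalloc() followed by memset() (plus the unneeded cast of vmalloc()'s void * return) could collapse into a single zeroing allocation on kernels that provide vzalloc(). A hedged equivalent, keeping RCV_BUFFSIZE as the driver defines it:

    /* Sketch only: vzalloc() postdates this kernel era, but on later
     * kernels it is equivalent to the vmalloc() + memset() pair above. */
    rds_ring->rx_buf_arr = vzalloc(RCV_BUFFSIZE);
    if (rds_ring->rx_buf_arr == NULL) {
    	printk(KERN_ERR "%s: Failed to allocate rx buffer ring %d\n",
    	       netdev->name, ring);
    	goto err_out;	/* free whatever was already allocated */
    }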
@@ -1154,7 +1154,7 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 	struct sk_buff *skb;
 	u32 length = netxen_get_sts_totallength(sts_data);
 	u32 desc_ctx;
-	struct netxen_rcv_desc_ctx *rcv_desc;
+	struct nx_host_rds_ring *rds_ring;
 	int ret;
 
 	desc_ctx = netxen_get_sts_type(sts_data);
@@ -1164,13 +1164,13 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 		return;
 	}
 
-	rcv_desc = &recv_ctx->rcv_desc[desc_ctx];
-	if (unlikely(index > rcv_desc->max_rx_desc_count)) {
+	rds_ring = &recv_ctx->rds_rings[desc_ctx];
+	if (unlikely(index > rds_ring->max_rx_desc_count)) {
 		DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
-			index, rcv_desc->max_rx_desc_count);
+			index, rds_ring->max_rx_desc_count);
 		return;
 	}
-	buffer = &rcv_desc->rx_buf_arr[index];
+	buffer = &rds_ring->rx_buf_arr[index];
 	if (desc_ctx == RCV_DESC_LRO_CTXID) {
 		buffer->lro_current_frags++;
 		if (netxen_get_sts_desc_lro_last_frag(desc)) {
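
While this code is being touched, note the comparison is `>` rather than `>=`: an index exactly equal to max_rx_desc_count passes the check, yet rx_buf_arr appears to hold exactly max_rx_desc_count entries. If that reading of the array size is right, the stricter check would be:

    /* Sketch, assuming rx_buf_arr has max_rx_desc_count entries;
     * valid indices are then 0 .. max_rx_desc_count - 1. */
    if (unlikely(index >= rds_ring->max_rx_desc_count)) {
    	DPRINTK(ERR, "Got a buffer index:%x Max is %x\n",
    		index, rds_ring->max_rx_desc_count);
    	return;
    }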
@@ -1191,7 +1191,7 @@ static void netxen_process_rcv(struct netxen_adapter *adapter, int ctxid,
 		}
 	}
 
-	pci_unmap_single(pdev, buffer->dma, rcv_desc->dma_size,
+	pci_unmap_single(pdev, buffer->dma, rds_ring->dma_size,
 			 PCI_DMA_FROMDEVICE);
 
 	skb = (struct sk_buff *)buffer->skb;
@@ -1249,7 +1249,7 @@ u32 netxen_process_rcv_ring(struct netxen_adapter *adapter, int ctxid, int max)
 		consumer = (consumer + 1) & (adapter->max_rx_desc_count - 1);
 		count++;
 	}
-	for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++)
+	for (ring = 0; ring < adapter->max_rds_rings; ring++)
 		netxen_post_rx_buffers_nodb(adapter, ctxid, ring);
 
 	/* update the consumer index in phantom */
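
Both the consumer advance above and get_next_index() depend on ring depths being powers of two, so that `& (count - 1)` acts as a cheap modulo. The helper itself is defined elsewhere in the driver; presumably it amounts to something like:

    /* Assumed shape of the wrap-around helper (the real one lives in the
     * netxen headers); correct only for power-of-two ring sizes. */
    static inline u32 get_next_index(u32 idx, u32 max)
    {
    	return (idx + 1) & (max - 1);
    }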
@@ -1340,7 +1340,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 	struct pci_dev *pdev = adapter->pdev;
 	struct sk_buff *skb;
 	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
-	struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+	struct nx_host_rds_ring *rds_ring = NULL;
 	uint producer;
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
@@ -1349,27 +1349,27 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 	netxen_ctx_msg msg = 0;
 	dma_addr_t dma;
 
-	rcv_desc = &recv_ctx->rcv_desc[ringid];
+	rds_ring = &recv_ctx->rds_rings[ringid];
 
-	producer = rcv_desc->producer;
-	index = rcv_desc->begin_alloc;
-	buffer = &rcv_desc->rx_buf_arr[index];
+	producer = rds_ring->producer;
+	index = rds_ring->begin_alloc;
+	buffer = &rds_ring->rx_buf_arr[index];
 	/* We can start writing rx descriptors into the phantom memory. */
 	while (buffer->state == NETXEN_BUFFER_FREE) {
-		skb = dev_alloc_skb(rcv_desc->skb_size);
+		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
 			/*
 			 * TODO
 			 * We need to schedule the posting of buffers to the pegs.
 			 */
-			rcv_desc->begin_alloc = index;
+			rds_ring->begin_alloc = index;
 			DPRINTK(ERR, "netxen_post_rx_buffers: "
 				" allocated only %d buffers\n", count);
 			break;
 		}
 
 		count++;	/* now there should be no failure */
-		pdesc = &rcv_desc->desc_head[producer];
+		pdesc = &rds_ring->desc_head[producer];
 
 #if defined(XGB_DEBUG)
 		*(unsigned long *)(skb->head) = 0xc0debabe;
@@ -1382,7 +1382,7 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 		 * buffer after it has been filled FSL TBD TBD
 		 * skb->dev = netdev;
 		 */
-		dma = pci_map_single(pdev, skb->data, rcv_desc->dma_size,
+		dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
 				     PCI_DMA_FROMDEVICE);
 		pdesc->addr_buffer = cpu_to_le64(dma);
 		buffer->skb = skb;
@@ -1390,36 +1390,40 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 		buffer->dma = dma;
 		/* make a rcv descriptor */
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
-		pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size);
+		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
 		DPRINTK(INFO, "done writing descripter\n");
 		producer =
-			get_next_index(producer, rcv_desc->max_rx_desc_count);
-		index = get_next_index(index, rcv_desc->max_rx_desc_count);
-		buffer = &rcv_desc->rx_buf_arr[index];
+			get_next_index(producer, rds_ring->max_rx_desc_count);
+		index = get_next_index(index, rds_ring->max_rx_desc_count);
+		buffer = &rds_ring->rx_buf_arr[index];
 	}
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rcv_desc->begin_alloc = index;
-		rcv_desc->producer = producer;
+		rds_ring->begin_alloc = index;
+		rds_ring->producer = producer;
 		/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
-				rcv_desc->crb_rcv_producer,
-				(producer-1) & (rcv_desc->max_rx_desc_count-1));
+				rds_ring->crb_rcv_producer,
+				(producer-1) & (rds_ring->max_rx_desc_count-1));
+
+		if (adapter->fw_major < 4) {
 			/*
 			 * Write a doorbell msg to tell phanmon of change in
 			 * receive ring producer
+			 * Only for firmware version < 4.0.0
 			 */
 			netxen_set_msg_peg_id(msg, NETXEN_RCV_PEG_DB_ID);
 			netxen_set_msg_privid(msg);
 			netxen_set_msg_count(msg,
 					     ((producer -
-					       1) & (rcv_desc->
+					       1) & (rds_ring->
 						     max_rx_desc_count - 1)));
 			netxen_set_msg_ctxid(msg, adapter->portnum);
 			netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
 			writel(msg,
 			       DB_NORMALIZE(adapter,
 					    NETXEN_RCV_PRODUCER_OFFSET));
+		}
 	}
 }
 
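
This hunk carries the only behavioral change in the patch: the doorbell message telling the phantom firmware about the new producer index is now sent only when adapter->fw_major < 4; with 4.0+ firmware the CRB producer write alone suffices. Condensed, the tail of netxen_post_rx_buffers() now behaves like the sketch below, where netxen_ring_doorbell() is a hypothetical stand-in for the msg setup and writel() shown in the hunk:

    if (count) {
    	rds_ring->begin_alloc = index;
    	rds_ring->producer = producer;
    	/* always publish the producer index through the CRB window */
    	adapter->pci_write_normalize(adapter, rds_ring->crb_rcv_producer,
    			(producer - 1) & (rds_ring->max_rx_desc_count - 1));
    	/* legacy (< 4.0.0) firmware additionally needs the doorbell */
    	if (adapter->fw_major < 4)
    		netxen_ring_doorbell(adapter, ringid, producer);
    }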
@@ -1429,32 +1433,32 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
 	struct pci_dev *pdev = adapter->pdev;
 	struct sk_buff *skb;
 	struct netxen_recv_context *recv_ctx = &(adapter->recv_ctx[ctx]);
-	struct netxen_rcv_desc_ctx *rcv_desc = NULL;
+	struct nx_host_rds_ring *rds_ring = NULL;
 	u32 producer;
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
 	int index = 0;
 
-	rcv_desc = &recv_ctx->rcv_desc[ringid];
+	rds_ring = &recv_ctx->rds_rings[ringid];
 
-	producer = rcv_desc->producer;
-	index = rcv_desc->begin_alloc;
-	buffer = &rcv_desc->rx_buf_arr[index];
+	producer = rds_ring->producer;
+	index = rds_ring->begin_alloc;
+	buffer = &rds_ring->rx_buf_arr[index];
 	/* We can start writing rx descriptors into the phantom memory. */
 	while (buffer->state == NETXEN_BUFFER_FREE) {
-		skb = dev_alloc_skb(rcv_desc->skb_size);
+		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
 			/*
 			 * We need to schedule the posting of buffers to the pegs.
 			 */
-			rcv_desc->begin_alloc = index;
+			rds_ring->begin_alloc = index;
 			DPRINTK(ERR, "netxen_post_rx_buffers_nodb: "
 				" allocated only %d buffers\n", count);
 			break;
 		}
 		count++;	/* now there should be no failure */
-		pdesc = &rcv_desc->desc_head[producer];
+		pdesc = &rds_ring->desc_head[producer];
 		skb_reserve(skb, 2);
 		/*
 		 * This will be setup when we receive the
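
The skb_reserve(skb, 2) in the nodb variant is the standard receive-alignment trick: shifting the data pointer by two bytes makes the 14-byte Ethernet header end on a 16-byte boundary, so the IP header lands naturally aligned. In generic form (NET_IP_ALIGN is 2 on most architectures):

    /* Illustrative allocator using the same 2-byte offset idiom. */
    static struct sk_buff *alloc_aligned_rx_skb(unsigned int size)
    {
    	struct sk_buff *skb = dev_alloc_skb(size + NET_IP_ALIGN);
    
    	if (skb)
    		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
    	return skb;
    }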
@@ -1464,27 +1468,27 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
 		buffer->dma = pci_map_single(pdev, skb->data,
-					     rcv_desc->dma_size,
+					     rds_ring->dma_size,
 					     PCI_DMA_FROMDEVICE);
 
 		/* make a rcv descriptor */
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
-		pdesc->buffer_length = cpu_to_le32(rcv_desc->dma_size);
+		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
 		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
 		producer =
-			get_next_index(producer, rcv_desc->max_rx_desc_count);
-		index = get_next_index(index, rcv_desc->max_rx_desc_count);
-		buffer = &rcv_desc->rx_buf_arr[index];
+			get_next_index(producer, rds_ring->max_rx_desc_count);
+		index = get_next_index(index, rds_ring->max_rx_desc_count);
+		buffer = &rds_ring->rx_buf_arr[index];
 	}
 
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rcv_desc->begin_alloc = index;
-		rcv_desc->producer = producer;
+		rds_ring->begin_alloc = index;
+		rds_ring->producer = producer;
 		/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
-				rcv_desc->crb_rcv_producer,
-				(producer-1) & (rcv_desc->max_rx_desc_count-1));
+				rds_ring->crb_rcv_producer,
+				(producer-1) & (rds_ring->max_rx_desc_count-1));
 		wmb();
 	}
 }
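
Unlike its doorbell sibling, the nodb path closes with wmb(), placed after the producer write, where it mainly fences that MMIO against later stores. The more common idiom fences descriptor writes before the hardware is told about them, along these lines (a sketch of the general pattern, not a claim about what this device requires):

    /* Conventional ordering sketch: make descriptor contents visible
     * before publishing the new producer index to the device. */
    pdesc->addr_buffer = cpu_to_le64(buffer->dma);	/* last descriptor store */
    wmb();
    adapter->pci_write_normalize(adapter, rds_ring->crb_rcv_producer,
    		(producer - 1) & (rds_ring->max_rx_desc_count - 1));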