diff options
author | Dhananjay Phadke <dhananjay@netxen.com> | 2009-01-14 23:50:00 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-01-14 23:50:00 -0500 |
commit | 6f70340698333f14b1d9c9e913c5de8f66b72c55 (patch) | |
tree | 52733b9edfee2fcaa396054cbf44555b142e2fd1 /drivers/net/netxen/netxen_nic_init.c | |
parent | 03e678ee968ae54b79c1580c2935895bd863ad95 (diff) |
netxen: handle dma mapping failures
o Bail out if pci_map_single() fails while replenishing rx ring.
o Drop packet if pci_map_{single,page}() fail in tx.
Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/netxen/netxen_nic_init.c')
-rw-r--r-- | drivers/net/netxen/netxen_nic_init.c | 68 |
1 file changed, 32 insertions, 36 deletions
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c index a3203644b482..ca7c8d8050c9 100644 --- a/drivers/net/netxen/netxen_nic_init.c +++ b/drivers/net/netxen/netxen_nic_init.c | |||
@@ -308,7 +308,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter) | |||
308 | } | 308 | } |
309 | memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE); | 309 | memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE); |
310 | INIT_LIST_HEAD(&rds_ring->free_list); | 310 | INIT_LIST_HEAD(&rds_ring->free_list); |
311 | rds_ring->begin_alloc = 0; | ||
312 | /* | 311 | /* |
313 | * Now go through all of them, set reference handles | 312 | * Now go through all of them, set reference handles |
314 | * and put them in the queues. | 313 | * and put them in the queues. |
@@ -1435,7 +1434,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1435 | struct rcv_desc *pdesc; | 1434 | struct rcv_desc *pdesc; |
1436 | struct netxen_rx_buffer *buffer; | 1435 | struct netxen_rx_buffer *buffer; |
1437 | int count = 0; | 1436 | int count = 0; |
1438 | int index = 0; | ||
1439 | netxen_ctx_msg msg = 0; | 1437 | netxen_ctx_msg msg = 0; |
1440 | dma_addr_t dma; | 1438 | dma_addr_t dma; |
1441 | struct list_head *head; | 1439 | struct list_head *head; |
@@ -1443,7 +1441,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1443 | rds_ring = &recv_ctx->rds_rings[ringid]; | 1441 | rds_ring = &recv_ctx->rds_rings[ringid]; |
1444 | 1442 | ||
1445 | producer = rds_ring->producer; | 1443 | producer = rds_ring->producer; |
1446 | index = rds_ring->begin_alloc; | ||
1447 | head = &rds_ring->free_list; | 1444 | head = &rds_ring->free_list; |
1448 | 1445 | ||
1449 | /* We can start writing rx descriptors into the phantom memory. */ | 1446 | /* We can start writing rx descriptors into the phantom memory. */ |
@@ -1451,39 +1448,37 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid) | |||
1451 | 1448 | ||
1452 | skb = dev_alloc_skb(rds_ring->skb_size); | 1449 | skb = dev_alloc_skb(rds_ring->skb_size); |
1453 | if (unlikely(!skb)) { | 1450 | if (unlikely(!skb)) { |
1454 | rds_ring->begin_alloc = index; | ||
1455 | break; | 1451 | break; |
1456 | } | 1452 | } |
1457 | 1453 | ||
1454 | if (!adapter->ahw.cut_through) | ||
1455 | skb_reserve(skb, 2); | ||
1456 | |||
1457 | dma = pci_map_single(pdev, skb->data, | ||
1458 | rds_ring->dma_size, PCI_DMA_FROMDEVICE); | ||
1459 | if (pci_dma_mapping_error(pdev, dma)) { | ||
1460 | dev_kfree_skb_any(skb); | ||
1461 | break; | ||
1462 | } | ||
1463 | |||
1464 | count++; | ||
1458 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); | 1465 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); |
1459 | list_del(&buffer->list); | 1466 | list_del(&buffer->list); |
1460 | 1467 | ||
1461 | count++; /* now there should be no failure */ | ||
1462 | pdesc = &rds_ring->desc_head[producer]; | ||
1463 | |||
1464 | if (!adapter->ahw.cut_through) | ||
1465 | skb_reserve(skb, 2); | ||
1466 | /* This will be setup when we receive the | ||
1467 | * buffer after it has been filled FSL TBD TBD | ||
1468 | * skb->dev = netdev; | ||
1469 | */ | ||
1470 | dma = pci_map_single(pdev, skb->data, rds_ring->dma_size, | ||
1471 | PCI_DMA_FROMDEVICE); | ||
1472 | pdesc->addr_buffer = cpu_to_le64(dma); | ||
1473 | buffer->skb = skb; | 1468 | buffer->skb = skb; |
1474 | buffer->state = NETXEN_BUFFER_BUSY; | 1469 | buffer->state = NETXEN_BUFFER_BUSY; |
1475 | buffer->dma = dma; | 1470 | buffer->dma = dma; |
1471 | |||
1476 | /* make a rcv descriptor */ | 1472 | /* make a rcv descriptor */ |
1473 | pdesc = &rds_ring->desc_head[producer]; | ||
1474 | pdesc->addr_buffer = cpu_to_le64(dma); | ||
1477 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); | 1475 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
1478 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); | 1476 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); |
1479 | DPRINTK(INFO, "done writing descripter\n"); | 1477 | |
1480 | producer = | 1478 | producer = get_next_index(producer, rds_ring->max_rx_desc_count); |
1481 | get_next_index(producer, rds_ring->max_rx_desc_count); | ||
1482 | index = get_next_index(index, rds_ring->max_rx_desc_count); | ||
1483 | } | 1479 | } |
1484 | /* if we did allocate buffers, then write the count to Phantom */ | 1480 | /* if we did allocate buffers, then write the count to Phantom */ |
1485 | if (count) { | 1481 | if (count) { |
1486 | rds_ring->begin_alloc = index; | ||
1487 | rds_ring->producer = producer; | 1482 | rds_ring->producer = producer; |
1488 | /* Window = 1 */ | 1483 | /* Window = 1 */ |
1489 | adapter->pci_write_normalize(adapter, | 1484 | adapter->pci_write_normalize(adapter, |
@@ -1522,49 +1517,50 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, | |||
1522 | struct rcv_desc *pdesc; | 1517 | struct rcv_desc *pdesc; |
1523 | struct netxen_rx_buffer *buffer; | 1518 | struct netxen_rx_buffer *buffer; |
1524 | int count = 0; | 1519 | int count = 0; |
1525 | int index = 0; | ||
1526 | struct list_head *head; | 1520 | struct list_head *head; |
1521 | dma_addr_t dma; | ||
1527 | 1522 | ||
1528 | rds_ring = &recv_ctx->rds_rings[ringid]; | 1523 | rds_ring = &recv_ctx->rds_rings[ringid]; |
1529 | 1524 | ||
1530 | producer = rds_ring->producer; | 1525 | producer = rds_ring->producer; |
1531 | index = rds_ring->begin_alloc; | ||
1532 | head = &rds_ring->free_list; | 1526 | head = &rds_ring->free_list; |
1533 | /* We can start writing rx descriptors into the phantom memory. */ | 1527 | /* We can start writing rx descriptors into the phantom memory. */ |
1534 | while (!list_empty(head)) { | 1528 | while (!list_empty(head)) { |
1535 | 1529 | ||
1536 | skb = dev_alloc_skb(rds_ring->skb_size); | 1530 | skb = dev_alloc_skb(rds_ring->skb_size); |
1537 | if (unlikely(!skb)) { | 1531 | if (unlikely(!skb)) { |
1538 | rds_ring->begin_alloc = index; | ||
1539 | break; | 1532 | break; |
1540 | } | 1533 | } |
1541 | 1534 | ||
1535 | if (!adapter->ahw.cut_through) | ||
1536 | skb_reserve(skb, 2); | ||
1537 | |||
1538 | dma = pci_map_single(pdev, skb->data, | ||
1539 | rds_ring->dma_size, PCI_DMA_FROMDEVICE); | ||
1540 | if (pci_dma_mapping_error(pdev, dma)) { | ||
1541 | dev_kfree_skb_any(skb); | ||
1542 | break; | ||
1543 | } | ||
1544 | |||
1545 | count++; | ||
1542 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); | 1546 | buffer = list_entry(head->next, struct netxen_rx_buffer, list); |
1543 | list_del(&buffer->list); | 1547 | list_del(&buffer->list); |
1544 | 1548 | ||
1545 | count++; /* now there should be no failure */ | ||
1546 | pdesc = &rds_ring->desc_head[producer]; | ||
1547 | if (!adapter->ahw.cut_through) | ||
1548 | skb_reserve(skb, 2); | ||
1549 | buffer->skb = skb; | 1549 | buffer->skb = skb; |
1550 | buffer->state = NETXEN_BUFFER_BUSY; | 1550 | buffer->state = NETXEN_BUFFER_BUSY; |
1551 | buffer->dma = pci_map_single(pdev, skb->data, | 1551 | buffer->dma = dma; |
1552 | rds_ring->dma_size, | ||
1553 | PCI_DMA_FROMDEVICE); | ||
1554 | 1552 | ||
1555 | /* make a rcv descriptor */ | 1553 | /* make a rcv descriptor */ |
1554 | pdesc = &rds_ring->desc_head[producer]; | ||
1556 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); | 1555 | pdesc->reference_handle = cpu_to_le16(buffer->ref_handle); |
1557 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); | 1556 | pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size); |
1558 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); | 1557 | pdesc->addr_buffer = cpu_to_le64(buffer->dma); |
1559 | producer = | 1558 | |
1560 | get_next_index(producer, rds_ring->max_rx_desc_count); | 1559 | producer = get_next_index(producer, rds_ring->max_rx_desc_count); |
1561 | index = get_next_index(index, rds_ring->max_rx_desc_count); | ||
1562 | buffer = &rds_ring->rx_buf_arr[index]; | ||
1563 | } | 1560 | } |
1564 | 1561 | ||
1565 | /* if we did allocate buffers, then write the count to Phantom */ | 1562 | /* if we did allocate buffers, then write the count to Phantom */ |
1566 | if (count) { | 1563 | if (count) { |
1567 | rds_ring->begin_alloc = index; | ||
1568 | rds_ring->producer = producer; | 1564 | rds_ring->producer = producer; |
1569 | /* Window = 1 */ | 1565 | /* Window = 1 */ |
1570 | adapter->pci_write_normalize(adapter, | 1566 | adapter->pci_write_normalize(adapter, |