author		Dhananjay Phadke <dhananjay@netxen.com>	2009-01-14 23:50:00 -0500
committer	David S. Miller <davem@davemloft.net>	2009-01-14 23:50:00 -0500
commit		6f70340698333f14b1d9c9e913c5de8f66b72c55
tree		52733b9edfee2fcaa396054cbf44555b142e2fd1
parent		03e678ee968ae54b79c1580c2935895bd863ad95
netxen: handle dma mapping failures
o Bail out if pci_map_single() fails while replenishing rx ring.
o Drop packet if pci_map_{single,page}() fail in tx.
Signed-off-by: Dhananjay Phadke <dhananjay@netxen.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/netxen/netxen_nic.h      |  1
-rw-r--r--	drivers/net/netxen/netxen_nic_init.c | 68
-rw-r--r--	drivers/net/netxen/netxen_nic_main.c | 38
3 files changed, 67 insertions(+), 40 deletions(-)
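
The rx-side change maps each freshly allocated skb before it is committed to
the ring, so a mapping failure costs only that one skb and ends the replenish
loop early. A minimal sketch of the map-then-check pattern, using the same
2009-era PCI DMA API the driver uses (the helper name and the -ENOMEM return
are illustrative, not part of the patch):

#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical helper: map one rx buffer, failing cleanly. */
static int example_map_rx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			      size_t dma_size, dma_addr_t *dma_out)
{
	dma_addr_t dma;

	/* Map first; only a successfully mapped skb may reach the ring. */
	dma = pci_map_single(pdev, skb->data, dma_size, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, dma)) {
		dev_kfree_skb_any(skb);	/* drop it; the caller breaks out */
		return -ENOMEM;
	}

	*dma_out = dma;
	return 0;
}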
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 6598a34b87d4..c11c568fd7db 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -860,7 +860,6 @@ struct nx_host_rds_ring {
 	u32 skb_size;
 	struct netxen_rx_buffer *rx_buf_arr;	/* rx buffers for receive */
 	struct list_head free_list;
-	int begin_alloc;
 };
 
 /*
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index a3203644b482..ca7c8d8050c9 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -308,7 +308,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
 		}
 		memset(rds_ring->rx_buf_arr, 0, RCV_BUFFSIZE);
 		INIT_LIST_HEAD(&rds_ring->free_list);
-		rds_ring->begin_alloc = 0;
 		/*
 		 * Now go through all of them, set reference handles
 		 * and put them in the queues.
@@ -1435,7 +1434,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
-	int index = 0;
 	netxen_ctx_msg msg = 0;
 	dma_addr_t dma;
 	struct list_head *head;
@@ -1443,7 +1441,6 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
-	index = rds_ring->begin_alloc;
 	head = &rds_ring->free_list;
 
 	/* We can start writing rx descriptors into the phantom memory. */
@@ -1451,39 +1448,37 @@ void netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ctx, u32 ringid)
 
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			rds_ring->begin_alloc = index;
 			break;
 		}
 
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
+
+		dma = pci_map_single(pdev, skb->data,
+				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, dma)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+
+		count++;
 		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
 		list_del(&buffer->list);
 
-		count++;	/* now there should be no failure */
-		pdesc = &rds_ring->desc_head[producer];
-
-		if (!adapter->ahw.cut_through)
-			skb_reserve(skb, 2);
-		/* This will be setup when we receive the
-		 * buffer after it has been filled FSL TBD TBD
-		 * skb->dev = netdev;
-		 */
-		dma = pci_map_single(pdev, skb->data, rds_ring->dma_size,
-				PCI_DMA_FROMDEVICE);
-		pdesc->addr_buffer = cpu_to_le64(dma);
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
 		buffer->dma = dma;
+
 		/* make a rcv descriptor */
+		pdesc = &rds_ring->desc_head[producer];
+		pdesc->addr_buffer = cpu_to_le64(dma);
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
 		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
-		DPRINTK(INFO, "done writing descripter\n");
-		producer =
-			get_next_index(producer, rds_ring->max_rx_desc_count);
-		index = get_next_index(index, rds_ring->max_rx_desc_count);
+
+		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
 	}
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rds_ring->begin_alloc = index;
 		rds_ring->producer = producer;
 		/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
@@ -1522,49 +1517,50 @@ static void netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
 	struct rcv_desc *pdesc;
 	struct netxen_rx_buffer *buffer;
 	int count = 0;
-	int index = 0;
 	struct list_head *head;
+	dma_addr_t dma;
 
 	rds_ring = &recv_ctx->rds_rings[ringid];
 
 	producer = rds_ring->producer;
-	index = rds_ring->begin_alloc;
 	head = &rds_ring->free_list;
 	/* We can start writing rx descriptors into the phantom memory. */
 	while (!list_empty(head)) {
 
 		skb = dev_alloc_skb(rds_ring->skb_size);
 		if (unlikely(!skb)) {
-			rds_ring->begin_alloc = index;
 			break;
 		}
 
+		if (!adapter->ahw.cut_through)
+			skb_reserve(skb, 2);
+
+		dma = pci_map_single(pdev, skb->data,
+				rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+		if (pci_dma_mapping_error(pdev, dma)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+
+		count++;
 		buffer = list_entry(head->next, struct netxen_rx_buffer, list);
 		list_del(&buffer->list);
 
-		count++;	/* now there should be no failure */
-		pdesc = &rds_ring->desc_head[producer];
-		if (!adapter->ahw.cut_through)
-			skb_reserve(skb, 2);
 		buffer->skb = skb;
 		buffer->state = NETXEN_BUFFER_BUSY;
-		buffer->dma = pci_map_single(pdev, skb->data,
-				rds_ring->dma_size,
-				PCI_DMA_FROMDEVICE);
+		buffer->dma = dma;
 
 		/* make a rcv descriptor */
+		pdesc = &rds_ring->desc_head[producer];
 		pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
 		pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
 		pdesc->addr_buffer = cpu_to_le64(buffer->dma);
-		producer =
-			get_next_index(producer, rds_ring->max_rx_desc_count);
-		index = get_next_index(index, rds_ring->max_rx_desc_count);
-		buffer = &rds_ring->rx_buf_arr[index];
+
+		producer = get_next_index(producer, rds_ring->max_rx_desc_count);
 	}
 
 	/* if we did allocate buffers, then write the count to Phantom */
 	if (count) {
-		rds_ring->begin_alloc = index;
 		rds_ring->producer = producer;
 		/* Window = 1 */
 		adapter->pci_write_normalize(adapter,
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 9268fd2fbacf..86867405a367 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1200,6 +1200,24 @@ static bool netxen_tso_check(struct net_device *netdev,
 	return tso;
 }
 
+static void
+netxen_clean_tx_dma_mapping(struct pci_dev *pdev,
+		struct netxen_cmd_buffer *pbuf, int last)
+{
+	int k;
+	struct netxen_skb_frag *buffrag;
+
+	buffrag = &pbuf->frag_array[0];
+	pci_unmap_single(pdev, buffrag->dma,
+			buffrag->length, PCI_DMA_TODEVICE);
+
+	for (k = 1; k < last; k++) {
+		buffrag = &pbuf->frag_array[k];
+		pci_unmap_page(pdev, buffrag->dma,
+			buffrag->length, PCI_DMA_TODEVICE);
+	}
+}
+
 static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct netxen_adapter *adapter = netdev_priv(netdev);
@@ -1208,6 +1226,8 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	struct netxen_cmd_buffer *pbuf;
 	struct netxen_skb_frag *buffrag;
 	struct cmd_desc_type0 *hwdesc;
+	struct pci_dev *pdev = adapter->pdev;
+	dma_addr_t temp_dma;
 	int i, k;
 
 	u32 producer, consumer;
@@ -1240,8 +1260,12 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	pbuf->skb = skb;
 	pbuf->frag_count = frag_count;
 	buffrag = &pbuf->frag_array[0];
-	buffrag->dma = pci_map_single(adapter->pdev, skb->data, first_seg_len,
-			PCI_DMA_TODEVICE);
+	temp_dma = pci_map_single(pdev, skb->data, first_seg_len,
+			PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(pdev, temp_dma))
+		goto drop_packet;
+
+	buffrag->dma = temp_dma;
 	buffrag->length = first_seg_len;
 	netxen_set_tx_frags_len(hwdesc, frag_count, skb->len);
 	netxen_set_tx_port(hwdesc, adapter->portnum);
@@ -1253,7 +1277,6 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		struct skb_frag_struct *frag;
 		int len, temp_len;
 		unsigned long offset;
-		dma_addr_t temp_dma;
 
 		/* move to next desc. if there is a need */
 		if ((i & 0x3) == 0) {
@@ -1269,8 +1292,12 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 		offset = frag->page_offset;
 
 		temp_len = len;
-		temp_dma = pci_map_page(adapter->pdev, frag->page, offset,
-					len, PCI_DMA_TODEVICE);
+		temp_dma = pci_map_page(pdev, frag->page, offset,
+					len, PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, temp_dma)) {
+			netxen_clean_tx_dma_mapping(pdev, pbuf, i);
+			goto drop_packet;
+		}
 
 		buffrag++;
 		buffrag->dma = temp_dma;
@@ -1345,6 +1372,11 @@ static int netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	netdev->trans_start = jiffies;
 
 	return NETDEV_TX_OK;
+
+drop_packet:
+	adapter->stats.txdropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
 }
 
 static int netxen_nic_check_temp(struct netxen_adapter *adapter)
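
On the tx side a mapping failure can occur after earlier fragments were
already mapped, which is why the patch adds netxen_clean_tx_dma_mapping() to
unwind them before dropping the packet. A condensed, self-contained sketch of
that unwind (the example_ names are illustrative; the real helper operates on
struct netxen_cmd_buffer, as in the hunk above):

#include <linux/pci.h>

struct example_frag {
	dma_addr_t dma;
	u32 length;
};

/* Frag 0 was mapped with pci_map_single(), frags 1..last-1 with
 * pci_map_page(); each must be released with the matching unmap call. */
static void example_unwind_tx(struct pci_dev *pdev,
			      struct example_frag *frags, int last)
{
	int k;

	pci_unmap_single(pdev, frags[0].dma, frags[0].length,
			 PCI_DMA_TODEVICE);

	for (k = 1; k < last; k++)
		pci_unmap_page(pdev, frags[k].dma, frags[k].length,
			       PCI_DMA_TODEVICE);
}

Returning NETDEV_TX_OK from the drop_packet path is deliberate: after
dev_kfree_skb_any() the skb is consumed, and returning NETDEV_TX_BUSY would
only make the stack requeue a packet whose mapping is likely to fail again.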