path: root/drivers/net/e1000e/netdev.c
author	Auke Kok <auke-jan.h.kok@intel.com>	2007-10-25 16:57:44 -0400
committer	Jeff Garzik <jeff@garzik.org>	2007-10-29 05:47:08 -0400
commit	47f44e40a3c12f8604aba9288d7a7f991cbf17ba (patch)
tree	0531dd3d7501a519946046d2cf18d596bcb4d903 /drivers/net/e1000e/netdev.c
parent	e38c2c651a038b78fd01cf2e3f3a65cacf0e41cc (diff)
e1000e: Fix jumbo frame receive code.
Fix allocation and freeing of jumbo frames where several bugs were recently
introduced by cleanups after we forked this code from e1000. This moves
ps_pages to buffer_info where it really belongs and makes it a dynamically
allocated array. The penalty is not that high since it's allocated outside
of the buffer_info struct anyway. Without this patch all jumbo frames are
completely broken and the driver panics.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
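For context, the core of the change is where the packet-split page state lives: instead of one large ps_pages array hanging off the rx ring (indexed as i * PS_PAGE_BUFFERS + j), each e1000_buffer now carries its own small, dynamically allocated ps_pages array. A minimal sketch of the resulting layout is below; the real definitions live in drivers/net/e1000e/e1000.h, which is outside this diffstat, so the surrounding fields shown here are assumptions for illustration only.

	/* Sketch only -- see e1000.h for the authoritative definitions. */
	struct e1000_ps_page {
		struct page *page;
		u64 dma;	/* DMA address written into the hw descriptor */
	};

	struct e1000_buffer {
		dma_addr_t dma;
		struct sk_buff *skb;
		/* ... other per-descriptor fields (assumed) ... */
		/* After this patch: one PS_PAGE_BUFFERS-sized array per buffer,
		 * kcalloc()'d in e1000e_setup_rx_resources() and kfree()'d in
		 * e1000e_free_rx_resources(). */
		struct e1000_ps_page *ps_pages;
	};

With that layout, every rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j] lookup in the diff below becomes buffer_info->ps_pages[j], so page state is owned and freed together with its buffer_info entry.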
Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--	drivers/net/e1000e/netdev.c	102
1 file changed, 52 insertions(+), 50 deletions(-)
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 033e124d1c1f..46c5ac6b4d77 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -245,37 +245,36 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 
 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
-			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
-						     + j];
-			if (j < adapter->rx_ps_pages) {
-				if (!ps_page->page) {
-					ps_page->page = alloc_page(GFP_ATOMIC);
-					if (!ps_page->page) {
-						adapter->alloc_rx_buff_failed++;
-						goto no_buffers;
-					}
-					ps_page->dma = pci_map_page(pdev,
-							   ps_page->page,
-							   0, PAGE_SIZE,
-							   PCI_DMA_FROMDEVICE);
-					if (pci_dma_mapping_error(
-							ps_page->dma)) {
-						dev_err(&adapter->pdev->dev,
-						  "RX DMA page map failed\n");
-						adapter->rx_dma_failed++;
-						goto no_buffers;
-					}
-				}
-				/*
-				 * Refresh the desc even if buffer_addrs
-				 * didn't change because each write-back
-				 * erases this info.
-				 */
-				rx_desc->read.buffer_addr[j+1] =
-				     cpu_to_le64(ps_page->dma);
-			} else {
-				rx_desc->read.buffer_addr[j+1] = ~0;
-			}
+			ps_page = &buffer_info->ps_pages[j];
+			if (j >= adapter->rx_ps_pages) {
+				/* all unused desc entries get hw null ptr */
+				rx_desc->read.buffer_addr[j+1] = ~0;
+				continue;
+			}
+			if (!ps_page->page) {
+				ps_page->page = alloc_page(GFP_ATOMIC);
+				if (!ps_page->page) {
+					adapter->alloc_rx_buff_failed++;
+					goto no_buffers;
+				}
+				ps_page->dma = pci_map_page(pdev,
+						   ps_page->page,
+						   0, PAGE_SIZE,
+						   PCI_DMA_FROMDEVICE);
+				if (pci_dma_mapping_error(ps_page->dma)) {
+					dev_err(&adapter->pdev->dev,
+					  "RX DMA page map failed\n");
+					adapter->rx_dma_failed++;
+					goto no_buffers;
+				}
+			}
+			/*
+			 * Refresh the desc even if buffer_addrs
+			 * didn't change because each write-back
+			 * erases this info.
+			 */
+			rx_desc->read.buffer_addr[j+1] =
+				cpu_to_le64(ps_page->dma);
 		}
 
 		skb = netdev_alloc_skb(netdev,
@@ -953,7 +952,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		    ((length + l1) <= adapter->rx_ps_bsize0)) {
 			u8 *vaddr;
 
-			ps_page = &rx_ring->ps_pages[i * PS_PAGE_BUFFERS];
+			ps_page = &buffer_info->ps_pages[0];
 
 			/* there is no documentation about how to call
 			 * kmap_atomic, so we can't hold the mapping
@@ -977,7 +976,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 			if (!length)
 				break;
 
-			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS) + j];
+			ps_page = &buffer_info->ps_pages[j];
 			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
 				       PCI_DMA_FROMDEVICE);
 			ps_page->dma = 0;
@@ -1043,7 +1042,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 	struct e1000_buffer *buffer_info;
 	struct e1000_ps_page *ps_page;
 	struct pci_dev *pdev = adapter->pdev;
-	unsigned long size;
 	unsigned int i, j;
 
 	/* Free all the Rx ring sk_buffs */
@@ -1075,8 +1073,7 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 		}
 
 		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
-			ps_page = &rx_ring->ps_pages[(i * PS_PAGE_BUFFERS)
-						     + j];
+			ps_page = &buffer_info->ps_pages[j];
 			if (!ps_page->page)
 				break;
 			pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE,
@@ -1093,12 +1090,6 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
 		rx_ring->rx_skb_top = NULL;
 	}
 
-	size = sizeof(struct e1000_buffer) * rx_ring->count;
-	memset(rx_ring->buffer_info, 0, size);
-	size = sizeof(struct e1000_ps_page)
-	       * (rx_ring->count * PS_PAGE_BUFFERS);
-	memset(rx_ring->ps_pages, 0, size);
-
 	/* Zero out the descriptor ring */
 	memset(rx_ring->desc, 0, rx_ring->size);
 
@@ -1421,7 +1412,8 @@ err:
 int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 {
 	struct e1000_ring *rx_ring = adapter->rx_ring;
-	int size, desc_len, err = -ENOMEM;
+	struct e1000_buffer *buffer_info;
+	int i, size, desc_len, err = -ENOMEM;
 
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
 	rx_ring->buffer_info = vmalloc(size);
@@ -1429,11 +1421,14 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 		goto err;
 	memset(rx_ring->buffer_info, 0, size);
 
-	rx_ring->ps_pages = kcalloc(rx_ring->count * PS_PAGE_BUFFERS,
-				    sizeof(struct e1000_ps_page),
-				    GFP_KERNEL);
-	if (!rx_ring->ps_pages)
-		goto err;
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
+						sizeof(struct e1000_ps_page),
+						GFP_KERNEL);
+		if (!buffer_info->ps_pages)
+			goto err_pages;
+	}
 
 	desc_len = sizeof(union e1000_rx_desc_packet_split);
 
@@ -1443,16 +1438,21 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
 
 	err = e1000_alloc_ring_dma(adapter, rx_ring);
 	if (err)
-		goto err;
+		goto err_pages;
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 	rx_ring->rx_skb_top = NULL;
 
 	return 0;
+
+err_pages:
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		kfree(buffer_info->ps_pages);
+	}
 err:
 	vfree(rx_ring->buffer_info);
-	kfree(rx_ring->ps_pages);
 	ndev_err(adapter->netdev,
 	"Unable to allocate memory for the transmit descriptor ring\n");
 	return err;
@@ -1518,15 +1518,17 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
 {
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_ring *rx_ring = adapter->rx_ring;
+	int i;
 
 	e1000_clean_rx_ring(adapter);
 
+	for (i = 0; i < rx_ring->count; i++) {
+		kfree(rx_ring->buffer_info[i].ps_pages);
+	}
+
 	vfree(rx_ring->buffer_info);
 	rx_ring->buffer_info = NULL;
 
-	kfree(rx_ring->ps_pages);
-	rx_ring->ps_pages = NULL;
-
 	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
 			  rx_ring->dma);
 	rx_ring->desc = NULL;