aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorToshiharu Okada <toshiharu-linux@dsn.okisemi.com>2011-09-01 10:20:08 -0400
committerDavid S. Miller <davem@davemloft.net>2011-09-15 17:31:45 -0400
commit124d770a6459be21b84445f6ebf7dbfb60d43585 (patch)
tree4733fe453d2a8e1a5a4b1a1cb2a24d30ce5b6da2 /drivers/net
parent5229d87edcd80a3bceb0708ebd767faff2e589a9 (diff)
pch_gbe: added the process of FIFO over run error
This patch adds the processing that must be applied to the hardware when a FIFO overrun error occurs. Signed-off-by: Toshiharu Okada <toshiharu-linux@dsn.okisemi.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h12
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c271
2 files changed, 179 insertions, 104 deletions
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index 59fac77d0dbb..a09a07197eb5 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -127,8 +127,8 @@ struct pch_gbe_regs {
127 127
128/* Reset */ 128/* Reset */
129#define PCH_GBE_ALL_RST 0x80000000 /* All reset */ 129#define PCH_GBE_ALL_RST 0x80000000 /* All reset */
130#define PCH_GBE_TX_RST 0x40000000 /* TX MAC, TX FIFO, TX DMA reset */ 130#define PCH_GBE_TX_RST 0x00008000 /* TX MAC, TX FIFO, TX DMA reset */
131#define PCH_GBE_RX_RST 0x04000000 /* RX MAC, RX FIFO, RX DMA reset */ 131#define PCH_GBE_RX_RST 0x00004000 /* RX MAC, RX FIFO, RX DMA reset */
132 132
133/* TCP/IP Accelerator Control */ 133/* TCP/IP Accelerator Control */
134#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */ 134#define PCH_GBE_EX_LIST_EN 0x00000008 /* External List Enable */
@@ -276,6 +276,9 @@ struct pch_gbe_regs {
276#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */ 276#define PCH_GBE_RX_DMA_EN 0x00000002 /* Enables Receive DMA */
277#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */ 277#define PCH_GBE_TX_DMA_EN 0x00000001 /* Enables Transmission DMA */
278 278
279/* RX DMA STATUS */
280#define PCH_GBE_IDLE_CHECK 0xFFFFFFFE
281
279/* Wake On LAN Status */ 282/* Wake On LAN Status */
280#define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */ 283#define PCH_GBE_WLS_BR 0x00000008 /* Broadcas Address */
281#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */ 284#define PCH_GBE_WLS_MLT 0x00000004 /* Multicast Address */
@@ -471,6 +474,7 @@ struct pch_gbe_tx_desc {
471struct pch_gbe_buffer { 474struct pch_gbe_buffer {
472 struct sk_buff *skb; 475 struct sk_buff *skb;
473 dma_addr_t dma; 476 dma_addr_t dma;
477 unsigned char *rx_buffer;
474 unsigned long time_stamp; 478 unsigned long time_stamp;
475 u16 length; 479 u16 length;
476 bool mapped; 480 bool mapped;
@@ -511,6 +515,9 @@ struct pch_gbe_tx_ring {
511struct pch_gbe_rx_ring { 515struct pch_gbe_rx_ring {
512 struct pch_gbe_rx_desc *desc; 516 struct pch_gbe_rx_desc *desc;
513 dma_addr_t dma; 517 dma_addr_t dma;
518 unsigned char *rx_buff_pool;
519 dma_addr_t rx_buff_pool_logic;
520 unsigned int rx_buff_pool_size;
514 unsigned int size; 521 unsigned int size;
515 unsigned int count; 522 unsigned int count;
516 unsigned int next_to_use; 523 unsigned int next_to_use;
@@ -622,6 +629,7 @@ struct pch_gbe_adapter {
622 unsigned long rx_buffer_len; 629 unsigned long rx_buffer_len;
623 unsigned long tx_queue_len; 630 unsigned long tx_queue_len;
624 bool have_msi; 631 bool have_msi;
632 bool rx_stop_flag;
625}; 633};
626 634
627extern const char pch_driver_version[]; 635extern const char pch_driver_version[];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 48ff87c455ae..39ce0ee44ad7 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -20,7 +20,6 @@
20 20
21#include "pch_gbe.h" 21#include "pch_gbe.h"
22#include "pch_gbe_api.h" 22#include "pch_gbe_api.h"
23#include <linux/prefetch.h>
24 23
25#define DRV_VERSION "1.00" 24#define DRV_VERSION "1.00"
26const char pch_driver_version[] = DRV_VERSION; 25const char pch_driver_version[] = DRV_VERSION;
@@ -34,6 +33,7 @@ const char pch_driver_version[] = DRV_VERSION;
34#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ 33#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
35#define PCH_GBE_COPYBREAK_DEFAULT 256 34#define PCH_GBE_COPYBREAK_DEFAULT 256
36#define PCH_GBE_PCI_BAR 1 35#define PCH_GBE_PCI_BAR 1
36#define PCH_GBE_RESERVE_MEMORY 0x200000 /* 2MB */
37 37
38/* Macros for ML7223 */ 38/* Macros for ML7223 */
39#define PCI_VENDOR_ID_ROHM 0x10db 39#define PCI_VENDOR_ID_ROHM 0x10db
@@ -52,6 +52,7 @@ const char pch_driver_version[] = DRV_VERSION;
52 ) 52 )
53 53
54/* Ethertype field values */ 54/* Ethertype field values */
55#define PCH_GBE_MAX_RX_BUFFER_SIZE 0x2880
55#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318 56#define PCH_GBE_MAX_JUMBO_FRAME_SIZE 10318
56#define PCH_GBE_FRAME_SIZE_2048 2048 57#define PCH_GBE_FRAME_SIZE_2048 2048
57#define PCH_GBE_FRAME_SIZE_4096 4096 58#define PCH_GBE_FRAME_SIZE_4096 4096
@@ -83,10 +84,12 @@ const char pch_driver_version[] = DRV_VERSION;
83#define PCH_GBE_INT_ENABLE_MASK ( \ 84#define PCH_GBE_INT_ENABLE_MASK ( \
84 PCH_GBE_INT_RX_DMA_CMPLT | \ 85 PCH_GBE_INT_RX_DMA_CMPLT | \
85 PCH_GBE_INT_RX_DSC_EMP | \ 86 PCH_GBE_INT_RX_DSC_EMP | \
87 PCH_GBE_INT_RX_FIFO_ERR | \
86 PCH_GBE_INT_WOL_DET | \ 88 PCH_GBE_INT_WOL_DET | \
87 PCH_GBE_INT_TX_CMPLT \ 89 PCH_GBE_INT_TX_CMPLT \
88 ) 90 )
89 91
92#define PCH_GBE_INT_DISABLE_ALL 0
90 93
91static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 94static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
92 95
@@ -138,6 +141,27 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
138 if (!tmp) 141 if (!tmp)
139 pr_err("Error: busy bit is not cleared\n"); 142 pr_err("Error: busy bit is not cleared\n");
140} 143}
144
145/**
146 * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
147 * @reg: Pointer of register
148 * @busy: Busy bit
149 */
150static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
151{
152 u32 tmp;
153 int ret = -1;
154 /* wait busy */
155 tmp = 20;
156 while ((ioread32(reg) & bit) && --tmp)
157 udelay(5);
158 if (!tmp)
159 pr_err("Error: busy bit is not cleared\n");
160 else
161 ret = 0;
162 return ret;
163}
164
141/** 165/**
142 * pch_gbe_mac_mar_set - Set MAC address register 166 * pch_gbe_mac_mar_set - Set MAC address register
143 * @hw: Pointer to the HW structure 167 * @hw: Pointer to the HW structure
@@ -189,6 +213,17 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
189 return; 213 return;
190} 214}
191 215
216static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
217{
218 /* Read the MAC address. and store to the private data */
219 pch_gbe_mac_read_mac_addr(hw);
220 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
221 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
222 /* Setup the MAC address */
223 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
224 return;
225}
226
192/** 227/**
193 * pch_gbe_mac_init_rx_addrs - Initialize receive address's 228 * pch_gbe_mac_init_rx_addrs - Initialize receive address's
194 * @hw: Pointer to the HW structure 229 * @hw: Pointer to the HW structure
@@ -671,13 +706,8 @@ static void pch_gbe_setup_rctl(struct pch_gbe_adapter *adapter)
671 706
672 tcpip = ioread32(&hw->reg->TCPIP_ACC); 707 tcpip = ioread32(&hw->reg->TCPIP_ACC);
673 708
674 if (netdev->features & NETIF_F_RXCSUM) { 709 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
675 tcpip &= ~PCH_GBE_RX_TCPIPACC_OFF; 710 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
676 tcpip |= PCH_GBE_RX_TCPIPACC_EN;
677 } else {
678 tcpip |= PCH_GBE_RX_TCPIPACC_OFF;
679 tcpip &= ~PCH_GBE_RX_TCPIPACC_EN;
680 }
681 iowrite32(tcpip, &hw->reg->TCPIP_ACC); 711 iowrite32(tcpip, &hw->reg->TCPIP_ACC);
682 return; 712 return;
683} 713}
@@ -1090,6 +1120,35 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
1090 spin_unlock_irqrestore(&adapter->stats_lock, flags); 1120 spin_unlock_irqrestore(&adapter->stats_lock, flags);
1091} 1121}
1092 1122
1123static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
1124{
1125 struct pch_gbe_hw *hw = &adapter->hw;
1126 u32 rxdma;
1127 u16 value;
1128 int ret;
1129
1130 /* Disable Receive DMA */
1131 rxdma = ioread32(&hw->reg->DMA_CTRL);
1132 rxdma &= ~PCH_GBE_RX_DMA_EN;
1133 iowrite32(rxdma, &hw->reg->DMA_CTRL);
1134 /* Wait Rx DMA BUS is IDLE */
1135 ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
1136 if (ret) {
1137 /* Disable Bus master */
1138 pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
1139 value &= ~PCI_COMMAND_MASTER;
1140 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1141 /* Stop Receive */
1142 pch_gbe_mac_reset_rx(hw);
1143 /* Enable Bus master */
1144 value |= PCI_COMMAND_MASTER;
1145 pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
1146 } else {
1147 /* Stop Receive */
1148 pch_gbe_mac_reset_rx(hw);
1149 }
1150}
1151
1093static void pch_gbe_start_receive(struct pch_gbe_hw *hw) 1152static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
1094{ 1153{
1095 u32 rxdma; 1154 u32 rxdma;
@@ -1129,7 +1188,15 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1129 if (int_st & PCH_GBE_INT_RX_FRAME_ERR) 1188 if (int_st & PCH_GBE_INT_RX_FRAME_ERR)
1130 adapter->stats.intr_rx_frame_err_count++; 1189 adapter->stats.intr_rx_frame_err_count++;
1131 if (int_st & PCH_GBE_INT_RX_FIFO_ERR) 1190 if (int_st & PCH_GBE_INT_RX_FIFO_ERR)
1132 adapter->stats.intr_rx_fifo_err_count++; 1191 if (!adapter->rx_stop_flag) {
1192 adapter->stats.intr_rx_fifo_err_count++;
1193 pr_debug("Rx fifo over run\n");
1194 adapter->rx_stop_flag = true;
1195 int_en = ioread32(&hw->reg->INT_EN);
1196 iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
1197 &hw->reg->INT_EN);
1198 pch_gbe_stop_receive(adapter);
1199 }
1133 if (int_st & PCH_GBE_INT_RX_DMA_ERR) 1200 if (int_st & PCH_GBE_INT_RX_DMA_ERR)
1134 adapter->stats.intr_rx_dma_err_count++; 1201 adapter->stats.intr_rx_dma_err_count++;
1135 if (int_st & PCH_GBE_INT_TX_FIFO_ERR) 1202 if (int_st & PCH_GBE_INT_TX_FIFO_ERR)
@@ -1141,7 +1208,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
1141 /* When Rx descriptor is empty */ 1208 /* When Rx descriptor is empty */
1142 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) { 1209 if ((int_st & PCH_GBE_INT_RX_DSC_EMP)) {
1143 adapter->stats.intr_rx_dsc_empty_count++; 1210 adapter->stats.intr_rx_dsc_empty_count++;
1144 pr_err("Rx descriptor is empty\n"); 1211 pr_debug("Rx descriptor is empty\n");
1145 int_en = ioread32(&hw->reg->INT_EN); 1212 int_en = ioread32(&hw->reg->INT_EN);
1146 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN); 1213 iowrite32((int_en & ~PCH_GBE_INT_RX_DSC_EMP), &hw->reg->INT_EN);
1147 if (hw->mac.tx_fc_enable) { 1214 if (hw->mac.tx_fc_enable) {
@@ -1191,29 +1258,23 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1191 unsigned int i; 1258 unsigned int i;
1192 unsigned int bufsz; 1259 unsigned int bufsz;
1193 1260
1194 bufsz = adapter->rx_buffer_len + PCH_GBE_DMA_ALIGN; 1261 bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
1195 i = rx_ring->next_to_use; 1262 i = rx_ring->next_to_use;
1196 1263
1197 while ((cleaned_count--)) { 1264 while ((cleaned_count--)) {
1198 buffer_info = &rx_ring->buffer_info[i]; 1265 buffer_info = &rx_ring->buffer_info[i];
1199 skb = buffer_info->skb; 1266 skb = netdev_alloc_skb(netdev, bufsz);
1200 if (skb) { 1267 if (unlikely(!skb)) {
1201 skb_trim(skb, 0); 1268 /* Better luck next round */
1202 } else { 1269 adapter->stats.rx_alloc_buff_failed++;
1203 skb = netdev_alloc_skb(netdev, bufsz); 1270 break;
1204 if (unlikely(!skb)) {
1205 /* Better luck next round */
1206 adapter->stats.rx_alloc_buff_failed++;
1207 break;
1208 }
1209 /* 64byte align */
1210 skb_reserve(skb, PCH_GBE_DMA_ALIGN);
1211
1212 buffer_info->skb = skb;
1213 buffer_info->length = adapter->rx_buffer_len;
1214 } 1271 }
1272 /* align */
1273 skb_reserve(skb, NET_IP_ALIGN);
1274 buffer_info->skb = skb;
1275
1215 buffer_info->dma = dma_map_single(&pdev->dev, 1276 buffer_info->dma = dma_map_single(&pdev->dev,
1216 skb->data, 1277 buffer_info->rx_buffer,
1217 buffer_info->length, 1278 buffer_info->length,
1218 DMA_FROM_DEVICE); 1279 DMA_FROM_DEVICE);
1219 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) { 1280 if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) {
@@ -1246,6 +1307,36 @@ pch_gbe_alloc_rx_buffers(struct pch_gbe_adapter *adapter,
1246 return; 1307 return;
1247} 1308}
1248 1309
1310static int
1311pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
1312 struct pch_gbe_rx_ring *rx_ring, int cleaned_count)
1313{
1314 struct pci_dev *pdev = adapter->pdev;
1315 struct pch_gbe_buffer *buffer_info;
1316 unsigned int i;
1317 unsigned int bufsz;
1318 unsigned int size;
1319
1320 bufsz = adapter->rx_buffer_len;
1321
1322 size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
1323 rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
1324 &rx_ring->rx_buff_pool_logic,
1325 GFP_KERNEL);
1326 if (!rx_ring->rx_buff_pool) {
1327 pr_err("Unable to allocate memory for the receive poll buffer\n");
1328 return -ENOMEM;
1329 }
1330 memset(rx_ring->rx_buff_pool, 0, size);
1331 rx_ring->rx_buff_pool_size = size;
1332 for (i = 0; i < rx_ring->count; i++) {
1333 buffer_info = &rx_ring->buffer_info[i];
1334 buffer_info->rx_buffer = rx_ring->rx_buff_pool + bufsz * i;
1335 buffer_info->length = bufsz;
1336 }
1337 return 0;
1338}
1339
1249/** 1340/**
1250 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers 1341 * pch_gbe_alloc_tx_buffers - Allocate transmit buffers
1251 * @adapter: Board private structure 1342 * @adapter: Board private structure
@@ -1386,7 +1477,7 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1386 unsigned int i; 1477 unsigned int i;
1387 unsigned int cleaned_count = 0; 1478 unsigned int cleaned_count = 0;
1388 bool cleaned = false; 1479 bool cleaned = false;
1389 struct sk_buff *skb, *new_skb; 1480 struct sk_buff *skb;
1390 u8 dma_status; 1481 u8 dma_status;
1391 u16 gbec_status; 1482 u16 gbec_status;
1392 u32 tcp_ip_status; 1483 u32 tcp_ip_status;
@@ -1407,13 +1498,12 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1407 rx_desc->gbec_status = DSC_INIT16; 1498 rx_desc->gbec_status = DSC_INIT16;
1408 buffer_info = &rx_ring->buffer_info[i]; 1499 buffer_info = &rx_ring->buffer_info[i];
1409 skb = buffer_info->skb; 1500 skb = buffer_info->skb;
1501 buffer_info->skb = NULL;
1410 1502
1411 /* unmap dma */ 1503 /* unmap dma */
1412 dma_unmap_single(&pdev->dev, buffer_info->dma, 1504 dma_unmap_single(&pdev->dev, buffer_info->dma,
1413 buffer_info->length, DMA_FROM_DEVICE); 1505 buffer_info->length, DMA_FROM_DEVICE);
1414 buffer_info->mapped = false; 1506 buffer_info->mapped = false;
1415 /* Prefetch the packet */
1416 prefetch(skb->data);
1417 1507
1418 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x " 1508 pr_debug("RxDecNo = 0x%04x Status[DMA:0x%02x GBE:0x%04x "
1419 "TCP:0x%08x] BufInf = 0x%p\n", 1509 "TCP:0x%08x] BufInf = 0x%p\n",
@@ -1433,70 +1523,16 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1433 pr_err("Receive CRC Error\n"); 1523 pr_err("Receive CRC Error\n");
1434 } else { 1524 } else {
1435 /* get receive length */ 1525 /* get receive length */
1436 /* length convert[-3] */ 1526 /* length convert[-3], length includes FCS length */
1437 length = (rx_desc->rx_words_eob) - 3; 1527 length = (rx_desc->rx_words_eob) - 3 - ETH_FCS_LEN;
1438 1528 if (rx_desc->rx_words_eob & 0x02)
1439 /* Decide the data conversion method */ 1529 length = length - 4;
1440 if (!(netdev->features & NETIF_F_RXCSUM)) { 1530 /*
1441 /* [Header:14][payload] */ 1531 * buffer_info->rx_buffer: [Header:14][payload]
1442 if (NET_IP_ALIGN) { 1532 * skb->data: [Reserve:2][Header:14][payload]
1443 /* Because alignment differs, 1533 */
1444 * the new_skb is newly allocated, 1534 memcpy(skb->data, buffer_info->rx_buffer, length);
1445 * and data is copied to new_skb.*/ 1535
1446 new_skb = netdev_alloc_skb(netdev,
1447 length + NET_IP_ALIGN);
1448 if (!new_skb) {
1449 /* dorrop error */
1450 pr_err("New skb allocation "
1451 "Error\n");
1452 goto dorrop;
1453 }
1454 skb_reserve(new_skb, NET_IP_ALIGN);
1455 memcpy(new_skb->data, skb->data,
1456 length);
1457 skb = new_skb;
1458 } else {
1459 /* DMA buffer is used as SKB as it is.*/
1460 buffer_info->skb = NULL;
1461 }
1462 } else {
1463 /* [Header:14][padding:2][payload] */
1464 /* The length includes padding length */
1465 length = length - PCH_GBE_DMA_PADDING;
1466 if ((length < copybreak) ||
1467 (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
1468 /* Because alignment differs,
1469 * the new_skb is newly allocated,
1470 * and data is copied to new_skb.
1471 * Padding data is deleted
1472 * at the time of a copy.*/
1473 new_skb = netdev_alloc_skb(netdev,
1474 length + NET_IP_ALIGN);
1475 if (!new_skb) {
1476 /* dorrop error */
1477 pr_err("New skb allocation "
1478 "Error\n");
1479 goto dorrop;
1480 }
1481 skb_reserve(new_skb, NET_IP_ALIGN);
1482 memcpy(new_skb->data, skb->data,
1483 ETH_HLEN);
1484 memcpy(&new_skb->data[ETH_HLEN],
1485 &skb->data[ETH_HLEN +
1486 PCH_GBE_DMA_PADDING],
1487 length - ETH_HLEN);
1488 skb = new_skb;
1489 } else {
1490 /* Padding data is deleted
1491 * by moving header data.*/
1492 memmove(&skb->data[PCH_GBE_DMA_PADDING],
1493 &skb->data[0], ETH_HLEN);
1494 skb_reserve(skb, NET_IP_ALIGN);
1495 buffer_info->skb = NULL;
1496 }
1497 }
1498 /* The length includes FCS length */
1499 length = length - ETH_FCS_LEN;
1500 /* update status of driver */ 1536 /* update status of driver */
1501 adapter->stats.rx_bytes += length; 1537 adapter->stats.rx_bytes += length;
1502 adapter->stats.rx_packets++; 1538 adapter->stats.rx_packets++;
@@ -1515,7 +1551,6 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1515 pr_debug("Receive skb->ip_summed: %d length: %d\n", 1551 pr_debug("Receive skb->ip_summed: %d length: %d\n",
1516 skb->ip_summed, length); 1552 skb->ip_summed, length);
1517 } 1553 }
1518dorrop:
1519 /* return some buffers to hardware, one at a time is too slow */ 1554 /* return some buffers to hardware, one at a time is too slow */
1520 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) { 1555 if (unlikely(cleaned_count >= PCH_GBE_RX_BUFFER_WRITE)) {
1521 pch_gbe_alloc_rx_buffers(adapter, rx_ring, 1556 pch_gbe_alloc_rx_buffers(adapter, rx_ring,
@@ -1720,6 +1755,11 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1720 pr_err("Error: can't bring device up\n"); 1755 pr_err("Error: can't bring device up\n");
1721 return err; 1756 return err;
1722 } 1757 }
1758 err = pch_gbe_alloc_rx_buffers_pool(adapter, rx_ring, rx_ring->count);
1759 if (err) {
1760 pr_err("Error: can't bring device up\n");
1761 return err;
1762 }
1723 pch_gbe_alloc_tx_buffers(adapter, tx_ring); 1763 pch_gbe_alloc_tx_buffers(adapter, tx_ring);
1724 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count); 1764 pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
1725 adapter->tx_queue_len = netdev->tx_queue_len; 1765 adapter->tx_queue_len = netdev->tx_queue_len;
@@ -1741,6 +1781,7 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
1741void pch_gbe_down(struct pch_gbe_adapter *adapter) 1781void pch_gbe_down(struct pch_gbe_adapter *adapter)
1742{ 1782{
1743 struct net_device *netdev = adapter->netdev; 1783 struct net_device *netdev = adapter->netdev;
1784 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
1744 1785
1745 /* signal that we're down so the interrupt handler does not 1786 /* signal that we're down so the interrupt handler does not
1746 * reschedule our watchdog timer */ 1787 * reschedule our watchdog timer */
@@ -1759,6 +1800,12 @@ void pch_gbe_down(struct pch_gbe_adapter *adapter)
1759 pch_gbe_reset(adapter); 1800 pch_gbe_reset(adapter);
1760 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring); 1801 pch_gbe_clean_tx_ring(adapter, adapter->tx_ring);
1761 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring); 1802 pch_gbe_clean_rx_ring(adapter, adapter->rx_ring);
1803
1804 pci_free_consistent(adapter->pdev, rx_ring->rx_buff_pool_size,
1805 rx_ring->rx_buff_pool, rx_ring->rx_buff_pool_logic);
1806 rx_ring->rx_buff_pool_logic = 0;
1807 rx_ring->rx_buff_pool_size = 0;
1808 rx_ring->rx_buff_pool = NULL;
1762} 1809}
1763 1810
1764/** 1811/**
@@ -2011,6 +2058,8 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2011{ 2058{
2012 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 2059 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
2013 int max_frame; 2060 int max_frame;
2061 unsigned long old_rx_buffer_len = adapter->rx_buffer_len;
2062 int err;
2014 2063
2015 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2064 max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
2016 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 2065 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
@@ -2025,14 +2074,24 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
2025 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192) 2074 else if (max_frame <= PCH_GBE_FRAME_SIZE_8192)
2026 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192; 2075 adapter->rx_buffer_len = PCH_GBE_FRAME_SIZE_8192;
2027 else 2076 else
2028 adapter->rx_buffer_len = PCH_GBE_MAX_JUMBO_FRAME_SIZE; 2077 adapter->rx_buffer_len = PCH_GBE_MAX_RX_BUFFER_SIZE;
2029 netdev->mtu = new_mtu;
2030 adapter->hw.mac.max_frame_size = max_frame;
2031 2078
2032 if (netif_running(netdev)) 2079 if (netif_running(netdev)) {
2033 pch_gbe_reinit_locked(adapter); 2080 pch_gbe_down(adapter);
2034 else 2081 err = pch_gbe_up(adapter);
2082 if (err) {
2083 adapter->rx_buffer_len = old_rx_buffer_len;
2084 pch_gbe_up(adapter);
2085 return -ENOMEM;
2086 } else {
2087 netdev->mtu = new_mtu;
2088 adapter->hw.mac.max_frame_size = max_frame;
2089 }
2090 } else {
2035 pch_gbe_reset(adapter); 2091 pch_gbe_reset(adapter);
2092 netdev->mtu = new_mtu;
2093 adapter->hw.mac.max_frame_size = max_frame;
2094 }
2036 2095
2037 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n", 2096 pr_debug("max_frame : %d rx_buffer_len : %d mtu : %d max_frame_size : %d\n",
2038 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu, 2097 max_frame, (u32) adapter->rx_buffer_len, netdev->mtu,
@@ -2110,6 +2169,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2110 int work_done = 0; 2169 int work_done = 0;
2111 bool poll_end_flag = false; 2170 bool poll_end_flag = false;
2112 bool cleaned = false; 2171 bool cleaned = false;
2172 u32 int_en;
2113 2173
2114 pr_debug("budget : %d\n", budget); 2174 pr_debug("budget : %d\n", budget);
2115 2175
@@ -2117,8 +2177,15 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
2117 if (!netif_carrier_ok(netdev)) { 2177 if (!netif_carrier_ok(netdev)) {
2118 poll_end_flag = true; 2178 poll_end_flag = true;
2119 } else { 2179 } else {
2120 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2121 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); 2180 pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget);
2181 if (adapter->rx_stop_flag) {
2182 adapter->rx_stop_flag = false;
2183 pch_gbe_start_receive(&adapter->hw);
2184 int_en = ioread32(&adapter->hw.reg->INT_EN);
2185 iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR),
2186 &adapter->hw.reg->INT_EN);
2187 }
2188 cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring);
2122 2189
2123 if (cleaned) 2190 if (cleaned)
2124 work_done = budget; 2191 work_done = budget;