author	Veaceslav Falico <vfalico@redhat.com>	2012-10-22 00:43:25 -0400
committer	David S. Miller <davem@davemloft.net>	2012-10-23 02:31:15 -0400
commit	a35279f002c5843c2b2faaa4538963c8eb18446c (patch)
tree	3b5c10e54ffcc850363f1018075668b455bc82e9 /drivers/net
parent	9c0314e111a540620441b27aeedb55f572f812f5 (diff)
pch_gbe: don't reset MAC_RX on FIFO overflow
Currently, when a FIFO_ERR happens, we stop the DMA, wait for it to become
idle and then reset the whole MAC_RX logic (after which we must re-set the
multicast addresses and re-enable MAC_RX before we are ready to accept new
packets). This leads to CRC errors under a high rate of incoming packets
and, according to the datasheet, is not needed.

This patch fixes it with the following steps:

1) remove this reset in pch_gbe_stop_receive(), which leaves some functions
   unused
2) remove the now-unused functions pch_gbe_wait_clr_bit_irq() and
   pch_gbe_mac_reset_rx() so the driver still builds cleanly
3) move pch_gbe_enable_mac_rx() out of pch_gbe_start_receive() and into
   pch_gbe_up(), the only place it is still needed once the MAC_RX reset
   is gone
4) rename pch_gbe_start/stop_receive() to pch_gbe_enable/disable_dma_rx()
   to reflect more precisely what the functions now do

After these changes the CRC errors no longer appear, and we gain some
increase in RX processing speed.

Signed-off-by: Veaceslav Falico <vfalico@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
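For readers skimming the diff below, the resulting control flow can be summarized
in a small stand-alone C sketch. This is not driver code: register access and the
interrupt/NAPI plumbing are stubbed out with plain globals and printf, and the bit
values and the MAC_RX enable bit name are placeholders; only the ordering of calls
mirrors the patched driver (pch_gbe_up(), pch_gbe_intr() on
PCH_GBE_INT_RX_FIFO_ERR, and pch_gbe_napi_poll()).

/* Stand-alone sketch of RX FIFO overflow handling after this patch.
 * Register access is modelled with plain globals; the bit values are
 * placeholders, not the real hardware definitions. */
#include <stdio.h>
#include <stdint.h>

#define RX_DMA_EN  (1u << 0)   /* placeholder for PCH_GBE_RX_DMA_EN */
#define MAC_RX_EN  (1u << 1)   /* placeholder for the MAC_RX enable bit */

static uint32_t dma_ctrl;      /* stands in for hw->reg->DMA_CTRL */
static uint32_t rx_mode;       /* stands in for the MAC RX control register */

/* pch_gbe_intr(): on an RX FIFO overflow only the RX DMA is stopped;
 * MAC_RX keeps running, so no reset and no multicast reprogramming. */
static void disable_dma_rx(void)
{
	dma_ctrl &= ~RX_DMA_EN;
	printf("FIFO overflow: RX DMA off, MAC_RX untouched\n");
}

/* pch_gbe_napi_poll(): once the ring is drained, RX DMA is switched back on. */
static void enable_dma_rx(void)
{
	dma_ctrl |= RX_DMA_EN;
	printf("ring drained: RX DMA on\n");
}

/* pch_gbe_up(): MAC_RX is enabled exactly once, when the interface comes up. */
static void enable_mac_rx(void)
{
	rx_mode |= MAC_RX_EN;
	printf("interface up: MAC_RX on\n");
}

int main(void)
{
	enable_dma_rx();   /* pch_gbe_up() */
	enable_mac_rx();   /* pch_gbe_up(), moved here from start_receive() */
	disable_dma_rx();  /* pch_gbe_intr() sets rx_stop_flag */
	enable_dma_rx();   /* pch_gbe_napi_poll() sees rx_stop_flag */
	return 0;
}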
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c	| 66
1 file changed, 6 insertions(+), 60 deletions(-)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 4ffad780c7ac..a8854d04c275 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -339,26 +339,6 @@ static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
 }
 
 /**
- * pch_gbe_wait_clr_bit_irq - Wait to clear a bit for interrupt context
- * @reg: Pointer of register
- * @busy: Busy bit
- */
-static int pch_gbe_wait_clr_bit_irq(void *reg, u32 bit)
-{
-	u32 tmp;
-	int ret = -1;
-	/* wait busy */
-	tmp = 20;
-	while ((ioread32(reg) & bit) && --tmp)
-		udelay(5);
-	if (!tmp)
-		pr_err("Error: busy bit is not cleared\n");
-	else
-		ret = 0;
-	return ret;
-}
-
-/**
  * pch_gbe_mac_mar_set - Set MAC address register
  * @hw: Pointer to the HW structure
  * @addr: Pointer to the MAC address
@@ -409,17 +389,6 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
 	return;
 }
 
-static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
-{
-	/* Read the MAC addresses. and store to the private data */
-	pch_gbe_mac_read_mac_addr(hw);
-	iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
-	pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
-	/* Setup the MAC addresses */
-	pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
-	return;
-}
-
 static void pch_gbe_disable_mac_rx(struct pch_gbe_hw *hw)
 {
 	u32 rctl;
@@ -1330,38 +1299,17 @@ void pch_gbe_update_stats(struct pch_gbe_adapter *adapter)
 	spin_unlock_irqrestore(&adapter->stats_lock, flags);
 }
 
-static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
+static void pch_gbe_disable_dma_rx(struct pch_gbe_hw *hw)
 {
-	struct pch_gbe_hw *hw = &adapter->hw;
 	u32 rxdma;
-	u16 value;
-	int ret;
 
 	/* Disable Receive DMA */
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
 	rxdma &= ~PCH_GBE_RX_DMA_EN;
 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
-	/* Wait Rx DMA BUS is IDLE */
-	ret = pch_gbe_wait_clr_bit_irq(&hw->reg->RX_DMA_ST, PCH_GBE_IDLE_CHECK);
-	if (ret) {
-		/* Disable Bus master */
-		pci_read_config_word(adapter->pdev, PCI_COMMAND, &value);
-		value &= ~PCI_COMMAND_MASTER;
-		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
-		/* Stop Receive */
-		pch_gbe_mac_reset_rx(hw);
-		/* Enable Bus master */
-		value |= PCI_COMMAND_MASTER;
-		pci_write_config_word(adapter->pdev, PCI_COMMAND, value);
-	} else {
-		/* Stop Receive */
-		pch_gbe_mac_reset_rx(hw);
-	}
-	/* reprogram multicast address register after reset */
-	pch_gbe_set_multi(adapter->netdev);
 }
 
-static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
+static void pch_gbe_enable_dma_rx(struct pch_gbe_hw *hw)
 {
 	u32 rxdma;
 
@@ -1369,9 +1317,6 @@ static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
 	rxdma = ioread32(&hw->reg->DMA_CTRL);
 	rxdma |= PCH_GBE_RX_DMA_EN;
 	iowrite32(rxdma, &hw->reg->DMA_CTRL);
-
-	pch_gbe_enable_mac_rx(hw);
-	return;
 }
 
 /**
@@ -1407,7 +1352,7 @@ static irqreturn_t pch_gbe_intr(int irq, void *data)
 			int_en = ioread32(&hw->reg->INT_EN);
 			iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR),
 				  &hw->reg->INT_EN);
-			pch_gbe_stop_receive(adapter);
+			pch_gbe_disable_dma_rx(&adapter->hw);
 			int_st |= ioread32(&hw->reg->INT_ST);
 			int_st = int_st & ioread32(&hw->reg->INT_EN);
 		}
@@ -2014,7 +1959,8 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
 	pch_gbe_alloc_tx_buffers(adapter, tx_ring);
 	pch_gbe_alloc_rx_buffers(adapter, rx_ring, rx_ring->count);
 	adapter->tx_queue_len = netdev->tx_queue_len;
-	pch_gbe_start_receive(&adapter->hw);
+	pch_gbe_enable_dma_rx(&adapter->hw);
+	pch_gbe_enable_mac_rx(&adapter->hw);
 
 	mod_timer(&adapter->watchdog_timer, jiffies);
 
@@ -2440,7 +2386,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
 
 	if (adapter->rx_stop_flag) {
 		adapter->rx_stop_flag = false;
-		pch_gbe_start_receive(&adapter->hw);
+		pch_gbe_enable_dma_rx(&adapter->hw);
 	}
 
 	pr_debug("poll_end_flag : %d work_done : %d budget : %d\n",