author    Eilon Greenstein <eilong@broadcom.com>    2009-01-15 00:23:36 -0500
committer David S. Miller <davem@davemloft.net>     2009-01-15 11:28:10 -0500
commit    58f4c4cfce5c4715b79621f0a635925c55f855d5 (patch)
tree      08babde2d71ebabce63551950a61528500c0c4cc /drivers/net/bnx2x_main.c
parent    1cf167f27ad2720af11ee8aa350009342f909e70 (diff)
bnx2x: Missing memory barriers
While working on IA64, it became clear that the following memory barriers are missing.

Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
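For readers who have not chased this class of bug before, here is a minimal, self-contained user-space analogue of the pattern the patch enforces: fill in the descriptor, issue a write barrier, and only then publish the producer index. It is an illustration only, not driver code; struct desc, ring[], prod_idx and the two thread functions are invented for the example, and C11 fences stand in for the kernel's wmb() (which additionally orders writes as seen by a DMA agent, something no user-space fence can model).

/*
 * Sketch: publish-after-barrier producer/consumer.  Not bnx2x code.
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 16

struct desc {
	unsigned long buf;	/* would be a DMA address in the driver */
	unsigned int  len;
};

static struct desc ring[RING_SIZE];
static atomic_uint prod_idx;	/* published producer index */

static void *producer(void *arg)
{
	(void)arg;
	for (unsigned int i = 0; i < RING_SIZE; i++) {
		ring[i].buf = 0x1000 + i;	/* 1. fill the descriptor */
		ring[i].len = 64;

		/* 2. order the descriptor writes before the index update,
		 *    as wmb() does before the producer REG_WR in the patch.
		 */
		atomic_thread_fence(memory_order_release);

		/* 3. publish: the consumer (firmware, in the real driver)
		 *    may read the descriptor as soon as it sees this.
		 */
		atomic_store_explicit(&prod_idx, i + 1, memory_order_relaxed);
	}
	return NULL;
}

static void *consumer(void *arg)
{
	unsigned int seen = 0;

	(void)arg;
	while (seen < RING_SIZE) {
		unsigned int p = atomic_load_explicit(&prod_idx,
						      memory_order_acquire);
		while (seen < p) {
			/* Because of the fence, buf/len are valid here. */
			printf("desc %u: buf=%#lx len=%u\n",
			       seen, ring[seen].buf, ring[seen].len);
			seen++;
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, consumer, NULL);
	pthread_create(&p, NULL, producer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);
	return 0;
}

On a strongly ordered machine such as x86 the unfenced version usually appears to work, because stores are not reordered with other stores; on IA-64 and other weakly ordered architectures the consumer can observe the new producer index before the descriptor contents, which is exactly the failure the added wmb() calls close.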
Diffstat (limited to 'drivers/net/bnx2x_main.c')
-rw-r--r--   drivers/net/bnx2x_main.c   28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index cc6ffba74592..f0b2e73b87f7 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1357,11 +1357,23 @@ static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
 	rx_prods.cqe_prod = rx_comp_prod;
 	rx_prods.sge_prod = rx_sge_prod;
 
+	/*
+	 * Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW will
+	 * assumes BDs must have buffers.
+	 */
+	wmb();
+
 	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
 		REG_WR(bp, BAR_TSTRORM_INTMEM +
 		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
 		       ((u32 *)&rx_prods)[i]);
 
+	mmiowb(); /* keep prod updates ordered */
+
 	DP(NETIF_MSG_RX_STATUS,
 	   "Wrote: bd_prod %u cqe_prod %u sge_prod %u\n",
 	   bd_prod, rx_comp_prod, rx_sge_prod);
@@ -1582,7 +1594,6 @@ next_cqe:
 	/* Update producers */
 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
 			     fp->rx_sge_prod);
-	mmiowb(); /* keep prod updates ordered */
 
 	fp->rx_pkt += rx_pkt;
 	fp->rx_calls++;
@@ -8729,6 +8740,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
 	tx_bd->general_data = ((UNICAST_ADDRESS <<
 				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
 
+	wmb();
+
 	fp->hw_tx_prods->bds_prod =
 		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
 	mb(); /* FW restriction: must not reorder writing nbd and packets */
@@ -8780,7 +8793,6 @@ test_loopback_rx_exit:
 	/* Update producers */
 	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
 			     fp->rx_sge_prod);
-	mmiowb(); /* keep prod updates ordered */
 
 test_loopback_exit:
 	bp->link_params.loopback_mode = LOOPBACK_NONE;
@@ -9707,6 +9719,15 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
 
+	/*
+	 * Make sure that the BD data is updated before updating the producer
+	 * since FW might read the BD right after the producer is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW will
+	 * assumes packets must have BDs.
+	 */
+	wmb();
+
 	fp->hw_tx_prods->bds_prod =
 		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
 	mb(); /* FW restriction: must not reorder writing nbd and packets */
@@ -9720,6 +9741,9 @@ static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 
 	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
+		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
+		   if we put Tx into XOFF state. */
+		smp_mb();
 		netif_stop_queue(dev);
 		bp->eth_stats.driver_xoff++;
 		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
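The smp_mb() in the last hunk addresses a different ordering problem: the stop/wake handshake between the xmit path and the Tx completion path. The sketch below is a rough, runnable user-space model of that handshake; txq, tx_avail(), THRESHOLD, xmit_one() and complete_one() are invented names, and seq_cst fences stand in for smp_mb(). It is not the bnx2x code, only an illustration of why the producer update must be visible before the queue is marked stopped.

/*
 * Sketch: stop/wake ordering for a Tx ring.  Not bnx2x code.
 * Build with: cc -std=c11 stopwake.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE  8
#define THRESHOLD  3

struct txq {
	atomic_uint prod;	/* slots filled by the xmit path      */
	atomic_uint cons;	/* slots freed by the completion path */
	atomic_bool stopped;	/* queue in XOFF state                */
};

static unsigned int tx_avail(struct txq *q)
{
	return RING_SIZE - (atomic_load(&q->prod) - atomic_load(&q->cons));
}

/* Modeled loosely on the tail of bnx2x_start_xmit() after this patch. */
static void xmit_one(struct txq *q)
{
	atomic_fetch_add(&q->prod, 1);		/* publish the new BD */

	if (tx_avail(q) < THRESHOLD) {
		/* Like smp_mb(): the completion path must see the new
		 * prod value once it sees stopped == true.
		 */
		atomic_thread_fence(memory_order_seq_cst);
		atomic_store(&q->stopped, true);

		/* Re-check: a completion may have freed slots meanwhile. */
		if (tx_avail(q) >= THRESHOLD)
			atomic_store(&q->stopped, false);
	}
}

/* Modeled loosely on the consumer side (the Tx completion handler). */
static void complete_one(struct txq *q)
{
	atomic_fetch_add(&q->cons, 1);		/* free one BD */

	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&q->stopped) && tx_avail(q) >= THRESHOLD)
		atomic_store(&q->stopped, false);	/* "wake" the queue */
}

int main(void)
{
	struct txq q = { 0 };

	for (int i = 0; i < RING_SIZE; i++)	/* the demo never overfills */
		xmit_one(&q);
	printf("after filling: avail=%u stopped=%d\n",
	       tx_avail(&q), atomic_load(&q.stopped));

	for (int i = 0; i < RING_SIZE; i++)
		complete_one(&q);
	printf("after draining: avail=%u stopped=%d\n",
	       tx_avail(&q), atomic_load(&q.stopped));
	return 0;
}

The second bnx2x_tx_avail() test after netif_stop_queue() in the hunk plays the same role as the re-check in xmit_one(): it catches space freed by a completion that ran in the window between the first availability check and the stop, so the queue does not remain in XOFF waiting for an event that has already happened.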