author	Sathya Perla <sathyap@serverengines.com>	2010-06-28 20:11:17 -0400
committer	David S. Miller <davem@davemloft.net>	2010-06-30 16:26:42 -0400
commit	f3eb62d2cc7da7bea4b394dd06f6bc738aa284e7 (patch)
tree	2e98c0b346690eeca0ea6cad6f8f21a9e16af476 /drivers/net/benet/be_main.c
parent	7e307c7ad5340b226966da6e564ec7f717da3adb (diff)
be2net: memory barrier fixes on IBM p7 platform
The IBM p7 architecture seems to reorder memory accesses more aggressively than previous ppc64 architectures. This requires memory barriers to ensure that rx/tx doorbells are rung only after the memory to be DMAed has been written.

Signed-off-by: Sathya Perla <sathyap@serverengines.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
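For illustration, the producer-side pattern the patch enforces is sketched below. This is a minimal, hypothetical example (demo_ring, demo_post, and the barrier-header comment are assumptions, not the driver's real structures): the descriptor store into DMA-visible memory must become globally visible before the MMIO doorbell write, otherwise a weakly ordered CPU such as POWER7 may let the doorbell reach the device while the descriptor is still sitting in a store buffer.

#include <linux/types.h>
#include <linux/io.h>		/* iowrite32() */
#include <asm/barrier.h>	/* wmb(); header location varies by kernel version */

/* Hypothetical ring, for illustration only. */
struct demo_ring {
	u32 *descs;		/* descriptor array the NIC DMA-reads */
	void __iomem *db;	/* mapped doorbell register */
	u16 head;
};

static void demo_post(struct demo_ring *ring, u32 desc)
{
	/* Fill the descriptor in memory the device will fetch. */
	ring->descs[ring->head++] = desc;

	/*
	 * Order the descriptor store before the doorbell write below;
	 * without this the device could DMA a stale descriptor on CPUs
	 * that reorder stores aggressively.
	 */
	wmb();

	/* Ring the doorbell; the device may start DMA immediately. */
	iowrite32(1, ring->db);
}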
Diffstat (limited to 'drivers/net/benet/be_main.c')
-rw-r--r--	drivers/net/benet/be_main.c	| 7 +++++++
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 01eb447f98b6..b63687956f2b 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -89,6 +89,8 @@ static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
 	u32 val = 0;
 	val |= qid & DB_RQ_RING_ID_MASK;
 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
+
+	wmb();
 	iowrite32(val, adapter->db + DB_RQ_OFFSET);
 }
 
@@ -97,6 +99,8 @@ static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
 	u32 val = 0;
 	val |= qid & DB_TXULP_RING_ID_MASK;
 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
+
+	wmb();
 	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
 }
 
@@ -973,6 +977,7 @@ static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
 	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
 		return NULL;
 
+	rmb();
 	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));
 
 	queue_tail_inc(&adapter->rx_obj.cq);
@@ -1066,6 +1071,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
 		return NULL;
 
+	rmb();
 	be_dws_le_to_cpu(txcp, sizeof(*txcp));
 
 	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
@@ -1113,6 +1119,7 @@ static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
 	if (!eqe->evt)
 		return NULL;
 
+	rmb();
 	eqe->evt = le32_to_cpu(eqe->evt);
 	queue_tail_inc(&eq_obj->q);
 	return eqe;
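The completion-path hunks are the mirror image of the doorbell ones: the hardware DMA-writes the entry and sets the valid flag (or eqe->evt) last, so the CPU must order the flag load before the payload loads. A minimal sketch of that consumer-side pairing, again with hypothetical names (demo_compl, demo_compl_get), not the driver's real layout:

#include <linux/types.h>
#include <asm/barrier.h>	/* rmb(); header location varies by kernel version */

/*
 * Hypothetical DMA-written completion entry; the device is assumed to
 * write 'payload' first and 'valid' last.
 */
struct demo_compl {
	u32 payload;
	u32 valid;
};

static struct demo_compl *demo_compl_get(struct demo_compl *entry)
{
	/* Nothing posted yet: the flag the device writes last is clear. */
	if (!entry->valid)
		return NULL;

	/*
	 * Order the 'valid' load before any payload loads; otherwise a
	 * speculating, weakly ordered CPU may have read 'payload' before
	 * the device's DMA write of it became visible.
	 */
	rmb();

	return entry;	/* payload is now safe to read */
}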