author	Neil Turton <nturton@solarflare.com>	2011-04-11 06:42:43 -0400
committer	Ben Hutchings <bhutchings@solarflare.com>	2011-04-12 18:52:44 -0400
commit	fcfa060468a4edcf776f0c1211d826d5de1668c1 (patch)
tree	3e8387932c5cc10facffa248ecfb648564a9ae5e /drivers/net
parent	d4fabcc8e8ecac21262b1a5b9684fe415b128bd2 (diff)
sfc: Use rmb() to ensure reads occur in order
Enabling write-combining may also enable read reordering. The BIU is only guaranteed to read from a 128-bit CSR or 64-bit SRAM word when the host reads from its lowest address; otherwise the BIU may use the latched value. Therefore we need to reinstate the read memory barriers after the first read operation for each CSR or SRAM word.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
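A minimal sketch of the read pattern this patch reinstates, assuming a hypothetical 128-bit CSR at offset "reg" (the struct and function names below are illustrative, not part of the sfc driver): the read of the lowest address latches the whole register in the BIU, and rmb() keeps the reads of the latched upper words from being reordered ahead of it.

/* Illustrative only -- mirrors the pattern of efx_reado() in
 * drivers/net/sfc/io.h; "csr128" and "csr128_read" are hypothetical names.
 */
#include <linux/io.h>
#include <asm/barrier.h>

struct csr128 {
	u32 word[4];	/* one 128-bit CSR read as four 32-bit words */
};

static inline void csr128_read(void __iomem *membase, unsigned int reg,
			       struct csr128 *value)
{
	/* Reading the lowest address latches the full 128-bit CSR value
	 * inside the BIU; the remaining words are served from that latch. */
	value->word[0] = __raw_readl(membase + reg + 0);

	/* A write-combining mapping may also allow read reordering, so make
	 * the latching read complete before the latched words below are read. */
	rmb();

	value->word[1] = __raw_readl(membase + reg + 4);
	value->word[2] = __raw_readl(membase + reg + 8);
	value->word[3] = __raw_readl(membase + reg + 12);
}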
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/sfc/io.h	| 2
1 file changed, 2 insertions, 0 deletions
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index d9d8c2ef1074..cc978803d484 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -152,6 +152,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 
 	spin_lock_irqsave(&efx->biu_lock, flags);
 	value->u32[0] = _efx_readd(efx, reg + 0);
+	rmb();
 	value->u32[1] = _efx_readd(efx, reg + 4);
 	value->u32[2] = _efx_readd(efx, reg + 8);
 	value->u32[3] = _efx_readd(efx, reg + 12);
@@ -174,6 +175,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
 	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
 	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
+	rmb();
 	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);