author		Alexander Duyck <alexander.h.duyck@redhat.com>	2015-04-07 19:55:14 -0400
committer	David S. Miller <davem@davemloft.net>		2015-04-08 12:15:14 -0400
commit		b4468cc6f2aeccaea74baa3b211a49851fd84158 (patch)
tree		1c9e7919c80749a6c7ca67b089e3d6773878c381
parent		04abac5fd6ad9341434add1c27047f4b16ada92c (diff)
sungem, sunhme, sunvnet: Update drivers to use dma_wmb/rmb
This patch goes through and replaces wmb()/rmb() with dma_wmb()/dma_rmb() in
the cases where the barrier is only ordering reads or writes to memory and
no programmed I/O is involved.
Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/ethernet/sun/sungem.c	| 14
-rw-r--r--	drivers/net/ethernet/sun/sunhme.c	| 16
-rw-r--r--	drivers/net/ethernet/sun/sunvnet.c	|  6
3 files changed, 18 insertions(+), 18 deletions(-)
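All three drivers use the same descriptor-handoff idiom this patch is about:
fill in the descriptor fields in coherent DMA memory, issue a write barrier,
and only then store the word that marks the descriptor as owned by the device.
A minimal sketch of that idiom with a made-up descriptor layout (fake_txd and
fake_tx_publish are illustrative, not structures from the drivers below):

struct fake_txd {
	__le64 buffer;		/* DMA address of the packet data */
	__le64 control_word;	/* ownership/length bits the device reads */
};

static void fake_tx_publish(struct fake_txd *txd, dma_addr_t mapping, u64 ctrl)
{
	txd->buffer = cpu_to_le64(mapping);

	/* Order the buffer store before the control_word store as seen
	 * by the device.  dma_wmb() is sufficient because both stores
	 * target coherent memory; a full wmb() would additionally order
	 * against MMIO, which is not needed at this point.
	 */
	dma_wmb();

	txd->control_word = cpu_to_le64(ctrl);
}

On x86, for example, dma_wmb() is just a compiler barrier while wmb() expands
to an sfence, which is the point of the conversion: same correctness on the
memory-only paths, at lower cost.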
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 74e9b148378c..e23a642357e7 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -718,7 +718,7 @@ static __inline__ void gem_post_rxds(struct gem *gp, int limit)
 	cluster_start = curr = (gp->rx_new & ~(4 - 1));
 	count = 0;
 	kick = -1;
-	wmb();
+	dma_wmb();
 	while (curr != limit) {
 		curr = NEXT_RX(curr);
 		if (++count == 4) {
@@ -1038,7 +1038,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 		if (gem_intme(entry))
 			ctrl |= TXDCTRL_INTME;
 		txd->buffer = cpu_to_le64(mapping);
-		wmb();
+		dma_wmb();
 		txd->control_word = cpu_to_le64(ctrl);
 		entry = NEXT_TX(entry);
 	} else {
@@ -1076,7 +1076,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 
 			txd = &gp->init_block->txd[entry];
 			txd->buffer = cpu_to_le64(mapping);
-			wmb();
+			dma_wmb();
 			txd->control_word = cpu_to_le64(this_ctrl | len);
 
 			if (gem_intme(entry))
@@ -1086,7 +1086,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 		}
 		txd = &gp->init_block->txd[first_entry];
 		txd->buffer = cpu_to_le64(first_mapping);
-		wmb();
+		dma_wmb();
 		txd->control_word =
 			cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len);
 	}
@@ -1585,7 +1585,7 @@ static void gem_clean_rings(struct gem *gp)
 			gp->rx_skbs[i] = NULL;
 		}
 		rxd->status_word = 0;
-		wmb();
+		dma_wmb();
 		rxd->buffer = 0;
 	}
 
@@ -1647,7 +1647,7 @@ static void gem_init_rings(struct gem *gp)
 				       RX_BUF_ALLOC_SIZE(gp),
 				       PCI_DMA_FROMDEVICE);
 		rxd->buffer = cpu_to_le64(dma_addr);
-		wmb();
+		dma_wmb();
 		rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp));
 		skb_reserve(skb, RX_OFFSET);
 	}
@@ -1656,7 +1656,7 @@ static void gem_init_rings(struct gem *gp)
 		struct gem_txd *txd = &gb->txd[i];
 
 		txd->control_word = 0;
-		wmb();
+		dma_wmb();
 		txd->buffer = 0;
 	}
 	wmb();
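One detail worth noting in the sungem hunks: the final wmb() in
gem_init_rings() (the last context line of the last hunk above) survives the
conversion. Ring initialization is followed by device register setup, and
ordering coherent-memory stores against MMIO writes is exactly the case
dma_wmb() does not cover, so the full barrier has to stay. A sketch of the
split (the kick write illustrates the idiom, not the driver's exact sequence):

	/* descriptor store vs. descriptor store: dma_wmb() is enough */
	txd->buffer = cpu_to_le64(mapping);
	dma_wmb();
	txd->control_word = cpu_to_le64(ctrl);

	/* descriptor stores vs. doorbell MMIO: a full wmb() is required */
	wmb();
	writel(new_tail, gp->regs + TXDMA_KICK);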
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 7a8ca2c7b7df..cf4dcff051d5 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -196,14 +196,14 @@ static u32 sbus_hme_read32(void __iomem *reg)
 static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
 {
 	rxd->rx_addr = (__force hme32)addr;
-	wmb();
+	dma_wmb();
 	rxd->rx_flags = (__force hme32)flags;
 }
 
 static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
 {
 	txd->tx_addr = (__force hme32)addr;
-	wmb();
+	dma_wmb();
 	txd->tx_flags = (__force hme32)flags;
 }
 
@@ -225,14 +225,14 @@ static u32 pci_hme_read32(void __iomem *reg)
 static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
 {
 	rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
-	wmb();
+	dma_wmb();
 	rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
 }
 
 static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
 {
 	txd->tx_addr = (__force hme32)cpu_to_le32(addr);
-	wmb();
+	dma_wmb();
 	txd->tx_flags = (__force hme32)cpu_to_le32(flags);
 }
 
@@ -268,12 +268,12 @@ static u32 pci_hme_read_desc32(hme32 *p)
 	sbus_readl(__reg)
 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
 do {	(__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
-	wmb(); \
+	dma_wmb(); \
 	(__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
 } while(0)
 #define hme_write_txd(__hp, __txd, __flags, __addr) \
 do {	(__txd)->tx_addr = (__force hme32)(u32)(__addr); \
-	wmb(); \
+	dma_wmb(); \
 	(__txd)->tx_flags = (__force hme32)(u32)(__flags); \
 } while(0)
 #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
@@ -293,12 +293,12 @@ do {	(__txd)->tx_addr = (__force hme32)(u32)(__addr); \
 	readl(__reg)
 #define hme_write_rxd(__hp, __rxd, __flags, __addr) \
 do {	(__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
-	wmb(); \
+	dma_wmb(); \
 	(__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
 } while(0)
 #define hme_write_txd(__hp, __txd, __flags, __addr) \
 do {	(__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
-	wmb(); \
+	dma_wmb(); \
 	(__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
 } while(0)
 static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 401abf7254d3..53fe200e0b79 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -519,7 +519,7 @@ static int vnet_walk_rx_one(struct vnet_port *port,
 	if (desc->hdr.state != VIO_DESC_READY)
 		return 1;
 
-	rmb();
+	dma_rmb();
 
 	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
 	       desc->hdr.state, desc->hdr.ack,
@@ -1380,7 +1380,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* This has to be a non-SMP write barrier because we are writing
 	 * to memory which is shared with the peer LDOM.
 	 */
-	wmb();
+	dma_wmb();
 
 	d->hdr.state = VIO_DESC_READY;
 
@@ -1395,7 +1395,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * is marked READY, but start_cons was false.
 	 * If so, vnet_ack() should send out the missed "start" trigger.
 	 *
-	 * Note that the wmb() above makes sure the cookies et al. are
+	 * Note that the dma_wmb() above makes sure the cookies et al. are
 	 * not globally visible before the VIO_DESC_READY, and that the
 	 * stores are ordered correctly by the compiler. The consumer will
 	 * not proceed until the VIO_DESC_READY is visible assuring that
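The sunvnet change shows both halves of the pairing: vnet_start_xmit() issues
dma_wmb() before setting VIO_DESC_READY, and vnet_walk_rx_one() issues
dma_rmb() after reading the state, so the consumer's loads of the descriptor
contents cannot be reordered ahead of the state check. A condensed sketch of
the consumer side, based on the vnet_walk_rx_one() hunk above (the size read
stands in for the rest of the descriptor fields):

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	/* Pairs with the producer's dma_wmb(): the loads below must not
	 * be speculated before the state load above.
	 */
	dma_rmb();

	len = desc->size;

Note that the third sunvnet hunk is comment-only: the in-code documentation is
updated so it keeps naming the barrier actually used.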