author		Sonic Zhang <sonic.zhang@analog.com>	2010-05-10 01:39:08 -0400
committer	David S. Miller <davem@davemloft.net>	2010-05-17 20:20:58 -0400
commit		f6e1e4f3e511589dd0c47d42b870501659e7195f (patch)
tree		0574a664c6cf76b7b9b0c0b795f0c6b8d2351391
parent		ec497b32c311b1e1aac22a76d294d24285d06331 (diff)
netdev: bfin_mac: invalidate data cache only once for each new rx skb buffer
The skb buffer isn't actually used until the transfer finishes and the skb is
passed up to higher layers, so invalidate the range only once, before actual
data starts arriving.

This also avoids a problem with cache invalidation on Blackfin systems: there
is no invalidate-only operation, just invalidate+flush.  So when the cache
runs in writeback mode, there is a small (but not uncommon) chance of the
flush overwriting valid DMA-ed data with stale cache lines.

Signed-off-by: Sonic Zhang <sonic.zhang@analog.com>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	drivers/net/bfin_mac.c	13
1 file changed, 8 insertions(+), 5 deletions(-)
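To make the hazard concrete, here is a minimal sketch of the race the commit
message describes. It is illustrative only: dma_receive_into() is a
hypothetical stand-in for the MAC's DMA engine filling a buffer, while
blackfin_dcache_invalidate_range() is the real Blackfin helper, which in
writeback mode flushes dirty lines as part of invalidating them.

/* Sketch only: dma_receive_into() is hypothetical, not a driver or
 * kernel API.  On Blackfin there is no invalidate-only operation;
 * invalidating a range also flushes any dirty lines in it when the
 * cache runs in writeback mode.
 */
static char buf[2048];			/* rx buffer in cacheable memory */

static void rx_racy(void)
{
	buf[0] = 0;			/* CPU write leaves a dirty line */
	dma_receive_into(buf);		/* DMA fills buf behind the cache */
	/* Too late: the flush half of invalidate+flush can write the
	 * stale dirty line on top of the freshly DMA-ed data. */
	blackfin_dcache_invalidate_range((unsigned long)buf,
					 (unsigned long)&buf[sizeof(buf)]);
}

static void rx_safe(void)
{
	/* Invalidate once, before DMA starts: dirty lines are flushed
	 * while the buffer holds nothing of value, and later CPU reads
	 * miss the cache and fetch the DMA-ed data from memory. */
	blackfin_dcache_invalidate_range((unsigned long)buf,
					 (unsigned long)&buf[sizeof(buf)]);
	dma_receive_into(buf);
}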
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 16f2a37c733d..2b364ba6b62e 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -203,6 +203,11 @@ static int desc_list_init(void)
 			goto init_error;
 		}
 		skb_reserve(new_skb, NET_IP_ALIGN);
+		/* Invalidate the data cache over the skb->data range when the
+		 * cache is in writeback mode; this prevents the flush from
+		 * overwriting data newly arrived via DMA. */
+		blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
+						 (unsigned long)new_skb->end);
 		r->skb = new_skb;
 
 		/*
@@ -1011,19 +1016,17 @@ static void bfin_mac_rx(struct net_device *dev)
 	}
 	/* reserve 2 bytes for RXDWA padding */
 	skb_reserve(new_skb, NET_IP_ALIGN);
-	current_rx_ptr->skb = new_skb;
-	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
-
 	/* Invalidate the data cache over the skb->data range when the
 	 * cache is in writeback mode; this prevents the flush from
 	 * overwriting data newly arrived via DMA. */
 	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
 					 (unsigned long)new_skb->end);
 
+	current_rx_ptr->skb = new_skb;
+	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
+
 	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
 	skb_put(skb, len);
-	blackfin_dcache_invalidate_range((unsigned long)skb->head,
-			(unsigned long)skb->tail);
 
 	skb->protocol = eth_type_trans(skb, dev);
 
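Taken together, the two hunks enforce one rule: an skb's cache range is
invalidated exactly once, when the buffer enters the rx ring, and never after
DMA has filled it. A condensed view of the patched paths (taken from the hunks
above, surrounding driver logic elided):

/* desc_list_init(): each fresh ring buffer is invalidated once,
 * before the DMA engine ever sees it. */
skb_reserve(new_skb, NET_IP_ALIGN);
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
				 (unsigned long)new_skb->end);
r->skb = new_skb;

/* bfin_mac_rx(): the replacement skb is likewise invalidated before
 * it is linked into the descriptor, and the received skb is passed
 * up after skb_put() with no further cache maintenance -- the old
 * post-DMA invalidate (and its risky flush) is gone. */
blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
				 (unsigned long)new_skb->end);
current_rx_ptr->skb = new_skb;
current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);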