diff options
author | Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com> | 2009-05-24 19:53:20 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-05-26 01:49:54 -0400 |
commit | 0029d64af5d72049e2170e4609fa83bd1f3f07cd (patch) | |
tree | 1a0fc073e2ec3116cfdb74fa2eb19d3efbe7215a /drivers/net/sh_eth.c | |
parent | e88aae7bb1dc50457489d1d7c81dcf4db23ccf94 (diff) |
net: sh_eth: fix TX/RX descriptor not set physical memory
Fix the problem that the TX/RX descriptors were not set to physical memory addresses.
Signed-off-by: Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
Signed-off-by: Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sh_eth.c')
-rw-r--r-- | drivers/net/sh_eth.c | 35 |
1 file changed, 12 insertions, 23 deletions
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index a742297e3e94..eb768357c78d 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -263,26 +263,20 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
263 | #endif | 263 | #endif |
264 | /* RX descriptor */ | 264 | /* RX descriptor */ |
265 | rxdesc = &mdp->rx_ring[i]; | 265 | rxdesc = &mdp->rx_ring[i]; |
266 | rxdesc->addr = (u32)skb->data & ~0x3UL; | 266 | rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); |
267 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); | 267 | rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); |
268 | 268 | ||
269 | /* The size of the buffer is 16 byte boundary. */ | 269 | /* The size of the buffer is 16 byte boundary. */ |
270 | rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; | 270 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); |
271 | /* Rx descriptor address set */ | 271 | /* Rx descriptor address set */ |
272 | if (i == 0) { | 272 | if (i == 0) { |
273 | ctrl_outl((u32)rxdesc, ioaddr + RDLAR); | 273 | ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR); |
274 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | 274 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) |
275 | ctrl_outl((u32)rxdesc, ioaddr + RDFAR); | 275 | ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR); |
276 | #endif | 276 | #endif |
277 | } | 277 | } |
278 | } | 278 | } |
279 | 279 | ||
280 | /* Rx descriptor address set */ | ||
281 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | ||
282 | ctrl_outl((u32)rxdesc, ioaddr + RDFXR); | ||
283 | ctrl_outl(0x1, ioaddr + RDFFR); | ||
284 | #endif | ||
285 | |||
286 | mdp->dirty_rx = (u32) (i - RX_RING_SIZE); | 280 | mdp->dirty_rx = (u32) (i - RX_RING_SIZE); |
287 | 281 | ||
288 | /* Mark the last entry as wrapping the ring. */ | 282 | /* Mark the last entry as wrapping the ring. */ |
@@ -298,19 +292,13 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
298 | txdesc->buffer_length = 0; | 292 | txdesc->buffer_length = 0; |
299 | if (i == 0) { | 293 | if (i == 0) { |
300 | /* Tx descriptor address set */ | 294 | /* Tx descriptor address set */ |
301 | ctrl_outl((u32)txdesc, ioaddr + TDLAR); | 295 | ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR); |
302 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | 296 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) |
303 | ctrl_outl((u32)txdesc, ioaddr + TDFAR); | 297 | ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR); |
304 | #endif | 298 | #endif |
305 | } | 299 | } |
306 | } | 300 | } |
307 | 301 | ||
308 | /* Tx descriptor address set */ | ||
309 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | ||
310 | ctrl_outl((u32)txdesc, ioaddr + TDFXR); | ||
311 | ctrl_outl(0x1, ioaddr + TDFFR); | ||
312 | #endif | ||
313 | |||
314 | txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); | 302 | txdesc->status |= cpu_to_edmac(mdp, TD_TDLE); |
315 | } | 303 | } |
316 | 304 | ||
@@ -536,7 +524,8 @@ static int sh_eth_rx(struct net_device *ndev) | |||
536 | if (desc_status & RD_RFS10) | 524 | if (desc_status & RD_RFS10) |
537 | mdp->stats.rx_over_errors++; | 525 | mdp->stats.rx_over_errors++; |
538 | } else { | 526 | } else { |
539 | swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2); | 527 | swaps(phys_to_virt(ALIGN(rxdesc->addr, 4)), |
528 | pkt_len + 2); | ||
540 | skb = mdp->rx_skbuff[entry]; | 529 | skb = mdp->rx_skbuff[entry]; |
541 | mdp->rx_skbuff[entry] = NULL; | 530 | mdp->rx_skbuff[entry] = NULL; |
542 | skb_put(skb, pkt_len); | 531 | skb_put(skb, pkt_len); |
@@ -554,7 +543,7 @@ static int sh_eth_rx(struct net_device *ndev) | |||
554 | entry = mdp->dirty_rx % RX_RING_SIZE; | 543 | entry = mdp->dirty_rx % RX_RING_SIZE; |
555 | rxdesc = &mdp->rx_ring[entry]; | 544 | rxdesc = &mdp->rx_ring[entry]; |
556 | /* The size of the buffer is 16 byte boundary. */ | 545 | /* The size of the buffer is 16 byte boundary. */ |
557 | rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F; | 546 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); |
558 | 547 | ||
559 | if (mdp->rx_skbuff[entry] == NULL) { | 548 | if (mdp->rx_skbuff[entry] == NULL) { |
560 | skb = dev_alloc_skb(mdp->rx_buf_sz); | 549 | skb = dev_alloc_skb(mdp->rx_buf_sz); |
@@ -573,7 +562,7 @@ static int sh_eth_rx(struct net_device *ndev) | |||
573 | skb_reserve(skb, RX_OFFSET); | 562 | skb_reserve(skb, RX_OFFSET); |
574 | #endif | 563 | #endif |
575 | skb->ip_summed = CHECKSUM_NONE; | 564 | skb->ip_summed = CHECKSUM_NONE; |
576 | rxdesc->addr = (u32)skb->data & ~0x3UL; | 565 | rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); |
577 | } | 566 | } |
578 | if (entry >= RX_RING_SIZE - 1) | 567 | if (entry >= RX_RING_SIZE - 1) |
579 | rxdesc->status |= | 568 | rxdesc->status |= |
@@ -959,9 +948,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
959 | entry = mdp->cur_tx % TX_RING_SIZE; | 948 | entry = mdp->cur_tx % TX_RING_SIZE; |
960 | mdp->tx_skbuff[entry] = skb; | 949 | mdp->tx_skbuff[entry] = skb; |
961 | txdesc = &mdp->tx_ring[entry]; | 950 | txdesc = &mdp->tx_ring[entry]; |
962 | txdesc->addr = (u32)(skb->data); | 951 | txdesc->addr = virt_to_phys(skb->data); |
963 | /* soft swap. */ | 952 | /* soft swap. */ |
964 | swaps((char *)(txdesc->addr & ~0x3), skb->len + 2); | 953 | swaps(phys_to_virt(ALIGN(txdesc->addr, 4)), skb->len + 2); |
965 | /* write back */ | 954 | /* write back */ |
966 | __flush_purge_region(skb->data, skb->len); | 955 | __flush_purge_region(skb->data, skb->len); |
967 | if (skb->len < ETHERSMALL) | 956 | if (skb->len < ETHERSMALL) |