author     Dale Farnsworth <dale@farnsworth.org>   2006-01-16 18:51:22 -0500
committer  Jeff Garzik <jgarzik@pobox.com>         2006-01-17 07:23:37 -0500
commit     b44cd572623cb6a931a947d9108595517fd945f8
tree       fc4a28e35531ad9bc3596b262ad834aff82d4662
parent     16e0301831767ee1b8e5e022cc08e76f9f8a8938
[PATCH] mv643xx_eth: Receive buffers require 8 byte alignment
The Marvell mv643xx ethernet hardware requires that DMA buffers be
aligned to 8-byte boundaries, but buffers allocated by dev_alloc_skb()
only have 4-byte alignment when slab debugging is enabled. This patch
satisfies the requirement by over-allocating each receive buffer and
reserving the extra bytes needed to bring skb->data onto an 8-byte
boundary.
Also, document that the 2-byte offset to align the IP packets on
receive is a hardware feature and is not tied to NET_IP_ALIGN.
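
As an aside, the fix relies on the usual pattern for DMA-alignment fixups:
over-allocate by the alignment requirement, then skb_reserve() whatever it
takes to bring skb->data onto an aligned boundary. The helper below only
restates that pattern outside the driver as a sketch; mv643xx_alloc_rx_skb
is an invented name and is not part of the patch or the driver.

#include <linux/skbuff.h>

#define DMA_ALIGN	8	/* hw requires 8-byte aligned DMA buffers */
#define HW_IP_ALIGN	2	/* hw inserts a 2-byte gap before the IP header */

/* Sketch of the allocation pattern used by the patch: ask for DMA_ALIGN
 * extra bytes, round skb->data up to an 8-byte boundary, and only then
 * apply the 2-byte hardware IP-alignment offset.  In the driver itself
 * the HW_IP_ALIGN reserve happens after dma_map_single() has mapped the
 * buffer.
 */
static struct sk_buff *mv643xx_alloc_rx_skb(unsigned int size)
{
	struct sk_buff *skb;
	int unaligned;

	skb = dev_alloc_skb(size + DMA_ALIGN);
	if (!skb)
		return NULL;

	unaligned = (unsigned long)skb->data & (DMA_ALIGN - 1);
	if (unaligned)
		skb_reserve(skb, DMA_ALIGN - unaligned);

	skb_reserve(skb, HW_IP_ALIGN);
	return skb;
}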
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>
---
 drivers/net/mv643xx_eth.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index f100ca7d3ee2..4afb954092a6 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -57,7 +57,9 @@
 /* Constants */
 #define VLAN_HLEN		4
 #define FCS_LEN			4
-#define WRAP			NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
+#define DMA_ALIGN		8	/* hw requires 8-byte alignment */
+#define HW_IP_ALIGN		2	/* hw aligns IP header */
+#define WRAP			HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
 #define RX_SKB_SIZE		((dev->mtu + WRAP + 7) & ~0x7)
 
 #define INT_CAUSE_UNMASK_ALL		0x0007ffff
@@ -173,15 +175,19 @@ static void mv643xx_eth_rx_task(void *data)
 	struct mv643xx_private *mp = netdev_priv(dev);
 	struct pkt_info pkt_info;
 	struct sk_buff *skb;
+	int unaligned;
 
 	if (test_and_set_bit(0, &mp->rx_task_busy))
 		panic("%s: Error in test_set_bit / clear_bit", dev->name);
 
 	while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
-		skb = dev_alloc_skb(RX_SKB_SIZE);
+		skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
 		if (!skb)
 			break;
 		mp->rx_ring_skbs++;
+		unaligned = (u32)skb->data & (DMA_ALIGN - 1);
+		if (unaligned)
+			skb_reserve(skb, DMA_ALIGN - unaligned);
 		pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
 		pkt_info.byte_cnt = RX_SKB_SIZE;
 		pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
@@ -192,7 +198,7 @@ static void mv643xx_eth_rx_task(void *data)
 				"%s: Error allocating RX Ring\n", dev->name);
 			break;
 		}
-		skb_reserve(skb, 2);
+		skb_reserve(skb, HW_IP_ALIGN);
 	}
 	clear_bit(0, &mp->rx_task_busy);
 	/*
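
For a concrete sense of the sizes involved, here is the receive-buffer
arithmetic worked through with an ordinary 1500-byte MTU (an illustrative
value, not one the patch fixes):

WRAP        = HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
            = 2 + 14 + 4 + 4 = 24
RX_SKB_SIZE = (1500 + 24 + 7) & ~0x7 = 1528	/* rounded up to a multiple of 8 */
alloc size  = RX_SKB_SIZE + DMA_ALIGN = 1536	/* slack for the 8-byte alignment fixup */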