author	David S. Miller <davem@davemloft.net>	2011-06-21 01:29:08 -0400
committer	David S. Miller <davem@davemloft.net>	2011-06-21 01:29:08 -0400
commit	9f6ec8d697c08963d83880ccd35c13c5ace716ea (patch)
tree	ad8d93cf6fcdd09b86ade09f5fcbbc66cdb1cca2 /drivers/net/arm
parent	4aa3a715551c93eda32d79bd52042ce500bd5383 (diff)
parent	56299378726d5f2ba8d3c8cbbd13cb280ba45e4f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
	drivers/net/wireless/rtlwifi/pci.c
	net/netfilter/ipvs/ip_vs_core.c
Diffstat (limited to 'drivers/net/arm')
-rw-r--r--	drivers/net/arm/am79c961a.c	126
-rw-r--r--	drivers/net/arm/ep93xx_eth.c	 82
2 files changed, 106 insertions(+), 102 deletions(-)
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 0c9217f48b72..7b3e23f38913 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -50,7 +50,7 @@ static const char version[] =
 #ifdef __arm__
 static void write_rreg(u_long base, u_int reg, u_int val)
 {
-	__asm__(
+	asm volatile(
 	"str%?h %1, [%2] @ NET_RAP\n\t"
 	"str%?h %0, [%2, #-4] @ NET_RDP"
 	:
@@ -60,7 +60,7 @@ static void write_rreg(u_long base, u_int reg, u_int val)
 static inline unsigned short read_rreg(u_long base_addr, u_int reg)
 {
 	unsigned short v;
-	__asm__(
+	asm volatile(
 	"str%?h %1, [%2] @ NET_RAP\n\t"
 	"ldr%?h %0, [%2, #-4] @ NET_RDP"
 	: "=r" (v)
@@ -70,7 +70,7 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
 
 static inline void write_ireg(u_long base, u_int reg, u_int val)
 {
-	__asm__(
+	asm volatile(
 	"str%?h %1, [%2] @ NET_RAP\n\t"
 	"str%?h %0, [%2, #8] @ NET_IDP"
 	:
@@ -80,7 +80,7 @@ static inline void write_ireg(u_long base, u_int reg, u_int val)
 static inline unsigned short read_ireg(u_long base_addr, u_int reg)
 {
 	u_short v;
-	__asm__(
+	asm volatile(
 	"str%?h %1, [%2] @ NAT_RAP\n\t"
 	"ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
 	: "=r" (v)
@@ -91,47 +91,48 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
 #define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
 #define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
 
-static inline void
+static void
 am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
 {
 	offset = ISAMEM_BASE + (offset << 1);
 	length = (length + 1) & ~1;
 	if ((int)buf & 2) {
-		__asm__ __volatile__("str%?h %2, [%0], #4"
+		asm volatile("str%?h %2, [%0], #4"
 		 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
 		buf += 2;
 		length -= 2;
 	}
 	while (length > 8) {
-		unsigned int tmp, tmp2;
-		__asm__ __volatile__(
-			"ldm%?ia %1!, {%2, %3}\n\t"
+		register unsigned int tmp asm("r2"), tmp2 asm("r3");
+		asm volatile(
+			"ldm%?ia %0!, {%1, %2}"
+			: "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
+		length -= 8;
+		asm volatile(
+			"str%?h %1, [%0], #4\n\t"
+			"mov%? %1, %1, lsr #16\n\t"
+			"str%?h %1, [%0], #4\n\t"
 			"str%?h %2, [%0], #4\n\t"
 			"mov%? %2, %2, lsr #16\n\t"
-			"str%?h %2, [%0], #4\n\t"
-			"str%?h %3, [%0], #4\n\t"
-			"mov%? %3, %3, lsr #16\n\t"
-			"str%?h %3, [%0], #4"
-			: "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2)
-			: "0" (offset), "1" (buf));
-		length -= 8;
+			"str%?h %2, [%0], #4"
+			: "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
 	}
 	while (length > 0) {
-		__asm__ __volatile__("str%?h %2, [%0], #4"
+		asm volatile("str%?h %2, [%0], #4"
 			: "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
 		buf += 2;
 		length -= 2;
 	}
 }
 
-static inline void
+static void
 am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
 {
 	offset = ISAMEM_BASE + (offset << 1);
 	length = (length + 1) & ~1;
 	if ((int)buf & 2) {
 		unsigned int tmp;
-		__asm__ __volatile__(
+		asm volatile(
 			"ldr%?h %2, [%0], #4\n\t"
 			"str%?b %2, [%1], #1\n\t"
 			"mov%? %2, %2, lsr #8\n\t"
@@ -140,12 +141,12 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
 		length -= 2;
 	}
 	while (length > 8) {
-		unsigned int tmp, tmp2, tmp3;
-		__asm__ __volatile__(
+		register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
+		asm volatile(
 			"ldr%?h %2, [%0], #4\n\t"
+			"ldr%?h %4, [%0], #4\n\t"
 			"ldr%?h %3, [%0], #4\n\t"
-			"orr%? %2, %2, %3, lsl #16\n\t"
-			"ldr%?h %3, [%0], #4\n\t"
+			"orr%? %2, %2, %4, lsl #16\n\t"
 			"ldr%?h %4, [%0], #4\n\t"
 			"orr%? %3, %3, %4, lsl #16\n\t"
 			"stm%?ia %1!, {%2, %3}"
@@ -155,7 +156,7 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
 	}
 	while (length > 0) {
 		unsigned int tmp;
-		__asm__ __volatile__(
+		asm volatile(
 			"ldr%?h %2, [%0], #4\n\t"
 			"str%?b %2, [%1], #1\n\t"
 			"mov%? %2, %2, lsr #8\n\t"
@@ -196,6 +197,42 @@ am79c961_ramtest(struct net_device *dev, unsigned int val)
 	return errorcount;
 }
 
+static void am79c961_mc_hash(char *addr, u16 *hash)
+{
+	if (addr[0] & 0x01) {
+		int idx, bit;
+		u32 crc;
+
+		crc = ether_crc_le(ETH_ALEN, addr);
+
+		idx = crc >> 30;
+		bit = (crc >> 26) & 15;
+
+		hash[idx] |= 1 << bit;
+	}
+}
+
+static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
+{
+	unsigned int mode = MODE_PORT_10BT;
+
+	if (dev->flags & IFF_PROMISC) {
+		mode |= MODE_PROMISC;
+		memset(hash, 0xff, 4 * sizeof(*hash));
+	} else if (dev->flags & IFF_ALLMULTI) {
+		memset(hash, 0xff, 4 * sizeof(*hash));
+	} else {
+		struct netdev_hw_addr *ha;
+
+		memset(hash, 0, 4 * sizeof(*hash));
+
+		netdev_for_each_mc_addr(ha, dev)
+			am79c961_mc_hash(ha->addr, hash);
+	}
+
+	return mode;
+}
+
 static void
 am79c961_init_for_open(struct net_device *dev)
 {
@@ -203,6 +240,7 @@ am79c961_init_for_open(struct net_device *dev)
 	unsigned long flags;
 	unsigned char *p;
 	u_int hdr_addr, first_free_addr;
+	u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
 	int i;
 
 	/*
@@ -218,16 +256,12 @@ am79c961_init_for_open(struct net_device *dev)
 	write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
 
 	for (i = LADRL; i <= LADRH; i++)
-		write_rreg (dev->base_addr, i, 0);
+		write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
 
 	for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
 		write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
 
-	i = MODE_PORT_10BT;
-	if (dev->flags & IFF_PROMISC)
-		i |= MODE_PROMISC;
-
-	write_rreg (dev->base_addr, MODE, i);
+	write_rreg (dev->base_addr, MODE, mode);
 	write_rreg (dev->base_addr, POLLINT, 0);
 	write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
 	write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
@@ -340,21 +374,6 @@ am79c961_close(struct net_device *dev)
 	return 0;
 }
 
-static void am79c961_mc_hash(char *addr, unsigned short *hash)
-{
-	if (addr[0] & 0x01) {
-		int idx, bit;
-		u32 crc;
-
-		crc = ether_crc_le(ETH_ALEN, addr);
-
-		idx = crc >> 30;
-		bit = (crc >> 26) & 15;
-
-		hash[idx] |= 1 << bit;
-	}
-}
-
 /*
  * Set or clear promiscuous/multicast mode filter for this adapter.
  */
@@ -362,24 +381,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
 {
 	struct dev_priv *priv = netdev_priv(dev);
 	unsigned long flags;
-	unsigned short multi_hash[4], mode;
+	u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
 	int i, stopped;
 
-	mode = MODE_PORT_10BT;
-
-	if (dev->flags & IFF_PROMISC) {
-		mode |= MODE_PROMISC;
-	} else if (dev->flags & IFF_ALLMULTI) {
-		memset(multi_hash, 0xff, sizeof(multi_hash));
-	} else {
-		struct netdev_hw_addr *ha;
-
-		memset(multi_hash, 0x00, sizeof(multi_hash));
-
-		netdev_for_each_mc_addr(ha, dev)
-			am79c961_mc_hash(ha->addr, multi_hash);
-	}
-
 	spin_lock_irqsave(&priv->chip_lock, flags);
 
 	stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
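
Side note on the am79c961a change above: the new am79c961_mc_hash() helper maps each multicast MAC address to one bit of the chip's 64-bit logical address filter, using the top bits of the little-endian Ethernet CRC to pick a 16-bit register (crc >> 30) and a bit within it ((crc >> 26) & 15). The following is a minimal stand-alone sketch of that calculation; the crc32_le() routine here is an assumed user-space substitute for the kernel's ether_crc_le(), not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed equivalent of the kernel's ether_crc_le(): reflected CRC-32,
 * polynomial 0xedb88320, initial value ~0, no final inversion. */
static uint32_t crc32_le(int len, const unsigned char *p)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

/* Mirrors am79c961_mc_hash(): set one bit in hash[0..3] (LADRL..LADRH). */
static void mc_hash(const unsigned char *addr, uint16_t *hash)
{
	if (addr[0] & 0x01) {			/* only multicast addresses */
		uint32_t crc = crc32_le(6, addr);
		int idx = crc >> 30;		/* which 16-bit LADR register */
		int bit = (crc >> 26) & 15;	/* which bit in that register */

		hash[idx] |= 1 << bit;
	}
}

int main(void)
{
	const unsigned char mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint16_t hash[4] = { 0, 0, 0, 0 };

	mc_hash(mc, hash);
	printf("LADR: %04x %04x %04x %04x\n", hash[0], hash[1], hash[2], hash[3]);
	return 0;
}

The same helper is now used by both am79c961_init_for_open() and am79c961_setmulticastlist() via am79c961_get_rx_mode(), so the open path programs the LADR registers instead of zeroing them.
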
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index a167addd5382..4317af8d2f0a 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -284,10 +284,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
 
 		skb = dev_alloc_skb(length + 2);
 		if (likely(skb != NULL)) {
+			struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
 			skb_reserve(skb, 2);
-			dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+			dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
 						length, DMA_FROM_DEVICE);
 			skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
+			dma_sync_single_for_device(dev->dev.parent,
+						   rxd->buf_addr, length,
+						   DMA_FROM_DEVICE);
 			skb_put(skb, length);
 			skb->protocol = eth_type_trans(skb, dev);
 
@@ -349,6 +353,7 @@ poll_some_more:
 static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ep93xx_priv *ep = netdev_priv(dev);
+	struct ep93xx_tdesc *txd;
 	int entry;
 
 	if (unlikely(skb->len > MAX_PKT_SIZE)) {
@@ -360,11 +365,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 	entry = ep->tx_pointer;
 	ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
 
-	ep->descs->tdesc[entry].tdesc1 =
-		TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+	txd = &ep->descs->tdesc[entry];
+
+	txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
+	dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
+				DMA_TO_DEVICE);
 	skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
-	dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr,
-		skb->len, DMA_TO_DEVICE);
+	dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
+				   DMA_TO_DEVICE);
 	dev_kfree_skb(skb);
 
 	spin_lock_irq(&ep->tx_pending_lock);
@@ -458,89 +466,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
 
 static void ep93xx_free_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->rdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
 
 		if (ep->rx_buf[i] != NULL)
-			free_page((unsigned long)ep->rx_buf[i]);
+			kfree(ep->rx_buf[i]);
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
 		dma_addr_t d;
 
 		d = ep->descs->tdesc[i].buf_addr;
 		if (d)
-			dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE);
+			dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
 
 		if (ep->tx_buf[i] != NULL)
-			free_page((unsigned long)ep->tx_buf[i]);
+			kfree(ep->tx_buf[i]);
 	}
 
-	dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs,
+	dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
 			ep->descs_dma_addr);
 }
 
-/*
- * The hardware enforces a sub-2K maximum packet size, so we put
- * two buffers on every hardware page.
- */
 static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
 {
+	struct device *dev = ep->dev->dev.parent;
 	int i;
 
-	ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs),
-				&ep->descs_dma_addr, GFP_KERNEL | GFP_DMA);
+	ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
+				&ep->descs_dma_addr, GFP_KERNEL);
 	if (ep->descs == NULL)
 		return 1;
 
-	for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
-			free_page((unsigned long)page);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, d)) {
+			kfree(buf);
 			goto err;
 		}
 
-		ep->rx_buf[i] = page;
+		ep->rx_buf[i] = buf;
 		ep->descs->rdesc[i].buf_addr = d;
 		ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
-
-		ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
-		ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
 	}
 
-	for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) {
-		void *page;
+	for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
+		void *buf;
 		dma_addr_t d;
 
-		page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
-		if (page == NULL)
+		buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
+		if (buf == NULL)
 			goto err;
 
-		d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-		if (dma_mapping_error(NULL, d)) {
-			free_page((unsigned long)page);
+		d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, d)) {
+			kfree(buf);
 			goto err;
 		}
 
-		ep->tx_buf[i] = page;
+		ep->tx_buf[i] = buf;
 		ep->descs->tdesc[i].buf_addr = d;
-
-		ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
-		ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
 	}
 
 	return 0;
@@ -830,6 +829,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
830 } 829 }
831 ep = netdev_priv(dev); 830 ep = netdev_priv(dev);
832 ep->dev = dev; 831 ep->dev = dev;
832 SET_NETDEV_DEV(dev, &pdev->dev);
833 netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); 833 netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
834 834
835 platform_set_drvdata(pdev, dev); 835 platform_set_drvdata(pdev, dev);
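
Side note on the ep93xx_eth change above: the driver now passes the real struct device (dev->dev.parent, made available by the added SET_NETDEV_DEV() call) to the streaming-DMA API instead of NULL, and brackets every CPU copy with a sync-for-cpu / sync-for-device pair. The condensed kernel-style sketch below isolates that ownership pattern for the transmit copy; the helper name tx_copy_and_sync() is illustrative, not a function in the driver.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative helper: copy an skb into a pre-mapped streaming-DMA buffer.
 * buf_addr is the dma_addr_t returned by dma_map_single(dma_dev, ...). */
static void tx_copy_and_sync(struct device *dma_dev, struct sk_buff *skb,
			     void *tx_buf, dma_addr_t buf_addr)
{
	/* Hand the buffer to the CPU before writing to it... */
	dma_sync_single_for_cpu(dma_dev, buf_addr, skb->len, DMA_TO_DEVICE);

	/* ...fill it while the CPU owns it... */
	skb_copy_and_csum_dev(skb, tx_buf);

	/* ...then give ownership back to the device before starting TX. */
	dma_sync_single_for_device(dma_dev, buf_addr, skb->len, DMA_TO_DEVICE);
}

The RX path in ep93xx_rx() follows the mirror-image sequence with DMA_FROM_DEVICE: sync for CPU, copy the packet out, sync for device so the descriptor can be reused.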