-rw-r--r--	drivers/net/arm/w90p910_ether.c	206
1 file changed, 112 insertions, 94 deletions
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 616fb7985a34..890716f6c018 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -143,16 +143,17 @@ struct recv_pdesc {
 
 struct tran_pdesc {
 	struct w90p910_txbd desclist[TX_DESC_SIZE];
-	char tran_buf[RX_DESC_SIZE][MAX_TBUFF_SZ];
+	char tran_buf[TX_DESC_SIZE][MAX_TBUFF_SZ];
 };
 
 struct w90p910_ether {
 	struct recv_pdesc *rdesc;
-	struct recv_pdesc *rdesc_phys;
 	struct tran_pdesc *tdesc;
-	struct tran_pdesc *tdesc_phys;
+	dma_addr_t rdesc_phys;
+	dma_addr_t tdesc_phys;
 	struct net_device_stats stats;
 	struct platform_device *pdev;
+	struct resource *res;
 	struct sk_buff *skb;
 	struct clk *clk;
 	struct clk *rmiiclk;
@@ -169,7 +170,6 @@ struct w90p910_ether {
 	unsigned int start_tx_ptr;
 	unsigned int start_rx_ptr;
 	unsigned int linkflag;
-	spinlock_t lock;
 };
 
 static void update_linkspeed_register(struct net_device *dev,
@@ -275,59 +275,75 @@ static void w90p910_write_cam(struct net_device *dev,
 	__raw_writel(msw, ether->reg + REG_CAMM_BASE + x * CAM_ENTRY_SIZE);
 }
 
-static void w90p910_init_desc(struct net_device *dev)
+static int w90p910_init_desc(struct net_device *dev)
 {
 	struct w90p910_ether *ether;
-	struct w90p910_txbd *tdesc, *tdesc_phys;
-	struct w90p910_rxbd *rdesc, *rdesc_phys;
-	unsigned int i, j;
+	struct w90p910_txbd *tdesc;
+	struct w90p910_rxbd *rdesc;
+	struct platform_device *pdev;
+	unsigned int i;
 
 	ether = netdev_priv(dev);
+	pdev = ether->pdev;
 
 	ether->tdesc = (struct tran_pdesc *)
-		dma_alloc_coherent(NULL, sizeof(struct tran_pdesc),
-			(dma_addr_t *) &ether->tdesc_phys, GFP_KERNEL);
+		dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+			&ether->tdesc_phys, GFP_KERNEL);
+
+	if (!ether->tdesc) {
+		dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
+		return -ENOMEM;
+	}
 
 	ether->rdesc = (struct recv_pdesc *)
-		dma_alloc_coherent(NULL, sizeof(struct recv_pdesc),
-			(dma_addr_t *) &ether->rdesc_phys, GFP_KERNEL);
+		dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
+			&ether->rdesc_phys, GFP_KERNEL);
+
+	if (!ether->rdesc) {
+		dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
+		dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+					ether->tdesc, ether->tdesc_phys);
+		return -ENOMEM;
+	}
 
 	for (i = 0; i < TX_DESC_SIZE; i++) {
-		tdesc = &(ether->tdesc->desclist[i]);
-
-		j = ((i + 1) / TX_DESC_SIZE);
-
-		if (j != 0) {
-			tdesc_phys = &(ether->tdesc_phys->desclist[0]);
-			ether->start_tx_ptr = (unsigned int)tdesc_phys;
-			tdesc->next = (unsigned int)ether->start_tx_ptr;
-		} else {
-			tdesc_phys = &(ether->tdesc_phys->desclist[i+1]);
-			tdesc->next = (unsigned int)tdesc_phys;
-		}
+		unsigned int offset;
+
+		tdesc = &(ether->tdesc->desclist[i]);
+
+		if (i == TX_DESC_SIZE - 1)
+			offset = offsetof(struct tran_pdesc, desclist[0]);
+		else
+			offset = offsetof(struct tran_pdesc, desclist[i + 1]);
 
-		tdesc->buffer = (unsigned int)ether->tdesc_phys->tran_buf[i];
+		tdesc->next = ether->tdesc_phys + offset;
+		tdesc->buffer = ether->tdesc_phys +
+				offsetof(struct tran_pdesc, tran_buf[i]);
 		tdesc->sl = 0;
 		tdesc->mode = 0;
 	}
 
+	ether->start_tx_ptr = ether->tdesc_phys;
+
 	for (i = 0; i < RX_DESC_SIZE; i++) {
-		rdesc = &(ether->rdesc->desclist[i]);
-
-		j = ((i + 1) / RX_DESC_SIZE);
-
-		if (j != 0) {
-			rdesc_phys = &(ether->rdesc_phys->desclist[0]);
-			ether->start_rx_ptr = (unsigned int)rdesc_phys;
-			rdesc->next = (unsigned int)ether->start_rx_ptr;
-		} else {
-			rdesc_phys = &(ether->rdesc_phys->desclist[i+1]);
-			rdesc->next = (unsigned int)rdesc_phys;
-		}
+		unsigned int offset;
+
+		rdesc = &(ether->rdesc->desclist[i]);
+
+		if (i == RX_DESC_SIZE - 1)
+			offset = offsetof(struct recv_pdesc, desclist[0]);
+		else
+			offset = offsetof(struct recv_pdesc, desclist[i + 1]);
 
+		rdesc->next = ether->rdesc_phys + offset;
 		rdesc->sl = RX_OWEN_DMA;
-		rdesc->buffer = (unsigned int)ether->rdesc_phys->recv_buf[i];
+		rdesc->buffer = ether->rdesc_phys +
+				offsetof(struct recv_pdesc, recv_buf[i]);
 	}
+
+	ether->start_rx_ptr = ether->rdesc_phys;
+
+	return 0;
 }
 
 static void w90p910_set_fifo_threshold(struct net_device *dev)
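
A side note on the descriptor-ring hunk above: instead of casting kernel-virtual pointers to "addresses", each descriptor's next and buffer fields are now the dma_addr_t returned by dma_alloc_coherent() plus an offsetof() displacement into the same block, with the last entry wrapping back to the first. What follows is a minimal user-space sketch of that linking scheme, not driver code: the demo_* names and the fake base address are made up, and it relies on offsetof() accepting a runtime array index, which GCC and Clang (and hence the kernel) support.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_SIZE 4
#define DEMO_BUF_SIZE  64

struct demo_desc {
	uint32_t next;    /* bus address of the next descriptor */
	uint32_t buffer;  /* bus address of this descriptor's data buffer */
};

struct demo_ring {
	struct demo_desc desclist[DEMO_RING_SIZE];
	char buf[DEMO_RING_SIZE][DEMO_BUF_SIZE];
};

int main(void)
{
	/* stand-in for the dma_addr_t that dma_alloc_coherent() would return */
	uint32_t ring_phys = 0x20000000;
	struct demo_ring ring;
	unsigned int i;

	for (i = 0; i < DEMO_RING_SIZE; i++) {
		unsigned int offset;

		/* the last entry wraps back to the first, forming a ring */
		if (i == DEMO_RING_SIZE - 1)
			offset = offsetof(struct demo_ring, desclist[0]);
		else
			offset = offsetof(struct demo_ring, desclist[i + 1]);

		ring.desclist[i].next = ring_phys + offset;
		ring.desclist[i].buffer = ring_phys +
				offsetof(struct demo_ring, buf[i]);
	}

	for (i = 0; i < DEMO_RING_SIZE; i++)
		printf("desc %u: next=0x%08x buffer=0x%08x\n", i,
		       (unsigned int)ring.desclist[i].next,
		       (unsigned int)ring.desclist[i].buffer);

	return 0;
}
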
@@ -456,8 +472,6 @@ static void w90p910_reset_mac(struct net_device *dev)
 {
 	struct w90p910_ether *ether = netdev_priv(dev);
 
-	spin_lock(&ether->lock);
-
 	w90p910_enable_tx(dev, 0);
 	w90p910_enable_rx(dev, 0);
 	w90p910_set_fifo_threshold(dev);
@@ -486,8 +500,6 @@ static void w90p910_reset_mac(struct net_device *dev)
 
 	if (netif_queue_stopped(dev))
 		netif_wake_queue(dev);
-
-	spin_unlock(&ether->lock);
 }
 
 static void w90p910_mdio_write(struct net_device *dev,
@@ -541,7 +553,7 @@ static int w90p910_mdio_read(struct net_device *dev, int phy_id, int reg)
 	return data;
 }
 
-static int set_mac_address(struct net_device *dev, void *addr)
+static int w90p910_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct sockaddr *address = addr;
 
@@ -557,11 +569,14 @@ static int set_mac_address(struct net_device *dev, void *addr)
 static int w90p910_ether_close(struct net_device *dev)
 {
 	struct w90p910_ether *ether = netdev_priv(dev);
+	struct platform_device *pdev;
 
-	dma_free_writecombine(NULL, sizeof(struct w90p910_rxbd),
-			ether->rdesc, (dma_addr_t)ether->rdesc_phys);
-	dma_free_writecombine(NULL, sizeof(struct w90p910_txbd),
-			ether->tdesc, (dma_addr_t)ether->tdesc_phys);
+	pdev = ether->pdev;
+
+	dma_free_coherent(&pdev->dev, sizeof(struct recv_pdesc),
+			ether->rdesc, ether->rdesc_phys);
+	dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+			ether->tdesc, ether->tdesc_phys);
 
 	netif_stop_queue(dev);
 
@@ -597,6 +612,7 @@ static int w90p910_send_frame(struct net_device *dev,
 
 	txbd = &ether->tdesc->desclist[ether->cur_tx];
 	buffer = ether->tdesc->tran_buf[ether->cur_tx];
+
 	if (length > 1514) {
 		dev_err(&pdev->dev, "send data %d bytes, check it\n", length);
 		length = 1514;
@@ -612,7 +628,9 @@ static int w90p910_send_frame(struct net_device *dev,
 
 	w90p910_trigger_tx(dev);
 
-	ether->cur_tx = (ether->cur_tx+1) % TX_DESC_SIZE;
+	if (++ether->cur_tx >= TX_DESC_SIZE)
+		ether->cur_tx = 0;
+
 	txbd = &ether->tdesc->desclist[ether->cur_tx];
 
 	dev->trans_start = jiffies;
@@ -632,7 +650,7 @@ static int w90p910_ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		dev_kfree_skb_irq(skb);
 		return 0;
 	}
-	return -1;
+	return -EAGAIN;
 }
 
 static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
@@ -640,27 +658,25 @@ static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
 	struct w90p910_ether *ether;
 	struct w90p910_txbd *txbd;
 	struct platform_device *pdev;
-	struct tran_pdesc *tran_pdesc;
 	struct net_device *dev;
 	unsigned int cur_entry, entry, status;
 
-	dev = (struct net_device *)dev_id;
+	dev = dev_id;
 	ether = netdev_priv(dev);
 	pdev = ether->pdev;
 
-	spin_lock(&ether->lock);
-
 	w90p910_get_and_clear_int(dev, &status);
 
 	cur_entry = __raw_readl(ether->reg + REG_CTXDSA);
 
-	tran_pdesc = ether->tdesc_phys;
-	entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]);
+	entry = ether->tdesc_phys +
+		offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
 
 	while (entry != cur_entry) {
 		txbd = &ether->tdesc->desclist[ether->finish_tx];
 
-		ether->finish_tx = (ether->finish_tx + 1) % TX_DESC_SIZE;
+		if (++ether->finish_tx >= TX_DESC_SIZE)
+			ether->finish_tx = 0;
 
 		if (txbd->sl & TXDS_TXCP) {
 			ether->stats.tx_packets++;
@@ -675,20 +691,19 @@ static irqreturn_t w90p910_tx_interrupt(int irq, void *dev_id)
 		if (netif_queue_stopped(dev))
 			netif_wake_queue(dev);
 
-		entry = (unsigned int)(&tran_pdesc->desclist[ether->finish_tx]);
+		entry = ether->tdesc_phys +
+			offsetof(struct tran_pdesc, desclist[ether->finish_tx]);
 	}
 
 	if (status & MISTA_EXDEF) {
 		dev_err(&pdev->dev, "emc defer exceed interrupt\n");
 	} else if (status & MISTA_TXBERR) {
 		dev_err(&pdev->dev, "emc bus error interrupt\n");
 		w90p910_reset_mac(dev);
 	} else if (status & MISTA_TDU) {
 		if (netif_queue_stopped(dev))
 			netif_wake_queue(dev);
 	}
-
-	spin_unlock(&ether->lock);
 
 	return IRQ_HANDLED;
 }
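
The tx-interrupt hunks above apply the same bus-address arithmetic to completion handling: the position reported by the hardware's current-descriptor register is compared against tdesc_phys plus an offsetof() displacement, and finish_tx wraps with a bounded increment rather than a modulo. Below is a rough user-space sketch of such a reclaim walk, not the driver's code: the demo_* names, the fake register value and the completion bit are illustrative only, and it again relies on GCC/Clang's offsetof() taking a runtime index.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_TX_RING  4
#define DEMO_DONE_BIT 0x1	/* stands in for TXDS_TXCP */

struct demo_txbd {
	uint32_t sl;		/* status word */
};

struct demo_tx_ring {
	struct demo_txbd desclist[DEMO_TX_RING];
};

int main(void)
{
	/* stand-in for the DMA base address of the descriptor block */
	uint32_t ring_phys = 0x30000000;
	/* entries 0 and 1 pretend to be completed transmissions */
	struct demo_tx_ring ring = { { { DEMO_DONE_BIT }, { DEMO_DONE_BIT },
				       { 0 }, { 0 } } };
	unsigned int finish_tx = 0, reclaimed = 0;

	/* pretend the "current TX descriptor" register points at entry 2 */
	uint32_t cur_entry = ring_phys +
			offsetof(struct demo_tx_ring, desclist[2]);

	uint32_t entry = ring_phys +
			offsetof(struct demo_tx_ring, desclist[finish_tx]);

	while (entry != cur_entry) {
		struct demo_txbd *txbd = &ring.desclist[finish_tx];

		/* bounded increment instead of "% DEMO_TX_RING" */
		if (++finish_tx >= DEMO_TX_RING)
			finish_tx = 0;

		if (txbd->sl & DEMO_DONE_BIT)
			reclaimed++;

		txbd->sl = 0;

		entry = ring_phys +
			offsetof(struct demo_tx_ring, desclist[finish_tx]);
	}

	printf("reclaimed %u descriptors, finish_tx now %u\n",
	       reclaimed, finish_tx);
	return 0;
}
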
@@ -698,20 +713,20 @@ static void netdev_rx(struct net_device *dev)
 	struct w90p910_ether *ether;
 	struct w90p910_rxbd *rxbd;
 	struct platform_device *pdev;
-	struct recv_pdesc *rdesc_phys;
 	struct sk_buff *skb;
 	unsigned char *data;
 	unsigned int length, status, val, entry;
 
 	ether = netdev_priv(dev);
 	pdev = ether->pdev;
-	rdesc_phys = ether->rdesc_phys;
 
 	rxbd = &ether->rdesc->desclist[ether->cur_rx];
 
 	do {
 		val = __raw_readl(ether->reg + REG_CRXDSA);
-		entry = (unsigned int)&rdesc_phys->desclist[ether->cur_rx];
+
+		entry = ether->rdesc_phys +
+			offsetof(struct recv_pdesc, desclist[ether->cur_rx]);
 
 		if (val == entry)
 			break;
@@ -743,22 +758,23 @@ static void netdev_rx(struct net_device *dev)
 				dev_err(&pdev->dev, "rx runt err\n");
 				ether->stats.rx_length_errors++;
 			} else if (status & RXDS_CRCE) {
 				dev_err(&pdev->dev, "rx crc err\n");
 				ether->stats.rx_crc_errors++;
-			}
-
-			if (status & RXDS_ALIE) {
+			} else if (status & RXDS_ALIE) {
 				dev_err(&pdev->dev, "rx aligment err\n");
 				ether->stats.rx_frame_errors++;
 			} else if (status & RXDS_PTLE) {
 				dev_err(&pdev->dev, "rx longer err\n");
 				ether->stats.rx_over_errors++;
-				}
 			}
+		}
 
 		rxbd->sl = RX_OWEN_DMA;
 		rxbd->reserved = 0x0;
-		ether->cur_rx = (ether->cur_rx+1) % RX_DESC_SIZE;
+
+		if (++ether->cur_rx >= RX_DESC_SIZE)
+			ether->cur_rx = 0;
+
 		rxbd = &ether->rdesc->desclist[ether->cur_rx];
 
 		dev->last_rx = jiffies;
@@ -772,28 +788,23 @@ static irqreturn_t w90p910_rx_interrupt(int irq, void *dev_id)
 	struct platform_device *pdev;
 	unsigned int status;
 
-	dev = (struct net_device *)dev_id;
+	dev = dev_id;
 	ether = netdev_priv(dev);
 	pdev = ether->pdev;
 
-	spin_lock(&ether->lock);
-
 	w90p910_get_and_clear_int(dev, &status);
 
 	if (status & MISTA_RDU) {
 		netdev_rx(dev);
-
 		w90p910_trigger_rx(dev);
 
-		spin_unlock(&ether->lock);
 		return IRQ_HANDLED;
 	} else if (status & MISTA_RXBERR) {
 		dev_err(&pdev->dev, "emc rx bus error\n");
 		w90p910_reset_mac(dev);
 	}
 
 	netdev_rx(dev);
-	spin_unlock(&ether->lock);
 	return IRQ_HANDLED;
 }
 
@@ -826,6 +837,7 @@ static int w90p910_ether_open(struct net_device *dev)
 	if (request_irq(ether->rxirq, w90p910_rx_interrupt,
 			0x0, pdev->name, dev)) {
 		dev_err(&pdev->dev, "register irq rx failed\n");
+		free_irq(ether->txirq, dev);
 		return -EAGAIN;
 	}
 
@@ -908,7 +920,7 @@ static const struct net_device_ops w90p910_ether_netdev_ops = {
 	.ndo_start_xmit		= w90p910_ether_start_xmit,
 	.ndo_get_stats		= w90p910_ether_stats,
 	.ndo_set_multicast_list	= w90p910_ether_set_multicast_list,
-	.ndo_set_mac_address	= set_mac_address,
+	.ndo_set_mac_address	= w90p910_set_mac_address,
 	.ndo_do_ioctl		= w90p910_ether_ioctl,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_change_mtu		= eth_change_mtu,
@@ -949,8 +961,6 @@ static int w90p910_ether_setup(struct net_device *dev)
 
 	get_mac_address(dev);
 
-	spin_lock_init(&ether->lock);
-
 	ether->cur_tx = 0x0;
 	ether->cur_rx = 0x0;
 	ether->finish_tx = 0x0;
@@ -972,30 +982,29 @@ static int __devinit w90p910_ether_probe(struct platform_device *pdev)
 {
 	struct w90p910_ether *ether;
 	struct net_device *dev;
-	struct resource *res;
 	int error;
 
 	dev = alloc_etherdev(sizeof(struct w90p910_ether));
 	if (!dev)
 		return -ENOMEM;
 
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (res == NULL) {
+	ether = netdev_priv(dev);
+
+	ether->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (ether->res == NULL) {
 		dev_err(&pdev->dev, "failed to get I/O memory\n");
 		error = -ENXIO;
 		goto failed_free;
 	}
 
-	res = request_mem_region(res->start, resource_size(res), pdev->name);
-	if (res == NULL) {
+	if (!request_mem_region(ether->res->start,
+				resource_size(ether->res), pdev->name)) {
 		dev_err(&pdev->dev, "failed to request I/O memory\n");
 		error = -EBUSY;
 		goto failed_free;
 	}
 
-	ether = netdev_priv(dev);
-
-	ether->reg = ioremap(res->start, resource_size(res));
+	ether->reg = ioremap(ether->res->start, resource_size(ether->res));
 	if (ether->reg == NULL) {
 		dev_err(&pdev->dev, "failed to remap I/O memory\n");
 		error = -ENXIO;
@@ -1056,7 +1065,7 @@ failed_free_txirq:
 failed_free_io:
 	iounmap(ether->reg);
 failed_free_mem:
-	release_mem_region(res->start, resource_size(res));
+	release_mem_region(ether->res->start, resource_size(ether->res));
 failed_free:
 	free_netdev(dev);
 	return error;
@@ -1068,10 +1077,19 @@ static int __devexit w90p910_ether_remove(struct platform_device *pdev)
 	struct w90p910_ether *ether = netdev_priv(dev);
 
 	unregister_netdev(dev);
+
 	clk_put(ether->rmiiclk);
 	clk_put(ether->clk);
+
+	iounmap(ether->reg);
+	release_mem_region(ether->res->start, resource_size(ether->res));
+
+	free_irq(ether->txirq, dev);
+	free_irq(ether->rxirq, dev);
+
 	del_timer_sync(&ether->check_timer);
 	platform_set_drvdata(pdev, NULL);
+
 	free_netdev(dev);
 	return 0;
 }