diff options
author | Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com> | 2011-03-07 16:59:26 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2011-03-14 17:10:13 -0400 |
commit | 4a55530f38e4eeee3afb06093e81309138fe8360 (patch) | |
tree | 176fa77c4eacd0809205bde0b8a171d2d43d69a2 /drivers/net/sh_eth.c | |
parent | 201a11c1db82247143f0fbe29b4a97f16fa3a591 (diff) |
net: sh_eth: modify the definitions of register
The previous code could not handle the ETHER and GETHER at the same time
because the register definitions were hardcoded.
Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sh_eth.c')
-rw-r--r-- | drivers/net/sh_eth.c | 326 |
1 file changed, 163 insertions, 163 deletions
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c index 095e52580884..51268f591405 100644 --- a/drivers/net/sh_eth.c +++ b/drivers/net/sh_eth.c | |||
@@ -49,25 +49,23 @@ | |||
49 | static void sh_eth_set_duplex(struct net_device *ndev) | 49 | static void sh_eth_set_duplex(struct net_device *ndev) |
50 | { | 50 | { |
51 | struct sh_eth_private *mdp = netdev_priv(ndev); | 51 | struct sh_eth_private *mdp = netdev_priv(ndev); |
52 | u32 ioaddr = ndev->base_addr; | ||
53 | 52 | ||
54 | if (mdp->duplex) /* Full */ | 53 | if (mdp->duplex) /* Full */ |
55 | writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); | 54 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); |
56 | else /* Half */ | 55 | else /* Half */ |
57 | writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); | 56 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); |
58 | } | 57 | } |
59 | 58 | ||
60 | static void sh_eth_set_rate(struct net_device *ndev) | 59 | static void sh_eth_set_rate(struct net_device *ndev) |
61 | { | 60 | { |
62 | struct sh_eth_private *mdp = netdev_priv(ndev); | 61 | struct sh_eth_private *mdp = netdev_priv(ndev); |
63 | u32 ioaddr = ndev->base_addr; | ||
64 | 62 | ||
65 | switch (mdp->speed) { | 63 | switch (mdp->speed) { |
66 | case 10: /* 10BASE */ | 64 | case 10: /* 10BASE */ |
67 | writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR); | 65 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); |
68 | break; | 66 | break; |
69 | case 100:/* 100BASE */ | 67 | case 100:/* 100BASE */ |
70 | writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR); | 68 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); |
71 | break; | 69 | break; |
72 | default: | 70 | default: |
73 | break; | 71 | break; |
@@ -100,25 +98,23 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = { | |||
100 | static void sh_eth_set_duplex(struct net_device *ndev) | 98 | static void sh_eth_set_duplex(struct net_device *ndev) |
101 | { | 99 | { |
102 | struct sh_eth_private *mdp = netdev_priv(ndev); | 100 | struct sh_eth_private *mdp = netdev_priv(ndev); |
103 | u32 ioaddr = ndev->base_addr; | ||
104 | 101 | ||
105 | if (mdp->duplex) /* Full */ | 102 | if (mdp->duplex) /* Full */ |
106 | writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); | 103 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); |
107 | else /* Half */ | 104 | else /* Half */ |
108 | writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); | 105 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); |
109 | } | 106 | } |
110 | 107 | ||
111 | static void sh_eth_set_rate(struct net_device *ndev) | 108 | static void sh_eth_set_rate(struct net_device *ndev) |
112 | { | 109 | { |
113 | struct sh_eth_private *mdp = netdev_priv(ndev); | 110 | struct sh_eth_private *mdp = netdev_priv(ndev); |
114 | u32 ioaddr = ndev->base_addr; | ||
115 | 111 | ||
116 | switch (mdp->speed) { | 112 | switch (mdp->speed) { |
117 | case 10: /* 10BASE */ | 113 | case 10: /* 10BASE */ |
118 | writel(0, ioaddr + RTRATE); | 114 | sh_eth_write(ndev, 0, RTRATE); |
119 | break; | 115 | break; |
120 | case 100:/* 100BASE */ | 116 | case 100:/* 100BASE */ |
121 | writel(1, ioaddr + RTRATE); | 117 | sh_eth_write(ndev, 1, RTRATE); |
122 | break; | 118 | break; |
123 | default: | 119 | default: |
124 | break; | 120 | break; |
@@ -156,13 +152,12 @@ static void sh_eth_chip_reset(struct net_device *ndev) | |||
156 | 152 | ||
157 | static void sh_eth_reset(struct net_device *ndev) | 153 | static void sh_eth_reset(struct net_device *ndev) |
158 | { | 154 | { |
159 | u32 ioaddr = ndev->base_addr; | ||
160 | int cnt = 100; | 155 | int cnt = 100; |
161 | 156 | ||
162 | writel(EDSR_ENALL, ioaddr + EDSR); | 157 | sh_eth_write(ndev, EDSR_ENALL, EDSR); |
163 | writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); | 158 | sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST, EDMR); |
164 | while (cnt > 0) { | 159 | while (cnt > 0) { |
165 | if (!(readl(ioaddr + EDMR) & 0x3)) | 160 | if (!(sh_eth_read(ndev, EDMR) & 0x3)) |
166 | break; | 161 | break; |
167 | mdelay(1); | 162 | mdelay(1); |
168 | cnt--; | 163 | cnt--; |
@@ -171,41 +166,39 @@ static void sh_eth_reset(struct net_device *ndev) | |||
171 | printk(KERN_ERR "Device reset fail\n"); | 166 | printk(KERN_ERR "Device reset fail\n"); |
172 | 167 | ||
173 | /* Table Init */ | 168 | /* Table Init */ |
174 | writel(0x0, ioaddr + TDLAR); | 169 | sh_eth_write(ndev, 0x0, TDLAR); |
175 | writel(0x0, ioaddr + TDFAR); | 170 | sh_eth_write(ndev, 0x0, TDFAR); |
176 | writel(0x0, ioaddr + TDFXR); | 171 | sh_eth_write(ndev, 0x0, TDFXR); |
177 | writel(0x0, ioaddr + TDFFR); | 172 | sh_eth_write(ndev, 0x0, TDFFR); |
178 | writel(0x0, ioaddr + RDLAR); | 173 | sh_eth_write(ndev, 0x0, RDLAR); |
179 | writel(0x0, ioaddr + RDFAR); | 174 | sh_eth_write(ndev, 0x0, RDFAR); |
180 | writel(0x0, ioaddr + RDFXR); | 175 | sh_eth_write(ndev, 0x0, RDFXR); |
181 | writel(0x0, ioaddr + RDFFR); | 176 | sh_eth_write(ndev, 0x0, RDFFR); |
182 | } | 177 | } |
183 | 178 | ||
184 | static void sh_eth_set_duplex(struct net_device *ndev) | 179 | static void sh_eth_set_duplex(struct net_device *ndev) |
185 | { | 180 | { |
186 | struct sh_eth_private *mdp = netdev_priv(ndev); | 181 | struct sh_eth_private *mdp = netdev_priv(ndev); |
187 | u32 ioaddr = ndev->base_addr; | ||
188 | 182 | ||
189 | if (mdp->duplex) /* Full */ | 183 | if (mdp->duplex) /* Full */ |
190 | writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); | 184 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR); |
191 | else /* Half */ | 185 | else /* Half */ |
192 | writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); | 186 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR); |
193 | } | 187 | } |
194 | 188 | ||
195 | static void sh_eth_set_rate(struct net_device *ndev) | 189 | static void sh_eth_set_rate(struct net_device *ndev) |
196 | { | 190 | { |
197 | struct sh_eth_private *mdp = netdev_priv(ndev); | 191 | struct sh_eth_private *mdp = netdev_priv(ndev); |
198 | u32 ioaddr = ndev->base_addr; | ||
199 | 192 | ||
200 | switch (mdp->speed) { | 193 | switch (mdp->speed) { |
201 | case 10: /* 10BASE */ | 194 | case 10: /* 10BASE */ |
202 | writel(GECMR_10, ioaddr + GECMR); | 195 | sh_eth_write(ndev, GECMR_10, GECMR); |
203 | break; | 196 | break; |
204 | case 100:/* 100BASE */ | 197 | case 100:/* 100BASE */ |
205 | writel(GECMR_100, ioaddr + GECMR); | 198 | sh_eth_write(ndev, GECMR_100, GECMR); |
206 | break; | 199 | break; |
207 | case 1000: /* 1000BASE */ | 200 | case 1000: /* 1000BASE */ |
208 | writel(GECMR_1000, ioaddr + GECMR); | 201 | sh_eth_write(ndev, GECMR_1000, GECMR); |
209 | break; | 202 | break; |
210 | default: | 203 | default: |
211 | break; | 204 | break; |
@@ -288,11 +281,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) | |||
288 | /* Chip Reset */ | 281 | /* Chip Reset */ |
289 | static void sh_eth_reset(struct net_device *ndev) | 282 | static void sh_eth_reset(struct net_device *ndev) |
290 | { | 283 | { |
291 | u32 ioaddr = ndev->base_addr; | 284 | sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST, EDMR); |
292 | |||
293 | writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); | ||
294 | mdelay(3); | 285 | mdelay(3); |
295 | writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); | 286 | sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST, EDMR); |
296 | } | 287 | } |
297 | #endif | 288 | #endif |
298 | 289 | ||
@@ -341,13 +332,11 @@ static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x) | |||
341 | */ | 332 | */ |
342 | static void update_mac_address(struct net_device *ndev) | 333 | static void update_mac_address(struct net_device *ndev) |
343 | { | 334 | { |
344 | u32 ioaddr = ndev->base_addr; | 335 | sh_eth_write(ndev, |
345 | 336 | (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | | |
346 | writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | | 337 | (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); |
347 | (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), | 338 | sh_eth_write(ndev, |
348 | ioaddr + MAHR); | 339 | (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); |
349 | writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), | ||
350 | ioaddr + MALR); | ||
351 | } | 340 | } |
352 | 341 | ||
353 | /* | 342 | /* |
@@ -360,17 +349,15 @@ static void update_mac_address(struct net_device *ndev) | |||
360 | */ | 349 | */ |
361 | static void read_mac_address(struct net_device *ndev, unsigned char *mac) | 350 | static void read_mac_address(struct net_device *ndev, unsigned char *mac) |
362 | { | 351 | { |
363 | u32 ioaddr = ndev->base_addr; | ||
364 | |||
365 | if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { | 352 | if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { |
366 | memcpy(ndev->dev_addr, mac, 6); | 353 | memcpy(ndev->dev_addr, mac, 6); |
367 | } else { | 354 | } else { |
368 | ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24); | 355 | ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24); |
369 | ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF; | 356 | ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF; |
370 | ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF; | 357 | ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF; |
371 | ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF); | 358 | ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF); |
372 | ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF; | 359 | ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF; |
373 | ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF); | 360 | ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF); |
374 | } | 361 | } |
375 | } | 362 | } |
376 | 363 | ||
@@ -477,7 +464,6 @@ static void sh_eth_ring_free(struct net_device *ndev) | |||
477 | /* format skb and descriptor buffer */ | 464 | /* format skb and descriptor buffer */ |
478 | static void sh_eth_ring_format(struct net_device *ndev) | 465 | static void sh_eth_ring_format(struct net_device *ndev) |
479 | { | 466 | { |
480 | u32 ioaddr = ndev->base_addr; | ||
481 | struct sh_eth_private *mdp = netdev_priv(ndev); | 467 | struct sh_eth_private *mdp = netdev_priv(ndev); |
482 | int i; | 468 | int i; |
483 | struct sk_buff *skb; | 469 | struct sk_buff *skb; |
@@ -513,9 +499,9 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
513 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); | 499 | rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); |
514 | /* Rx descriptor address set */ | 500 | /* Rx descriptor address set */ |
515 | if (i == 0) { | 501 | if (i == 0) { |
516 | writel(mdp->rx_desc_dma, ioaddr + RDLAR); | 502 | sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); |
517 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | 503 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) |
518 | writel(mdp->rx_desc_dma, ioaddr + RDFAR); | 504 | sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); |
519 | #endif | 505 | #endif |
520 | } | 506 | } |
521 | } | 507 | } |
@@ -535,9 +521,9 @@ static void sh_eth_ring_format(struct net_device *ndev) | |||
535 | txdesc->buffer_length = 0; | 521 | txdesc->buffer_length = 0; |
536 | if (i == 0) { | 522 | if (i == 0) { |
537 | /* Tx descriptor address set */ | 523 | /* Tx descriptor address set */ |
538 | writel(mdp->tx_desc_dma, ioaddr + TDLAR); | 524 | sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); |
539 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | 525 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) |
540 | writel(mdp->tx_desc_dma, ioaddr + TDFAR); | 526 | sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); |
541 | #endif | 527 | #endif |
542 | } | 528 | } |
543 | } | 529 | } |
@@ -620,7 +606,6 @@ static int sh_eth_dev_init(struct net_device *ndev) | |||
620 | { | 606 | { |
621 | int ret = 0; | 607 | int ret = 0; |
622 | struct sh_eth_private *mdp = netdev_priv(ndev); | 608 | struct sh_eth_private *mdp = netdev_priv(ndev); |
623 | u32 ioaddr = ndev->base_addr; | ||
624 | u_int32_t rx_int_var, tx_int_var; | 609 | u_int32_t rx_int_var, tx_int_var; |
625 | u32 val; | 610 | u32 val; |
626 | 611 | ||
@@ -630,71 +615,71 @@ static int sh_eth_dev_init(struct net_device *ndev) | |||
630 | /* Descriptor format */ | 615 | /* Descriptor format */ |
631 | sh_eth_ring_format(ndev); | 616 | sh_eth_ring_format(ndev); |
632 | if (mdp->cd->rpadir) | 617 | if (mdp->cd->rpadir) |
633 | writel(mdp->cd->rpadir_value, ioaddr + RPADIR); | 618 | sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR); |
634 | 619 | ||
635 | /* all sh_eth int mask */ | 620 | /* all sh_eth int mask */ |
636 | writel(0, ioaddr + EESIPR); | 621 | sh_eth_write(ndev, 0, EESIPR); |
637 | 622 | ||
638 | #if defined(__LITTLE_ENDIAN__) | 623 | #if defined(__LITTLE_ENDIAN__) |
639 | if (mdp->cd->hw_swap) | 624 | if (mdp->cd->hw_swap) |
640 | writel(EDMR_EL, ioaddr + EDMR); | 625 | sh_eth_write(ndev, EDMR_EL, EDMR); |
641 | else | 626 | else |
642 | #endif | 627 | #endif |
643 | writel(0, ioaddr + EDMR); | 628 | sh_eth_write(ndev, 0, EDMR); |
644 | 629 | ||
645 | /* FIFO size set */ | 630 | /* FIFO size set */ |
646 | writel(mdp->cd->fdr_value, ioaddr + FDR); | 631 | sh_eth_write(ndev, mdp->cd->fdr_value, FDR); |
647 | writel(0, ioaddr + TFTR); | 632 | sh_eth_write(ndev, 0, TFTR); |
648 | 633 | ||
649 | /* Frame recv control */ | 634 | /* Frame recv control */ |
650 | writel(mdp->cd->rmcr_value, ioaddr + RMCR); | 635 | sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR); |
651 | 636 | ||
652 | rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; | 637 | rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; |
653 | tx_int_var = mdp->tx_int_var = DESC_I_TINT2; | 638 | tx_int_var = mdp->tx_int_var = DESC_I_TINT2; |
654 | writel(rx_int_var | tx_int_var, ioaddr + TRSCER); | 639 | sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER); |
655 | 640 | ||
656 | if (mdp->cd->bculr) | 641 | if (mdp->cd->bculr) |
657 | writel(0x800, ioaddr + BCULR); /* Burst sycle set */ | 642 | sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ |
658 | 643 | ||
659 | writel(mdp->cd->fcftr_value, ioaddr + FCFTR); | 644 | sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); |
660 | 645 | ||
661 | if (!mdp->cd->no_trimd) | 646 | if (!mdp->cd->no_trimd) |
662 | writel(0, ioaddr + TRIMD); | 647 | sh_eth_write(ndev, 0, TRIMD); |
663 | 648 | ||
664 | /* Recv frame limit set register */ | 649 | /* Recv frame limit set register */ |
665 | writel(RFLR_VALUE, ioaddr + RFLR); | 650 | sh_eth_write(ndev, RFLR_VALUE, RFLR); |
666 | 651 | ||
667 | writel(readl(ioaddr + EESR), ioaddr + EESR); | 652 | sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR); |
668 | writel(mdp->cd->eesipr_value, ioaddr + EESIPR); | 653 | sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); |
669 | 654 | ||
670 | /* PAUSE Prohibition */ | 655 | /* PAUSE Prohibition */ |
671 | val = (readl(ioaddr + ECMR) & ECMR_DM) | | 656 | val = (sh_eth_read(ndev, ECMR) & ECMR_DM) | |
672 | ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; | 657 | ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; |
673 | 658 | ||
674 | writel(val, ioaddr + ECMR); | 659 | sh_eth_write(ndev, val, ECMR); |
675 | 660 | ||
676 | if (mdp->cd->set_rate) | 661 | if (mdp->cd->set_rate) |
677 | mdp->cd->set_rate(ndev); | 662 | mdp->cd->set_rate(ndev); |
678 | 663 | ||
679 | /* E-MAC Status Register clear */ | 664 | /* E-MAC Status Register clear */ |
680 | writel(mdp->cd->ecsr_value, ioaddr + ECSR); | 665 | sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); |
681 | 666 | ||
682 | /* E-MAC Interrupt Enable register */ | 667 | /* E-MAC Interrupt Enable register */ |
683 | writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR); | 668 | sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); |
684 | 669 | ||
685 | /* Set MAC address */ | 670 | /* Set MAC address */ |
686 | update_mac_address(ndev); | 671 | update_mac_address(ndev); |
687 | 672 | ||
688 | /* mask reset */ | 673 | /* mask reset */ |
689 | if (mdp->cd->apr) | 674 | if (mdp->cd->apr) |
690 | writel(APR_AP, ioaddr + APR); | 675 | sh_eth_write(ndev, APR_AP, APR); |
691 | if (mdp->cd->mpr) | 676 | if (mdp->cd->mpr) |
692 | writel(MPR_MP, ioaddr + MPR); | 677 | sh_eth_write(ndev, MPR_MP, MPR); |
693 | if (mdp->cd->tpauser) | 678 | if (mdp->cd->tpauser) |
694 | writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER); | 679 | sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER); |
695 | 680 | ||
696 | /* Setting the Rx mode will start the Rx process. */ | 681 | /* Setting the Rx mode will start the Rx process. */ |
697 | writel(EDRRR_R, ioaddr + EDRRR); | 682 | sh_eth_write(ndev, EDRRR_R, EDRRR); |
698 | 683 | ||
699 | netif_start_queue(ndev); | 684 | netif_start_queue(ndev); |
700 | 685 | ||
@@ -818,38 +803,37 @@ static int sh_eth_rx(struct net_device *ndev) | |||
818 | 803 | ||
819 | /* Restart Rx engine if stopped. */ | 804 | /* Restart Rx engine if stopped. */ |
820 | /* If we don't need to check status, don't. -KDU */ | 805 | /* If we don't need to check status, don't. -KDU */ |
821 | if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R)) | 806 | if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) |
822 | writel(EDRRR_R, ndev->base_addr + EDRRR); | 807 | sh_eth_write(ndev, EDRRR_R, EDRRR); |
823 | 808 | ||
824 | return 0; | 809 | return 0; |
825 | } | 810 | } |
826 | 811 | ||
827 | static void sh_eth_rcv_snd_disable(u32 ioaddr) | 812 | static void sh_eth_rcv_snd_disable(struct net_device *ndev) |
828 | { | 813 | { |
829 | /* disable tx and rx */ | 814 | /* disable tx and rx */ |
830 | writel(readl(ioaddr + ECMR) & | 815 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & |
831 | ~(ECMR_RE | ECMR_TE), ioaddr + ECMR); | 816 | ~(ECMR_RE | ECMR_TE), ECMR); |
832 | } | 817 | } |
833 | 818 | ||
834 | static void sh_eth_rcv_snd_enable(u32 ioaddr) | 819 | static void sh_eth_rcv_snd_enable(struct net_device *ndev) |
835 | { | 820 | { |
836 | /* enable tx and rx */ | 821 | /* enable tx and rx */ |
837 | writel(readl(ioaddr + ECMR) | | 822 | sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | |
838 | (ECMR_RE | ECMR_TE), ioaddr + ECMR); | 823 | (ECMR_RE | ECMR_TE), ECMR); |
839 | } | 824 | } |
840 | 825 | ||
841 | /* error control function */ | 826 | /* error control function */ |
842 | static void sh_eth_error(struct net_device *ndev, int intr_status) | 827 | static void sh_eth_error(struct net_device *ndev, int intr_status) |
843 | { | 828 | { |
844 | struct sh_eth_private *mdp = netdev_priv(ndev); | 829 | struct sh_eth_private *mdp = netdev_priv(ndev); |
845 | u32 ioaddr = ndev->base_addr; | ||
846 | u32 felic_stat; | 830 | u32 felic_stat; |
847 | u32 link_stat; | 831 | u32 link_stat; |
848 | u32 mask; | 832 | u32 mask; |
849 | 833 | ||
850 | if (intr_status & EESR_ECI) { | 834 | if (intr_status & EESR_ECI) { |
851 | felic_stat = readl(ioaddr + ECSR); | 835 | felic_stat = sh_eth_read(ndev, ECSR); |
852 | writel(felic_stat, ioaddr + ECSR); /* clear int */ | 836 | sh_eth_write(ndev, felic_stat, ECSR); /* clear int */ |
853 | if (felic_stat & ECSR_ICD) | 837 | if (felic_stat & ECSR_ICD) |
854 | mdp->stats.tx_carrier_errors++; | 838 | mdp->stats.tx_carrier_errors++; |
855 | if (felic_stat & ECSR_LCHNG) { | 839 | if (felic_stat & ECSR_LCHNG) { |
@@ -860,23 +844,23 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
860 | else | 844 | else |
861 | link_stat = PHY_ST_LINK; | 845 | link_stat = PHY_ST_LINK; |
862 | } else { | 846 | } else { |
863 | link_stat = (readl(ioaddr + PSR)); | 847 | link_stat = (sh_eth_read(ndev, PSR)); |
864 | if (mdp->ether_link_active_low) | 848 | if (mdp->ether_link_active_low) |
865 | link_stat = ~link_stat; | 849 | link_stat = ~link_stat; |
866 | } | 850 | } |
867 | if (!(link_stat & PHY_ST_LINK)) | 851 | if (!(link_stat & PHY_ST_LINK)) |
868 | sh_eth_rcv_snd_disable(ioaddr); | 852 | sh_eth_rcv_snd_disable(ndev); |
869 | else { | 853 | else { |
870 | /* Link Up */ | 854 | /* Link Up */ |
871 | writel(readl(ioaddr + EESIPR) & | 855 | sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) & |
872 | ~DMAC_M_ECI, ioaddr + EESIPR); | 856 | ~DMAC_M_ECI, EESIPR); |
873 | /*clear int */ | 857 | /*clear int */ |
874 | writel(readl(ioaddr + ECSR), | 858 | sh_eth_write(ndev, sh_eth_read(ndev, ECSR), |
875 | ioaddr + ECSR); | 859 | ECSR); |
876 | writel(readl(ioaddr + EESIPR) | | 860 | sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) | |
877 | DMAC_M_ECI, ioaddr + EESIPR); | 861 | DMAC_M_ECI, EESIPR); |
878 | /* enable tx and rx */ | 862 | /* enable tx and rx */ |
879 | sh_eth_rcv_snd_enable(ioaddr); | 863 | sh_eth_rcv_snd_enable(ndev); |
880 | } | 864 | } |
881 | } | 865 | } |
882 | } | 866 | } |
@@ -917,8 +901,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
917 | /* Receive Descriptor Empty int */ | 901 | /* Receive Descriptor Empty int */ |
918 | mdp->stats.rx_over_errors++; | 902 | mdp->stats.rx_over_errors++; |
919 | 903 | ||
920 | if (readl(ioaddr + EDRRR) ^ EDRRR_R) | 904 | if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R) |
921 | writel(EDRRR_R, ioaddr + EDRRR); | 905 | sh_eth_write(ndev, EDRRR_R, EDRRR); |
922 | if (netif_msg_rx_err(mdp)) | 906 | if (netif_msg_rx_err(mdp)) |
923 | dev_err(&ndev->dev, "Receive Descriptor Empty\n"); | 907 | dev_err(&ndev->dev, "Receive Descriptor Empty\n"); |
924 | } | 908 | } |
@@ -942,7 +926,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
942 | mask &= ~EESR_ADE; | 926 | mask &= ~EESR_ADE; |
943 | if (intr_status & mask) { | 927 | if (intr_status & mask) { |
944 | /* Tx error */ | 928 | /* Tx error */ |
945 | u32 edtrr = readl(ndev->base_addr + EDTRR); | 929 | u32 edtrr = sh_eth_read(ndev, EDTRR); |
946 | /* dmesg */ | 930 | /* dmesg */ |
947 | dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", | 931 | dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", |
948 | intr_status, mdp->cur_tx); | 932 | intr_status, mdp->cur_tx); |
@@ -954,7 +938,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status) | |||
954 | /* SH7712 BUG */ | 938 | /* SH7712 BUG */ |
955 | if (edtrr ^ EDTRR_TRNS) { | 939 | if (edtrr ^ EDTRR_TRNS) { |
956 | /* tx dma start */ | 940 | /* tx dma start */ |
957 | writel(EDTRR_TRNS, ndev->base_addr + EDTRR); | 941 | sh_eth_write(ndev, EDTRR_TRNS, EDTRR); |
958 | } | 942 | } |
959 | /* wakeup */ | 943 | /* wakeup */ |
960 | netif_wake_queue(ndev); | 944 | netif_wake_queue(ndev); |
@@ -967,18 +951,17 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev) | |||
967 | struct sh_eth_private *mdp = netdev_priv(ndev); | 951 | struct sh_eth_private *mdp = netdev_priv(ndev); |
968 | struct sh_eth_cpu_data *cd = mdp->cd; | 952 | struct sh_eth_cpu_data *cd = mdp->cd; |
969 | irqreturn_t ret = IRQ_NONE; | 953 | irqreturn_t ret = IRQ_NONE; |
970 | u32 ioaddr, intr_status = 0; | 954 | u32 intr_status = 0; |
971 | 955 | ||
972 | ioaddr = ndev->base_addr; | ||
973 | spin_lock(&mdp->lock); | 956 | spin_lock(&mdp->lock); |
974 | 957 | ||
975 | /* Get interrpt stat */ | 958 | /* Get interrpt stat */ |
976 | intr_status = readl(ioaddr + EESR); | 959 | intr_status = sh_eth_read(ndev, EESR); |
977 | /* Clear interrupt */ | 960 | /* Clear interrupt */ |
978 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | | 961 | if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | |
979 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | | 962 | EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | |
980 | cd->tx_check | cd->eesr_err_check)) { | 963 | cd->tx_check | cd->eesr_err_check)) { |
981 | writel(intr_status, ioaddr + EESR); | 964 | sh_eth_write(ndev, intr_status, EESR); |
982 | ret = IRQ_HANDLED; | 965 | ret = IRQ_HANDLED; |
983 | } else | 966 | } else |
984 | goto other_irq; | 967 | goto other_irq; |
@@ -1021,7 +1004,6 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1021 | { | 1004 | { |
1022 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1005 | struct sh_eth_private *mdp = netdev_priv(ndev); |
1023 | struct phy_device *phydev = mdp->phydev; | 1006 | struct phy_device *phydev = mdp->phydev; |
1024 | u32 ioaddr = ndev->base_addr; | ||
1025 | int new_state = 0; | 1007 | int new_state = 0; |
1026 | 1008 | ||
1027 | if (phydev->link != PHY_DOWN) { | 1009 | if (phydev->link != PHY_DOWN) { |
@@ -1039,8 +1021,8 @@ static void sh_eth_adjust_link(struct net_device *ndev) | |||
1039 | mdp->cd->set_rate(ndev); | 1021 | mdp->cd->set_rate(ndev); |
1040 | } | 1022 | } |
1041 | if (mdp->link == PHY_DOWN) { | 1023 | if (mdp->link == PHY_DOWN) { |
1042 | writel((readl(ioaddr + ECMR) & ~ECMR_TXF) | 1024 | sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF) |
1043 | | ECMR_DM, ioaddr + ECMR); | 1025 | | ECMR_DM, ECMR); |
1044 | new_state = 1; | 1026 | new_state = 1; |
1045 | mdp->link = phydev->link; | 1027 | mdp->link = phydev->link; |
1046 | } | 1028 | } |
@@ -1122,12 +1104,11 @@ static int sh_eth_set_settings(struct net_device *ndev, | |||
1122 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1104 | struct sh_eth_private *mdp = netdev_priv(ndev); |
1123 | unsigned long flags; | 1105 | unsigned long flags; |
1124 | int ret; | 1106 | int ret; |
1125 | u32 ioaddr = ndev->base_addr; | ||
1126 | 1107 | ||
1127 | spin_lock_irqsave(&mdp->lock, flags); | 1108 | spin_lock_irqsave(&mdp->lock, flags); |
1128 | 1109 | ||
1129 | /* disable tx and rx */ | 1110 | /* disable tx and rx */ |
1130 | sh_eth_rcv_snd_disable(ioaddr); | 1111 | sh_eth_rcv_snd_disable(ndev); |
1131 | 1112 | ||
1132 | ret = phy_ethtool_sset(mdp->phydev, ecmd); | 1113 | ret = phy_ethtool_sset(mdp->phydev, ecmd); |
1133 | if (ret) | 1114 | if (ret) |
@@ -1145,7 +1126,7 @@ error_exit: | |||
1145 | mdelay(1); | 1126 | mdelay(1); |
1146 | 1127 | ||
1147 | /* enable tx and rx */ | 1128 | /* enable tx and rx */ |
1148 | sh_eth_rcv_snd_enable(ioaddr); | 1129 | sh_eth_rcv_snd_enable(ndev); |
1149 | 1130 | ||
1150 | spin_unlock_irqrestore(&mdp->lock, flags); | 1131 | spin_unlock_irqrestore(&mdp->lock, flags); |
1151 | 1132 | ||
@@ -1282,7 +1263,6 @@ out_free_irq: | |||
1282 | static void sh_eth_tx_timeout(struct net_device *ndev) | 1263 | static void sh_eth_tx_timeout(struct net_device *ndev) |
1283 | { | 1264 | { |
1284 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1265 | struct sh_eth_private *mdp = netdev_priv(ndev); |
1285 | u32 ioaddr = ndev->base_addr; | ||
1286 | struct sh_eth_rxdesc *rxdesc; | 1266 | struct sh_eth_rxdesc *rxdesc; |
1287 | int i; | 1267 | int i; |
1288 | 1268 | ||
@@ -1290,7 +1270,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev) | |||
1290 | 1270 | ||
1291 | if (netif_msg_timer(mdp)) | 1271 | if (netif_msg_timer(mdp)) |
1292 | dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," | 1272 | dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," |
1293 | " resetting...\n", ndev->name, (int)readl(ioaddr + EESR)); | 1273 | " resetting...\n", ndev->name, (int)sh_eth_read(ndev, EESR)); |
1294 | 1274 | ||
1295 | /* tx_errors count up */ | 1275 | /* tx_errors count up */ |
1296 | mdp->stats.tx_errors++; | 1276 | mdp->stats.tx_errors++; |
@@ -1363,8 +1343,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1363 | 1343 | ||
1364 | mdp->cur_tx++; | 1344 | mdp->cur_tx++; |
1365 | 1345 | ||
1366 | if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS)) | 1346 | if (!(sh_eth_read(ndev, EDTRR) & EDTRR_TRNS)) |
1367 | writel(EDTRR_TRNS, ndev->base_addr + EDTRR); | 1347 | sh_eth_write(ndev, EDTRR_TRNS, EDTRR); |
1368 | 1348 | ||
1369 | return NETDEV_TX_OK; | 1349 | return NETDEV_TX_OK; |
1370 | } | 1350 | } |
@@ -1373,17 +1353,16 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1373 | static int sh_eth_close(struct net_device *ndev) | 1353 | static int sh_eth_close(struct net_device *ndev) |
1374 | { | 1354 | { |
1375 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1355 | struct sh_eth_private *mdp = netdev_priv(ndev); |
1376 | u32 ioaddr = ndev->base_addr; | ||
1377 | int ringsize; | 1356 | int ringsize; |
1378 | 1357 | ||
1379 | netif_stop_queue(ndev); | 1358 | netif_stop_queue(ndev); |
1380 | 1359 | ||
1381 | /* Disable interrupts by clearing the interrupt mask. */ | 1360 | /* Disable interrupts by clearing the interrupt mask. */ |
1382 | writel(0x0000, ioaddr + EESIPR); | 1361 | sh_eth_write(ndev, 0x0000, EESIPR); |
1383 | 1362 | ||
1384 | /* Stop the chip's Tx and Rx processes. */ | 1363 | /* Stop the chip's Tx and Rx processes. */ |
1385 | writel(0, ioaddr + EDTRR); | 1364 | sh_eth_write(ndev, 0, EDTRR); |
1386 | writel(0, ioaddr + EDRRR); | 1365 | sh_eth_write(ndev, 0, EDRRR); |
1387 | 1366 | ||
1388 | /* PHY Disconnect */ | 1367 | /* PHY Disconnect */ |
1389 | if (mdp->phydev) { | 1368 | if (mdp->phydev) { |
@@ -1414,24 +1393,23 @@ static int sh_eth_close(struct net_device *ndev) | |||
1414 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) | 1393 | static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev) |
1415 | { | 1394 | { |
1416 | struct sh_eth_private *mdp = netdev_priv(ndev); | 1395 | struct sh_eth_private *mdp = netdev_priv(ndev); |
1417 | u32 ioaddr = ndev->base_addr; | ||
1418 | 1396 | ||
1419 | pm_runtime_get_sync(&mdp->pdev->dev); | 1397 | pm_runtime_get_sync(&mdp->pdev->dev); |
1420 | 1398 | ||
1421 | mdp->stats.tx_dropped += readl(ioaddr + TROCR); | 1399 | mdp->stats.tx_dropped += sh_eth_read(ndev, TROCR); |
1422 | writel(0, ioaddr + TROCR); /* (write clear) */ | 1400 | sh_eth_write(ndev, 0, TROCR); /* (write clear) */ |
1423 | mdp->stats.collisions += readl(ioaddr + CDCR); | 1401 | mdp->stats.collisions += sh_eth_read(ndev, CDCR); |
1424 | writel(0, ioaddr + CDCR); /* (write clear) */ | 1402 | sh_eth_write(ndev, 0, CDCR); /* (write clear) */ |
1425 | mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR); | 1403 | mdp->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR); |
1426 | writel(0, ioaddr + LCCR); /* (write clear) */ | 1404 | sh_eth_write(ndev, 0, LCCR); /* (write clear) */ |
1427 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | 1405 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) |
1428 | mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */ | 1406 | mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);/* CERCR */ |
1429 | writel(0, ioaddr + CERCR); /* (write clear) */ | 1407 | sh_eth_write(ndev, 0, CERCR); /* (write clear) */ |
1430 | mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */ | 1408 | mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);/* CEECR */ |
1431 | writel(0, ioaddr + CEECR); /* (write clear) */ | 1409 | sh_eth_write(ndev, 0, CEECR); /* (write clear) */ |
1432 | #else | 1410 | #else |
1433 | mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR); | 1411 | mdp->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR); |
1434 | writel(0, ioaddr + CNDCR); /* (write clear) */ | 1412 | sh_eth_write(ndev, 0, CNDCR); /* (write clear) */ |
1435 | #endif | 1413 | #endif |
1436 | pm_runtime_put_sync(&mdp->pdev->dev); | 1414 | pm_runtime_put_sync(&mdp->pdev->dev); |
1437 | 1415 | ||
@@ -1458,46 +1436,44 @@ static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, | |||
1458 | /* Multicast reception directions set */ | 1436 | /* Multicast reception directions set */ |
1459 | static void sh_eth_set_multicast_list(struct net_device *ndev) | 1437 | static void sh_eth_set_multicast_list(struct net_device *ndev) |
1460 | { | 1438 | { |
1461 | u32 ioaddr = ndev->base_addr; | ||
1462 | |||
1463 | if (ndev->flags & IFF_PROMISC) { | 1439 | if (ndev->flags & IFF_PROMISC) { |
1464 | /* Set promiscuous. */ | 1440 | /* Set promiscuous. */ |
1465 | writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM, | 1441 | sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_MCT) | |
1466 | ioaddr + ECMR); | 1442 | ECMR_PRM, ECMR); |
1467 | } else { | 1443 | } else { |
1468 | /* Normal, unicast/broadcast-only mode. */ | 1444 | /* Normal, unicast/broadcast-only mode. */ |
1469 | writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT, | 1445 | sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | |
1470 | ioaddr + ECMR); | 1446 | ECMR_MCT, ECMR); |
1471 | } | 1447 | } |
1472 | } | 1448 | } |
1473 | 1449 | ||
1474 | /* SuperH's TSU register init function */ | 1450 | /* SuperH's TSU register init function */ |
1475 | static void sh_eth_tsu_init(u32 ioaddr) | 1451 | static void sh_eth_tsu_init(struct sh_eth_private *mdp) |
1476 | { | 1452 | { |
1477 | writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */ | 1453 | sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ |
1478 | writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */ | 1454 | sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ |
1479 | writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */ | 1455 | sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ |
1480 | writel(0xc, ioaddr + TSU_BSYSL0); | 1456 | sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); |
1481 | writel(0xc, ioaddr + TSU_BSYSL1); | 1457 | sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); |
1482 | writel(0, ioaddr + TSU_PRISL0); | 1458 | sh_eth_tsu_write(mdp, 0, TSU_PRISL0); |
1483 | writel(0, ioaddr + TSU_PRISL1); | 1459 | sh_eth_tsu_write(mdp, 0, TSU_PRISL1); |
1484 | writel(0, ioaddr + TSU_FWSL0); | 1460 | sh_eth_tsu_write(mdp, 0, TSU_FWSL0); |
1485 | writel(0, ioaddr + TSU_FWSL1); | 1461 | sh_eth_tsu_write(mdp, 0, TSU_FWSL1); |
1486 | writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC); | 1462 | sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); |
1487 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) | 1463 | #if defined(CONFIG_CPU_SUBTYPE_SH7763) |
1488 | writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */ | 1464 | sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */ |
1489 | writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */ | 1465 | sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */ |
1490 | #else | 1466 | #else |
1491 | writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */ | 1467 | sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ |
1492 | writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */ | 1468 | sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ |
1493 | #endif | 1469 | #endif |
1494 | writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */ | 1470 | sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ |
1495 | writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */ | 1471 | sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ |
1496 | writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */ | 1472 | sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ |
1497 | writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */ | 1473 | sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ |
1498 | writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */ | 1474 | sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ |
1499 | writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */ | 1475 | sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ |
1500 | writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */ | 1476 | sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ |
1501 | } | 1477 | } |
1502 | #endif /* SH_ETH_HAS_TSU */ | 1478 | #endif /* SH_ETH_HAS_TSU */ |
1503 | 1479 | ||
@@ -1536,7 +1512,7 @@ static int sh_mdio_init(struct net_device *ndev, int id) | |||
1536 | } | 1512 | } |
1537 | 1513 | ||
1538 | /* bitbang init */ | 1514 | /* bitbang init */ |
1539 | bitbang->addr = ndev->base_addr + PIR; | 1515 | bitbang->addr = ndev->base_addr + mdp->reg_offset[PIR]; |
1540 | bitbang->mdi_msk = 0x08; | 1516 | bitbang->mdi_msk = 0x08; |
1541 | bitbang->mdo_msk = 0x04; | 1517 | bitbang->mdo_msk = 0x04; |
1542 | bitbang->mmd_msk = 0x02;/* MMD */ | 1518 | bitbang->mmd_msk = 0x02;/* MMD */ |
@@ -1587,6 +1563,28 @@ out: | |||
1587 | return ret; | 1563 | return ret; |
1588 | } | 1564 | } |
1589 | 1565 | ||
1566 | static const u16 *sh_eth_get_register_offset(int register_type) | ||
1567 | { | ||
1568 | const u16 *reg_offset = NULL; | ||
1569 | |||
1570 | switch (register_type) { | ||
1571 | case SH_ETH_REG_GIGABIT: | ||
1572 | reg_offset = sh_eth_offset_gigabit; | ||
1573 | break; | ||
1574 | case SH_ETH_REG_FAST_SH4: | ||
1575 | reg_offset = sh_eth_offset_fast_sh4; | ||
1576 | break; | ||
1577 | case SH_ETH_REG_FAST_SH3_SH2: | ||
1578 | reg_offset = sh_eth_offset_fast_sh3_sh2; | ||
1579 | break; | ||
1580 | default: | ||
1581 | printk(KERN_ERR "Unknown register type (%d)\n", register_type); | ||
1582 | break; | ||
1583 | } | ||
1584 | |||
1585 | return reg_offset; | ||
1586 | } | ||
1587 | |||
1590 | static const struct net_device_ops sh_eth_netdev_ops = { | 1588 | static const struct net_device_ops sh_eth_netdev_ops = { |
1591 | .ndo_open = sh_eth_open, | 1589 | .ndo_open = sh_eth_open, |
1592 | .ndo_stop = sh_eth_close, | 1590 | .ndo_stop = sh_eth_close, |
@@ -1657,6 +1655,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
1657 | mdp->edmac_endian = pd->edmac_endian; | 1655 | mdp->edmac_endian = pd->edmac_endian; |
1658 | mdp->no_ether_link = pd->no_ether_link; | 1656 | mdp->no_ether_link = pd->no_ether_link; |
1659 | mdp->ether_link_active_low = pd->ether_link_active_low; | 1657 | mdp->ether_link_active_low = pd->ether_link_active_low; |
1658 | mdp->reg_offset = sh_eth_get_register_offset(pd->register_type); | ||
1660 | 1659 | ||
1661 | /* set cpu data */ | 1660 | /* set cpu data */ |
1662 | mdp->cd = &sh_eth_my_cpu_data; | 1661 | mdp->cd = &sh_eth_my_cpu_data; |
@@ -1682,7 +1681,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev) | |||
1682 | 1681 | ||
1683 | #if defined(SH_ETH_HAS_TSU) | 1682 | #if defined(SH_ETH_HAS_TSU) |
1684 | /* TSU init (Init only)*/ | 1683 | /* TSU init (Init only)*/ |
1685 | sh_eth_tsu_init(SH_TSU_ADDR); | 1684 | mdp->tsu_addr = SH_TSU_ADDR; |
1685 | sh_eth_tsu_init(mdp); | ||
1686 | #endif | 1686 | #endif |
1687 | } | 1687 | } |
1688 | 1688 | ||