aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Daney <david.daney@cavium.com>2012-07-05 12:12:39 -0400
committerRalf Baechle <ralf@linux-mips.org>2012-07-23 08:54:53 -0400
commit368bec0d4a84f78f8c2be8441916d905a8da73c2 (patch)
tree29fd1d32da4eec4804908ac558469dfe0fe689bb
parent2fd46f47be0f96be700053d6caa8dcb14453a520 (diff)
netdev: octeon_mgmt: Convert to use device tree.
The device tree will supply the register bank base addresses; make register addressing relative to those. PHY connection is now described by the device tree. The OCTEON_IRQ_MII{0,1} symbols are also removed, as they are now unused and interfere with the irq_domain used for device tree irq mapping. Signed-off-by: David Daney <david.daney@cavium.com> Acked-by: David S. Miller <davem@davemloft.net> Cc: linux-mips@linux-mips.org Cc: devicetree-discuss@lists.ozlabs.org Cc: Grant Likely <grant.likely@secretlab.ca> Cc: Rob Herring <rob.herring@calxeda.com> Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/3941/ Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c62
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c312
3 files changed, 207 insertions, 169 deletions
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 2a661ad35cf7..5fb76aa346be 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1197,7 +1197,6 @@ static void __init octeon_irq_init_ciu(void)
1197 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq); 1197 octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq);
1198 1198
1199 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq); 1199 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
1200 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq);
1201 octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq); 1200 octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);
1202 1201
1203 /* CIU_1 */ 1202 /* CIU_1 */
@@ -1206,7 +1205,6 @@ static void __init octeon_irq_init_ciu(void)
1206 1205
1207 octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq); 1206 octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq);
1208 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq); 1207 octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
1209 octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq);
1210 1208
1211 gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); 1209 gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
1212 if (gpio_node) { 1210 if (gpio_node) {
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 66cabc2e64c8..0938df10a71c 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -168,68 +168,6 @@ out:
168} 168}
169device_initcall(octeon_rng_device_init); 169device_initcall(octeon_rng_device_init);
170 170
171/* Octeon mgmt port Ethernet interface. */
172static int __init octeon_mgmt_device_init(void)
173{
174 struct platform_device *pd;
175 int ret = 0;
176 int port, num_ports;
177
178 struct resource mgmt_port_resource = {
179 .flags = IORESOURCE_IRQ,
180 .start = -1,
181 .end = -1
182 };
183
184 if (!OCTEON_IS_MODEL(OCTEON_CN56XX) && !OCTEON_IS_MODEL(OCTEON_CN52XX))
185 return 0;
186
187 if (OCTEON_IS_MODEL(OCTEON_CN56XX))
188 num_ports = 1;
189 else
190 num_ports = 2;
191
192 for (port = 0; port < num_ports; port++) {
193 pd = platform_device_alloc("octeon_mgmt", port);
194 if (!pd) {
195 ret = -ENOMEM;
196 goto out;
197 }
198 /* No DMA restrictions */
199 pd->dev.coherent_dma_mask = DMA_BIT_MASK(64);
200 pd->dev.dma_mask = &pd->dev.coherent_dma_mask;
201
202 switch (port) {
203 case 0:
204 mgmt_port_resource.start = OCTEON_IRQ_MII0;
205 break;
206 case 1:
207 mgmt_port_resource.start = OCTEON_IRQ_MII1;
208 break;
209 default:
210 BUG();
211 }
212 mgmt_port_resource.end = mgmt_port_resource.start;
213
214 ret = platform_device_add_resources(pd, &mgmt_port_resource, 1);
215
216 if (ret)
217 goto fail;
218
219 ret = platform_device_add(pd);
220 if (ret)
221 goto fail;
222 }
223 return ret;
224fail:
225 platform_device_put(pd);
226
227out:
228 return ret;
229
230}
231device_initcall(octeon_mgmt_device_init);
232
233#ifdef CONFIG_USB 171#ifdef CONFIG_USB
234 172
235static int __init octeon_ehci_device_init(void) 173static int __init octeon_ehci_device_init(void)
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index cd827ff4a021..c42bbb16cdae 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -6,19 +6,21 @@
6 * Copyright (C) 2009 Cavium Networks 6 * Copyright (C) 2009 Cavium Networks
7 */ 7 */
8 8
9#include <linux/capability.h> 9#include <linux/platform_device.h>
10#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
11#include <linux/init.h> 11#include <linux/etherdevice.h>
12#include <linux/module.h> 12#include <linux/capability.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/platform_device.h>
15#include <linux/netdevice.h> 14#include <linux/netdevice.h>
16#include <linux/etherdevice.h> 15#include <linux/spinlock.h>
17#include <linux/if.h>
18#include <linux/if_vlan.h> 16#include <linux/if_vlan.h>
17#include <linux/of_mdio.h>
18#include <linux/module.h>
19#include <linux/of_net.h>
20#include <linux/init.h>
19#include <linux/slab.h> 21#include <linux/slab.h>
20#include <linux/phy.h> 22#include <linux/phy.h>
21#include <linux/spinlock.h> 23#include <linux/io.h>
22 24
23#include <asm/octeon/octeon.h> 25#include <asm/octeon/octeon.h>
24#include <asm/octeon/cvmx-mixx-defs.h> 26#include <asm/octeon/cvmx-mixx-defs.h>
@@ -58,8 +60,56 @@ union mgmt_port_ring_entry {
58 } s; 60 } s;
59}; 61};
60 62
63#define MIX_ORING1 0x0
64#define MIX_ORING2 0x8
65#define MIX_IRING1 0x10
66#define MIX_IRING2 0x18
67#define MIX_CTL 0x20
68#define MIX_IRHWM 0x28
69#define MIX_IRCNT 0x30
70#define MIX_ORHWM 0x38
71#define MIX_ORCNT 0x40
72#define MIX_ISR 0x48
73#define MIX_INTENA 0x50
74#define MIX_REMCNT 0x58
75#define MIX_BIST 0x78
76
77#define AGL_GMX_PRT_CFG 0x10
78#define AGL_GMX_RX_FRM_CTL 0x18
79#define AGL_GMX_RX_FRM_MAX 0x30
80#define AGL_GMX_RX_JABBER 0x38
81#define AGL_GMX_RX_STATS_CTL 0x50
82
83#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
84#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
85#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0
86
87#define AGL_GMX_RX_ADR_CTL 0x100
88#define AGL_GMX_RX_ADR_CAM_EN 0x108
89#define AGL_GMX_RX_ADR_CAM0 0x180
90#define AGL_GMX_RX_ADR_CAM1 0x188
91#define AGL_GMX_RX_ADR_CAM2 0x190
92#define AGL_GMX_RX_ADR_CAM3 0x198
93#define AGL_GMX_RX_ADR_CAM4 0x1a0
94#define AGL_GMX_RX_ADR_CAM5 0x1a8
95
96#define AGL_GMX_TX_STATS_CTL 0x268
97#define AGL_GMX_TX_CTL 0x270
98#define AGL_GMX_TX_STAT0 0x280
99#define AGL_GMX_TX_STAT1 0x288
100#define AGL_GMX_TX_STAT2 0x290
101#define AGL_GMX_TX_STAT3 0x298
102#define AGL_GMX_TX_STAT4 0x2a0
103#define AGL_GMX_TX_STAT5 0x2a8
104#define AGL_GMX_TX_STAT6 0x2b0
105#define AGL_GMX_TX_STAT7 0x2b8
106#define AGL_GMX_TX_STAT8 0x2c0
107#define AGL_GMX_TX_STAT9 0x2c8
108
61struct octeon_mgmt { 109struct octeon_mgmt {
62 struct net_device *netdev; 110 struct net_device *netdev;
111 u64 mix;
112 u64 agl;
63 int port; 113 int port;
64 int irq; 114 int irq;
65 u64 *tx_ring; 115 u64 *tx_ring;
@@ -85,31 +135,34 @@ struct octeon_mgmt {
85 struct napi_struct napi; 135 struct napi_struct napi;
86 struct tasklet_struct tx_clean_tasklet; 136 struct tasklet_struct tx_clean_tasklet;
87 struct phy_device *phydev; 137 struct phy_device *phydev;
138 struct device_node *phy_np;
139 resource_size_t mix_phys;
140 resource_size_t mix_size;
141 resource_size_t agl_phys;
142 resource_size_t agl_size;
88}; 143};
89 144
90static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) 145static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
91{ 146{
92 int port = p->port;
93 union cvmx_mixx_intena mix_intena; 147 union cvmx_mixx_intena mix_intena;
94 unsigned long flags; 148 unsigned long flags;
95 149
96 spin_lock_irqsave(&p->lock, flags); 150 spin_lock_irqsave(&p->lock, flags);
97 mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); 151 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
98 mix_intena.s.ithena = enable ? 1 : 0; 152 mix_intena.s.ithena = enable ? 1 : 0;
99 cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); 153 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
100 spin_unlock_irqrestore(&p->lock, flags); 154 spin_unlock_irqrestore(&p->lock, flags);
101} 155}
102 156
103static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) 157static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
104{ 158{
105 int port = p->port;
106 union cvmx_mixx_intena mix_intena; 159 union cvmx_mixx_intena mix_intena;
107 unsigned long flags; 160 unsigned long flags;
108 161
109 spin_lock_irqsave(&p->lock, flags); 162 spin_lock_irqsave(&p->lock, flags);
110 mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); 163 mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
111 mix_intena.s.othena = enable ? 1 : 0; 164 mix_intena.s.othena = enable ? 1 : 0;
112 cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); 165 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
113 spin_unlock_irqrestore(&p->lock, flags); 166 spin_unlock_irqrestore(&p->lock, flags);
114} 167}
115 168
@@ -146,7 +199,6 @@ static unsigned int ring_size_to_bytes(unsigned int ring_size)
146static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) 199static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
147{ 200{
148 struct octeon_mgmt *p = netdev_priv(netdev); 201 struct octeon_mgmt *p = netdev_priv(netdev);
149 int port = p->port;
150 202
151 while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { 203 while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
152 unsigned int size; 204 unsigned int size;
@@ -177,24 +229,23 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
177 (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; 229 (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
178 p->rx_current_fill++; 230 p->rx_current_fill++;
179 /* Ring the bell. */ 231 /* Ring the bell. */
180 cvmx_write_csr(CVMX_MIXX_IRING2(port), 1); 232 cvmx_write_csr(p->mix + MIX_IRING2, 1);
181 } 233 }
182} 234}
183 235
184static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) 236static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
185{ 237{
186 int port = p->port;
187 union cvmx_mixx_orcnt mix_orcnt; 238 union cvmx_mixx_orcnt mix_orcnt;
188 union mgmt_port_ring_entry re; 239 union mgmt_port_ring_entry re;
189 struct sk_buff *skb; 240 struct sk_buff *skb;
190 int cleaned = 0; 241 int cleaned = 0;
191 unsigned long flags; 242 unsigned long flags;
192 243
193 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); 244 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
194 while (mix_orcnt.s.orcnt) { 245 while (mix_orcnt.s.orcnt) {
195 spin_lock_irqsave(&p->tx_list.lock, flags); 246 spin_lock_irqsave(&p->tx_list.lock, flags);
196 247
197 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); 248 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
198 249
199 if (mix_orcnt.s.orcnt == 0) { 250 if (mix_orcnt.s.orcnt == 0) {
200 spin_unlock_irqrestore(&p->tx_list.lock, flags); 251 spin_unlock_irqrestore(&p->tx_list.lock, flags);
@@ -214,7 +265,7 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
214 mix_orcnt.s.orcnt = 1; 265 mix_orcnt.s.orcnt = 1;
215 266
216 /* Acknowledge to hardware that we have the buffer. */ 267 /* Acknowledge to hardware that we have the buffer. */
217 cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64); 268 cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
218 p->tx_current_fill--; 269 p->tx_current_fill--;
219 270
220 spin_unlock_irqrestore(&p->tx_list.lock, flags); 271 spin_unlock_irqrestore(&p->tx_list.lock, flags);
@@ -224,7 +275,7 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
224 dev_kfree_skb_any(skb); 275 dev_kfree_skb_any(skb);
225 cleaned++; 276 cleaned++;
226 277
227 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); 278 mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
228 } 279 }
229 280
230 if (cleaned && netif_queue_stopped(p->netdev)) 281 if (cleaned && netif_queue_stopped(p->netdev))
@@ -241,13 +292,12 @@ static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
241static void octeon_mgmt_update_rx_stats(struct net_device *netdev) 292static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
242{ 293{
243 struct octeon_mgmt *p = netdev_priv(netdev); 294 struct octeon_mgmt *p = netdev_priv(netdev);
244 int port = p->port;
245 unsigned long flags; 295 unsigned long flags;
246 u64 drop, bad; 296 u64 drop, bad;
247 297
248 /* These reads also clear the count registers. */ 298 /* These reads also clear the count registers. */
249 drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port)); 299 drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
250 bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port)); 300 bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);
251 301
252 if (drop || bad) { 302 if (drop || bad) {
253 /* Do an atomic update. */ 303 /* Do an atomic update. */
@@ -261,15 +311,14 @@ static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
261static void octeon_mgmt_update_tx_stats(struct net_device *netdev) 311static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
262{ 312{
263 struct octeon_mgmt *p = netdev_priv(netdev); 313 struct octeon_mgmt *p = netdev_priv(netdev);
264 int port = p->port;
265 unsigned long flags; 314 unsigned long flags;
266 315
267 union cvmx_agl_gmx_txx_stat0 s0; 316 union cvmx_agl_gmx_txx_stat0 s0;
268 union cvmx_agl_gmx_txx_stat1 s1; 317 union cvmx_agl_gmx_txx_stat1 s1;
269 318
270 /* These reads also clear the count registers. */ 319 /* These reads also clear the count registers. */
271 s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port)); 320 s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
272 s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port)); 321 s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);
273 322
274 if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { 323 if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
275 /* Do an atomic update. */ 324 /* Do an atomic update. */
@@ -308,7 +357,6 @@ static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
308 357
309static int octeon_mgmt_receive_one(struct octeon_mgmt *p) 358static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
310{ 359{
311 int port = p->port;
312 struct net_device *netdev = p->netdev; 360 struct net_device *netdev = p->netdev;
313 union cvmx_mixx_ircnt mix_ircnt; 361 union cvmx_mixx_ircnt mix_ircnt;
314 union mgmt_port_ring_entry re; 362 union mgmt_port_ring_entry re;
@@ -381,18 +429,17 @@ done:
381 /* Tell the hardware we processed a packet. */ 429 /* Tell the hardware we processed a packet. */
382 mix_ircnt.u64 = 0; 430 mix_ircnt.u64 = 0;
383 mix_ircnt.s.ircnt = 1; 431 mix_ircnt.s.ircnt = 1;
384 cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64); 432 cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
385 return rc; 433 return rc;
386} 434}
387 435
388static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) 436static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
389{ 437{
390 int port = p->port;
391 unsigned int work_done = 0; 438 unsigned int work_done = 0;
392 union cvmx_mixx_ircnt mix_ircnt; 439 union cvmx_mixx_ircnt mix_ircnt;
393 int rc; 440 int rc;
394 441
395 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); 442 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
396 while (work_done < budget && mix_ircnt.s.ircnt) { 443 while (work_done < budget && mix_ircnt.s.ircnt) {
397 444
398 rc = octeon_mgmt_receive_one(p); 445 rc = octeon_mgmt_receive_one(p);
@@ -400,7 +447,7 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
400 work_done++; 447 work_done++;
401 448
402 /* Check for more packets. */ 449 /* Check for more packets. */
403 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); 450 mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
404 } 451 }
405 452
406 octeon_mgmt_rx_fill_ring(p->netdev); 453 octeon_mgmt_rx_fill_ring(p->netdev);
@@ -434,16 +481,16 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
434 union cvmx_agl_gmx_bist agl_gmx_bist; 481 union cvmx_agl_gmx_bist agl_gmx_bist;
435 482
436 mix_ctl.u64 = 0; 483 mix_ctl.u64 = 0;
437 cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); 484 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
438 do { 485 do {
439 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port)); 486 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
440 } while (mix_ctl.s.busy); 487 } while (mix_ctl.s.busy);
441 mix_ctl.s.reset = 1; 488 mix_ctl.s.reset = 1;
442 cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); 489 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
443 cvmx_read_csr(CVMX_MIXX_CTL(p->port)); 490 cvmx_read_csr(p->mix + MIX_CTL);
444 cvmx_wait(64); 491 cvmx_wait(64);
445 492
446 mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port)); 493 mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
447 if (mix_bist.u64) 494 if (mix_bist.u64)
448 dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", 495 dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
449 (unsigned long long)mix_bist.u64); 496 (unsigned long long)mix_bist.u64);
@@ -474,7 +521,6 @@ static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
474static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) 521static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
475{ 522{
476 struct octeon_mgmt *p = netdev_priv(netdev); 523 struct octeon_mgmt *p = netdev_priv(netdev);
477 int port = p->port;
478 union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; 524 union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
479 union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; 525 union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
480 unsigned long flags; 526 unsigned long flags;
@@ -520,29 +566,29 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
520 spin_lock_irqsave(&p->lock, flags); 566 spin_lock_irqsave(&p->lock, flags);
521 567
522 /* Disable packet I/O. */ 568 /* Disable packet I/O. */
523 agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); 569 agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
524 prev_packet_enable = agl_gmx_prtx.s.en; 570 prev_packet_enable = agl_gmx_prtx.s.en;
525 agl_gmx_prtx.s.en = 0; 571 agl_gmx_prtx.s.en = 0;
526 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); 572 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
527 573
528 adr_ctl.u64 = 0; 574 adr_ctl.u64 = 0;
529 adr_ctl.s.cam_mode = cam_mode; 575 adr_ctl.s.cam_mode = cam_mode;
530 adr_ctl.s.mcst = multicast_mode; 576 adr_ctl.s.mcst = multicast_mode;
531 adr_ctl.s.bcst = 1; /* Allow broadcast */ 577 adr_ctl.s.bcst = 1; /* Allow broadcast */
532 578
533 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64); 579 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);
534 580
535 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]); 581 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
536 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]); 582 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
537 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]); 583 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
538 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]); 584 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
539 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]); 585 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
540 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]); 586 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
541 cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask); 587 cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);
542 588
543 /* Restore packet I/O. */ 589 /* Restore packet I/O. */
544 agl_gmx_prtx.s.en = prev_packet_enable; 590 agl_gmx_prtx.s.en = prev_packet_enable;
545 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); 591 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);
546 592
547 spin_unlock_irqrestore(&p->lock, flags); 593 spin_unlock_irqrestore(&p->lock, flags);
548} 594}
@@ -564,7 +610,6 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
564static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) 610static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
565{ 611{
566 struct octeon_mgmt *p = netdev_priv(netdev); 612 struct octeon_mgmt *p = netdev_priv(netdev);
567 int port = p->port;
568 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; 613 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
569 614
570 /* 615 /*
@@ -580,8 +625,8 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
580 625
581 netdev->mtu = new_mtu; 626 netdev->mtu = new_mtu;
582 627
583 cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs); 628 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
584 cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port), 629 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
585 (size_without_fcs + 7) & 0xfff8); 630 (size_without_fcs + 7) & 0xfff8);
586 631
587 return 0; 632 return 0;
@@ -591,14 +636,13 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
591{ 636{
592 struct net_device *netdev = dev_id; 637 struct net_device *netdev = dev_id;
593 struct octeon_mgmt *p = netdev_priv(netdev); 638 struct octeon_mgmt *p = netdev_priv(netdev);
594 int port = p->port;
595 union cvmx_mixx_isr mixx_isr; 639 union cvmx_mixx_isr mixx_isr;
596 640
597 mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port)); 641 mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);
598 642
599 /* Clear any pending interrupts */ 643 /* Clear any pending interrupts */
600 cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64); 644 cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
601 cvmx_read_csr(CVMX_MIXX_ISR(port)); 645 cvmx_read_csr(p->mix + MIX_ISR);
602 646
603 if (mixx_isr.s.irthresh) { 647 if (mixx_isr.s.irthresh) {
604 octeon_mgmt_disable_rx_irq(p); 648 octeon_mgmt_disable_rx_irq(p);
@@ -629,7 +673,6 @@ static int octeon_mgmt_ioctl(struct net_device *netdev,
629static void octeon_mgmt_adjust_link(struct net_device *netdev) 673static void octeon_mgmt_adjust_link(struct net_device *netdev)
630{ 674{
631 struct octeon_mgmt *p = netdev_priv(netdev); 675 struct octeon_mgmt *p = netdev_priv(netdev);
632 int port = p->port;
633 union cvmx_agl_gmx_prtx_cfg prtx_cfg; 676 union cvmx_agl_gmx_prtx_cfg prtx_cfg;
634 unsigned long flags; 677 unsigned long flags;
635 int link_changed = 0; 678 int link_changed = 0;
@@ -640,11 +683,9 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
640 link_changed = 1; 683 link_changed = 1;
641 if (p->last_duplex != p->phydev->duplex) { 684 if (p->last_duplex != p->phydev->duplex) {
642 p->last_duplex = p->phydev->duplex; 685 p->last_duplex = p->phydev->duplex;
643 prtx_cfg.u64 = 686 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
644 cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
645 prtx_cfg.s.duplex = p->phydev->duplex; 687 prtx_cfg.s.duplex = p->phydev->duplex;
646 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), 688 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
647 prtx_cfg.u64);
648 } 689 }
649 } else { 690 } else {
650 if (p->last_link) 691 if (p->last_link)
@@ -670,18 +711,16 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
670static int octeon_mgmt_init_phy(struct net_device *netdev) 711static int octeon_mgmt_init_phy(struct net_device *netdev)
671{ 712{
672 struct octeon_mgmt *p = netdev_priv(netdev); 713 struct octeon_mgmt *p = netdev_priv(netdev);
673 char phy_id[MII_BUS_ID_SIZE + 3];
674 714
675 if (octeon_is_simulation()) { 715 if (octeon_is_simulation() || p->phy_np == NULL) {
676 /* No PHYs in the simulator. */ 716 /* No PHYs in the simulator. */
677 netif_carrier_on(netdev); 717 netif_carrier_on(netdev);
678 return 0; 718 return 0;
679 } 719 }
680 720
681 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", p->port); 721 p->phydev = of_phy_connect(netdev, p->phy_np,
682 722 octeon_mgmt_adjust_link, 0,
683 p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0, 723 PHY_INTERFACE_MODE_MII);
684 PHY_INTERFACE_MODE_MII);
685 724
686 if (IS_ERR(p->phydev)) { 725 if (IS_ERR(p->phydev)) {
687 p->phydev = NULL; 726 p->phydev = NULL;
@@ -737,14 +776,14 @@ static int octeon_mgmt_open(struct net_device *netdev)
737 776
738 octeon_mgmt_reset_hw(p); 777 octeon_mgmt_reset_hw(p);
739 778
740 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); 779 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
741 780
742 /* Bring it out of reset if needed. */ 781 /* Bring it out of reset if needed. */
743 if (mix_ctl.s.reset) { 782 if (mix_ctl.s.reset) {
744 mix_ctl.s.reset = 0; 783 mix_ctl.s.reset = 0;
745 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); 784 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
746 do { 785 do {
747 mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port)); 786 mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
748 } while (mix_ctl.s.reset); 787 } while (mix_ctl.s.reset);
749 } 788 }
750 789
@@ -755,17 +794,17 @@ static int octeon_mgmt_open(struct net_device *netdev)
755 oring1.u64 = 0; 794 oring1.u64 = 0;
756 oring1.s.obase = p->tx_ring_handle >> 3; 795 oring1.s.obase = p->tx_ring_handle >> 3;
757 oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; 796 oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
758 cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64); 797 cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
759 798
760 iring1.u64 = 0; 799 iring1.u64 = 0;
761 iring1.s.ibase = p->rx_ring_handle >> 3; 800 iring1.s.ibase = p->rx_ring_handle >> 3;
762 iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; 801 iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
763 cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64); 802 cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
764 803
765 /* Disable packet I/O. */ 804 /* Disable packet I/O. */
766 prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); 805 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
767 prtx_cfg.s.en = 0; 806 prtx_cfg.s.en = 0;
768 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); 807 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
769 808
770 memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); 809 memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
771 octeon_mgmt_set_mac_address(netdev, &sa); 810 octeon_mgmt_set_mac_address(netdev, &sa);
@@ -782,7 +821,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
782 mix_ctl.s.nbtarb = 0; /* Arbitration mode */ 821 mix_ctl.s.nbtarb = 0; /* Arbitration mode */
783 /* MII CB-request FIFO programmable high watermark */ 822 /* MII CB-request FIFO programmable high watermark */
784 mix_ctl.s.mrq_hwm = 1; 823 mix_ctl.s.mrq_hwm = 1;
785 cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64); 824 cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
786 825
787 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) 826 if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
788 || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { 827 || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
@@ -809,16 +848,16 @@ static int octeon_mgmt_open(struct net_device *netdev)
809 848
810 /* Clear statistics. */ 849 /* Clear statistics. */
811 /* Clear on read. */ 850 /* Clear on read. */
812 cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1); 851 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
813 cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0); 852 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
814 cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0); 853 cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
815 854
816 cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1); 855 cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
817 cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0); 856 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
818 cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0); 857 cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
819 858
820 /* Clear any pending interrupts */ 859 /* Clear any pending interrupts */
821 cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port))); 860 cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
822 861
823 if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, 862 if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
824 netdev)) { 863 netdev)) {
@@ -829,18 +868,18 @@ static int octeon_mgmt_open(struct net_device *netdev)
829 /* Interrupt every single RX packet */ 868 /* Interrupt every single RX packet */
830 mix_irhwm.u64 = 0; 869 mix_irhwm.u64 = 0;
831 mix_irhwm.s.irhwm = 0; 870 mix_irhwm.s.irhwm = 0;
832 cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64); 871 cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
833 872
834 /* Interrupt when we have 1 or more packets to clean. */ 873 /* Interrupt when we have 1 or more packets to clean. */
835 mix_orhwm.u64 = 0; 874 mix_orhwm.u64 = 0;
836 mix_orhwm.s.orhwm = 1; 875 mix_orhwm.s.orhwm = 1;
837 cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64); 876 cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
838 877
839 /* Enable receive and transmit interrupts */ 878 /* Enable receive and transmit interrupts */
840 mix_intena.u64 = 0; 879 mix_intena.u64 = 0;
841 mix_intena.s.ithena = 1; 880 mix_intena.s.ithena = 1;
842 mix_intena.s.othena = 1; 881 mix_intena.s.othena = 1;
843 cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); 882 cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
844 883
845 884
846 /* Enable packet I/O. */ 885 /* Enable packet I/O. */
@@ -871,7 +910,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
871 * frame. GMX checks that the PREAMBLE is sent correctly. 910 * frame. GMX checks that the PREAMBLE is sent correctly.
872 */ 911 */
873 rxx_frm_ctl.s.pre_chk = 1; 912 rxx_frm_ctl.s.pre_chk = 1;
874 cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64); 913 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
875 914
876 /* Enable the AGL block */ 915 /* Enable the AGL block */
877 agl_gmx_inf_mode.u64 = 0; 916 agl_gmx_inf_mode.u64 = 0;
@@ -879,13 +918,13 @@ static int octeon_mgmt_open(struct net_device *netdev)
879 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); 918 cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
880 919
881 /* Configure the port duplex and enables */ 920 /* Configure the port duplex and enables */
882 prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); 921 prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
883 prtx_cfg.s.tx_en = 1; 922 prtx_cfg.s.tx_en = 1;
884 prtx_cfg.s.rx_en = 1; 923 prtx_cfg.s.rx_en = 1;
885 prtx_cfg.s.en = 1; 924 prtx_cfg.s.en = 1;
886 p->last_duplex = 1; 925 p->last_duplex = 1;
887 prtx_cfg.s.duplex = p->last_duplex; 926 prtx_cfg.s.duplex = p->last_duplex;
888 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64); 927 cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
889 928
890 p->last_link = 0; 929 p->last_link = 0;
891 netif_carrier_off(netdev); 930 netif_carrier_off(netdev);
@@ -949,7 +988,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
949static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) 988static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
950{ 989{
951 struct octeon_mgmt *p = netdev_priv(netdev); 990 struct octeon_mgmt *p = netdev_priv(netdev);
952 int port = p->port;
953 union mgmt_port_ring_entry re; 991 union mgmt_port_ring_entry re;
954 unsigned long flags; 992 unsigned long flags;
955 int rv = NETDEV_TX_BUSY; 993 int rv = NETDEV_TX_BUSY;
@@ -993,7 +1031,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
993 netdev->stats.tx_bytes += skb->len; 1031 netdev->stats.tx_bytes += skb->len;
994 1032
995 /* Ring the bell. */ 1033 /* Ring the bell. */
996 cvmx_write_csr(CVMX_MIXX_ORING2(port), 1); 1034 cvmx_write_csr(p->mix + MIX_ORING2, 1);
997 1035
998 rv = NETDEV_TX_OK; 1036 rv = NETDEV_TX_OK;
999out: 1037out:
@@ -1071,10 +1109,14 @@ static const struct net_device_ops octeon_mgmt_ops = {
1071 1109
1072static int __devinit octeon_mgmt_probe(struct platform_device *pdev) 1110static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1073{ 1111{
1074 struct resource *res_irq;
1075 struct net_device *netdev; 1112 struct net_device *netdev;
1076 struct octeon_mgmt *p; 1113 struct octeon_mgmt *p;
1077 int i; 1114 const __be32 *data;
1115 const u8 *mac;
1116 struct resource *res_mix;
1117 struct resource *res_agl;
1118 int len;
1119 int result;
1078 1120
1079 netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); 1121 netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1080 if (netdev == NULL) 1122 if (netdev == NULL)
@@ -1088,14 +1130,63 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1088 p->netdev = netdev; 1130 p->netdev = netdev;
1089 p->dev = &pdev->dev; 1131 p->dev = &pdev->dev;
1090 1132
1091 p->port = pdev->id; 1133 data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1134 if (data && len == sizeof(*data)) {
1135 p->port = be32_to_cpup(data);
1136 } else {
1137 dev_err(&pdev->dev, "no 'cell-index' property\n");
1138 result = -ENXIO;
1139 goto err;
1140 }
1141
1092 snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); 1142 snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1093 1143
1094 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1144 result = platform_get_irq(pdev, 0);
1095 if (!res_irq) 1145 if (result < 0)
1146 goto err;
1147
1148 p->irq = result;
1149
1150 res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1151 if (res_mix == NULL) {
1152 dev_err(&pdev->dev, "no 'reg' resource\n");
1153 result = -ENXIO;
1154 goto err;
1155 }
1156
1157 res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1158 if (res_agl == NULL) {
1159 dev_err(&pdev->dev, "no 'reg' resource\n");
1160 result = -ENXIO;
1161 goto err;
1162 }
1163
1164 p->mix_phys = res_mix->start;
1165 p->mix_size = resource_size(res_mix);
1166 p->agl_phys = res_agl->start;
1167 p->agl_size = resource_size(res_agl);
1168
1169
1170 if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
1171 res_mix->name)) {
1172 dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1173 res_mix->name);
1174 result = -ENXIO;
1175 goto err;
1176 }
1177
1178 if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
1179 res_agl->name)) {
1180 result = -ENXIO;
1181 dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1182 res_agl->name);
1096 goto err; 1183 goto err;
1184 }
1185
1186
1187 p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1188 p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
1097 1189
1098 p->irq = res_irq->start;
1099 spin_lock_init(&p->lock); 1190 spin_lock_init(&p->lock);
1100 1191
1101 skb_queue_head_init(&p->tx_list); 1192 skb_queue_head_init(&p->tx_list);
@@ -1108,24 +1199,26 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
1108 netdev->netdev_ops = &octeon_mgmt_ops; 1199 netdev->netdev_ops = &octeon_mgmt_ops;
1109 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; 1200 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1110 1201
1111 /* The mgmt ports get the first N MACs. */ 1202 mac = of_get_mac_address(pdev->dev.of_node);
1112 for (i = 0; i < 6; i++) 1203
1113 netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i]; 1204 if (mac)
1114 netdev->dev_addr[5] += p->port; 1205 memcpy(netdev->dev_addr, mac, 6);
1115 1206
1116 if (p->port >= octeon_bootinfo->mac_addr_count) 1207 p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1117 dev_err(&pdev->dev,
1118 "Error %s: Using MAC outside of the assigned range: %pM\n",
1119 netdev->name, netdev->dev_addr);
1120 1208
1121 if (register_netdev(netdev)) 1209 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
1210 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
1211
1212 result = register_netdev(netdev);
1213 if (result)
1122 goto err; 1214 goto err;
1123 1215
1124 dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); 1216 dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
1125 return 0; 1217 return 0;
1218
1126err: 1219err:
1127 free_netdev(netdev); 1220 free_netdev(netdev);
1128 return -ENOENT; 1221 return result;
1129} 1222}
1130 1223
1131static int __devexit octeon_mgmt_remove(struct platform_device *pdev) 1224static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
@@ -1137,10 +1230,19 @@ static int __devexit octeon_mgmt_remove(struct platform_device *pdev)
1137 return 0; 1230 return 0;
1138} 1231}
1139 1232
1233static struct of_device_id octeon_mgmt_match[] = {
1234 {
1235 .compatible = "cavium,octeon-5750-mix",
1236 },
1237 {},
1238};
1239MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
1240
1140static struct platform_driver octeon_mgmt_driver = { 1241static struct platform_driver octeon_mgmt_driver = {
1141 .driver = { 1242 .driver = {
1142 .name = "octeon_mgmt", 1243 .name = "octeon_mgmt",
1143 .owner = THIS_MODULE, 1244 .owner = THIS_MODULE,
1245 .of_match_table = octeon_mgmt_match,
1144 }, 1246 },
1145 .probe = octeon_mgmt_probe, 1247 .probe = octeon_mgmt_probe,
1146 .remove = __devexit_p(octeon_mgmt_remove), 1248 .remove = __devexit_p(octeon_mgmt_remove),