Diffstat (limited to 'drivers/net/octeon')
-rw-r--r--  drivers/net/octeon/Kconfig       |   10
-rw-r--r--  drivers/net/octeon/Makefile      |    2
-rw-r--r--  drivers/net/octeon/octeon_mgmt.c | 1166
3 files changed, 1178 insertions, 0 deletions
diff --git a/drivers/net/octeon/Kconfig b/drivers/net/octeon/Kconfig
new file mode 100644
index 00000000000..1e56bbf3f5c
--- /dev/null
+++ b/drivers/net/octeon/Kconfig
@@ -0,0 +1,10 @@
config OCTEON_MGMT_ETHERNET
	tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
	depends on CPU_CAVIUM_OCTEON
	select PHYLIB
	select MDIO_OCTEON
	default y
	help
	  This option enables the ethernet driver for the management
	  port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
	  CN54XX, CN52XX, and CN6XXX chips.
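
For reference, enabling the driver as a module in a kernel configuration
amounts to the line below; the select statements then force PHYLIB and
MDIO_OCTEON on, and the default y means Octeon kernels build the driver
in unless it is explicitly disabled:

    CONFIG_OCTEON_MGMT_ETHERNET=m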
diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile
new file mode 100644
index 00000000000..906edecacfd
--- /dev/null
+++ b/drivers/net/octeon/Makefile
@@ -0,0 +1,2 @@
1 | |||
2 | obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o | ||
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
new file mode 100644
index 00000000000..429e08c84e9
--- /dev/null
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -0,0 +1,1166 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009 Cavium Networks
 */

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/*
 * Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128
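
/*
 * For example, with the power-of-two sizes above the compiler can
 * lower a ring wrap-around such as
 *
 *	next = (next + 1) % OCTEON_MGMT_RX_RING_SIZE;
 *
 * to the equivalent bit mask
 *
 *	next = (next + 1) & (OCTEON_MGMT_RX_RING_SIZE - 1);
 *
 * avoiding a divide instruction.
 */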
38 | |||
39 | /* Allow 8 bytes for vlan and FCS. */ | ||
40 | #define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) | ||
41 | |||
42 | union mgmt_port_ring_entry { | ||
43 | u64 d64; | ||
44 | struct { | ||
45 | u64 reserved_62_63:2; | ||
46 | /* Length of the buffer/packet in bytes */ | ||
47 | u64 len:14; | ||
48 | /* For TX, signals that the packet should be timestamped */ | ||
49 | u64 tstamp:1; | ||
50 | /* The RX error code */ | ||
51 | u64 code:7; | ||
52 | #define RING_ENTRY_CODE_DONE 0xf | ||
53 | #define RING_ENTRY_CODE_MORE 0x10 | ||
54 | /* Physical address of the buffer */ | ||
55 | u64 addr:40; | ||
56 | } s; | ||
57 | }; | ||
58 | |||
59 | struct octeon_mgmt { | ||
60 | struct net_device *netdev; | ||
61 | int port; | ||
62 | int irq; | ||
63 | u64 *tx_ring; | ||
64 | dma_addr_t tx_ring_handle; | ||
65 | unsigned int tx_next; | ||
66 | unsigned int tx_next_clean; | ||
67 | unsigned int tx_current_fill; | ||
68 | /* The tx_list lock also protects the ring related variables */ | ||
69 | struct sk_buff_head tx_list; | ||
70 | |||
71 | /* RX variables only touched in napi_poll. No locking necessary. */ | ||
72 | u64 *rx_ring; | ||
73 | dma_addr_t rx_ring_handle; | ||
74 | unsigned int rx_next; | ||
75 | unsigned int rx_next_fill; | ||
76 | unsigned int rx_current_fill; | ||
77 | struct sk_buff_head rx_list; | ||
78 | |||
79 | spinlock_t lock; | ||
80 | unsigned int last_duplex; | ||
81 | unsigned int last_link; | ||
82 | struct device *dev; | ||
83 | struct napi_struct napi; | ||
84 | struct tasklet_struct tx_clean_tasklet; | ||
85 | struct phy_device *phydev; | ||
86 | }; | ||
87 | |||
88 | static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) | ||
89 | { | ||
90 | int port = p->port; | ||
91 | union cvmx_mixx_intena mix_intena; | ||
92 | unsigned long flags; | ||
93 | |||
94 | spin_lock_irqsave(&p->lock, flags); | ||
95 | mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); | ||
96 | mix_intena.s.ithena = enable ? 1 : 0; | ||
97 | cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); | ||
98 | spin_unlock_irqrestore(&p->lock, flags); | ||
99 | } | ||
100 | |||
101 | static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) | ||
102 | { | ||
103 | int port = p->port; | ||
104 | union cvmx_mixx_intena mix_intena; | ||
105 | unsigned long flags; | ||
106 | |||
107 | spin_lock_irqsave(&p->lock, flags); | ||
108 | mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port)); | ||
109 | mix_intena.s.othena = enable ? 1 : 0; | ||
110 | cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64); | ||
111 | spin_unlock_irqrestore(&p->lock, flags); | ||
112 | } | ||
113 | |||
114 | static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) | ||
115 | { | ||
116 | octeon_mgmt_set_rx_irq(p, 1); | ||
117 | } | ||
118 | |||
119 | static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) | ||
120 | { | ||
121 | octeon_mgmt_set_rx_irq(p, 0); | ||
122 | } | ||
123 | |||
124 | static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) | ||
125 | { | ||
126 | octeon_mgmt_set_tx_irq(p, 1); | ||
127 | } | ||
128 | |||
129 | static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) | ||
130 | { | ||
131 | octeon_mgmt_set_tx_irq(p, 0); | ||
132 | } | ||
133 | |||
134 | static unsigned int ring_max_fill(unsigned int ring_size) | ||
135 | { | ||
136 | return ring_size - 8; | ||
137 | } | ||
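
/*
 * With the sizes above, at most 512 - 8 = 504 RX and 128 - 8 = 120 TX
 * descriptors are ever in flight; the eight entries of slack appear to
 * keep the software fill index from wrapping onto descriptors the
 * hardware has not yet consumed.
 */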
138 | |||
139 | static unsigned int ring_size_to_bytes(unsigned int ring_size) | ||
140 | { | ||
141 | return ring_size * sizeof(union mgmt_port_ring_entry); | ||
142 | } | ||
143 | |||
144 | static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) | ||
145 | { | ||
146 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
147 | int port = p->port; | ||
148 | |||
149 | while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { | ||
150 | unsigned int size; | ||
151 | union mgmt_port_ring_entry re; | ||
152 | struct sk_buff *skb; | ||
153 | |||
154 | /* CN56XX pass 1 needs 8 bytes of padding. */ | ||
155 | size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN; | ||
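		/*
		 * Worked example: with the default MTU of 1500 this is
		 * 1500 + 22 (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) + 8
		 * + NET_IP_ALIGN (typically 2) = 1532 bytes per buffer.
		 */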
156 | |||
157 | skb = netdev_alloc_skb(netdev, size); | ||
158 | if (!skb) | ||
159 | break; | ||
160 | skb_reserve(skb, NET_IP_ALIGN); | ||
161 | __skb_queue_tail(&p->rx_list, skb); | ||
162 | |||
163 | re.d64 = 0; | ||
164 | re.s.len = size; | ||
165 | re.s.addr = dma_map_single(p->dev, skb->data, | ||
166 | size, | ||
167 | DMA_FROM_DEVICE); | ||
168 | |||
169 | /* Put it in the ring. */ | ||
170 | p->rx_ring[p->rx_next_fill] = re.d64; | ||
171 | dma_sync_single_for_device(p->dev, p->rx_ring_handle, | ||
172 | ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), | ||
173 | DMA_BIDIRECTIONAL); | ||
174 | p->rx_next_fill = | ||
175 | (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; | ||
176 | p->rx_current_fill++; | ||
177 | /* Ring the bell. */ | ||
178 | cvmx_write_csr(CVMX_MIXX_IRING2(port), 1); | ||
179 | } | ||
180 | } | ||
181 | |||
182 | static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) | ||
183 | { | ||
184 | int port = p->port; | ||
185 | union cvmx_mixx_orcnt mix_orcnt; | ||
186 | union mgmt_port_ring_entry re; | ||
187 | struct sk_buff *skb; | ||
188 | int cleaned = 0; | ||
189 | unsigned long flags; | ||
190 | |||
191 | mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); | ||
192 | while (mix_orcnt.s.orcnt) { | ||
193 | spin_lock_irqsave(&p->tx_list.lock, flags); | ||
194 | |||
195 | mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); | ||
196 | |||
197 | if (mix_orcnt.s.orcnt == 0) { | ||
198 | spin_unlock_irqrestore(&p->tx_list.lock, flags); | ||
199 | break; | ||
200 | } | ||
201 | |||
202 | dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, | ||
203 | ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), | ||
204 | DMA_BIDIRECTIONAL); | ||
205 | |||
206 | re.d64 = p->tx_ring[p->tx_next_clean]; | ||
207 | p->tx_next_clean = | ||
208 | (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; | ||
209 | skb = __skb_dequeue(&p->tx_list); | ||
210 | |||
211 | mix_orcnt.u64 = 0; | ||
212 | mix_orcnt.s.orcnt = 1; | ||
213 | |||
214 | /* Acknowledge to hardware that we have the buffer. */ | ||
215 | cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64); | ||
216 | p->tx_current_fill--; | ||
217 | |||
218 | spin_unlock_irqrestore(&p->tx_list.lock, flags); | ||
219 | |||
220 | dma_unmap_single(p->dev, re.s.addr, re.s.len, | ||
221 | DMA_TO_DEVICE); | ||
222 | dev_kfree_skb_any(skb); | ||
223 | cleaned++; | ||
224 | |||
225 | mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); | ||
226 | } | ||
227 | |||
228 | if (cleaned && netif_queue_stopped(p->netdev)) | ||
229 | netif_wake_queue(p->netdev); | ||
230 | } | ||
231 | |||
232 | static void octeon_mgmt_clean_tx_tasklet(unsigned long arg) | ||
233 | { | ||
234 | struct octeon_mgmt *p = (struct octeon_mgmt *)arg; | ||
235 | octeon_mgmt_clean_tx_buffers(p); | ||
236 | octeon_mgmt_enable_tx_irq(p); | ||
237 | } | ||
238 | |||
239 | static void octeon_mgmt_update_rx_stats(struct net_device *netdev) | ||
240 | { | ||
241 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
242 | int port = p->port; | ||
243 | unsigned long flags; | ||
244 | u64 drop, bad; | ||
245 | |||
246 | /* These reads also clear the count registers. */ | ||
247 | drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port)); | ||
248 | bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port)); | ||
249 | |||
250 | if (drop || bad) { | ||
251 | /* Do an atomic update. */ | ||
252 | spin_lock_irqsave(&p->lock, flags); | ||
253 | netdev->stats.rx_errors += bad; | ||
254 | netdev->stats.rx_dropped += drop; | ||
255 | spin_unlock_irqrestore(&p->lock, flags); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | static void octeon_mgmt_update_tx_stats(struct net_device *netdev) | ||
260 | { | ||
261 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
262 | int port = p->port; | ||
263 | unsigned long flags; | ||
264 | |||
265 | union cvmx_agl_gmx_txx_stat0 s0; | ||
266 | union cvmx_agl_gmx_txx_stat1 s1; | ||
267 | |||
268 | /* These reads also clear the count registers. */ | ||
269 | s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port)); | ||
270 | s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port)); | ||
271 | |||
272 | if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { | ||
273 | /* Do an atomic update. */ | ||
274 | spin_lock_irqsave(&p->lock, flags); | ||
275 | netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol; | ||
276 | netdev->stats.collisions += s1.s.scol + s1.s.mcol; | ||
277 | spin_unlock_irqrestore(&p->lock, flags); | ||
278 | } | ||
279 | } | ||
280 | |||
281 | /* | ||
282 | * Dequeue a receive skb and its corresponding ring entry. The ring | ||
283 | * entry is returned, *pskb is updated to point to the skb. | ||
284 | */ | ||
285 | static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, | ||
286 | struct sk_buff **pskb) | ||
287 | { | ||
288 | union mgmt_port_ring_entry re; | ||
289 | |||
290 | dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, | ||
291 | ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), | ||
292 | DMA_BIDIRECTIONAL); | ||
293 | |||
294 | re.d64 = p->rx_ring[p->rx_next]; | ||
295 | p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; | ||
296 | p->rx_current_fill--; | ||
297 | *pskb = __skb_dequeue(&p->rx_list); | ||
298 | |||
299 | dma_unmap_single(p->dev, re.s.addr, | ||
300 | ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM, | ||
301 | DMA_FROM_DEVICE); | ||
302 | |||
303 | return re.d64; | ||
304 | } | ||
305 | |||
306 | |||
307 | static int octeon_mgmt_receive_one(struct octeon_mgmt *p) | ||
308 | { | ||
309 | int port = p->port; | ||
310 | struct net_device *netdev = p->netdev; | ||
311 | union cvmx_mixx_ircnt mix_ircnt; | ||
312 | union mgmt_port_ring_entry re; | ||
313 | struct sk_buff *skb; | ||
314 | struct sk_buff *skb2; | ||
315 | struct sk_buff *skb_new; | ||
316 | union mgmt_port_ring_entry re2; | ||
317 | int rc = 1; | ||
318 | |||
319 | |||
320 | re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); | ||
321 | if (likely(re.s.code == RING_ENTRY_CODE_DONE)) { | ||
322 | /* A good packet, send it up. */ | ||
323 | skb_put(skb, re.s.len); | ||
324 | good: | ||
325 | skb->protocol = eth_type_trans(skb, netdev); | ||
326 | netdev->stats.rx_packets++; | ||
327 | netdev->stats.rx_bytes += skb->len; | ||
328 | netif_receive_skb(skb); | ||
329 | rc = 0; | ||
330 | } else if (re.s.code == RING_ENTRY_CODE_MORE) { | ||
331 | /* | ||
332 | * Packet split across skbs. This can happen if we | ||
333 | * increase the MTU. Buffers that are already in the | ||
334 | * rx ring can then end up being too small. As the rx | ||
335 | * ring is refilled, buffers sized for the new MTU | ||
336 | * will be used and we should go back to the normal | ||
337 | * non-split case. | ||
338 | */ | ||
339 | skb_put(skb, re.s.len); | ||
340 | do { | ||
341 | re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); | ||
342 | if (re2.s.code != RING_ENTRY_CODE_MORE | ||
343 | && re2.s.code != RING_ENTRY_CODE_DONE) | ||
344 | goto split_error; | ||
345 | skb_put(skb2, re2.s.len); | ||
346 | skb_new = skb_copy_expand(skb, 0, skb2->len, | ||
347 | GFP_ATOMIC); | ||
348 | if (!skb_new) | ||
349 | goto split_error; | ||
350 | if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new), | ||
351 | skb2->len)) | ||
352 | goto split_error; | ||
353 | skb_put(skb_new, skb2->len); | ||
354 | dev_kfree_skb_any(skb); | ||
355 | dev_kfree_skb_any(skb2); | ||
356 | skb = skb_new; | ||
357 | } while (re2.s.code == RING_ENTRY_CODE_MORE); | ||
358 | goto good; | ||
359 | } else { | ||
360 | /* Some other error, discard it. */ | ||
361 | dev_kfree_skb_any(skb); | ||
362 | /* | ||
363 | * Error statistics are accumulated in | ||
364 | * octeon_mgmt_update_rx_stats. | ||
365 | */ | ||
366 | } | ||
367 | goto done; | ||
368 | split_error: | ||
369 | /* Discard the whole mess. */ | ||
370 | dev_kfree_skb_any(skb); | ||
371 | dev_kfree_skb_any(skb2); | ||
372 | while (re2.s.code == RING_ENTRY_CODE_MORE) { | ||
373 | re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); | ||
374 | dev_kfree_skb_any(skb2); | ||
375 | } | ||
376 | netdev->stats.rx_errors++; | ||
377 | |||
378 | done: | ||
379 | /* Tell the hardware we processed a packet. */ | ||
380 | mix_ircnt.u64 = 0; | ||
381 | mix_ircnt.s.ircnt = 1; | ||
382 | cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64); | ||
383 | return rc; | ||
384 | } | ||
385 | |||
386 | static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) | ||
387 | { | ||
388 | int port = p->port; | ||
389 | unsigned int work_done = 0; | ||
390 | union cvmx_mixx_ircnt mix_ircnt; | ||
391 | int rc; | ||
392 | |||
393 | mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); | ||
394 | while (work_done < budget && mix_ircnt.s.ircnt) { | ||
395 | |||
396 | rc = octeon_mgmt_receive_one(p); | ||
397 | if (!rc) | ||
398 | work_done++; | ||
399 | |||
400 | /* Check for more packets. */ | ||
401 | mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); | ||
402 | } | ||
403 | |||
404 | octeon_mgmt_rx_fill_ring(p->netdev); | ||
405 | |||
406 | return work_done; | ||
407 | } | ||
408 | |||
409 | static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) | ||
410 | { | ||
411 | struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); | ||
412 | struct net_device *netdev = p->netdev; | ||
413 | unsigned int work_done = 0; | ||
414 | |||
415 | work_done = octeon_mgmt_receive_packets(p, budget); | ||
416 | |||
417 | if (work_done < budget) { | ||
418 | /* We stopped because no more packets were available. */ | ||
419 | napi_complete(napi); | ||
420 | octeon_mgmt_enable_rx_irq(p); | ||
421 | } | ||
422 | octeon_mgmt_update_rx_stats(netdev); | ||
423 | |||
424 | return work_done; | ||
425 | } | ||
426 | |||
427 | /* Reset the hardware to clean state. */ | ||
428 | static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) | ||
429 | { | ||
430 | union cvmx_mixx_ctl mix_ctl; | ||
431 | union cvmx_mixx_bist mix_bist; | ||
432 | union cvmx_agl_gmx_bist agl_gmx_bist; | ||
433 | |||
434 | mix_ctl.u64 = 0; | ||
435 | cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); | ||
436 | do { | ||
437 | mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port)); | ||
438 | } while (mix_ctl.s.busy); | ||
439 | mix_ctl.s.reset = 1; | ||
440 | cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64); | ||
441 | cvmx_read_csr(CVMX_MIXX_CTL(p->port)); | ||
442 | cvmx_wait(64); | ||
443 | |||
444 | mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port)); | ||
445 | if (mix_bist.u64) | ||
446 | dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", | ||
447 | (unsigned long long)mix_bist.u64); | ||
448 | |||
449 | agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST); | ||
450 | if (agl_gmx_bist.u64) | ||
451 | dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", | ||
452 | (unsigned long long)agl_gmx_bist.u64); | ||
453 | } | ||
454 | |||
455 | struct octeon_mgmt_cam_state { | ||
456 | u64 cam[6]; | ||
457 | u64 cam_mask; | ||
458 | int cam_index; | ||
459 | }; | ||
460 | |||
461 | static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, | ||
462 | unsigned char *addr) | ||
463 | { | ||
464 | int i; | ||
465 | |||
466 | for (i = 0; i < 6; i++) | ||
467 | cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); | ||
468 | cs->cam_mask |= (1ULL << cs->cam_index); | ||
469 | cs->cam_index++; | ||
470 | } | ||
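
/*
 * The CAM is filled transposed: cam[i] collects byte i of every entry,
 * with cam_index selecting the byte lane.  For example, adding
 * 02:00:00:00:00:01 as entry 1 ORs 0x02 into bits 15:8 of cam[0],
 * 0x01 into bits 15:8 of cam[5], and sets bit 1 of cam_mask.
 */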
471 | |||
472 | static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) | ||
473 | { | ||
474 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
475 | int port = p->port; | ||
476 | union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; | ||
477 | union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; | ||
478 | unsigned long flags; | ||
479 | unsigned int prev_packet_enable; | ||
480 | unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ | ||
481 | unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ | ||
482 | struct octeon_mgmt_cam_state cam_state; | ||
483 | struct netdev_hw_addr *ha; | ||
484 | int available_cam_entries; | ||
485 | |||
486 | memset(&cam_state, 0, sizeof(cam_state)); | ||
487 | |||
488 | if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) { | ||
489 | cam_mode = 0; | ||
490 | available_cam_entries = 8; | ||
491 | } else { | ||
492 | /* | ||
493 | * One CAM entry for the primary address, leaves seven | ||
494 | * for the secondary addresses. | ||
495 | */ | ||
496 | available_cam_entries = 7 - netdev->uc.count; | ||
497 | } | ||
498 | |||
499 | if (netdev->flags & IFF_MULTICAST) { | ||
500 | if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) || | ||
501 | netdev_mc_count(netdev) > available_cam_entries) | ||
502 | multicast_mode = 2; /* 2 - Accept all multicast. */ | ||
503 | else | ||
504 | multicast_mode = 0; /* 0 - Use CAM. */ | ||
505 | } | ||
506 | |||
507 | if (cam_mode == 1) { | ||
508 | /* Add primary address. */ | ||
509 | octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); | ||
510 | netdev_for_each_uc_addr(ha, netdev) | ||
511 | octeon_mgmt_cam_state_add(&cam_state, ha->addr); | ||
512 | } | ||
513 | if (multicast_mode == 0) { | ||
514 | netdev_for_each_mc_addr(ha, netdev) | ||
515 | octeon_mgmt_cam_state_add(&cam_state, ha->addr); | ||
516 | } | ||
517 | |||
518 | spin_lock_irqsave(&p->lock, flags); | ||
519 | |||
520 | /* Disable packet I/O. */ | ||
521 | agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port)); | ||
522 | prev_packet_enable = agl_gmx_prtx.s.en; | ||
523 | agl_gmx_prtx.s.en = 0; | ||
524 | cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); | ||
525 | |||
526 | adr_ctl.u64 = 0; | ||
527 | adr_ctl.s.cam_mode = cam_mode; | ||
528 | adr_ctl.s.mcst = multicast_mode; | ||
529 | adr_ctl.s.bcst = 1; /* Allow broadcast */ | ||
530 | |||
531 | cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64); | ||
532 | |||
533 | cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]); | ||
534 | cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]); | ||
535 | cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]); | ||
536 | cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]); | ||
537 | cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]); | ||
538 | cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]); | ||
539 | cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask); | ||
540 | |||
541 | /* Restore packet I/O. */ | ||
542 | agl_gmx_prtx.s.en = prev_packet_enable; | ||
543 | cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); | ||
544 | |||
545 | spin_unlock_irqrestore(&p->lock, flags); | ||
546 | } | ||
547 | |||
548 | static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) | ||
549 | { | ||
550 | struct sockaddr *sa = addr; | ||
551 | |||
552 | if (!is_valid_ether_addr(sa->sa_data)) | ||
553 | return -EADDRNOTAVAIL; | ||
554 | |||
555 | memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN); | ||
556 | |||
557 | octeon_mgmt_set_rx_filtering(netdev); | ||
558 | |||
559 | return 0; | ||
560 | } | ||
561 | |||
562 | static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) | ||
563 | { | ||
564 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
565 | int port = p->port; | ||
566 | int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; | ||
567 | |||
568 | /* | ||
569 | * Limit the MTU to make sure the ethernet packets are between | ||
570 | * 64 bytes and 16383 bytes. | ||
571 | */ | ||
572 | if (size_without_fcs < 64 || size_without_fcs > 16383) { | ||
573 | dev_warn(p->dev, "MTU must be between %d and %d.\n", | ||
574 | 64 - OCTEON_MGMT_RX_HEADROOM, | ||
575 | 16383 - OCTEON_MGMT_RX_HEADROOM); | ||
576 | return -EINVAL; | ||
577 | } | ||
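
	/*
	 * Since OCTEON_MGMT_RX_HEADROOM is 14 + 4 + 4 = 22 bytes, the
	 * check above accepts an MTU between 42 and 16361.
	 */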
578 | |||
579 | netdev->mtu = new_mtu; | ||
580 | |||
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
	cvmx_read_csr(CVMX_MIXX_ISR(port));

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	if (!p->phydev)
		return -EINVAL;

	return phy_mii_ioctl(p->phydev, rq, cmd);
}

static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	unsigned long flags;
	int link_changed = 0;

	spin_lock_irqsave(&p->lock, flags);
	if (p->phydev->link) {
		if (!p->last_link)
			link_changed = 1;
		if (p->last_duplex != p->phydev->duplex) {
			p->last_duplex = p->phydev->duplex;
			prtx_cfg.u64 =
				cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
			prtx_cfg.s.duplex = p->phydev->duplex;
			cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
				       prtx_cfg.u64);
		}
	} else {
		if (p->last_link)
			link_changed = -1;
	}
	p->last_link = p->phydev->link;
	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			netif_carrier_on(netdev);
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
				"Full" : "Half");
		} else {
			netif_carrier_off(netdev);
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	char phy_id[20];

	if (octeon_is_simulation()) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);

	p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

	if (IS_ERR(p->phydev)) {
		p->phydev = NULL;
		return -1;
	}

	phy_start_aneg(p->phydev);

	return 0;
}

static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW.  Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 1;
	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);


	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}

static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything.  */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}
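
	/*
	 * The queue was stopped with the lock dropped, so the cleaner
	 * may have freed entries in the meantime; re-check the fill
	 * level before giving up and returning NETDEV_TX_BUSY.
	 */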
968 | |||
969 | if (unlikely(p->tx_current_fill >= | ||
970 | ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { | ||
971 | spin_unlock_irqrestore(&p->tx_list.lock, flags); | ||
972 | dma_unmap_single(p->dev, re.s.addr, re.s.len, | ||
973 | DMA_TO_DEVICE); | ||
974 | goto out; | ||
975 | } | ||
976 | |||
977 | __skb_queue_tail(&p->tx_list, skb); | ||
978 | |||
979 | /* Put it in the ring. */ | ||
980 | p->tx_ring[p->tx_next] = re.d64; | ||
981 | p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; | ||
982 | p->tx_current_fill++; | ||
983 | |||
984 | spin_unlock_irqrestore(&p->tx_list.lock, flags); | ||
985 | |||
986 | dma_sync_single_for_device(p->dev, p->tx_ring_handle, | ||
987 | ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), | ||
988 | DMA_BIDIRECTIONAL); | ||
989 | |||
990 | netdev->stats.tx_packets++; | ||
991 | netdev->stats.tx_bytes += skb->len; | ||
992 | |||
993 | /* Ring the bell. */ | ||
994 | cvmx_write_csr(CVMX_MIXX_ORING2(port), 1); | ||
995 | |||
996 | rv = NETDEV_TX_OK; | ||
997 | out: | ||
998 | octeon_mgmt_update_tx_stats(netdev); | ||
999 | return rv; | ||
1000 | } | ||
1001 | |||
1002 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1003 | static void octeon_mgmt_poll_controller(struct net_device *netdev) | ||
1004 | { | ||
1005 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
1006 | |||
1007 | octeon_mgmt_receive_packets(p, 16); | ||
1008 | octeon_mgmt_update_rx_stats(netdev); | ||
1009 | } | ||
1010 | #endif | ||
1011 | |||
1012 | static void octeon_mgmt_get_drvinfo(struct net_device *netdev, | ||
1013 | struct ethtool_drvinfo *info) | ||
1014 | { | ||
1015 | strncpy(info->driver, DRV_NAME, sizeof(info->driver)); | ||
1016 | strncpy(info->version, DRV_VERSION, sizeof(info->version)); | ||
1017 | strncpy(info->fw_version, "N/A", sizeof(info->fw_version)); | ||
1018 | strncpy(info->bus_info, "N/A", sizeof(info->bus_info)); | ||
1019 | info->n_stats = 0; | ||
1020 | info->testinfo_len = 0; | ||
1021 | info->regdump_len = 0; | ||
1022 | info->eedump_len = 0; | ||
1023 | } | ||
1024 | |||
1025 | static int octeon_mgmt_get_settings(struct net_device *netdev, | ||
1026 | struct ethtool_cmd *cmd) | ||
1027 | { | ||
1028 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
1029 | |||
1030 | if (p->phydev) | ||
1031 | return phy_ethtool_gset(p->phydev, cmd); | ||
1032 | |||
1033 | return -EINVAL; | ||
1034 | } | ||
1035 | |||
1036 | static int octeon_mgmt_set_settings(struct net_device *netdev, | ||
1037 | struct ethtool_cmd *cmd) | ||
1038 | { | ||
1039 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
1040 | |||
1041 | if (!capable(CAP_NET_ADMIN)) | ||
1042 | return -EPERM; | ||
1043 | |||
1044 | if (p->phydev) | ||
1045 | return phy_ethtool_sset(p->phydev, cmd); | ||
1046 | |||
1047 | return -EINVAL; | ||
1048 | } | ||
1049 | |||
1050 | static const struct ethtool_ops octeon_mgmt_ethtool_ops = { | ||
1051 | .get_drvinfo = octeon_mgmt_get_drvinfo, | ||
1052 | .get_link = ethtool_op_get_link, | ||
1053 | .get_settings = octeon_mgmt_get_settings, | ||
1054 | .set_settings = octeon_mgmt_set_settings | ||
1055 | }; | ||
1056 | |||
1057 | static const struct net_device_ops octeon_mgmt_ops = { | ||
1058 | .ndo_open = octeon_mgmt_open, | ||
1059 | .ndo_stop = octeon_mgmt_stop, | ||
1060 | .ndo_start_xmit = octeon_mgmt_xmit, | ||
1061 | .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, | ||
1062 | .ndo_set_multicast_list = octeon_mgmt_set_rx_filtering, | ||
1063 | .ndo_set_mac_address = octeon_mgmt_set_mac_address, | ||
1064 | .ndo_do_ioctl = octeon_mgmt_ioctl, | ||
1065 | .ndo_change_mtu = octeon_mgmt_change_mtu, | ||
1066 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
1067 | .ndo_poll_controller = octeon_mgmt_poll_controller, | ||
1068 | #endif | ||
1069 | }; | ||
1070 | |||
1071 | static int __devinit octeon_mgmt_probe(struct platform_device *pdev) | ||
1072 | { | ||
1073 | struct resource *res_irq; | ||
1074 | struct net_device *netdev; | ||
1075 | struct octeon_mgmt *p; | ||
1076 | int i; | ||
1077 | |||
1078 | netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); | ||
1079 | if (netdev == NULL) | ||
1080 | return -ENOMEM; | ||
1081 | |||
1082 | dev_set_drvdata(&pdev->dev, netdev); | ||
1083 | p = netdev_priv(netdev); | ||
1084 | netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, | ||
1085 | OCTEON_MGMT_NAPI_WEIGHT); | ||
1086 | |||
1087 | p->netdev = netdev; | ||
1088 | p->dev = &pdev->dev; | ||
1089 | |||
1090 | p->port = pdev->id; | ||
1091 | snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); | ||
1092 | |||
1093 | res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
1094 | if (!res_irq) | ||
1095 | goto err; | ||
1096 | |||
1097 | p->irq = res_irq->start; | ||
1098 | spin_lock_init(&p->lock); | ||
1099 | |||
1100 | skb_queue_head_init(&p->tx_list); | ||
1101 | skb_queue_head_init(&p->rx_list); | ||
1102 | tasklet_init(&p->tx_clean_tasklet, | ||
1103 | octeon_mgmt_clean_tx_tasklet, (unsigned long)p); | ||
1104 | |||
1105 | netdev->netdev_ops = &octeon_mgmt_ops; | ||
1106 | netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; | ||
1107 | |||
1108 | /* The mgmt ports get the first N MACs. */ | ||
1109 | for (i = 0; i < 6; i++) | ||
1110 | netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i]; | ||
1111 | netdev->dev_addr[5] += p->port; | ||
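	/*
	 * For example, with a base MAC of 00:0f:b7:10:00:00, port 0
	 * keeps the base address and port 1 gets 00:0f:b7:10:00:01.
	 * Only the low byte is adjusted; any carry is not propagated.
	 */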
1112 | |||
1113 | if (p->port >= octeon_bootinfo->mac_addr_count) | ||
1114 | dev_err(&pdev->dev, | ||
1115 | "Error %s: Using MAC outside of the assigned range: %pM\n", | ||
1116 | netdev->name, netdev->dev_addr); | ||
1117 | |||
1118 | if (register_netdev(netdev)) | ||
1119 | goto err; | ||
1120 | |||
1121 | dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); | ||
1122 | return 0; | ||
1123 | err: | ||
1124 | free_netdev(netdev); | ||
1125 | return -ENOENT; | ||
1126 | } | ||
1127 | |||
1128 | static int __devexit octeon_mgmt_remove(struct platform_device *pdev) | ||
1129 | { | ||
1130 | struct net_device *netdev = dev_get_drvdata(&pdev->dev); | ||
1131 | |||
1132 | unregister_netdev(netdev); | ||
1133 | free_netdev(netdev); | ||
1134 | return 0; | ||
1135 | } | ||
1136 | |||
1137 | static struct platform_driver octeon_mgmt_driver = { | ||
1138 | .driver = { | ||
1139 | .name = "octeon_mgmt", | ||
1140 | .owner = THIS_MODULE, | ||
1141 | }, | ||
1142 | .probe = octeon_mgmt_probe, | ||
1143 | .remove = __devexit_p(octeon_mgmt_remove), | ||
1144 | }; | ||
1145 | |||
1146 | extern void octeon_mdiobus_force_mod_depencency(void); | ||
1147 | |||
1148 | static int __init octeon_mgmt_mod_init(void) | ||
1149 | { | ||
1150 | /* Force our mdiobus driver module to be loaded first. */ | ||
1151 | octeon_mdiobus_force_mod_depencency(); | ||
1152 | return platform_driver_register(&octeon_mgmt_driver); | ||
1153 | } | ||
1154 | |||
1155 | static void __exit octeon_mgmt_mod_exit(void) | ||
1156 | { | ||
1157 | platform_driver_unregister(&octeon_mgmt_driver); | ||
1158 | } | ||
1159 | |||
1160 | module_init(octeon_mgmt_mod_init); | ||
1161 | module_exit(octeon_mgmt_mod_exit); | ||
1162 | |||
1163 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
1164 | MODULE_AUTHOR("David Daney"); | ||
1165 | MODULE_LICENSE("GPL"); | ||
1166 | MODULE_VERSION(DRV_VERSION); | ||