 drivers/net/Kconfig              |    2 +
 drivers/net/Makefile             |    2 +
 drivers/net/octeon/Kconfig       |   10 ++
 drivers/net/octeon/Makefile      |    2 +
 drivers/net/octeon/octeon_mgmt.c | 1176 ++++++++++++++++++++++++++++++++++++++
 5 files changed, 1192 insertions(+), 0 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a5be9ac6405c..e58a65391ad2 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1953,6 +1953,8 @@ config BCM63XX_ENET
 
 source "drivers/net/fs_enet/Kconfig"
 
+source "drivers/net/octeon/Kconfig"
+
 endif # NET_ETHERNET
 
 #
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 246323d7f161..ad1346dd9da9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -285,3 +285,5 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
 obj-$(CONFIG_SFC) += sfc/
 
 obj-$(CONFIG_WIMAX) += wimax/
+
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/octeon/Kconfig b/drivers/net/octeon/Kconfig
new file mode 100644
index 000000000000..1e56bbf3f5c0
--- /dev/null
+++ b/drivers/net/octeon/Kconfig
@@ -0,0 +1,10 @@
+config OCTEON_MGMT_ETHERNET
+	tristate "Octeon Management port ethernet driver (CN5XXX, CN6XXX)"
+	depends on CPU_CAVIUM_OCTEON
+	select PHYLIB
+	select MDIO_OCTEON
+	default y
+	help
+	  This option enables the ethernet driver for the management
+	  port on Cavium Networks' Octeon CN57XX, CN56XX, CN55XX,
+	  CN54XX, CN52XX, and CN6XXX chips.
diff --git a/drivers/net/octeon/Makefile b/drivers/net/octeon/Makefile
new file mode 100644
index 000000000000..906edecacfd3
--- /dev/null
+++ b/drivers/net/octeon/Makefile
@@ -0,0 +1,2 @@
+
+obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon_mgmt.o
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
new file mode 100644
index 000000000000..050538bf155a
--- /dev/null
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -0,0 +1,1176 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009 Cavium Networks
+ */
+
+#include <linux/capability.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <linux/spinlock.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-mixx-defs.h>
+#include <asm/octeon/cvmx-agl-defs.h>
+
+#define DRV_NAME "octeon_mgmt"
+#define DRV_VERSION "2.0"
+#define DRV_DESCRIPTION \
+	"Cavium Networks Octeon MII (management) port Network Driver"
+
+#define OCTEON_MGMT_NAPI_WEIGHT 16
+
+/*
+ * Ring sizes that are powers of two allow for more efficient modulo
+ * operations.
+ */
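+/*
+ * For example, with a ring size of 512 the compiler can reduce
+ * (i + 1) % OCTEON_MGMT_RX_RING_SIZE to (i + 1) & 511, avoiding a
+ * hardware divide in the ring-index updates below.
+ */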
+#define OCTEON_MGMT_RX_RING_SIZE 512
+#define OCTEON_MGMT_TX_RING_SIZE 128
+
+/* Allow 8 bytes for vlan and FCS. */
+#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+union mgmt_port_ring_entry {
+	u64 d64;
+	struct {
+		u64 reserved_62_63:2;
+		/* Length of the buffer/packet in bytes */
+		u64 len:14;
+		/* For TX, signals that the packet should be timestamped */
+		u64 tstamp:1;
+		/* The RX error code */
+		u64 code:7;
+#define RING_ENTRY_CODE_DONE 0xf
+#define RING_ENTRY_CODE_MORE 0x10
+		/* Physical address of the buffer */
+		u64 addr:40;
+	} s;
+};
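+/*
+ * The bit-fields above total 2 + 14 + 1 + 7 + 40 = 64 bits, so one
+ * descriptor packs a 40-bit physical buffer address, a 14-bit length,
+ * and the status/timestamp bits into a single u64 ring entry.
+ */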
+
+struct octeon_mgmt {
+	struct net_device *netdev;
+	int port;
+	int irq;
+	u64 *tx_ring;
+	dma_addr_t tx_ring_handle;
+	unsigned int tx_next;
+	unsigned int tx_next_clean;
+	unsigned int tx_current_fill;
+	/* The tx_list lock also protects the ring related variables */
+	struct sk_buff_head tx_list;
+
+	/* RX variables only touched in napi_poll.  No locking necessary. */
+	u64 *rx_ring;
+	dma_addr_t rx_ring_handle;
+	unsigned int rx_next;
+	unsigned int rx_next_fill;
+	unsigned int rx_current_fill;
+	struct sk_buff_head rx_list;
+
+	spinlock_t lock;
+	unsigned int last_duplex;
+	unsigned int last_link;
+	struct device *dev;
+	struct napi_struct napi;
+	struct tasklet_struct tx_clean_tasklet;
+	struct phy_device *phydev;
+};
+
+static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
+{
+	int port = p->port;
+	union cvmx_mixx_intena mix_intena;
+	unsigned long flags;
+
+	spin_lock_irqsave(&p->lock, flags);
+	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+	mix_intena.s.ithena = enable ? 1 : 0;
+	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+	spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
+{
+	int port = p->port;
+	union cvmx_mixx_intena mix_intena;
+	unsigned long flags;
+
+	spin_lock_irqsave(&p->lock, flags);
+	mix_intena.u64 = cvmx_read_csr(CVMX_MIXX_INTENA(port));
+	mix_intena.s.othena = enable ? 1 : 0;
+	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+	spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
+{
+	octeon_mgmt_set_rx_irq(p, 1);
+}
+
+static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
+{
+	octeon_mgmt_set_rx_irq(p, 0);
+}
+
+static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
+{
+	octeon_mgmt_set_tx_irq(p, 1);
+}
+
+static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
+{
+	octeon_mgmt_set_tx_irq(p, 0);
+}
+
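+/*
+ * Never let a ring fill completely; holding back eight entries keeps
+ * the hardware's producer index from catching up with the consumer
+ * index.  The exact slack of eight is presumably a requirement of the
+ * MIX DMA engine (an assumption; it is not documented here).
+ */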
+static unsigned int ring_max_fill(unsigned int ring_size)
+{
+	return ring_size - 8;
+}
+
+static unsigned int ring_size_to_bytes(unsigned int ring_size)
+{
+	return ring_size * sizeof(union mgmt_port_ring_entry);
+}
+
+static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	int port = p->port;
+
+	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
+		unsigned int size;
+		union mgmt_port_ring_entry re;
+		struct sk_buff *skb;
+
+		/* CN56XX pass 1 needs 8 bytes of padding.  */
+		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;
+
+		skb = netdev_alloc_skb(netdev, size);
+		if (!skb)
+			break;
+		skb_reserve(skb, NET_IP_ALIGN);
+		__skb_queue_tail(&p->rx_list, skb);
+
+		re.d64 = 0;
+		re.s.len = size;
+		re.s.addr = dma_map_single(p->dev, skb->data,
+					   size,
+					   DMA_FROM_DEVICE);
+
+		/* Put it in the ring.  */
+		p->rx_ring[p->rx_next_fill] = re.d64;
+		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
+					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+					   DMA_BIDIRECTIONAL);
+		p->rx_next_fill =
+			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
+		p->rx_current_fill++;
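+		/*
+		 * Each write of 1 to MIX_IRING2 tells the hardware that
+		 * one more RX descriptor is valid; the register appears
+		 * to act as an "add to available count" doorbell rather
+		 * than an absolute index.
+		 */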
+		/* Ring the bell.  */
+		cvmx_write_csr(CVMX_MIXX_IRING2(port), 1);
+	}
+}
+
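+/*
+ * MIX_ORCNT counts packets the hardware has finished transmitting.
+ * As used below, reading it tells us how many TX descriptors can be
+ * reclaimed, and writing a count back acknowledges (subtracts) that
+ * many, so each loop iteration consumes one completed descriptor.
+ */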
+static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
+{
+	int port = p->port;
+	union cvmx_mixx_orcnt mix_orcnt;
+	union mgmt_port_ring_entry re;
+	struct sk_buff *skb;
+	int cleaned = 0;
+	unsigned long flags;
+
+	mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+	while (mix_orcnt.s.orcnt) {
+		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
+					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+					DMA_BIDIRECTIONAL);
+
+		spin_lock_irqsave(&p->tx_list.lock, flags);
+
+		re.d64 = p->tx_ring[p->tx_next_clean];
+		p->tx_next_clean =
+			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
+		skb = __skb_dequeue(&p->tx_list);
+
+		mix_orcnt.u64 = 0;
+		mix_orcnt.s.orcnt = 1;
+
+		/* Acknowledge to hardware that we have the buffer.  */
+		cvmx_write_csr(CVMX_MIXX_ORCNT(port), mix_orcnt.u64);
+		p->tx_current_fill--;
+
+		spin_unlock_irqrestore(&p->tx_list.lock, flags);
+
+		dma_unmap_single(p->dev, re.s.addr, re.s.len,
+				 DMA_TO_DEVICE);
+		dev_kfree_skb_any(skb);
+		cleaned++;
+
+		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+	}
+
+	if (cleaned && netif_queue_stopped(p->netdev))
+		netif_wake_queue(p->netdev);
+}
+
+static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
+{
+	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
+	octeon_mgmt_clean_tx_buffers(p);
+	octeon_mgmt_enable_tx_irq(p);
+}
+
+static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	int port = p->port;
+	unsigned long flags;
+	u64 drop, bad;
+
+	/* These reads also clear the count registers.  */
+	drop = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port));
+	bad = cvmx_read_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port));
+
+	if (drop || bad) {
+		/* Do an atomic update.  */
+		spin_lock_irqsave(&p->lock, flags);
+		netdev->stats.rx_errors += bad;
+		netdev->stats.rx_dropped += drop;
+		spin_unlock_irqrestore(&p->lock, flags);
+	}
+}
+
+static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	int port = p->port;
+	unsigned long flags;
+
+	union cvmx_agl_gmx_txx_stat0 s0;
+	union cvmx_agl_gmx_txx_stat1 s1;
+
+	/* These reads also clear the count registers.  */
+	s0.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT0(port));
+	s1.u64 = cvmx_read_csr(CVMX_AGL_GMX_TXX_STAT1(port));
+
+	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
+		/* Do an atomic update.  */
+		spin_lock_irqsave(&p->lock, flags);
+		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
+		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
+		spin_unlock_irqrestore(&p->lock, flags);
+	}
+}
+
+/*
+ * Dequeue a receive skb and its corresponding ring entry.  The ring
+ * entry is returned, *pskb is updated to point to the skb.
+ */
+static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
+					 struct sk_buff **pskb)
+{
+	union mgmt_port_ring_entry re;
+
+	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
+				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+				DMA_BIDIRECTIONAL);
+
+	re.d64 = p->rx_ring[p->rx_next];
+	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
+	p->rx_current_fill--;
+	*pskb = __skb_dequeue(&p->rx_list);
+
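+	/*
+	 * The length passed to dma_unmap_single() below does not match
+	 * the length used at map time; this is harmless here because,
+	 * as noted in octeon_mgmt_stop(), dma_unmap is a nop on Octeon.
+	 */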
+	dma_unmap_single(p->dev, re.s.addr,
+			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
+			 DMA_FROM_DEVICE);
+
+	return re.d64;
+}
+
+
+static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
+{
+	int port = p->port;
+	struct net_device *netdev = p->netdev;
+	union cvmx_mixx_ircnt mix_ircnt;
+	union mgmt_port_ring_entry re;
+	struct sk_buff *skb;
+	struct sk_buff *skb2;
+	struct sk_buff *skb_new;
+	union mgmt_port_ring_entry re2;
+	int rc = 1;
+
+
+	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
+	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
+		/* A good packet, send it up.  */
+		skb_put(skb, re.s.len);
+good:
+		skb->protocol = eth_type_trans(skb, netdev);
+		netdev->stats.rx_packets++;
+		netdev->stats.rx_bytes += skb->len;
+		netdev->last_rx = jiffies;
+		netif_receive_skb(skb);
+		rc = 0;
+	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
+		/*
+		 * Packet split across skbs.  This can happen if we
+		 * increase the MTU.  Buffers that are already in the
+		 * rx ring can then end up being too small.  As the rx
+		 * ring is refilled, buffers sized for the new MTU
+		 * will be used and we should go back to the normal
+		 * non-split case.
+		 */
+		skb_put(skb, re.s.len);
+		do {
+			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+			if (re2.s.code != RING_ENTRY_CODE_MORE
+			    && re2.s.code != RING_ENTRY_CODE_DONE)
+				goto split_error;
+			skb_put(skb2, re2.s.len);
+			skb_new = skb_copy_expand(skb, 0, skb2->len,
+						  GFP_ATOMIC);
+			if (!skb_new)
+				goto split_error;
+			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
+					  skb2->len))
+				goto split_error;
+			skb_put(skb_new, skb2->len);
+			dev_kfree_skb_any(skb);
+			dev_kfree_skb_any(skb2);
+			skb = skb_new;
+		} while (re2.s.code == RING_ENTRY_CODE_MORE);
+		goto good;
+	} else {
+		/* Some other error, discard it.  */
+		dev_kfree_skb_any(skb);
+		/*
+		 * Error statistics are accumulated in
+		 * octeon_mgmt_update_rx_stats.
+		 */
+	}
+	goto done;
+split_error:
+	/* Discard the whole mess.  */
+	dev_kfree_skb_any(skb);
+	dev_kfree_skb_any(skb2);
+	while (re2.s.code == RING_ENTRY_CODE_MORE) {
+		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
+		dev_kfree_skb_any(skb2);
+	}
+	netdev->stats.rx_errors++;
+
+done:
+	/* Tell the hardware we processed a packet.  */
+	mix_ircnt.u64 = 0;
+	mix_ircnt.s.ircnt = 1;
+	cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
+	return rc;
+
+}
+
+static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
+{
+	int port = p->port;
+	unsigned int work_done = 0;
+	union cvmx_mixx_ircnt mix_ircnt;
+	int rc;
+
+
+	mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+	while (work_done < budget && mix_ircnt.s.ircnt) {
+
+		rc = octeon_mgmt_receive_one(p);
+		if (!rc)
+			work_done++;
+
+		/* Check for more packets.  */
+		mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
+	}
+
+	octeon_mgmt_rx_fill_ring(p->netdev);
+
+	return work_done;
+}
+
+static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
+{
+	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
+	struct net_device *netdev = p->netdev;
+	unsigned int work_done = 0;
+
+	work_done = octeon_mgmt_receive_packets(p, budget);
+
+	if (work_done < budget) {
+		/* We stopped because no more packets were available.  */
+		napi_complete(napi);
+		octeon_mgmt_enable_rx_irq(p);
+	}
+	octeon_mgmt_update_rx_stats(netdev);
+
+	return work_done;
+}
+
+/* Reset the hardware to clean state.  */
+static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
+{
+	union cvmx_mixx_ctl mix_ctl;
+	union cvmx_mixx_bist mix_bist;
+	union cvmx_agl_gmx_bist agl_gmx_bist;
+
+	mix_ctl.u64 = 0;
+	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
+	do {
+		mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+	} while (mix_ctl.s.busy);
+	mix_ctl.s.reset = 1;
+	cvmx_write_csr(CVMX_MIXX_CTL(p->port), mix_ctl.u64);
+	cvmx_read_csr(CVMX_MIXX_CTL(p->port));
+	cvmx_wait(64);
+
+	mix_bist.u64 = cvmx_read_csr(CVMX_MIXX_BIST(p->port));
+	if (mix_bist.u64)
+		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
+			 (unsigned long long)mix_bist.u64);
+
+	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
+	if (agl_gmx_bist.u64)
+		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
+			 (unsigned long long)agl_gmx_bist.u64);
+}
+
+struct octeon_mgmt_cam_state {
+	u64 cam[6];
+	u64 cam_mask;
+	int cam_index;
+};
+
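+/*
+ * CAM register layout, as implied by the code below: data register
+ * cam[i] holds byte i of up to eight MAC addresses, with entry j
+ * occupying bits [8*j+7:8*j]; cam_mask has one enable bit per entry.
+ */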
+static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
+				      unsigned char *addr)
+{
+	int i;
+
+	for (i = 0; i < 6; i++)
+		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
+	cs->cam_mask |= (1ULL << cs->cam_index);
+	cs->cam_index++;
+}
+
+static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	int port = p->port;
+	int i;
+	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
+	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
+	unsigned long flags;
+	unsigned int prev_packet_enable;
+	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
+	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
+	struct octeon_mgmt_cam_state cam_state;
+	struct dev_addr_list *list;
+	struct list_head *pos;
+	int available_cam_entries;
+
+	memset(&cam_state, 0, sizeof(cam_state));
+
+	if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) {
+		cam_mode = 0;
+		available_cam_entries = 8;
+	} else {
+		/*
+		 * One CAM entry for the primary address, leaves seven
+		 * for the secondary addresses.
+		 */
+		available_cam_entries = 7 - netdev->dev_addrs.count;
+	}
+
+	if (netdev->flags & IFF_MULTICAST) {
+		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
+		    || netdev->mc_count > available_cam_entries)
+			multicast_mode = 2; /* 2 - Accept all multicast.  */
+		else
+			multicast_mode = 0; /* 0 - Use CAM.  */
+	}
+
+	if (cam_mode == 1) {
+		/* Add primary address. */
+		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
+		list_for_each(pos, &netdev->dev_addrs.list) {
+			struct netdev_hw_addr *hw_addr;
+			hw_addr = list_entry(pos, struct netdev_hw_addr, list);
+			octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
+		}
+	}
+	if (multicast_mode == 0) {
+		i = netdev->mc_count;
+		list = netdev->mc_list;
+		while (i--) {
+			octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
+			list = list->next;
+		}
+	}
+
+
+	spin_lock_irqsave(&p->lock, flags);
+
+	/* Disable packet I/O. */
+	agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+	prev_packet_enable = agl_gmx_prtx.s.en;
+	agl_gmx_prtx.s.en = 0;
+	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+
+	adr_ctl.u64 = 0;
+	adr_ctl.s.cam_mode = cam_mode;
+	adr_ctl.s.mcst = multicast_mode;
+	adr_ctl.s.bcst = 1;     /* Allow broadcast */
+
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);
+
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);
+
+	/* Restore packet I/O. */
+	agl_gmx_prtx.s.en = prev_packet_enable;
+	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
+
+	spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
+{
+	struct sockaddr *sa = addr;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+
+	octeon_mgmt_set_rx_filtering(netdev);
+
+	return 0;
+}
+
+static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	int port = p->port;
+	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
+
+	/*
+	 * Limit the MTU to make sure the ethernet packets are between
+	 * 64 bytes and 16383 bytes.
+	 */
+	if (size_without_fcs < 64 || size_without_fcs > 16383) {
+		dev_warn(p->dev, "MTU must be between %d and %d.\n",
+			 64 - OCTEON_MGMT_RX_HEADROOM,
+			 16383 - OCTEON_MGMT_RX_HEADROOM);
+		return -EINVAL;
+	}
+
+	netdev->mtu = new_mtu;
+
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_MAX(port), size_without_fcs);
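+	/*
+	 * Round the jabber threshold up to a multiple of 8; the mask
+	 * below suggests the register only implements the upper bits
+	 * (an inference from the code, not from documentation).
+	 */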
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_JABBER(port),
+		       (size_without_fcs + 7) & 0xfff8);
+
+	return 0;
+}
+
+static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
+{
+	struct net_device *netdev = dev_id;
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	int port = p->port;
+	union cvmx_mixx_isr mixx_isr;
+
+	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
+
+	/* Clear any pending interrupts */
+	cvmx_write_csr(CVMX_MIXX_ISR(port),
+		       cvmx_read_csr(CVMX_MIXX_ISR(port)));
+	cvmx_read_csr(CVMX_MIXX_ISR(port));
+
+	if (mixx_isr.s.irthresh) {
+		octeon_mgmt_disable_rx_irq(p);
+		napi_schedule(&p->napi);
+	}
+	if (mixx_isr.s.orthresh) {
+		octeon_mgmt_disable_tx_irq(p);
+		tasklet_schedule(&p->tx_clean_tasklet);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int octeon_mgmt_ioctl(struct net_device *netdev,
+			     struct ifreq *rq, int cmd)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+
+	if (!netif_running(netdev))
+		return -EINVAL;
+
+	if (!p->phydev)
+		return -EINVAL;
+
+	return phy_mii_ioctl(p->phydev, if_mii(rq), cmd);
+}
+
+static void octeon_mgmt_adjust_link(struct net_device *netdev)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	int port = p->port;
+	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+	unsigned long flags;
+	int link_changed = 0;
+
+	spin_lock_irqsave(&p->lock, flags);
+	if (p->phydev->link) {
+		if (!p->last_link)
+			link_changed = 1;
+		if (p->last_duplex != p->phydev->duplex) {
+			p->last_duplex = p->phydev->duplex;
+			prtx_cfg.u64 =
+				cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+			prtx_cfg.s.duplex = p->phydev->duplex;
+			cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
+				       prtx_cfg.u64);
+		}
+	} else {
+		if (p->last_link)
+			link_changed = -1;
+	}
+	p->last_link = p->phydev->link;
+	spin_unlock_irqrestore(&p->lock, flags);
+
+	if (link_changed != 0) {
+		if (link_changed > 0) {
+			netif_carrier_on(netdev);
+			pr_info("%s: Link is up - %d/%s\n", netdev->name,
+				p->phydev->speed,
+				DUPLEX_FULL == p->phydev->duplex ?
+					"Full" : "Half");
+		} else {
+			netif_carrier_off(netdev);
+			pr_info("%s: Link is down\n", netdev->name);
+		}
+	}
+}
+
+static int octeon_mgmt_init_phy(struct net_device *netdev)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	char phy_id[20];
+
+	if (octeon_is_simulation()) {
+		/* No PHYs in the simulator. */
+		netif_carrier_on(netdev);
+		return 0;
+	}
+
+	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);
+
+	p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
+				PHY_INTERFACE_MODE_MII);
+
+	if (IS_ERR(p->phydev)) {
+		p->phydev = NULL;
+		return -1;
+	}
+
+	phy_start_aneg(p->phydev);
+
+	return 0;
+}
+
+static int octeon_mgmt_open(struct net_device *netdev)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	int port = p->port;
+	union cvmx_mixx_ctl mix_ctl;
+	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
+	union cvmx_mixx_oring1 oring1;
+	union cvmx_mixx_iring1 iring1;
+	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+	union cvmx_mixx_irhwm mix_irhwm;
+	union cvmx_mixx_orhwm mix_orhwm;
+	union cvmx_mixx_intena mix_intena;
+	struct sockaddr sa;
+
+	/* Allocate ring buffers.  */
+	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+			     GFP_KERNEL);
+	if (!p->tx_ring)
+		return -ENOMEM;
+	p->tx_ring_handle =
+		dma_map_single(p->dev, p->tx_ring,
+			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+			       DMA_BIDIRECTIONAL);
+	p->tx_next = 0;
+	p->tx_next_clean = 0;
+	p->tx_current_fill = 0;
+
+
+	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+			     GFP_KERNEL);
+	if (!p->rx_ring)
+		goto err_nomem;
+	p->rx_ring_handle =
+		dma_map_single(p->dev, p->rx_ring,
+			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+			       DMA_BIDIRECTIONAL);
+
+	p->rx_next = 0;
+	p->rx_next_fill = 0;
+	p->rx_current_fill = 0;
+
+	octeon_mgmt_reset_hw(p);
+
+	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+
+	/* Bring it out of reset if needed. */
+	if (mix_ctl.s.reset) {
+		mix_ctl.s.reset = 0;
+		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+		do {
+			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
+		} while (mix_ctl.s.reset);
+	}
+
+	agl_gmx_inf_mode.u64 = 0;
+	agl_gmx_inf_mode.s.en = 1;
+	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+
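+	/*
+	 * The ring base registers apparently take the physical address
+	 * in units of 8-byte descriptors, hence the >> 3 below; the DMA
+	 * mappings must therefore be 8-byte aligned.
+	 */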
+	oring1.u64 = 0;
+	oring1.s.obase = p->tx_ring_handle >> 3;
+	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
+	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);
+
+	iring1.u64 = 0;
+	iring1.s.ibase = p->rx_ring_handle >> 3;
+	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
+	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);
+
+	/* Disable packet I/O. */
+	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+	prtx_cfg.s.en = 0;
+	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+
+	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
+	octeon_mgmt_set_mac_address(netdev, &sa);
+
+	octeon_mgmt_change_mtu(netdev, netdev->mtu);
+
+	/*
+	 * Enable the port HW.  Packets are not allowed until the
+	 * GMX port is enabled further below.
+	 */
+	mix_ctl.u64 = 0;
+	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
+	mix_ctl.s.en = 1;           /* Enable the port */
+	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
+	/* MII CB-request FIFO programmable high watermark */
+	mix_ctl.s.mrq_hwm = 1;
+	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+		/*
+		 * Force compensation values, as they are not
+		 * determined properly by HW
+		 */
+		union cvmx_agl_gmx_drv_ctl drv_ctl;
+
+		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+		if (port) {
+			drv_ctl.s.byp_en1 = 1;
+			drv_ctl.s.nctl1 = 6;
+			drv_ctl.s.pctl1 = 6;
+		} else {
+			drv_ctl.s.byp_en = 1;
+			drv_ctl.s.nctl = 6;
+			drv_ctl.s.pctl = 6;
+		}
+		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+	}
+
+	octeon_mgmt_rx_fill_ring(netdev);
+
+	/* Clear statistics. */
+	/* Clear on read. */
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);
+
+	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
+	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
+	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);
+
+	/* Clear any pending interrupts */
+	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));
+
+	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
+			netdev)) {
+		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
+		goto err_noirq;
+	}
+
+	/* Interrupt every single RX packet */
+	mix_irhwm.u64 = 0;
+	mix_irhwm.s.irhwm = 0;
+	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
+
+	/* Interrupt when we have 5 or more packets to clean.  */
+	mix_orhwm.u64 = 0;
+	mix_orhwm.s.orhwm = 5;
+	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
+
+	/* Enable receive and transmit interrupts */
+	mix_intena.u64 = 0;
+	mix_intena.s.ithena = 1;
+	mix_intena.s.othena = 1;
+	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);
+
+
+	/* Enable packet I/O. */
+
+	rxx_frm_ctl.u64 = 0;
+	rxx_frm_ctl.s.pre_align = 1;
+	/*
+	 * When set, disables the length check for non-min sized pkts
+	 * with padding in the client data.
+	 */
+	rxx_frm_ctl.s.pad_len = 1;
+	/* When set, disables the length check for VLAN pkts */
+	rxx_frm_ctl.s.vlan_len = 1;
+	/* When set, PREAMBLE checking is less strict */
+	rxx_frm_ctl.s.pre_free = 1;
+	/* Control Pause Frames can match station SMAC */
+	rxx_frm_ctl.s.ctl_smac = 0;
+	/* Control Pause Frames can match globally assigned Multicast address */
+	rxx_frm_ctl.s.ctl_mcst = 1;
+	/* Forward pause information to TX block */
+	rxx_frm_ctl.s.ctl_bck = 1;
+	/* Drop Control Pause Frames */
+	rxx_frm_ctl.s.ctl_drp = 1;
+	/* Strip off the preamble */
+	rxx_frm_ctl.s.pre_strp = 1;
+	/*
+	 * This port is configured to send PREAMBLE+SFD to begin every
+	 * frame.  GMX checks that the PREAMBLE is sent correctly.
+	 */
+	rxx_frm_ctl.s.pre_chk = 1;
+	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);
+
+	/* Enable the AGL block */
+	agl_gmx_inf_mode.u64 = 0;
+	agl_gmx_inf_mode.s.en = 1;
+	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+
+	/* Configure the port duplex and enables */
+	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
+	prtx_cfg.s.tx_en = 1;
+	prtx_cfg.s.rx_en = 1;
+	prtx_cfg.s.en = 1;
+	p->last_duplex = 1;
+	prtx_cfg.s.duplex = p->last_duplex;
+	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);
+
+	p->last_link = 0;
+	netif_carrier_off(netdev);
+
+	if (octeon_mgmt_init_phy(netdev)) {
+		dev_err(p->dev, "Cannot initialize PHY.\n");
+		free_irq(p->irq, netdev);
+		goto err_noirq;
+	}
+
+	netif_wake_queue(netdev);
+	napi_enable(&p->napi);
+
+	return 0;
+err_noirq:
+	octeon_mgmt_reset_hw(p);
+	dma_unmap_single(p->dev, p->rx_ring_handle,
+			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
+			 DMA_BIDIRECTIONAL);
+	kfree(p->rx_ring);
+err_nomem:
+	dma_unmap_single(p->dev, p->tx_ring_handle,
+			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
+			 DMA_BIDIRECTIONAL);
+	kfree(p->tx_ring);
+	return -ENOMEM;
+}
+
920 | |||
921 | static int octeon_mgmt_stop(struct net_device *netdev) | ||
922 | { | ||
923 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
924 | |||
925 | napi_disable(&p->napi); | ||
926 | netif_stop_queue(netdev); | ||
927 | |||
928 | if (p->phydev) | ||
929 | phy_disconnect(p->phydev); | ||
930 | |||
931 | netif_carrier_off(netdev); | ||
932 | |||
933 | octeon_mgmt_reset_hw(p); | ||
934 | |||
935 | |||
936 | free_irq(p->irq, netdev); | ||
937 | |||
938 | /* dma_unmap is a nop on Octeon, so just free everything. */ | ||
939 | skb_queue_purge(&p->tx_list); | ||
940 | skb_queue_purge(&p->rx_list); | ||
941 | |||
942 | dma_unmap_single(p->dev, p->rx_ring_handle, | ||
943 | ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), | ||
944 | DMA_BIDIRECTIONAL); | ||
945 | kfree(p->rx_ring); | ||
946 | |||
947 | dma_unmap_single(p->dev, p->tx_ring_handle, | ||
948 | ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), | ||
949 | DMA_BIDIRECTIONAL); | ||
950 | kfree(p->tx_ring); | ||
951 | |||
952 | |||
953 | return 0; | ||
954 | } | ||
955 | |||
956 | static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) | ||
957 | { | ||
958 | struct octeon_mgmt *p = netdev_priv(netdev); | ||
959 | int port = p->port; | ||
960 | union mgmt_port_ring_entry re; | ||
961 | unsigned long flags; | ||
962 | |||
963 | re.d64 = 0; | ||
964 | re.s.len = skb->len; | ||
965 | re.s.addr = dma_map_single(p->dev, skb->data, | ||
966 | skb->len, | ||
967 | DMA_TO_DEVICE); | ||
968 | |||
969 | spin_lock_irqsave(&p->tx_list.lock, flags); | ||
970 | |||
971 | if (unlikely(p->tx_current_fill >= | ||
972 | ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { | ||
973 | spin_unlock_irqrestore(&p->tx_list.lock, flags); | ||
974 | |||
975 | dma_unmap_single(p->dev, re.s.addr, re.s.len, | ||
976 | DMA_TO_DEVICE); | ||
977 | |||
978 | netif_stop_queue(netdev); | ||
979 | return NETDEV_TX_BUSY; | ||
980 | } | ||
981 | |||
982 | __skb_queue_tail(&p->tx_list, skb); | ||
983 | |||
984 | /* Put it in the ring. */ | ||
985 | p->tx_ring[p->tx_next] = re.d64; | ||
986 | p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; | ||
987 | p->tx_current_fill++; | ||
988 | |||
989 | spin_unlock_irqrestore(&p->tx_list.lock, flags); | ||
990 | |||
991 | dma_sync_single_for_device(p->dev, p->tx_ring_handle, | ||
992 | ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), | ||
993 | DMA_BIDIRECTIONAL); | ||
994 | |||
995 | netdev->stats.tx_packets++; | ||
996 | netdev->stats.tx_bytes += skb->len; | ||
997 | |||
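+	/*
+	 * Writing 1 to MIX_ORING2 acts as the TX doorbell, telling the
+	 * DMA engine that one more descriptor in the output ring is
+	 * ready to be transmitted.
+	 */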
+	/* Ring the bell.  */
+	cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
+
+	netdev->trans_start = jiffies;
+	octeon_mgmt_clean_tx_buffers(p);
+	octeon_mgmt_update_tx_stats(netdev);
+	return NETDEV_TX_OK;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void octeon_mgmt_poll_controller(struct net_device *netdev)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+
+	octeon_mgmt_receive_packets(p, 16);
+	octeon_mgmt_update_rx_stats(netdev);
+	return;
+}
+#endif
+
+static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
+				    struct ethtool_drvinfo *info)
+{
+	strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strncpy(info->version, DRV_VERSION, sizeof(info->version));
+	strncpy(info->fw_version, "N/A", sizeof(info->fw_version));
+	strncpy(info->bus_info, "N/A", sizeof(info->bus_info));
+	info->n_stats = 0;
+	info->testinfo_len = 0;
+	info->regdump_len = 0;
+	info->eedump_len = 0;
+}
+
+static int octeon_mgmt_get_settings(struct net_device *netdev,
+				    struct ethtool_cmd *cmd)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+
+	if (p->phydev)
+		return phy_ethtool_gset(p->phydev, cmd);
+
+	return -EINVAL;
+}
+
+static int octeon_mgmt_set_settings(struct net_device *netdev,
+				    struct ethtool_cmd *cmd)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (p->phydev)
+		return phy_ethtool_sset(p->phydev, cmd);
+
+	return -EINVAL;
+}
+
+static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
+	.get_drvinfo = octeon_mgmt_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_settings = octeon_mgmt_get_settings,
+	.set_settings = octeon_mgmt_set_settings
+};
+
+static const struct net_device_ops octeon_mgmt_ops = {
+	.ndo_open = octeon_mgmt_open,
+	.ndo_stop = octeon_mgmt_stop,
+	.ndo_start_xmit = octeon_mgmt_xmit,
+	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
+	.ndo_set_multicast_list = octeon_mgmt_set_rx_filtering,
+	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
+	.ndo_do_ioctl = octeon_mgmt_ioctl,
+	.ndo_change_mtu = octeon_mgmt_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = octeon_mgmt_poll_controller,
+#endif
+};
+
+static int __init octeon_mgmt_probe(struct platform_device *pdev)
+{
+	struct resource *res_irq;
+	struct net_device *netdev;
+	struct octeon_mgmt *p;
+	int i;
+
+	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
+	if (netdev == NULL)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, netdev);
+	p = netdev_priv(netdev);
+	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
+		       OCTEON_MGMT_NAPI_WEIGHT);
+
+	p->netdev = netdev;
+	p->dev = &pdev->dev;
+
+	p->port = pdev->id;
+	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
+
+	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res_irq)
+		goto err;
+
+	p->irq = res_irq->start;
+	spin_lock_init(&p->lock);
+
+	skb_queue_head_init(&p->tx_list);
+	skb_queue_head_init(&p->rx_list);
+	tasklet_init(&p->tx_clean_tasklet,
+		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
+
+	netdev->netdev_ops = &octeon_mgmt_ops;
+	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
+
+
+	/* The mgmt ports get the first N MACs.  */
+	for (i = 0; i < 6; i++)
+		netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
+	netdev->dev_addr[5] += p->port;
+
+	if (p->port >= octeon_bootinfo->mac_addr_count)
+		dev_err(&pdev->dev,
+			"Error %s: Using MAC outside of the assigned range: "
+			"%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
+			netdev->dev_addr[0], netdev->dev_addr[1],
+			netdev->dev_addr[2], netdev->dev_addr[3],
+			netdev->dev_addr[4], netdev->dev_addr[5]);
+
+	if (register_netdev(netdev))
+		goto err;
+
+	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
+	return 0;
+err:
+	free_netdev(netdev);
+	return -ENOENT;
+}
+
+static int __exit octeon_mgmt_remove(struct platform_device *pdev)
+{
+	struct net_device *netdev = dev_get_drvdata(&pdev->dev);
+
+	unregister_netdev(netdev);
+	free_netdev(netdev);
+	return 0;
+}
+
+static struct platform_driver octeon_mgmt_driver = {
+	.driver = {
+		.name = "octeon_mgmt",
+		.owner = THIS_MODULE,
+	},
+	.probe = octeon_mgmt_probe,
+	.remove = __exit_p(octeon_mgmt_remove),
+};
+
+extern void octeon_mdiobus_force_mod_depencency(void);
+
+static int __init octeon_mgmt_mod_init(void)
+{
+	/* Force our mdiobus driver module to be loaded first.  */
+	octeon_mdiobus_force_mod_depencency();
+	return platform_driver_register(&octeon_mgmt_driver);
+}
+
+static void __exit octeon_mgmt_mod_exit(void)
+{
+	platform_driver_unregister(&octeon_mgmt_driver);
+}
+
+module_init(octeon_mgmt_mod_init);
+module_exit(octeon_mgmt_mod_exit);
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);