path: root/drivers/net/ethernet/broadcom/bgmac.c
author: Rafał Miłecki <zajec5@gmail.com>  2013-01-08 15:06:23 -0500
committer: David S. Miller <davem@davemloft.net>  2013-01-10 02:37:03 -0500
commit: dd4544f05469aaaeee891d7dc54d66430344321e (patch)
tree: cf40cf96b106a2862dea62dfb730bccbe300124c /drivers/net/ethernet/broadcom/bgmac.c
parent: aaeb6cdfa5c07533c2cd6d2c381374c69f7db9dc (diff)
bgmac: driver for GBit MAC core on BCMA bus
BCMA is a Broadcom specific bus with devices AKA cores. All recent BCMA based SoCs have gigabit Ethernet provided by the GBit MAC core. This patch adds a driver for such cores, registering itself as a netdev. It has been tested on BCM4706 and BCM4718 chipsets.

The kernel tree already contains the b44 driver, which has some things in common with bgmac, but there are many differences that led to the decision to write a new driver:
1) GBit MAC cores appear on the BCMA bus (not SSB as in the case of b44)
2) There is a 64-bit DMA engine, which differs from the 32-bit one
3) There is no CAM (Content Addressable Memory) in GBit MAC
4) We have 4 TX queues on GBit MAC devices (instead of 1)
5) Many registers have different addresses/values
6) RX header flags are also different

The driver in its current state is functional, however there is of course room for improvement:
1) Supporting more net_device_ops
2) Supporting more ethtool_ops
3) Unaligned addressing in DMA
4) Writing a separate PHY driver

Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
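The 64-bit DMA engine mentioned in point 2 is programmed through four-word descriptors carrying two control words and a split 64-bit buffer address. The struct below is only a sketch reconstructed from the addr_low/addr_high/ctl0/ctl1 accesses in bgmac_dma_tx_add() and bgmac_dma_init(); the authoritative definition lives in bgmac.h, which is not part of this hunk.

	/* Sketch only -- reconstructed from the field accesses in this patch,
	 * not copied from bgmac.h. Types come from <linux/types.h>.
	 */
	struct bgmac_dma_desc {
		__le32 ctl0;		/* BGMAC_DESC_CTL0_* flags: IOC, SOF, EOF, EOT */
		__le32 ctl1;		/* buffer length in the BGMAC_DESC_CTL1_LEN bits */
		__le32 addr_low;	/* lower 32 bits of the buffer DMA address */
		__le32 addr_high;	/* upper 32 bits of the buffer DMA address */
	} __packed;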
Diffstat (limited to 'drivers/net/ethernet/broadcom/bgmac.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c  1422
1 file changed, 1422 insertions, 0 deletions
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
new file mode 100644
index 000000000000..9bd33db7fddd
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -0,0 +1,1422 @@
1/*
2 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
3 *
4 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
5 *
6 * Licensed under the GNU/GPL. See COPYING for details.
7 */
8
9#include "bgmac.h"
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <linux/etherdevice.h>
15#include <linux/mii.h>
16#include <linux/interrupt.h>
17#include <linux/dma-mapping.h>
18#include <asm/mach-bcm47xx/nvram.h>
19
20static const struct bcma_device_id bgmac_bcma_tbl[] = {
21 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
22 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
23 BCMA_CORETABLE_END
24};
25MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
26
27static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
28 u32 value, int timeout)
29{
30 u32 val;
31 int i;
32
33 for (i = 0; i < timeout / 10; i++) {
34 val = bcma_read32(core, reg);
35 if ((val & mask) == value)
36 return true;
37 udelay(10);
38 }
39 pr_err("Timeout waiting for reg 0x%X\n", reg);
40 return false;
41}
42
43/**************************************************
44 * DMA
45 **************************************************/
46
47static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
48{
49 u32 val;
50 int i;
51
52 if (!ring->mmio_base)
53 return;
54
55	/* Suspend DMA TX ring first.
56	 * bgmac_wait_value doesn't support waiting for any of a few values, so
57	 * implement the whole loop here.
58	 */
59 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
60 BGMAC_DMA_TX_SUSPEND);
61 for (i = 0; i < 10000 / 10; i++) {
62 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
63 val &= BGMAC_DMA_TX_STAT;
64 if (val == BGMAC_DMA_TX_STAT_DISABLED ||
65 val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
66 val == BGMAC_DMA_TX_STAT_STOPPED) {
67 i = 0;
68 break;
69 }
70 udelay(10);
71 }
72 if (i)
73 bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
74 ring->mmio_base, val);
75
76 /* Remove SUSPEND bit */
77 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
78 if (!bgmac_wait_value(bgmac->core,
79 ring->mmio_base + BGMAC_DMA_TX_STATUS,
80 BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
81 10000)) {
82 bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
83 ring->mmio_base);
84 udelay(300);
85 val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
86 if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
87 bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
88 ring->mmio_base);
89 }
90}
91
92static void bgmac_dma_tx_enable(struct bgmac *bgmac,
93 struct bgmac_dma_ring *ring)
94{
95 u32 ctl;
96
97 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
98 ctl |= BGMAC_DMA_TX_ENABLE;
99 ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
100 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
101}
102
103static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
104 struct bgmac_dma_ring *ring,
105 struct sk_buff *skb)
106{
107 struct device *dma_dev = bgmac->core->dma_dev;
108 struct net_device *net_dev = bgmac->net_dev;
109 struct bgmac_dma_desc *dma_desc;
110 struct bgmac_slot_info *slot;
111 u32 ctl0, ctl1;
112 int free_slots;
113
114 if (skb->len > BGMAC_DESC_CTL1_LEN) {
115 bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
116 goto err_stop_drop;
117 }
118
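	/* ring->start is the oldest slot not yet reclaimed by bgmac_dma_tx_free,
	 * ring->end is the next slot to fill; one slot is always left unused so
	 * that start == end unambiguously means an empty ring.
	 */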
119 if (ring->start <= ring->end)
120 free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
121 else
122 free_slots = ring->start - ring->end;
123 if (free_slots == 1) {
124 bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
125 netif_stop_queue(net_dev);
126 return NETDEV_TX_BUSY;
127 }
128
129 slot = &ring->slots[ring->end];
130 slot->skb = skb;
131 slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
132 DMA_TO_DEVICE);
133 if (dma_mapping_error(dma_dev, slot->dma_addr)) {
134 bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
135 ring->mmio_base);
136 goto err_stop_drop;
137 }
138
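	/* The whole frame fits in a single buffer: mark it start-of-frame and
	 * end-of-frame and request an interrupt on completion (IOC).
	 */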
139 ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
140 if (ring->end == ring->num_slots - 1)
141 ctl0 |= BGMAC_DESC_CTL0_EOT;
142 ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
143
144 dma_desc = ring->cpu_base;
145 dma_desc += ring->end;
146 dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
147 dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
148 dma_desc->ctl0 = cpu_to_le32(ctl0);
149 dma_desc->ctl1 = cpu_to_le32(ctl1);
150
151 wmb();
152
153 /* Increase ring->end to point empty slot. We tell hardware the first
154 * slot it should *not* read.
155 */
156 if (++ring->end >= BGMAC_TX_RING_SLOTS)
157 ring->end = 0;
158 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
159 ring->end * sizeof(struct bgmac_dma_desc));
160
161 /* Always keep one slot free to allow detecting bugged calls. */
162 if (--free_slots == 1)
163 netif_stop_queue(net_dev);
164
165 return NETDEV_TX_OK;
166
167err_stop_drop:
168 netif_stop_queue(net_dev);
169 dev_kfree_skb(skb);
170 return NETDEV_TX_OK;
171}
172
173/* Free transmitted packets */
174static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
175{
176 struct device *dma_dev = bgmac->core->dma_dev;
177 int empty_slot;
178 bool freed = false;
179
180 /* The last slot that hardware didn't consume yet */
181 empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
182 empty_slot &= BGMAC_DMA_TX_STATDPTR;
183 empty_slot /= sizeof(struct bgmac_dma_desc);
184
185 while (ring->start != empty_slot) {
186 struct bgmac_slot_info *slot = &ring->slots[ring->start];
187
188 if (slot->skb) {
189 /* Unmap no longer used buffer */
190 dma_unmap_single(dma_dev, slot->dma_addr,
191 slot->skb->len, DMA_TO_DEVICE);
192 slot->dma_addr = 0;
193
194 /* Free memory! :) */
195 dev_kfree_skb(slot->skb);
196 slot->skb = NULL;
197 } else {
198 bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
199 ring->start, ring->end);
200 }
201
202 if (++ring->start >= BGMAC_TX_RING_SLOTS)
203 ring->start = 0;
204 freed = true;
205 }
206
207 if (freed && netif_queue_stopped(bgmac->net_dev))
208 netif_wake_queue(bgmac->net_dev);
209}
210
211static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
212{
213 if (!ring->mmio_base)
214 return;
215
216 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
217 if (!bgmac_wait_value(bgmac->core,
218 ring->mmio_base + BGMAC_DMA_RX_STATUS,
219 BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
220 10000))
221 bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
222 ring->mmio_base);
223}
224
225static void bgmac_dma_rx_enable(struct bgmac *bgmac,
226 struct bgmac_dma_ring *ring)
227{
228 u32 ctl;
229
230 ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
231 ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
232 ctl |= BGMAC_DMA_RX_ENABLE;
233 ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
234 ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
235 ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
236 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
237}
238
239static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
240 struct bgmac_slot_info *slot)
241{
242 struct device *dma_dev = bgmac->core->dma_dev;
243 struct bgmac_rx_header *rx;
244
245 /* Alloc skb */
246 slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
247 if (!slot->skb) {
248 bgmac_err(bgmac, "Allocation of skb failed!\n");
249 return -ENOMEM;
250 }
251
252 /* Poison - if everything goes fine, hardware will overwrite it */
253 rx = (struct bgmac_rx_header *)slot->skb->data;
254 rx->len = cpu_to_le16(0xdead);
255 rx->flags = cpu_to_le16(0xbeef);
256
257 /* Map skb for the DMA */
258 slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
259 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
260 if (dma_mapping_error(dma_dev, slot->dma_addr)) {
261 bgmac_err(bgmac, "DMA mapping error\n");
262 return -ENOMEM;
263 }
264 if (slot->dma_addr & 0xC0000000)
265 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
266
267 return 0;
268}
269
270static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
271 int weight)
272{
273 u32 end_slot;
274 int handled = 0;
275
276 end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
277 end_slot &= BGMAC_DMA_RX_STATDPTR;
278 end_slot /= sizeof(struct bgmac_dma_desc);
279
280 ring->end = end_slot;
281
282 while (ring->start != ring->end) {
283 struct device *dma_dev = bgmac->core->dma_dev;
284 struct bgmac_slot_info *slot = &ring->slots[ring->start];
285 struct sk_buff *skb = slot->skb;
286 struct sk_buff *new_skb;
287 struct bgmac_rx_header *rx;
288 u16 len, flags;
289
290 /* Unmap buffer to make it accessible to the CPU */
291 dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
292 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
293
294 /* Get info from the header */
295 rx = (struct bgmac_rx_header *)skb->data;
296 len = le16_to_cpu(rx->len);
297 flags = le16_to_cpu(rx->flags);
298
299 /* Check for poison and drop or pass the packet */
300 if (len == 0xdead && flags == 0xbeef) {
301 bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
302 ring->start);
303 } else {
304 new_skb = netdev_alloc_skb(bgmac->net_dev, len);
305 if (new_skb) {
306 skb_put(new_skb, len);
307 skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
308 new_skb->data,
309 len);
310 new_skb->protocol =
311 eth_type_trans(new_skb, bgmac->net_dev);
312 netif_receive_skb(new_skb);
313 handled++;
314 } else {
315 bgmac->net_dev->stats.rx_dropped++;
316 bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
317 }
318
319 /* Poison the old skb */
320 rx->len = cpu_to_le16(0xdead);
321 rx->flags = cpu_to_le16(0xbeef);
322 }
323
324 /* Make it back accessible to the hardware */
325 dma_sync_single_for_device(dma_dev, slot->dma_addr,
326 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
327
328 if (++ring->start >= BGMAC_RX_RING_SLOTS)
329 ring->start = 0;
330
331 if (handled >= weight) /* Should never be greater */
332 break;
333 }
334
335 return handled;
336}
337
338/* Does ring support unaligned addressing? */
339static bool bgmac_dma_unaligned(struct bgmac *bgmac,
340 struct bgmac_dma_ring *ring,
341 enum bgmac_dma_ring_type ring_type)
342{
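	/* Probe by writing a ring base with non-zero low bits and reading it
	 * back; if the engine preserves them, it accepts unaligned ring
	 * addresses.
	 */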
343 switch (ring_type) {
344 case BGMAC_DMA_RING_TX:
345 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
346 0xff0);
347 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
348 return true;
349 break;
350 case BGMAC_DMA_RING_RX:
351 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
352 0xff0);
353 if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
354 return true;
355 break;
356 }
357 return false;
358}
359
360static void bgmac_dma_ring_free(struct bgmac *bgmac,
361 struct bgmac_dma_ring *ring)
362{
363 struct device *dma_dev = bgmac->core->dma_dev;
364 struct bgmac_slot_info *slot;
365 int size;
366 int i;
367
368 for (i = 0; i < ring->num_slots; i++) {
369 slot = &ring->slots[i];
370 if (slot->skb) {
371 if (slot->dma_addr)
372 dma_unmap_single(dma_dev, slot->dma_addr,
373 slot->skb->len, DMA_TO_DEVICE);
374 dev_kfree_skb(slot->skb);
375 }
376 }
377
378 if (ring->cpu_base) {
379 /* Free ring of descriptors */
380 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
381 dma_free_coherent(dma_dev, size, ring->cpu_base,
382 ring->dma_base);
383 }
384}
385
386static void bgmac_dma_free(struct bgmac *bgmac)
387{
388 int i;
389
390 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
391 bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
392 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
393 bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
394}
395
396static int bgmac_dma_alloc(struct bgmac *bgmac)
397{
398 struct device *dma_dev = bgmac->core->dma_dev;
399 struct bgmac_dma_ring *ring;
400 static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
401 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
402 int size; /* ring size: different for Tx and Rx */
403 int err;
404 int i;
405
406 BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
407 BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
408
409 if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
410 bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
411 return -ENOTSUPP;
412 }
413
414 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
415 ring = &bgmac->tx_ring[i];
416 ring->num_slots = BGMAC_TX_RING_SLOTS;
417 ring->mmio_base = ring_base[i];
418 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
419 bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
420 ring->mmio_base);
421
422 /* Alloc ring of descriptors */
423 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
424 ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
425 &ring->dma_base,
426 GFP_KERNEL);
427 if (!ring->cpu_base) {
428 bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
429 ring->mmio_base);
430 goto err_dma_free;
431 }
432 if (ring->dma_base & 0xC0000000)
433 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
434
435 /* No need to alloc TX slots yet */
436 }
437
438 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
439 ring = &bgmac->rx_ring[i];
440 ring->num_slots = BGMAC_RX_RING_SLOTS;
441 ring->mmio_base = ring_base[i];
442 if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
443 bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
444 ring->mmio_base);
445
446 /* Alloc ring of descriptors */
447 size = ring->num_slots * sizeof(struct bgmac_dma_desc);
448 ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
449 &ring->dma_base,
450 GFP_KERNEL);
451 if (!ring->cpu_base) {
452 bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
453 ring->mmio_base);
454 err = -ENOMEM;
455 goto err_dma_free;
456 }
457 if (ring->dma_base & 0xC0000000)
458 bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
459
460 /* Alloc RX slots */
461 for (i = 0; i < ring->num_slots; i++) {
462 err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[i]);
463 if (err) {
464 bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
465 goto err_dma_free;
466 }
467 }
468 }
469
470 return 0;
471
472err_dma_free:
473 bgmac_dma_free(bgmac);
474 return -ENOMEM;
475}
476
477static void bgmac_dma_init(struct bgmac *bgmac)
478{
479 struct bgmac_dma_ring *ring;
480 struct bgmac_dma_desc *dma_desc;
481 u32 ctl0, ctl1;
482 int i;
483
484 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
485 ring = &bgmac->tx_ring[i];
486
487 /* We don't implement unaligned addressing, so enable first */
488 bgmac_dma_tx_enable(bgmac, ring);
489 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
490 lower_32_bits(ring->dma_base));
491 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
492 upper_32_bits(ring->dma_base));
493
494 ring->start = 0;
495		ring->end = 0;	/* Points to the slot that should *not* be read */
496 }
497
498 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
499 ring = &bgmac->rx_ring[i];
500
501 /* We don't implement unaligned addressing, so enable first */
502 bgmac_dma_rx_enable(bgmac, ring);
503 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
504 lower_32_bits(ring->dma_base));
505 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
506 upper_32_bits(ring->dma_base));
507
508 for (i = 0, dma_desc = ring->cpu_base; i < ring->num_slots;
509 i++, dma_desc++) {
510 ctl0 = ctl1 = 0;
511
512 if (i == ring->num_slots - 1)
513 ctl0 |= BGMAC_DESC_CTL0_EOT;
514 ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
515 /* Is there any BGMAC device that requires extension? */
516 /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
517 * B43_DMA64_DCTL1_ADDREXT_MASK;
518 */
519
520 dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[i].dma_addr));
521 dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[i].dma_addr));
522 dma_desc->ctl0 = cpu_to_le32(ctl0);
523 dma_desc->ctl1 = cpu_to_le32(ctl1);
524 }
525
526 bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
527 ring->num_slots * sizeof(struct bgmac_dma_desc));
528
529 ring->start = 0;
530 ring->end = 0;
531 }
532}
533
534/**************************************************
535 * PHY ops
536 **************************************************/
537
538u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
539{
540 struct bcma_device *core;
541 u16 phy_access_addr;
542 u16 phy_ctl_addr;
543 u32 tmp;
544
545 BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
546 BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
547 BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
548 BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
549 BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
550 BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
551 BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
552 BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
553 BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
554 BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
555 BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
556
557 if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
558 core = bgmac->core->bus->drv_gmac_cmn.core;
559 phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
560 phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
561 } else {
562 core = bgmac->core;
563 phy_access_addr = BGMAC_PHY_ACCESS;
564 phy_ctl_addr = BGMAC_PHY_CNTL;
565 }
566
567 tmp = bcma_read32(core, phy_ctl_addr);
568 tmp &= ~BGMAC_PC_EPA_MASK;
569 tmp |= phyaddr;
570 bcma_write32(core, phy_ctl_addr, tmp);
571
572 tmp = BGMAC_PA_START;
573 tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
574 tmp |= reg << BGMAC_PA_REG_SHIFT;
575 bcma_write32(core, phy_access_addr, tmp);
576
577 if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
578 bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
579 phyaddr, reg);
580 return 0xffff;
581 }
582
583 return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
584}
585
586/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
587void bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
588{
589 struct bcma_device *core;
590 u16 phy_access_addr;
591 u16 phy_ctl_addr;
592 u32 tmp;
593
594 if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
595 core = bgmac->core->bus->drv_gmac_cmn.core;
596 phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
597 phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
598 } else {
599 core = bgmac->core;
600 phy_access_addr = BGMAC_PHY_ACCESS;
601 phy_ctl_addr = BGMAC_PHY_CNTL;
602 }
603
604 tmp = bcma_read32(core, phy_ctl_addr);
605 tmp &= ~BGMAC_PC_EPA_MASK;
606 tmp |= phyaddr;
607 bcma_write32(core, phy_ctl_addr, tmp);
608
609 bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
610 if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
611 bgmac_warn(bgmac, "Error setting MDIO int\n");
612
613 tmp = BGMAC_PA_START;
614 tmp |= BGMAC_PA_WRITE;
615 tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
616 tmp |= reg << BGMAC_PA_REG_SHIFT;
617 tmp |= value;
618 bcma_write32(core, phy_access_addr, tmp);
619
620 if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000))
621 bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
622 phyaddr, reg);
623}
624
625/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyforce */
626static void bgmac_phy_force(struct bgmac *bgmac)
627{
628 u16 ctl;
629 u16 mask = ~(BGMAC_PHY_CTL_SPEED | BGMAC_PHY_CTL_SPEED_MSB |
630 BGMAC_PHY_CTL_ANENAB | BGMAC_PHY_CTL_DUPLEX);
631
632 if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
633 return;
634
635 if (bgmac->autoneg)
636 return;
637
638 ctl = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL);
639 ctl &= mask;
640 if (bgmac->full_duplex)
641 ctl |= BGMAC_PHY_CTL_DUPLEX;
642 if (bgmac->speed == BGMAC_SPEED_100)
643 ctl |= BGMAC_PHY_CTL_SPEED_100;
644 else if (bgmac->speed == BGMAC_SPEED_1000)
645 ctl |= BGMAC_PHY_CTL_SPEED_1000;
646 bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL, ctl);
647}
648
649/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyadvertise */
650static void bgmac_phy_advertise(struct bgmac *bgmac)
651{
652 u16 adv;
653
654 if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
655 return;
656
657 if (!bgmac->autoneg)
658 return;
659
660 /* Adv selected 10/100 speeds */
661 adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV);
662 adv &= ~(BGMAC_PHY_ADV_10HALF | BGMAC_PHY_ADV_10FULL |
663 BGMAC_PHY_ADV_100HALF | BGMAC_PHY_ADV_100FULL);
664 if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
665 adv |= BGMAC_PHY_ADV_10HALF;
666 if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
667 adv |= BGMAC_PHY_ADV_100HALF;
668 if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_10)
669 adv |= BGMAC_PHY_ADV_10FULL;
670 if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_100)
671 adv |= BGMAC_PHY_ADV_100FULL;
672 bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV, adv);
673
674 /* Adv selected 1000 speeds */
675 adv = bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2);
676 adv &= ~(BGMAC_PHY_ADV2_1000HALF | BGMAC_PHY_ADV2_1000FULL);
677 if (!bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
678 adv |= BGMAC_PHY_ADV2_1000HALF;
679 if (bgmac->full_duplex && bgmac->speed & BGMAC_SPEED_1000)
680 adv |= BGMAC_PHY_ADV2_1000FULL;
681 bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_ADV2, adv);
682
683 /* Restart */
684 bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
685 bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) |
686 BGMAC_PHY_CTL_RESTART);
687}
688
689/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
690static void bgmac_phy_init(struct bgmac *bgmac)
691{
692 struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
693 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
694 u8 i;
695
696 if (ci->id == BCMA_CHIP_ID_BCM5356) {
697 for (i = 0; i < 5; i++) {
698 bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
699 bgmac_phy_write(bgmac, i, 0x15, 0x0100);
700 bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
701 bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
702 bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
703 }
704 }
705 if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
706 (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
707 (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
708 bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
709 bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
710 for (i = 0; i < 5; i++) {
711 bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
712 bgmac_phy_write(bgmac, i, 0x16, 0x5284);
713 bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
714 bgmac_phy_write(bgmac, i, 0x17, 0x0010);
715 bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
716 bgmac_phy_write(bgmac, i, 0x16, 0x5296);
717 bgmac_phy_write(bgmac, i, 0x17, 0x1073);
718 bgmac_phy_write(bgmac, i, 0x17, 0x9073);
719 bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
720 bgmac_phy_write(bgmac, i, 0x17, 0x9273);
721 bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
722 }
723 }
724}
725
726/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
727static void bgmac_phy_reset(struct bgmac *bgmac)
728{
729 if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
730 return;
731
732 bgmac_phy_write(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL,
733 BGMAC_PHY_CTL_RESET);
734 udelay(100);
735 if (bgmac_phy_read(bgmac, bgmac->phyaddr, BGMAC_PHY_CTL) &
736 BGMAC_PHY_CTL_RESET)
737 bgmac_err(bgmac, "PHY reset failed\n");
738 bgmac_phy_init(bgmac);
739}
740
741/**************************************************
742 * Chip ops
743 **************************************************/
744
745/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if there
746 * is nothing to change? Try it after stabilizing the driver.
747 */
748static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
749 bool force)
750{
751 u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
752 u32 new_val = (cmdcfg & mask) | set;
753
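	/* Hold the MAC in software reset (SR) while the command config is
	 * updated, then release it again below.
	 */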
754 bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR);
755 udelay(2);
756
757 if (new_val != cmdcfg || force)
758 bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
759
760 bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR);
761 udelay(2);
762}
763
764#if 0 /* We don't use these regs yet */
765static void bgmac_chip_stats_update(struct bgmac *bgmac)
766{
767 int i;
768
769 if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
770 for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
771 bgmac->mib_tx_regs[i] =
772 bgmac_read(bgmac,
773 BGMAC_TX_GOOD_OCTETS + (i * 4));
774 for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
775 bgmac->mib_rx_regs[i] =
776 bgmac_read(bgmac,
777 BGMAC_RX_GOOD_OCTETS + (i * 4));
778 }
779
780 /* TODO: what else? how to handle BCM4706? Specs are needed */
781}
782#endif
783
784static void bgmac_clear_mib(struct bgmac *bgmac)
785{
786 int i;
787
788 if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
789 return;
790
791 bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
792 for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
793 bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
794 for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
795 bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
796}
797
798/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
799static void bgmac_speed(struct bgmac *bgmac, int speed)
800{
801 u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
802 u32 set = 0;
803
804 if (speed & BGMAC_SPEED_10)
805 set |= BGMAC_CMDCFG_ES_10;
806 if (speed & BGMAC_SPEED_100)
807 set |= BGMAC_CMDCFG_ES_100;
808 if (speed & BGMAC_SPEED_1000)
809 set |= BGMAC_CMDCFG_ES_1000;
810 if (!bgmac->full_duplex)
811 set |= BGMAC_CMDCFG_HD;
812 bgmac_cmdcfg_maskset(bgmac, mask, set, true);
813}
814
815static void bgmac_miiconfig(struct bgmac *bgmac)
816{
817 u8 imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
818 BGMAC_DS_MM_SHIFT;
819 if (imode == 0 || imode == 1) {
820 if (bgmac->autoneg)
821 bgmac_speed(bgmac, BGMAC_SPEED_100);
822 else
823 bgmac_speed(bgmac, bgmac->speed);
824 }
825}
826
827/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
828static void bgmac_chip_reset(struct bgmac *bgmac)
829{
830 struct bcma_device *core = bgmac->core;
831 struct bcma_bus *bus = core->bus;
832 struct bcma_chipinfo *ci = &bus->chipinfo;
833 u32 flags = 0;
834 u32 iost;
835 int i;
836
837 if (bcma_core_is_enabled(core)) {
838 if (!bgmac->stats_grabbed) {
839 /* bgmac_chip_stats_update(bgmac); */
840 bgmac->stats_grabbed = true;
841 }
842
843 for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
844 bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);
845
846 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
847 udelay(1);
848
849 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
850 bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);
851
852 /* TODO: Clear software multicast filter list */
853 }
854
855 iost = bcma_aread32(core, BCMA_IOST);
856 if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 10) ||
857 (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
858 (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == 9))
859 iost &= ~BGMAC_BCMA_IOST_ATTACHED;
860
861 if (iost & BGMAC_BCMA_IOST_ATTACHED) {
862 flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
863 if (!bgmac->has_robosw)
864 flags |= BGMAC_BCMA_IOCTL_SW_RESET;
865 }
866
867 bcma_core_enable(core, flags);
868
869 if (core->id.rev > 2) {
870 bgmac_set(bgmac, BCMA_CLKCTLST, 1 << 8);
871 bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, 1 << 24, 1 << 24,
872 1000);
873 }
874
875 if (ci->id == BCMA_CHIP_ID_BCM5357 || ci->id == BCMA_CHIP_ID_BCM4749 ||
876 ci->id == BCMA_CHIP_ID_BCM53572) {
877 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
878 u8 et_swtype = 0;
879 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
880 BGMAC_CHIPCTL_1_IF_TYPE_RMII;
881 char buf[2];
882
883 if (nvram_getenv("et_swtype", buf, 1) > 0) {
884 if (kstrtou8(buf, 0, &et_swtype))
885 bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
886 buf);
887 et_swtype &= 0x0f;
888 et_swtype <<= 4;
889 sw_type = et_swtype;
890 } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == 9) {
891 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
892 } else if (0) {
893 /* TODO */
894 }
895 bcma_chipco_chipctl_maskset(cc, 1,
896 ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
897 BGMAC_CHIPCTL_1_SW_TYPE_MASK),
898 sw_type);
899 }
900
901 if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
902 bcma_awrite32(core, BCMA_IOCTL,
903 bcma_aread32(core, BCMA_IOCTL) &
904 ~BGMAC_BCMA_IOCTL_SW_RESET);
905
906	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
907	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
908	 * BGMAC_CMDCFG is read _after_ putting the chip in a reset. So it has to
909	 * be kept set until taking the MAC out of the reset.
910	 */
911 bgmac_cmdcfg_maskset(bgmac,
912 ~(BGMAC_CMDCFG_TE |
913 BGMAC_CMDCFG_RE |
914 BGMAC_CMDCFG_RPI |
915 BGMAC_CMDCFG_TAI |
916 BGMAC_CMDCFG_HD |
917 BGMAC_CMDCFG_ML |
918 BGMAC_CMDCFG_CFE |
919 BGMAC_CMDCFG_RL |
920 BGMAC_CMDCFG_RED |
921 BGMAC_CMDCFG_PE |
922 BGMAC_CMDCFG_TPI |
923 BGMAC_CMDCFG_PAD_EN |
924 BGMAC_CMDCFG_PF),
925 BGMAC_CMDCFG_PROM |
926 BGMAC_CMDCFG_NLC |
927 BGMAC_CMDCFG_CFE |
928 BGMAC_CMDCFG_SR,
929 false);
930
931 bgmac_clear_mib(bgmac);
932 if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
933 bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
934 BCMA_GMAC_CMN_PC_MTE);
935 else
936 bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
937 bgmac_miiconfig(bgmac);
938 bgmac_phy_init(bgmac);
939
940 bgmac->int_status = 0;
941}
942
943static void bgmac_chip_intrs_on(struct bgmac *bgmac)
944{
945 bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
946}
947
948static void bgmac_chip_intrs_off(struct bgmac *bgmac)
949{
950 bgmac_write(bgmac, BGMAC_INT_MASK, 0);
951}
952
953/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
954static void bgmac_enable(struct bgmac *bgmac)
955{
956 struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
957 u32 cmdcfg;
958 u32 mode;
959 u32 rxq_ctl;
960 u32 fl_ctl;
961 u16 bp_clk;
962 u8 mdp;
963
964 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
965 bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
966 BGMAC_CMDCFG_SR, true);
967 udelay(2);
968 cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
969 bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
970
971 mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
972 BGMAC_DS_MM_SHIFT;
973 if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
974 bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
975 if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
976 bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
977 BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
978
979 switch (ci->id) {
980 case BCMA_CHIP_ID_BCM5357:
981 case BCMA_CHIP_ID_BCM4749:
982 case BCMA_CHIP_ID_BCM53572:
983 case BCMA_CHIP_ID_BCM4716:
984 case BCMA_CHIP_ID_BCM47162:
985 fl_ctl = 0x03cb04cb;
986 if (ci->id == BCMA_CHIP_ID_BCM5357 ||
987 ci->id == BCMA_CHIP_ID_BCM4749 ||
988 ci->id == BCMA_CHIP_ID_BCM53572)
989 fl_ctl = 0x2300e1;
990 bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
991 bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
992 break;
993 }
994
995 rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
996 rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
997 bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / 1000000;
998 mdp = (bp_clk * 128 / 1000) - 3;
999 rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
1000 bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
1001}
1002
1003/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
1004static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
1005{
1006 struct bgmac_dma_ring *ring;
1007 u8 *mac = bgmac->net_dev->dev_addr;
1008 u32 tmp;
1009 int i;
1010
1011 /* 1 interrupt per received frame */
1012 bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);
1013
1014 /* Enable 802.3x tx flow control (honor received PAUSE frames) */
1015 bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);
1016
1017 if (bgmac->net_dev->flags & IFF_PROMISC)
1018 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, false);
1019 else
1020 bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, false);
1021
1022 /* Set MAC addr */
1023 tmp = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
1024 bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
1025 tmp = (mac[4] << 8) | mac[5];
1026 bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
1027
1028 if (bgmac->loopback)
1029 bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, true);
1030 else
1031 bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, true);
1032
1033 bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);
1034
1035 if (!bgmac->autoneg) {
1036 bgmac_speed(bgmac, bgmac->speed);
1037 bgmac_phy_force(bgmac);
1038 } else if (bgmac->speed) { /* if there is anything to adv */
1039 bgmac_phy_advertise(bgmac);
1040 }
1041
1042 if (full_init) {
1043 bgmac_dma_init(bgmac);
1044 if (1) /* FIXME: is there any case we don't want IRQs? */
1045 bgmac_chip_intrs_on(bgmac);
1046 } else {
1047 for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
1048 ring = &bgmac->rx_ring[i];
1049 bgmac_dma_rx_enable(bgmac, ring);
1050 }
1051 }
1052
1053 bgmac_enable(bgmac);
1054}
1055
1056static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
1057{
1058 struct bgmac *bgmac = netdev_priv(dev_id);
1059
1060 u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
1061 int_status &= bgmac->int_mask;
1062
1063 if (!int_status)
1064 return IRQ_NONE;
1065
1066 /* Ack */
1067 bgmac_write(bgmac, BGMAC_INT_STATUS, int_status);
1068
1069 /* Disable new interrupts until handling existing ones */
1070 bgmac_chip_intrs_off(bgmac);
1071
1072 bgmac->int_status = int_status;
1073
1074 napi_schedule(&bgmac->napi);
1075
1076 return IRQ_HANDLED;
1077}
1078
1079static int bgmac_poll(struct napi_struct *napi, int weight)
1080{
1081 struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
1082 struct bgmac_dma_ring *ring;
1083 int handled = 0;
1084
1085 if (bgmac->int_status & BGMAC_IS_TX0) {
1086 ring = &bgmac->tx_ring[0];
1087 bgmac_dma_tx_free(bgmac, ring);
1088 bgmac->int_status &= ~BGMAC_IS_TX0;
1089 }
1090
1091 if (bgmac->int_status & BGMAC_IS_RX) {
1092 ring = &bgmac->rx_ring[0];
1093 handled += bgmac_dma_rx_read(bgmac, ring, weight);
1094 bgmac->int_status &= ~BGMAC_IS_RX;
1095 }
1096
1097 if (bgmac->int_status) {
1098 bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", bgmac->int_status);
1099 bgmac->int_status = 0;
1100 }
1101
1102 if (handled < weight)
1103 napi_complete(napi);
1104
1105 bgmac_chip_intrs_on(bgmac);
1106
1107 return handled;
1108}
1109
1110/**************************************************
1111 * net_device_ops
1112 **************************************************/
1113
1114static int bgmac_open(struct net_device *net_dev)
1115{
1116 struct bgmac *bgmac = netdev_priv(net_dev);
1117 int err = 0;
1118
1119 bgmac_chip_reset(bgmac);
1120	/* Specs mention reclaiming rings here, but we do that in DMA init */
1121 bgmac_chip_init(bgmac, true);
1122
1123 err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
1124 KBUILD_MODNAME, net_dev);
1125 if (err < 0) {
1126 bgmac_err(bgmac, "IRQ request error: %d!\n", err);
1127 goto err_out;
1128 }
1129 napi_enable(&bgmac->napi);
1130
1131 netif_carrier_on(net_dev);
1132
1133err_out:
1134 return err;
1135}
1136
1137static int bgmac_stop(struct net_device *net_dev)
1138{
1139 struct bgmac *bgmac = netdev_priv(net_dev);
1140
1141 netif_carrier_off(net_dev);
1142
1143 napi_disable(&bgmac->napi);
1144 bgmac_chip_intrs_off(bgmac);
1145 free_irq(bgmac->core->irq, net_dev);
1146
1147 bgmac_chip_reset(bgmac);
1148
1149 return 0;
1150}
1151
1152static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
1153 struct net_device *net_dev)
1154{
1155 struct bgmac *bgmac = netdev_priv(net_dev);
1156 struct bgmac_dma_ring *ring;
1157
1158 /* No QOS support yet */
1159 ring = &bgmac->tx_ring[0];
1160 return bgmac_dma_tx_add(bgmac, ring, skb);
1161}
1162
1163static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
1164{
1165 struct bgmac *bgmac = netdev_priv(net_dev);
1166 struct mii_ioctl_data *data = if_mii(ifr);
1167
1168 switch (cmd) {
1169 case SIOCGMIIPHY:
1170 data->phy_id = bgmac->phyaddr;
1171 /* fallthru */
1172 case SIOCGMIIREG:
1173 if (!netif_running(net_dev))
1174 return -EAGAIN;
1175 data->val_out = bgmac_phy_read(bgmac, data->phy_id,
1176 data->reg_num & 0x1f);
1177 return 0;
1178 case SIOCSMIIREG:
1179 if (!netif_running(net_dev))
1180 return -EAGAIN;
1181 bgmac_phy_write(bgmac, data->phy_id, data->reg_num & 0x1f,
1182 data->val_in);
1183 return 0;
1184 default:
1185 return -EOPNOTSUPP;
1186 }
1187}
1188
1189static const struct net_device_ops bgmac_netdev_ops = {
1190 .ndo_open = bgmac_open,
1191 .ndo_stop = bgmac_stop,
1192 .ndo_start_xmit = bgmac_start_xmit,
1193 .ndo_set_mac_address = eth_mac_addr, /* generic, sets dev_addr */
1194 .ndo_do_ioctl = bgmac_ioctl,
1195};
1196
1197/**************************************************
1198 * ethtool_ops
1199 **************************************************/
1200
1201static int bgmac_get_settings(struct net_device *net_dev,
1202 struct ethtool_cmd *cmd)
1203{
1204 struct bgmac *bgmac = netdev_priv(net_dev);
1205
1206 cmd->supported = SUPPORTED_10baseT_Half |
1207 SUPPORTED_10baseT_Full |
1208 SUPPORTED_100baseT_Half |
1209 SUPPORTED_100baseT_Full |
1210 SUPPORTED_1000baseT_Half |
1211 SUPPORTED_1000baseT_Full |
1212 SUPPORTED_Autoneg;
1213
1214 if (bgmac->autoneg) {
1215 WARN_ON(cmd->advertising);
1216 if (bgmac->full_duplex) {
1217 if (bgmac->speed & BGMAC_SPEED_10)
1218 cmd->advertising |= ADVERTISED_10baseT_Full;
1219 if (bgmac->speed & BGMAC_SPEED_100)
1220 cmd->advertising |= ADVERTISED_100baseT_Full;
1221 if (bgmac->speed & BGMAC_SPEED_1000)
1222 cmd->advertising |= ADVERTISED_1000baseT_Full;
1223 } else {
1224 if (bgmac->speed & BGMAC_SPEED_10)
1225 cmd->advertising |= ADVERTISED_10baseT_Half;
1226 if (bgmac->speed & BGMAC_SPEED_100)
1227 cmd->advertising |= ADVERTISED_100baseT_Half;
1228 if (bgmac->speed & BGMAC_SPEED_1000)
1229 cmd->advertising |= ADVERTISED_1000baseT_Half;
1230 }
1231 } else {
1232 switch (bgmac->speed) {
1233 case BGMAC_SPEED_10:
1234 ethtool_cmd_speed_set(cmd, SPEED_10);
1235 break;
1236 case BGMAC_SPEED_100:
1237 ethtool_cmd_speed_set(cmd, SPEED_100);
1238 break;
1239 case BGMAC_SPEED_1000:
1240 ethtool_cmd_speed_set(cmd, SPEED_1000);
1241 break;
1242 }
1243 }
1244
1245 cmd->duplex = bgmac->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1246
1247 cmd->autoneg = bgmac->autoneg;
1248
1249 return 0;
1250}
1251
1252#if 0
1253static int bgmac_set_settings(struct net_device *net_dev,
1254 struct ethtool_cmd *cmd)
1255{
1256 struct bgmac *bgmac = netdev_priv(net_dev);
1257
1258 return -1;
1259}
1260#endif
1261
1262static void bgmac_get_drvinfo(struct net_device *net_dev,
1263 struct ethtool_drvinfo *info)
1264{
1265 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1266 strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
1267}
1268
1269static const struct ethtool_ops bgmac_ethtool_ops = {
1270 .get_settings = bgmac_get_settings,
1271 .get_drvinfo = bgmac_get_drvinfo,
1272};
1273
1274/**************************************************
1275 * BCMA bus ops
1276 **************************************************/
1277
1278/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
1279static int bgmac_probe(struct bcma_device *core)
1280{
1281 struct net_device *net_dev;
1282 struct bgmac *bgmac;
1283 struct ssb_sprom *sprom = &core->bus->sprom;
1284 u8 *mac = core->core_unit ? sprom->et1mac : sprom->et0mac;
1285 int err;
1286
1287 /* We don't support 2nd, 3rd, ... units, SPROM has to be adjusted */
1288 if (core->core_unit > 1) {
1289 pr_err("Unsupported core_unit %d\n", core->core_unit);
1290 return -ENOTSUPP;
1291 }
1292
1293 /* Allocation and references */
1294 net_dev = alloc_etherdev(sizeof(*bgmac));
1295 if (!net_dev)
1296 return -ENOMEM;
1297 net_dev->netdev_ops = &bgmac_netdev_ops;
1298 net_dev->irq = core->irq;
1299 SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
1300 bgmac = netdev_priv(net_dev);
1301 bgmac->net_dev = net_dev;
1302 bgmac->core = core;
1303 bcma_set_drvdata(core, bgmac);
1304
1305 /* Defaults */
1306 bgmac->autoneg = true;
1307 bgmac->full_duplex = true;
1308 bgmac->speed = BGMAC_SPEED_10 | BGMAC_SPEED_100 | BGMAC_SPEED_1000;
1309 memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
1310
1311 /* On BCM4706 we need common core to access PHY */
1312 if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
1313 !core->bus->drv_gmac_cmn.core) {
1314 bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
1315 err = -ENODEV;
1316 goto err_netdev_free;
1317 }
1318 bgmac->cmn = core->bus->drv_gmac_cmn.core;
1319
1320 bgmac->phyaddr = core->core_unit ? sprom->et1phyaddr :
1321 sprom->et0phyaddr;
1322 bgmac->phyaddr &= BGMAC_PHY_MASK;
1323 if (bgmac->phyaddr == BGMAC_PHY_MASK) {
1324 bgmac_err(bgmac, "No PHY found\n");
1325 err = -ENODEV;
1326 goto err_netdev_free;
1327 }
1328 bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
1329 bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
1330
1331 if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
1332 bgmac_err(bgmac, "PCI setup not implemented\n");
1333 err = -ENOTSUPP;
1334 goto err_netdev_free;
1335 }
1336
1337 bgmac_chip_reset(bgmac);
1338
1339 err = bgmac_dma_alloc(bgmac);
1340 if (err) {
1341 bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
1342 goto err_netdev_free;
1343 }
1344
1345 bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
1346 if (nvram_getenv("et0_no_txint", NULL, 0) == 0)
1347 bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
1348
1349 /* TODO: reset the external phy. Specs are needed */
1350 bgmac_phy_reset(bgmac);
1351
1352 bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
1353 BGMAC_BFL_ENETROBO);
1354 if (bgmac->has_robosw)
1355 bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
1356
1357 if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
1358 bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
1359
1360 err = register_netdev(bgmac->net_dev);
1361 if (err) {
1362 bgmac_err(bgmac, "Cannot register net device\n");
1363 err = -ENOTSUPP;
1364 goto err_dma_free;
1365 }
1366
1367 netif_carrier_off(net_dev);
1368
1369 netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
1370
1371 return 0;
1372
1373err_dma_free:
1374 bgmac_dma_free(bgmac);
1375
1376err_netdev_free:
1377 bcma_set_drvdata(core, NULL);
1378 free_netdev(net_dev);
1379
1380 return err;
1381}
1382
1383static void bgmac_remove(struct bcma_device *core)
1384{
1385 struct bgmac *bgmac = bcma_get_drvdata(core);
1386
1387 netif_napi_del(&bgmac->napi);
1388 unregister_netdev(bgmac->net_dev);
1389 bgmac_dma_free(bgmac);
1390 bcma_set_drvdata(core, NULL);
1391 free_netdev(bgmac->net_dev);
1392}
1393
1394static struct bcma_driver bgmac_bcma_driver = {
1395 .name = KBUILD_MODNAME,
1396 .id_table = bgmac_bcma_tbl,
1397 .probe = bgmac_probe,
1398 .remove = bgmac_remove,
1399};
1400
1401static int __init bgmac_init(void)
1402{
1403 int err;
1404
1405 err = bcma_driver_register(&bgmac_bcma_driver);
1406 if (err)
1407 return err;
1408 pr_info("Broadcom 47xx GBit MAC driver loaded\n");
1409
1410 return 0;
1411}
1412
1413static void __exit bgmac_exit(void)
1414{
1415 bcma_driver_unregister(&bgmac_bcma_driver);
1416}
1417
1418module_init(bgmac_init)
1419module_exit(bgmac_exit)
1420
1421MODULE_AUTHOR("Rafał Miłecki");
1422MODULE_LICENSE("GPL");