author	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:08:04 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:08:04 -0400
commit	de390bba797aa9a554bc1769b6a8771605854d79
tree	ce95610d4a70ec0a7307a30cfd1a66fdf0c901ab	/drivers/net/ethernet/octeon
parent	50e0d10232db05c6776afcf6098459bff47e8b15
parent	382fc33b4a04e2dde89b4c69a6880e0c7d9761e2
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS update from Ralf Baechle:
 "This is the MIPS update for 3.7.

  A fair chunk of them are platform updates to the Cavium Octeon SOC
  (which involves machine generated header files of considerable size),
  Atheros ATH79xx, RMI aka Netlogic aka Broadcom XLP, Broadcom BCM63xx
  platforms.

  Support for the commercial MIPS simulator MIPSsim has been removed as
  MIPS Technologies is shifting away from this product and Qemu is
  offering various more powerful platforms.

  The generic MIPS code can now also probe for no-execute / write-only
  TLB features implemented without the full SmartMIPS extension as
  permitted by the latest MIPS processor architecture.

  Lots of small changes to generic code."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (78 commits)
  MIPS: ath79: Fix CPU/DDR frequency calculation for SRIF PLLs
  MIPS: ath79: use correct fractional dividers for {CPU,DDR}_PLL on AR934x
  MIPS: BCM63XX: Properly handle mac address octet overflow
  MIPS: Kconfig: Avoid build errors by hiding USE_OF from the user.
  MIPS: Replace `-' in defconfig filename wth `_' for consistency.
  MIPS: Wire kcmp syscall.
  MIPS: MIPSsim: Remove the MIPSsim platform.
  MIPS: NOTIFY_RESUME is not needed in TIF masks
  MIPS: Merge the identical "return from syscall" per-ABI code
  MIPS: Unobfuscate _TIF..._MASK
  MIPS: Prevent hitting do_notify_resume() with !user_mode(regs).
  MIPS: Replace 'kernel_uses_smartmips_rixi' with 'cpu_has_rixi'.
  MIPS: Add base architecture support for RI and XI.
  MIPS: Optimise TLB handlers for MIPS32/64 R2 cores.
  MIPS: uasm: Add INS and EXT instructions.
  MIPS: Avoid pipeline stalls on some MIPS32R2 cores.
  MIPS: Make VPE count to be one-based.
  MIPS: Add new end of interrupt functionality for GIC.
  MIPS: Add EIC support for GIC.
  MIPS: Code clean-ups for the GIC.
  ...
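For the octeon_mgmt changes below, the headline feature is hardware timestamping via the standard SIOCSHWTSTAMP ioctl. The following is a minimal userspace sketch of driving the new path; the interface name "mgmt0" is an assumption, while the structs and flags are the generic <linux/net_tstamp.h> ABI that the new octeon_mgmt_ioctl_hwtstamp() handler accepts.

/* Hedged sketch: enable HW timestamping on the Octeon management port.
 * The interface name "mgmt0" is assumed; everything else is the stock
 * Linux net_tstamp ABI handled by the driver's SIOCSHWTSTAMP path.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;	     /* timestamp transmitted frames */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL; /* driver coerces PTP filters to ALL */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "mgmt0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* Fails with EINVAL on non-CN6XXX parts, where no PTP clock exists */
	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");

	close(fd);
	return 0;
}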
Diffstat (limited to 'drivers/net/ethernet/octeon')
-rw-r--r--	drivers/net/ethernet/octeon/octeon_mgmt.c	550
1 file changed, 445 insertions(+), 105 deletions(-)
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index a688a2ddcfd6..f97719c48516 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -3,13 +3,14 @@
  * License. See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2009 Cavium Networks
+ * Copyright (C) 2009-2012 Cavium, Inc
  */
 
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/etherdevice.h>
 #include <linux/capability.h>
+#include <linux/net_tstamp.h>
 #include <linux/interrupt.h>
 #include <linux/netdevice.h>
 #include <linux/spinlock.h>
@@ -33,8 +34,7 @@
 
 #define OCTEON_MGMT_NAPI_WEIGHT 16
 
-/*
- * Ring sizes that are powers of two allow for more efficient modulo
+/* Ring sizes that are powers of two allow for more efficient modulo
  * opertions.
  */
 #define OCTEON_MGMT_RX_RING_SIZE 512
@@ -93,6 +93,7 @@ union mgmt_port_ring_entry {
 #define AGL_GMX_RX_ADR_CAM4 0x1a0
 #define AGL_GMX_RX_ADR_CAM5 0x1a8
 
+#define AGL_GMX_TX_CLK 0x208
 #define AGL_GMX_TX_STATS_CTL 0x268
 #define AGL_GMX_TX_CTL 0x270
 #define AGL_GMX_TX_STAT0 0x280
@@ -110,8 +111,10 @@ struct octeon_mgmt {
 	struct net_device *netdev;
 	u64 mix;
 	u64 agl;
+	u64 agl_prt_ctl;
 	int port;
 	int irq;
+	bool has_rx_tstamp;
 	u64 *tx_ring;
 	dma_addr_t tx_ring_handle;
 	unsigned int tx_next;
@@ -131,6 +134,7 @@ struct octeon_mgmt {
 	spinlock_t lock;
 	unsigned int last_duplex;
 	unsigned int last_link;
+	unsigned int last_speed;
 	struct device *dev;
 	struct napi_struct napi;
 	struct tasklet_struct tx_clean_tasklet;
@@ -140,6 +144,8 @@ struct octeon_mgmt {
 	resource_size_t mix_size;
 	resource_size_t agl_phys;
 	resource_size_t agl_size;
+	resource_size_t agl_prt_ctl_phys;
+	resource_size_t agl_prt_ctl_size;
 };
 
 static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
@@ -166,22 +172,22 @@ static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
 	spin_unlock_irqrestore(&p->lock, flags);
 }
 
-static inline void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
 {
 	octeon_mgmt_set_rx_irq(p, 1);
 }
 
-static inline void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
 {
 	octeon_mgmt_set_rx_irq(p, 0);
 }
 
-static inline void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
 {
 	octeon_mgmt_set_tx_irq(p, 1);
 }
 
-static inline void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
+static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
 {
 	octeon_mgmt_set_tx_irq(p, 0);
 }
@@ -233,6 +239,28 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
 	}
 }
 
+static ktime_t ptp_to_ktime(u64 ptptime)
+{
+	ktime_t ktimebase;
+	u64 ptpbase;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	/* Fill the icache with the code */
+	ktime_get_real();
+	/* Flush all pending operations */
+	mb();
+	/* Read the time and PTP clock as close together as
+	 * possible. It is important that this sequence take the same
+	 * amount of time to reduce jitter
+	 */
+	ktimebase = ktime_get_real();
+	ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
+	local_irq_restore(flags);
+
+	return ktime_sub_ns(ktimebase, ptpbase - ptptime);
+}
+
 static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 {
 	union cvmx_mixx_orcnt mix_orcnt;
@@ -272,6 +300,20 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 
 		dma_unmap_single(p->dev, re.s.addr, re.s.len,
 				 DMA_TO_DEVICE);
+
+		/* Read the hardware TX timestamp if one was recorded */
+		if (unlikely(re.s.tstamp)) {
+			struct skb_shared_hwtstamps ts;
+			/* Read the timestamp */
+			u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
+			/* Remove the timestamp from the FIFO */
+			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
+			/* Tell the kernel about the timestamp */
+			ts.syststamp = ptp_to_ktime(ns);
+			ts.hwtstamp = ns_to_ktime(ns);
+			skb_tstamp_tx(skb, &ts);
+		}
+
 		dev_kfree_skb_any(skb);
 		cleaned++;
 
@@ -372,14 +414,23 @@ static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
 		/* A good packet, send it up. */
 		skb_put(skb, re.s.len);
 good:
+		/* Process the RX timestamp if it was recorded */
+		if (p->has_rx_tstamp) {
+			/* The first 8 bytes are the timestamp */
+			u64 ns = *(u64 *)skb->data;
+			struct skb_shared_hwtstamps *ts;
+			ts = skb_hwtstamps(skb);
+			ts->hwtstamp = ns_to_ktime(ns);
+			ts->syststamp = ptp_to_ktime(ns);
+			__skb_pull(skb, 8);
+		}
 		skb->protocol = eth_type_trans(skb, netdev);
 		netdev->stats.rx_packets++;
 		netdev->stats.rx_bytes += skb->len;
 		netif_receive_skb(skb);
 		rc = 0;
 	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
-		/*
-		 * Packet split across skbs. This can happen if we
+		/* Packet split across skbs. This can happen if we
 		 * increase the MTU. Buffers that are already in the
 		 * rx ring can then end up being too small. As the rx
 		 * ring is refilled, buffers sized for the new MTU
@@ -409,8 +460,7 @@ good:
 	} else {
 		/* Some other error, discard it. */
 		dev_kfree_skb_any(skb);
-		/*
-		 * Error statistics are accumulated in
+		/* Error statistics are accumulated in
 		 * octeon_mgmt_update_rx_stats.
 		 */
 	}
@@ -488,7 +538,7 @@ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
 	mix_ctl.s.reset = 1;
 	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
 	cvmx_read_csr(p->mix + MIX_CTL);
-	cvmx_wait(64);
+	octeon_io_clk_delay(64);
 
 	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
 	if (mix_bist.u64)
@@ -537,8 +587,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
 		cam_mode = 0;
 		available_cam_entries = 8;
 	} else {
-		/*
-		 * One CAM entry for the primary address, leaves seven
+		/* One CAM entry for the primary address, leaves seven
 		 * for the secondary addresses.
 		 */
 		available_cam_entries = 7 - netdev->uc.count;
@@ -595,12 +644,10 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
 
 static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
 {
-	struct sockaddr *sa = addr;
+	int r = eth_mac_addr(netdev, addr);
 
-	if (!is_valid_ether_addr(sa->sa_data))
-		return -EADDRNOTAVAIL;
-
-	memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+	if (r)
+		return r;
 
 	octeon_mgmt_set_rx_filtering(netdev);
 
@@ -612,8 +659,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
 	struct octeon_mgmt *p = netdev_priv(netdev);
 	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
 
-	/*
-	 * Limit the MTU to make sure the ethernet packets are between
+	/* Limit the MTU to make sure the ethernet packets are between
 	 * 64 bytes and 16383 bytes.
 	 */
 	if (size_without_fcs < 64 || size_without_fcs > 16383) {
@@ -656,53 +702,258 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
+				      struct ifreq *rq, int cmd)
+{
+	struct octeon_mgmt *p = netdev_priv(netdev);
+	struct hwtstamp_config config;
+	union cvmx_mio_ptp_clock_cfg ptp;
+	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
+	bool have_hw_timestamps = false;
+
+	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	if (config.flags) /* reserved for future extensions */
+		return -EINVAL;
+
+	/* Check the status of hardware for tiemstamps */
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+		/* Get the current state of the PTP clock */
+		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
+		if (!ptp.s.ext_clk_en) {
+			/* The clock has not been configured to use an
+			 * external source. Program it to use the main clock
+			 * reference.
+			 */
+			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
+			if (!ptp.s.ptp_en)
+				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
+			pr_info("PTP Clock: Using sclk reference at %lld Hz\n",
+				(NSEC_PER_SEC << 32) / clock_comp);
+		} else {
+			/* The clock is already programmed to use a GPIO */
+			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
+			pr_info("PTP Clock: Using GPIO %d at %lld Hz\n",
+				ptp.s.ext_clk_in,
+				(NSEC_PER_SEC << 32) / clock_comp);
+		}
+
+		/* Enable the clock if it wasn't done already */
+		if (!ptp.s.ptp_en) {
+			ptp.s.ptp_en = 1;
+			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
+		}
+		have_hw_timestamps = true;
+	}
+
+	if (!have_hw_timestamps)
+		return -EINVAL;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+	case HWTSTAMP_TX_ON:
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		p->has_rx_tstamp = false;
+		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+		rxx_frm_ctl.s.ptp_mode = 0;
+		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
+		break;
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_SOME:
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		p->has_rx_tstamp = have_hw_timestamps;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		if (p->has_rx_tstamp) {
+			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
+			rxx_frm_ctl.s.ptp_mode = 1;
+			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
+		}
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
+		return -EFAULT;
+
+	return 0;
+}
+
 static int octeon_mgmt_ioctl(struct net_device *netdev,
 			     struct ifreq *rq, int cmd)
 {
 	struct octeon_mgmt *p = netdev_priv(netdev);
 
-	if (!netif_running(netdev))
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
+	default:
+		if (p->phydev)
+			return phy_mii_ioctl(p->phydev, rq, cmd);
 		return -EINVAL;
+	}
+}
 
-	if (!p->phydev)
-		return -EINVAL;
+static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
+{
+	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 
-	return phy_mii_ioctl(p->phydev, rq, cmd);
+	/* Disable GMX before we make any changes. */
+	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+	prtx_cfg.s.en = 0;
+	prtx_cfg.s.tx_en = 0;
+	prtx_cfg.s.rx_en = 0;
+	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+		int i;
+		for (i = 0; i < 10; i++) {
+			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
+				break;
+			mdelay(1);
+			i++;
+		}
+	}
+}
+
+static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
+{
+	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+	/* Restore the GMX enable state only if link is set */
+	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+	prtx_cfg.s.tx_en = 1;
+	prtx_cfg.s.rx_en = 1;
+	prtx_cfg.s.en = 1;
+	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+}
+
+static void octeon_mgmt_update_link(struct octeon_mgmt *p)
+{
+	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
+
+	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+	if (!p->phydev->link)
+		prtx_cfg.s.duplex = 1;
+	else
+		prtx_cfg.s.duplex = p->phydev->duplex;
+
+	switch (p->phydev->speed) {
+	case 10:
+		prtx_cfg.s.speed = 0;
+		prtx_cfg.s.slottime = 0;
+
+		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+			prtx_cfg.s.burst = 1;
+			prtx_cfg.s.speed_msb = 1;
+		}
+		break;
+	case 100:
+		prtx_cfg.s.speed = 0;
+		prtx_cfg.s.slottime = 0;
+
+		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+			prtx_cfg.s.burst = 1;
+			prtx_cfg.s.speed_msb = 0;
+		}
+		break;
+	case 1000:
+		/* 1000 MBits is only supported on 6XXX chips */
+		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+			prtx_cfg.s.speed = 1;
+			prtx_cfg.s.speed_msb = 0;
+			/* Only matters for half-duplex */
+			prtx_cfg.s.slottime = 1;
+			prtx_cfg.s.burst = p->phydev->duplex;
+		}
+		break;
+	case 0: /* No link */
+	default:
+		break;
+	}
+
+	/* Write the new GMX setting with the port still disabled. */
+	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+
+	/* Read GMX CFG again to make sure the config is completed. */
+	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
+
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+		union cvmx_agl_gmx_txx_clk agl_clk;
+		union cvmx_agl_prtx_ctl prtx_ctl;
+
+		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
+		/* MII (both speeds) and RGMII 1000 speed. */
+		agl_clk.s.clk_cnt = 1;
+		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
+			if (p->phydev->speed == 10)
+				agl_clk.s.clk_cnt = 50;
+			else if (p->phydev->speed == 100)
+				agl_clk.s.clk_cnt = 5;
+		}
+		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
+	}
 }
 
 static void octeon_mgmt_adjust_link(struct net_device *netdev)
 {
 	struct octeon_mgmt *p = netdev_priv(netdev);
-	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 	unsigned long flags;
 	int link_changed = 0;
 
+	if (!p->phydev)
+		return;
+
 	spin_lock_irqsave(&p->lock, flags);
-	if (p->phydev->link) {
-		if (!p->last_link)
-			link_changed = 1;
-		if (p->last_duplex != p->phydev->duplex) {
-			p->last_duplex = p->phydev->duplex;
-			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
-			prtx_cfg.s.duplex = p->phydev->duplex;
-			cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
-		}
-	} else {
-		if (p->last_link)
-			link_changed = -1;
+
+
+	if (!p->phydev->link && p->last_link)
+		link_changed = -1;
+
+	if (p->phydev->link
+	    && (p->last_duplex != p->phydev->duplex
+		|| p->last_link != p->phydev->link
+		|| p->last_speed != p->phydev->speed)) {
+		octeon_mgmt_disable_link(p);
+		link_changed = 1;
+		octeon_mgmt_update_link(p);
+		octeon_mgmt_enable_link(p);
 	}
+
 	p->last_link = p->phydev->link;
+	p->last_speed = p->phydev->speed;
+	p->last_duplex = p->phydev->duplex;
+
 	spin_unlock_irqrestore(&p->lock, flags);
 
 	if (link_changed != 0) {
 		if (link_changed > 0) {
-			netif_carrier_on(netdev);
 			pr_info("%s: Link is up - %d/%s\n", netdev->name,
 				p->phydev->speed,
 				DUPLEX_FULL == p->phydev->duplex ?
 				"Full" : "Half");
 		} else {
-			netif_carrier_off(netdev);
 			pr_info("%s: Link is down\n", netdev->name);
 		}
 	}
@@ -723,9 +974,7 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
 				   PHY_INTERFACE_MODE_MII);
 
 	if (!p->phydev)
-		return -1;
-
-	phy_start_aneg(p->phydev);
+		return -ENODEV;
 
 	return 0;
 }
@@ -733,12 +982,10 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
 static int octeon_mgmt_open(struct net_device *netdev)
 {
 	struct octeon_mgmt *p = netdev_priv(netdev);
-	int port = p->port;
 	union cvmx_mixx_ctl mix_ctl;
 	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
 	union cvmx_mixx_oring1 oring1;
 	union cvmx_mixx_iring1 iring1;
-	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
 	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
 	union cvmx_mixx_irhwm mix_irhwm;
 	union cvmx_mixx_orhwm mix_orhwm;
@@ -785,9 +1032,30 @@ static int octeon_mgmt_open(struct net_device *netdev)
 		} while (mix_ctl.s.reset);
 	}
 
-	agl_gmx_inf_mode.u64 = 0;
-	agl_gmx_inf_mode.s.en = 1;
-	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
+		agl_gmx_inf_mode.u64 = 0;
+		agl_gmx_inf_mode.s.en = 1;
+		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
+	}
+	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
+	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
+		/* Force compensation values, as they are not
+		 * determined properly by HW
+		 */
+		union cvmx_agl_gmx_drv_ctl drv_ctl;
+
+		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
+		if (p->port) {
+			drv_ctl.s.byp_en1 = 1;
+			drv_ctl.s.nctl1 = 6;
+			drv_ctl.s.pctl1 = 6;
+		} else {
+			drv_ctl.s.byp_en = 1;
+			drv_ctl.s.nctl = 6;
+			drv_ctl.s.pctl = 6;
+		}
+		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+	}
 
 	oring1.u64 = 0;
 	oring1.s.obase = p->tx_ring_handle >> 3;
@@ -799,18 +1067,12 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
 	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
 
-	/* Disable packet I/O. */
-	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
-	prtx_cfg.s.en = 0;
-	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
-
 	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
 	octeon_mgmt_set_mac_address(netdev, &sa);
 
 	octeon_mgmt_change_mtu(netdev, netdev->mtu);
 
-	/*
-	 * Enable the port HW. Packets are not allowed until
+	/* Enable the port HW. Packets are not allowed until
 	 * cvmx_mgmt_port_enable() is called.
 	 */
 	mix_ctl.u64 = 0;
@@ -819,27 +1081,70 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	mix_ctl.s.nbtarb = 0;	/* Arbitration mode */
 	/* MII CB-request FIFO programmable high watermark */
 	mix_ctl.s.mrq_hwm = 1;
+#ifdef __LITTLE_ENDIAN
+	mix_ctl.s.lendian = 1;
+#endif
 	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
 
-	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
-	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
-		/*
-		 * Force compensation values, as they are not
-		 * determined properly by HW
-		 */
-		union cvmx_agl_gmx_drv_ctl drv_ctl;
+	/* Read the PHY to find the mode of the interface. */
+	if (octeon_mgmt_init_phy(netdev)) {
+		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
+		goto err_noirq;
+	}
 
-		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
-		if (port) {
-			drv_ctl.s.byp_en1 = 1;
-			drv_ctl.s.nctl1 = 6;
-			drv_ctl.s.pctl1 = 6;
-		} else {
-			drv_ctl.s.byp_en = 1;
-			drv_ctl.s.nctl = 6;
-			drv_ctl.s.pctl = 6;
+	/* Set the mode of the interface, RGMII/MII. */
+	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
+		union cvmx_agl_prtx_ctl agl_prtx_ctl;
+		int rgmii_mode = (p->phydev->supported &
+				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
+
+		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
+		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+		/* MII clocks counts are based on the 125Mhz
+		 * reference, which has an 8nS period. So our delays
+		 * need to be multiplied by this factor.
+		 */
+#define NS_PER_PHY_CLK 8
+
+		/* Take the DLL and clock tree out of reset */
+		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+		agl_prtx_ctl.s.clkrst = 0;
+		if (rgmii_mode) {
+			agl_prtx_ctl.s.dllrst = 0;
+			agl_prtx_ctl.s.clktx_byp = 0;
 		}
-		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
+		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
+
+		/* Wait for the DLL to lock. External 125 MHz
+		 * reference clock must be stable at this point.
+		 */
+		ndelay(256 * NS_PER_PHY_CLK);
+
+		/* Enable the interface */
+		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+		agl_prtx_ctl.s.enable = 1;
+		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+
+		/* Read the value back to force the previous write */
+		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
+
+		/* Enable the compensation controller */
+		agl_prtx_ctl.s.comp = 1;
+		agl_prtx_ctl.s.drv_byp = 0;
+		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
+		/* Force write out before wait. */
+		cvmx_read_csr(p->agl_prt_ctl);
+
+		/* For compensation state to lock. */
+		ndelay(1040 * NS_PER_PHY_CLK);
+
+		/* Some Ethernet switches cannot handle standard
+		 * Interframe Gap, increase to 16 bytes.
+		 */
+		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
 	}
 
 	octeon_mgmt_rx_fill_ring(netdev);
@@ -870,7 +1175,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
 
 	/* Interrupt when we have 1 or more packets to clean. */
 	mix_orhwm.u64 = 0;
-	mix_orhwm.s.orhwm = 1;
+	mix_orhwm.s.orhwm = 0;
 	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
 
 	/* Enable receive and transmit interrupts */
@@ -879,13 +1184,12 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	mix_intena.s.othena = 1;
 	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
 
-
 	/* Enable packet I/O. */
 
 	rxx_frm_ctl.u64 = 0;
+	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
 	rxx_frm_ctl.s.pre_align = 1;
-	/*
-	 * When set, disables the length check for non-min sized pkts
+	/* When set, disables the length check for non-min sized pkts
 	 * with padding in the client data.
 	 */
 	rxx_frm_ctl.s.pad_len = 1;
@@ -903,33 +1207,26 @@ static int octeon_mgmt_open(struct net_device *netdev)
 	rxx_frm_ctl.s.ctl_drp = 1;
 	/* Strip off the preamble */
 	rxx_frm_ctl.s.pre_strp = 1;
-	/*
-	 * This port is configured to send PREAMBLE+SFD to begin every
+	/* This port is configured to send PREAMBLE+SFD to begin every
 	 * frame. GMX checks that the PREAMBLE is sent correctly.
 	 */
 	rxx_frm_ctl.s.pre_chk = 1;
 	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
 
-	/* Enable the AGL block */
-	agl_gmx_inf_mode.u64 = 0;
-	agl_gmx_inf_mode.s.en = 1;
-	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
-
-	/* Configure the port duplex and enables */
-	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
-	prtx_cfg.s.tx_en = 1;
-	prtx_cfg.s.rx_en = 1;
-	prtx_cfg.s.en = 1;
-	p->last_duplex = 1;
-	prtx_cfg.s.duplex = p->last_duplex;
-	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
+	/* Configure the port duplex, speed and enables */
+	octeon_mgmt_disable_link(p);
+	if (p->phydev)
+		octeon_mgmt_update_link(p);
+	octeon_mgmt_enable_link(p);
 
 	p->last_link = 0;
-	netif_carrier_off(netdev);
-
-	if (octeon_mgmt_init_phy(netdev)) {
-		dev_err(p->dev, "Cannot initialize PHY.\n");
-		goto err_noirq;
+	p->last_speed = 0;
+	/* PHY is not present in simulator. The carrier is enabled
+	 * while initializing the phy for simulator, leave it enabled.
+	 */
+	if (p->phydev) {
+		netif_carrier_off(netdev);
+		phy_start_aneg(p->phydev);
 	}
 
 	netif_wake_queue(netdev);
@@ -959,6 +1256,7 @@ static int octeon_mgmt_stop(struct net_device *netdev)
 
 	if (p->phydev)
 		phy_disconnect(p->phydev);
+	p->phydev = NULL;
 
 	netif_carrier_off(netdev);
 
@@ -991,6 +1289,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
 	int rv = NETDEV_TX_BUSY;
 
 	re.d64 = 0;
+	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
 	re.s.len = skb->len;
 	re.s.addr = dma_map_single(p->dev, skb->data,
 				   skb->len,
@@ -1031,6 +1330,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Ring the bell. */
 	cvmx_write_csr(p->mix + MIX_ORING2, 1);
 
+	netdev->trans_start = jiffies;
 	rv = NETDEV_TX_OK;
 out:
 	octeon_mgmt_update_tx_stats(netdev);
@@ -1068,7 +1368,7 @@ static int octeon_mgmt_get_settings(struct net_device *netdev,
 	if (p->phydev)
 		return phy_ethtool_gset(p->phydev, cmd);
 
-	return -EINVAL;
+	return -EOPNOTSUPP;
 }
 
 static int octeon_mgmt_set_settings(struct net_device *netdev,
@@ -1082,23 +1382,37 @@ static int octeon_mgmt_set_settings(struct net_device *netdev,
 	if (p->phydev)
 		return phy_ethtool_sset(p->phydev, cmd);
 
-	return -EINVAL;
+	return -EOPNOTSUPP;
+}
+
+static int octeon_mgmt_nway_reset(struct net_device *dev)
+{
+	struct octeon_mgmt *p = netdev_priv(dev);
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (p->phydev)
+		return phy_start_aneg(p->phydev);
+
+	return -EOPNOTSUPP;
 }
 
 static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
 	.get_drvinfo = octeon_mgmt_get_drvinfo,
-	.get_link = ethtool_op_get_link,
 	.get_settings = octeon_mgmt_get_settings,
-	.set_settings = octeon_mgmt_set_settings
+	.set_settings = octeon_mgmt_set_settings,
+	.nway_reset = octeon_mgmt_nway_reset,
+	.get_link = ethtool_op_get_link,
 };
 
 static const struct net_device_ops octeon_mgmt_ops = {
 	.ndo_open = octeon_mgmt_open,
 	.ndo_stop = octeon_mgmt_stop,
 	.ndo_start_xmit = octeon_mgmt_xmit,
 	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
 	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
 	.ndo_do_ioctl = octeon_mgmt_ioctl,
 	.ndo_change_mtu = octeon_mgmt_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = octeon_mgmt_poll_controller,
@@ -1113,6 +1427,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 	const u8 *mac;
 	struct resource *res_mix;
 	struct resource *res_agl;
+	struct resource *res_agl_prt_ctl;
 	int len;
 	int result;
 
@@ -1120,6 +1435,8 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 	if (netdev == NULL)
 		return -ENOMEM;
 
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+
 	dev_set_drvdata(&pdev->dev, netdev);
 	p = netdev_priv(netdev);
 	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
@@ -1127,6 +1444,7 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 
 	p->netdev = netdev;
 	p->dev = &pdev->dev;
+	p->has_rx_tstamp = false;
 
 	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
 	if (data && len == sizeof(*data)) {
@@ -1159,10 +1477,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 		goto err;
 	}
 
+	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+	if (res_agl_prt_ctl == NULL) {
+		dev_err(&pdev->dev, "no 'reg' resource\n");
+		result = -ENXIO;
+		goto err;
+	}
+
 	p->mix_phys = res_mix->start;
 	p->mix_size = resource_size(res_mix);
 	p->agl_phys = res_agl->start;
 	p->agl_size = resource_size(res_agl);
+	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
+	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
 
 
 	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
@@ -1181,10 +1508,18 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 		goto err;
 	}
 
+	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
+				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
+		result = -ENXIO;
+		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
+			res_agl_prt_ctl->name);
+		goto err;
+	}
 
 	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
 	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
-
+	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
+					   p->agl_prt_ctl_size);
 	spin_lock_init(&p->lock);
 
 	skb_queue_head_init(&p->tx_list);
@@ -1199,14 +1534,19 @@ static int __devinit octeon_mgmt_probe(struct platform_device *pdev)
 
 	mac = of_get_mac_address(pdev->dev.of_node);
 
-	if (mac)
-		memcpy(netdev->dev_addr, mac, 6);
+	if (mac && is_valid_ether_addr(mac)) {
+		memcpy(netdev->dev_addr, mac, ETH_ALEN);
+		netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
+	} else {
+		eth_hw_addr_random(netdev);
+	}
 
 	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
 
 	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(64);
 	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
 
+	netif_carrier_off(netdev);
 	result = register_netdev(netdev);
 	if (result)
 		goto err;