aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorFlorian Fainelli <florian@openwrt.org>2013-06-12 15:53:05 -0400
committerDavid S. Miller <davem@davemloft.net>2013-06-13 20:22:08 -0400
commit3dc6475c0c9e55ac7f053ad6b8b398e779954545 (patch)
treeb0dfa7d618905408e6ed1fe369e263c9fc02170c
parentca4ec90b31d1ecf01087c607933cf792057bc8bf (diff)
bcm63xx_enet: add support for Broadcom BCM6345 Ethernet
This patch adds support for the Broadcom BCM6345 SoC Ethernet. BCM6345 has a slightly different and older DMA engine which requires the following modifications: - the width of the DMA channels on BCM6345 is 64 bytes vs 16 bytes, which means that the helpers enet_dma{c,s} need to account for this channel width and we can no longer use macros - BCM6345 DMA engine does not have any internal SRAM for transferring buffers - BCM6345 buffer allocation and flow control is not per-channel but global (done in RSET_ENETDMA) - the DMA engine bits are right-shifted by 3 compared to other DMA generations - the DMA enable/interrupt masks are a little different (we need to enable more bits for 6345) - some registers have the same meaning but are offset in the ENET_DMAC space so a lookup table is required to return the proper offset The MAC itself is identical and requires no modifications to work. Signed-off-by: Florian Fainelli <florian@openwrt.org> Acked-by: Ralf Baechle <ralf@linux-mips.org> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--arch/mips/bcm63xx/dev-enet.c65
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h3
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h94
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h43
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c200
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.h15
6 files changed, 329 insertions, 91 deletions
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index 6cbaee0f6d70..52bc01df9bfe 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -9,10 +9,44 @@
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/export.h>
12#include <bcm63xx_dev_enet.h> 13#include <bcm63xx_dev_enet.h>
13#include <bcm63xx_io.h> 14#include <bcm63xx_io.h>
14#include <bcm63xx_regs.h> 15#include <bcm63xx_regs.h>
15 16
17#ifdef BCMCPU_RUNTIME_DETECT
18static const unsigned long bcm6348_regs_enetdmac[] = {
19 [ENETDMAC_CHANCFG] = ENETDMAC_CHANCFG_REG,
20 [ENETDMAC_IR] = ENETDMAC_IR_REG,
21 [ENETDMAC_IRMASK] = ENETDMAC_IRMASK_REG,
22 [ENETDMAC_MAXBURST] = ENETDMAC_MAXBURST_REG,
23};
24
25static const unsigned long bcm6345_regs_enetdmac[] = {
26 [ENETDMAC_CHANCFG] = ENETDMA_6345_CHANCFG_REG,
27 [ENETDMAC_IR] = ENETDMA_6345_IR_REG,
28 [ENETDMAC_IRMASK] = ENETDMA_6345_IRMASK_REG,
29 [ENETDMAC_MAXBURST] = ENETDMA_6345_MAXBURST_REG,
30 [ENETDMAC_BUFALLOC] = ENETDMA_6345_BUFALLOC_REG,
31 [ENETDMAC_RSTART] = ENETDMA_6345_RSTART_REG,
32 [ENETDMAC_FC] = ENETDMA_6345_FC_REG,
33 [ENETDMAC_LEN] = ENETDMA_6345_LEN_REG,
34};
35
36const unsigned long *bcm63xx_regs_enetdmac;
37EXPORT_SYMBOL(bcm63xx_regs_enetdmac);
38
39static __init void bcm63xx_enetdmac_regs_init(void)
40{
41 if (BCMCPU_IS_6345())
42 bcm63xx_regs_enetdmac = bcm6345_regs_enetdmac;
43 else
44 bcm63xx_regs_enetdmac = bcm6348_regs_enetdmac;
45}
46#else
47static __init void bcm63xx_enetdmac_regs_init(void) { }
48#endif
49
16static struct resource shared_res[] = { 50static struct resource shared_res[] = {
17 { 51 {
18 .start = -1, /* filled at runtime */ 52 .start = -1, /* filled at runtime */
@@ -137,12 +171,19 @@ static int __init register_shared(void)
137 if (shared_device_registered) 171 if (shared_device_registered)
138 return 0; 172 return 0;
139 173
174 bcm63xx_enetdmac_regs_init();
175
140 shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA); 176 shared_res[0].start = bcm63xx_regset_address(RSET_ENETDMA);
141 shared_res[0].end = shared_res[0].start; 177 shared_res[0].end = shared_res[0].start;
142 shared_res[0].end += (RSET_ENETDMA_SIZE) - 1; 178 if (BCMCPU_IS_6345())
179 shared_res[0].end += (RSET_6345_ENETDMA_SIZE) - 1;
180 else
181 shared_res[0].end += (RSET_ENETDMA_SIZE) - 1;
143 182
144 if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) 183 if (BCMCPU_IS_6328() || BCMCPU_IS_6362() || BCMCPU_IS_6368())
145 chan_count = 32; 184 chan_count = 32;
185 else if (BCMCPU_IS_6345())
186 chan_count = 8;
146 else 187 else
147 chan_count = 16; 188 chan_count = 16;
148 189
@@ -172,7 +213,7 @@ int __init bcm63xx_enet_register(int unit,
172 if (unit > 1) 213 if (unit > 1)
173 return -ENODEV; 214 return -ENODEV;
174 215
175 if (unit == 1 && BCMCPU_IS_6338()) 216 if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345()))
176 return -ENODEV; 217 return -ENODEV;
177 218
178 ret = register_shared(); 219 ret = register_shared();
@@ -213,6 +254,21 @@ int __init bcm63xx_enet_register(int unit,
213 dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY); 254 dpd->phy_interrupt = bcm63xx_get_irq_number(IRQ_ENET_PHY);
214 } 255 }
215 256
257 dpd->dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
258 dpd->dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
259 if (BCMCPU_IS_6345()) {
260 dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_CHAINING_MASK;
261 dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_WRAP_EN_MASK;
262 dpd->dma_chan_en_mask |= ENETDMAC_CHANCFG_FLOWC_EN_MASK;
263 dpd->dma_chan_int_mask |= ENETDMA_IR_BUFDONE_MASK;
264 dpd->dma_chan_int_mask |= ENETDMA_IR_NOTOWNER_MASK;
265 dpd->dma_chan_width = ENETDMA_6345_CHAN_WIDTH;
266 dpd->dma_desc_shift = ENETDMA_6345_DESC_SHIFT;
267 } else {
268 dpd->dma_has_sram = true;
269 dpd->dma_chan_width = ENETDMA_CHAN_WIDTH;
270 }
271
216 ret = platform_device_register(pdev); 272 ret = platform_device_register(pdev);
217 if (ret) 273 if (ret)
218 return ret; 274 return ret;
@@ -246,6 +302,11 @@ bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd)
246 else if (BCMCPU_IS_6362() || BCMCPU_IS_6368()) 302 else if (BCMCPU_IS_6362() || BCMCPU_IS_6368())
247 enetsw_pd.num_ports = ENETSW_PORTS_6368; 303 enetsw_pd.num_ports = ENETSW_PORTS_6368;
248 304
305 enetsw_pd.dma_has_sram = true;
306 enetsw_pd.dma_chan_width = ENETDMA_CHAN_WIDTH;
307 enetsw_pd.dma_chan_en_mask = ENETDMAC_CHANCFG_EN_MASK;
308 enetsw_pd.dma_chan_int_mask = ENETDMAC_IR_PKTDONE_MASK;
309
249 ret = platform_device_register(&bcm63xx_enetsw_device); 310 ret = platform_device_register(&bcm63xx_enetsw_device);
250 if (ret) 311 if (ret)
251 return ret; 312 return ret;
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index 9981f4f0e42f..e6e65dc7d502 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -174,6 +174,7 @@ enum bcm63xx_regs_set {
174#define BCM_6368_RSET_SPI_SIZE 1804 174#define BCM_6368_RSET_SPI_SIZE 1804
175#define RSET_ENET_SIZE 2048 175#define RSET_ENET_SIZE 2048
176#define RSET_ENETDMA_SIZE 256 176#define RSET_ENETDMA_SIZE 256
177#define RSET_6345_ENETDMA_SIZE 64
177#define RSET_ENETDMAC_SIZE(chans) (16 * (chans)) 178#define RSET_ENETDMAC_SIZE(chans) (16 * (chans))
178#define RSET_ENETDMAS_SIZE(chans) (16 * (chans)) 179#define RSET_ENETDMAS_SIZE(chans) (16 * (chans))
179#define RSET_ENETSW_SIZE 65536 180#define RSET_ENETSW_SIZE 65536
@@ -300,7 +301,7 @@ enum bcm63xx_regs_set {
300#define BCM_6345_USBDMA_BASE (0xfffe2800) 301#define BCM_6345_USBDMA_BASE (0xfffe2800)
301#define BCM_6345_ENET0_BASE (0xfffe1800) 302#define BCM_6345_ENET0_BASE (0xfffe1800)
302#define BCM_6345_ENETDMA_BASE (0xfffe2800) 303#define BCM_6345_ENETDMA_BASE (0xfffe2800)
303#define BCM_6345_ENETDMAC_BASE (0xfffe2900) 304#define BCM_6345_ENETDMAC_BASE (0xfffe2840)
304#define BCM_6345_ENETDMAS_BASE (0xfffe2a00) 305#define BCM_6345_ENETDMAS_BASE (0xfffe2a00)
305#define BCM_6345_ENETSW_BASE (0xdeadbeef) 306#define BCM_6345_ENETSW_BASE (0xdeadbeef)
306#define BCM_6345_PCMCIA_BASE (0xfffe2028) 307#define BCM_6345_PCMCIA_BASE (0xfffe2028)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
index 118e3c938841..753953e86242 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -4,6 +4,8 @@
4#include <linux/if_ether.h> 4#include <linux/if_ether.h>
5#include <linux/init.h> 5#include <linux/init.h>
6 6
7#include <bcm63xx_regs.h>
8
7/* 9/*
8 * on board ethernet platform data 10 * on board ethernet platform data
9 */ 11 */
@@ -37,6 +39,21 @@ struct bcm63xx_enet_platform_data {
37 int phy_id, int reg), 39 int phy_id, int reg),
38 void (*mii_write)(struct net_device *dev, 40 void (*mii_write)(struct net_device *dev,
39 int phy_id, int reg, int val)); 41 int phy_id, int reg, int val));
42
43 /* DMA channel enable mask */
44 u32 dma_chan_en_mask;
45
46 /* DMA channel interrupt mask */
47 u32 dma_chan_int_mask;
48
49 /* DMA engine has internal SRAM */
50 bool dma_has_sram;
51
52 /* DMA channel register width */
53 unsigned int dma_chan_width;
54
55 /* DMA descriptor shift */
56 unsigned int dma_desc_shift;
40}; 57};
41 58
42/* 59/*
@@ -63,6 +80,18 @@ struct bcm63xx_enetsw_platform_data {
63 char mac_addr[ETH_ALEN]; 80 char mac_addr[ETH_ALEN];
64 int num_ports; 81 int num_ports;
65 struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT]; 82 struct bcm63xx_enetsw_port used_ports[ENETSW_MAX_PORT];
83
84 /* DMA channel enable mask */
85 u32 dma_chan_en_mask;
86
87 /* DMA channel interrupt mask */
88 u32 dma_chan_int_mask;
89
90 /* DMA channel register width */
91 unsigned int dma_chan_width;
92
93 /* DMA engine has internal SRAM */
94 bool dma_has_sram;
66}; 95};
67 96
68int __init bcm63xx_enet_register(int unit, 97int __init bcm63xx_enet_register(int unit,
@@ -70,4 +99,69 @@ int __init bcm63xx_enet_register(int unit,
70 99
71int bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd); 100int bcm63xx_enetsw_register(const struct bcm63xx_enetsw_platform_data *pd);
72 101
102enum bcm63xx_regs_enetdmac {
103 ENETDMAC_CHANCFG,
104 ENETDMAC_IR,
105 ENETDMAC_IRMASK,
106 ENETDMAC_MAXBURST,
107 ENETDMAC_BUFALLOC,
108 ENETDMAC_RSTART,
109 ENETDMAC_FC,
110 ENETDMAC_LEN,
111};
112
113static inline unsigned long bcm63xx_enetdmacreg(enum bcm63xx_regs_enetdmac reg)
114{
115#ifdef BCMCPU_RUNTIME_DETECT
116 extern const unsigned long *bcm63xx_regs_enetdmac;
117
118 return bcm63xx_regs_enetdmac[reg];
119#else
120#ifdef CONFIG_BCM63XX_CPU_6345
121 switch (reg) {
122 case ENETDMAC_CHANCFG:
123 return ENETDMA_6345_CHANCFG_REG;
124 case ENETDMAC_IR:
125 return ENETDMA_6345_IR_REG;
126 case ENETDMAC_IRMASK:
127 return ENETDMA_6345_IRMASK_REG;
128 case ENETDMAC_MAXBURST:
129 return ENETDMA_6345_MAXBURST_REG;
130 case ENETDMAC_BUFALLOC:
131 return ENETDMA_6345_BUFALLOC_REG;
132 case ENETDMAC_RSTART:
133 return ENETDMA_6345_RSTART_REG;
134 case ENETDMAC_FC:
135 return ENETDMA_6345_FC_REG;
136 case ENETDMAC_LEN:
137 return ENETDMA_6345_LEN_REG;
138 }
139#endif
140#if defined(CONFIG_BCM63XX_CPU_6328) || \
141 defined(CONFIG_BCM63XX_CPU_6338) || \
142 defined(CONFIG_BCM63XX_CPU_6348) || \
143 defined(CONFIG_BCM63XX_CPU_6358) || \
144 defined(CONFIG_BCM63XX_CPU_6362) || \
145 defined(CONFIG_BCM63XX_CPU_6368)
146 switch (reg) {
147 case ENETDMAC_CHANCFG:
148 return ENETDMAC_CHANCFG_REG;
149 case ENETDMAC_IR:
150 return ENETDMAC_IR_REG;
151 case ENETDMAC_IRMASK:
152 return ENETDMAC_IRMASK_REG;
153 case ENETDMAC_MAXBURST:
154 return ENETDMAC_MAXBURST_REG;
155 case ENETDMAC_BUFALLOC:
156 case ENETDMAC_RSTART:
157 case ENETDMAC_FC:
158 case ENETDMAC_LEN:
159 return 0;
160 }
161#endif
162#endif
163 return 0;
164}
165
166
73#endif /* ! BCM63XX_DEV_ENET_H_ */ 167#endif /* ! BCM63XX_DEV_ENET_H_ */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index 0a2121abb1a6..eff7ca7d12b0 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -727,6 +727,8 @@
727/************************************************************************* 727/*************************************************************************
728 * _REG relative to RSET_ENETDMA 728 * _REG relative to RSET_ENETDMA
729 *************************************************************************/ 729 *************************************************************************/
730#define ENETDMA_CHAN_WIDTH 0x10
731#define ENETDMA_6345_CHAN_WIDTH 0x40
730 732
731/* Controller Configuration Register */ 733/* Controller Configuration Register */
732#define ENETDMA_CFG_REG (0x0) 734#define ENETDMA_CFG_REG (0x0)
@@ -782,31 +784,56 @@
782/* State Ram Word 4 */ 784/* State Ram Word 4 */
783#define ENETDMA_SRAM4_REG(x) (0x20c + (x) * 0x10) 785#define ENETDMA_SRAM4_REG(x) (0x20c + (x) * 0x10)
784 786
787/* Broadcom 6345 ENET DMA definitions */
788#define ENETDMA_6345_CHANCFG_REG (0x00)
789
790#define ENETDMA_6345_MAXBURST_REG (0x40)
791
792#define ENETDMA_6345_RSTART_REG (0x08)
793
794#define ENETDMA_6345_LEN_REG (0x0C)
795
796#define ENETDMA_6345_IR_REG (0x14)
797
798#define ENETDMA_6345_IRMASK_REG (0x18)
799
800#define ENETDMA_6345_FC_REG (0x1C)
801
802#define ENETDMA_6345_BUFALLOC_REG (0x20)
803
804/* Shift down for EOP, SOP and WRAP bits */
805#define ENETDMA_6345_DESC_SHIFT (3)
785 806
786/************************************************************************* 807/*************************************************************************
787 * _REG relative to RSET_ENETDMAC 808 * _REG relative to RSET_ENETDMAC
788 *************************************************************************/ 809 *************************************************************************/
789 810
790/* Channel Configuration register */ 811/* Channel Configuration register */
791#define ENETDMAC_CHANCFG_REG(x) ((x) * 0x10) 812#define ENETDMAC_CHANCFG_REG (0x0)
792#define ENETDMAC_CHANCFG_EN_SHIFT 0 813#define ENETDMAC_CHANCFG_EN_SHIFT 0
793#define ENETDMAC_CHANCFG_EN_MASK (1 << ENETDMAC_CHANCFG_EN_SHIFT) 814#define ENETDMAC_CHANCFG_EN_MASK (1 << ENETDMAC_CHANCFG_EN_SHIFT)
794#define ENETDMAC_CHANCFG_PKTHALT_SHIFT 1 815#define ENETDMAC_CHANCFG_PKTHALT_SHIFT 1
795#define ENETDMAC_CHANCFG_PKTHALT_MASK (1 << ENETDMAC_CHANCFG_PKTHALT_SHIFT) 816#define ENETDMAC_CHANCFG_PKTHALT_MASK (1 << ENETDMAC_CHANCFG_PKTHALT_SHIFT)
796#define ENETDMAC_CHANCFG_BUFHALT_SHIFT 2 817#define ENETDMAC_CHANCFG_BUFHALT_SHIFT 2
797#define ENETDMAC_CHANCFG_BUFHALT_MASK (1 << ENETDMAC_CHANCFG_BUFHALT_SHIFT) 818#define ENETDMAC_CHANCFG_BUFHALT_MASK (1 << ENETDMAC_CHANCFG_BUFHALT_SHIFT)
819#define ENETDMAC_CHANCFG_CHAINING_SHIFT 2
820#define ENETDMAC_CHANCFG_CHAINING_MASK (1 << ENETDMAC_CHANCFG_CHAINING_SHIFT)
821#define ENETDMAC_CHANCFG_WRAP_EN_SHIFT 3
822#define ENETDMAC_CHANCFG_WRAP_EN_MASK (1 << ENETDMAC_CHANCFG_WRAP_EN_SHIFT)
823#define ENETDMAC_CHANCFG_FLOWC_EN_SHIFT 4
824#define ENETDMAC_CHANCFG_FLOWC_EN_MASK (1 << ENETDMAC_CHANCFG_FLOWC_EN_SHIFT)
798 825
799/* Interrupt Control/Status register */ 826/* Interrupt Control/Status register */
800#define ENETDMAC_IR_REG(x) (0x4 + (x) * 0x10) 827#define ENETDMAC_IR_REG (0x4)
801#define ENETDMAC_IR_BUFDONE_MASK (1 << 0) 828#define ENETDMAC_IR_BUFDONE_MASK (1 << 0)
802#define ENETDMAC_IR_PKTDONE_MASK (1 << 1) 829#define ENETDMAC_IR_PKTDONE_MASK (1 << 1)
803#define ENETDMAC_IR_NOTOWNER_MASK (1 << 2) 830#define ENETDMAC_IR_NOTOWNER_MASK (1 << 2)
804 831
805/* Interrupt Mask register */ 832/* Interrupt Mask register */
806#define ENETDMAC_IRMASK_REG(x) (0x8 + (x) * 0x10) 833#define ENETDMAC_IRMASK_REG (0x8)
807 834
808/* Maximum Burst Length */ 835/* Maximum Burst Length */
809#define ENETDMAC_MAXBURST_REG(x) (0xc + (x) * 0x10) 836#define ENETDMAC_MAXBURST_REG (0xc)
810 837
811 838
812/************************************************************************* 839/*************************************************************************
@@ -814,16 +841,16 @@
814 *************************************************************************/ 841 *************************************************************************/
815 842
816/* Ring Start Address register */ 843/* Ring Start Address register */
817#define ENETDMAS_RSTART_REG(x) ((x) * 0x10) 844#define ENETDMAS_RSTART_REG (0x0)
818 845
819/* State Ram Word 2 */ 846/* State Ram Word 2 */
820#define ENETDMAS_SRAM2_REG(x) (0x4 + (x) * 0x10) 847#define ENETDMAS_SRAM2_REG (0x4)
821 848
822/* State Ram Word 3 */ 849/* State Ram Word 3 */
823#define ENETDMAS_SRAM3_REG(x) (0x8 + (x) * 0x10) 850#define ENETDMAS_SRAM3_REG (0x8)
824 851
825/* State Ram Word 4 */ 852/* State Ram Word 4 */
826#define ENETDMAS_SRAM4_REG(x) (0xc + (x) * 0x10) 853#define ENETDMAS_SRAM4_REG (0xc)
827 854
828 855
829/************************************************************************* 856/*************************************************************************
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index fbbfc4acd53f..8f1ac023bb03 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -107,26 +107,28 @@ static inline void enet_dma_writel(struct bcm_enet_priv *priv,
107 bcm_writel(val, bcm_enet_shared_base[0] + off); 107 bcm_writel(val, bcm_enet_shared_base[0] + off);
108} 108}
109 109
110static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off) 110static inline u32 enet_dmac_readl(struct bcm_enet_priv *priv, u32 off, int chan)
111{ 111{
112 return bcm_readl(bcm_enet_shared_base[1] + off); 112 return bcm_readl(bcm_enet_shared_base[1] +
113 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
113} 114}
114 115
115static inline void enet_dmac_writel(struct bcm_enet_priv *priv, 116static inline void enet_dmac_writel(struct bcm_enet_priv *priv,
116 u32 val, u32 off) 117 u32 val, u32 off, int chan)
117{ 118{
118 bcm_writel(val, bcm_enet_shared_base[1] + off); 119 bcm_writel(val, bcm_enet_shared_base[1] +
120 bcm63xx_enetdmacreg(off) + chan * priv->dma_chan_width);
119} 121}
120 122
121static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off) 123static inline u32 enet_dmas_readl(struct bcm_enet_priv *priv, u32 off, int chan)
122{ 124{
123 return bcm_readl(bcm_enet_shared_base[2] + off); 125 return bcm_readl(bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
124} 126}
125 127
126static inline void enet_dmas_writel(struct bcm_enet_priv *priv, 128static inline void enet_dmas_writel(struct bcm_enet_priv *priv,
127 u32 val, u32 off) 129 u32 val, u32 off, int chan)
128{ 130{
129 bcm_writel(val, bcm_enet_shared_base[2] + off); 131 bcm_writel(val, bcm_enet_shared_base[2] + off + chan * priv->dma_chan_width);
130} 132}
131 133
132/* 134/*
@@ -262,7 +264,7 @@ static int bcm_enet_refill_rx(struct net_device *dev)
262 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; 264 len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
263 len_stat |= DMADESC_OWNER_MASK; 265 len_stat |= DMADESC_OWNER_MASK;
264 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { 266 if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
265 len_stat |= DMADESC_WRAP_MASK; 267 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
266 priv->rx_dirty_desc = 0; 268 priv->rx_dirty_desc = 0;
267 } else { 269 } else {
268 priv->rx_dirty_desc++; 270 priv->rx_dirty_desc++;
@@ -273,7 +275,10 @@ static int bcm_enet_refill_rx(struct net_device *dev)
273 priv->rx_desc_count++; 275 priv->rx_desc_count++;
274 276
275 /* tell dma engine we allocated one buffer */ 277 /* tell dma engine we allocated one buffer */
276 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); 278 if (priv->dma_has_sram)
279 enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
280 else
281 enet_dmac_writel(priv, 1, ENETDMAC_BUFALLOC, priv->rx_chan);
277 } 282 }
278 283
279 /* If rx ring is still empty, set a timer to try allocating 284 /* If rx ring is still empty, set a timer to try allocating
@@ -349,7 +354,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
349 354
350 /* if the packet does not have start of packet _and_ 355 /* if the packet does not have start of packet _and_
351 * end of packet flag set, then just recycle it */ 356 * end of packet flag set, then just recycle it */
352 if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { 357 if ((len_stat & (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) !=
358 (DMADESC_ESOP_MASK >> priv->dma_desc_shift)) {
353 dev->stats.rx_dropped++; 359 dev->stats.rx_dropped++;
354 continue; 360 continue;
355 } 361 }
@@ -410,8 +416,8 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
410 bcm_enet_refill_rx(dev); 416 bcm_enet_refill_rx(dev);
411 417
412 /* kick rx dma */ 418 /* kick rx dma */
413 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, 419 enet_dmac_writel(priv, priv->dma_chan_en_mask,
414 ENETDMAC_CHANCFG_REG(priv->rx_chan)); 420 ENETDMAC_CHANCFG, priv->rx_chan);
415 } 421 }
416 422
417 return processed; 423 return processed;
@@ -486,10 +492,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
486 dev = priv->net_dev; 492 dev = priv->net_dev;
487 493
488 /* ack interrupts */ 494 /* ack interrupts */
489 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 495 enet_dmac_writel(priv, priv->dma_chan_int_mask,
490 ENETDMAC_IR_REG(priv->rx_chan)); 496 ENETDMAC_IR, priv->rx_chan);
491 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 497 enet_dmac_writel(priv, priv->dma_chan_int_mask,
492 ENETDMAC_IR_REG(priv->tx_chan)); 498 ENETDMAC_IR, priv->tx_chan);
493 499
494 /* reclaim sent skb */ 500 /* reclaim sent skb */
495 tx_work_done = bcm_enet_tx_reclaim(dev, 0); 501 tx_work_done = bcm_enet_tx_reclaim(dev, 0);
@@ -508,10 +514,10 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
508 napi_complete(napi); 514 napi_complete(napi);
509 515
510 /* restore rx/tx interrupt */ 516 /* restore rx/tx interrupt */
511 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 517 enet_dmac_writel(priv, priv->dma_chan_int_mask,
512 ENETDMAC_IRMASK_REG(priv->rx_chan)); 518 ENETDMAC_IRMASK, priv->rx_chan);
513 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 519 enet_dmac_writel(priv, priv->dma_chan_int_mask,
514 ENETDMAC_IRMASK_REG(priv->tx_chan)); 520 ENETDMAC_IRMASK, priv->tx_chan);
515 521
516 return rx_work_done; 522 return rx_work_done;
517} 523}
@@ -554,8 +560,8 @@ static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
554 priv = netdev_priv(dev); 560 priv = netdev_priv(dev);
555 561
556 /* mask rx/tx interrupts */ 562 /* mask rx/tx interrupts */
557 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); 563 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
558 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); 564 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
559 565
560 napi_schedule(&priv->napi); 566 napi_schedule(&priv->napi);
561 567
@@ -616,14 +622,14 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
616 DMA_TO_DEVICE); 622 DMA_TO_DEVICE);
617 623
618 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; 624 len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
619 len_stat |= DMADESC_ESOP_MASK | 625 len_stat |= (DMADESC_ESOP_MASK >> priv->dma_desc_shift) |
620 DMADESC_APPEND_CRC | 626 DMADESC_APPEND_CRC |
621 DMADESC_OWNER_MASK; 627 DMADESC_OWNER_MASK;
622 628
623 priv->tx_curr_desc++; 629 priv->tx_curr_desc++;
624 if (priv->tx_curr_desc == priv->tx_ring_size) { 630 if (priv->tx_curr_desc == priv->tx_ring_size) {
625 priv->tx_curr_desc = 0; 631 priv->tx_curr_desc = 0;
626 len_stat |= DMADESC_WRAP_MASK; 632 len_stat |= (DMADESC_WRAP_MASK >> priv->dma_desc_shift);
627 } 633 }
628 priv->tx_desc_count--; 634 priv->tx_desc_count--;
629 635
@@ -634,8 +640,8 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
634 wmb(); 640 wmb();
635 641
636 /* kick tx dma */ 642 /* kick tx dma */
637 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, 643 enet_dmac_writel(priv, priv->dma_chan_en_mask,
638 ENETDMAC_CHANCFG_REG(priv->tx_chan)); 644 ENETDMAC_CHANCFG, priv->tx_chan);
639 645
640 /* stop queue if no more desc available */ 646 /* stop queue if no more desc available */
641 if (!priv->tx_desc_count) 647 if (!priv->tx_desc_count)
@@ -763,6 +769,9 @@ static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
763 val &= ~ENET_RXCFG_ENFLOW_MASK; 769 val &= ~ENET_RXCFG_ENFLOW_MASK;
764 enet_writel(priv, val, ENET_RXCFG_REG); 770 enet_writel(priv, val, ENET_RXCFG_REG);
765 771
772 if (!priv->dma_has_sram)
773 return;
774
766 /* tx flow control (pause frame generation) */ 775 /* tx flow control (pause frame generation) */
767 val = enet_dma_readl(priv, ENETDMA_CFG_REG); 776 val = enet_dma_readl(priv, ENETDMA_CFG_REG);
768 if (tx_en) 777 if (tx_en)
@@ -910,8 +919,8 @@ static int bcm_enet_open(struct net_device *dev)
910 919
911 /* mask all interrupts and request them */ 920 /* mask all interrupts and request them */
912 enet_writel(priv, 0, ENET_IRMASK_REG); 921 enet_writel(priv, 0, ENET_IRMASK_REG);
913 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); 922 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
914 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); 923 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
915 924
916 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); 925 ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
917 if (ret) 926 if (ret)
@@ -986,8 +995,12 @@ static int bcm_enet_open(struct net_device *dev)
986 priv->rx_curr_desc = 0; 995 priv->rx_curr_desc = 0;
987 996
988 /* initialize flow control buffer allocation */ 997 /* initialize flow control buffer allocation */
989 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, 998 if (priv->dma_has_sram)
990 ENETDMA_BUFALLOC_REG(priv->rx_chan)); 999 enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1000 ENETDMA_BUFALLOC_REG(priv->rx_chan));
1001 else
1002 enet_dmac_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
1003 ENETDMAC_BUFALLOC, priv->rx_chan);
991 1004
992 if (bcm_enet_refill_rx(dev)) { 1005 if (bcm_enet_refill_rx(dev)) {
993 dev_err(kdev, "cannot allocate rx skb queue\n"); 1006 dev_err(kdev, "cannot allocate rx skb queue\n");
@@ -996,18 +1009,30 @@ static int bcm_enet_open(struct net_device *dev)
996 } 1009 }
997 1010
998 /* write rx & tx ring addresses */ 1011 /* write rx & tx ring addresses */
999 enet_dmas_writel(priv, priv->rx_desc_dma, 1012 if (priv->dma_has_sram) {
1000 ENETDMAS_RSTART_REG(priv->rx_chan)); 1013 enet_dmas_writel(priv, priv->rx_desc_dma,
1001 enet_dmas_writel(priv, priv->tx_desc_dma, 1014 ENETDMAS_RSTART_REG, priv->rx_chan);
1002 ENETDMAS_RSTART_REG(priv->tx_chan)); 1015 enet_dmas_writel(priv, priv->tx_desc_dma,
1016 ENETDMAS_RSTART_REG, priv->tx_chan);
1017 } else {
1018 enet_dmac_writel(priv, priv->rx_desc_dma,
1019 ENETDMAC_RSTART, priv->rx_chan);
1020 enet_dmac_writel(priv, priv->tx_desc_dma,
1021 ENETDMAC_RSTART, priv->tx_chan);
1022 }
1003 1023
1004 /* clear remaining state ram for rx & tx channel */ 1024 /* clear remaining state ram for rx & tx channel */
1005 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan)); 1025 if (priv->dma_has_sram) {
1006 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan)); 1026 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
1007 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan)); 1027 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
1008 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan)); 1028 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
1009 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan)); 1029 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
1010 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan)); 1030 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
1031 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
1032 } else {
1033 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->rx_chan);
1034 enet_dmac_writel(priv, 0, ENETDMAC_FC, priv->tx_chan);
1035 }
1011 1036
1012 /* set max rx/tx length */ 1037 /* set max rx/tx length */
1013 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); 1038 enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
@@ -1015,18 +1040,24 @@ static int bcm_enet_open(struct net_device *dev)
1015 1040
1016 /* set dma maximum burst len */ 1041 /* set dma maximum burst len */
1017 enet_dmac_writel(priv, priv->dma_maxburst, 1042 enet_dmac_writel(priv, priv->dma_maxburst,
1018 ENETDMAC_MAXBURST_REG(priv->rx_chan)); 1043 ENETDMAC_MAXBURST, priv->rx_chan);
1019 enet_dmac_writel(priv, priv->dma_maxburst, 1044 enet_dmac_writel(priv, priv->dma_maxburst,
1020 ENETDMAC_MAXBURST_REG(priv->tx_chan)); 1045 ENETDMAC_MAXBURST, priv->tx_chan);
1021 1046
1022 /* set correct transmit fifo watermark */ 1047 /* set correct transmit fifo watermark */
1023 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); 1048 enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
1024 1049
1025 /* set flow control low/high threshold to 1/3 / 2/3 */ 1050 /* set flow control low/high threshold to 1/3 / 2/3 */
1026 val = priv->rx_ring_size / 3; 1051 if (priv->dma_has_sram) {
1027 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); 1052 val = priv->rx_ring_size / 3;
1028 val = (priv->rx_ring_size * 2) / 3; 1053 enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
1029 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); 1054 val = (priv->rx_ring_size * 2) / 3;
1055 enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
1056 } else {
1057 enet_dmac_writel(priv, 5, ENETDMAC_FC, priv->rx_chan);
1058 enet_dmac_writel(priv, priv->rx_ring_size, ENETDMAC_LEN, priv->rx_chan);
1059 enet_dmac_writel(priv, priv->tx_ring_size, ENETDMAC_LEN, priv->tx_chan);
1060 }
1030 1061
1031 /* all set, enable mac and interrupts, start dma engine and 1062 /* all set, enable mac and interrupts, start dma engine and
1032 * kick rx dma channel */ 1063 * kick rx dma channel */
@@ -1035,26 +1066,26 @@ static int bcm_enet_open(struct net_device *dev)
1035 val |= ENET_CTL_ENABLE_MASK; 1066 val |= ENET_CTL_ENABLE_MASK;
1036 enet_writel(priv, val, ENET_CTL_REG); 1067 enet_writel(priv, val, ENET_CTL_REG);
1037 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 1068 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
1038 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, 1069 enet_dmac_writel(priv, priv->dma_chan_en_mask,
1039 ENETDMAC_CHANCFG_REG(priv->rx_chan)); 1070 ENETDMAC_CHANCFG, priv->rx_chan);
1040 1071
1041 /* watch "mib counters about to overflow" interrupt */ 1072 /* watch "mib counters about to overflow" interrupt */
1042 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); 1073 enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
1043 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); 1074 enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
1044 1075
1045 /* watch "packet transferred" interrupt in rx and tx */ 1076 /* watch "packet transferred" interrupt in rx and tx */
1046 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 1077 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1047 ENETDMAC_IR_REG(priv->rx_chan)); 1078 ENETDMAC_IR, priv->rx_chan);
1048 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 1079 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1049 ENETDMAC_IR_REG(priv->tx_chan)); 1080 ENETDMAC_IR, priv->tx_chan);
1050 1081
1051 /* make sure we enable napi before rx interrupt */ 1082 /* make sure we enable napi before rx interrupt */
1052 napi_enable(&priv->napi); 1083 napi_enable(&priv->napi);
1053 1084
1054 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 1085 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1055 ENETDMAC_IRMASK_REG(priv->rx_chan)); 1086 ENETDMAC_IRMASK, priv->rx_chan);
1056 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 1087 enet_dmac_writel(priv, priv->dma_chan_int_mask,
1057 ENETDMAC_IRMASK_REG(priv->tx_chan)); 1088 ENETDMAC_IRMASK, priv->tx_chan);
1058 1089
1059 if (priv->has_phy) 1090 if (priv->has_phy)
1060 phy_start(priv->phydev); 1091 phy_start(priv->phydev);
@@ -1134,13 +1165,13 @@ static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
1134{ 1165{
1135 int limit; 1166 int limit;
1136 1167
1137 enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan)); 1168 enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG, chan);
1138 1169
1139 limit = 1000; 1170 limit = 1000;
1140 do { 1171 do {
1141 u32 val; 1172 u32 val;
1142 1173
1143 val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan)); 1174 val = enet_dmac_readl(priv, ENETDMAC_CHANCFG, chan);
1144 if (!(val & ENETDMAC_CHANCFG_EN_MASK)) 1175 if (!(val & ENETDMAC_CHANCFG_EN_MASK))
1145 break; 1176 break;
1146 udelay(1); 1177 udelay(1);
@@ -1167,8 +1198,8 @@ static int bcm_enet_stop(struct net_device *dev)
1167 1198
1168 /* mask all interrupts */ 1199 /* mask all interrupts */
1169 enet_writel(priv, 0, ENET_IRMASK_REG); 1200 enet_writel(priv, 0, ENET_IRMASK_REG);
1170 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); 1201 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
1171 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); 1202 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
1172 1203
1173 /* make sure no mib update is scheduled */ 1204 /* make sure no mib update is scheduled */
1174 cancel_work_sync(&priv->mib_update_task); 1205 cancel_work_sync(&priv->mib_update_task);
@@ -1782,6 +1813,11 @@ static int bcm_enet_probe(struct platform_device *pdev)
1782 priv->pause_tx = pd->pause_tx; 1813 priv->pause_tx = pd->pause_tx;
1783 priv->force_duplex_full = pd->force_duplex_full; 1814 priv->force_duplex_full = pd->force_duplex_full;
1784 priv->force_speed_100 = pd->force_speed_100; 1815 priv->force_speed_100 = pd->force_speed_100;
1816 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
1817 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
1818 priv->dma_chan_width = pd->dma_chan_width;
1819 priv->dma_has_sram = pd->dma_has_sram;
1820 priv->dma_desc_shift = pd->dma_desc_shift;
1785 } 1821 }
1786 1822
1787 if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { 1823 if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
@@ -2118,8 +2154,8 @@ static int bcm_enetsw_open(struct net_device *dev)
2118 kdev = &priv->pdev->dev; 2154 kdev = &priv->pdev->dev;
2119 2155
2120 /* mask all interrupts and request them */ 2156 /* mask all interrupts and request them */
2121 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); 2157 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2122 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); 2158 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2123 2159
2124 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, 2160 ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
2125 IRQF_DISABLED, dev->name, dev); 2161 IRQF_DISABLED, dev->name, dev);
@@ -2231,23 +2267,23 @@ static int bcm_enetsw_open(struct net_device *dev)
2231 2267
2232 /* write rx & tx ring addresses */ 2268 /* write rx & tx ring addresses */
2233 enet_dmas_writel(priv, priv->rx_desc_dma, 2269 enet_dmas_writel(priv, priv->rx_desc_dma,
2234 ENETDMAS_RSTART_REG(priv->rx_chan)); 2270 ENETDMAS_RSTART_REG, priv->rx_chan);
2235 enet_dmas_writel(priv, priv->tx_desc_dma, 2271 enet_dmas_writel(priv, priv->tx_desc_dma,
2236 ENETDMAS_RSTART_REG(priv->tx_chan)); 2272 ENETDMAS_RSTART_REG, priv->tx_chan);
2237 2273
2238 /* clear remaining state ram for rx & tx channel */ 2274 /* clear remaining state ram for rx & tx channel */
2239 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan)); 2275 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->rx_chan);
2240 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan)); 2276 enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG, priv->tx_chan);
2241 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan)); 2277 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->rx_chan);
2242 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan)); 2278 enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG, priv->tx_chan);
2243 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan)); 2279 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->rx_chan);
2244 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan)); 2280 enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG, priv->tx_chan);
2245 2281
2246 /* set dma maximum burst len */ 2282 /* set dma maximum burst len */
2247 enet_dmac_writel(priv, priv->dma_maxburst, 2283 enet_dmac_writel(priv, priv->dma_maxburst,
2248 ENETDMAC_MAXBURST_REG(priv->rx_chan)); 2284 ENETDMAC_MAXBURST, priv->rx_chan);
2249 enet_dmac_writel(priv, priv->dma_maxburst, 2285 enet_dmac_writel(priv, priv->dma_maxburst,
2250 ENETDMAC_MAXBURST_REG(priv->tx_chan)); 2286 ENETDMAC_MAXBURST, priv->tx_chan);
2251 2287
2252 /* set flow control low/high threshold to 1/3 / 2/3 */ 2288 /* set flow control low/high threshold to 1/3 / 2/3 */
2253 val = priv->rx_ring_size / 3; 2289 val = priv->rx_ring_size / 3;
@@ -2261,21 +2297,21 @@ static int bcm_enetsw_open(struct net_device *dev)
2261 wmb(); 2297 wmb();
2262 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 2298 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
2263 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, 2299 enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK,
2264 ENETDMAC_CHANCFG_REG(priv->rx_chan)); 2300 ENETDMAC_CHANCFG, priv->rx_chan);
2265 2301
2266 /* watch "packet transferred" interrupt in rx and tx */ 2302 /* watch "packet transferred" interrupt in rx and tx */
2267 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 2303 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2268 ENETDMAC_IR_REG(priv->rx_chan)); 2304 ENETDMAC_IR, priv->rx_chan);
2269 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 2305 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2270 ENETDMAC_IR_REG(priv->tx_chan)); 2306 ENETDMAC_IR, priv->tx_chan);
2271 2307
2272 /* make sure we enable napi before rx interrupt */ 2308 /* make sure we enable napi before rx interrupt */
2273 napi_enable(&priv->napi); 2309 napi_enable(&priv->napi);
2274 2310
2275 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 2311 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2276 ENETDMAC_IRMASK_REG(priv->rx_chan)); 2312 ENETDMAC_IRMASK, priv->rx_chan);
2277 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, 2313 enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK,
2278 ENETDMAC_IRMASK_REG(priv->tx_chan)); 2314 ENETDMAC_IRMASK, priv->tx_chan);
2279 2315
2280 netif_carrier_on(dev); 2316 netif_carrier_on(dev);
2281 netif_start_queue(dev); 2317 netif_start_queue(dev);
@@ -2377,8 +2413,8 @@ static int bcm_enetsw_stop(struct net_device *dev)
2377 del_timer_sync(&priv->rx_timeout); 2413 del_timer_sync(&priv->rx_timeout);
2378 2414
2379 /* mask all interrupts */ 2415 /* mask all interrupts */
2380 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); 2416 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->rx_chan);
2381 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); 2417 enet_dmac_writel(priv, 0, ENETDMAC_IRMASK, priv->tx_chan);
2382 2418
2383 /* disable dma & mac */ 2419 /* disable dma & mac */
2384 bcm_enet_disable_dma(priv, priv->tx_chan); 2420 bcm_enet_disable_dma(priv, priv->tx_chan);
@@ -2712,6 +2748,10 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
2712 memcpy(priv->used_ports, pd->used_ports, 2748 memcpy(priv->used_ports, pd->used_ports,
2713 sizeof(pd->used_ports)); 2749 sizeof(pd->used_ports));
2714 priv->num_ports = pd->num_ports; 2750 priv->num_ports = pd->num_ports;
2751 priv->dma_has_sram = pd->dma_has_sram;
2752 priv->dma_chan_en_mask = pd->dma_chan_en_mask;
2753 priv->dma_chan_int_mask = pd->dma_chan_int_mask;
2754 priv->dma_chan_width = pd->dma_chan_width;
2715 } 2755 }
2716 2756
2717 ret = compute_hw_mtu(priv, dev->mtu); 2757 ret = compute_hw_mtu(priv, dev->mtu);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
index 721ffbaef8d2..f55af4310085 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -339,6 +339,21 @@ struct bcm_enet_priv {
339 /* used to poll switch port state */ 339 /* used to poll switch port state */
340 struct timer_list swphy_poll; 340 struct timer_list swphy_poll;
341 spinlock_t enetsw_mdio_lock; 341 spinlock_t enetsw_mdio_lock;
342
343 /* dma channel enable mask */
344 u32 dma_chan_en_mask;
345
346 /* dma channel interrupt mask */
347 u32 dma_chan_int_mask;
348
349 /* DMA engine has internal SRAM */
350 bool dma_has_sram;
351
352 /* dma channel width */
353 unsigned int dma_chan_width;
354
355 /* dma descriptor shift value */
356 unsigned int dma_desc_shift;
342}; 357};
343 358
344 359