aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2011-05-19 19:40:47 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-05-19 19:40:47 -0400
commitfce4a1dda2f1a9a25b3e5b7cd951070e0b42a818 (patch)
tree03a3f76c5b3d4f3b05dff44c307dbbb64ec5c510 /drivers
parente1f2084ed200eb31f2c9d1efe70569c76889c980 (diff)
parent6f6c3c33c027f2c83d53e8562cd9daa73fe8108b (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus: (48 commits) MIPS: Move arch_get_unmapped_area and gang to new file. MIPS: Cleanup arch_get_unmapped_area MIPS: Octeon: Don't request interrupts for unused IPI mailbox bits. Octeon: Fix interrupt irq settings for performance counters. MIPS: Fix build warnings on defconfigs MIPS: Lemote 2F, Malta: Fix build warning MIPS: Set ELF AT_PLATFORM string for Loongson2 processors MIPS: Set ELF AT_PLATFORM string for BMIPS processors MIPS: Introduce set_elf_platform() helper function MIPS: JZ4740: setup: Autodetect physical memory. MIPS: BCM47xx: Fix MAC address parsing. MIPS: BCM47xx: Extend the filling of SPROM from NVRAM MIPS: BCM47xx: Register SSB fallback sprom callback MIPS: BCM47xx: Extend bcm47xx_fill_sprom with prefix. SSB: Change fallback sprom to callback mechanism. MIPS: Alchemy: Clean up GPIO registers and accessors MIPS: Alchemy: Cleanup DMA addresses MIPS: Alchemy: Rewrite ethernet platform setup MIPS: Alchemy: Rewrite UART setup and constants. MIPS: Alchemy: Convert dbdma.c to syscore_ops ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/mtd/maps/Kconfig7
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/lantiq-flash.c251
-rw-r--r--drivers/mtd/nand/au1550nd.c3
-rw-r--r--drivers/net/Kconfig7
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/lantiq_etop.c805
-rw-r--r--drivers/ssb/pci.c16
-rw-r--r--drivers/ssb/sprom.c43
-rw-r--r--drivers/ssb/ssb_private.h3
-rw-r--r--drivers/tty/serial/Kconfig8
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/lantiq.c756
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/lantiq_wdt.c261
-rw-r--r--drivers/watchdog/mtx-1_wdt.c21
17 files changed, 2157 insertions, 34 deletions
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 44b1f46458ca..5069111c81cc 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -260,6 +260,13 @@ config MTD_BCM963XX
260 Support for parsing CFE image tag and creating MTD partitions on 260 Support for parsing CFE image tag and creating MTD partitions on
261 Broadcom BCM63xx boards. 261 Broadcom BCM63xx boards.
262 262
263config MTD_LANTIQ
264 tristate "Lantiq SoC NOR support"
265 depends on LANTIQ
266 select MTD_PARTITIONS
267 help
268 Support for NOR flash attached to the Lantiq SoC's External Bus Unit.
269
263config MTD_DILNETPC 270config MTD_DILNETPC
264 tristate "CFI Flash device mapped on DIL/Net PC" 271 tristate "CFI Flash device mapped on DIL/Net PC"
265 depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN 272 depends on X86 && MTD_PARTITIONS && MTD_CFI_INTELEXT && BROKEN
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 08533bd5cba7..6adf4c9b9057 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -60,3 +60,4 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o
60obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o 60obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
61obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o 61obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
62obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o 62obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
63obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
new file mode 100644
index 000000000000..a90cabd7b84d
--- /dev/null
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -0,0 +1,251 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2004 Liu Peng Infineon IFAP DC COM CPE
7 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
8 */
9
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/kernel.h>
13#include <linux/io.h>
14#include <linux/slab.h>
15#include <linux/init.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/map.h>
18#include <linux/mtd/partitions.h>
19#include <linux/mtd/cfi.h>
20#include <linux/platform_device.h>
21#include <linux/mtd/physmap.h>
22
23#include <lantiq_soc.h>
24#include <lantiq_platform.h>
25
26/*
27 * The NOR flash is connected to the same external bus unit (EBU) as PCI.
28 * To make PCI work we need to enable the endianness swapping for the address
29 * written to the EBU. This endianness swapping works for PCI correctly but
30 * fails for attached NOR devices. To workaround this we need to use a complex
31 * map. The workaround involves swapping all addresses whilst probing the chip.
32 * Once probing is complete we stop swapping the addresses but swizzle the
33 * unlock addresses to ensure that access to the NOR device works correctly.
34 */
35
/*
 * Address-swap state for the complex map (see the comment above):
 * addresses are XOR-swapped while the CFI probe runs, then accessed
 * normally (with swizzled unlock addresses) afterwards.
 */
enum {
	LTQ_NOR_PROBING,
	LTQ_NOR_NORMAL
};

/* Per-device state: the MEM resource, the probed MTD and its map. */
struct ltq_mtd {
	struct resource *res;	/* MEM resource from the platform device */
	struct mtd_info *mtd;	/* result of do_map_probe(), NULL until probed */
	struct map_info *map;	/* map used by the MTD chip drivers */
};

static char ltq_map_name[] = "ltq_nor";
48
49static map_word
50ltq_read16(struct map_info *map, unsigned long adr)
51{
52 unsigned long flags;
53 map_word temp;
54
55 if (map->map_priv_1 == LTQ_NOR_PROBING)
56 adr ^= 2;
57 spin_lock_irqsave(&ebu_lock, flags);
58 temp.x[0] = *(u16 *)(map->virt + adr);
59 spin_unlock_irqrestore(&ebu_lock, flags);
60 return temp;
61}
62
63static void
64ltq_write16(struct map_info *map, map_word d, unsigned long adr)
65{
66 unsigned long flags;
67
68 if (map->map_priv_1 == LTQ_NOR_PROBING)
69 adr ^= 2;
70 spin_lock_irqsave(&ebu_lock, flags);
71 *(u16 *)(map->virt + adr) = d.x[0];
72 spin_unlock_irqrestore(&ebu_lock, flags);
73}
74
75/*
76 * The following 2 functions copy data between iomem and a cached memory
77 * section. As memcpy() makes use of pre-fetching we cannot use it here.
78 * The normal alternative of using memcpy_{to,from}io also makes use of
79 * memcpy() on MIPS so it is not applicable either. We are therefore stuck
80 * with having to use our own loop.
81 */
82static void
83ltq_copy_from(struct map_info *map, void *to,
84 unsigned long from, ssize_t len)
85{
86 unsigned char *f = (unsigned char *)map->virt + from;
87 unsigned char *t = (unsigned char *)to;
88 unsigned long flags;
89
90 spin_lock_irqsave(&ebu_lock, flags);
91 while (len--)
92 *t++ = *f++;
93 spin_unlock_irqrestore(&ebu_lock, flags);
94}
95
96static void
97ltq_copy_to(struct map_info *map, unsigned long to,
98 const void *from, ssize_t len)
99{
100 unsigned char *f = (unsigned char *)from;
101 unsigned char *t = (unsigned char *)map->virt + to;
102 unsigned long flags;
103
104 spin_lock_irqsave(&ebu_lock, flags);
105 while (len--)
106 *t++ = *f++;
107 spin_unlock_irqrestore(&ebu_lock, flags);
108}
109
110static const char const *part_probe_types[] = { "cmdlinepart", NULL };
111
112static int __init
113ltq_mtd_probe(struct platform_device *pdev)
114{
115 struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev);
116 struct ltq_mtd *ltq_mtd;
117 struct mtd_partition *parts;
118 struct resource *res;
119 int nr_parts = 0;
120 struct cfi_private *cfi;
121 int err;
122
123 ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
124 platform_set_drvdata(pdev, ltq_mtd);
125
126 ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
127 if (!ltq_mtd->res) {
128 dev_err(&pdev->dev, "failed to get memory resource");
129 err = -ENOENT;
130 goto err_out;
131 }
132
133 res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
134 resource_size(ltq_mtd->res), dev_name(&pdev->dev));
135 if (!ltq_mtd->res) {
136 dev_err(&pdev->dev, "failed to request mem resource");
137 err = -EBUSY;
138 goto err_out;
139 }
140
141 ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
142 ltq_mtd->map->phys = res->start;
143 ltq_mtd->map->size = resource_size(res);
144 ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev,
145 ltq_mtd->map->phys, ltq_mtd->map->size);
146 if (!ltq_mtd->map->virt) {
147 dev_err(&pdev->dev, "failed to ioremap!\n");
148 err = -ENOMEM;
149 goto err_free;
150 }
151
152 ltq_mtd->map->name = ltq_map_name;
153 ltq_mtd->map->bankwidth = 2;
154 ltq_mtd->map->read = ltq_read16;
155 ltq_mtd->map->write = ltq_write16;
156 ltq_mtd->map->copy_from = ltq_copy_from;
157 ltq_mtd->map->copy_to = ltq_copy_to;
158
159 ltq_mtd->map->map_priv_1 = LTQ_NOR_PROBING;
160 ltq_mtd->mtd = do_map_probe("cfi_probe", ltq_mtd->map);
161 ltq_mtd->map->map_priv_1 = LTQ_NOR_NORMAL;
162
163 if (!ltq_mtd->mtd) {
164 dev_err(&pdev->dev, "probing failed\n");
165 err = -ENXIO;
166 goto err_unmap;
167 }
168
169 ltq_mtd->mtd->owner = THIS_MODULE;
170
171 cfi = ltq_mtd->map->fldrv_priv;
172 cfi->addr_unlock1 ^= 1;
173 cfi->addr_unlock2 ^= 1;
174
175 nr_parts = parse_mtd_partitions(ltq_mtd->mtd,
176 part_probe_types, &parts, 0);
177 if (nr_parts > 0) {
178 dev_info(&pdev->dev,
179 "using %d partitions from cmdline", nr_parts);
180 } else {
181 nr_parts = ltq_mtd_data->nr_parts;
182 parts = ltq_mtd_data->parts;
183 }
184
185 err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts);
186 if (err) {
187 dev_err(&pdev->dev, "failed to add partitions\n");
188 goto err_destroy;
189 }
190
191 return 0;
192
193err_destroy:
194 map_destroy(ltq_mtd->mtd);
195err_unmap:
196 iounmap(ltq_mtd->map->virt);
197err_free:
198 kfree(ltq_mtd->map);
199err_out:
200 kfree(ltq_mtd);
201 return err;
202}
203
204static int __devexit
205ltq_mtd_remove(struct platform_device *pdev)
206{
207 struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
208
209 if (ltq_mtd) {
210 if (ltq_mtd->mtd) {
211 del_mtd_partitions(ltq_mtd->mtd);
212 map_destroy(ltq_mtd->mtd);
213 }
214 if (ltq_mtd->map->virt)
215 iounmap(ltq_mtd->map->virt);
216 kfree(ltq_mtd->map);
217 kfree(ltq_mtd);
218 }
219 return 0;
220}
221
222static struct platform_driver ltq_mtd_driver = {
223 .remove = __devexit_p(ltq_mtd_remove),
224 .driver = {
225 .name = "ltq_nor",
226 .owner = THIS_MODULE,
227 },
228};
229
230static int __init
231init_ltq_mtd(void)
232{
233 int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
234
235 if (ret)
236 pr_err("ltq_nor: error registering platform driver");
237 return ret;
238}
239
240static void __exit
241exit_ltq_mtd(void)
242{
243 platform_driver_unregister(&ltq_mtd_driver);
244}
245
246module_init(init_ltq_mtd);
247module_exit(exit_ltq_mtd);
248
249MODULE_LICENSE("GPL");
250MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
251MODULE_DESCRIPTION("Lantiq SoC NOR");
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 3ffe05db4923..5d513b54a7d7 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/gpio.h>
13#include <linux/init.h> 14#include <linux/init.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/interrupt.h> 16#include <linux/interrupt.h>
@@ -470,7 +471,7 @@ static int __init au1xxx_nand_init(void)
470 471
471#ifdef CONFIG_MIPS_PB1550 472#ifdef CONFIG_MIPS_PB1550
472 /* set gpio206 high */ 473 /* set gpio206 high */
473 au_writel(au_readl(GPIO2_DIR) & ~(1 << 6), GPIO2_DIR); 474 gpio_direction_input(206);
474 475
475 boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1); 476 boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
476 477
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 6c884ef1b069..19f04a34783a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2017,6 +2017,13 @@ config FTMAC100
2017 from Faraday. It is used on Faraday A320, Andes AG101 and some 2017 from Faraday. It is used on Faraday A320, Andes AG101 and some
2018 other ARM/NDS32 SoC's. 2018 other ARM/NDS32 SoC's.
2019 2019
2020config LANTIQ_ETOP
2021 tristate "Lantiq SoC ETOP driver"
2022 depends on SOC_TYPE_XWAY
2023 help
2024 Support for the MII0 inside the Lantiq SoC
2025
2026
2020source "drivers/net/fs_enet/Kconfig" 2027source "drivers/net/fs_enet/Kconfig"
2021 2028
2022source "drivers/net/octeon/Kconfig" 2029source "drivers/net/octeon/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index e5a7375685ad..209fbb70619b 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -259,6 +259,7 @@ obj-$(CONFIG_MLX4_CORE) += mlx4/
259obj-$(CONFIG_ENC28J60) += enc28j60.o 259obj-$(CONFIG_ENC28J60) += enc28j60.o
260obj-$(CONFIG_ETHOC) += ethoc.o 260obj-$(CONFIG_ETHOC) += ethoc.o
261obj-$(CONFIG_GRETH) += greth.o 261obj-$(CONFIG_GRETH) += greth.o
262obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
262 263
263obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o 264obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
264 265
diff --git a/drivers/net/lantiq_etop.c b/drivers/net/lantiq_etop.c
new file mode 100644
index 000000000000..45f252b7da30
--- /dev/null
+++ b/drivers/net/lantiq_etop.c
@@ -0,0 +1,805 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
14 *
15 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
16 */
17
18#include <linux/kernel.h>
19#include <linux/slab.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/interrupt.h>
23#include <linux/uaccess.h>
24#include <linux/in.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/phy.h>
28#include <linux/ip.h>
29#include <linux/tcp.h>
30#include <linux/skbuff.h>
31#include <linux/mm.h>
32#include <linux/platform_device.h>
33#include <linux/ethtool.h>
34#include <linux/init.h>
35#include <linux/delay.h>
36#include <linux/io.h>
37
38#include <asm/checksum.h>
39
40#include <lantiq_soc.h>
41#include <xway_dma.h>
42#include <lantiq_platform.h>
43
44#define LTQ_ETOP_MDIO 0x11804
45#define MDIO_REQUEST 0x80000000
46#define MDIO_READ 0x40000000
47#define MDIO_ADDR_MASK 0x1f
48#define MDIO_ADDR_OFFSET 0x15
49#define MDIO_REG_MASK 0x1f
50#define MDIO_REG_OFFSET 0x10
51#define MDIO_VAL_MASK 0xffff
52
53#define PPE32_CGEN 0x800
54#define LQ_PPE32_ENET_MAC_CFG 0x1840
55
56#define LTQ_ETOP_ENETS0 0x11850
57#define LTQ_ETOP_MAC_DA0 0x1186C
58#define LTQ_ETOP_MAC_DA1 0x11870
59#define LTQ_ETOP_CFG 0x16020
60#define LTQ_ETOP_IGPLEN 0x16080
61
62#define MAX_DMA_CHAN 0x8
63#define MAX_DMA_CRC_LEN 0x4
64#define MAX_DMA_DATA_LEN 0x600
65
66#define ETOP_FTCU BIT(28)
67#define ETOP_MII_MASK 0xf
68#define ETOP_MII_NORMAL 0xd
69#define ETOP_MII_REVERSE 0xe
70#define ETOP_PLEN_UNDER 0x40
71#define ETOP_CGEN 0x800
72
/* use 2 static channels for TX/RX */
#define LTQ_ETOP_TX_CHANNEL	1
#define LTQ_ETOP_RX_CHANNEL	6
/*
 * Fix: parenthesize the macro argument so expressions such as
 * IS_TX(a + b) expand with the intended precedence.
 */
#define IS_TX(x)		((x) == LTQ_ETOP_TX_CHANNEL)
#define IS_RX(x)		((x) == LTQ_ETOP_RX_CHANNEL)

#define ltq_etop_r32(x)		ltq_r32(ltq_etop_membase + (x))
#define ltq_etop_w32(x, y)	ltq_w32(x, ltq_etop_membase + (y))
#define ltq_etop_w32_mask(x, y, z)	\
		ltq_w32_mask(x, y, ltq_etop_membase + (z))
83
84#define DRV_VERSION "1.0"
85
86static void __iomem *ltq_etop_membase;
87
/* One DMA channel (TX or RX) with its NAPI context and in-flight skbs. */
struct ltq_etop_chan {
	int idx;		/* channel number, doubles as ltq_dma_channel.nr */
	int tx_free;		/* next TX descriptor to reclaim (TX channels) */
	struct net_device *netdev;
	struct napi_struct napi;
	struct ltq_dma_channel dma;
	struct sk_buff *skb[LTQ_DESC_NUM];	/* skb per descriptor slot */
};

/* Driver private state, embedded in the net_device. */
struct ltq_etop_priv {
	struct net_device *netdev;
	struct ltq_eth_data *pldata;	/* platform data (MAC, MII mode) */
	struct resource *res;

	struct mii_bus *mii_bus;
	struct phy_device *phydev;

	struct ltq_etop_chan ch[MAX_DMA_CHAN];
	int tx_free[MAX_DMA_CHAN >> 1];

	spinlock_t lock;	/* serialises descriptor and register access */
};
110
111static int
112ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
113{
114 ch->skb[ch->dma.desc] = dev_alloc_skb(MAX_DMA_DATA_LEN);
115 if (!ch->skb[ch->dma.desc])
116 return -ENOMEM;
117 ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
118 ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
119 DMA_FROM_DEVICE);
120 ch->dma.desc_base[ch->dma.desc].addr =
121 CPHYSADDR(ch->skb[ch->dma.desc]->data);
122 ch->dma.desc_base[ch->dma.desc].ctl =
123 LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
124 MAX_DMA_DATA_LEN;
125 skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
126 return 0;
127}
128
129static void
130ltq_etop_hw_receive(struct ltq_etop_chan *ch)
131{
132 struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
133 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
134 struct sk_buff *skb = ch->skb[ch->dma.desc];
135 int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
136 unsigned long flags;
137
138 spin_lock_irqsave(&priv->lock, flags);
139 if (ltq_etop_alloc_skb(ch)) {
140 netdev_err(ch->netdev,
141 "failed to allocate new rx buffer, stopping DMA\n");
142 ltq_dma_close(&ch->dma);
143 }
144 ch->dma.desc++;
145 ch->dma.desc %= LTQ_DESC_NUM;
146 spin_unlock_irqrestore(&priv->lock, flags);
147
148 skb_put(skb, len);
149 skb->dev = ch->netdev;
150 skb->protocol = eth_type_trans(skb, ch->netdev);
151 netif_receive_skb(skb);
152}
153
154static int
155ltq_etop_poll_rx(struct napi_struct *napi, int budget)
156{
157 struct ltq_etop_chan *ch = container_of(napi,
158 struct ltq_etop_chan, napi);
159 int rx = 0;
160 int complete = 0;
161
162 while ((rx < budget) && !complete) {
163 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
164
165 if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
166 ltq_etop_hw_receive(ch);
167 rx++;
168 } else {
169 complete = 1;
170 }
171 }
172 if (complete || !rx) {
173 napi_complete(&ch->napi);
174 ltq_dma_ack_irq(&ch->dma);
175 }
176 return rx;
177}
178
179static int
180ltq_etop_poll_tx(struct napi_struct *napi, int budget)
181{
182 struct ltq_etop_chan *ch =
183 container_of(napi, struct ltq_etop_chan, napi);
184 struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
185 struct netdev_queue *txq =
186 netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
187 unsigned long flags;
188
189 spin_lock_irqsave(&priv->lock, flags);
190 while ((ch->dma.desc_base[ch->tx_free].ctl &
191 (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
192 dev_kfree_skb_any(ch->skb[ch->tx_free]);
193 ch->skb[ch->tx_free] = NULL;
194 memset(&ch->dma.desc_base[ch->tx_free], 0,
195 sizeof(struct ltq_dma_desc));
196 ch->tx_free++;
197 ch->tx_free %= LTQ_DESC_NUM;
198 }
199 spin_unlock_irqrestore(&priv->lock, flags);
200
201 if (netif_tx_queue_stopped(txq))
202 netif_tx_start_queue(txq);
203 napi_complete(&ch->napi);
204 ltq_dma_ack_irq(&ch->dma);
205 return 1;
206}
207
208static irqreturn_t
209ltq_etop_dma_irq(int irq, void *_priv)
210{
211 struct ltq_etop_priv *priv = _priv;
212 int ch = irq - LTQ_DMA_CH0_INT;
213
214 napi_schedule(&priv->ch[ch].napi);
215 return IRQ_HANDLED;
216}
217
218static void
219ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
220{
221 struct ltq_etop_priv *priv = netdev_priv(dev);
222
223 ltq_dma_free(&ch->dma);
224 if (ch->dma.irq)
225 free_irq(ch->dma.irq, priv);
226 if (IS_RX(ch->idx)) {
227 int desc;
228 for (desc = 0; desc < LTQ_DESC_NUM; desc++)
229 dev_kfree_skb_any(ch->skb[ch->dma.desc]);
230 }
231}
232
233static void
234ltq_etop_hw_exit(struct net_device *dev)
235{
236 struct ltq_etop_priv *priv = netdev_priv(dev);
237 int i;
238
239 ltq_pmu_disable(PMU_PPE);
240 for (i = 0; i < MAX_DMA_CHAN; i++)
241 if (IS_TX(i) || IS_RX(i))
242 ltq_etop_free_channel(dev, &priv->ch[i]);
243}
244
245static int
246ltq_etop_hw_init(struct net_device *dev)
247{
248 struct ltq_etop_priv *priv = netdev_priv(dev);
249 int i;
250
251 ltq_pmu_enable(PMU_PPE);
252
253 switch (priv->pldata->mii_mode) {
254 case PHY_INTERFACE_MODE_RMII:
255 ltq_etop_w32_mask(ETOP_MII_MASK,
256 ETOP_MII_REVERSE, LTQ_ETOP_CFG);
257 break;
258
259 case PHY_INTERFACE_MODE_MII:
260 ltq_etop_w32_mask(ETOP_MII_MASK,
261 ETOP_MII_NORMAL, LTQ_ETOP_CFG);
262 break;
263
264 default:
265 netdev_err(dev, "unknown mii mode %d\n",
266 priv->pldata->mii_mode);
267 return -ENOTSUPP;
268 }
269
270 /* enable crc generation */
271 ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
272
273 ltq_dma_init_port(DMA_PORT_ETOP);
274
275 for (i = 0; i < MAX_DMA_CHAN; i++) {
276 int irq = LTQ_DMA_CH0_INT + i;
277 struct ltq_etop_chan *ch = &priv->ch[i];
278
279 ch->idx = ch->dma.nr = i;
280
281 if (IS_TX(i)) {
282 ltq_dma_alloc_tx(&ch->dma);
283 request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
284 "etop_tx", priv);
285 } else if (IS_RX(i)) {
286 ltq_dma_alloc_rx(&ch->dma);
287 for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
288 ch->dma.desc++)
289 if (ltq_etop_alloc_skb(ch))
290 return -ENOMEM;
291 ch->dma.desc = 0;
292 request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
293 "etop_rx", priv);
294 }
295 ch->dma.irq = irq;
296 }
297 return 0;
298}
299
300static void
301ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
302{
303 strcpy(info->driver, "Lantiq ETOP");
304 strcpy(info->bus_info, "internal");
305 strcpy(info->version, DRV_VERSION);
306}
307
308static int
309ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
310{
311 struct ltq_etop_priv *priv = netdev_priv(dev);
312
313 return phy_ethtool_gset(priv->phydev, cmd);
314}
315
316static int
317ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
318{
319 struct ltq_etop_priv *priv = netdev_priv(dev);
320
321 return phy_ethtool_sset(priv->phydev, cmd);
322}
323
324static int
325ltq_etop_nway_reset(struct net_device *dev)
326{
327 struct ltq_etop_priv *priv = netdev_priv(dev);
328
329 return phy_start_aneg(priv->phydev);
330}
331
332static const struct ethtool_ops ltq_etop_ethtool_ops = {
333 .get_drvinfo = ltq_etop_get_drvinfo,
334 .get_settings = ltq_etop_get_settings,
335 .set_settings = ltq_etop_set_settings,
336 .nway_reset = ltq_etop_nway_reset,
337};
338
339static int
340ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
341{
342 u32 val = MDIO_REQUEST |
343 ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
344 ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
345 phy_data;
346
347 while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
348 ;
349 ltq_etop_w32(val, LTQ_ETOP_MDIO);
350 return 0;
351}
352
353static int
354ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
355{
356 u32 val = MDIO_REQUEST | MDIO_READ |
357 ((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
358 ((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
359
360 while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
361 ;
362 ltq_etop_w32(val, LTQ_ETOP_MDIO);
363 while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
364 ;
365 val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
366 return val;
367}
368
369static void
370ltq_etop_mdio_link(struct net_device *dev)
371{
372 /* nothing to do */
373}
374
375static int
376ltq_etop_mdio_probe(struct net_device *dev)
377{
378 struct ltq_etop_priv *priv = netdev_priv(dev);
379 struct phy_device *phydev = NULL;
380 int phy_addr;
381
382 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
383 if (priv->mii_bus->phy_map[phy_addr]) {
384 phydev = priv->mii_bus->phy_map[phy_addr];
385 break;
386 }
387 }
388
389 if (!phydev) {
390 netdev_err(dev, "no PHY found\n");
391 return -ENODEV;
392 }
393
394 phydev = phy_connect(dev, dev_name(&phydev->dev), &ltq_etop_mdio_link,
395 0, priv->pldata->mii_mode);
396
397 if (IS_ERR(phydev)) {
398 netdev_err(dev, "Could not attach to PHY\n");
399 return PTR_ERR(phydev);
400 }
401
402 phydev->supported &= (SUPPORTED_10baseT_Half
403 | SUPPORTED_10baseT_Full
404 | SUPPORTED_100baseT_Half
405 | SUPPORTED_100baseT_Full
406 | SUPPORTED_Autoneg
407 | SUPPORTED_MII
408 | SUPPORTED_TP);
409
410 phydev->advertising = phydev->supported;
411 priv->phydev = phydev;
412 pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
413 dev->name, phydev->drv->name,
414 dev_name(&phydev->dev), phydev->irq);
415
416 return 0;
417}
418
419static int
420ltq_etop_mdio_init(struct net_device *dev)
421{
422 struct ltq_etop_priv *priv = netdev_priv(dev);
423 int i;
424 int err;
425
426 priv->mii_bus = mdiobus_alloc();
427 if (!priv->mii_bus) {
428 netdev_err(dev, "failed to allocate mii bus\n");
429 err = -ENOMEM;
430 goto err_out;
431 }
432
433 priv->mii_bus->priv = dev;
434 priv->mii_bus->read = ltq_etop_mdio_rd;
435 priv->mii_bus->write = ltq_etop_mdio_wr;
436 priv->mii_bus->name = "ltq_mii";
437 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
438 priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
439 if (!priv->mii_bus->irq) {
440 err = -ENOMEM;
441 goto err_out_free_mdiobus;
442 }
443
444 for (i = 0; i < PHY_MAX_ADDR; ++i)
445 priv->mii_bus->irq[i] = PHY_POLL;
446
447 if (mdiobus_register(priv->mii_bus)) {
448 err = -ENXIO;
449 goto err_out_free_mdio_irq;
450 }
451
452 if (ltq_etop_mdio_probe(dev)) {
453 err = -ENXIO;
454 goto err_out_unregister_bus;
455 }
456 return 0;
457
458err_out_unregister_bus:
459 mdiobus_unregister(priv->mii_bus);
460err_out_free_mdio_irq:
461 kfree(priv->mii_bus->irq);
462err_out_free_mdiobus:
463 mdiobus_free(priv->mii_bus);
464err_out:
465 return err;
466}
467
468static void
469ltq_etop_mdio_cleanup(struct net_device *dev)
470{
471 struct ltq_etop_priv *priv = netdev_priv(dev);
472
473 phy_disconnect(priv->phydev);
474 mdiobus_unregister(priv->mii_bus);
475 kfree(priv->mii_bus->irq);
476 mdiobus_free(priv->mii_bus);
477}
478
479static int
480ltq_etop_open(struct net_device *dev)
481{
482 struct ltq_etop_priv *priv = netdev_priv(dev);
483 int i;
484
485 for (i = 0; i < MAX_DMA_CHAN; i++) {
486 struct ltq_etop_chan *ch = &priv->ch[i];
487
488 if (!IS_TX(i) && (!IS_RX(i)))
489 continue;
490 ltq_dma_open(&ch->dma);
491 napi_enable(&ch->napi);
492 }
493 phy_start(priv->phydev);
494 netif_tx_start_all_queues(dev);
495 return 0;
496}
497
498static int
499ltq_etop_stop(struct net_device *dev)
500{
501 struct ltq_etop_priv *priv = netdev_priv(dev);
502 int i;
503
504 netif_tx_stop_all_queues(dev);
505 phy_stop(priv->phydev);
506 for (i = 0; i < MAX_DMA_CHAN; i++) {
507 struct ltq_etop_chan *ch = &priv->ch[i];
508
509 if (!IS_RX(i) && !IS_TX(i))
510 continue;
511 napi_disable(&ch->napi);
512 ltq_dma_close(&ch->dma);
513 }
514 return 0;
515}
516
517static int
518ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
519{
520 int queue = skb_get_queue_mapping(skb);
521 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
522 struct ltq_etop_priv *priv = netdev_priv(dev);
523 struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
524 struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
525 int len;
526 unsigned long flags;
527 u32 byte_offset;
528
529 len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
530
531 if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
532 dev_kfree_skb_any(skb);
533 netdev_err(dev, "tx ring full\n");
534 netif_tx_stop_queue(txq);
535 return NETDEV_TX_BUSY;
536 }
537
538 /* dma needs to start on a 16 byte aligned address */
539 byte_offset = CPHYSADDR(skb->data) % 16;
540 ch->skb[ch->dma.desc] = skb;
541
542 dev->trans_start = jiffies;
543
544 spin_lock_irqsave(&priv->lock, flags);
545 desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
546 DMA_TO_DEVICE)) - byte_offset;
547 wmb();
548 desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
549 LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
550 ch->dma.desc++;
551 ch->dma.desc %= LTQ_DESC_NUM;
552 spin_unlock_irqrestore(&priv->lock, flags);
553
554 if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
555 netif_tx_stop_queue(txq);
556
557 return NETDEV_TX_OK;
558}
559
560static int
561ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
562{
563 int ret = eth_change_mtu(dev, new_mtu);
564
565 if (!ret) {
566 struct ltq_etop_priv *priv = netdev_priv(dev);
567 unsigned long flags;
568
569 spin_lock_irqsave(&priv->lock, flags);
570 ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu,
571 LTQ_ETOP_IGPLEN);
572 spin_unlock_irqrestore(&priv->lock, flags);
573 }
574 return ret;
575}
576
577static int
578ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
579{
580 struct ltq_etop_priv *priv = netdev_priv(dev);
581
582 /* TODO: mii-toll reports "No MII transceiver present!." ?!*/
583 return phy_mii_ioctl(priv->phydev, rq, cmd);
584}
585
586static int
587ltq_etop_set_mac_address(struct net_device *dev, void *p)
588{
589 int ret = eth_mac_addr(dev, p);
590
591 if (!ret) {
592 struct ltq_etop_priv *priv = netdev_priv(dev);
593 unsigned long flags;
594
595 /* store the mac for the unicast filter */
596 spin_lock_irqsave(&priv->lock, flags);
597 ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
598 ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
599 LTQ_ETOP_MAC_DA1);
600 spin_unlock_irqrestore(&priv->lock, flags);
601 }
602 return ret;
603}
604
605static void
606ltq_etop_set_multicast_list(struct net_device *dev)
607{
608 struct ltq_etop_priv *priv = netdev_priv(dev);
609 unsigned long flags;
610
611 /* ensure that the unicast filter is not enabled in promiscious mode */
612 spin_lock_irqsave(&priv->lock, flags);
613 if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
614 ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
615 else
616 ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
617 spin_unlock_irqrestore(&priv->lock, flags);
618}
619
620static u16
621ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
622{
623 /* we are currently only using the first queue */
624 return 0;
625}
626
627static int
628ltq_etop_init(struct net_device *dev)
629{
630 struct ltq_etop_priv *priv = netdev_priv(dev);
631 struct sockaddr mac;
632 int err;
633
634 ether_setup(dev);
635 dev->watchdog_timeo = 10 * HZ;
636 err = ltq_etop_hw_init(dev);
637 if (err)
638 goto err_hw;
639 ltq_etop_change_mtu(dev, 1500);
640
641 memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
642 if (!is_valid_ether_addr(mac.sa_data)) {
643 pr_warn("etop: invalid MAC, using random\n");
644 random_ether_addr(mac.sa_data);
645 }
646
647 err = ltq_etop_set_mac_address(dev, &mac);
648 if (err)
649 goto err_netdev;
650 ltq_etop_set_multicast_list(dev);
651 err = ltq_etop_mdio_init(dev);
652 if (err)
653 goto err_netdev;
654 return 0;
655
656err_netdev:
657 unregister_netdev(dev);
658 free_netdev(dev);
659err_hw:
660 ltq_etop_hw_exit(dev);
661 return err;
662}
663
664static void
665ltq_etop_tx_timeout(struct net_device *dev)
666{
667 int err;
668
669 ltq_etop_hw_exit(dev);
670 err = ltq_etop_hw_init(dev);
671 if (err)
672 goto err_hw;
673 dev->trans_start = jiffies;
674 netif_wake_queue(dev);
675 return;
676
677err_hw:
678 ltq_etop_hw_exit(dev);
679 netdev_err(dev, "failed to restart etop after TX timeout\n");
680}
681
682static const struct net_device_ops ltq_eth_netdev_ops = {
683 .ndo_open = ltq_etop_open,
684 .ndo_stop = ltq_etop_stop,
685 .ndo_start_xmit = ltq_etop_tx,
686 .ndo_change_mtu = ltq_etop_change_mtu,
687 .ndo_do_ioctl = ltq_etop_ioctl,
688 .ndo_set_mac_address = ltq_etop_set_mac_address,
689 .ndo_validate_addr = eth_validate_addr,
690 .ndo_set_multicast_list = ltq_etop_set_multicast_list,
691 .ndo_select_queue = ltq_etop_select_queue,
692 .ndo_init = ltq_etop_init,
693 .ndo_tx_timeout = ltq_etop_tx_timeout,
694};
695
696static int __init
697ltq_etop_probe(struct platform_device *pdev)
698{
699 struct net_device *dev;
700 struct ltq_etop_priv *priv;
701 struct resource *res;
702 int err;
703 int i;
704
705 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
706 if (!res) {
707 dev_err(&pdev->dev, "failed to get etop resource\n");
708 err = -ENOENT;
709 goto err_out;
710 }
711
712 res = devm_request_mem_region(&pdev->dev, res->start,
713 resource_size(res), dev_name(&pdev->dev));
714 if (!res) {
715 dev_err(&pdev->dev, "failed to request etop resource\n");
716 err = -EBUSY;
717 goto err_out;
718 }
719
720 ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
721 res->start, resource_size(res));
722 if (!ltq_etop_membase) {
723 dev_err(&pdev->dev, "failed to remap etop engine %d\n",
724 pdev->id);
725 err = -ENOMEM;
726 goto err_out;
727 }
728
729 dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
730 strcpy(dev->name, "eth%d");
731 dev->netdev_ops = &ltq_eth_netdev_ops;
732 dev->ethtool_ops = &ltq_etop_ethtool_ops;
733 priv = netdev_priv(dev);
734 priv->res = res;
735 priv->pldata = dev_get_platdata(&pdev->dev);
736 priv->netdev = dev;
737 spin_lock_init(&priv->lock);
738
739 for (i = 0; i < MAX_DMA_CHAN; i++) {
740 if (IS_TX(i))
741 netif_napi_add(dev, &priv->ch[i].napi,
742 ltq_etop_poll_tx, 8);
743 else if (IS_RX(i))
744 netif_napi_add(dev, &priv->ch[i].napi,
745 ltq_etop_poll_rx, 32);
746 priv->ch[i].netdev = dev;
747 }
748
749 err = register_netdev(dev);
750 if (err)
751 goto err_free;
752
753 platform_set_drvdata(pdev, dev);
754 return 0;
755
756err_free:
757 kfree(dev);
758err_out:
759 return err;
760}
761
762static int __devexit
763ltq_etop_remove(struct platform_device *pdev)
764{
765 struct net_device *dev = platform_get_drvdata(pdev);
766
767 if (dev) {
768 netif_tx_stop_all_queues(dev);
769 ltq_etop_hw_exit(dev);
770 ltq_etop_mdio_cleanup(dev);
771 unregister_netdev(dev);
772 }
773 return 0;
774}
775
776static struct platform_driver ltq_mii_driver = {
777 .remove = __devexit_p(ltq_etop_remove),
778 .driver = {
779 .name = "ltq_etop",
780 .owner = THIS_MODULE,
781 },
782};
783
784int __init
785init_ltq_etop(void)
786{
787 int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
788
789 if (ret)
790 pr_err("ltq_etop: Error registering platfom driver!");
791 return ret;
792}
793
794static void __exit
795exit_ltq_etop(void)
796{
797 platform_driver_unregister(&ltq_mii_driver);
798}
799
800module_init(init_ltq_etop);
801module_exit(exit_ltq_etop);
802
803MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
804MODULE_DESCRIPTION("Lantiq SoC ETOP");
805MODULE_LICENSE("GPL");
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 6f34963b3c64..7ad48585c5e6 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -662,7 +662,6 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
662static int ssb_pci_sprom_get(struct ssb_bus *bus, 662static int ssb_pci_sprom_get(struct ssb_bus *bus,
663 struct ssb_sprom *sprom) 663 struct ssb_sprom *sprom)
664{ 664{
665 const struct ssb_sprom *fallback;
666 int err; 665 int err;
667 u16 *buf; 666 u16 *buf;
668 667
@@ -707,10 +706,17 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
707 if (err) { 706 if (err) {
708 /* All CRC attempts failed. 707 /* All CRC attempts failed.
709 * Maybe there is no SPROM on the device? 708 * Maybe there is no SPROM on the device?
710 * If we have a fallback, use that. */ 709 * Now we ask the arch code if there is some sprom
711 fallback = ssb_get_fallback_sprom(); 710 * available for this device in some other storage */
712 if (fallback) { 711 err = ssb_fill_sprom_with_fallback(bus, sprom);
713 memcpy(sprom, fallback, sizeof(*sprom)); 712 if (err) {
713 ssb_printk(KERN_WARNING PFX "WARNING: Using"
714 " fallback SPROM failed (err %d)\n",
715 err);
716 } else {
717 ssb_dprintk(KERN_DEBUG PFX "Using SPROM"
718 " revision %d provided by"
719 " platform.\n", sprom->revision);
714 err = 0; 720 err = 0;
715 goto out_free; 721 goto out_free;
716 } 722 }
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 5f34d7a3e3a5..45ff0e3a3828 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -17,7 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18 18
19 19
20static const struct ssb_sprom *fallback_sprom; 20static int(*get_fallback_sprom)(struct ssb_bus *dev, struct ssb_sprom *out);
21 21
22 22
23static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len, 23static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len,
@@ -145,36 +145,43 @@ out:
145} 145}
146 146
147/** 147/**
148 * ssb_arch_set_fallback_sprom - Set a fallback SPROM for use if no SPROM is found. 148 * ssb_arch_register_fallback_sprom - Registers a method providing a
149 * fallback SPROM if no SPROM is found.
149 * 150 *
150 * @sprom: The SPROM data structure to register. 151 * @sprom_callback: The callback function.
151 * 152 *
152 * With this function the architecture implementation may register a fallback 153 * With this function the architecture implementation may register a
153 * SPROM data structure. The fallback is only used for PCI based SSB devices, 154 * callback handler which fills the SPROM data structure. The fallback is
154 * where no valid SPROM can be found in the shadow registers. 155 * only used for PCI based SSB devices, where no valid SPROM can be found
156 * in the shadow registers.
155 * 157 *
156 * This function is useful for weird architectures that have a half-assed SSB device 158 * This function is useful for weird architectures that have a half-assed
157 * hardwired to their PCI bus. 159 * SSB device hardwired to their PCI bus.
158 * 160 *
159 * Note that it does only work with PCI attached SSB devices. PCMCIA devices currently 161 * Note that it does only work with PCI attached SSB devices. PCMCIA
160 * don't use this fallback. 162 * devices currently don't use this fallback.
161 * Architectures must provide the SPROM for native SSB devices anyway, 163 * Architectures must provide the SPROM for native SSB devices anyway, so
162 * so the fallback also isn't used for native devices. 164 * the fallback also isn't used for native devices.
163 * 165 *
164 * This function is available for architecture code, only. So it is not exported. 166 * This function is available for architecture code, only. So it is not
167 * exported.
165 */ 168 */
166int ssb_arch_set_fallback_sprom(const struct ssb_sprom *sprom) 169int ssb_arch_register_fallback_sprom(int (*sprom_callback)(struct ssb_bus *bus,
170 struct ssb_sprom *out))
167{ 171{
168 if (fallback_sprom) 172 if (get_fallback_sprom)
169 return -EEXIST; 173 return -EEXIST;
170 fallback_sprom = sprom; 174 get_fallback_sprom = sprom_callback;
171 175
172 return 0; 176 return 0;
173} 177}
174 178
175const struct ssb_sprom *ssb_get_fallback_sprom(void) 179int ssb_fill_sprom_with_fallback(struct ssb_bus *bus, struct ssb_sprom *out)
176{ 180{
177 return fallback_sprom; 181 if (!get_fallback_sprom)
182 return -ENOENT;
183
184 return get_fallback_sprom(bus, out);
178} 185}
179 186
180/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */ 187/* http://bcm-v4.sipsolutions.net/802.11/IsSpromAvailable */
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 0331139a726f..77653014db0b 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -171,7 +171,8 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
171 const char *buf, size_t count, 171 const char *buf, size_t count,
172 int (*sprom_check_crc)(const u16 *sprom, size_t size), 172 int (*sprom_check_crc)(const u16 *sprom, size_t size),
173 int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom)); 173 int (*sprom_write)(struct ssb_bus *bus, const u16 *sprom));
174extern const struct ssb_sprom *ssb_get_fallback_sprom(void); 174extern int ssb_fill_sprom_with_fallback(struct ssb_bus *bus,
175 struct ssb_sprom *out);
175 176
176 177
177/* core.c */ 178/* core.c */
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 80484af781e1..b1f0f83b870d 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1391,6 +1391,14 @@ config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE
1391 help 1391 help
1392 Support for Console on the NWP serial ports. 1392 Support for Console on the NWP serial ports.
1393 1393
1394config SERIAL_LANTIQ
1395 bool "Lantiq serial driver"
1396 depends on LANTIQ
1397 select SERIAL_CORE
1398 select SERIAL_CORE_CONSOLE
1399 help
1400 Support for console and UART on Lantiq SoCs.
1401
1394config SERIAL_QE 1402config SERIAL_QE
1395 tristate "Freescale QUICC Engine serial port support" 1403 tristate "Freescale QUICC Engine serial port support"
1396 depends on QUICC_ENGINE 1404 depends on QUICC_ENGINE
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index fee0690ef8e3..35276043d9d1 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -94,3 +94,4 @@ obj-$(CONFIG_SERIAL_IFX6X60) += ifx6x60.o
94obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o 94obj-$(CONFIG_SERIAL_PCH_UART) += pch_uart.o
95obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o 95obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
96obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o 96obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
97obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
new file mode 100644
index 000000000000..58cf279ed879
--- /dev/null
+++ b/drivers/tty/serial/lantiq.c
@@ -0,0 +1,756 @@
1/*
2 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published
6 * by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 * Copyright (C) 2004 Infineon IFAP DC COM CPE
18 * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
19 * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
20 * Copyright (C) 2010 Thomas Langer, <thomas.langer@lantiq.com>
21 */
22
23#include <linux/slab.h>
24#include <linux/module.h>
25#include <linux/ioport.h>
26#include <linux/init.h>
27#include <linux/console.h>
28#include <linux/sysrq.h>
29#include <linux/device.h>
30#include <linux/tty.h>
31#include <linux/tty_flip.h>
32#include <linux/serial_core.h>
33#include <linux/serial.h>
34#include <linux/platform_device.h>
35#include <linux/io.h>
36#include <linux/clk.h>
37
38#include <lantiq_soc.h>
39
40#define PORT_LTQ_ASC 111
41#define MAXPORTS 2
42#define UART_DUMMY_UER_RX 1
43#define DRVNAME "ltq_asc"
44#ifdef __BIG_ENDIAN
45#define LTQ_ASC_TBUF (0x0020 + 3)
46#define LTQ_ASC_RBUF (0x0024 + 3)
47#else
48#define LTQ_ASC_TBUF 0x0020
49#define LTQ_ASC_RBUF 0x0024
50#endif
51#define LTQ_ASC_FSTAT 0x0048
52#define LTQ_ASC_WHBSTATE 0x0018
53#define LTQ_ASC_STATE 0x0014
54#define LTQ_ASC_IRNCR 0x00F8
55#define LTQ_ASC_CLC 0x0000
56#define LTQ_ASC_ID 0x0008
57#define LTQ_ASC_PISEL 0x0004
58#define LTQ_ASC_TXFCON 0x0044
59#define LTQ_ASC_RXFCON 0x0040
60#define LTQ_ASC_CON 0x0010
61#define LTQ_ASC_BG 0x0050
62#define LTQ_ASC_IRNREN 0x00F4
63
64#define ASC_IRNREN_TX 0x1
65#define ASC_IRNREN_RX 0x2
66#define ASC_IRNREN_ERR 0x4
67#define ASC_IRNREN_TX_BUF 0x8
68#define ASC_IRNCR_TIR 0x1
69#define ASC_IRNCR_RIR 0x2
70#define ASC_IRNCR_EIR 0x4
71
72#define ASCOPT_CSIZE 0x3
73#define TXFIFO_FL 1
74#define RXFIFO_FL 1
75#define ASCCLC_DISS 0x2
76#define ASCCLC_RMCMASK 0x0000FF00
77#define ASCCLC_RMCOFFSET 8
78#define ASCCON_M_8ASYNC 0x0
79#define ASCCON_M_7ASYNC 0x2
80#define ASCCON_ODD 0x00000020
81#define ASCCON_STP 0x00000080
82#define ASCCON_BRS 0x00000100
83#define ASCCON_FDE 0x00000200
84#define ASCCON_R 0x00008000
85#define ASCCON_FEN 0x00020000
86#define ASCCON_ROEN 0x00080000
87#define ASCCON_TOEN 0x00100000
88#define ASCSTATE_PE 0x00010000
89#define ASCSTATE_FE 0x00020000
90#define ASCSTATE_ROE 0x00080000
91#define ASCSTATE_ANY (ASCSTATE_ROE|ASCSTATE_PE|ASCSTATE_FE)
92#define ASCWHBSTATE_CLRREN 0x00000001
93#define ASCWHBSTATE_SETREN 0x00000002
94#define ASCWHBSTATE_CLRPE 0x00000004
95#define ASCWHBSTATE_CLRFE 0x00000008
96#define ASCWHBSTATE_CLRROE 0x00000020
97#define ASCTXFCON_TXFEN 0x0001
98#define ASCTXFCON_TXFFLU 0x0002
99#define ASCTXFCON_TXFITLMASK 0x3F00
100#define ASCTXFCON_TXFITLOFF 8
101#define ASCRXFCON_RXFEN 0x0001
102#define ASCRXFCON_RXFFLU 0x0002
103#define ASCRXFCON_RXFITLMASK 0x3F00
104#define ASCRXFCON_RXFITLOFF 8
105#define ASCFSTAT_RXFFLMASK 0x003F
106#define ASCFSTAT_TXFFLMASK 0x3F00
107#define ASCFSTAT_TXFREEMASK 0x3F000000
108#define ASCFSTAT_TXFREEOFF 24
109
110static void lqasc_tx_chars(struct uart_port *port);
111static struct ltq_uart_port *lqasc_port[MAXPORTS];
112static struct uart_driver lqasc_reg;
113static DEFINE_SPINLOCK(ltq_asc_lock);
114
115struct ltq_uart_port {
116 struct uart_port port;
117 struct clk *clk;
118 unsigned int tx_irq;
119 unsigned int rx_irq;
120 unsigned int err_irq;
121};
122
123static inline struct
124ltq_uart_port *to_ltq_uart_port(struct uart_port *port)
125{
126 return container_of(port, struct ltq_uart_port, port);
127}
128
129static void
130lqasc_stop_tx(struct uart_port *port)
131{
132 return;
133}
134
135static void
136lqasc_start_tx(struct uart_port *port)
137{
138 unsigned long flags;
139 spin_lock_irqsave(&ltq_asc_lock, flags);
140 lqasc_tx_chars(port);
141 spin_unlock_irqrestore(&ltq_asc_lock, flags);
142 return;
143}
144
145static void
146lqasc_stop_rx(struct uart_port *port)
147{
148 ltq_w32(ASCWHBSTATE_CLRREN, port->membase + LTQ_ASC_WHBSTATE);
149}
150
151static void
152lqasc_enable_ms(struct uart_port *port)
153{
154}
155
156static int
157lqasc_rx_chars(struct uart_port *port)
158{
159 struct tty_struct *tty = tty_port_tty_get(&port->state->port);
160 unsigned int ch = 0, rsr = 0, fifocnt;
161
162 if (!tty) {
163 dev_dbg(port->dev, "%s:tty is busy now", __func__);
164 return -EBUSY;
165 }
166 fifocnt =
167 ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_RXFFLMASK;
168 while (fifocnt--) {
169 u8 flag = TTY_NORMAL;
170 ch = ltq_r8(port->membase + LTQ_ASC_RBUF);
171 rsr = (ltq_r32(port->membase + LTQ_ASC_STATE)
172 & ASCSTATE_ANY) | UART_DUMMY_UER_RX;
173 tty_flip_buffer_push(tty);
174 port->icount.rx++;
175
176 /*
177 * Note that the error handling code is
178 * out of the main execution path
179 */
180 if (rsr & ASCSTATE_ANY) {
181 if (rsr & ASCSTATE_PE) {
182 port->icount.parity++;
183 ltq_w32_mask(0, ASCWHBSTATE_CLRPE,
184 port->membase + LTQ_ASC_WHBSTATE);
185 } else if (rsr & ASCSTATE_FE) {
186 port->icount.frame++;
187 ltq_w32_mask(0, ASCWHBSTATE_CLRFE,
188 port->membase + LTQ_ASC_WHBSTATE);
189 }
190 if (rsr & ASCSTATE_ROE) {
191 port->icount.overrun++;
192 ltq_w32_mask(0, ASCWHBSTATE_CLRROE,
193 port->membase + LTQ_ASC_WHBSTATE);
194 }
195
196 rsr &= port->read_status_mask;
197
198 if (rsr & ASCSTATE_PE)
199 flag = TTY_PARITY;
200 else if (rsr & ASCSTATE_FE)
201 flag = TTY_FRAME;
202 }
203
204 if ((rsr & port->ignore_status_mask) == 0)
205 tty_insert_flip_char(tty, ch, flag);
206
207 if (rsr & ASCSTATE_ROE)
208 /*
209 * Overrun is special, since it's reported
210 * immediately, and doesn't affect the current
211 * character
212 */
213 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
214 }
215 if (ch != 0)
216 tty_flip_buffer_push(tty);
217 tty_kref_put(tty);
218 return 0;
219}
220
221static void
222lqasc_tx_chars(struct uart_port *port)
223{
224 struct circ_buf *xmit = &port->state->xmit;
225 if (uart_tx_stopped(port)) {
226 lqasc_stop_tx(port);
227 return;
228 }
229
230 while (((ltq_r32(port->membase + LTQ_ASC_FSTAT) &
231 ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF) != 0) {
232 if (port->x_char) {
233 ltq_w8(port->x_char, port->membase + LTQ_ASC_TBUF);
234 port->icount.tx++;
235 port->x_char = 0;
236 continue;
237 }
238
239 if (uart_circ_empty(xmit))
240 break;
241
242 ltq_w8(port->state->xmit.buf[port->state->xmit.tail],
243 port->membase + LTQ_ASC_TBUF);
244 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
245 port->icount.tx++;
246 }
247
248 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
249 uart_write_wakeup(port);
250}
251
252static irqreturn_t
253lqasc_tx_int(int irq, void *_port)
254{
255 unsigned long flags;
256 struct uart_port *port = (struct uart_port *)_port;
257 spin_lock_irqsave(&ltq_asc_lock, flags);
258 ltq_w32(ASC_IRNCR_TIR, port->membase + LTQ_ASC_IRNCR);
259 spin_unlock_irqrestore(&ltq_asc_lock, flags);
260 lqasc_start_tx(port);
261 return IRQ_HANDLED;
262}
263
264static irqreturn_t
265lqasc_err_int(int irq, void *_port)
266{
267 unsigned long flags;
268 struct uart_port *port = (struct uart_port *)_port;
269 spin_lock_irqsave(&ltq_asc_lock, flags);
270 /* clear any pending interrupts */
271 ltq_w32_mask(0, ASCWHBSTATE_CLRPE | ASCWHBSTATE_CLRFE |
272 ASCWHBSTATE_CLRROE, port->membase + LTQ_ASC_WHBSTATE);
273 spin_unlock_irqrestore(&ltq_asc_lock, flags);
274 return IRQ_HANDLED;
275}
276
277static irqreturn_t
278lqasc_rx_int(int irq, void *_port)
279{
280 unsigned long flags;
281 struct uart_port *port = (struct uart_port *)_port;
282 spin_lock_irqsave(&ltq_asc_lock, flags);
283 ltq_w32(ASC_IRNCR_RIR, port->membase + LTQ_ASC_IRNCR);
284 lqasc_rx_chars(port);
285 spin_unlock_irqrestore(&ltq_asc_lock, flags);
286 return IRQ_HANDLED;
287}
288
289static unsigned int
290lqasc_tx_empty(struct uart_port *port)
291{
292 int status;
293 status = ltq_r32(port->membase + LTQ_ASC_FSTAT) & ASCFSTAT_TXFFLMASK;
294 return status ? 0 : TIOCSER_TEMT;
295}
296
297static unsigned int
298lqasc_get_mctrl(struct uart_port *port)
299{
300 return TIOCM_CTS | TIOCM_CAR | TIOCM_DSR;
301}
302
303static void
304lqasc_set_mctrl(struct uart_port *port, u_int mctrl)
305{
306}
307
308static void
309lqasc_break_ctl(struct uart_port *port, int break_state)
310{
311}
312
313static int
314lqasc_startup(struct uart_port *port)
315{
316 struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
317 int retval;
318
319 port->uartclk = clk_get_rate(ltq_port->clk);
320
321 ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
322 port->membase + LTQ_ASC_CLC);
323
324 ltq_w32(0, port->membase + LTQ_ASC_PISEL);
325 ltq_w32(
326 ((TXFIFO_FL << ASCTXFCON_TXFITLOFF) & ASCTXFCON_TXFITLMASK) |
327 ASCTXFCON_TXFEN | ASCTXFCON_TXFFLU,
328 port->membase + LTQ_ASC_TXFCON);
329 ltq_w32(
330 ((RXFIFO_FL << ASCRXFCON_RXFITLOFF) & ASCRXFCON_RXFITLMASK)
331 | ASCRXFCON_RXFEN | ASCRXFCON_RXFFLU,
332 port->membase + LTQ_ASC_RXFCON);
333 /* make sure other settings are written to hardware before
334 * setting enable bits
335 */
336 wmb();
337 ltq_w32_mask(0, ASCCON_M_8ASYNC | ASCCON_FEN | ASCCON_TOEN |
338 ASCCON_ROEN, port->membase + LTQ_ASC_CON);
339
340 retval = request_irq(ltq_port->tx_irq, lqasc_tx_int,
341 IRQF_DISABLED, "asc_tx", port);
342 if (retval) {
343 pr_err("failed to request lqasc_tx_int\n");
344 return retval;
345 }
346
347 retval = request_irq(ltq_port->rx_irq, lqasc_rx_int,
348 IRQF_DISABLED, "asc_rx", port);
349 if (retval) {
350 pr_err("failed to request lqasc_rx_int\n");
351 goto err1;
352 }
353
354 retval = request_irq(ltq_port->err_irq, lqasc_err_int,
355 IRQF_DISABLED, "asc_err", port);
356 if (retval) {
357 pr_err("failed to request lqasc_err_int\n");
358 goto err2;
359 }
360
361 ltq_w32(ASC_IRNREN_RX | ASC_IRNREN_ERR | ASC_IRNREN_TX,
362 port->membase + LTQ_ASC_IRNREN);
363 return 0;
364
365err2:
366 free_irq(ltq_port->rx_irq, port);
367err1:
368 free_irq(ltq_port->tx_irq, port);
369 return retval;
370}
371
372static void
373lqasc_shutdown(struct uart_port *port)
374{
375 struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
376 free_irq(ltq_port->tx_irq, port);
377 free_irq(ltq_port->rx_irq, port);
378 free_irq(ltq_port->err_irq, port);
379
380 ltq_w32(0, port->membase + LTQ_ASC_CON);
381 ltq_w32_mask(ASCRXFCON_RXFEN, ASCRXFCON_RXFFLU,
382 port->membase + LTQ_ASC_RXFCON);
383 ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
384 port->membase + LTQ_ASC_TXFCON);
385}
386
387static void
388lqasc_set_termios(struct uart_port *port,
389 struct ktermios *new, struct ktermios *old)
390{
391 unsigned int cflag;
392 unsigned int iflag;
393 unsigned int divisor;
394 unsigned int baud;
395 unsigned int con = 0;
396 unsigned long flags;
397
398 cflag = new->c_cflag;
399 iflag = new->c_iflag;
400
401 switch (cflag & CSIZE) {
402 case CS7:
403 con = ASCCON_M_7ASYNC;
404 break;
405
406 case CS5:
407 case CS6:
408 default:
409 new->c_cflag &= ~ CSIZE;
410 new->c_cflag |= CS8;
411 con = ASCCON_M_8ASYNC;
412 break;
413 }
414
415 cflag &= ~CMSPAR; /* Mark/Space parity is not supported */
416
417 if (cflag & CSTOPB)
418 con |= ASCCON_STP;
419
420 if (cflag & PARENB) {
421 if (!(cflag & PARODD))
422 con &= ~ASCCON_ODD;
423 else
424 con |= ASCCON_ODD;
425 }
426
427 port->read_status_mask = ASCSTATE_ROE;
428 if (iflag & INPCK)
429 port->read_status_mask |= ASCSTATE_FE | ASCSTATE_PE;
430
431 port->ignore_status_mask = 0;
432 if (iflag & IGNPAR)
433 port->ignore_status_mask |= ASCSTATE_FE | ASCSTATE_PE;
434
435 if (iflag & IGNBRK) {
436 /*
437 * If we're ignoring parity and break indicators,
438 * ignore overruns too (for real raw support).
439 */
440 if (iflag & IGNPAR)
441 port->ignore_status_mask |= ASCSTATE_ROE;
442 }
443
444 if ((cflag & CREAD) == 0)
445 port->ignore_status_mask |= UART_DUMMY_UER_RX;
446
447 /* set error signals - framing, parity and overrun, enable receiver */
448 con |= ASCCON_FEN | ASCCON_TOEN | ASCCON_ROEN;
449
450 spin_lock_irqsave(&ltq_asc_lock, flags);
451
452 /* set up CON */
453 ltq_w32_mask(0, con, port->membase + LTQ_ASC_CON);
454
455 /* Set baud rate - take a divider of 2 into account */
456 baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
457 divisor = uart_get_divisor(port, baud);
458 divisor = divisor / 2 - 1;
459
460 /* disable the baudrate generator */
461 ltq_w32_mask(ASCCON_R, 0, port->membase + LTQ_ASC_CON);
462
463 /* make sure the fractional divider is off */
464 ltq_w32_mask(ASCCON_FDE, 0, port->membase + LTQ_ASC_CON);
465
466 /* set up to use divisor of 2 */
467 ltq_w32_mask(ASCCON_BRS, 0, port->membase + LTQ_ASC_CON);
468
469 /* now we can write the new baudrate into the register */
470 ltq_w32(divisor, port->membase + LTQ_ASC_BG);
471
472 /* turn the baudrate generator back on */
473 ltq_w32_mask(0, ASCCON_R, port->membase + LTQ_ASC_CON);
474
475 /* enable rx */
476 ltq_w32(ASCWHBSTATE_SETREN, port->membase + LTQ_ASC_WHBSTATE);
477
478 spin_unlock_irqrestore(&ltq_asc_lock, flags);
479
480 /* Don't rewrite B0 */
481 if (tty_termios_baud_rate(new))
482 tty_termios_encode_baud_rate(new, baud, baud);
483}
484
485static const char*
486lqasc_type(struct uart_port *port)
487{
488 if (port->type == PORT_LTQ_ASC)
489 return DRVNAME;
490 else
491 return NULL;
492}
493
494static void
495lqasc_release_port(struct uart_port *port)
496{
497 if (port->flags & UPF_IOREMAP) {
498 iounmap(port->membase);
499 port->membase = NULL;
500 }
501}
502
503static int
504lqasc_request_port(struct uart_port *port)
505{
506 struct platform_device *pdev = to_platform_device(port->dev);
507 struct resource *res;
508 int size;
509
510 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
511 if (!res) {
512 dev_err(&pdev->dev, "cannot obtain I/O memory region");
513 return -ENODEV;
514 }
515 size = resource_size(res);
516
517 res = devm_request_mem_region(&pdev->dev, res->start,
518 size, dev_name(&pdev->dev));
519 if (!res) {
520 dev_err(&pdev->dev, "cannot request I/O memory region");
521 return -EBUSY;
522 }
523
524 if (port->flags & UPF_IOREMAP) {
525 port->membase = devm_ioremap_nocache(&pdev->dev,
526 port->mapbase, size);
527 if (port->membase == NULL)
528 return -ENOMEM;
529 }
530 return 0;
531}
532
533static void
534lqasc_config_port(struct uart_port *port, int flags)
535{
536 if (flags & UART_CONFIG_TYPE) {
537 port->type = PORT_LTQ_ASC;
538 lqasc_request_port(port);
539 }
540}
541
542static int
543lqasc_verify_port(struct uart_port *port,
544 struct serial_struct *ser)
545{
546 int ret = 0;
547 if (ser->type != PORT_UNKNOWN && ser->type != PORT_LTQ_ASC)
548 ret = -EINVAL;
549 if (ser->irq < 0 || ser->irq >= NR_IRQS)
550 ret = -EINVAL;
551 if (ser->baud_base < 9600)
552 ret = -EINVAL;
553 return ret;
554}
555
556static struct uart_ops lqasc_pops = {
557 .tx_empty = lqasc_tx_empty,
558 .set_mctrl = lqasc_set_mctrl,
559 .get_mctrl = lqasc_get_mctrl,
560 .stop_tx = lqasc_stop_tx,
561 .start_tx = lqasc_start_tx,
562 .stop_rx = lqasc_stop_rx,
563 .enable_ms = lqasc_enable_ms,
564 .break_ctl = lqasc_break_ctl,
565 .startup = lqasc_startup,
566 .shutdown = lqasc_shutdown,
567 .set_termios = lqasc_set_termios,
568 .type = lqasc_type,
569 .release_port = lqasc_release_port,
570 .request_port = lqasc_request_port,
571 .config_port = lqasc_config_port,
572 .verify_port = lqasc_verify_port,
573};
574
575static void
576lqasc_console_putchar(struct uart_port *port, int ch)
577{
578 int fifofree;
579
580 if (!port->membase)
581 return;
582
583 do {
584 fifofree = (ltq_r32(port->membase + LTQ_ASC_FSTAT)
585 & ASCFSTAT_TXFREEMASK) >> ASCFSTAT_TXFREEOFF;
586 } while (fifofree == 0);
587 ltq_w8(ch, port->membase + LTQ_ASC_TBUF);
588}
589
590
591static void
592lqasc_console_write(struct console *co, const char *s, u_int count)
593{
594 struct ltq_uart_port *ltq_port;
595 struct uart_port *port;
596 unsigned long flags;
597
598 if (co->index >= MAXPORTS)
599 return;
600
601 ltq_port = lqasc_port[co->index];
602 if (!ltq_port)
603 return;
604
605 port = &ltq_port->port;
606
607 spin_lock_irqsave(&ltq_asc_lock, flags);
608 uart_console_write(port, s, count, lqasc_console_putchar);
609 spin_unlock_irqrestore(&ltq_asc_lock, flags);
610}
611
612static int __init
613lqasc_console_setup(struct console *co, char *options)
614{
615 struct ltq_uart_port *ltq_port;
616 struct uart_port *port;
617 int baud = 115200;
618 int bits = 8;
619 int parity = 'n';
620 int flow = 'n';
621
622 if (co->index >= MAXPORTS)
623 return -ENODEV;
624
625 ltq_port = lqasc_port[co->index];
626 if (!ltq_port)
627 return -ENODEV;
628
629 port = &ltq_port->port;
630
631 port->uartclk = clk_get_rate(ltq_port->clk);
632
633 if (options)
634 uart_parse_options(options, &baud, &parity, &bits, &flow);
635 return uart_set_options(port, co, baud, parity, bits, flow);
636}
637
638static struct console lqasc_console = {
639 .name = "ttyLTQ",
640 .write = lqasc_console_write,
641 .device = uart_console_device,
642 .setup = lqasc_console_setup,
643 .flags = CON_PRINTBUFFER,
644 .index = -1,
645 .data = &lqasc_reg,
646};
647
648static int __init
649lqasc_console_init(void)
650{
651 register_console(&lqasc_console);
652 return 0;
653}
654console_initcall(lqasc_console_init);
655
656static struct uart_driver lqasc_reg = {
657 .owner = THIS_MODULE,
658 .driver_name = DRVNAME,
659 .dev_name = "ttyLTQ",
660 .major = 0,
661 .minor = 0,
662 .nr = MAXPORTS,
663 .cons = &lqasc_console,
664};
665
666static int __init
667lqasc_probe(struct platform_device *pdev)
668{
669 struct ltq_uart_port *ltq_port;
670 struct uart_port *port;
671 struct resource *mmres, *irqres;
672 int tx_irq, rx_irq, err_irq;
673 struct clk *clk;
674 int ret;
675
676 mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
677 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
678 if (!mmres || !irqres)
679 return -ENODEV;
680
681 if (pdev->id >= MAXPORTS)
682 return -EBUSY;
683
684 if (lqasc_port[pdev->id] != NULL)
685 return -EBUSY;
686
687 clk = clk_get(&pdev->dev, "fpi");
688 if (IS_ERR(clk)) {
689 pr_err("failed to get fpi clk\n");
690 return -ENOENT;
691 }
692
693 tx_irq = platform_get_irq_byname(pdev, "tx");
694 rx_irq = platform_get_irq_byname(pdev, "rx");
695 err_irq = platform_get_irq_byname(pdev, "err");
696 if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0))
697 return -ENODEV;
698
699 ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
700 if (!ltq_port)
701 return -ENOMEM;
702
703 port = &ltq_port->port;
704
705 port->iotype = SERIAL_IO_MEM;
706 port->flags = ASYNC_BOOT_AUTOCONF | UPF_IOREMAP;
707 port->ops = &lqasc_pops;
708 port->fifosize = 16;
709 port->type = PORT_LTQ_ASC,
710 port->line = pdev->id;
711 port->dev = &pdev->dev;
712
713 port->irq = tx_irq; /* unused, just to be backward-compatibe */
714 port->mapbase = mmres->start;
715
716 ltq_port->clk = clk;
717
718 ltq_port->tx_irq = tx_irq;
719 ltq_port->rx_irq = rx_irq;
720 ltq_port->err_irq = err_irq;
721
722 lqasc_port[pdev->id] = ltq_port;
723 platform_set_drvdata(pdev, ltq_port);
724
725 ret = uart_add_one_port(&lqasc_reg, port);
726
727 return ret;
728}
729
730static struct platform_driver lqasc_driver = {
731 .driver = {
732 .name = DRVNAME,
733 .owner = THIS_MODULE,
734 },
735};
736
737int __init
738init_lqasc(void)
739{
740 int ret;
741
742 ret = uart_register_driver(&lqasc_reg);
743 if (ret != 0)
744 return ret;
745
746 ret = platform_driver_probe(&lqasc_driver, lqasc_probe);
747 if (ret != 0)
748 uart_unregister_driver(&lqasc_reg);
749
750 return ret;
751}
752
753module_init(init_lqasc);
754
755MODULE_DESCRIPTION("Lantiq serial port driver");
756MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 1b0f98bc51b5..022f9eb0b7bf 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -990,6 +990,12 @@ config BCM63XX_WDT
990 To compile this driver as a loadable module, choose M here. 990 To compile this driver as a loadable module, choose M here.
991 The module will be called bcm63xx_wdt. 991 The module will be called bcm63xx_wdt.
992 992
993config LANTIQ_WDT
994 tristate "Lantiq SoC watchdog"
995 depends on LANTIQ
996 help
997 Hardware driver for the Lantiq SoC Watchdog Timer.
998
993# PARISC Architecture 999# PARISC Architecture
994 1000
995# POWERPC Architecture 1001# POWERPC Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 3f8608b922a7..ed26f7094e47 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -123,6 +123,7 @@ obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
123obj-$(CONFIG_TXX9_WDT) += txx9wdt.o 123obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
124obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o 124obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
125octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o 125octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
126obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o
126 127
127# PARISC Architecture 128# PARISC Architecture
128 129
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
new file mode 100644
index 000000000000..7d82adac1cb2
--- /dev/null
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -0,0 +1,261 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 * Based on EP93xx wdt driver
8 */
9
10#include <linux/module.h>
11#include <linux/fs.h>
12#include <linux/miscdevice.h>
13#include <linux/watchdog.h>
14#include <linux/platform_device.h>
15#include <linux/uaccess.h>
16#include <linux/clk.h>
17#include <linux/io.h>
18
19#include <lantiq.h>
20
21/* Section 3.4 of the datasheet
22 * The password sequence protects the WDT control register from unintended
23 * write actions, which might cause malfunction of the WDT.
24 *
25 * essentially the following two magic passwords need to be written to allow
26 * IO access to the WDT core
27 */
28#define LTQ_WDT_PW1 0x00BE0000
29#define LTQ_WDT_PW2 0x00DC0000
30
31#define LTQ_WDT_CR 0x0 /* watchdog control register */
32#define LTQ_WDT_SR 0x8 /* watchdog status register */
33
34#define LTQ_WDT_SR_EN (0x1 << 31) /* enable bit */
35#define LTQ_WDT_SR_PWD (0x3 << 26) /* turn on power */
36#define LTQ_WDT_SR_CLKDIV (0x3 << 24) /* turn on clock and set */
37 /* divider to 0x40000 */
38#define LTQ_WDT_DIVIDER 0x40000
39#define LTQ_MAX_TIMEOUT ((1 << 16) - 1) /* the reload field is 16 bit */
40
41static int nowayout = WATCHDOG_NOWAYOUT;
42
43static void __iomem *ltq_wdt_membase;
44static unsigned long ltq_io_region_clk_rate;
45
46static unsigned long ltq_wdt_bootstatus;
47static unsigned long ltq_wdt_in_use;
48static int ltq_wdt_timeout = 30;
49static int ltq_wdt_ok_to_close;
50
51static void
52ltq_wdt_enable(void)
53{
54 ltq_wdt_timeout = ltq_wdt_timeout *
55 (ltq_io_region_clk_rate / LTQ_WDT_DIVIDER) + 0x1000;
56 if (ltq_wdt_timeout > LTQ_MAX_TIMEOUT)
57 ltq_wdt_timeout = LTQ_MAX_TIMEOUT;
58
59 /* write the first password magic */
60 ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
61 /* write the second magic plus the configuration and new timeout */
62 ltq_w32(LTQ_WDT_SR_EN | LTQ_WDT_SR_PWD | LTQ_WDT_SR_CLKDIV |
63 LTQ_WDT_PW2 | ltq_wdt_timeout, ltq_wdt_membase + LTQ_WDT_CR);
64}
65
66static void
67ltq_wdt_disable(void)
68{
69 /* write the first password magic */
70 ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
71 /* write the second password magic with no config
72 * this turns the watchdog off
73 */
74 ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
75}
76
77static ssize_t
78ltq_wdt_write(struct file *file, const char __user *data,
79 size_t len, loff_t *ppos)
80{
81 if (len) {
82 if (!nowayout) {
83 size_t i;
84
85 ltq_wdt_ok_to_close = 0;
86 for (i = 0; i != len; i++) {
87 char c;
88
89 if (get_user(c, data + i))
90 return -EFAULT;
91 if (c == 'V')
92 ltq_wdt_ok_to_close = 1;
93 else
94 ltq_wdt_ok_to_close = 0;
95 }
96 }
97 ltq_wdt_enable();
98 }
99
100 return len;
101}
102
103static struct watchdog_info ident = {
104 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
105 WDIOF_CARDRESET,
106 .identity = "ltq_wdt",
107};
108
109static long
110ltq_wdt_ioctl(struct file *file,
111 unsigned int cmd, unsigned long arg)
112{
113 int ret = -ENOTTY;
114
115 switch (cmd) {
116 case WDIOC_GETSUPPORT:
117 ret = copy_to_user((struct watchdog_info __user *)arg, &ident,
118 sizeof(ident)) ? -EFAULT : 0;
119 break;
120
121 case WDIOC_GETBOOTSTATUS:
122 ret = put_user(ltq_wdt_bootstatus, (int __user *)arg);
123 break;
124
125 case WDIOC_GETSTATUS:
126 ret = put_user(0, (int __user *)arg);
127 break;
128
129 case WDIOC_SETTIMEOUT:
130 ret = get_user(ltq_wdt_timeout, (int __user *)arg);
131 if (!ret)
132 ltq_wdt_enable();
133 /* intentional drop through */
134 case WDIOC_GETTIMEOUT:
135 ret = put_user(ltq_wdt_timeout, (int __user *)arg);
136 break;
137
138 case WDIOC_KEEPALIVE:
139 ltq_wdt_enable();
140 ret = 0;
141 break;
142 }
143 return ret;
144}
145
146static int
147ltq_wdt_open(struct inode *inode, struct file *file)
148{
149 if (test_and_set_bit(0, &ltq_wdt_in_use))
150 return -EBUSY;
151 ltq_wdt_in_use = 1;
152 ltq_wdt_enable();
153
154 return nonseekable_open(inode, file);
155}
156
157static int
158ltq_wdt_release(struct inode *inode, struct file *file)
159{
160 if (ltq_wdt_ok_to_close)
161 ltq_wdt_disable();
162 else
163 pr_err("ltq_wdt: watchdog closed without warning\n");
164 ltq_wdt_ok_to_close = 0;
165 clear_bit(0, &ltq_wdt_in_use);
166
167 return 0;
168}
169
170static const struct file_operations ltq_wdt_fops = {
171 .owner = THIS_MODULE,
172 .write = ltq_wdt_write,
173 .unlocked_ioctl = ltq_wdt_ioctl,
174 .open = ltq_wdt_open,
175 .release = ltq_wdt_release,
176 .llseek = no_llseek,
177};
178
179static struct miscdevice ltq_wdt_miscdev = {
180 .minor = WATCHDOG_MINOR,
181 .name = "watchdog",
182 .fops = &ltq_wdt_fops,
183};
184
185static int __init
186ltq_wdt_probe(struct platform_device *pdev)
187{
188 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
189 struct clk *clk;
190
191 if (!res) {
192 dev_err(&pdev->dev, "cannot obtain I/O memory region");
193 return -ENOENT;
194 }
195 res = devm_request_mem_region(&pdev->dev, res->start,
196 resource_size(res), dev_name(&pdev->dev));
197 if (!res) {
198 dev_err(&pdev->dev, "cannot request I/O memory region");
199 return -EBUSY;
200 }
201 ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
202 resource_size(res));
203 if (!ltq_wdt_membase) {
204 dev_err(&pdev->dev, "cannot remap I/O memory region\n");
205 return -ENOMEM;
206 }
207
208 /* we do not need to enable the clock as it is always running */
209 clk = clk_get(&pdev->dev, "io");
210 WARN_ON(!clk);
211 ltq_io_region_clk_rate = clk_get_rate(clk);
212 clk_put(clk);
213
214 if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
215 ltq_wdt_bootstatus = WDIOF_CARDRESET;
216
217 return misc_register(&ltq_wdt_miscdev);
218}
219
220static int __devexit
221ltq_wdt_remove(struct platform_device *pdev)
222{
223 misc_deregister(&ltq_wdt_miscdev);
224
225 if (ltq_wdt_membase)
226 iounmap(ltq_wdt_membase);
227
228 return 0;
229}
230
231
232static struct platform_driver ltq_wdt_driver = {
233 .remove = __devexit_p(ltq_wdt_remove),
234 .driver = {
235 .name = "ltq_wdt",
236 .owner = THIS_MODULE,
237 },
238};
239
240static int __init
241init_ltq_wdt(void)
242{
243 return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
244}
245
246static void __exit
247exit_ltq_wdt(void)
248{
249 return platform_driver_unregister(&ltq_wdt_driver);
250}
251
252module_init(init_ltq_wdt);
253module_exit(exit_ltq_wdt);
254
255module_param(nowayout, int, 0);
256MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
257
258MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
259MODULE_DESCRIPTION("Lantiq SoC Watchdog");
260MODULE_LICENSE("GPL");
261MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index 5ec5ac1f7878..1479dc4d6129 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -66,6 +66,7 @@ static struct {
66 int default_ticks; 66 int default_ticks;
67 unsigned long inuse; 67 unsigned long inuse;
68 unsigned gpio; 68 unsigned gpio;
69 int gstate;
69} mtx1_wdt_device; 70} mtx1_wdt_device;
70 71
71static void mtx1_wdt_trigger(unsigned long unused) 72static void mtx1_wdt_trigger(unsigned long unused)
@@ -75,13 +76,13 @@ static void mtx1_wdt_trigger(unsigned long unused)
75 spin_lock(&mtx1_wdt_device.lock); 76 spin_lock(&mtx1_wdt_device.lock);
76 if (mtx1_wdt_device.running) 77 if (mtx1_wdt_device.running)
77 ticks--; 78 ticks--;
78 /* 79
79 * toggle GPIO2_15 80 /* toggle wdt gpio */
80 */ 81 mtx1_wdt_device.gstate = ~mtx1_wdt_device.gstate;
81 tmp = au_readl(GPIO2_DIR); 82 if (mtx1_wdt_device.gstate)
82 tmp = (tmp & ~(1 << mtx1_wdt_device.gpio)) | 83 gpio_direction_output(mtx1_wdt_device.gpio, 1);
83 ((~tmp) & (1 << mtx1_wdt_device.gpio)); 84 else
84 au_writel(tmp, GPIO2_DIR); 85 gpio_direction_input(mtx1_wdt_device.gpio);
85 86
86 if (mtx1_wdt_device.queue && ticks) 87 if (mtx1_wdt_device.queue && ticks)
87 mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); 88 mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
@@ -103,7 +104,8 @@ static void mtx1_wdt_start(void)
103 spin_lock_irqsave(&mtx1_wdt_device.lock, flags); 104 spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
104 if (!mtx1_wdt_device.queue) { 105 if (!mtx1_wdt_device.queue) {
105 mtx1_wdt_device.queue = 1; 106 mtx1_wdt_device.queue = 1;
106 gpio_set_value(mtx1_wdt_device.gpio, 1); 107 mtx1_wdt_device.gstate = 1;
108 gpio_direction_output(mtx1_wdt_device.gpio, 1);
107 mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL); 109 mod_timer(&mtx1_wdt_device.timer, jiffies + MTX1_WDT_INTERVAL);
108 } 110 }
109 mtx1_wdt_device.running++; 111 mtx1_wdt_device.running++;
@@ -117,7 +119,8 @@ static int mtx1_wdt_stop(void)
117 spin_lock_irqsave(&mtx1_wdt_device.lock, flags); 119 spin_lock_irqsave(&mtx1_wdt_device.lock, flags);
118 if (mtx1_wdt_device.queue) { 120 if (mtx1_wdt_device.queue) {
119 mtx1_wdt_device.queue = 0; 121 mtx1_wdt_device.queue = 0;
120 gpio_set_value(mtx1_wdt_device.gpio, 0); 122 mtx1_wdt_device.gstate = 0;
123 gpio_direction_output(mtx1_wdt_device.gpio, 0);
121 } 124 }
122 ticks = mtx1_wdt_device.default_ticks; 125 ticks = mtx1_wdt_device.default_ticks;
123 spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags); 126 spin_unlock_irqrestore(&mtx1_wdt_device.lock, flags);