aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorArnd Bergmann <arnd@arndb.de>2018-03-07 16:18:39 -0500
committerArnd Bergmann <arnd@arndb.de>2018-03-07 16:18:39 -0500
commitb67aea2bbab780e412b8af3386cc9f78f61a4cac (patch)
tree93fb3f88d71a431d5a1d2203635546986dacf3f4 /drivers
parent661e50bc853209e41a5c14a290ca4decc43cbfd1 (diff)
parent8d06c3302635f0ab426937f2bb10e9b9c34087e4 (diff)
Merge tag 'metag_remove_2' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/jhogan/metag into asm-generic
Remove metag architecture

These patches remove the metag architecture and tightly dependent drivers
from the kernel. With the 4.16 kernel the ancient gcc 4.2.4 based metag
toolchain we have been using is hitting compiler bugs, so now seems a good
time to drop it altogether.

* tag 'metag_remove_2' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/jhogan/metag:
  i2c: img-scb: Drop METAG dependency
  media: img-ir: Drop METAG dependency
  watchdog: imgpdc: Drop METAG dependency
  MAINTAINERS/CREDITS: Drop METAG ARCHITECTURE
  tty: Remove metag DA TTY and console driver
  clocksource: Remove metag generic timer driver
  irqchip: Remove metag irqchip drivers
  Drop a bunch of metag references
  docs: Remove remaining references to metag
  docs: Remove metag docs
  metag: Remove arch/metag/

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/clocksource/Kconfig5
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/metag_generic.c161
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/irqchip/Makefile2
-rw-r--r--drivers/irqchip/irq-metag-ext.c871
-rw-r--r--drivers/irqchip/irq-metag.c343
-rw-r--r--drivers/media/rc/img-ir/Kconfig2
-rw-r--r--drivers/tty/Kconfig13
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/metag_da.c665
-rw-r--r--drivers/watchdog/Kconfig2
12 files changed, 3 insertions, 2065 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index b3b4ed9b6874..f99dbc2f7ee4 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -391,11 +391,6 @@ config ATMEL_ST
391 help 391 help
392 Support for the Atmel ST timer. 392 Support for the Atmel ST timer.
393 393
394config CLKSRC_METAG_GENERIC
395 def_bool y if METAG
396 help
397 This option enables support for the Meta per-thread timers.
398
399config CLKSRC_EXYNOS_MCT 394config CLKSRC_EXYNOS_MCT
400 bool "Exynos multi core timer driver" if COMPILE_TEST 395 bool "Exynos multi core timer driver" if COMPILE_TEST
401 depends on ARM || ARM64 396 depends on ARM || ARM64
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index d6dec4489d66..a2d47e9ecf91 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -61,7 +61,6 @@ obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o
61obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o 61obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o
62obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o 62obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o
63obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o 63obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
64obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o
65obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o 64obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
66obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o 65obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
67obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o 66obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
deleted file mode 100644
index 3e5fa2f62d5f..000000000000
--- a/drivers/clocksource/metag_generic.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/*
2 * Copyright (C) 2005-2013 Imagination Technologies Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 *
16 *
17 * Support for Meta per-thread timers.
18 *
19 * Meta hardware threads have 2 timers. The background timer (TXTIMER) is used
20 * as a free-running time base (hz clocksource), and the interrupt timer
21 * (TXTIMERI) is used for the timer interrupt (clock event). Both counters
22 * traditionally count at approximately 1MHz.
23 */
24
25#include <clocksource/metag_generic.h>
26#include <linux/cpu.h>
27#include <linux/errno.h>
28#include <linux/sched.h>
29#include <linux/kernel.h>
30#include <linux/param.h>
31#include <linux/time.h>
32#include <linux/init.h>
33#include <linux/proc_fs.h>
34#include <linux/clocksource.h>
35#include <linux/clockchips.h>
36#include <linux/interrupt.h>
37
38#include <asm/clock.h>
39#include <asm/hwthread.h>
40#include <asm/core_reg.h>
41#include <asm/metag_mem.h>
42#include <asm/tbx.h>
43
44#define HARDWARE_FREQ 1000000 /* 1MHz */
45#define HARDWARE_DIV 1 /* divide by 1 = 1MHz clock */
46#define HARDWARE_TO_NS_SHIFT 10 /* convert ticks to ns */
47
48static unsigned int hwtimer_freq = HARDWARE_FREQ;
49static DEFINE_PER_CPU(struct clock_event_device, local_clockevent);
50static DEFINE_PER_CPU(char [11], local_clockevent_name);
51
52static int metag_timer_set_next_event(unsigned long delta,
53 struct clock_event_device *dev)
54{
55 __core_reg_set(TXTIMERI, -delta);
56 return 0;
57}
58
59static u64 metag_clocksource_read(struct clocksource *cs)
60{
61 return __core_reg_get(TXTIMER);
62}
63
64static struct clocksource clocksource_metag = {
65 .name = "META",
66 .rating = 200,
67 .mask = CLOCKSOURCE_MASK(32),
68 .read = metag_clocksource_read,
69 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
70};
71
72static irqreturn_t metag_timer_interrupt(int irq, void *dummy)
73{
74 struct clock_event_device *evt = this_cpu_ptr(&local_clockevent);
75
76 evt->event_handler(evt);
77
78 return IRQ_HANDLED;
79}
80
81static struct irqaction metag_timer_irq = {
82 .name = "META core timer",
83 .handler = metag_timer_interrupt,
84 .flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_PERCPU,
85};
86
87unsigned long long sched_clock(void)
88{
89 unsigned long long ticks = __core_reg_get(TXTIMER);
90 return ticks << HARDWARE_TO_NS_SHIFT;
91}
92
93static int arch_timer_starting_cpu(unsigned int cpu)
94{
95 unsigned int txdivtime;
96 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
97 char *name = per_cpu(local_clockevent_name, cpu);
98
99 txdivtime = __core_reg_get(TXDIVTIME);
100
101 txdivtime &= ~TXDIVTIME_DIV_BITS;
102 txdivtime |= (HARDWARE_DIV & TXDIVTIME_DIV_BITS);
103
104 __core_reg_set(TXDIVTIME, txdivtime);
105
106 sprintf(name, "META %d", cpu);
107 clk->name = name;
108 clk->features = CLOCK_EVT_FEAT_ONESHOT,
109
110 clk->rating = 200,
111 clk->shift = 12,
112 clk->irq = tbisig_map(TBID_SIGNUM_TRT),
113 clk->set_next_event = metag_timer_set_next_event,
114
115 clk->mult = div_sc(hwtimer_freq, NSEC_PER_SEC, clk->shift);
116 clk->max_delta_ns = clockevent_delta2ns(0x7fffffff, clk);
117 clk->max_delta_ticks = 0x7fffffff;
118 clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
119 clk->min_delta_ticks = 0xf;
120 clk->cpumask = cpumask_of(cpu);
121
122 clockevents_register_device(clk);
123
124 /*
125 * For all non-boot CPUs we need to synchronize our free
126 * running clock (TXTIMER) with the boot CPU's clock.
127 *
128 * While this won't be accurate, it should be close enough.
129 */
130 if (cpu) {
131 unsigned int thread0 = cpu_2_hwthread_id[0];
132 unsigned long val;
133
134 val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
135 __core_reg_set(TXTIMER, val);
136 }
137 return 0;
138}
139
140int __init metag_generic_timer_init(void)
141{
142 /*
143 * On Meta 2 SoCs, the actual frequency of the timer is based on the
144 * Meta core clock speed divided by an integer, so it is only
145 * approximately 1MHz. Calculating the real frequency here drastically
146 * reduces clock skew on these SoCs.
147 */
148#ifdef CONFIG_METAG_META21
149 hwtimer_freq = get_coreclock() / (metag_in32(EXPAND_TIMER_DIV) + 1);
150#endif
151 pr_info("Timer frequency: %u Hz\n", hwtimer_freq);
152
153 clocksource_register_hz(&clocksource_metag, hwtimer_freq);
154
155 setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
156
157 /* Hook cpu boot to configure the CPU's timers */
158 return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
159 "clockevents/metag:starting",
160 arch_timer_starting_cpu, NULL);
161}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e2954fb86d65..68ceac7617ff 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -637,7 +637,7 @@ config I2C_IBM_IIC
637 637
638config I2C_IMG 638config I2C_IMG
639 tristate "Imagination Technologies I2C SCB Controller" 639 tristate "Imagination Technologies I2C SCB Controller"
640 depends on MIPS || METAG || COMPILE_TEST 640 depends on MIPS || COMPILE_TEST
641 help 641 help
642 Say Y here if you want to use the IMG I2C SCB controller, 642 Say Y here if you want to use the IMG I2C SCB controller,
643 available on the TZ1090 and other IMG SoCs. 643 available on the TZ1090 and other IMG SoCs.
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index d27e3e3619e0..b5b1f4c93413 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -15,8 +15,6 @@ obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
15obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o 15obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
16obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o 16obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o
17obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o 17obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
18obj-$(CONFIG_METAG) += irq-metag-ext.o
19obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
20obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o 18obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
21obj-$(CONFIG_OMPIC) += irq-ompic.o 19obj-$(CONFIG_OMPIC) += irq-ompic.o
22obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o 20obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
deleted file mode 100644
index e67483161f0f..000000000000
--- a/drivers/irqchip/irq-metag-ext.c
+++ /dev/null
@@ -1,871 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Meta External interrupt code.
4 *
5 * Copyright (C) 2005-2012 Imagination Technologies Ltd.
6 *
7 * External interrupts on Meta are configured at two-levels, in the CPU core and
8 * in the external trigger block. Interrupts from SoC peripherals are
9 * multiplexed onto a single Meta CPU "trigger" - traditionally it has always
10 * been trigger 2 (TR2). For info on how de-multiplexing happens check out
11 * meta_intc_irq_demux().
12 */
13
14#include <linux/interrupt.h>
15#include <linux/irqchip/metag-ext.h>
16#include <linux/irqdomain.h>
17#include <linux/io.h>
18#include <linux/of.h>
19#include <linux/slab.h>
20#include <linux/syscore_ops.h>
21
22#include <asm/irq.h>
23#include <asm/hwthread.h>
24
25#define HWSTAT_STRIDE 8
26#define HWVEC_BLK_STRIDE 0x1000
27
28/**
29 * struct meta_intc_priv - private meta external interrupt data
30 * @nr_banks: Number of interrupt banks
31 * @domain: IRQ domain for all banks of external IRQs
32 * @unmasked: Record of unmasked IRQs
33 * @levels_altered: Record of altered level bits
34 */
35struct meta_intc_priv {
36 unsigned int nr_banks;
37 struct irq_domain *domain;
38
39 unsigned long unmasked[4];
40
41#ifdef CONFIG_METAG_SUSPEND_MEM
42 unsigned long levels_altered[4];
43#endif
44};
45
46/* Private data for the one and only external interrupt controller */
47static struct meta_intc_priv meta_intc_priv;
48
49/**
50 * meta_intc_offset() - Get the offset into the bank of a hardware IRQ number
51 * @hw: Hardware IRQ number (within external trigger block)
52 *
53 * Returns: Bit offset into the IRQ's bank registers
54 */
55static unsigned int meta_intc_offset(irq_hw_number_t hw)
56{
57 return hw & 0x1f;
58}
59
60/**
61 * meta_intc_bank() - Get the bank number of a hardware IRQ number
62 * @hw: Hardware IRQ number (within external trigger block)
63 *
64 * Returns: Bank number indicating which register the IRQ's bits are
65 */
66static unsigned int meta_intc_bank(irq_hw_number_t hw)
67{
68 return hw >> 5;
69}
70
71/**
72 * meta_intc_stat_addr() - Get the address of a HWSTATEXT register
73 * @hw: Hardware IRQ number (within external trigger block)
74 *
75 * Returns: Address of a HWSTATEXT register containing the status bit for
76 * the specified hardware IRQ number
77 */
78static void __iomem *meta_intc_stat_addr(irq_hw_number_t hw)
79{
80 return (void __iomem *)(HWSTATEXT +
81 HWSTAT_STRIDE * meta_intc_bank(hw));
82}
83
84/**
85 * meta_intc_level_addr() - Get the address of a HWLEVELEXT register
86 * @hw: Hardware IRQ number (within external trigger block)
87 *
88 * Returns: Address of a HWLEVELEXT register containing the sense bit for
89 * the specified hardware IRQ number
90 */
91static void __iomem *meta_intc_level_addr(irq_hw_number_t hw)
92{
93 return (void __iomem *)(HWLEVELEXT +
94 HWSTAT_STRIDE * meta_intc_bank(hw));
95}
96
97/**
98 * meta_intc_mask_addr() - Get the address of a HWMASKEXT register
99 * @hw: Hardware IRQ number (within external trigger block)
100 *
101 * Returns: Address of a HWMASKEXT register containing the mask bit for the
102 * specified hardware IRQ number
103 */
104static void __iomem *meta_intc_mask_addr(irq_hw_number_t hw)
105{
106 return (void __iomem *)(HWMASKEXT +
107 HWSTAT_STRIDE * meta_intc_bank(hw));
108}
109
110/**
111 * meta_intc_vec_addr() - Get the vector address of a hardware interrupt
112 * @hw: Hardware IRQ number (within external trigger block)
113 *
114 * Returns: Address of a HWVECEXT register controlling the core trigger to
115 * vector the IRQ onto
116 */
117static inline void __iomem *meta_intc_vec_addr(irq_hw_number_t hw)
118{
119 return (void __iomem *)(HWVEC0EXT +
120 HWVEC_BLK_STRIDE * meta_intc_bank(hw) +
121 HWVECnEXT_STRIDE * meta_intc_offset(hw));
122}
123
124/**
125 * meta_intc_startup_irq() - set up an external irq
126 * @data: data for the external irq to start up
127 *
128 * Multiplex interrupts for irq onto TR2. Clear any pending interrupts and
129 * unmask irq, both using the appropriate callbacks.
130 */
131static unsigned int meta_intc_startup_irq(struct irq_data *data)
132{
133 irq_hw_number_t hw = data->hwirq;
134 void __iomem *vec_addr = meta_intc_vec_addr(hw);
135 int thread = hard_processor_id();
136
137 /* Perform any necessary acking. */
138 if (data->chip->irq_ack)
139 data->chip->irq_ack(data);
140
141 /* Wire up this interrupt to the core with HWVECxEXT. */
142 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
143
144 /* Perform any necessary unmasking. */
145 data->chip->irq_unmask(data);
146
147 return 0;
148}
149
150/**
151 * meta_intc_shutdown_irq() - turn off an external irq
152 * @data: data for the external irq to turn off
153 *
154 * Mask irq using the appropriate callback and stop muxing it onto TR2.
155 */
156static void meta_intc_shutdown_irq(struct irq_data *data)
157{
158 irq_hw_number_t hw = data->hwirq;
159 void __iomem *vec_addr = meta_intc_vec_addr(hw);
160
161 /* Mask the IRQ */
162 data->chip->irq_mask(data);
163
164 /*
165 * Disable the IRQ at the core by removing the interrupt from
166 * the HW vector mapping.
167 */
168 metag_out32(0, vec_addr);
169}
170
171/**
172 * meta_intc_ack_irq() - acknowledge an external irq
173 * @data: data for the external irq to ack
174 *
175 * Clear down an edge interrupt in the status register.
176 */
177static void meta_intc_ack_irq(struct irq_data *data)
178{
179 irq_hw_number_t hw = data->hwirq;
180 unsigned int bit = 1 << meta_intc_offset(hw);
181 void __iomem *stat_addr = meta_intc_stat_addr(hw);
182
183 /* Ack the int, if it is still 'on'.
184 * NOTE - this only works for edge triggered interrupts.
185 */
186 if (metag_in32(stat_addr) & bit)
187 metag_out32(bit, stat_addr);
188}
189
190/**
191 * record_irq_is_masked() - record the IRQ masked so it doesn't get handled
192 * @data: data for the external irq to record
193 *
194 * This should get called whenever an external IRQ is masked (by whichever
195 * callback is used). It records the IRQ masked so that it doesn't get handled
196 * if it still shows up in the status register.
197 */
198static void record_irq_is_masked(struct irq_data *data)
199{
200 struct meta_intc_priv *priv = &meta_intc_priv;
201 irq_hw_number_t hw = data->hwirq;
202
203 clear_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
204}
205
206/**
207 * record_irq_is_unmasked() - record the IRQ unmasked so it can be handled
208 * @data: data for the external irq to record
209 *
210 * This should get called whenever an external IRQ is unmasked (by whichever
211 * callback is used). It records the IRQ unmasked so that it gets handled if it
212 * shows up in the status register.
213 */
214static void record_irq_is_unmasked(struct irq_data *data)
215{
216 struct meta_intc_priv *priv = &meta_intc_priv;
217 irq_hw_number_t hw = data->hwirq;
218
219 set_bit(meta_intc_offset(hw), &priv->unmasked[meta_intc_bank(hw)]);
220}
221
222/*
223 * For use by wrapper IRQ drivers
224 */
225
226/**
227 * meta_intc_mask_irq_simple() - minimal mask used by wrapper IRQ drivers
228 * @data: data for the external irq being masked
229 *
230 * This should be called by any wrapper IRQ driver mask functions. it doesn't do
231 * any masking but records the IRQ as masked so that the core code knows the
232 * mask has taken place. It is the callers responsibility to ensure that the IRQ
233 * won't trigger an interrupt to the core.
234 */
235void meta_intc_mask_irq_simple(struct irq_data *data)
236{
237 record_irq_is_masked(data);
238}
239
240/**
241 * meta_intc_unmask_irq_simple() - minimal unmask used by wrapper IRQ drivers
242 * @data: data for the external irq being unmasked
243 *
244 * This should be called by any wrapper IRQ driver unmask functions. it doesn't
245 * do any unmasking but records the IRQ as unmasked so that the core code knows
246 * the unmask has taken place. It is the callers responsibility to ensure that
247 * the IRQ can now trigger an interrupt to the core.
248 */
249void meta_intc_unmask_irq_simple(struct irq_data *data)
250{
251 record_irq_is_unmasked(data);
252}
253
254
255/**
256 * meta_intc_mask_irq() - mask an external irq using HWMASKEXT
257 * @data: data for the external irq to mask
258 *
259 * This is a default implementation of a mask function which makes use of the
260 * HWMASKEXT registers available in newer versions.
261 *
262 * Earlier versions without these registers should use SoC level IRQ masking
263 * which call the meta_intc_*_simple() functions above, or if that isn't
264 * available should use the fallback meta_intc_*_nomask() functions below.
265 */
266static void meta_intc_mask_irq(struct irq_data *data)
267{
268 irq_hw_number_t hw = data->hwirq;
269 unsigned int bit = 1 << meta_intc_offset(hw);
270 void __iomem *mask_addr = meta_intc_mask_addr(hw);
271 unsigned long flags;
272
273 record_irq_is_masked(data);
274
275 /* update the interrupt mask */
276 __global_lock2(flags);
277 metag_out32(metag_in32(mask_addr) & ~bit, mask_addr);
278 __global_unlock2(flags);
279}
280
281/**
282 * meta_intc_unmask_irq() - unmask an external irq using HWMASKEXT
283 * @data: data for the external irq to unmask
284 *
285 * This is a default implementation of an unmask function which makes use of the
286 * HWMASKEXT registers available on new versions. It should be paired with
287 * meta_intc_mask_irq() above.
288 */
289static void meta_intc_unmask_irq(struct irq_data *data)
290{
291 irq_hw_number_t hw = data->hwirq;
292 unsigned int bit = 1 << meta_intc_offset(hw);
293 void __iomem *mask_addr = meta_intc_mask_addr(hw);
294 unsigned long flags;
295
296 record_irq_is_unmasked(data);
297
298 /* update the interrupt mask */
299 __global_lock2(flags);
300 metag_out32(metag_in32(mask_addr) | bit, mask_addr);
301 __global_unlock2(flags);
302}
303
304/**
305 * meta_intc_mask_irq_nomask() - mask an external irq by unvectoring
306 * @data: data for the external irq to mask
307 *
308 * This is the version of the mask function for older versions which don't have
309 * HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the IRQ is
310 * unvectored from the core and retriggered if necessary later.
311 */
312static void meta_intc_mask_irq_nomask(struct irq_data *data)
313{
314 irq_hw_number_t hw = data->hwirq;
315 void __iomem *vec_addr = meta_intc_vec_addr(hw);
316
317 record_irq_is_masked(data);
318
319 /* there is no interrupt mask, so unvector the interrupt */
320 metag_out32(0, vec_addr);
321}
322
323/**
324 * meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring
325 * @data: data for the external irq to unmask
326 *
327 * This is the version of the unmask function for older versions which don't
328 * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the
329 * IRQ is revectored back to the core and retriggered if necessary.
330 *
331 * The retriggering done by this function is specific to edge interrupts.
332 */
333static void meta_intc_unmask_edge_irq_nomask(struct irq_data *data)
334{
335 irq_hw_number_t hw = data->hwirq;
336 unsigned int bit = 1 << meta_intc_offset(hw);
337 void __iomem *stat_addr = meta_intc_stat_addr(hw);
338 void __iomem *vec_addr = meta_intc_vec_addr(hw);
339 unsigned int thread = hard_processor_id();
340
341 record_irq_is_unmasked(data);
342
343 /* there is no interrupt mask, so revector the interrupt */
344 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
345
346 /*
347 * Re-trigger interrupt
348 *
349 * Writing a 1 toggles, and a 0->1 transition triggers. We only
350 * retrigger if the status bit is already set, which means we
351 * need to clear it first. Retriggering is fundamentally racy
352 * because if the interrupt fires again after we clear it we
353 * could end up clearing it again and the interrupt handler
354 * thinking it hasn't fired. Therefore we need to keep trying to
355 * retrigger until the bit is set.
356 */
357 if (metag_in32(stat_addr) & bit) {
358 metag_out32(bit, stat_addr);
359 while (!(metag_in32(stat_addr) & bit))
360 metag_out32(bit, stat_addr);
361 }
362}
363
364/**
365 * meta_intc_unmask_level_irq_nomask() - unmask a level irq by revectoring
366 * @data: data for the external irq to unmask
367 *
368 * This is the version of the unmask function for older versions which don't
369 * have HWMASKEXT registers, or a SoC level means of masking IRQs. Instead the
370 * IRQ is revectored back to the core and retriggered if necessary.
371 *
372 * The retriggering done by this function is specific to level interrupts.
373 */
374static void meta_intc_unmask_level_irq_nomask(struct irq_data *data)
375{
376 irq_hw_number_t hw = data->hwirq;
377 unsigned int bit = 1 << meta_intc_offset(hw);
378 void __iomem *stat_addr = meta_intc_stat_addr(hw);
379 void __iomem *vec_addr = meta_intc_vec_addr(hw);
380 unsigned int thread = hard_processor_id();
381
382 record_irq_is_unmasked(data);
383
384 /* there is no interrupt mask, so revector the interrupt */
385 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
386
387 /* Re-trigger interrupt */
388 /* Writing a 1 triggers interrupt */
389 if (metag_in32(stat_addr) & bit)
390 metag_out32(bit, stat_addr);
391}
392
393/**
394 * meta_intc_irq_set_type() - set the type of an external irq
395 * @data: data for the external irq to set the type of
396 * @flow_type: new irq flow type
397 *
398 * Set the flow type of an external interrupt. This updates the irq chip and irq
399 * handler depending on whether the irq is edge or level sensitive (the polarity
400 * is ignored), and also sets up the bit in HWLEVELEXT so the hardware knows
401 * when to trigger.
402 */
403static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
404{
405#ifdef CONFIG_METAG_SUSPEND_MEM
406 struct meta_intc_priv *priv = &meta_intc_priv;
407#endif
408 irq_hw_number_t hw = data->hwirq;
409 unsigned int bit = 1 << meta_intc_offset(hw);
410 void __iomem *level_addr = meta_intc_level_addr(hw);
411 unsigned long flags;
412 unsigned int level;
413
414 /* update the chip/handler */
415 if (flow_type & IRQ_TYPE_LEVEL_MASK)
416 irq_set_chip_handler_name_locked(data, &meta_intc_level_chip,
417 handle_level_irq, NULL);
418 else
419 irq_set_chip_handler_name_locked(data, &meta_intc_edge_chip,
420 handle_edge_irq, NULL);
421
422 /* and clear/set the bit in HWLEVELEXT */
423 __global_lock2(flags);
424 level = metag_in32(level_addr);
425 if (flow_type & IRQ_TYPE_LEVEL_MASK)
426 level |= bit;
427 else
428 level &= ~bit;
429 metag_out32(level, level_addr);
430#ifdef CONFIG_METAG_SUSPEND_MEM
431 priv->levels_altered[meta_intc_bank(hw)] |= bit;
432#endif
433 __global_unlock2(flags);
434
435 return 0;
436}
437
438/**
439 * meta_intc_irq_demux() - external irq de-multiplexer
440 * @desc: the interrupt description structure for this irq
441 *
442 * The cpu receives an interrupt on TR2 when a SoC interrupt has occurred. It is
443 * this function's job to demux this irq and figure out exactly which external
444 * irq needs servicing.
445 *
446 * Whilst using TR2 to detect external interrupts is a software convention it is
447 * (hopefully) unlikely to change.
448 */
449static void meta_intc_irq_demux(struct irq_desc *desc)
450{
451 struct meta_intc_priv *priv = &meta_intc_priv;
452 irq_hw_number_t hw;
453 unsigned int bank, irq_no, status;
454 void __iomem *stat_addr = meta_intc_stat_addr(0);
455
456 /*
457 * Locate which interrupt has caused our handler to run.
458 */
459 for (bank = 0; bank < priv->nr_banks; ++bank) {
460 /* Which interrupts are currently pending in this bank? */
461recalculate:
462 status = metag_in32(stat_addr) & priv->unmasked[bank];
463
464 for (hw = bank*32; status; status >>= 1, ++hw) {
465 if (status & 0x1) {
466 /*
467 * Map the hardware IRQ number to a virtual
468 * Linux IRQ number.
469 */
470 irq_no = irq_linear_revmap(priv->domain, hw);
471
472 /*
473 * Only fire off external interrupts that are
474 * registered to be handled by the kernel.
475 * Other external interrupts are probably being
476 * handled by other Meta hardware threads.
477 */
478 generic_handle_irq(irq_no);
479
480 /*
481 * The handler may have re-enabled interrupts
482 * which could have caused a nested invocation
483 * of this code and make the copy of the
484 * status register we are using invalid.
485 */
486 goto recalculate;
487 }
488 }
489 stat_addr += HWSTAT_STRIDE;
490 }
491}
492
493#ifdef CONFIG_SMP
494/**
495 * meta_intc_set_affinity() - set the affinity for an interrupt
496 * @data: data for the external irq to set the affinity of
497 * @cpumask: cpu mask representing cpus which can handle the interrupt
498 * @force: whether to force (ignored)
499 *
500 * Revector the specified external irq onto a specific cpu's TR2 trigger, so
501 * that that cpu tends to be the one who handles it.
502 */
503static int meta_intc_set_affinity(struct irq_data *data,
504 const struct cpumask *cpumask, bool force)
505{
506 irq_hw_number_t hw = data->hwirq;
507 void __iomem *vec_addr = meta_intc_vec_addr(hw);
508 unsigned int cpu, thread;
509
510 /*
511 * Wire up this interrupt from HWVECxEXT to the Meta core.
512 *
513 * Note that we can't wire up HWVECxEXT to interrupt more than
514 * one cpu (the interrupt code doesn't support it), so we just
515 * pick the first cpu we find in 'cpumask'.
516 */
517 cpu = cpumask_any_and(cpumask, cpu_online_mask);
518 thread = cpu_2_hwthread_id[cpu];
519
520 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
521
522 irq_data_update_effective_affinity(data, cpumask_of(cpu));
523
524 return 0;
525}
526#else
527#define meta_intc_set_affinity NULL
528#endif
529
530#ifdef CONFIG_PM_SLEEP
531#define META_INTC_CHIP_FLAGS (IRQCHIP_MASK_ON_SUSPEND \
532 | IRQCHIP_SKIP_SET_WAKE)
533#else
534#define META_INTC_CHIP_FLAGS 0
535#endif
536
537/* public edge/level irq chips which SoCs can override */
538
539struct irq_chip meta_intc_edge_chip = {
540 .irq_startup = meta_intc_startup_irq,
541 .irq_shutdown = meta_intc_shutdown_irq,
542 .irq_ack = meta_intc_ack_irq,
543 .irq_mask = meta_intc_mask_irq,
544 .irq_unmask = meta_intc_unmask_irq,
545 .irq_set_type = meta_intc_irq_set_type,
546 .irq_set_affinity = meta_intc_set_affinity,
547 .flags = META_INTC_CHIP_FLAGS,
548};
549
550struct irq_chip meta_intc_level_chip = {
551 .irq_startup = meta_intc_startup_irq,
552 .irq_shutdown = meta_intc_shutdown_irq,
553 .irq_set_type = meta_intc_irq_set_type,
554 .irq_mask = meta_intc_mask_irq,
555 .irq_unmask = meta_intc_unmask_irq,
556 .irq_set_affinity = meta_intc_set_affinity,
557 .flags = META_INTC_CHIP_FLAGS,
558};
559
560/**
561 * meta_intc_map() - map an external irq
562 * @d: irq domain of external trigger block
563 * @irq: virtual irq number
564 * @hw: hardware irq number within external trigger block
565 *
566 * This sets up a virtual irq for a specified hardware interrupt. The irq chip
567 * and handler is configured, using the HWLEVELEXT registers to determine
568 * edge/level flow type. These registers will have been set when the irq type is
569 * set (or set to a default at init time).
570 */
571static int meta_intc_map(struct irq_domain *d, unsigned int irq,
572 irq_hw_number_t hw)
573{
574 unsigned int bit = 1 << meta_intc_offset(hw);
575 void __iomem *level_addr = meta_intc_level_addr(hw);
576
577 /* Go by the current sense in the HWLEVELEXT register */
578 if (metag_in32(level_addr) & bit)
579 irq_set_chip_and_handler(irq, &meta_intc_level_chip,
580 handle_level_irq);
581 else
582 irq_set_chip_and_handler(irq, &meta_intc_edge_chip,
583 handle_edge_irq);
584
585 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
586 return 0;
587}
588
589static const struct irq_domain_ops meta_intc_domain_ops = {
590 .map = meta_intc_map,
591 .xlate = irq_domain_xlate_twocell,
592};
593
594#ifdef CONFIG_METAG_SUSPEND_MEM
595
596/**
597 * struct meta_intc_context - suspend context
598 * @levels: State of HWLEVELEXT registers
599 * @masks: State of HWMASKEXT registers
600 * @vectors: State of HWVECEXT registers
601 * @txvecint: State of TxVECINT registers
602 *
603 * This structure stores the IRQ state across suspend.
604 */
605struct meta_intc_context {
606 u32 levels[4];
607 u32 masks[4];
608 u8 vectors[4*32];
609
610 u8 txvecint[4][4];
611};
612
613/* suspend context */
614static struct meta_intc_context *meta_intc_context;
615
616/**
617 * meta_intc_suspend() - store irq state
618 *
619 * To avoid interfering with other threads we only save the IRQ state of IRQs in
620 * use by Linux.
621 */
622static int meta_intc_suspend(void)
623{
624 struct meta_intc_priv *priv = &meta_intc_priv;
625 int i, j;
626 irq_hw_number_t hw;
627 unsigned int bank;
628 unsigned long flags;
629 struct meta_intc_context *context;
630 void __iomem *level_addr, *mask_addr, *vec_addr;
631 u32 mask, bit;
632
633 context = kzalloc(sizeof(*context), GFP_ATOMIC);
634 if (!context)
635 return -ENOMEM;
636
637 hw = 0;
638 level_addr = meta_intc_level_addr(0);
639 mask_addr = meta_intc_mask_addr(0);
640 for (bank = 0; bank < priv->nr_banks; ++bank) {
641 vec_addr = meta_intc_vec_addr(hw);
642
643 /* create mask of interrupts in use */
644 mask = 0;
645 for (bit = 1; bit; bit <<= 1) {
646 i = irq_linear_revmap(priv->domain, hw);
647 /* save mapped irqs which are enabled or have actions */
648 if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
649 irq_has_action(i))) {
650 mask |= bit;
651
652 /* save trigger vector */
653 context->vectors[hw] = metag_in32(vec_addr);
654 }
655
656 ++hw;
657 vec_addr += HWVECnEXT_STRIDE;
658 }
659
660 /* save level state if any IRQ levels altered */
661 if (priv->levels_altered[bank])
662 context->levels[bank] = metag_in32(level_addr);
663 /* save mask state if any IRQs in use */
664 if (mask)
665 context->masks[bank] = metag_in32(mask_addr);
666
667 level_addr += HWSTAT_STRIDE;
668 mask_addr += HWSTAT_STRIDE;
669 }
670
671 /* save trigger matrixing */
672 __global_lock2(flags);
673 for (i = 0; i < 4; ++i)
674 for (j = 0; j < 4; ++j)
675 context->txvecint[i][j] = metag_in32(T0VECINT_BHALT +
676 TnVECINT_STRIDE*i +
677 8*j);
678 __global_unlock2(flags);
679
680 meta_intc_context = context;
681 return 0;
682}
683
684/**
685 * meta_intc_resume() - restore saved irq state
686 *
687 * Restore the saved IRQ state and drop it.
688 */
689static void meta_intc_resume(void)
690{
691 struct meta_intc_priv *priv = &meta_intc_priv;
692 int i, j;
693 irq_hw_number_t hw;
694 unsigned int bank;
695 unsigned long flags;
696 struct meta_intc_context *context = meta_intc_context;
697 void __iomem *level_addr, *mask_addr, *vec_addr;
698 u32 mask, bit, tmp;
699
700 meta_intc_context = NULL;
701
702 hw = 0;
703 level_addr = meta_intc_level_addr(0);
704 mask_addr = meta_intc_mask_addr(0);
705 for (bank = 0; bank < priv->nr_banks; ++bank) {
706 vec_addr = meta_intc_vec_addr(hw);
707
708 /* create mask of interrupts in use */
709 mask = 0;
710 for (bit = 1; bit; bit <<= 1) {
711 i = irq_linear_revmap(priv->domain, hw);
712 /* restore mapped irqs, enabled or with actions */
713 if (i && (!irqd_irq_disabled(irq_get_irq_data(i)) ||
714 irq_has_action(i))) {
715 mask |= bit;
716
717 /* restore trigger vector */
718 metag_out32(context->vectors[hw], vec_addr);
719 }
720
721 ++hw;
722 vec_addr += HWVECnEXT_STRIDE;
723 }
724
725 if (mask) {
726 /* restore mask state */
727 __global_lock2(flags);
728 tmp = metag_in32(mask_addr);
729 tmp = (tmp & ~mask) | (context->masks[bank] & mask);
730 metag_out32(tmp, mask_addr);
731 __global_unlock2(flags);
732 }
733
734 mask = priv->levels_altered[bank];
735 if (mask) {
736 /* restore level state */
737 __global_lock2(flags);
738 tmp = metag_in32(level_addr);
739 tmp = (tmp & ~mask) | (context->levels[bank] & mask);
740 metag_out32(tmp, level_addr);
741 __global_unlock2(flags);
742 }
743
744 level_addr += HWSTAT_STRIDE;
745 mask_addr += HWSTAT_STRIDE;
746 }
747
748 /* restore trigger matrixing */
749 __global_lock2(flags);
750 for (i = 0; i < 4; ++i) {
751 for (j = 0; j < 4; ++j) {
752 metag_out32(context->txvecint[i][j],
753 T0VECINT_BHALT +
754 TnVECINT_STRIDE*i +
755 8*j);
756 }
757 }
758 __global_unlock2(flags);
759
760 kfree(context);
761}
762
763static struct syscore_ops meta_intc_syscore_ops = {
764 .suspend = meta_intc_suspend,
765 .resume = meta_intc_resume,
766};
767
768static void __init meta_intc_init_syscore_ops(struct meta_intc_priv *priv)
769{
770 register_syscore_ops(&meta_intc_syscore_ops);
771}
772#else
773#define meta_intc_init_syscore_ops(priv) do {} while (0)
774#endif
775
776/**
777 * meta_intc_init_cpu() - register with a Meta cpu
778 * @priv: private interrupt controller data
779 * @cpu: the CPU to register on
780 *
781 * Configure @cpu's TR2 irq so that we can demux external irqs.
782 */
783static void __init meta_intc_init_cpu(struct meta_intc_priv *priv, int cpu)
784{
785 unsigned int thread = cpu_2_hwthread_id[cpu];
786 unsigned int signum = TBID_SIGNUM_TR2(thread);
787 int irq = tbisig_map(signum);
788
789 /* Register the multiplexed IRQ handler */
790 irq_set_chained_handler(irq, meta_intc_irq_demux);
791 irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
792}
793
794/**
795 * meta_intc_no_mask() - indicate lack of HWMASKEXT registers
796 *
797 * Called from SoC code (or init code below) to dynamically indicate the lack of
798 * HWMASKEXT registers (for example depending on some SoC revision register).
799 * This alters the irq mask and unmask callbacks to use the fallback
800 * unvectoring/retriggering technique instead of using HWMASKEXT registers.
801 */
802void __init meta_intc_no_mask(void)
803{
804 meta_intc_edge_chip.irq_mask = meta_intc_mask_irq_nomask;
805 meta_intc_edge_chip.irq_unmask = meta_intc_unmask_edge_irq_nomask;
806 meta_intc_level_chip.irq_mask = meta_intc_mask_irq_nomask;
807 meta_intc_level_chip.irq_unmask = meta_intc_unmask_level_irq_nomask;
808}
809
810/**
811 * init_external_IRQ() - initialise the external irq controller
812 *
813 * Set up the external irq controller using device tree properties. This is
814 * called from init_IRQ().
815 */
816int __init init_external_IRQ(void)
817{
818 struct meta_intc_priv *priv = &meta_intc_priv;
819 struct device_node *node;
820 int ret, cpu;
821 u32 val;
822 bool no_masks = false;
823
824 node = of_find_compatible_node(NULL, NULL, "img,meta-intc");
825 if (!node)
826 return -ENOENT;
827
828 /* Get number of banks */
829 ret = of_property_read_u32(node, "num-banks", &val);
830 if (ret) {
831 pr_err("meta-intc: No num-banks property found\n");
832 return ret;
833 }
834 if (val < 1 || val > 4) {
835 pr_err("meta-intc: num-banks (%u) out of range\n", val);
836 return -EINVAL;
837 }
838 priv->nr_banks = val;
839
840 /* Are any mask registers present? */
841 if (of_get_property(node, "no-mask", NULL))
842 no_masks = true;
843
844 /* No HWMASKEXT registers present? */
845 if (no_masks)
846 meta_intc_no_mask();
847
848 /* Set up an IRQ domain */
849 /*
850 * This is a legacy IRQ domain for now until all the platform setup code
851 * has been converted to devicetree.
852 */
853 priv->domain = irq_domain_add_linear(node, priv->nr_banks*32,
854 &meta_intc_domain_ops, priv);
855 if (unlikely(!priv->domain)) {
856 pr_err("meta-intc: cannot add IRQ domain\n");
857 return -ENOMEM;
858 }
859
860 /* Setup TR2 for all cpus. */
861 for_each_possible_cpu(cpu)
862 meta_intc_init_cpu(priv, cpu);
863
864 /* Set up system suspend/resume callbacks */
865 meta_intc_init_syscore_ops(priv);
866
867 pr_info("meta-intc: External IRQ controller initialised (%u IRQs)\n",
868 priv->nr_banks*32);
869
870 return 0;
871}
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
deleted file mode 100644
index 857b946747eb..000000000000
--- a/drivers/irqchip/irq-metag.c
+++ /dev/null
@@ -1,343 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Meta internal (HWSTATMETA) interrupt code.
4 *
5 * Copyright (C) 2011-2012 Imagination Technologies Ltd.
6 *
7 * This code is based on the code in SoC/common/irq.c and SoC/comet/irq.c
8 * The code base could be generalised/merged as a lot of the functionality is
9 * similar. Until this is done, we try to keep the code simple here.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irqdomain.h>
15
16#include <asm/irq.h>
17#include <asm/hwthread.h>
18
19#define PERF0VECINT 0x04820580
20#define PERF1VECINT 0x04820588
21#define PERF0TRIG_OFFSET 16
22#define PERF1TRIG_OFFSET 17
23
24/**
25 * struct metag_internal_irq_priv - private meta internal interrupt data
26 * @domain: IRQ domain for all internal Meta IRQs (HWSTATMETA)
27 * @unmasked: Record of unmasked IRQs
28 */
29struct metag_internal_irq_priv {
30 struct irq_domain *domain;
31
32 unsigned long unmasked;
33};
34
35/* Private data for the one and only internal interrupt controller */
36static struct metag_internal_irq_priv metag_internal_irq_priv;
37
38static unsigned int metag_internal_irq_startup(struct irq_data *data);
39static void metag_internal_irq_shutdown(struct irq_data *data);
40static void metag_internal_irq_ack(struct irq_data *data);
41static void metag_internal_irq_mask(struct irq_data *data);
42static void metag_internal_irq_unmask(struct irq_data *data);
43#ifdef CONFIG_SMP
44static int metag_internal_irq_set_affinity(struct irq_data *data,
45 const struct cpumask *cpumask, bool force);
46#endif
47
48static struct irq_chip internal_irq_edge_chip = {
49 .name = "HWSTATMETA-IRQ",
50 .irq_startup = metag_internal_irq_startup,
51 .irq_shutdown = metag_internal_irq_shutdown,
52 .irq_ack = metag_internal_irq_ack,
53 .irq_mask = metag_internal_irq_mask,
54 .irq_unmask = metag_internal_irq_unmask,
55#ifdef CONFIG_SMP
56 .irq_set_affinity = metag_internal_irq_set_affinity,
57#endif
58};
59
60/*
61 * metag_hwvec_addr - get the address of *VECINT regs of irq
62 *
63 * This function is a table of supported triggers on HWSTATMETA
64 * Could do with a structure, but better keep it simple. Changes
65 * in this code should be rare.
66 */
67static inline void __iomem *metag_hwvec_addr(irq_hw_number_t hw)
68{
69 void __iomem *addr;
70
71 switch (hw) {
72 case PERF0TRIG_OFFSET:
73 addr = (void __iomem *)PERF0VECINT;
74 break;
75 case PERF1TRIG_OFFSET:
76 addr = (void __iomem *)PERF1VECINT;
77 break;
78 default:
79 addr = NULL;
80 break;
81 }
82 return addr;
83}
84
85/*
86 * metag_internal_startup - setup an internal irq
87 * @irq: the irq to startup
88 *
89 * Multiplex interrupts for @irq onto TR1. Clear any pending
90 * interrupts.
91 */
92static unsigned int metag_internal_irq_startup(struct irq_data *data)
93{
94 /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */
95 metag_internal_irq_ack(data);
96
97 /* Enable the interrupt by unmasking it */
98 metag_internal_irq_unmask(data);
99
100 return 0;
101}
102
103/*
104 * metag_internal_irq_shutdown - turn off the irq
105 * @irq: the irq number to turn off
106 *
107 * Mask @irq and clear any pending interrupts.
108 * Stop muxing @irq onto TR1.
109 */
110static void metag_internal_irq_shutdown(struct irq_data *data)
111{
112 /* Disable the IRQ at the core by masking it. */
113 metag_internal_irq_mask(data);
114
115 /* Clear (toggle) the bit in HWSTATMETA for our interrupt. */
116 metag_internal_irq_ack(data);
117}
118
119/*
120 * metag_internal_irq_ack - acknowledge irq
121 * @irq: the irq to ack
122 */
123static void metag_internal_irq_ack(struct irq_data *data)
124{
125 irq_hw_number_t hw = data->hwirq;
126 unsigned int bit = 1 << hw;
127
128 if (metag_in32(HWSTATMETA) & bit)
129 metag_out32(bit, HWSTATMETA);
130}
131
132/**
133 * metag_internal_irq_mask() - mask an internal irq by unvectoring
134 * @data: data for the internal irq to mask
135 *
136 * HWSTATMETA has no mask register. Instead the IRQ is unvectored from the core
137 * and retriggered if necessary later.
138 */
139static void metag_internal_irq_mask(struct irq_data *data)
140{
141 struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
142 irq_hw_number_t hw = data->hwirq;
143 void __iomem *vec_addr = metag_hwvec_addr(hw);
144
145 clear_bit(hw, &priv->unmasked);
146
147 /* there is no interrupt mask, so unvector the interrupt */
148 metag_out32(0, vec_addr);
149}
150
151/**
152 * meta_intc_unmask_edge_irq_nomask() - unmask an edge irq by revectoring
153 * @data: data for the internal irq to unmask
154 *
155 * HWSTATMETA has no mask register. Instead the IRQ is revectored back to the
156 * core and retriggered if necessary.
157 */
158static void metag_internal_irq_unmask(struct irq_data *data)
159{
160 struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
161 irq_hw_number_t hw = data->hwirq;
162 unsigned int bit = 1 << hw;
163 void __iomem *vec_addr = metag_hwvec_addr(hw);
164 unsigned int thread = hard_processor_id();
165
166 set_bit(hw, &priv->unmasked);
167
168 /* there is no interrupt mask, so revector the interrupt */
169 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)), vec_addr);
170
171 /*
172 * Re-trigger interrupt
173 *
174 * Writing a 1 toggles, and a 0->1 transition triggers. We only
175 * retrigger if the status bit is already set, which means we
176 * need to clear it first. Retriggering is fundamentally racy
177 * because if the interrupt fires again after we clear it we
178 * could end up clearing it again and the interrupt handler
179 * thinking it hasn't fired. Therefore we need to keep trying to
180 * retrigger until the bit is set.
181 */
182 if (metag_in32(HWSTATMETA) & bit) {
183 metag_out32(bit, HWSTATMETA);
184 while (!(metag_in32(HWSTATMETA) & bit))
185 metag_out32(bit, HWSTATMETA);
186 }
187}
188
189#ifdef CONFIG_SMP
190/*
191 * metag_internal_irq_set_affinity - set the affinity for an interrupt
192 */
193static int metag_internal_irq_set_affinity(struct irq_data *data,
194 const struct cpumask *cpumask, bool force)
195{
196 unsigned int cpu, thread;
197 irq_hw_number_t hw = data->hwirq;
198 /*
199 * Wire up this interrupt from *VECINT to the Meta core.
200 *
201 * Note that we can't wire up *VECINT to interrupt more than
202 * one cpu (the interrupt code doesn't support it), so we just
203 * pick the first cpu we find in 'cpumask'.
204 */
205 cpu = cpumask_any_and(cpumask, cpu_online_mask);
206 thread = cpu_2_hwthread_id[cpu];
207
208 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
209 metag_hwvec_addr(hw));
210
211 return 0;
212}
213#endif
214
215/*
216 * metag_internal_irq_demux - irq de-multiplexer
217 * @irq: the interrupt number
218 * @desc: the interrupt description structure for this irq
219 *
220 * The cpu receives an interrupt on TR1 when an interrupt has
221 * occurred. It is this function's job to demux this irq and
222 * figure out exactly which trigger needs servicing.
223 */
224static void metag_internal_irq_demux(struct irq_desc *desc)
225{
226 struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc);
227 irq_hw_number_t hw;
228 unsigned int irq_no;
229 u32 status;
230
231recalculate:
232 status = metag_in32(HWSTATMETA) & priv->unmasked;
233
234 for (hw = 0; status != 0; status >>= 1, ++hw) {
235 if (status & 0x1) {
236 /*
237 * Map the hardware IRQ number to a virtual Linux IRQ
238 * number.
239 */
240 irq_no = irq_linear_revmap(priv->domain, hw);
241
242 /*
243 * Only fire off interrupts that are
244 * registered to be handled by the kernel.
245 * Other interrupts are probably being
246 * handled by other Meta hardware threads.
247 */
248 generic_handle_irq(irq_no);
249
250 /*
251 * The handler may have re-enabled interrupts
252 * which could have caused a nested invocation
253 * of this code and make the copy of the
254 * status register we are using invalid.
255 */
256 goto recalculate;
257 }
258 }
259}
260
261/**
262 * internal_irq_map() - Map an internal meta IRQ to a virtual IRQ number.
263 * @hw: Number of the internal IRQ. Must be in range.
264 *
265 * Returns: The virtual IRQ number of the Meta internal IRQ specified by
266 * @hw.
267 */
268int internal_irq_map(unsigned int hw)
269{
270 struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
271 if (!priv->domain)
272 return -ENODEV;
273 return irq_create_mapping(priv->domain, hw);
274}
275
276/**
277 * metag_internal_irq_init_cpu - regsister with the Meta cpu
278 * @cpu: the CPU to register on
279 *
280 * Configure @cpu's TR1 irq so that we can demux irqs.
281 */
282static void metag_internal_irq_init_cpu(struct metag_internal_irq_priv *priv,
283 int cpu)
284{
285 unsigned int thread = cpu_2_hwthread_id[cpu];
286 unsigned int signum = TBID_SIGNUM_TR1(thread);
287 int irq = tbisig_map(signum);
288
289 /* Register the multiplexed IRQ handler */
290 irq_set_chained_handler_and_data(irq, metag_internal_irq_demux, priv);
291 irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW);
292}
293
294/**
295 * metag_internal_intc_map() - map an internal irq
296 * @d: irq domain of internal trigger block
297 * @irq: virtual irq number
298 * @hw: hardware irq number within internal trigger block
299 *
300 * This sets up a virtual irq for a specified hardware interrupt. The irq chip
301 * and handler is configured.
302 */
303static int metag_internal_intc_map(struct irq_domain *d, unsigned int irq,
304 irq_hw_number_t hw)
305{
306 /* only register interrupt if it is mapped */
307 if (!metag_hwvec_addr(hw))
308 return -EINVAL;
309
310 irq_set_chip_and_handler(irq, &internal_irq_edge_chip,
311 handle_edge_irq);
312 return 0;
313}
314
315static const struct irq_domain_ops metag_internal_intc_domain_ops = {
316 .map = metag_internal_intc_map,
317};
318
319/**
320 * metag_internal_irq_register - register internal IRQs
321 *
322 * Register the irq chip and handler function for all internal IRQs
323 */
324int __init init_internal_IRQ(void)
325{
326 struct metag_internal_irq_priv *priv = &metag_internal_irq_priv;
327 unsigned int cpu;
328
329 /* Set up an IRQ domain */
330 priv->domain = irq_domain_add_linear(NULL, 32,
331 &metag_internal_intc_domain_ops,
332 priv);
333 if (unlikely(!priv->domain)) {
334 pr_err("meta-internal-intc: cannot add IRQ domain\n");
335 return -ENOMEM;
336 }
337
338 /* Setup TR1 for all cpus. */
339 for_each_possible_cpu(cpu)
340 metag_internal_irq_init_cpu(priv, cpu);
341
342 return 0;
343};
diff --git a/drivers/media/rc/img-ir/Kconfig b/drivers/media/rc/img-ir/Kconfig
index a896d3c83a1c..d2c6617d468e 100644
--- a/drivers/media/rc/img-ir/Kconfig
+++ b/drivers/media/rc/img-ir/Kconfig
@@ -1,7 +1,7 @@
1config IR_IMG 1config IR_IMG
2 tristate "ImgTec IR Decoder" 2 tristate "ImgTec IR Decoder"
3 depends on RC_CORE 3 depends on RC_CORE
4 depends on METAG || MIPS || COMPILE_TEST 4 depends on MIPS || COMPILE_TEST
5 select IR_IMG_HW if !IR_IMG_RAW 5 select IR_IMG_HW if !IR_IMG_RAW
6 help 6 help
7 Say Y or M here if you want to use the ImgTec infrared decoder 7 Say Y or M here if you want to use the ImgTec infrared decoder
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b811442c5ce6..75a71ebcb369 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -402,19 +402,6 @@ config GOLDFISH_TTY_EARLY_CONSOLE
402 default y if GOLDFISH_TTY=y 402 default y if GOLDFISH_TTY=y
403 select SERIAL_EARLYCON 403 select SERIAL_EARLYCON
404 404
405config DA_TTY
406 bool "DA TTY"
407 depends on METAG_DA
408 select SERIAL_NONSTANDARD
409 help
410 This enables a TTY on a Dash channel.
411
412config DA_CONSOLE
413 bool "DA Console"
414 depends on DA_TTY
415 help
416 This enables a console on a Dash channel.
417
418config MIPS_EJTAG_FDC_TTY 405config MIPS_EJTAG_FDC_TTY
419 bool "MIPS EJTAG Fast Debug Channel TTY" 406 bool "MIPS EJTAG Fast Debug Channel TTY"
420 depends on MIPS_CDMM 407 depends on MIPS_CDMM
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 8ce3a8661b31..47c71f43a397 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_SYNCLINKMP) += synclinkmp.o
32obj-$(CONFIG_SYNCLINK) += synclink.o 32obj-$(CONFIG_SYNCLINK) += synclink.o
33obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o 33obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
34obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o 34obj-$(CONFIG_GOLDFISH_TTY) += goldfish.o
35obj-$(CONFIG_DA_TTY) += metag_da.o
36obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o 35obj-$(CONFIG_MIPS_EJTAG_FDC_TTY) += mips_ejtag_fdc.o
37obj-$(CONFIG_VCC) += vcc.o 36obj-$(CONFIG_VCC) += vcc.o
38 37
diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c
deleted file mode 100644
index 99eaed4b2dbc..000000000000
--- a/drivers/tty/metag_da.c
+++ /dev/null
@@ -1,665 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * dashtty.c - tty driver for Dash channels interface.
4 *
5 * Copyright (C) 2007,2008,2012 Imagination Technologies
6 */
7
8#include <linux/atomic.h>
9#include <linux/completion.h>
10#include <linux/console.h>
11#include <linux/delay.h>
12#include <linux/export.h>
13#include <linux/init.h>
14#include <linux/kernel.h>
15#include <linux/kthread.h>
16#include <linux/moduleparam.h>
17#include <linux/mutex.h>
18#include <linux/sched.h>
19#include <linux/serial.h>
20#include <linux/slab.h>
21#include <linux/spinlock.h>
22#include <linux/string.h>
23#include <linux/timer.h>
24#include <linux/tty.h>
25#include <linux/tty_driver.h>
26#include <linux/tty_flip.h>
27#include <linux/uaccess.h>
28
29#include <asm/da.h>
30
31/* Channel error codes */
32#define CONAOK 0
33#define CONERR 1
34#define CONBAD 2
35#define CONPRM 3
36#define CONADR 4
37#define CONCNT 5
38#define CONCBF 6
39#define CONCBE 7
40#define CONBSY 8
41
42/* Default channel for the console */
43#define CONSOLE_CHANNEL 1
44
45#define NUM_TTY_CHANNELS 6
46
47/* Auto allocate */
48#define DA_TTY_MAJOR 0
49
50/* A speedy poll rate helps the userland debug process connection response.
51 * But, if you set it too high then no other userland processes get much
52 * of a look in.
53 */
54#define DA_TTY_POLL (HZ / 50)
55
56/*
57 * A short put delay improves latency but has a high throughput overhead
58 */
59#define DA_TTY_PUT_DELAY (HZ / 100)
60
61static atomic_t num_channels_need_poll = ATOMIC_INIT(0);
62
63static struct timer_list poll_timer;
64
65static struct tty_driver *channel_driver;
66
67static struct timer_list put_timer;
68static struct task_struct *dashtty_thread;
69
70/*
71 * The console_poll parameter determines whether the console channel should be
72 * polled for input.
73 * By default the console channel isn't polled at all, in order to avoid the
74 * overhead, but that means it isn't possible to have a login on /dev/console.
75 */
76static bool console_poll;
77module_param(console_poll, bool, S_IRUGO);
78
79#define RX_BUF_SIZE 1024
80
81enum {
82 INCHR = 1,
83 OUTCHR,
84 RDBUF,
85 WRBUF,
86 RDSTAT
87};
88
89/**
90 * struct dashtty_port - Wrapper struct for dashtty tty_port.
91 * @port: TTY port data
92 * @rx_lock: Lock for rx_buf.
93 * This protects between the poll timer and user context.
94 * It's also held during read SWITCH operations.
95 * @rx_buf: Read buffer
96 * @xmit_lock: Lock for xmit_*, and port.xmit_buf.
97 * This protects between user context and kernel thread.
98 * It's also held during write SWITCH operations.
99 * @xmit_cnt: Size of xmit buffer contents
100 * @xmit_head: Head of xmit buffer where data is written
101 * @xmit_tail: Tail of xmit buffer where data is read
102 * @xmit_empty: Completion for xmit buffer being empty
103 */
104struct dashtty_port {
105 struct tty_port port;
106 spinlock_t rx_lock;
107 void *rx_buf;
108 struct mutex xmit_lock;
109 unsigned int xmit_cnt;
110 unsigned int xmit_head;
111 unsigned int xmit_tail;
112 struct completion xmit_empty;
113};
114
115static struct dashtty_port dashtty_ports[NUM_TTY_CHANNELS];
116
117static atomic_t dashtty_xmit_cnt = ATOMIC_INIT(0);
118static wait_queue_head_t dashtty_waitqueue;
119
120/*
121 * Low-level DA channel access routines
122 */
123static int chancall(int in_bios_function, int in_channel,
124 int in_arg2, void *in_arg3,
125 void *in_arg4)
126{
127 register int bios_function asm("D1Ar1") = in_bios_function;
128 register int channel asm("D0Ar2") = in_channel;
129 register int arg2 asm("D1Ar3") = in_arg2;
130 register void *arg3 asm("D0Ar4") = in_arg3;
131 register void *arg4 asm("D1Ar5") = in_arg4;
132 register int bios_call asm("D0Ar6") = 3;
133 register int result asm("D0Re0");
134
135 asm volatile (
136 "MSETL [A0StP++], %6,%4,%2\n\t"
137 "ADD A0StP, A0StP, #8\n\t"
138 "SWITCH #0x0C30208\n\t"
139 "GETD %0, [A0StP+#-8]\n\t"
140 "SUB A0StP, A0StP, #(4*6)+8\n\t"
141 : "=d" (result) /* outs */
142 : "d" (bios_function),
143 "d" (channel),
144 "d" (arg2),
145 "d" (arg3),
146 "d" (arg4),
147 "d" (bios_call) /* ins */
148 : "memory");
149
150 return result;
151}
152
153/*
154 * Attempts to fetch count bytes from channel and returns actual count.
155 */
156static int fetch_data(unsigned int channel)
157{
158 struct dashtty_port *dport = &dashtty_ports[channel];
159 int received = 0;
160
161 spin_lock_bh(&dport->rx_lock);
162 /* check the port isn't being shut down */
163 if (!dport->rx_buf)
164 goto unlock;
165 if (chancall(RDBUF, channel, RX_BUF_SIZE,
166 (void *)dport->rx_buf, &received) == CONAOK) {
167 if (received) {
168 int space;
169 unsigned char *cbuf;
170
171 space = tty_prepare_flip_string(&dport->port, &cbuf,
172 received);
173
174 if (space <= 0)
175 goto unlock;
176
177 memcpy(cbuf, dport->rx_buf, space);
178 tty_flip_buffer_push(&dport->port);
179 }
180 }
181unlock:
182 spin_unlock_bh(&dport->rx_lock);
183
184 return received;
185}
186
187/**
188 * find_channel_to_poll() - Returns number of the next channel to poll.
189 * Returns: The number of the next channel to poll, or -1 if none need
190 * polling.
191 */
192static int find_channel_to_poll(void)
193{
194 static int last_polled_channel;
195 int last = last_polled_channel;
196 int chan;
197 struct dashtty_port *dport;
198
199 for (chan = last + 1; ; ++chan) {
200 if (chan >= NUM_TTY_CHANNELS)
201 chan = 0;
202
203 dport = &dashtty_ports[chan];
204 if (dport->rx_buf) {
205 last_polled_channel = chan;
206 return chan;
207 }
208
209 if (chan == last)
210 break;
211 }
212 return -1;
213}
214
215/**
216 * put_channel_data() - Write out a block of channel data.
217 * @chan: DA channel number.
218 *
219 * Write a single block of data out to the debug adapter. If the circular buffer
220 * is wrapped then only the first block is written.
221 *
222 * Returns: 1 if the remote buffer was too full to accept data.
223 * 0 otherwise.
224 */
225static int put_channel_data(unsigned int chan)
226{
227 struct dashtty_port *dport;
228 struct tty_struct *tty;
229 int number_written;
230 unsigned int count = 0;
231
232 dport = &dashtty_ports[chan];
233 mutex_lock(&dport->xmit_lock);
234 if (dport->xmit_cnt) {
235 count = min((unsigned int)(SERIAL_XMIT_SIZE - dport->xmit_tail),
236 dport->xmit_cnt);
237 chancall(WRBUF, chan, count,
238 dport->port.xmit_buf + dport->xmit_tail,
239 &number_written);
240 dport->xmit_cnt -= number_written;
241 if (!dport->xmit_cnt) {
242 /* reset pointers to avoid wraps */
243 dport->xmit_head = 0;
244 dport->xmit_tail = 0;
245 complete(&dport->xmit_empty);
246 } else {
247 dport->xmit_tail += number_written;
248 if (dport->xmit_tail >= SERIAL_XMIT_SIZE)
249 dport->xmit_tail -= SERIAL_XMIT_SIZE;
250 }
251 atomic_sub(number_written, &dashtty_xmit_cnt);
252 }
253 mutex_unlock(&dport->xmit_lock);
254
255 /* if we've made more data available, wake up tty */
256 if (count && number_written) {
257 tty = tty_port_tty_get(&dport->port);
258 if (tty) {
259 tty_wakeup(tty);
260 tty_kref_put(tty);
261 }
262 }
263
264 /* did the write fail? */
265 return count && !number_written;
266}
267
268/**
269 * put_data() - Kernel thread to write out blocks of channel data to DA.
270 * @arg: Unused.
271 *
272 * This kernel thread runs while @dashtty_xmit_cnt != 0, and loops over the
273 * channels to write out any buffered data. If any of the channels stall due to
274 * the remote buffer being full, a hold off happens to allow the debugger to
275 * drain the buffer.
276 */
277static int put_data(void *arg)
278{
279 unsigned int chan, stall;
280
281 __set_current_state(TASK_RUNNING);
282 while (!kthread_should_stop()) {
283 /*
284 * For each channel see if there's anything to transmit in the
285 * port's xmit_buf.
286 */
287 stall = 0;
288 for (chan = 0; chan < NUM_TTY_CHANNELS; ++chan)
289 stall += put_channel_data(chan);
290
291 /*
292 * If some of the buffers are full, hold off for a short while
293 * to allow them to empty.
294 */
295 if (stall)
296 msleep(25);
297
298 wait_event_interruptible(dashtty_waitqueue,
299 atomic_read(&dashtty_xmit_cnt));
300 }
301
302 return 0;
303}
304
305/*
306 * This gets called every DA_TTY_POLL and polls the channels for data
307 */
308static void dashtty_timer(struct timer_list *poll_timer)
309{
310 int channel;
311
312 /* If there are no ports open do nothing and don't poll again. */
313 if (!atomic_read(&num_channels_need_poll))
314 return;
315
316 channel = find_channel_to_poll();
317
318 /* Did we find a channel to poll? */
319 if (channel >= 0)
320 fetch_data(channel);
321
322 mod_timer(poll_timer, jiffies + DA_TTY_POLL);
323}
324
325static void add_poll_timer(struct timer_list *poll_timer)
326{
327 timer_setup(poll_timer, dashtty_timer, TIMER_PINNED);
328 poll_timer->expires = jiffies + DA_TTY_POLL;
329
330 /*
331 * Always attach the timer to the boot CPU. The DA channels are per-CPU
332 * so all polling should be from a single CPU.
333 */
334 add_timer_on(poll_timer, 0);
335}
336
337static int dashtty_port_activate(struct tty_port *port, struct tty_struct *tty)
338{
339 struct dashtty_port *dport = container_of(port, struct dashtty_port,
340 port);
341 void *rx_buf;
342
343 /* Allocate the buffer we use for writing data */
344 if (tty_port_alloc_xmit_buf(port) < 0)
345 goto err;
346
347 /* Allocate the buffer we use for reading data */
348 rx_buf = kzalloc(RX_BUF_SIZE, GFP_KERNEL);
349 if (!rx_buf)
350 goto err_free_xmit;
351
352 spin_lock_bh(&dport->rx_lock);
353 dport->rx_buf = rx_buf;
354 spin_unlock_bh(&dport->rx_lock);
355
356 /*
357 * Don't add the poll timer if we're opening a console. This
358 * avoids the overhead of polling the Dash but means it is not
359 * possible to have a login on /dev/console.
360 *
361 */
362 if (console_poll || dport != &dashtty_ports[CONSOLE_CHANNEL])
363 if (atomic_inc_return(&num_channels_need_poll) == 1)
364 add_poll_timer(&poll_timer);
365
366 return 0;
367err_free_xmit:
368 tty_port_free_xmit_buf(port);
369err:
370 return -ENOMEM;
371}
372
373static void dashtty_port_shutdown(struct tty_port *port)
374{
375 struct dashtty_port *dport = container_of(port, struct dashtty_port,
376 port);
377 void *rx_buf;
378 unsigned int count;
379
380 /* stop reading */
381 if (console_poll || dport != &dashtty_ports[CONSOLE_CHANNEL])
382 if (atomic_dec_and_test(&num_channels_need_poll))
383 del_timer_sync(&poll_timer);
384
385 mutex_lock(&dport->xmit_lock);
386 count = dport->xmit_cnt;
387 mutex_unlock(&dport->xmit_lock);
388 if (count) {
389 /*
390 * There's still data to write out, so wake and wait for the
391 * writer thread to drain the buffer.
392 */
393 del_timer(&put_timer);
394 wake_up_interruptible(&dashtty_waitqueue);
395 wait_for_completion(&dport->xmit_empty);
396 }
397
398 /* Null the read buffer (timer could still be running!) */
399 spin_lock_bh(&dport->rx_lock);
400 rx_buf = dport->rx_buf;
401 dport->rx_buf = NULL;
402 spin_unlock_bh(&dport->rx_lock);
403 /* Free the read buffer */
404 kfree(rx_buf);
405
406 /* Free the write buffer */
407 tty_port_free_xmit_buf(port);
408}
409
410static const struct tty_port_operations dashtty_port_ops = {
411 .activate = dashtty_port_activate,
412 .shutdown = dashtty_port_shutdown,
413};
414
415static int dashtty_install(struct tty_driver *driver, struct tty_struct *tty)
416{
417 return tty_port_install(&dashtty_ports[tty->index].port, driver, tty);
418}
419
420static int dashtty_open(struct tty_struct *tty, struct file *filp)
421{
422 return tty_port_open(tty->port, tty, filp);
423}
424
425static void dashtty_close(struct tty_struct *tty, struct file *filp)
426{
427 return tty_port_close(tty->port, tty, filp);
428}
429
430static void dashtty_hangup(struct tty_struct *tty)
431{
432 int channel;
433 struct dashtty_port *dport;
434
435 channel = tty->index;
436 dport = &dashtty_ports[channel];
437
438 /* drop any data in the xmit buffer */
439 mutex_lock(&dport->xmit_lock);
440 if (dport->xmit_cnt) {
441 atomic_sub(dport->xmit_cnt, &dashtty_xmit_cnt);
442 dport->xmit_cnt = 0;
443 dport->xmit_head = 0;
444 dport->xmit_tail = 0;
445 complete(&dport->xmit_empty);
446 }
447 mutex_unlock(&dport->xmit_lock);
448
449 tty_port_hangup(tty->port);
450}
451
452/**
453 * dashtty_put_timer() - Delayed wake up of kernel thread.
454 * @ignored: unused
455 *
456 * This timer function wakes up the kernel thread if any data exists in the
457 * buffers. It is used to delay the expensive writeout until the writer has
458 * stopped writing.
459 */
460static void dashtty_put_timer(struct timer_list *unused)
461{
462 if (atomic_read(&dashtty_xmit_cnt))
463 wake_up_interruptible(&dashtty_waitqueue);
464}
465
/*
 * dashtty_write() - queue bytes into the per-channel xmit ring buffer.
 * @tty:   tty whose index selects the DA channel
 * @buf:   data to queue
 * @total: number of bytes offered
 *
 * Returns the number of bytes actually accepted (may be less than @total
 * if the xmit buffer is nearly full). The actual channel I/O is done later
 * by the writer kthread, woken either immediately (buffer full) or after a
 * short delay via put_timer to batch small writes.
 */
static int dashtty_write(struct tty_struct *tty, const unsigned char *buf,
			 int total)
{
	int channel, count, block;
	struct dashtty_port *dport;

	/* Determine the channel */
	channel = tty->index;
	dport = &dashtty_ports[channel];

	/*
	 * Write to output buffer.
	 *
	 * The reason that we asynchronously write the buffer is because if we
	 * were to write the buffer synchronously then because DA channels are
	 * per-CPU the buffer would be written to the channel of whatever CPU
	 * we're running on.
	 *
	 * What we actually want to happen is have all input and output done on
	 * one CPU.
	 */
	mutex_lock(&dport->xmit_lock);
	/* work out how many bytes we can write to the xmit buffer */
	total = min(total, (int)(SERIAL_XMIT_SIZE - dport->xmit_cnt));
	/* global pending count drives the writer thread's wait condition */
	atomic_add(total, &dashtty_xmit_cnt);
	dport->xmit_cnt += total;
	/* write the actual bytes (may need splitting if it wraps) */
	for (count = total; count; count -= block) {
		/* copy at most up to the physical end of the ring buffer */
		block = min(count, (int)(SERIAL_XMIT_SIZE - dport->xmit_head));
		memcpy(dport->port.xmit_buf + dport->xmit_head, buf, block);
		dport->xmit_head += block;
		if (dport->xmit_head >= SERIAL_XMIT_SIZE)
			dport->xmit_head -= SERIAL_XMIT_SIZE;
		buf += block;
	}
	count = dport->xmit_cnt;
	/* xmit buffer no longer empty? */
	if (count)
		reinit_completion(&dport->xmit_empty);
	mutex_unlock(&dport->xmit_lock);

	if (total) {
		/*
		 * If the buffer is full, wake up the kthread, otherwise allow
		 * some more time for the buffer to fill up a bit before waking
		 * it.
		 */
		if (count == SERIAL_XMIT_SIZE) {
			del_timer(&put_timer);
			wake_up_interruptible(&dashtty_waitqueue);
		} else {
			mod_timer(&put_timer, jiffies + DA_TTY_PUT_DELAY);
		}
	}
	return total;
}
522
523static int dashtty_write_room(struct tty_struct *tty)
524{
525 struct dashtty_port *dport;
526 int channel;
527 int room;
528
529 channel = tty->index;
530 dport = &dashtty_ports[channel];
531
532 /* report the space in the xmit buffer */
533 mutex_lock(&dport->xmit_lock);
534 room = SERIAL_XMIT_SIZE - dport->xmit_cnt;
535 mutex_unlock(&dport->xmit_lock);
536
537 return room;
538}
539
540static int dashtty_chars_in_buffer(struct tty_struct *tty)
541{
542 struct dashtty_port *dport;
543 int channel;
544 int chars;
545
546 channel = tty->index;
547 dport = &dashtty_ports[channel];
548
549 /* report the number of bytes in the xmit buffer */
550 mutex_lock(&dport->xmit_lock);
551 chars = dport->xmit_cnt;
552 mutex_unlock(&dport->xmit_lock);
553
554 return chars;
555}
556
/* tty operations for the DA channel driver (ttyDA). */
static const struct tty_operations dashtty_ops = {
	.install		= dashtty_install,
	.open			= dashtty_open,
	.close			= dashtty_close,
	.hangup			= dashtty_hangup,
	.write			= dashtty_write,
	.write_room		= dashtty_write_room,
	.chars_in_buffer	= dashtty_chars_in_buffer,
};
566
/*
 * dashtty_init() - set up the DA TTY driver.
 *
 * Allocates and registers the ttyDA tty driver, initialises a tty_port per
 * DA channel, arms the delayed-flush timer, and starts the writer kthread
 * bound to the boot CPU. Returns 0 on success or a negative errno, undoing
 * any partial setup on failure.
 */
static int __init dashtty_init(void)
{
	int ret;
	int nport;
	struct dashtty_port *dport;

	/* Nothing to do if the DA (Debug Adapter) is not present/enabled. */
	if (!metag_da_enabled())
		return -ENODEV;

	channel_driver = tty_alloc_driver(NUM_TTY_CHANNELS,
					  TTY_DRIVER_REAL_RAW);
	if (IS_ERR(channel_driver))
		return PTR_ERR(channel_driver);

	channel_driver->driver_name = "metag_da";
	channel_driver->name = "ttyDA";
	channel_driver->major = DA_TTY_MAJOR;
	channel_driver->minor_start = 0;
	channel_driver->type = TTY_DRIVER_TYPE_SERIAL;
	channel_driver->subtype = SERIAL_TYPE_NORMAL;
	channel_driver->init_termios = tty_std_termios;
	channel_driver->init_termios.c_cflag |= CLOCAL;

	tty_set_operations(channel_driver, &dashtty_ops);
	/* One tty_port per DA channel. */
	for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
		dport = &dashtty_ports[nport];
		tty_port_init(&dport->port);
		dport->port.ops = &dashtty_port_ops;
		spin_lock_init(&dport->rx_lock);
		mutex_init(&dport->xmit_lock);
		/* the xmit buffer starts empty, i.e. completely written */
		init_completion(&dport->xmit_empty);
		complete(&dport->xmit_empty);
	}

	timer_setup(&put_timer, dashtty_put_timer, 0);

	init_waitqueue_head(&dashtty_waitqueue);
	dashtty_thread = kthread_create(put_data, NULL, "ttyDA");
	if (IS_ERR(dashtty_thread)) {
		pr_err("Couldn't create dashtty thread\n");
		ret = PTR_ERR(dashtty_thread);
		goto err_destroy_ports;
	}
	/*
	 * Bind the writer thread to the boot CPU so it can't migrate.
	 * DA channels are per-CPU and we want all channel I/O to be on a single
	 * predictable CPU.
	 */
	kthread_bind(dashtty_thread, 0);
	wake_up_process(dashtty_thread);

	ret = tty_register_driver(channel_driver);

	if (ret < 0) {
		pr_err("Couldn't install dashtty driver: err %d\n",
		       ret);
		goto err_stop_kthread;
	}

	return 0;

	/* Error unwinding: later failures fall through to earlier cleanup. */
err_stop_kthread:
	kthread_stop(dashtty_thread);
err_destroy_ports:
	for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) {
		dport = &dashtty_ports[nport];
		tty_port_destroy(&dport->port);
	}
	put_tty_driver(channel_driver);
	return ret;
}
device_initcall(dashtty_init);
640
641#ifdef CONFIG_DA_CONSOLE
642
643static void dash_console_write(struct console *co, const char *s,
644 unsigned int count)
645{
646 int actually_written;
647
648 chancall(WRBUF, CONSOLE_CHANNEL, count, (void *)s, &actually_written);
649}
650
651static struct tty_driver *dash_console_device(struct console *c, int *index)
652{
653 *index = c->index;
654 return channel_driver;
655}
656
/*
 * Console attached to the DA channels (device name "ttyDA").
 * NOTE(review): .index = 1 presumably selects the same channel as
 * CONSOLE_CHANNEL used by dash_console_write() — confirm they agree.
 */
struct console dash_console = {
	.name = "ttyDA",
	.write = dash_console_write,
	.device = dash_console_device,
	.flags = CON_PRINTBUFFER,
	.index = 1,
};
664
665#endif
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 37460cd6cabb..0e19679348d1 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1605,7 +1605,7 @@ config BCM7038_WDT
1605config IMGPDC_WDT 1605config IMGPDC_WDT
1606 tristate "Imagination Technologies PDC Watchdog Timer" 1606 tristate "Imagination Technologies PDC Watchdog Timer"
1607 depends on HAS_IOMEM 1607 depends on HAS_IOMEM
1608 depends on METAG || MIPS || COMPILE_TEST 1608 depends on MIPS || COMPILE_TEST
1609 select WATCHDOG_CORE 1609 select WATCHDOG_CORE
1610 help 1610 help
1611 Driver for Imagination Technologies PowerDown Controller 1611 Driver for Imagination Technologies PowerDown Controller