summaryrefslogtreecommitdiffstats
path: root/drivers/irqchip/irq-imx-irqsteer.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2019-02-23 04:53:31 -0500
committerThomas Gleixner <tglx@linutronix.de>2019-02-23 04:53:31 -0500
commita324ca9cad4736252c33c1e28cffe1d87f262d03 (patch)
treeda64e14dd8432602634773b52073928c50dfb85c /drivers/irqchip/irq-imx-irqsteer.c
parent4e6b26d23dc1faee318796d5c7f91b5692b1e6be (diff)
parent28528fca4908142bd1a3247956cba56c9c667d71 (diff)
Merge tag 'irqchip-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core
Pull irqchip updates from Marc Zyngier: - Core pseudo-NMI handling code - Allow the default irq domain to be retrieved - A new interrupt controller for the Loongson LS1X platform - Affinity support for the SiFive PLIC - Better support for the iMX irqsteer driver - NUMA aware memory allocations for GICv3 - A handful of other fixes (i8259, GICv3, PLIC)
Diffstat (limited to 'drivers/irqchip/irq-imx-irqsteer.c')
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c115
1 file changed, 83 insertions, 32 deletions
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 5b3f1d735685..d1098f4da6a4 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -10,10 +10,11 @@
10#include <linux/irqchip/chained_irq.h> 10#include <linux/irqchip/chained_irq.h>
11#include <linux/irqdomain.h> 11#include <linux/irqdomain.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/of_irq.h>
13#include <linux/of_platform.h> 14#include <linux/of_platform.h>
14#include <linux/spinlock.h> 15#include <linux/spinlock.h>
15 16
16#define CTRL_STRIDE_OFF(_t, _r) (_t * 8 * _r) 17#define CTRL_STRIDE_OFF(_t, _r) (_t * 4 * _r)
17#define CHANCTRL 0x0 18#define CHANCTRL 0x0
18#define CHANMASK(n, t) (CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4) 19#define CHANMASK(n, t) (CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
19#define CHANSET(n, t) (CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4) 20#define CHANSET(n, t) (CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
@@ -21,12 +22,15 @@
21#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4) 22#define CHAN_MINTDIS(t) (CTRL_STRIDE_OFF(t, 3) + 0x4)
22#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8) 23#define CHAN_MASTRSTAT(t) (CTRL_STRIDE_OFF(t, 3) + 0x8)
23 24
25#define CHAN_MAX_OUTPUT_INT 0x8
26
24struct irqsteer_data { 27struct irqsteer_data {
25 void __iomem *regs; 28 void __iomem *regs;
26 struct clk *ipg_clk; 29 struct clk *ipg_clk;
27 int irq; 30 int irq[CHAN_MAX_OUTPUT_INT];
31 int irq_count;
28 raw_spinlock_t lock; 32 raw_spinlock_t lock;
29 int irq_groups; 33 int reg_num;
30 int channel; 34 int channel;
31 struct irq_domain *domain; 35 struct irq_domain *domain;
32 u32 *saved_reg; 36 u32 *saved_reg;
@@ -35,7 +39,7 @@ struct irqsteer_data {
35static int imx_irqsteer_get_reg_index(struct irqsteer_data *data, 39static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
36 unsigned long irqnum) 40 unsigned long irqnum)
37{ 41{
38 return (data->irq_groups * 2 - irqnum / 32 - 1); 42 return (data->reg_num - irqnum / 32 - 1);
39} 43}
40 44
41static void imx_irqsteer_irq_unmask(struct irq_data *d) 45static void imx_irqsteer_irq_unmask(struct irq_data *d)
@@ -46,9 +50,9 @@ static void imx_irqsteer_irq_unmask(struct irq_data *d)
46 u32 val; 50 u32 val;
47 51
48 raw_spin_lock_irqsave(&data->lock, flags); 52 raw_spin_lock_irqsave(&data->lock, flags);
49 val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups)); 53 val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
50 val |= BIT(d->hwirq % 32); 54 val |= BIT(d->hwirq % 32);
51 writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups)); 55 writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
52 raw_spin_unlock_irqrestore(&data->lock, flags); 56 raw_spin_unlock_irqrestore(&data->lock, flags);
53} 57}
54 58
@@ -60,9 +64,9 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
60 u32 val; 64 u32 val;
61 65
62 raw_spin_lock_irqsave(&data->lock, flags); 66 raw_spin_lock_irqsave(&data->lock, flags);
63 val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups)); 67 val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
64 val &= ~BIT(d->hwirq % 32); 68 val &= ~BIT(d->hwirq % 32);
65 writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups)); 69 writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
66 raw_spin_unlock_irqrestore(&data->lock, flags); 70 raw_spin_unlock_irqrestore(&data->lock, flags);
67} 71}
68 72
@@ -87,23 +91,47 @@ static const struct irq_domain_ops imx_irqsteer_domain_ops = {
87 .xlate = irq_domain_xlate_onecell, 91 .xlate = irq_domain_xlate_onecell,
88}; 92};
89 93
94static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
95{
96 int i;
97
98 for (i = 0; i < data->irq_count; i++) {
99 if (data->irq[i] == irq)
100 return i * 64;
101 }
102
103 return -EINVAL;
104}
105
90static void imx_irqsteer_irq_handler(struct irq_desc *desc) 106static void imx_irqsteer_irq_handler(struct irq_desc *desc)
91{ 107{
92 struct irqsteer_data *data = irq_desc_get_handler_data(desc); 108 struct irqsteer_data *data = irq_desc_get_handler_data(desc);
93 int i; 109 int hwirq;
110 int irq, i;
94 111
95 chained_irq_enter(irq_desc_get_chip(desc), desc); 112 chained_irq_enter(irq_desc_get_chip(desc), desc);
96 113
97 for (i = 0; i < data->irq_groups * 64; i += 32) { 114 irq = irq_desc_get_irq(desc);
98 int idx = imx_irqsteer_get_reg_index(data, i); 115 hwirq = imx_irqsteer_get_hwirq_base(data, irq);
116 if (hwirq < 0) {
117 pr_warn("%s: unable to get hwirq base for irq %d\n",
118 __func__, irq);
119 return;
120 }
121
122 for (i = 0; i < 2; i++, hwirq += 32) {
123 int idx = imx_irqsteer_get_reg_index(data, hwirq);
99 unsigned long irqmap; 124 unsigned long irqmap;
100 int pos, virq; 125 int pos, virq;
101 126
127 if (hwirq >= data->reg_num * 32)
128 break;
129
102 irqmap = readl_relaxed(data->regs + 130 irqmap = readl_relaxed(data->regs +
103 CHANSTATUS(idx, data->irq_groups)); 131 CHANSTATUS(idx, data->reg_num));
104 132
105 for_each_set_bit(pos, &irqmap, 32) { 133 for_each_set_bit(pos, &irqmap, 32) {
106 virq = irq_find_mapping(data->domain, pos + i); 134 virq = irq_find_mapping(data->domain, pos + hwirq);
107 if (virq) 135 if (virq)
108 generic_handle_irq(virq); 136 generic_handle_irq(virq);
109 } 137 }
@@ -117,7 +145,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
117 struct device_node *np = pdev->dev.of_node; 145 struct device_node *np = pdev->dev.of_node;
118 struct irqsteer_data *data; 146 struct irqsteer_data *data;
119 struct resource *res; 147 struct resource *res;
120 int ret; 148 u32 irqs_num;
149 int i, ret;
121 150
122 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 151 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
123 if (!data) 152 if (!data)
@@ -130,12 +159,6 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
130 return PTR_ERR(data->regs); 159 return PTR_ERR(data->regs);
131 } 160 }
132 161
133 data->irq = platform_get_irq(pdev, 0);
134 if (data->irq <= 0) {
135 dev_err(&pdev->dev, "failed to get irq\n");
136 return -ENODEV;
137 }
138
139 data->ipg_clk = devm_clk_get(&pdev->dev, "ipg"); 162 data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
140 if (IS_ERR(data->ipg_clk)) { 163 if (IS_ERR(data->ipg_clk)) {
141 ret = PTR_ERR(data->ipg_clk); 164 ret = PTR_ERR(data->ipg_clk);
@@ -146,12 +169,19 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
146 169
147 raw_spin_lock_init(&data->lock); 170 raw_spin_lock_init(&data->lock);
148 171
149 of_property_read_u32(np, "fsl,irq-groups", &data->irq_groups); 172 of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
150 of_property_read_u32(np, "fsl,channel", &data->channel); 173 of_property_read_u32(np, "fsl,channel", &data->channel);
151 174
175 /*
176 * There is one output irq for each group of 64 inputs.
177 * One register bit map can represent 32 input interrupts.
178 */
179 data->irq_count = DIV_ROUND_UP(irqs_num, 64);
180 data->reg_num = irqs_num / 32;
181
152 if (IS_ENABLED(CONFIG_PM_SLEEP)) { 182 if (IS_ENABLED(CONFIG_PM_SLEEP)) {
153 data->saved_reg = devm_kzalloc(&pdev->dev, 183 data->saved_reg = devm_kzalloc(&pdev->dev,
154 sizeof(u32) * data->irq_groups * 2, 184 sizeof(u32) * data->reg_num,
155 GFP_KERNEL); 185 GFP_KERNEL);
156 if (!data->saved_reg) 186 if (!data->saved_reg)
157 return -ENOMEM; 187 return -ENOMEM;
@@ -166,27 +196,48 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
166 /* steer all IRQs into configured channel */ 196 /* steer all IRQs into configured channel */
167 writel_relaxed(BIT(data->channel), data->regs + CHANCTRL); 197 writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
168 198
169 data->domain = irq_domain_add_linear(np, data->irq_groups * 64, 199 data->domain = irq_domain_add_linear(np, data->reg_num * 32,
170 &imx_irqsteer_domain_ops, data); 200 &imx_irqsteer_domain_ops, data);
171 if (!data->domain) { 201 if (!data->domain) {
172 dev_err(&pdev->dev, "failed to create IRQ domain\n"); 202 dev_err(&pdev->dev, "failed to create IRQ domain\n");
173 clk_disable_unprepare(data->ipg_clk); 203 ret = -ENOMEM;
174 return -ENOMEM; 204 goto out;
205 }
206
207 if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
208 ret = -EINVAL;
209 goto out;
175 } 210 }
176 211
177 irq_set_chained_handler_and_data(data->irq, imx_irqsteer_irq_handler, 212 for (i = 0; i < data->irq_count; i++) {
178 data); 213 data->irq[i] = irq_of_parse_and_map(np, i);
214 if (!data->irq[i]) {
215 ret = -EINVAL;
216 goto out;
217 }
218
219 irq_set_chained_handler_and_data(data->irq[i],
220 imx_irqsteer_irq_handler,
221 data);
222 }
179 223
180 platform_set_drvdata(pdev, data); 224 platform_set_drvdata(pdev, data);
181 225
182 return 0; 226 return 0;
227out:
228 clk_disable_unprepare(data->ipg_clk);
229 return ret;
183} 230}
184 231
185static int imx_irqsteer_remove(struct platform_device *pdev) 232static int imx_irqsteer_remove(struct platform_device *pdev)
186{ 233{
187 struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev); 234 struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
235 int i;
236
237 for (i = 0; i < irqsteer_data->irq_count; i++)
238 irq_set_chained_handler_and_data(irqsteer_data->irq[i],
239 NULL, NULL);
188 240
189 irq_set_chained_handler_and_data(irqsteer_data->irq, NULL, NULL);
190 irq_domain_remove(irqsteer_data->domain); 241 irq_domain_remove(irqsteer_data->domain);
191 242
192 clk_disable_unprepare(irqsteer_data->ipg_clk); 243 clk_disable_unprepare(irqsteer_data->ipg_clk);
@@ -199,9 +250,9 @@ static void imx_irqsteer_save_regs(struct irqsteer_data *data)
199{ 250{
200 int i; 251 int i;
201 252
202 for (i = 0; i < data->irq_groups * 2; i++) 253 for (i = 0; i < data->reg_num; i++)
203 data->saved_reg[i] = readl_relaxed(data->regs + 254 data->saved_reg[i] = readl_relaxed(data->regs +
204 CHANMASK(i, data->irq_groups)); 255 CHANMASK(i, data->reg_num));
205} 256}
206 257
207static void imx_irqsteer_restore_regs(struct irqsteer_data *data) 258static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
@@ -209,9 +260,9 @@ static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
209 int i; 260 int i;
210 261
211 writel_relaxed(BIT(data->channel), data->regs + CHANCTRL); 262 writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
212 for (i = 0; i < data->irq_groups * 2; i++) 263 for (i = 0; i < data->reg_num; i++)
213 writel_relaxed(data->saved_reg[i], 264 writel_relaxed(data->saved_reg[i],
214 data->regs + CHANMASK(i, data->irq_groups)); 265 data->regs + CHANMASK(i, data->reg_num));
215} 266}
216 267
217static int imx_irqsteer_suspend(struct device *dev) 268static int imx_irqsteer_suspend(struct device *dev)