Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/bus/arm-cci.c | 617
-rw-r--r-- | drivers/dma/Kconfig | 12
-rw-r--r-- | drivers/dma/Makefile | 1
-rw-r--r-- | drivers/dma/s3c24xx-dma.c | 1350
-rw-r--r-- | drivers/gpio/gpio-davinci.c | 132
-rw-r--r-- | drivers/gpio/gpio-tnetv107x.c | 1
-rw-r--r-- | drivers/irqchip/irq-armada-370-xp.c | 202
-rw-r--r-- | drivers/pci/host/Kconfig | 2
-rw-r--r-- | drivers/pci/host/pci-mvebu.c | 235
9 files changed, 2414 insertions, 138 deletions
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 200926699778..bb5b90e8e768 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -18,11 +18,21 @@ | |||
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/of_address.h> | 20 | #include <linux/of_address.h> |
21 | #include <linux/of_irq.h> | ||
22 | #include <linux/of_platform.h> | ||
23 | #include <linux/platform_device.h> | ||
21 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/spinlock.h> | ||
22 | 26 | ||
23 | #include <asm/cacheflush.h> | 27 | #include <asm/cacheflush.h> |
28 | #include <asm/irq_regs.h> | ||
29 | #include <asm/pmu.h> | ||
24 | #include <asm/smp_plat.h> | 30 | #include <asm/smp_plat.h> |
25 | 31 | ||
32 | #define DRIVER_NAME "CCI-400" | ||
33 | #define DRIVER_NAME_PMU DRIVER_NAME " PMU" | ||
34 | #define PMU_NAME "CCI_400" | ||
35 | |||
26 | #define CCI_PORT_CTRL 0x0 | 36 | #define CCI_PORT_CTRL 0x0 |
27 | #define CCI_CTRL_STATUS 0xc | 37 | #define CCI_CTRL_STATUS 0xc |
28 | 38 | ||
@@ -54,6 +64,568 @@ static unsigned int nb_cci_ports; | |||
54 | static void __iomem *cci_ctrl_base; | 64 | static void __iomem *cci_ctrl_base; |
55 | static unsigned long cci_ctrl_phys; | 65 | static unsigned long cci_ctrl_phys; |
56 | 66 | ||
67 | #ifdef CONFIG_HW_PERF_EVENTS | ||
68 | |||
69 | #define CCI_PMCR 0x0100 | ||
70 | #define CCI_PID2 0x0fe8 | ||
71 | |||
72 | #define CCI_PMCR_CEN 0x00000001 | ||
73 | #define CCI_PMCR_NCNT_MASK 0x0000f800 | ||
74 | #define CCI_PMCR_NCNT_SHIFT 11 | ||
75 | |||
76 | #define CCI_PID2_REV_MASK 0xf0 | ||
77 | #define CCI_PID2_REV_SHIFT 4 | ||
78 | |||
79 | /* Port ids */ | ||
80 | #define CCI_PORT_S0 0 | ||
81 | #define CCI_PORT_S1 1 | ||
82 | #define CCI_PORT_S2 2 | ||
83 | #define CCI_PORT_S3 3 | ||
84 | #define CCI_PORT_S4 4 | ||
85 | #define CCI_PORT_M0 5 | ||
86 | #define CCI_PORT_M1 6 | ||
87 | #define CCI_PORT_M2 7 | ||
88 | |||
89 | #define CCI_REV_R0 0 | ||
90 | #define CCI_REV_R1 1 | ||
91 | #define CCI_REV_R0_P4 4 | ||
92 | #define CCI_REV_R1_P2 6 | ||
93 | |||
94 | #define CCI_PMU_EVT_SEL 0x000 | ||
95 | #define CCI_PMU_CNTR 0x004 | ||
96 | #define CCI_PMU_CNTR_CTRL 0x008 | ||
97 | #define CCI_PMU_OVRFLW 0x00c | ||
98 | |||
99 | #define CCI_PMU_OVRFLW_FLAG 1 | ||
100 | |||
101 | #define CCI_PMU_CNTR_BASE(idx) ((idx) * SZ_4K) | ||
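Each counter thus gets its own 4K window of control registers. As a quick worked example (the counter index is chosen arbitrarily): the overflow flag register of counter 2 sits at CCI_PMU_CNTR_BASE(2) + CCI_PMU_OVRFLW = 2 * 0x1000 + 0xc = 0x200c from the PMU base, which is the address that pmu_read_register()/pmu_write_register() below compute.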
102 | |||
103 | /* | ||
104 | * Instead of an event id to monitor CCI cycles, a dedicated counter is | ||
105 | * provided. Use 0xff to represent CCI cycles and hope that no future revisions | ||
106 | * make use of this event in hardware. | ||
107 | */ | ||
108 | enum cci400_perf_events { | ||
109 | CCI_PMU_CYCLES = 0xff | ||
110 | }; | ||
111 | |||
112 | #define CCI_PMU_EVENT_MASK 0xff | ||
113 | #define CCI_PMU_EVENT_SOURCE(event) (((event) >> 5) & 0x7) | ||
114 | #define CCI_PMU_EVENT_CODE(event) ((event) & 0x1f) | ||
115 | |||
116 | #define CCI_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */ | ||
117 | |||
118 | #define CCI_PMU_CYCLE_CNTR_IDX 0 | ||
119 | #define CCI_PMU_CNTR0_IDX 1 | ||
120 | #define CCI_PMU_CNTR_LAST(cci_pmu) (CCI_PMU_CYCLE_CNTR_IDX + (cci_pmu)->num_events - 1) | ||
121 | |||
122 | /* | ||
123 | * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8 | ||
124 | * ports and bits 4:0 are event codes. There are different event codes | ||
125 | * associated with each port type. | ||
126 | * | ||
127 | * Additionally, the range of events associated with the port types changed | ||
128 | * between Rev0 and Rev1. | ||
129 | * | ||
130 | * The constants below define the range of valid codes for each port type for | ||
131 | * the different revisions and are used to validate the event to be monitored. | ||
132 | */ | ||
133 | |||
134 | #define CCI_REV_R0_SLAVE_PORT_MIN_EV 0x00 | ||
135 | #define CCI_REV_R0_SLAVE_PORT_MAX_EV 0x13 | ||
136 | #define CCI_REV_R0_MASTER_PORT_MIN_EV 0x14 | ||
137 | #define CCI_REV_R0_MASTER_PORT_MAX_EV 0x1a | ||
138 | |||
139 | #define CCI_REV_R1_SLAVE_PORT_MIN_EV 0x00 | ||
140 | #define CCI_REV_R1_SLAVE_PORT_MAX_EV 0x14 | ||
141 | #define CCI_REV_R1_MASTER_PORT_MIN_EV 0x00 | ||
142 | #define CCI_REV_R1_MASTER_PORT_MAX_EV 0x11 | ||
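A minimal stand-alone sketch of the event-id encoding described in the comment above, with the macros copied from this patch (the chosen port and event code are hypothetical values, not taken from a TRM):

	#include <stdio.h>

	#define CCI_PMU_EVENT_MASK		0xff
	#define CCI_PMU_EVENT_SOURCE(event)	(((event) >> 5) & 0x7)
	#define CCI_PMU_EVENT_CODE(event)	((event) & 0x1f)

	int main(void)
	{
		/* hypothetical: port 5 (master interface M0), event code 0x0a */
		unsigned int ev = (5 << 5) | 0x0a;

		printf("event 0x%02x: source port %u, code 0x%02x\n",
		       ev & CCI_PMU_EVENT_MASK,
		       CCI_PMU_EVENT_SOURCE(ev),
		       CCI_PMU_EVENT_CODE(ev));
		return 0;
	}

Decoding 0xaa this way yields source port 5 and event code 0x0a; pmu_validate_hw_event() below then checks such a code against the master-port range for the probed CCI revision.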
143 | |||
144 | struct pmu_port_event_ranges { | ||
145 | u8 slave_min; | ||
146 | u8 slave_max; | ||
147 | u8 master_min; | ||
148 | u8 master_max; | ||
149 | }; | ||
150 | |||
151 | static struct pmu_port_event_ranges port_event_range[] = { | ||
152 | [CCI_REV_R0] = { | ||
153 | .slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV, | ||
154 | .slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV, | ||
155 | .master_min = CCI_REV_R0_MASTER_PORT_MIN_EV, | ||
156 | .master_max = CCI_REV_R0_MASTER_PORT_MAX_EV, | ||
157 | }, | ||
158 | [CCI_REV_R1] = { | ||
159 | .slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV, | ||
160 | .slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV, | ||
161 | .master_min = CCI_REV_R1_MASTER_PORT_MIN_EV, | ||
162 | .master_max = CCI_REV_R1_MASTER_PORT_MAX_EV, | ||
163 | }, | ||
164 | }; | ||
165 | |||
166 | struct cci_pmu_drv_data { | ||
167 | void __iomem *base; | ||
168 | struct arm_pmu *cci_pmu; | ||
169 | int nr_irqs; | ||
170 | int irqs[CCI_PMU_MAX_HW_EVENTS]; | ||
171 | unsigned long active_irqs; | ||
172 | struct perf_event *events[CCI_PMU_MAX_HW_EVENTS]; | ||
173 | unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)]; | ||
174 | struct pmu_port_event_ranges *port_ranges; | ||
175 | struct pmu_hw_events hw_events; | ||
176 | }; | ||
177 | static struct cci_pmu_drv_data *pmu; | ||
178 | |||
179 | static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) | ||
180 | { | ||
181 | int i; | ||
182 | |||
183 | for (i = 0; i < nr_irqs; i++) | ||
184 | if (irq == irqs[i]) | ||
185 | return true; | ||
186 | |||
187 | return false; | ||
188 | } | ||
189 | |||
190 | static int probe_cci_revision(void) | ||
191 | { | ||
192 | int rev; | ||
193 | rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; | ||
194 | rev >>= CCI_PID2_REV_SHIFT; | ||
195 | |||
196 | if (rev <= CCI_REV_R0_P4) | ||
197 | return CCI_REV_R0; | ||
198 | else if (rev <= CCI_REV_R1_P2) | ||
199 | return CCI_REV_R1; | ||
200 | |||
201 | return -ENOENT; | ||
202 | } | ||
203 | |||
204 | static struct pmu_port_event_ranges *port_range_by_rev(void) | ||
205 | { | ||
206 | int rev = probe_cci_revision(); | ||
207 | |||
208 | if (rev < 0) | ||
209 | return NULL; | ||
210 | |||
211 | return &port_event_range[rev]; | ||
212 | } | ||
213 | |||
214 | static int pmu_is_valid_slave_event(u8 ev_code) | ||
215 | { | ||
216 | return pmu->port_ranges->slave_min <= ev_code && | ||
217 | ev_code <= pmu->port_ranges->slave_max; | ||
218 | } | ||
219 | |||
220 | static int pmu_is_valid_master_event(u8 ev_code) | ||
221 | { | ||
222 | return pmu->port_ranges->master_min <= ev_code && | ||
223 | ev_code <= pmu->port_ranges->master_max; | ||
224 | } | ||
225 | |||
226 | static int pmu_validate_hw_event(u8 hw_event) | ||
227 | { | ||
228 | u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event); | ||
229 | u8 ev_code = CCI_PMU_EVENT_CODE(hw_event); | ||
230 | |||
231 | switch (ev_source) { | ||
232 | case CCI_PORT_S0: | ||
233 | case CCI_PORT_S1: | ||
234 | case CCI_PORT_S2: | ||
235 | case CCI_PORT_S3: | ||
236 | case CCI_PORT_S4: | ||
237 | /* Slave Interface */ | ||
238 | if (pmu_is_valid_slave_event(ev_code)) | ||
239 | return hw_event; | ||
240 | break; | ||
241 | case CCI_PORT_M0: | ||
242 | case CCI_PORT_M1: | ||
243 | case CCI_PORT_M2: | ||
244 | /* Master Interface */ | ||
245 | if (pmu_is_valid_master_event(ev_code)) | ||
246 | return hw_event; | ||
247 | break; | ||
248 | } | ||
249 | |||
250 | return -ENOENT; | ||
251 | } | ||
252 | |||
253 | static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx) | ||
254 | { | ||
255 | return CCI_PMU_CYCLE_CNTR_IDX <= idx && | ||
256 | idx <= CCI_PMU_CNTR_LAST(cci_pmu); | ||
257 | } | ||
258 | |||
259 | static u32 pmu_read_register(int idx, unsigned int offset) | ||
260 | { | ||
261 | return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); | ||
262 | } | ||
263 | |||
264 | static void pmu_write_register(u32 value, int idx, unsigned int offset) | ||
265 | { | ||
266 | writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset); | ||
267 | } | ||
268 | |||
269 | static void pmu_disable_counter(int idx) | ||
270 | { | ||
271 | pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL); | ||
272 | } | ||
273 | |||
274 | static void pmu_enable_counter(int idx) | ||
275 | { | ||
276 | pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL); | ||
277 | } | ||
278 | |||
279 | static void pmu_set_event(int idx, unsigned long event) | ||
280 | { | ||
281 | event &= CCI_PMU_EVENT_MASK; | ||
282 | pmu_write_register(event, idx, CCI_PMU_EVT_SEL); | ||
283 | } | ||
284 | |||
285 | static u32 pmu_get_max_counters(void) | ||
286 | { | ||
287 | u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) & | ||
288 | CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT; | ||
289 | |||
290 | /* add 1 for cycle counter */ | ||
291 | return n_cnts + 1; | ||
292 | } | ||
293 | |||
294 | static struct pmu_hw_events *pmu_get_hw_events(void) | ||
295 | { | ||
296 | return &pmu->hw_events; | ||
297 | } | ||
298 | |||
299 | static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event) | ||
300 | { | ||
301 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
302 | struct hw_perf_event *hw_event = &event->hw; | ||
303 | unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK; | ||
304 | int idx; | ||
305 | |||
306 | if (cci_event == CCI_PMU_CYCLES) { | ||
307 | if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask)) | ||
308 | return -EAGAIN; | ||
309 | |||
310 | return CCI_PMU_CYCLE_CNTR_IDX; | ||
311 | } | ||
312 | |||
313 | for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx) | ||
314 | if (!test_and_set_bit(idx, hw->used_mask)) | ||
315 | return idx; | ||
316 | |||
317 | /* No counters available */ | ||
318 | return -EAGAIN; | ||
319 | } | ||
320 | |||
321 | static int pmu_map_event(struct perf_event *event) | ||
322 | { | ||
323 | int mapping; | ||
324 | u8 config = event->attr.config & CCI_PMU_EVENT_MASK; | ||
325 | |||
326 | if (event->attr.type < PERF_TYPE_MAX) | ||
327 | return -ENOENT; | ||
328 | |||
329 | if (config == CCI_PMU_CYCLES) | ||
330 | mapping = config; | ||
331 | else | ||
332 | mapping = pmu_validate_hw_event(config); | ||
333 | |||
334 | return mapping; | ||
335 | } | ||
336 | |||
337 | static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler) | ||
338 | { | ||
339 | int i; | ||
340 | struct platform_device *pmu_device = cci_pmu->plat_device; | ||
341 | |||
342 | if (unlikely(!pmu_device)) | ||
343 | return -ENODEV; | ||
344 | |||
345 | if (pmu->nr_irqs < 1) { | ||
346 | dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); | ||
347 | return -ENODEV; | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * Register all available CCI PMU interrupts. In the interrupt handler | ||
352 | * we iterate over the counters checking for interrupt source (the | ||
353 | * overflowing counter) and clear it. | ||
354 | * | ||
355 | * This should allow handling of non-unique interrupts for the counters. | ||
356 | */ | ||
357 | for (i = 0; i < pmu->nr_irqs; i++) { | ||
358 | int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED, | ||
359 | "arm-cci-pmu", cci_pmu); | ||
360 | if (err) { | ||
361 | dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", | ||
362 | pmu->irqs[i]); | ||
363 | return err; | ||
364 | } | ||
365 | |||
366 | set_bit(i, &pmu->active_irqs); | ||
367 | } | ||
368 | |||
369 | return 0; | ||
370 | } | ||
371 | |||
372 | static irqreturn_t pmu_handle_irq(int irq_num, void *dev) | ||
373 | { | ||
374 | unsigned long flags; | ||
375 | struct arm_pmu *cci_pmu = (struct arm_pmu *)dev; | ||
376 | struct pmu_hw_events *events = cci_pmu->get_hw_events(); | ||
377 | struct perf_sample_data data; | ||
378 | struct pt_regs *regs; | ||
379 | int idx, handled = IRQ_NONE; | ||
380 | |||
381 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
382 | regs = get_irq_regs(); | ||
383 | /* | ||
384 | * Iterate over counters and update the corresponding perf events. | ||
385 | * This should work regardless of whether we have per-counter overflow | ||
386 | * interrupt or a combined overflow interrupt. | ||
387 | */ | ||
388 | for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) { | ||
389 | struct perf_event *event = events->events[idx]; | ||
390 | struct hw_perf_event *hw_counter; | ||
391 | |||
392 | if (!event) | ||
393 | continue; | ||
394 | |||
395 | hw_counter = &event->hw; | ||
396 | |||
397 | /* Did this counter overflow? */ | ||
398 | if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG)) | ||
399 | continue; | ||
400 | |||
401 | pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW); | ||
402 | |||
403 | handled = IRQ_HANDLED; | ||
404 | |||
405 | armpmu_event_update(event); | ||
406 | perf_sample_data_init(&data, 0, hw_counter->last_period); | ||
407 | if (!armpmu_event_set_period(event)) | ||
408 | continue; | ||
409 | |||
410 | if (perf_event_overflow(event, &data, regs)) | ||
411 | cci_pmu->disable(event); | ||
412 | } | ||
413 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
414 | |||
415 | return IRQ_RETVAL(handled); | ||
416 | } | ||
417 | |||
418 | static void pmu_free_irq(struct arm_pmu *cci_pmu) | ||
419 | { | ||
420 | int i; | ||
421 | |||
422 | for (i = 0; i < pmu->nr_irqs; i++) { | ||
423 | if (!test_and_clear_bit(i, &pmu->active_irqs)) | ||
424 | continue; | ||
425 | |||
426 | free_irq(pmu->irqs[i], cci_pmu); | ||
427 | } | ||
428 | } | ||
429 | |||
430 | static void pmu_enable_event(struct perf_event *event) | ||
431 | { | ||
432 | unsigned long flags; | ||
433 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
434 | struct pmu_hw_events *events = cci_pmu->get_hw_events(); | ||
435 | struct hw_perf_event *hw_counter = &event->hw; | ||
436 | int idx = hw_counter->idx; | ||
437 | |||
438 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | ||
439 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | ||
440 | return; | ||
441 | } | ||
442 | |||
443 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
444 | |||
445 | /* Configure the event to count, unless you are counting cycles */ | ||
446 | if (idx != CCI_PMU_CYCLE_CNTR_IDX) | ||
447 | pmu_set_event(idx, hw_counter->config_base); | ||
448 | |||
449 | pmu_enable_counter(idx); | ||
450 | |||
451 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
452 | } | ||
453 | |||
454 | static void pmu_disable_event(struct perf_event *event) | ||
455 | { | ||
456 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
457 | struct hw_perf_event *hw_counter = &event->hw; | ||
458 | int idx = hw_counter->idx; | ||
459 | |||
460 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | ||
461 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | ||
462 | return; | ||
463 | } | ||
464 | |||
465 | pmu_disable_counter(idx); | ||
466 | } | ||
467 | |||
468 | static void pmu_start(struct arm_pmu *cci_pmu) | ||
469 | { | ||
470 | u32 val; | ||
471 | unsigned long flags; | ||
472 | struct pmu_hw_events *events = cci_pmu->get_hw_events(); | ||
473 | |||
474 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
475 | |||
476 | /* Enable all the PMU counters. */ | ||
477 | val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; | ||
478 | writel(val, cci_ctrl_base + CCI_PMCR); | ||
479 | |||
480 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
481 | } | ||
482 | |||
483 | static void pmu_stop(struct arm_pmu *cci_pmu) | ||
484 | { | ||
485 | u32 val; | ||
486 | unsigned long flags; | ||
487 | struct pmu_hw_events *events = cci_pmu->get_hw_events(); | ||
488 | |||
489 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | ||
490 | |||
491 | /* Disable all the PMU counters. */ | ||
492 | val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; | ||
493 | writel(val, cci_ctrl_base + CCI_PMCR); | ||
494 | |||
495 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | ||
496 | } | ||
497 | |||
498 | static u32 pmu_read_counter(struct perf_event *event) | ||
499 | { | ||
500 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
501 | struct hw_perf_event *hw_counter = &event->hw; | ||
502 | int idx = hw_counter->idx; | ||
503 | u32 value; | ||
504 | |||
505 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | ||
506 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | ||
507 | return 0; | ||
508 | } | ||
509 | value = pmu_read_register(idx, CCI_PMU_CNTR); | ||
510 | |||
511 | return value; | ||
512 | } | ||
513 | |||
514 | static void pmu_write_counter(struct perf_event *event, u32 value) | ||
515 | { | ||
516 | struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu); | ||
517 | struct hw_perf_event *hw_counter = &event->hw; | ||
518 | int idx = hw_counter->idx; | ||
519 | |||
520 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) | ||
521 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | ||
522 | else | ||
523 | pmu_write_register(value, idx, CCI_PMU_CNTR); | ||
524 | } | ||
525 | |||
526 | static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev) | ||
527 | { | ||
528 | *cci_pmu = (struct arm_pmu){ | ||
529 | .name = PMU_NAME, | ||
530 | .max_period = (1LLU << 32) - 1, | ||
531 | .get_hw_events = pmu_get_hw_events, | ||
532 | .get_event_idx = pmu_get_event_idx, | ||
533 | .map_event = pmu_map_event, | ||
534 | .request_irq = pmu_request_irq, | ||
535 | .handle_irq = pmu_handle_irq, | ||
536 | .free_irq = pmu_free_irq, | ||
537 | .enable = pmu_enable_event, | ||
538 | .disable = pmu_disable_event, | ||
539 | .start = pmu_start, | ||
540 | .stop = pmu_stop, | ||
541 | .read_counter = pmu_read_counter, | ||
542 | .write_counter = pmu_write_counter, | ||
543 | }; | ||
544 | |||
545 | cci_pmu->plat_device = pdev; | ||
546 | cci_pmu->num_events = pmu_get_max_counters(); | ||
547 | |||
548 | return armpmu_register(cci_pmu, -1); | ||
549 | } | ||
550 | |||
551 | static const struct of_device_id arm_cci_pmu_matches[] = { | ||
552 | { | ||
553 | .compatible = "arm,cci-400-pmu", | ||
554 | }, | ||
555 | {}, | ||
556 | }; | ||
557 | |||
558 | static int cci_pmu_probe(struct platform_device *pdev) | ||
559 | { | ||
560 | struct resource *res; | ||
561 | int i, ret, irq; | ||
562 | |||
563 | pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL); | ||
564 | if (!pmu) | ||
565 | return -ENOMEM; | ||
566 | |||
567 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
568 | pmu->base = devm_ioremap_resource(&pdev->dev, res); | ||
569 | if (IS_ERR(pmu->base)) | ||
570 | return PTR_ERR(pmu->base); | ||
571 | |||
572 | /* | ||
573 | * CCI PMU has 5 overflow signals - one per counter, but some may be tied | ||
574 | * together to a common interrupt. | ||
575 | */ | ||
576 | pmu->nr_irqs = 0; | ||
577 | for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) { | ||
578 | irq = platform_get_irq(pdev, i); | ||
579 | if (irq < 0) | ||
580 | break; | ||
581 | |||
582 | if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs)) | ||
583 | continue; | ||
584 | |||
585 | pmu->irqs[pmu->nr_irqs++] = irq; | ||
586 | } | ||
587 | |||
588 | /* | ||
589 | * Ensure that the device tree has as many interrupts as the number | ||
590 | * of counters. | ||
591 | */ | ||
592 | if (i < CCI_PMU_MAX_HW_EVENTS) { | ||
593 | dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n", | ||
594 | i, CCI_PMU_MAX_HW_EVENTS); | ||
595 | return -EINVAL; | ||
596 | } | ||
597 | |||
598 | pmu->port_ranges = port_range_by_rev(); | ||
599 | if (!pmu->port_ranges) { | ||
600 | dev_warn(&pdev->dev, "CCI PMU version not supported\n"); | ||
601 | return -EINVAL; | ||
602 | } | ||
603 | |||
604 | pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL); | ||
605 | if (!pmu->cci_pmu) | ||
606 | return -ENOMEM; | ||
607 | |||
608 | pmu->hw_events.events = pmu->events; | ||
609 | pmu->hw_events.used_mask = pmu->used_mask; | ||
610 | raw_spin_lock_init(&pmu->hw_events.pmu_lock); | ||
611 | |||
612 | ret = cci_pmu_init(pmu->cci_pmu, pdev); | ||
613 | if (ret) | ||
614 | return ret; | ||
615 | |||
616 | return 0; | ||
617 | } | ||
618 | |||
619 | static int cci_platform_probe(struct platform_device *pdev) | ||
620 | { | ||
621 | if (!cci_probed()) | ||
622 | return -ENODEV; | ||
623 | |||
624 | return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); | ||
625 | } | ||
626 | |||
627 | #endif /* CONFIG_HW_PERF_EVENTS */ | ||
628 | |||
57 | struct cpu_port { | 629 | struct cpu_port { |
58 | u64 mpidr; | 630 | u64 mpidr; |
59 | u32 port; | 631 | u32 port; |
@@ -120,7 +692,7 @@ int cci_ace_get_port(struct device_node *dn) | |||
120 | } | 692 | } |
121 | EXPORT_SYMBOL_GPL(cci_ace_get_port); | 693 | EXPORT_SYMBOL_GPL(cci_ace_get_port); |
122 | 694 | ||
123 | static void __init cci_ace_init_ports(void) | 695 | static void cci_ace_init_ports(void) |
124 | { | 696 | { |
125 | int port, cpu; | 697 | int port, cpu; |
126 | struct device_node *cpun; | 698 | struct device_node *cpun; |
@@ -386,7 +958,7 @@ static const struct of_device_id arm_cci_ctrl_if_matches[] = { | |||
386 | {}, | 958 | {}, |
387 | }; | 959 | }; |
388 | 960 | ||
389 | static int __init cci_probe(void) | 961 | static int cci_probe(void) |
390 | { | 962 | { |
391 | struct cci_nb_ports const *cci_config; | 963 | struct cci_nb_ports const *cci_config; |
392 | int ret, i, nb_ace = 0, nb_ace_lite = 0; | 964 | int ret, i, nb_ace = 0, nb_ace_lite = 0; |
@@ -490,7 +1062,7 @@ memalloc_err: | |||
490 | static int cci_init_status = -EAGAIN; | 1062 | static int cci_init_status = -EAGAIN; |
491 | static DEFINE_MUTEX(cci_probing); | 1063 | static DEFINE_MUTEX(cci_probing); |
492 | 1064 | ||
493 | static int __init cci_init(void) | 1065 | static int cci_init(void) |
494 | { | 1066 | { |
495 | if (cci_init_status != -EAGAIN) | 1067 | if (cci_init_status != -EAGAIN) |
496 | return cci_init_status; | 1068 | return cci_init_status; |
@@ -502,18 +1074,55 @@ static int __init cci_init(void) | |||
502 | return cci_init_status; | 1074 | return cci_init_status; |
503 | } | 1075 | } |
504 | 1076 | ||
1077 | #ifdef CONFIG_HW_PERF_EVENTS | ||
1078 | static struct platform_driver cci_pmu_driver = { | ||
1079 | .driver = { | ||
1080 | .name = DRIVER_NAME_PMU, | ||
1081 | .of_match_table = arm_cci_pmu_matches, | ||
1082 | }, | ||
1083 | .probe = cci_pmu_probe, | ||
1084 | }; | ||
1085 | |||
1086 | static struct platform_driver cci_platform_driver = { | ||
1087 | .driver = { | ||
1088 | .name = DRIVER_NAME, | ||
1089 | .of_match_table = arm_cci_matches, | ||
1090 | }, | ||
1091 | .probe = cci_platform_probe, | ||
1092 | }; | ||
1093 | |||
1094 | static int __init cci_platform_init(void) | ||
1095 | { | ||
1096 | int ret; | ||
1097 | |||
1098 | ret = platform_driver_register(&cci_pmu_driver); | ||
1099 | if (ret) | ||
1100 | return ret; | ||
1101 | |||
1102 | return platform_driver_register(&cci_platform_driver); | ||
1103 | } | ||
1104 | |||
1105 | #else | ||
1106 | |||
1107 | static int __init cci_platform_init(void) | ||
1108 | { | ||
1109 | return 0; | ||
1110 | } | ||
1111 | |||
1112 | #endif | ||
505 | /* | 1113 | /* |
506 | * To sort out early init call ordering, a helper function is provided to | 1114 | * To sort out early init call ordering, a helper function is provided to |
507 | * check whether the CCI driver has been initialized. If it has not, the | 1115 | * check whether the CCI driver has been initialized. If it has not, the |
508 | * helper calls the init function that probes the driver and updates the | 1116 | * helper calls the init function that probes the driver and updates the |
509 | * return value. | 1117 | * return value. |
510 | */ | 1118 | */ |
511 | bool __init cci_probed(void) | 1119 | bool cci_probed(void) |
512 | { | 1120 | { |
513 | return cci_init() == 0; | 1121 | return cci_init() == 0; |
514 | } | 1122 | } |
515 | EXPORT_SYMBOL_GPL(cci_probed); | 1123 | EXPORT_SYMBOL_GPL(cci_probed); |
516 | 1124 | ||
517 | early_initcall(cci_init); | 1125 | early_initcall(cci_init); |
1126 | core_initcall(cci_platform_init); | ||
518 | MODULE_LICENSE("GPL"); | 1127 | MODULE_LICENSE("GPL"); |
519 | MODULE_DESCRIPTION("ARM CCI support"); | 1128 | MODULE_DESCRIPTION("ARM CCI support"); |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 56fe803adcb1..c61a6eccf169 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -154,6 +154,18 @@ config TEGRA20_APB_DMA | |||
154 | This DMA controller transfers data from memory to peripheral fifo | 154 | This DMA controller transfers data from memory to peripheral fifo |
155 | or vice versa. It does not support memory to memory data transfer. | 155 | or vice versa. It does not support memory to memory data transfer. |
156 | 156 | ||
157 | config S3C24XX_DMAC | ||
158 | tristate "Samsung S3C24XX DMA support" | ||
159 | depends on ARCH_S3C24XX && !S3C24XX_DMA | ||
160 | select DMA_ENGINE | ||
161 | select DMA_VIRTUAL_CHANNELS | ||
162 | help | ||
163 | Support for the Samsung S3C24XX DMA controller driver. The | ||
164 | DMA controller has multiple DMA channels which can be | ||
165 | configured for different peripherals like audio, UART and SPI. | ||
166 | The DMA controller can transfer data from memory to peripheral, | ||
167 | peripheral to memory, peripheral to peripheral and memory to memory. | ||
168 | |||
157 | source "drivers/dma/sh/Kconfig" | 169 | source "drivers/dma/sh/Kconfig" |
158 | 170 | ||
159 | config COH901318 | 171 | config COH901318 |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index db89035b3626..0ce2da97e429 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_SIRF_DMA) += sirf-dma.o | |||
30 | obj-$(CONFIG_TI_EDMA) += edma.o | 30 | obj-$(CONFIG_TI_EDMA) += edma.o |
31 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o | 31 | obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o |
32 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o | 32 | obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o |
33 | obj-$(CONFIG_S3C24XX_DMAC) += s3c24xx-dma.o | ||
33 | obj-$(CONFIG_PL330_DMA) += pl330.o | 34 | obj-$(CONFIG_PL330_DMA) += pl330.o |
34 | obj-$(CONFIG_PCH_DMA) += pch_dma.o | 35 | obj-$(CONFIG_PCH_DMA) += pch_dma.o |
35 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o | 36 | obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o |
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
new file mode 100644
index 000000000000..4cb127978636
--- /dev/null
+++ b/drivers/dma/s3c24xx-dma.c
@@ -0,0 +1,1350 @@ | |||
1 | /* | ||
2 | * S3C24XX DMA handling | ||
3 | * | ||
4 | * Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de> | ||
5 | * | ||
6 | * based on amba-pl08x.c | ||
7 | * | ||
8 | * Copyright (c) 2006 ARM Ltd. | ||
9 | * Copyright (c) 2010 ST-Ericsson SA | ||
10 | * | ||
11 | * Author: Peter Pearse <peter.pearse@arm.com> | ||
12 | * Author: Linus Walleij <linus.walleij@stericsson.com> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify it | ||
15 | * under the terms of the GNU General Public License as published by the Free | ||
16 | * Software Foundation; either version 2 of the License, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * The DMA controllers in S3C24XX SoCs have a varying number of DMA signals | ||
20 | * that can be routed to any of the 4 to 8 hardware channels. | ||
21 | * | ||
22 | * Therefore on these DMA controllers the number of channels | ||
23 | * and the number of incoming DMA signals are two totally different things. | ||
24 | * It is usually not possible to handle all physical signals at the same time, | ||
25 | * so a multiplexing scheme with possible denial of use is necessary. | ||
26 | * | ||
27 | * Open items: | ||
28 | * - bursts | ||
29 | */ | ||
30 | |||
31 | #include <linux/platform_device.h> | ||
32 | #include <linux/types.h> | ||
33 | #include <linux/dmaengine.h> | ||
34 | #include <linux/dma-mapping.h> | ||
35 | #include <linux/interrupt.h> | ||
36 | #include <linux/clk.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/slab.h> | ||
39 | #include <linux/platform_data/dma-s3c24xx.h> | ||
40 | |||
41 | #include "dmaengine.h" | ||
42 | #include "virt-dma.h" | ||
43 | |||
44 | #define MAX_DMA_CHANNELS 8 | ||
45 | |||
46 | #define S3C24XX_DISRC 0x00 | ||
47 | #define S3C24XX_DISRCC 0x04 | ||
48 | #define S3C24XX_DISRCC_INC_INCREMENT 0 | ||
49 | #define S3C24XX_DISRCC_INC_FIXED BIT(0) | ||
50 | #define S3C24XX_DISRCC_LOC_AHB 0 | ||
51 | #define S3C24XX_DISRCC_LOC_APB BIT(1) | ||
52 | |||
53 | #define S3C24XX_DIDST 0x08 | ||
54 | #define S3C24XX_DIDSTC 0x0c | ||
55 | #define S3C24XX_DIDSTC_INC_INCREMENT 0 | ||
56 | #define S3C24XX_DIDSTC_INC_FIXED BIT(0) | ||
57 | #define S3C24XX_DIDSTC_LOC_AHB 0 | ||
58 | #define S3C24XX_DIDSTC_LOC_APB BIT(1) | ||
59 | #define S3C24XX_DIDSTC_INT_TC0 0 | ||
60 | #define S3C24XX_DIDSTC_INT_RELOAD BIT(2) | ||
61 | |||
62 | #define S3C24XX_DCON 0x10 | ||
63 | |||
64 | #define S3C24XX_DCON_TC_MASK 0xfffff | ||
65 | #define S3C24XX_DCON_DSZ_BYTE (0 << 20) | ||
66 | #define S3C24XX_DCON_DSZ_HALFWORD (1 << 20) | ||
67 | #define S3C24XX_DCON_DSZ_WORD (2 << 20) | ||
68 | #define S3C24XX_DCON_DSZ_MASK (3 << 20) | ||
69 | #define S3C24XX_DCON_DSZ_SHIFT 20 | ||
70 | #define S3C24XX_DCON_AUTORELOAD 0 | ||
71 | #define S3C24XX_DCON_NORELOAD BIT(22) | ||
72 | #define S3C24XX_DCON_HWTRIG BIT(23) | ||
73 | #define S3C24XX_DCON_HWSRC_SHIFT 24 | ||
74 | #define S3C24XX_DCON_SERV_SINGLE 0 | ||
75 | #define S3C24XX_DCON_SERV_WHOLE BIT(27) | ||
76 | #define S3C24XX_DCON_TSZ_UNIT 0 | ||
77 | #define S3C24XX_DCON_TSZ_BURST4 BIT(28) | ||
78 | #define S3C24XX_DCON_INT BIT(29) | ||
79 | #define S3C24XX_DCON_SYNC_PCLK 0 | ||
80 | #define S3C24XX_DCON_SYNC_HCLK BIT(30) | ||
81 | #define S3C24XX_DCON_DEMAND 0 | ||
82 | #define S3C24XX_DCON_HANDSHAKE BIT(31) | ||
83 | |||
84 | #define S3C24XX_DSTAT 0x14 | ||
85 | #define S3C24XX_DSTAT_STAT_BUSY BIT(20) | ||
86 | #define S3C24XX_DSTAT_CURRTC_MASK 0xfffff | ||
87 | |||
88 | #define S3C24XX_DMASKTRIG 0x20 | ||
89 | #define S3C24XX_DMASKTRIG_SWTRIG BIT(0) | ||
90 | #define S3C24XX_DMASKTRIG_ON BIT(1) | ||
91 | #define S3C24XX_DMASKTRIG_STOP BIT(2) | ||
92 | |||
93 | #define S3C24XX_DMAREQSEL 0x24 | ||
94 | #define S3C24XX_DMAREQSEL_HW BIT(0) | ||
95 | |||
96 | /* | ||
97 | * S3C2410, S3C2440 and S3C2442 SoCs cannot select any physical channel | ||
98 | * for a DMA source. Instead only specific channels are valid. | ||
99 | * All of these SoCs have 4 physical channels and the number of request | ||
100 | * source bits is 3. Additionally we also need 1 bit to mark the channel | ||
101 | * as valid. | ||
102 | * Therefore we separate the chansel element of the channel data into 4 | ||
103 | * parts of 4 bits each, to hold the information if the channel is valid | ||
104 | * and the hw request source to use. | ||
105 | * | ||
106 | * Example: | ||
107 | * SDI is valid on channels 0, 2 and 3 - with varying hw request sources. | ||
108 | * For it the chansel field would look like | ||
109 | * | ||
110 | * ((BIT(3) | 1) << 3 * 4) | // channel 3, with request source 1 | ||
111 | * ((BIT(3) | 2) << 2 * 4) | // channel 2, with request source 2 | ||
112 | * ((BIT(3) | 2) << 0 * 4) // channel 0, with request source 2 | ||
113 | */ | ||
114 | #define S3C24XX_CHANSEL_WIDTH 4 | ||
115 | #define S3C24XX_CHANSEL_VALID BIT(3) | ||
116 | #define S3C24XX_CHANSEL_REQ_MASK 7 | ||
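A minimal stand-alone sketch of decoding the SDI chansel example from the comment above, with the macros copied from this patch and the chansel value built exactly as in that example:

	#include <stdio.h>

	#define S3C24XX_CHANSEL_WIDTH		4
	#define S3C24XX_CHANSEL_VALID		(1 << 3)
	#define S3C24XX_CHANSEL_REQ_MASK	7

	int main(void)
	{
		/* the SDI example from the comment above */
		unsigned int chansel =
			((S3C24XX_CHANSEL_VALID | 1) << 3 * 4) |
			((S3C24XX_CHANSEL_VALID | 2) << 2 * 4) |
			((S3C24XX_CHANSEL_VALID | 2) << 0 * 4);
		unsigned int id;

		for (id = 0; id < 4; id++) {
			unsigned int sel =
				chansel >> (id * S3C24XX_CHANSEL_WIDTH);

			if (sel & S3C24XX_CHANSEL_VALID)
				printf("channel %u: valid, request source %u\n",
				       id, sel & S3C24XX_CHANSEL_REQ_MASK);
			else
				printf("channel %u: not usable\n", id);
		}
		return 0;
	}

This prints channels 0, 2 and 3 as valid (with request sources 2, 2 and 1) and channel 1 as unusable; it is the same test that s3c24xx_dma_phy_valid() below performs for a single physical channel.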
117 | |||
118 | /* | ||
119 | * struct soc_data - vendor-specific config parameters for individual SoCs | ||
120 | * @stride: spacing between the registers of each channel | ||
121 | * @has_reqsel: does the controller use the newer request selection mechanism | ||
122 | * @has_clocks: are controllable dma-clocks present | ||
123 | */ | ||
124 | struct soc_data { | ||
125 | int stride; | ||
126 | bool has_reqsel; | ||
127 | bool has_clocks; | ||
128 | }; | ||
129 | |||
130 | /* | ||
131 | * enum s3c24xx_dma_chan_state - holds the virtual channel states | ||
132 | * @S3C24XX_DMA_CHAN_IDLE: the channel is idle | ||
133 | * @S3C24XX_DMA_CHAN_RUNNING: the channel has allocated a physical transport | ||
134 | * channel and is running a transfer on it | ||
135 | * @S3C24XX_DMA_CHAN_WAITING: the channel is waiting for a physical transport | ||
136 | * channel to become available (only pertains to memcpy channels) | ||
137 | */ | ||
138 | enum s3c24xx_dma_chan_state { | ||
139 | S3C24XX_DMA_CHAN_IDLE, | ||
140 | S3C24XX_DMA_CHAN_RUNNING, | ||
141 | S3C24XX_DMA_CHAN_WAITING, | ||
142 | }; | ||
143 | |||
144 | /* | ||
145 | * struct s3c24xx_sg - structure containing data per sg | ||
146 | * @src_addr: src address of sg | ||
147 | * @dst_addr: dst address of sg | ||
148 | * @len: transfer len in bytes | ||
149 | * @node: node for txd's dsg_list | ||
150 | */ | ||
151 | struct s3c24xx_sg { | ||
152 | dma_addr_t src_addr; | ||
153 | dma_addr_t dst_addr; | ||
154 | size_t len; | ||
155 | struct list_head node; | ||
156 | }; | ||
157 | |||
158 | /* | ||
159 | * struct s3c24xx_txd - wrapper for struct dma_async_tx_descriptor | ||
160 | * @vd: virtual DMA descriptor | ||
161 | * @dsg_list: list of children sg's | ||
162 | * @at: sg currently being transferred | ||
163 | * @width: transfer width | ||
164 | * @disrcc: value for source control register | ||
165 | * @didstc: value for destination control register | ||
166 | * @dcon: base value for dcon register | ||
167 | */ | ||
168 | struct s3c24xx_txd { | ||
169 | struct virt_dma_desc vd; | ||
170 | struct list_head dsg_list; | ||
171 | struct list_head *at; | ||
172 | u8 width; | ||
173 | u32 disrcc; | ||
174 | u32 didstc; | ||
175 | u32 dcon; | ||
176 | }; | ||
177 | |||
178 | struct s3c24xx_dma_chan; | ||
179 | |||
180 | /* | ||
181 | * struct s3c24xx_dma_phy - holder for the physical channels | ||
182 | * @id: physical index to this channel | ||
183 | * @valid: does the channel have all required elements | ||
184 | * @base: virtual memory base (remapped) for this channel | ||
185 | * @irq: interrupt for this channel | ||
186 | * @clk: clock for this channel | ||
187 | * @lock: a lock to use when altering an instance of this struct | ||
188 | * @serving: virtual channel currently being served by this physical channel | ||
189 | * @host: a pointer to the host (internal use) | ||
190 | */ | ||
191 | struct s3c24xx_dma_phy { | ||
192 | unsigned int id; | ||
193 | bool valid; | ||
194 | void __iomem *base; | ||
195 | unsigned int irq; | ||
196 | struct clk *clk; | ||
197 | spinlock_t lock; | ||
198 | struct s3c24xx_dma_chan *serving; | ||
199 | struct s3c24xx_dma_engine *host; | ||
200 | }; | ||
201 | |||
202 | /* | ||
203 | * struct s3c24xx_dma_chan - this structure wraps a DMA ENGINE channel | ||
204 | * @id: the id of the channel | ||
205 | * @name: name of the channel | ||
206 | * @vc: wrapped virtual channel | ||
207 | * @phy: the physical channel utilized by this channel, if there is one | ||
208 | * @cfg: slave configuration for this channel, set at runtime | ||
209 | * @at: active transaction on this channel | ||
210 | * @lock: a lock for this channel data | ||
211 | * @host: a pointer to the host (internal use) | ||
212 | * @state: whether the channel is idle, running etc | ||
213 | * @slave: whether this channel is a device (slave) or for memcpy | ||
214 | */ | ||
215 | struct s3c24xx_dma_chan { | ||
216 | int id; | ||
217 | const char *name; | ||
218 | struct virt_dma_chan vc; | ||
219 | struct s3c24xx_dma_phy *phy; | ||
220 | struct dma_slave_config cfg; | ||
221 | struct s3c24xx_txd *at; | ||
222 | struct s3c24xx_dma_engine *host; | ||
223 | enum s3c24xx_dma_chan_state state; | ||
224 | bool slave; | ||
225 | }; | ||
226 | |||
227 | /* | ||
228 | * struct s3c24xx_dma_engine - the local state holder for the S3C24XX DMA engine | ||
229 | * @pdev: the corresponding platform device | ||
230 | * @pdata: platform data passed in from the platform/machine | ||
231 | * @base: virtual memory base (remapped) | ||
232 | * @slave: slave engine for this instance | ||
233 | * @memcpy: memcpy engine for this instance | ||
234 | * @phy_chans: array of data for the physical channels | ||
235 | */ | ||
236 | struct s3c24xx_dma_engine { | ||
237 | struct platform_device *pdev; | ||
238 | const struct s3c24xx_dma_platdata *pdata; | ||
239 | struct soc_data *sdata; | ||
240 | void __iomem *base; | ||
241 | struct dma_device slave; | ||
242 | struct dma_device memcpy; | ||
243 | struct s3c24xx_dma_phy *phy_chans; | ||
244 | }; | ||
245 | |||
246 | /* | ||
247 | * Physical channel handling | ||
248 | */ | ||
249 | |||
250 | /* | ||
251 | * Check whether a certain channel is busy or not. | ||
252 | */ | ||
253 | static int s3c24xx_dma_phy_busy(struct s3c24xx_dma_phy *phy) | ||
254 | { | ||
255 | unsigned int val = readl(phy->base + S3C24XX_DSTAT); | ||
256 | return val & S3C24XX_DSTAT_STAT_BUSY; | ||
257 | } | ||
258 | |||
259 | static bool s3c24xx_dma_phy_valid(struct s3c24xx_dma_chan *s3cchan, | ||
260 | struct s3c24xx_dma_phy *phy) | ||
261 | { | ||
262 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
263 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
264 | struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; | ||
265 | int phyvalid; | ||
266 | |||
267 | /* every phy is valid for memcpy channels */ | ||
268 | if (!s3cchan->slave) | ||
269 | return true; | ||
270 | |||
271 | /* On newer variants all phys can be used for all virtual channels */ | ||
272 | if (s3cdma->sdata->has_reqsel) | ||
273 | return true; | ||
274 | |||
275 | phyvalid = (cdata->chansel >> (phy->id * S3C24XX_CHANSEL_WIDTH)); | ||
276 | return (phyvalid & S3C24XX_CHANSEL_VALID) ? true : false; | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * Allocate a physical channel for a virtual channel | ||
281 | * | ||
282 | * Try to locate a physical channel to be used for this transfer. If all | ||
283 | * are taken return NULL and the requester will have to cope by using | ||
284 | * some fallback PIO mode or retrying later. | ||
285 | */ | ||
286 | static | ||
287 | struct s3c24xx_dma_phy *s3c24xx_dma_get_phy(struct s3c24xx_dma_chan *s3cchan) | ||
288 | { | ||
289 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
290 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
291 | struct s3c24xx_dma_channel *cdata; | ||
292 | struct s3c24xx_dma_phy *phy = NULL; | ||
293 | unsigned long flags; | ||
294 | int i; | ||
295 | int ret; | ||
296 | |||
297 | if (s3cchan->slave) | ||
298 | cdata = &pdata->channels[s3cchan->id]; | ||
299 | |||
300 | for (i = 0; i < s3cdma->pdata->num_phy_channels; i++) { | ||
301 | phy = &s3cdma->phy_chans[i]; | ||
302 | |||
303 | if (!phy->valid) | ||
304 | continue; | ||
305 | |||
306 | if (!s3c24xx_dma_phy_valid(s3cchan, phy)) | ||
307 | continue; | ||
308 | |||
309 | spin_lock_irqsave(&phy->lock, flags); | ||
310 | |||
311 | if (!phy->serving) { | ||
312 | phy->serving = s3cchan; | ||
313 | spin_unlock_irqrestore(&phy->lock, flags); | ||
314 | break; | ||
315 | } | ||
316 | |||
317 | spin_unlock_irqrestore(&phy->lock, flags); | ||
318 | } | ||
319 | |||
320 | /* No physical channel available, cope with it */ | ||
321 | if (i == s3cdma->pdata->num_phy_channels) { | ||
322 | dev_warn(&s3cdma->pdev->dev, "no phy channel available\n"); | ||
323 | return NULL; | ||
324 | } | ||
325 | |||
326 | /* start the phy clock */ | ||
327 | if (s3cdma->sdata->has_clocks) { | ||
328 | ret = clk_enable(phy->clk); | ||
329 | if (ret) { | ||
330 | dev_err(&s3cdma->pdev->dev, "could not enable clock for channel %d, err %d\n", | ||
331 | phy->id, ret); | ||
332 | phy->serving = NULL; | ||
333 | return NULL; | ||
334 | } | ||
335 | } | ||
336 | |||
337 | return phy; | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * Mark the physical channel as free. | ||
342 | * | ||
343 | * This drops the link between the physical and virtual channel. | ||
344 | */ | ||
345 | static inline void s3c24xx_dma_put_phy(struct s3c24xx_dma_phy *phy) | ||
346 | { | ||
347 | struct s3c24xx_dma_engine *s3cdma = phy->host; | ||
348 | |||
349 | if (s3cdma->sdata->has_clocks) | ||
350 | clk_disable(phy->clk); | ||
351 | |||
352 | phy->serving = NULL; | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Stops the channel by writing the stop bit. | ||
357 | * This should not be used for an on-going transfer, but as a method of | ||
358 | * shutting down a channel (eg, when it's no longer used) or terminating a | ||
359 | * transfer. | ||
360 | */ | ||
361 | static void s3c24xx_dma_terminate_phy(struct s3c24xx_dma_phy *phy) | ||
362 | { | ||
363 | writel(S3C24XX_DMASKTRIG_STOP, phy->base + S3C24XX_DMASKTRIG); | ||
364 | } | ||
365 | |||
366 | /* | ||
367 | * Virtual channel handling | ||
368 | */ | ||
369 | |||
370 | static inline | ||
371 | struct s3c24xx_dma_chan *to_s3c24xx_dma_chan(struct dma_chan *chan) | ||
372 | { | ||
373 | return container_of(chan, struct s3c24xx_dma_chan, vc.chan); | ||
374 | } | ||
375 | |||
376 | static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan) | ||
377 | { | ||
378 | struct s3c24xx_dma_phy *phy = s3cchan->phy; | ||
379 | struct s3c24xx_txd *txd = s3cchan->at; | ||
380 | u32 tc = readl(phy->base + S3C24XX_DSTAT) & S3C24XX_DSTAT_CURRTC_MASK; | ||
381 | |||
382 | return tc * txd->width; | ||
383 | } | ||
384 | |||
385 | static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan, | ||
386 | struct dma_slave_config *config) | ||
387 | { | ||
388 | if (!s3cchan->slave) | ||
389 | return -EINVAL; | ||
390 | |||
391 | /* Reject definitely invalid configurations */ | ||
392 | if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || | ||
393 | config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) | ||
394 | return -EINVAL; | ||
395 | |||
396 | s3cchan->cfg = *config; | ||
397 | |||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | /* | ||
402 | * Transfer handling | ||
403 | */ | ||
404 | |||
405 | static inline | ||
406 | struct s3c24xx_txd *to_s3c24xx_txd(struct dma_async_tx_descriptor *tx) | ||
407 | { | ||
408 | return container_of(tx, struct s3c24xx_txd, vd.tx); | ||
409 | } | ||
410 | |||
411 | static struct s3c24xx_txd *s3c24xx_dma_get_txd(void) | ||
412 | { | ||
413 | struct s3c24xx_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); | ||
414 | |||
415 | if (txd) { | ||
416 | INIT_LIST_HEAD(&txd->dsg_list); | ||
417 | txd->dcon = S3C24XX_DCON_INT | S3C24XX_DCON_NORELOAD; | ||
418 | } | ||
419 | |||
420 | return txd; | ||
421 | } | ||
422 | |||
423 | static void s3c24xx_dma_free_txd(struct s3c24xx_txd *txd) | ||
424 | { | ||
425 | struct s3c24xx_sg *dsg, *_dsg; | ||
426 | |||
427 | list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { | ||
428 | list_del(&dsg->node); | ||
429 | kfree(dsg); | ||
430 | } | ||
431 | |||
432 | kfree(txd); | ||
433 | } | ||
434 | |||
435 | static void s3c24xx_dma_start_next_sg(struct s3c24xx_dma_chan *s3cchan, | ||
436 | struct s3c24xx_txd *txd) | ||
437 | { | ||
438 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
439 | struct s3c24xx_dma_phy *phy = s3cchan->phy; | ||
440 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
441 | struct s3c24xx_sg *dsg = list_entry(txd->at, struct s3c24xx_sg, node); | ||
442 | u32 dcon = txd->dcon; | ||
443 | u32 val; | ||
444 | |||
445 | /* transfer-size and -count from len and width */ | ||
446 | switch (txd->width) { | ||
447 | case 1: | ||
448 | dcon |= S3C24XX_DCON_DSZ_BYTE | dsg->len; | ||
449 | break; | ||
450 | case 2: | ||
451 | dcon |= S3C24XX_DCON_DSZ_HALFWORD | (dsg->len / 2); | ||
452 | break; | ||
453 | case 4: | ||
454 | dcon |= S3C24XX_DCON_DSZ_WORD | (dsg->len / 4); | ||
455 | break; | ||
456 | } | ||
457 | |||
458 | if (s3cchan->slave) { | ||
459 | struct s3c24xx_dma_channel *cdata = | ||
460 | &pdata->channels[s3cchan->id]; | ||
461 | |||
462 | if (s3cdma->sdata->has_reqsel) { | ||
463 | writel_relaxed((cdata->chansel << 1) | | ||
464 | S3C24XX_DMAREQSEL_HW, | ||
465 | phy->base + S3C24XX_DMAREQSEL); | ||
466 | } else { | ||
467 | int csel = cdata->chansel >> (phy->id * | ||
468 | S3C24XX_CHANSEL_WIDTH); | ||
469 | |||
470 | csel &= S3C24XX_CHANSEL_REQ_MASK; | ||
471 | dcon |= csel << S3C24XX_DCON_HWSRC_SHIFT; | ||
472 | dcon |= S3C24XX_DCON_HWTRIG; | ||
473 | } | ||
474 | } else { | ||
475 | if (s3cdma->sdata->has_reqsel) | ||
476 | writel_relaxed(0, phy->base + S3C24XX_DMAREQSEL); | ||
477 | } | ||
478 | |||
479 | writel_relaxed(dsg->src_addr, phy->base + S3C24XX_DISRC); | ||
480 | writel_relaxed(txd->disrcc, phy->base + S3C24XX_DISRCC); | ||
481 | writel_relaxed(dsg->dst_addr, phy->base + S3C24XX_DIDST); | ||
482 | writel_relaxed(txd->didstc, phy->base + S3C24XX_DIDSTC); | ||
483 | writel_relaxed(dcon, phy->base + S3C24XX_DCON); | ||
484 | |||
485 | val = readl_relaxed(phy->base + S3C24XX_DMASKTRIG); | ||
486 | val &= ~S3C24XX_DMASKTRIG_STOP; | ||
487 | val |= S3C24XX_DMASKTRIG_ON; | ||
488 | |||
489 | /* trigger the dma operation for memcpy transfers */ | ||
490 | if (!s3cchan->slave) | ||
491 | val |= S3C24XX_DMASKTRIG_SWTRIG; | ||
492 | |||
493 | writel(val, phy->base + S3C24XX_DMASKTRIG); | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * Set the initial DMA register values and start first sg. | ||
498 | */ | ||
499 | static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan) | ||
500 | { | ||
501 | struct s3c24xx_dma_phy *phy = s3cchan->phy; | ||
502 | struct virt_dma_desc *vd = vchan_next_desc(&s3cchan->vc); | ||
503 | struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); | ||
504 | |||
505 | list_del(&txd->vd.node); | ||
506 | |||
507 | s3cchan->at = txd; | ||
508 | |||
509 | /* Wait for channel inactive */ | ||
510 | while (s3c24xx_dma_phy_busy(phy)) | ||
511 | cpu_relax(); | ||
512 | |||
513 | /* point to the first element of the sg list */ | ||
514 | txd->at = txd->dsg_list.next; | ||
515 | s3c24xx_dma_start_next_sg(s3cchan, txd); | ||
516 | } | ||
517 | |||
518 | static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma, | ||
519 | struct s3c24xx_dma_chan *s3cchan) | ||
520 | { | ||
521 | LIST_HEAD(head); | ||
522 | |||
523 | vchan_get_all_descriptors(&s3cchan->vc, &head); | ||
524 | vchan_dma_desc_free_list(&s3cchan->vc, &head); | ||
525 | } | ||
526 | |||
527 | /* | ||
528 | * Try to allocate a physical channel. When successful, assign it to | ||
529 | * this virtual channel, and initiate the next descriptor. The | ||
530 | * virtual channel lock must be held at this point. | ||
531 | */ | ||
532 | static void s3c24xx_dma_phy_alloc_and_start(struct s3c24xx_dma_chan *s3cchan) | ||
533 | { | ||
534 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
535 | struct s3c24xx_dma_phy *phy; | ||
536 | |||
537 | phy = s3c24xx_dma_get_phy(s3cchan); | ||
538 | if (!phy) { | ||
539 | dev_dbg(&s3cdma->pdev->dev, "no physical channel available for xfer on %s\n", | ||
540 | s3cchan->name); | ||
541 | s3cchan->state = S3C24XX_DMA_CHAN_WAITING; | ||
542 | return; | ||
543 | } | ||
544 | |||
545 | dev_dbg(&s3cdma->pdev->dev, "allocated physical channel %d for xfer on %s\n", | ||
546 | phy->id, s3cchan->name); | ||
547 | |||
548 | s3cchan->phy = phy; | ||
549 | s3cchan->state = S3C24XX_DMA_CHAN_RUNNING; | ||
550 | |||
551 | s3c24xx_dma_start_next_txd(s3cchan); | ||
552 | } | ||
553 | |||
554 | static void s3c24xx_dma_phy_reassign_start(struct s3c24xx_dma_phy *phy, | ||
555 | struct s3c24xx_dma_chan *s3cchan) | ||
556 | { | ||
557 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
558 | |||
559 | dev_dbg(&s3cdma->pdev->dev, "reassigned physical channel %d for xfer on %s\n", | ||
560 | phy->id, s3cchan->name); | ||
561 | |||
562 | /* | ||
563 | * We do this without taking the lock; we're really only concerned | ||
564 | * about whether this pointer is NULL or not, and we're guaranteed | ||
565 | * that this will only be called when it _already_ is non-NULL. | ||
566 | */ | ||
567 | phy->serving = s3cchan; | ||
568 | s3cchan->phy = phy; | ||
569 | s3cchan->state = S3C24XX_DMA_CHAN_RUNNING; | ||
570 | s3c24xx_dma_start_next_txd(s3cchan); | ||
571 | } | ||
572 | |||
573 | /* | ||
574 | * Free a physical DMA channel, potentially reallocating it to another | ||
575 | * virtual channel if we have any pending. | ||
576 | */ | ||
577 | static void s3c24xx_dma_phy_free(struct s3c24xx_dma_chan *s3cchan) | ||
578 | { | ||
579 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
580 | struct s3c24xx_dma_chan *p, *next; | ||
581 | |||
582 | retry: | ||
583 | next = NULL; | ||
584 | |||
585 | /* Find a waiting virtual channel for the next transfer. */ | ||
586 | list_for_each_entry(p, &s3cdma->memcpy.channels, vc.chan.device_node) | ||
587 | if (p->state == S3C24XX_DMA_CHAN_WAITING) { | ||
588 | next = p; | ||
589 | break; | ||
590 | } | ||
591 | |||
592 | if (!next) { | ||
593 | list_for_each_entry(p, &s3cdma->slave.channels, | ||
594 | vc.chan.device_node) | ||
595 | if (p->state == S3C24XX_DMA_CHAN_WAITING && | ||
596 | s3c24xx_dma_phy_valid(p, s3cchan->phy)) { | ||
597 | next = p; | ||
598 | break; | ||
599 | } | ||
600 | } | ||
601 | |||
602 | /* Ensure that the physical channel is stopped */ | ||
603 | s3c24xx_dma_terminate_phy(s3cchan->phy); | ||
604 | |||
605 | if (next) { | ||
606 | bool success; | ||
607 | |||
608 | /* | ||
609 | * Eww. We know this isn't going to deadlock | ||
610 | * but lockdep probably doesn't. | ||
611 | */ | ||
612 | spin_lock(&next->vc.lock); | ||
613 | /* Re-check the state now that we have the lock */ | ||
614 | success = next->state == S3C24XX_DMA_CHAN_WAITING; | ||
615 | if (success) | ||
616 | s3c24xx_dma_phy_reassign_start(s3cchan->phy, next); | ||
617 | spin_unlock(&next->vc.lock); | ||
618 | |||
619 | /* If the state changed, try to find another channel */ | ||
620 | if (!success) | ||
621 | goto retry; | ||
622 | } else { | ||
623 | /* No more jobs, so free up the physical channel */ | ||
624 | s3c24xx_dma_put_phy(s3cchan->phy); | ||
625 | } | ||
626 | |||
627 | s3cchan->phy = NULL; | ||
628 | s3cchan->state = S3C24XX_DMA_CHAN_IDLE; | ||
629 | } | ||
630 | |||
631 | static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd) | ||
632 | { | ||
633 | struct device *dev = txd->vd.tx.chan->device->dev; | ||
634 | struct s3c24xx_sg *dsg; | ||
635 | |||
636 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { | ||
637 | if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) | ||
638 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
639 | dma_unmap_single(dev, dsg->src_addr, dsg->len, | ||
640 | DMA_TO_DEVICE); | ||
641 | else { | ||
642 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
643 | dma_unmap_page(dev, dsg->src_addr, dsg->len, | ||
644 | DMA_TO_DEVICE); | ||
645 | } | ||
646 | } | ||
647 | |||
648 | if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { | ||
649 | if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) | ||
650 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
651 | dma_unmap_single(dev, dsg->dst_addr, dsg->len, | ||
652 | DMA_FROM_DEVICE); | ||
653 | else | ||
654 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
655 | dma_unmap_page(dev, dsg->dst_addr, dsg->len, | ||
656 | DMA_FROM_DEVICE); | ||
657 | } | ||
658 | } | ||
659 | |||
660 | static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd) | ||
661 | { | ||
662 | struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx); | ||
663 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan); | ||
664 | |||
665 | if (!s3cchan->slave) | ||
666 | s3c24xx_dma_unmap_buffers(txd); | ||
667 | |||
668 | s3c24xx_dma_free_txd(txd); | ||
669 | } | ||
670 | |||
671 | static irqreturn_t s3c24xx_dma_irq(int irq, void *data) | ||
672 | { | ||
673 | struct s3c24xx_dma_phy *phy = data; | ||
674 | struct s3c24xx_dma_chan *s3cchan = phy->serving; | ||
675 | struct s3c24xx_txd *txd; | ||
676 | |||
677 | dev_dbg(&phy->host->pdev->dev, "interrupt on channel %d\n", phy->id); | ||
678 | |||
679 | /* | ||
680 | * Interrupts happen to notify the completion of a transfer and the | ||
681 | * channel should have moved into its stop state already on its own. | ||
682 | * Therefore interrupts on channels not bound to a virtual channel | ||
683 | * should never happen. Nevertheless send a terminate command to the | ||
684 | * channel if the unlikely case happens. | ||
685 | */ | ||
686 | if (unlikely(!s3cchan)) { | ||
687 | dev_err(&phy->host->pdev->dev, "interrupt on unused channel %d\n", | ||
688 | phy->id); | ||
689 | |||
690 | s3c24xx_dma_terminate_phy(phy); | ||
691 | |||
692 | return IRQ_HANDLED; | ||
693 | } | ||
694 | |||
695 | spin_lock(&s3cchan->vc.lock); | ||
696 | txd = s3cchan->at; | ||
697 | if (txd) { | ||
698 | /* when more sg's are in this txd, start the next one */ | ||
699 | if (!list_is_last(txd->at, &txd->dsg_list)) { | ||
700 | txd->at = txd->at->next; | ||
701 | s3c24xx_dma_start_next_sg(s3cchan, txd); | ||
702 | } else { | ||
703 | s3cchan->at = NULL; | ||
704 | vchan_cookie_complete(&txd->vd); | ||
705 | |||
706 | /* | ||
707 | * And start the next descriptor (if any), | ||
708 | * otherwise free this channel. | ||
709 | */ | ||
710 | if (vchan_next_desc(&s3cchan->vc)) | ||
711 | s3c24xx_dma_start_next_txd(s3cchan); | ||
712 | else | ||
713 | s3c24xx_dma_phy_free(s3cchan); | ||
714 | } | ||
715 | } | ||
716 | spin_unlock(&s3cchan->vc.lock); | ||
717 | |||
718 | return IRQ_HANDLED; | ||
719 | } | ||
720 | |||
721 | /* | ||
722 | * The DMA ENGINE API | ||
723 | */ | ||
724 | |||
725 | static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, | ||
726 | unsigned long arg) | ||
727 | { | ||
728 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
729 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
730 | unsigned long flags; | ||
731 | int ret = 0; | ||
732 | |||
733 | spin_lock_irqsave(&s3cchan->vc.lock, flags); | ||
734 | |||
735 | switch (cmd) { | ||
736 | case DMA_SLAVE_CONFIG: | ||
737 | ret = s3c24xx_dma_set_runtime_config(s3cchan, | ||
738 | (struct dma_slave_config *)arg); | ||
739 | break; | ||
740 | case DMA_TERMINATE_ALL: | ||
741 | if (!s3cchan->phy && !s3cchan->at) { | ||
742 | dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n", | ||
743 | s3cchan->id); | ||
744 | ret = -EINVAL; | ||
745 | break; | ||
746 | } | ||
747 | |||
748 | s3cchan->state = S3C24XX_DMA_CHAN_IDLE; | ||
749 | |||
750 | /* Mark physical channel as free */ | ||
751 | if (s3cchan->phy) | ||
752 | s3c24xx_dma_phy_free(s3cchan); | ||
753 | |||
754 | /* Dequeue current job */ | ||
755 | if (s3cchan->at) { | ||
756 | s3c24xx_dma_desc_free(&s3cchan->at->vd); | ||
757 | s3cchan->at = NULL; | ||
758 | } | ||
759 | |||
760 | /* Dequeue jobs not yet fired as well */ | ||
761 | s3c24xx_dma_free_txd_list(s3cdma, s3cchan); | ||
762 | break; | ||
763 | default: | ||
764 | /* Unknown command */ | ||
765 | ret = -ENXIO; | ||
766 | break; | ||
767 | } | ||
768 | |||
769 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
770 | |||
771 | return ret; | ||
772 | } | ||
773 | |||
774 | static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan) | ||
775 | { | ||
776 | return 0; | ||
777 | } | ||
778 | |||
779 | static void s3c24xx_dma_free_chan_resources(struct dma_chan *chan) | ||
780 | { | ||
781 | /* Ensure all queued descriptors are freed */ | ||
782 | vchan_free_chan_resources(to_virt_chan(chan)); | ||
783 | } | ||
784 | |||
785 | static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan, | ||
786 | dma_cookie_t cookie, struct dma_tx_state *txstate) | ||
787 | { | ||
788 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
789 | struct s3c24xx_txd *txd; | ||
790 | struct s3c24xx_sg *dsg; | ||
791 | struct virt_dma_desc *vd; | ||
792 | unsigned long flags; | ||
793 | enum dma_status ret; | ||
794 | size_t bytes = 0; | ||
795 | |||
796 | spin_lock_irqsave(&s3cchan->vc.lock, flags); | ||
797 | ret = dma_cookie_status(chan, cookie, txstate); | ||
798 | if (ret == DMA_SUCCESS) { | ||
799 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
800 | return ret; | ||
801 | } | ||
802 | |||
803 | /* | ||
804 | * There's no point calculating the residue if there's | ||
805 | * no txstate to store the value. | ||
806 | */ | ||
807 | if (!txstate) { | ||
808 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
809 | return ret; | ||
810 | } | ||
811 | |||
812 | vd = vchan_find_desc(&s3cchan->vc, cookie); | ||
813 | if (vd) { | ||
814 | /* On the issued list, so hasn't been processed yet */ | ||
815 | txd = to_s3c24xx_txd(&vd->tx); | ||
816 | |||
817 | list_for_each_entry(dsg, &txd->dsg_list, node) | ||
818 | bytes += dsg->len; | ||
819 | } else { | ||
820 | /* | ||
821 | * Currently running, so sum over the pending sg's and | ||
822 | * the currently active one. | ||
823 | */ | ||
824 | txd = s3cchan->at; | ||
825 | |||
826 | dsg = list_entry(txd->at, struct s3c24xx_sg, node); | ||
827 | list_for_each_entry_from(dsg, &txd->dsg_list, node) | ||
828 | bytes += dsg->len; | ||
829 | |||
830 | bytes += s3c24xx_dma_getbytes_chan(s3cchan); | ||
831 | } | ||
832 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
833 | |||
834 | /* | ||
835 | * This cookie is not complete yet. | ||
836 | * Report the number of bytes left in the active transaction and queue. | ||
837 | */ | ||
838 | dma_set_residue(txstate, bytes); | ||
839 | |||
840 | /* Whether waiting or running, we're in progress */ | ||
841 | return ret; | ||
842 | } | ||
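
On the client side, the residue computed above is read back through dma_tx_state; a sketch with illustrative names:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status != DMA_SUCCESS)
		dev_dbg(dev, "transfer in flight, %u bytes left\n",
			state.residue);
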
843 | |||
844 | /* | ||
845 | * Initialize a descriptor to be used by memcpy submit | ||
846 | */ | ||
847 | static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy( | ||
848 | struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, | ||
849 | size_t len, unsigned long flags) | ||
850 | { | ||
851 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
852 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
853 | struct s3c24xx_txd *txd; | ||
854 | struct s3c24xx_sg *dsg; | ||
855 | int src_mod, dest_mod; | ||
856 | |||
857 | dev_dbg(&s3cdma->pdev->dev, "prepare memcpy of %zu bytes from %s\n", | ||
858 | len, s3cchan->name); | ||
859 | |||
860 | if ((len & S3C24XX_DCON_TC_MASK) != len) { | ||
861 | dev_err(&s3cdma->pdev->dev, "memcpy size %zu too large\n", len); | ||
862 | return NULL; | ||
863 | } | ||
864 | |||
865 | txd = s3c24xx_dma_get_txd(); | ||
866 | if (!txd) | ||
867 | return NULL; | ||
868 | |||
869 | dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); | ||
870 | if (!dsg) { | ||
871 | s3c24xx_dma_free_txd(txd); | ||
872 | return NULL; | ||
873 | } | ||
874 | list_add_tail(&dsg->node, &txd->dsg_list); | ||
875 | |||
876 | dsg->src_addr = src; | ||
877 | dsg->dst_addr = dest; | ||
878 | dsg->len = len; | ||
879 | |||
880 | /* | ||
881 | * Determine a suitable transfer width. | ||
882 | * The DMA controller cannot fetch/store information which is not | ||
883 | * naturally aligned on the bus, i.e., a 4 byte fetch must start at | ||
884 | * an address divisible by 4 - more generally addr % width must be 0. | ||
885 | */ | ||
886 | src_mod = src % 4; | ||
887 | dest_mod = dest % 4; | ||
888 | switch (len % 4) { | ||
889 | case 0: | ||
890 | txd->width = (src_mod == 0 && dest_mod == 0) ? 4 : 1; | ||
891 | break; | ||
892 | case 2: | ||
893 | txd->width = ((src_mod == 2 || src_mod == 0) && | ||
894 | (dest_mod == 2 || dest_mod == 0)) ? 2 : 1; | ||
895 | break; | ||
896 | default: | ||
897 | txd->width = 1; | ||
898 | break; | ||
899 | } | ||
900 | |||
901 | txd->disrcc = S3C24XX_DISRCC_LOC_AHB | S3C24XX_DISRCC_INC_INCREMENT; | ||
902 | txd->didstc = S3C24XX_DIDSTC_LOC_AHB | S3C24XX_DIDSTC_INC_INCREMENT; | ||
903 | txd->dcon |= S3C24XX_DCON_DEMAND | S3C24XX_DCON_SYNC_HCLK | | ||
904 | S3C24XX_DCON_SERV_WHOLE; | ||
905 | |||
906 | return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); | ||
907 | } | ||
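
A sketch exercising the width selection above: with word-aligned src_dma/dst_dma handles (illustrative names), offsetting both by 2 and copying 6 bytes takes the halfword branch (src % 4 == dest % 4 == 2, len % 4 == 2):

	struct dma_async_tx_descriptor *desc;

	desc = chan->device->device_prep_dma_memcpy(chan, dst_dma + 2,
						    src_dma + 2, 6, 0);
	if (desc) {
		dmaengine_submit(desc);
		dma_async_issue_pending(chan);
	}
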
908 | |||
909 | static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( | ||
910 | struct dma_chan *chan, struct scatterlist *sgl, | ||
911 | unsigned int sg_len, enum dma_transfer_direction direction, | ||
912 | unsigned long flags, void *context) | ||
913 | { | ||
914 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
915 | struct s3c24xx_dma_engine *s3cdma = s3cchan->host; | ||
916 | const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata; | ||
917 | struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id]; | ||
918 | struct s3c24xx_txd *txd; | ||
919 | struct s3c24xx_sg *dsg; | ||
920 | struct scatterlist *sg; | ||
921 | dma_addr_t slave_addr; | ||
922 | u32 hwcfg = 0; | ||
923 | int tmp; | ||
924 | |||
925 | dev_dbg(&s3cdma->pdev->dev, "prepare transaction of %d bytes from %s\n", | ||
926 | sg_dma_len(sgl), s3cchan->name); | ||
927 | |||
928 | txd = s3c24xx_dma_get_txd(); | ||
929 | if (!txd) | ||
930 | return NULL; | ||
931 | |||
932 | if (cdata->handshake) | ||
933 | txd->dcon |= S3C24XX_DCON_HANDSHAKE; | ||
934 | |||
935 | switch (cdata->bus) { | ||
936 | case S3C24XX_DMA_APB: | ||
937 | txd->dcon |= S3C24XX_DCON_SYNC_PCLK; | ||
938 | hwcfg |= S3C24XX_DISRCC_LOC_APB; | ||
939 | break; | ||
940 | case S3C24XX_DMA_AHB: | ||
941 | txd->dcon |= S3C24XX_DCON_SYNC_HCLK; | ||
942 | hwcfg |= S3C24XX_DISRCC_LOC_AHB; | ||
943 | break; | ||
944 | } | ||
945 | |||
946 | /* | ||
947 | * Always assume our peripheral destination is a fixed | ||
948 | * address in memory. | ||
949 | */ | ||
950 | hwcfg |= S3C24XX_DISRCC_INC_FIXED; | ||
951 | |||
952 | /* | ||
953 | * Individual dma operations are requested by the slave, | ||
954 | * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE). | ||
955 | */ | ||
956 | txd->dcon |= S3C24XX_DCON_SERV_SINGLE; | ||
957 | |||
958 | if (direction == DMA_MEM_TO_DEV) { | ||
959 | txd->disrcc = S3C24XX_DISRCC_LOC_AHB | | ||
960 | S3C24XX_DISRCC_INC_INCREMENT; | ||
961 | txd->didstc = hwcfg; | ||
962 | slave_addr = s3cchan->cfg.dst_addr; | ||
963 | txd->width = s3cchan->cfg.dst_addr_width; | ||
964 | } else if (direction == DMA_DEV_TO_MEM) { | ||
965 | txd->disrcc = hwcfg; | ||
966 | txd->didstc = S3C24XX_DIDSTC_LOC_AHB | | ||
967 | S3C24XX_DIDSTC_INC_INCREMENT; | ||
968 | slave_addr = s3cchan->cfg.src_addr; | ||
969 | txd->width = s3cchan->cfg.src_addr_width; | ||
970 | } else { | ||
971 | s3c24xx_dma_free_txd(txd); | ||
972 | dev_err(&s3cdma->pdev->dev, | ||
973 | "direction %d unsupported\n", direction); | ||
974 | return NULL; | ||
975 | } | ||
976 | |||
977 | for_each_sg(sgl, sg, sg_len, tmp) { | ||
978 | dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT); | ||
979 | if (!dsg) { | ||
980 | s3c24xx_dma_free_txd(txd); | ||
981 | return NULL; | ||
982 | } | ||
983 | list_add_tail(&dsg->node, &txd->dsg_list); | ||
984 | |||
985 | dsg->len = sg_dma_len(sg); | ||
986 | if (direction == DMA_MEM_TO_DEV) { | ||
987 | dsg->src_addr = sg_dma_address(sg); | ||
988 | dsg->dst_addr = slave_addr; | ||
989 | } else { /* DMA_DEV_TO_MEM */ | ||
990 | dsg->src_addr = slave_addr; | ||
991 | dsg->dst_addr = sg_dma_address(sg); | ||
992 | } | ||
994 | } | ||
995 | |||
996 | return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); | ||
997 | } | ||
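
The matching client side, sketched with an illustrative single-entry sg table (buffer mapping abbreviated, names made up):

	struct scatterlist sg;
	struct dma_async_tx_descriptor *desc;

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
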
998 | |||
999 | /* | ||
1000 | * Kick off any queued transactions: allocate and start a physical | ||
1001 | * channel on demand if this virtual channel does not have one yet | ||
1002 | */ | ||
1003 | static void s3c24xx_dma_issue_pending(struct dma_chan *chan) | ||
1004 | { | ||
1005 | struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); | ||
1006 | unsigned long flags; | ||
1007 | |||
1008 | spin_lock_irqsave(&s3cchan->vc.lock, flags); | ||
1009 | if (vchan_issue_pending(&s3cchan->vc)) { | ||
1010 | if (!s3cchan->phy && s3cchan->state != S3C24XX_DMA_CHAN_WAITING) | ||
1011 | s3c24xx_dma_phy_alloc_and_start(s3cchan); | ||
1012 | } | ||
1013 | spin_unlock_irqrestore(&s3cchan->vc.lock, flags); | ||
1014 | } | ||
1015 | |||
1016 | /* | ||
1017 | * Bringup and teardown | ||
1018 | */ | ||
1019 | |||
1020 | /* | ||
1021 | * Initialise the DMAC memcpy/slave channels. | ||
1022 | * Make a local wrapper to hold required data | ||
1023 | */ | ||
1024 | static int s3c24xx_dma_init_virtual_channels(struct s3c24xx_dma_engine *s3cdma, | ||
1025 | struct dma_device *dmadev, unsigned int channels, bool slave) | ||
1026 | { | ||
1027 | struct s3c24xx_dma_chan *chan; | ||
1028 | int i; | ||
1029 | |||
1030 | INIT_LIST_HEAD(&dmadev->channels); | ||
1031 | |||
1032 | /* | ||
1033 | * Register as many memcpy channels as there are physical channels; | ||
1034 | * we won't always be able to use them all, but the code will have | ||
1035 | * to cope with that situation. | ||
1036 | */ | ||
1037 | for (i = 0; i < channels; i++) { | ||
1038 | chan = devm_kzalloc(dmadev->dev, sizeof(*chan), GFP_KERNEL); | ||
1039 | if (!chan) { | ||
1040 | dev_err(dmadev->dev, | ||
1041 | "%s no memory for channel\n", __func__); | ||
1042 | return -ENOMEM; | ||
1043 | } | ||
1044 | |||
1045 | chan->id = i; | ||
1046 | chan->host = s3cdma; | ||
1047 | chan->state = S3C24XX_DMA_CHAN_IDLE; | ||
1048 | |||
1049 | if (slave) { | ||
1050 | chan->slave = true; | ||
1051 | chan->name = kasprintf(GFP_KERNEL, "slave%d", i); | ||
1052 | if (!chan->name) | ||
1053 | return -ENOMEM; | ||
1054 | } else { | ||
1055 | chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); | ||
1056 | if (!chan->name) | ||
1057 | return -ENOMEM; | ||
1058 | } | ||
1059 | dev_dbg(dmadev->dev, | ||
1060 | "initialize virtual channel \"%s\"\n", | ||
1061 | chan->name); | ||
1062 | |||
1063 | chan->vc.desc_free = s3c24xx_dma_desc_free; | ||
1064 | vchan_init(&chan->vc, dmadev); | ||
1065 | } | ||
1066 | dev_info(dmadev->dev, "initialized %d virtual %s channels\n", | ||
1067 | i, slave ? "slave" : "memcpy"); | ||
1068 | return i; | ||
1069 | } | ||
1070 | |||
1071 | static void s3c24xx_dma_free_virtual_channels(struct dma_device *dmadev) | ||
1072 | { | ||
1073 | struct s3c24xx_dma_chan *chan = NULL; | ||
1074 | struct s3c24xx_dma_chan *next; | ||
1075 | |||
1076 | list_for_each_entry_safe(chan, | ||
1077 | next, &dmadev->channels, vc.chan.device_node) | ||
1078 | list_del(&chan->vc.chan.device_node); | ||
1079 | } | ||
1080 | |||
1081 | /* s3c2410, s3c2440 and s3c2442 have a 0x40 stride without separate clocks */ | ||
1082 | static struct soc_data soc_s3c2410 = { | ||
1083 | .stride = 0x40, | ||
1084 | .has_reqsel = false, | ||
1085 | .has_clocks = false, | ||
1086 | }; | ||
1087 | |||
1088 | /* s3c2412 and s3c2413 have a 0x40 stride and dmareqsel mechanism */ | ||
1089 | static struct soc_data soc_s3c2412 = { | ||
1090 | .stride = 0x40, | ||
1091 | .has_reqsel = true, | ||
1092 | .has_clocks = true, | ||
1093 | }; | ||
1094 | |||
1095 | /* s3c2443 and following have a 0x100 stride and dmareqsel mechanism */ | ||
1096 | static struct soc_data soc_s3c2443 = { | ||
1097 | .stride = 0x100, | ||
1098 | .has_reqsel = true, | ||
1099 | .has_clocks = true, | ||
1100 | }; | ||
1101 | |||
1102 | static struct platform_device_id s3c24xx_dma_driver_ids[] = { | ||
1103 | { | ||
1104 | .name = "s3c2410-dma", | ||
1105 | .driver_data = (kernel_ulong_t)&soc_s3c2410, | ||
1106 | }, { | ||
1107 | .name = "s3c2412-dma", | ||
1108 | .driver_data = (kernel_ulong_t)&soc_s3c2412, | ||
1109 | }, { | ||
1110 | .name = "s3c2443-dma", | ||
1111 | .driver_data = (kernel_ulong_t)&soc_s3c2443, | ||
1112 | }, | ||
1113 | { }, | ||
1114 | }; | ||
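
For context, a sketch of the board-file side these id-table entries match against; only the device name matters for matching, and the platform-data contents shown are illustrative:

	static struct s3c24xx_dma_platdata dma_platdata = {
		.num_phy_channels = 4,
		/* .channels[]/.num_channels describe the slave requests */
	};

	static struct platform_device dma_device = {
		.name              = "s3c2410-dma",
		.id                = -1,
		.dev.platform_data = &dma_platdata,
	};
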
1115 | |||
1116 | static struct soc_data *s3c24xx_dma_get_soc_data(struct platform_device *pdev) | ||
1117 | { | ||
1118 | return (struct soc_data *) | ||
1119 | platform_get_device_id(pdev)->driver_data; | ||
1120 | } | ||
1121 | |||
1122 | static int s3c24xx_dma_probe(struct platform_device *pdev) | ||
1123 | { | ||
1124 | const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); | ||
1125 | struct s3c24xx_dma_engine *s3cdma; | ||
1126 | struct soc_data *sdata; | ||
1127 | struct resource *res; | ||
1128 | int ret; | ||
1129 | int i; | ||
1130 | |||
1131 | if (!pdata) { | ||
1132 | dev_err(&pdev->dev, "platform data missing\n"); | ||
1133 | return -ENODEV; | ||
1134 | } | ||
1135 | |||
1136 | /* Basic sanity check */ | ||
1137 | if (pdata->num_phy_channels > MAX_DMA_CHANNELS) { | ||
1138 | dev_err(&pdev->dev, "too many dma channels %d, max %d\n", | ||
1139 | pdata->num_phy_channels, MAX_DMA_CHANNELS); | ||
1140 | return -EINVAL; | ||
1141 | } | ||
1142 | |||
1143 | sdata = s3c24xx_dma_get_soc_data(pdev); | ||
1144 | if (!sdata) | ||
1145 | return -EINVAL; | ||
1146 | |||
1147 | s3cdma = devm_kzalloc(&pdev->dev, sizeof(*s3cdma), GFP_KERNEL); | ||
1148 | if (!s3cdma) | ||
1149 | return -ENOMEM; | ||
1150 | |||
1151 | s3cdma->pdev = pdev; | ||
1152 | s3cdma->pdata = pdata; | ||
1153 | s3cdma->sdata = sdata; | ||
1154 | |||
1155 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1156 | s3cdma->base = devm_ioremap_resource(&pdev->dev, res); | ||
1157 | if (IS_ERR(s3cdma->base)) | ||
1158 | return PTR_ERR(s3cdma->base); | ||
1159 | |||
1160 | s3cdma->phy_chans = devm_kzalloc(&pdev->dev, | ||
1161 | sizeof(struct s3c24xx_dma_phy) * | ||
1162 | pdata->num_phy_channels, | ||
1163 | GFP_KERNEL); | ||
1164 | if (!s3cdma->phy_chans) | ||
1165 | return -ENOMEM; | ||
1166 | |||
1167 | /* acquire irqs and clocks for all physical channels */ | ||
1168 | for (i = 0; i < pdata->num_phy_channels; i++) { | ||
1169 | struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; | ||
1170 | char clk_name[6]; | ||
1171 | |||
1172 | phy->id = i; | ||
1173 | phy->base = s3cdma->base + (i * sdata->stride); | ||
1174 | phy->host = s3cdma; | ||
1175 | |||
1176 | phy->irq = platform_get_irq(pdev, i); | ||
1177 | if (phy->irq < 0) { | ||
1178 | dev_err(&pdev->dev, "failed to get irq %d, err %d\n", | ||
1179 | i, phy->irq); | ||
1180 | continue; | ||
1181 | } | ||
1182 | |||
1183 | ret = devm_request_irq(&pdev->dev, phy->irq, s3c24xx_dma_irq, | ||
1184 | 0, pdev->name, phy); | ||
1185 | if (ret) { | ||
1186 | dev_err(&pdev->dev, "Unable to request irq for channel %d, error %d\n", | ||
1187 | i, ret); | ||
1188 | continue; | ||
1189 | } | ||
1190 | |||
1191 | if (sdata->has_clocks) { | ||
1192 | sprintf(clk_name, "dma.%d", i); | ||
1193 | phy->clk = devm_clk_get(&pdev->dev, clk_name); | ||
1194 | if (IS_ERR(phy->clk)) { | ||
1195 | dev_err(&pdev->dev, "unable to acquire clock for channel %d, error %ld\n", | ||
1196 | i, PTR_ERR(phy->clk)); | ||
1197 | continue; | ||
1198 | } | ||
1199 | |||
1200 | ret = clk_prepare(phy->clk); | ||
1201 | if (ret) { | ||
1202 | dev_err(&pdev->dev, "clock for phy %d failed, error %d\n", | ||
1203 | i, ret); | ||
1204 | continue; | ||
1205 | } | ||
1206 | } | ||
1207 | |||
1208 | spin_lock_init(&phy->lock); | ||
1209 | phy->valid = true; | ||
1210 | |||
1211 | dev_dbg(&pdev->dev, "physical channel %d is %s\n", | ||
1212 | i, s3c24xx_dma_phy_busy(phy) ? "BUSY" : "FREE"); | ||
1213 | } | ||
1214 | |||
1215 | /* Initialize memcpy engine */ | ||
1216 | dma_cap_set(DMA_MEMCPY, s3cdma->memcpy.cap_mask); | ||
1217 | dma_cap_set(DMA_PRIVATE, s3cdma->memcpy.cap_mask); | ||
1218 | s3cdma->memcpy.dev = &pdev->dev; | ||
1219 | s3cdma->memcpy.device_alloc_chan_resources = | ||
1220 | s3c24xx_dma_alloc_chan_resources; | ||
1221 | s3cdma->memcpy.device_free_chan_resources = | ||
1222 | s3c24xx_dma_free_chan_resources; | ||
1223 | s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; | ||
1224 | s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; | ||
1225 | s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; | ||
1226 | s3cdma->memcpy.device_control = s3c24xx_dma_control; | ||
1227 | |||
1228 | /* Initialize slave engine for SoC internal dedicated peripherals */ | ||
1229 | dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); | ||
1230 | dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); | ||
1231 | s3cdma->slave.dev = &pdev->dev; | ||
1232 | s3cdma->slave.device_alloc_chan_resources = | ||
1233 | s3c24xx_dma_alloc_chan_resources; | ||
1234 | s3cdma->slave.device_free_chan_resources = | ||
1235 | s3c24xx_dma_free_chan_resources; | ||
1236 | s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; | ||
1237 | s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; | ||
1238 | s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; | ||
1239 | s3cdma->slave.device_control = s3c24xx_dma_control; | ||
1240 | |||
1241 | /* Register as many memcpy channels as there are physical channels */ | ||
1242 | ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, | ||
1243 | pdata->num_phy_channels, false); | ||
1244 | if (ret <= 0) { | ||
1245 | dev_warn(&pdev->dev, | ||
1246 | "%s failed to enumerate memcpy channels - %d\n", | ||
1247 | __func__, ret); | ||
1248 | goto err_memcpy; | ||
1249 | } | ||
1250 | |||
1251 | /* Register slave channels */ | ||
1252 | ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->slave, | ||
1253 | pdata->num_channels, true); | ||
1254 | if (ret <= 0) { | ||
1255 | dev_warn(&pdev->dev, | ||
1256 | "%s failed to enumerate slave channels - %d\n", | ||
1257 | __func__, ret); | ||
1258 | goto err_slave; | ||
1259 | } | ||
1260 | |||
1261 | ret = dma_async_device_register(&s3cdma->memcpy); | ||
1262 | if (ret) { | ||
1263 | dev_warn(&pdev->dev, | ||
1264 | "%s failed to register memcpy as an async device - %d\n", | ||
1265 | __func__, ret); | ||
1266 | goto err_memcpy_reg; | ||
1267 | } | ||
1268 | |||
1269 | ret = dma_async_device_register(&s3cdma->slave); | ||
1270 | if (ret) { | ||
1271 | dev_warn(&pdev->dev, | ||
1272 | "%s failed to register slave as an async device - %d\n", | ||
1273 | __func__, ret); | ||
1274 | goto err_slave_reg; | ||
1275 | } | ||
1276 | |||
1277 | platform_set_drvdata(pdev, s3cdma); | ||
1278 | dev_info(&pdev->dev, "Loaded dma driver with %d physical channels\n", | ||
1279 | pdata->num_phy_channels); | ||
1280 | |||
1281 | return 0; | ||
1282 | |||
1283 | err_slave_reg: | ||
1284 | dma_async_device_unregister(&s3cdma->memcpy); | ||
1285 | err_memcpy_reg: | ||
1286 | s3c24xx_dma_free_virtual_channels(&s3cdma->slave); | ||
1287 | err_slave: | ||
1288 | s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); | ||
1289 | err_memcpy: | ||
1290 | if (sdata->has_clocks) | ||
1291 | for (i = 0; i < pdata->num_phy_channels; i++) { | ||
1292 | struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; | ||
1293 | if (phy->valid) | ||
1294 | clk_unprepare(phy->clk); | ||
1295 | } | ||
1296 | |||
1297 | return ret; | ||
1298 | } | ||
1299 | |||
1300 | static int s3c24xx_dma_remove(struct platform_device *pdev) | ||
1301 | { | ||
1302 | const struct s3c24xx_dma_platdata *pdata = dev_get_platdata(&pdev->dev); | ||
1303 | struct s3c24xx_dma_engine *s3cdma = platform_get_drvdata(pdev); | ||
1304 | struct soc_data *sdata = s3c24xx_dma_get_soc_data(pdev); | ||
1305 | int i; | ||
1306 | |||
1307 | dma_async_device_unregister(&s3cdma->slave); | ||
1308 | dma_async_device_unregister(&s3cdma->memcpy); | ||
1309 | |||
1310 | s3c24xx_dma_free_virtual_channels(&s3cdma->slave); | ||
1311 | s3c24xx_dma_free_virtual_channels(&s3cdma->memcpy); | ||
1312 | |||
1313 | if (sdata->has_clocks) | ||
1314 | for (i = 0; i < pdata->num_phy_channels; i++) { | ||
1315 | struct s3c24xx_dma_phy *phy = &s3cdma->phy_chans[i]; | ||
1316 | if (phy->valid) | ||
1317 | clk_unprepare(phy->clk); | ||
1318 | } | ||
1319 | |||
1320 | return 0; | ||
1321 | } | ||
1322 | |||
1323 | static struct platform_driver s3c24xx_dma_driver = { | ||
1324 | .driver = { | ||
1325 | .name = "s3c24xx-dma", | ||
1326 | .owner = THIS_MODULE, | ||
1327 | }, | ||
1328 | .id_table = s3c24xx_dma_driver_ids, | ||
1329 | .probe = s3c24xx_dma_probe, | ||
1330 | .remove = s3c24xx_dma_remove, | ||
1331 | }; | ||
1332 | |||
1333 | module_platform_driver(s3c24xx_dma_driver); | ||
1334 | |||
1335 | bool s3c24xx_dma_filter(struct dma_chan *chan, void *param) | ||
1336 | { | ||
1337 | struct s3c24xx_dma_chan *s3cchan; | ||
1338 | |||
1339 | if (chan->device->dev->driver != &s3c24xx_dma_driver.driver) | ||
1340 | return false; | ||
1341 | |||
1342 | s3cchan = to_s3c24xx_dma_chan(chan); | ||
1343 | |||
1344 | return s3cchan->id == (int)param; | ||
1345 | } | ||
1346 | EXPORT_SYMBOL(s3c24xx_dma_filter); | ||
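
A sketch of how a peripheral driver uses the exported filter to claim a specific slave channel; DMACH_SDI stands in for whatever channel id the platform defines:

	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, s3c24xx_dma_filter,
				   (void *)DMACH_SDI);
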
1347 | |||
1348 | MODULE_DESCRIPTION("S3C24XX DMA Driver"); | ||
1349 | MODULE_AUTHOR("Heiko Stuebner"); | ||
1350 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index 17df6db5dca7..8847adf392b7 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c | |||
@@ -15,8 +15,9 @@ | |||
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/io.h> | 17 | #include <linux/io.h> |
18 | 18 | #include <linux/irq.h> | |
19 | #include <asm/mach/irq.h> | 19 | #include <linux/platform_device.h> |
20 | #include <linux/platform_data/gpio-davinci.h> | ||
20 | 21 | ||
21 | struct davinci_gpio_regs { | 22 | struct davinci_gpio_regs { |
22 | u32 dir; | 23 | u32 dir; |
@@ -31,13 +32,14 @@ struct davinci_gpio_regs { | |||
31 | u32 intstat; | 32 | u32 intstat; |
32 | }; | 33 | }; |
33 | 34 | ||
35 | #define BINTEN 0x8 /* GPIO Interrupt Per-Bank Enable Register */ | ||
36 | |||
34 | #define chip2controller(chip) \ | 37 | #define chip2controller(chip) \ |
35 | container_of(chip, struct davinci_gpio_controller, chip) | 38 | container_of(chip, struct davinci_gpio_controller, chip) |
36 | 39 | ||
37 | static struct davinci_gpio_controller chips[DIV_ROUND_UP(DAVINCI_N_GPIO, 32)]; | ||
38 | static void __iomem *gpio_base; | 40 | static void __iomem *gpio_base; |
39 | 41 | ||
40 | static struct davinci_gpio_regs __iomem __init *gpio2regs(unsigned gpio) | 42 | static struct davinci_gpio_regs __iomem *gpio2regs(unsigned gpio) |
41 | { | 43 | { |
42 | void __iomem *ptr; | 44 | void __iomem *ptr; |
43 | 45 | ||
@@ -65,7 +67,7 @@ static inline struct davinci_gpio_regs __iomem *irq2regs(int irq) | |||
65 | return g; | 67 | return g; |
66 | } | 68 | } |
67 | 69 | ||
68 | static int __init davinci_gpio_irq_setup(void); | 70 | static int davinci_gpio_irq_setup(struct platform_device *pdev); |
69 | 71 | ||
70 | /*--------------------------------------------------------------------------*/ | 72 | /*--------------------------------------------------------------------------*/ |
71 | 73 | ||
@@ -131,33 +133,53 @@ davinci_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
131 | __raw_writel((1 << offset), value ? &g->set_data : &g->clr_data); | 133 | __raw_writel((1 << offset), value ? &g->set_data : &g->clr_data); |
132 | } | 134 | } |
133 | 135 | ||
134 | static int __init davinci_gpio_setup(void) | 136 | static int davinci_gpio_probe(struct platform_device *pdev) |
135 | { | 137 | { |
136 | int i, base; | 138 | int i, base; |
137 | unsigned ngpio; | 139 | unsigned ngpio; |
138 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 140 | struct davinci_gpio_controller *chips; |
139 | struct davinci_gpio_regs *regs; | 141 | struct davinci_gpio_platform_data *pdata; |
140 | 142 | struct davinci_gpio_regs __iomem *regs; | |
141 | if (soc_info->gpio_type != GPIO_TYPE_DAVINCI) | 143 | struct device *dev = &pdev->dev; |
142 | return 0; | 144 | struct resource *res; |
145 | |||
146 | pdata = dev->platform_data; | ||
147 | if (!pdata) { | ||
148 | dev_err(dev, "No platform data found\n"); | ||
149 | return -EINVAL; | ||
150 | } | ||
143 | 151 | ||
144 | /* | 152 | /* |
145 | * The gpio banks conceptually expose a segmented bitmap, | 153 | * The gpio banks conceptually expose a segmented bitmap, |
146 | * and "ngpio" is one more than the largest zero-based | 154 | * and "ngpio" is one more than the largest zero-based |
147 | * bit index that's valid. | 155 | * bit index that's valid. |
148 | */ | 156 | */ |
149 | ngpio = soc_info->gpio_num; | 157 | ngpio = pdata->ngpio; |
150 | if (ngpio == 0) { | 158 | if (ngpio == 0) { |
151 | pr_err("GPIO setup: how many GPIOs?\n"); | 159 | dev_err(dev, "How many GPIOs?\n"); |
152 | return -EINVAL; | 160 | return -EINVAL; |
153 | } | 161 | } |
154 | 162 | ||
155 | if (WARN_ON(DAVINCI_N_GPIO < ngpio)) | 163 | if (WARN_ON(DAVINCI_N_GPIO < ngpio)) |
156 | ngpio = DAVINCI_N_GPIO; | 164 | ngpio = DAVINCI_N_GPIO; |
157 | 165 | ||
158 | gpio_base = ioremap(soc_info->gpio_base, SZ_4K); | 166 | chips = devm_kzalloc(dev, |
159 | if (WARN_ON(!gpio_base)) | 167 | ngpio * sizeof(struct davinci_gpio_controller), |
168 | GFP_KERNEL); | ||
169 | if (!chips) { | ||
170 | dev_err(dev, "Memory allocation failed\n"); | ||
160 | return -ENOMEM; | 171 | return -ENOMEM; |
172 | } | ||
173 | |||
174 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
175 | if (!res) { | ||
176 | dev_err(dev, "Invalid memory resource\n"); | ||
177 | return -EBUSY; | ||
178 | } | ||
179 | |||
180 | gpio_base = devm_ioremap_resource(dev, res); | ||
181 | if (IS_ERR(gpio_base)) | ||
182 | return PTR_ERR(gpio_base); | ||
161 | 183 | ||
162 | for (i = 0, base = 0; base < ngpio; i++, base += 32) { | 184 | for (i = 0, base = 0; base < ngpio; i++, base += 32) { |
163 | chips[i].chip.label = "DaVinci"; | 185 | chips[i].chip.label = "DaVinci"; |
@@ -183,13 +205,10 @@ static int __init davinci_gpio_setup(void) | |||
183 | gpiochip_add(&chips[i].chip); | 205 | gpiochip_add(&chips[i].chip); |
184 | } | 206 | } |
185 | 207 | ||
186 | soc_info->gpio_ctlrs = chips; | 208 | platform_set_drvdata(pdev, chips); |
187 | soc_info->gpio_ctlrs_num = DIV_ROUND_UP(ngpio, 32); | 209 | davinci_gpio_irq_setup(pdev); |
188 | |||
189 | davinci_gpio_irq_setup(); | ||
190 | return 0; | 210 | return 0; |
191 | } | 211 | } |
192 | pure_initcall(davinci_gpio_setup); | ||
193 | 212 | ||
194 | /*--------------------------------------------------------------------------*/ | 213 | /*--------------------------------------------------------------------------*/ |
195 | /* | 214 | /* |
@@ -302,13 +321,14 @@ static int gpio_to_irq_banked(struct gpio_chip *chip, unsigned offset) | |||
302 | 321 | ||
303 | static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) | 322 | static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) |
304 | { | 323 | { |
305 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 324 | struct davinci_gpio_controller *d = chip2controller(chip); |
306 | 325 | ||
307 | /* NOTE: we assume for now that only irqs in the first gpio_chip | 326 | /* |
327 | * NOTE: we assume for now that only irqs in the first gpio_chip | ||
308 | * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). | 328 | * can provide direct-mapped IRQs to AINTC (up to 32 GPIOs). |
309 | */ | 329 | */ |
310 | if (offset < soc_info->gpio_unbanked) | 330 | if (offset < d->irq_base) |
311 | return soc_info->gpio_irq + offset; | 331 | return d->gpio_irq + offset; |
312 | else | 332 | else |
313 | return -ENODEV; | 333 | return -ENODEV; |
314 | } | 334 | } |
@@ -317,12 +337,11 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) | |||
317 | { | 337 | { |
318 | struct davinci_gpio_controller *d; | 338 | struct davinci_gpio_controller *d; |
319 | struct davinci_gpio_regs __iomem *g; | 339 | struct davinci_gpio_regs __iomem *g; |
320 | struct davinci_soc_info *soc_info = &davinci_soc_info; | ||
321 | u32 mask; | 340 | u32 mask; |
322 | 341 | ||
323 | d = (struct davinci_gpio_controller *)data->handler_data; | 342 | d = (struct davinci_gpio_controller *)data->handler_data; |
324 | g = (struct davinci_gpio_regs __iomem *)d->regs; | 343 | g = (struct davinci_gpio_regs __iomem *)d->regs; |
325 | mask = __gpio_mask(data->irq - soc_info->gpio_irq); | 344 | mask = __gpio_mask(data->irq - d->gpio_irq); |
326 | 345 | ||
327 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 346 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
328 | return -EINVAL; | 347 | return -EINVAL; |
@@ -343,24 +362,33 @@ static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) | |||
343 | * (dm6446) can be set appropriately for GPIOV33 pins. | 362 | * (dm6446) can be set appropriately for GPIOV33 pins. |
344 | */ | 363 | */ |
345 | 364 | ||
346 | static int __init davinci_gpio_irq_setup(void) | 365 | static int davinci_gpio_irq_setup(struct platform_device *pdev) |
347 | { | 366 | { |
348 | unsigned gpio, irq, bank; | 367 | unsigned gpio, irq, bank; |
349 | struct clk *clk; | 368 | struct clk *clk; |
350 | u32 binten = 0; | 369 | u32 binten = 0; |
351 | unsigned ngpio, bank_irq; | 370 | unsigned ngpio, bank_irq; |
352 | struct davinci_soc_info *soc_info = &davinci_soc_info; | 371 | struct device *dev = &pdev->dev; |
353 | struct davinci_gpio_regs __iomem *g; | 372 | struct resource *res; |
373 | struct davinci_gpio_controller *chips = platform_get_drvdata(pdev); | ||
374 | struct davinci_gpio_platform_data *pdata = dev->platform_data; | ||
375 | struct davinci_gpio_regs __iomem *g; | ||
354 | 376 | ||
355 | ngpio = soc_info->gpio_num; | 377 | ngpio = pdata->ngpio; |
378 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | ||
379 | if (!res) { | ||
380 | dev_err(dev, "Invalid IRQ resource\n"); | ||
381 | return -EBUSY; | ||
382 | } | ||
356 | 383 | ||
357 | bank_irq = soc_info->gpio_irq; | 384 | bank_irq = res->start; |
358 | if (bank_irq == 0) { | 385 | |
359 | printk(KERN_ERR "Don't know first GPIO bank IRQ.\n"); | 386 | if (!bank_irq) { |
360 | return -EINVAL; | 387 | dev_err(dev, "Invalid IRQ resource\n"); |
388 | return -ENODEV; | ||
361 | } | 389 | } |
362 | 390 | ||
363 | clk = clk_get(NULL, "gpio"); | 391 | clk = devm_clk_get(dev, "gpio"); |
364 | if (IS_ERR(clk)) { | 392 | if (IS_ERR(clk)) { |
365 | printk(KERN_ERR "Error %ld getting gpio clock?\n", | 393 | printk(KERN_ERR "Error %ld getting gpio clock?\n", |
366 | PTR_ERR(clk)); | 394 | PTR_ERR(clk)); |
@@ -368,16 +396,17 @@ static int __init davinci_gpio_irq_setup(void) | |||
368 | } | 396 | } |
369 | clk_prepare_enable(clk); | 397 | clk_prepare_enable(clk); |
370 | 398 | ||
371 | /* Arrange gpio_to_irq() support, handling either direct IRQs or | 399 | /* |
400 | * Arrange gpio_to_irq() support, handling either direct IRQs or | ||
372 | * banked IRQs. Having GPIOs in the first GPIO bank use direct | 401 | * banked IRQs. Having GPIOs in the first GPIO bank use direct |
373 | * IRQs, while the others use banked IRQs, would need some setup | 402 | * IRQs, while the others use banked IRQs, would need some setup |
374 | * tweaks to recognize hardware which can do that. | 403 | * tweaks to recognize hardware which can do that. |
375 | */ | 404 | */ |
376 | for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) { | 405 | for (gpio = 0, bank = 0; gpio < ngpio; bank++, gpio += 32) { |
377 | chips[bank].chip.to_irq = gpio_to_irq_banked; | 406 | chips[bank].chip.to_irq = gpio_to_irq_banked; |
378 | chips[bank].irq_base = soc_info->gpio_unbanked | 407 | chips[bank].irq_base = pdata->gpio_unbanked |
379 | ? -EINVAL | 408 | ? -EINVAL |
380 | : (soc_info->intc_irq_num + gpio); | 409 | : (pdata->intc_irq_num + gpio); |
381 | } | 410 | } |
382 | 411 | ||
383 | /* | 412 | /* |
@@ -385,7 +414,7 @@ static int __init davinci_gpio_irq_setup(void) | |||
385 | * controller only handling trigger modes. We currently assume no | 414 | * controller only handling trigger modes. We currently assume no |
386 | * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs. | 415 | * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs. |
387 | */ | 416 | */ |
388 | if (soc_info->gpio_unbanked) { | 417 | if (pdata->gpio_unbanked) { |
389 | static struct irq_chip_type gpio_unbanked; | 418 | static struct irq_chip_type gpio_unbanked; |
390 | 419 | ||
391 | /* pass "bank 0" GPIO IRQs to AINTC */ | 420 | /* pass "bank 0" GPIO IRQs to AINTC */ |
@@ -405,7 +434,7 @@ static int __init davinci_gpio_irq_setup(void) | |||
405 | __raw_writel(~0, &g->set_rising); | 434 | __raw_writel(~0, &g->set_rising); |
406 | 435 | ||
407 | /* set the direct IRQs up to use that irqchip */ | 436 | /* set the direct IRQs up to use that irqchip */ |
408 | for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) { | 437 | for (gpio = 0; gpio < pdata->gpio_unbanked; gpio++, irq++) { |
409 | irq_set_chip(irq, &gpio_unbanked.chip); | 438 | irq_set_chip(irq, &gpio_unbanked.chip); |
410 | irq_set_handler_data(irq, &chips[gpio / 32]); | 439 | irq_set_handler_data(irq, &chips[gpio / 32]); |
411 | irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH); | 440 | irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH); |
@@ -450,12 +479,31 @@ static int __init davinci_gpio_irq_setup(void) | |||
450 | } | 479 | } |
451 | 480 | ||
452 | done: | 481 | done: |
453 | /* BINTEN -- per-bank interrupt enable. genirq would also let these | 482 | /* |
483 | * BINTEN -- per-bank interrupt enable. genirq would also let these | ||
454 | * bits be set/cleared dynamically. | 484 | * bits be set/cleared dynamically. |
455 | */ | 485 | */ |
456 | __raw_writel(binten, gpio_base + 0x08); | 486 | __raw_writel(binten, gpio_base + BINTEN); |
457 | 487 | ||
458 | printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0)); | 488 | printk(KERN_INFO "DaVinci: %d gpio irqs\n", irq - gpio_to_irq(0)); |
459 | 489 | ||
460 | return 0; | 490 | return 0; |
461 | } | 491 | } |
492 | |||
493 | static struct platform_driver davinci_gpio_driver = { | ||
494 | .probe = davinci_gpio_probe, | ||
495 | .driver = { | ||
496 | .name = "davinci_gpio", | ||
497 | .owner = THIS_MODULE, | ||
498 | }, | ||
499 | }; | ||
500 | |||
501 | /** | ||
502 | * GPIO driver registration needs to be done before machine_init functions | ||
503 | * access GPIO. Hence davinci_gpio_drv_reg() is a postcore_initcall. | ||
504 | */ | ||
505 | static int __init davinci_gpio_drv_reg(void) | ||
506 | { | ||
507 | return platform_driver_register(&davinci_gpio_driver); | ||
508 | } | ||
509 | postcore_initcall(davinci_gpio_drv_reg); | ||
diff --git a/drivers/gpio/gpio-tnetv107x.c b/drivers/gpio/gpio-tnetv107x.c index 3fa3e2867e19..58445bb69106 100644 --- a/drivers/gpio/gpio-tnetv107x.c +++ b/drivers/gpio/gpio-tnetv107x.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/gpio.h> | 17 | #include <linux/gpio.h> |
18 | #include <linux/platform_data/gpio-davinci.h> | ||
18 | 19 | ||
19 | #include <mach/common.h> | 20 | #include <mach/common.h> |
20 | #include <mach/tnetv107x.h> | 21 | #include <mach/tnetv107x.h> |
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index bb328a366122..433cc8568dec 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c | |||
@@ -21,7 +21,10 @@ | |||
21 | #include <linux/io.h> | 21 | #include <linux/io.h> |
22 | #include <linux/of_address.h> | 22 | #include <linux/of_address.h> |
23 | #include <linux/of_irq.h> | 23 | #include <linux/of_irq.h> |
24 | #include <linux/of_pci.h> | ||
24 | #include <linux/irqdomain.h> | 25 | #include <linux/irqdomain.h> |
26 | #include <linux/slab.h> | ||
27 | #include <linux/msi.h> | ||
25 | #include <asm/mach/arch.h> | 28 | #include <asm/mach/arch.h> |
26 | #include <asm/exception.h> | 29 | #include <asm/exception.h> |
27 | #include <asm/smp_plat.h> | 30 | #include <asm/smp_plat.h> |
@@ -51,12 +54,22 @@ | |||
51 | #define IPI_DOORBELL_START (0) | 54 | #define IPI_DOORBELL_START (0) |
52 | #define IPI_DOORBELL_END (8) | 55 | #define IPI_DOORBELL_END (8) |
53 | #define IPI_DOORBELL_MASK 0xFF | 56 | #define IPI_DOORBELL_MASK 0xFF |
57 | #define PCI_MSI_DOORBELL_START (16) | ||
58 | #define PCI_MSI_DOORBELL_NR (16) | ||
59 | #define PCI_MSI_DOORBELL_END (32) | ||
60 | #define PCI_MSI_DOORBELL_MASK 0xFFFF0000 | ||
54 | 61 | ||
55 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); | 62 | static DEFINE_RAW_SPINLOCK(irq_controller_lock); |
56 | 63 | ||
57 | static void __iomem *per_cpu_int_base; | 64 | static void __iomem *per_cpu_int_base; |
58 | static void __iomem *main_int_base; | 65 | static void __iomem *main_int_base; |
59 | static struct irq_domain *armada_370_xp_mpic_domain; | 66 | static struct irq_domain *armada_370_xp_mpic_domain; |
67 | #ifdef CONFIG_PCI_MSI | ||
68 | static struct irq_domain *armada_370_xp_msi_domain; | ||
69 | static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); | ||
70 | static DEFINE_MUTEX(msi_used_lock); | ||
71 | static phys_addr_t msi_doorbell_addr; | ||
72 | #endif | ||
60 | 73 | ||
61 | /* | 74 | /* |
62 | * In SMP mode: | 75 | * In SMP mode: |
@@ -87,6 +100,144 @@ static void armada_370_xp_irq_unmask(struct irq_data *d) | |||
87 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | 100 | ARMADA_370_XP_INT_CLEAR_MASK_OFFS); |
88 | } | 101 | } |
89 | 102 | ||
103 | #ifdef CONFIG_PCI_MSI | ||
104 | |||
105 | static int armada_370_xp_alloc_msi(void) | ||
106 | { | ||
107 | int hwirq; | ||
108 | |||
109 | mutex_lock(&msi_used_lock); | ||
110 | hwirq = find_first_zero_bit(&msi_used, PCI_MSI_DOORBELL_NR); | ||
111 | if (hwirq >= PCI_MSI_DOORBELL_NR) | ||
112 | hwirq = -ENOSPC; | ||
113 | else | ||
114 | set_bit(hwirq, msi_used); | ||
115 | mutex_unlock(&msi_used_lock); | ||
116 | |||
117 | return hwirq; | ||
118 | } | ||
119 | |||
120 | static void armada_370_xp_free_msi(int hwirq) | ||
121 | { | ||
122 | mutex_lock(&msi_used_lock); | ||
123 | if (!test_bit(hwirq, msi_used)) | ||
124 | pr_err("trying to free unused MSI#%d\n", hwirq); | ||
125 | else | ||
126 | clear_bit(hwirq, msi_used); | ||
127 | mutex_unlock(&msi_used_lock); | ||
128 | } | ||
129 | |||
130 | static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, | ||
131 | struct pci_dev *pdev, | ||
132 | struct msi_desc *desc) | ||
133 | { | ||
134 | struct msi_msg msg; | ||
135 | int hwirq; | ||
136 | int virq; | ||
137 | |||
138 | hwirq = armada_370_xp_alloc_msi(); | ||
139 | if (hwirq < 0) | ||
140 | return hwirq; | ||
141 | |||
142 | virq = irq_create_mapping(armada_370_xp_msi_domain, hwirq); | ||
143 | if (!virq) { | ||
144 | armada_370_xp_free_msi(hwirq); | ||
145 | return -EINVAL; | ||
146 | } | ||
147 | |||
148 | irq_set_msi_desc(virq, desc); | ||
149 | |||
150 | msg.address_lo = msi_doorbell_addr; | ||
151 | msg.address_hi = 0; | ||
152 | msg.data = 0xf00 | (hwirq + 16); | ||
153 | |||
154 | write_msi_msg(virq, &msg); | ||
155 | return 0; | ||
156 | } | ||
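
A short worked instance of the doorbell encoding above (values illustrative): for hwirq 3 the endpoint is told to write msg.data == 0xf13 (0xf00 | 19) to the doorbell, raising doorbell bit 19; the interrupt handler further down recovers hwirq 3 as msinr - 16.
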
157 | |||
158 | static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, | ||
159 | unsigned int irq) | ||
160 | { | ||
161 | struct irq_data *d = irq_get_irq_data(irq); | ||
162 | irq_dispose_mapping(irq); | ||
163 | armada_370_xp_free_msi(d->hwirq); | ||
164 | } | ||
165 | |||
166 | static struct irq_chip armada_370_xp_msi_irq_chip = { | ||
167 | .name = "armada_370_xp_msi_irq", | ||
168 | .irq_enable = unmask_msi_irq, | ||
169 | .irq_disable = mask_msi_irq, | ||
170 | .irq_mask = mask_msi_irq, | ||
171 | .irq_unmask = unmask_msi_irq, | ||
172 | }; | ||
173 | |||
174 | static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, | ||
175 | irq_hw_number_t hw) | ||
176 | { | ||
177 | irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip, | ||
178 | handle_simple_irq); | ||
179 | set_irq_flags(virq, IRQF_VALID); | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static const struct irq_domain_ops armada_370_xp_msi_irq_ops = { | ||
185 | .map = armada_370_xp_msi_map, | ||
186 | }; | ||
187 | |||
188 | static int armada_370_xp_msi_init(struct device_node *node, | ||
189 | phys_addr_t main_int_phys_base) | ||
190 | { | ||
191 | struct msi_chip *msi_chip; | ||
192 | u32 reg; | ||
193 | int ret; | ||
194 | |||
195 | msi_doorbell_addr = main_int_phys_base + | ||
196 | ARMADA_370_XP_SW_TRIG_INT_OFFS; | ||
197 | |||
198 | msi_chip = kzalloc(sizeof(*msi_chip), GFP_KERNEL); | ||
199 | if (!msi_chip) | ||
200 | return -ENOMEM; | ||
201 | |||
202 | msi_chip->setup_irq = armada_370_xp_setup_msi_irq; | ||
203 | msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; | ||
204 | msi_chip->of_node = node; | ||
205 | |||
206 | armada_370_xp_msi_domain = | ||
207 | irq_domain_add_linear(NULL, PCI_MSI_DOORBELL_NR, | ||
208 | &armada_370_xp_msi_irq_ops, | ||
209 | NULL); | ||
210 | if (!armada_370_xp_msi_domain) { | ||
211 | kfree(msi_chip); | ||
212 | return -ENOMEM; | ||
213 | } | ||
214 | |||
215 | ret = of_pci_msi_chip_add(msi_chip); | ||
216 | if (ret < 0) { | ||
217 | irq_domain_remove(armada_370_xp_msi_domain); | ||
218 | kfree(msi_chip); | ||
219 | return ret; | ||
220 | } | ||
221 | |||
222 | reg = readl(per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_MSK_OFFS) | ||
223 | | PCI_MSI_DOORBELL_MASK; | ||
224 | |||
225 | writel(reg, per_cpu_int_base + | ||
226 | ARMADA_370_XP_IN_DRBEL_MSK_OFFS); | ||
227 | |||
228 | /* Unmask IPI interrupt */ | ||
229 | writel(1, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | #else | ||
234 | static inline int armada_370_xp_msi_init(struct device_node *node, | ||
235 | phys_addr_t main_int_phys_base) | ||
236 | { | ||
237 | return 0; | ||
238 | } | ||
239 | #endif | ||
240 | |||
90 | #ifdef CONFIG_SMP | 241 | #ifdef CONFIG_SMP |
91 | static int armada_xp_set_affinity(struct irq_data *d, | 242 | static int armada_xp_set_affinity(struct irq_data *d, |
92 | const struct cpumask *mask_val, bool force) | 243 | const struct cpumask *mask_val, bool force) |
@@ -214,12 +365,39 @@ armada_370_xp_handle_irq(struct pt_regs *regs) | |||
214 | if (irqnr > 1022) | 365 | if (irqnr > 1022) |
215 | break; | 366 | break; |
216 | 367 | ||
217 | if (irqnr > 0) { | 368 | if (irqnr > 1) { |
218 | irqnr = irq_find_mapping(armada_370_xp_mpic_domain, | 369 | irqnr = irq_find_mapping(armada_370_xp_mpic_domain, |
219 | irqnr); | 370 | irqnr); |
220 | handle_IRQ(irqnr, regs); | 371 | handle_IRQ(irqnr, regs); |
221 | continue; | 372 | continue; |
222 | } | 373 | } |
374 | |||
375 | #ifdef CONFIG_PCI_MSI | ||
376 | /* MSI handling */ | ||
377 | if (irqnr == 1) { | ||
378 | u32 msimask, msinr; | ||
379 | |||
380 | msimask = readl_relaxed(per_cpu_int_base + | ||
381 | ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) | ||
382 | & PCI_MSI_DOORBELL_MASK; | ||
383 | |||
384 | writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base + | ||
385 | ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); | ||
386 | |||
387 | for (msinr = PCI_MSI_DOORBELL_START; | ||
388 | msinr < PCI_MSI_DOORBELL_END; msinr++) { | ||
389 | int irq; | ||
390 | |||
391 | if (!(msimask & BIT(msinr))) | ||
392 | continue; | ||
393 | |||
394 | irq = irq_find_mapping(armada_370_xp_msi_domain, | ||
395 | msinr - 16); | ||
396 | handle_IRQ(irq, regs); | ||
397 | } | ||
398 | } | ||
399 | #endif | ||
400 | |||
223 | #ifdef CONFIG_SMP | 401 | #ifdef CONFIG_SMP |
224 | /* IPI Handling */ | 402 | /* IPI Handling */ |
225 | if (irqnr == 0) { | 403 | if (irqnr == 0) { |
@@ -248,12 +426,25 @@ armada_370_xp_handle_irq(struct pt_regs *regs) | |||
248 | static int __init armada_370_xp_mpic_of_init(struct device_node *node, | 426 | static int __init armada_370_xp_mpic_of_init(struct device_node *node, |
249 | struct device_node *parent) | 427 | struct device_node *parent) |
250 | { | 428 | { |
429 | struct resource main_int_res, per_cpu_int_res; | ||
251 | u32 control; | 430 | u32 control; |
252 | 431 | ||
253 | main_int_base = of_iomap(node, 0); | 432 | BUG_ON(of_address_to_resource(node, 0, &main_int_res)); |
254 | per_cpu_int_base = of_iomap(node, 1); | 433 | BUG_ON(of_address_to_resource(node, 1, &per_cpu_int_res)); |
434 | |||
435 | BUG_ON(!request_mem_region(main_int_res.start, | ||
436 | resource_size(&main_int_res), | ||
437 | node->full_name)); | ||
438 | BUG_ON(!request_mem_region(per_cpu_int_res.start, | ||
439 | resource_size(&per_cpu_int_res), | ||
440 | node->full_name)); | ||
255 | 441 | ||
442 | main_int_base = ioremap(main_int_res.start, | ||
443 | resource_size(&main_int_res)); | ||
256 | BUG_ON(!main_int_base); | 444 | BUG_ON(!main_int_base); |
445 | |||
446 | per_cpu_int_base = ioremap(per_cpu_int_res.start, | ||
447 | resource_size(&per_cpu_int_res)); | ||
257 | BUG_ON(!per_cpu_int_base); | 448 | BUG_ON(!per_cpu_int_base); |
258 | 449 | ||
259 | control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); | 450 | control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); |
@@ -262,8 +453,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
262 | irq_domain_add_linear(node, (control >> 2) & 0x3ff, | 453 | irq_domain_add_linear(node, (control >> 2) & 0x3ff, |
263 | &armada_370_xp_mpic_irq_ops, NULL); | 454 | &armada_370_xp_mpic_irq_ops, NULL); |
264 | 455 | ||
265 | if (!armada_370_xp_mpic_domain) | 456 | BUG_ON(!armada_370_xp_mpic_domain); |
266 | panic("Unable to add Armada_370_Xp MPIC irq domain (DT)\n"); | ||
267 | 457 | ||
268 | irq_set_default_host(armada_370_xp_mpic_domain); | 458 | irq_set_default_host(armada_370_xp_mpic_domain); |
269 | 459 | ||
@@ -280,6 +470,8 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, | |||
280 | 470 | ||
281 | #endif | 471 | #endif |
282 | 472 | ||
473 | armada_370_xp_msi_init(node, main_int_res.start); | ||
474 | |||
283 | set_handle_irq(armada_370_xp_handle_irq); | 475 | set_handle_irq(armada_370_xp_handle_irq); |
284 | 476 | ||
285 | return 0; | 477 | return 0; |
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 3d9504811126..43186feb4294 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig | |||
@@ -3,7 +3,7 @@ menu "PCI host controller drivers" | |||
3 | 3 | ||
4 | config PCI_MVEBU | 4 | config PCI_MVEBU |
5 | bool "Marvell EBU PCIe controller" | 5 | bool "Marvell EBU PCIe controller" |
6 | depends on ARCH_MVEBU || ARCH_KIRKWOOD | 6 | depends on ARCH_MVEBU || ARCH_DOVE || ARCH_KIRKWOOD |
7 | depends on OF | 7 | depends on OF |
8 | 8 | ||
9 | config PCIE_DW | 9 | config PCIE_DW |
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index 729d5a101d62..80b2250ea19a 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c | |||
@@ -9,13 +9,17 @@ | |||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/pci.h> | 10 | #include <linux/pci.h> |
11 | #include <linux/clk.h> | 11 | #include <linux/clk.h> |
12 | #include <linux/delay.h> | ||
13 | #include <linux/gpio.h> | ||
12 | #include <linux/module.h> | 14 | #include <linux/module.h> |
13 | #include <linux/mbus.h> | 15 | #include <linux/mbus.h> |
16 | #include <linux/msi.h> | ||
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
15 | #include <linux/platform_device.h> | 18 | #include <linux/platform_device.h> |
16 | #include <linux/of_address.h> | 19 | #include <linux/of_address.h> |
17 | #include <linux/of_pci.h> | ||
18 | #include <linux/of_irq.h> | 20 | #include <linux/of_irq.h> |
21 | #include <linux/of_gpio.h> | ||
22 | #include <linux/of_pci.h> | ||
19 | #include <linux/of_platform.h> | 23 | #include <linux/of_platform.h> |
20 | 24 | ||
21 | /* | 25 | /* |
@@ -103,6 +107,7 @@ struct mvebu_pcie_port; | |||
103 | struct mvebu_pcie { | 107 | struct mvebu_pcie { |
104 | struct platform_device *pdev; | 108 | struct platform_device *pdev; |
105 | struct mvebu_pcie_port *ports; | 109 | struct mvebu_pcie_port *ports; |
110 | struct msi_chip *msi; | ||
106 | struct resource io; | 111 | struct resource io; |
107 | struct resource realio; | 112 | struct resource realio; |
108 | struct resource mem; | 113 | struct resource mem; |
@@ -115,7 +120,6 @@ struct mvebu_pcie_port { | |||
115 | char *name; | 120 | char *name; |
116 | void __iomem *base; | 121 | void __iomem *base; |
117 | spinlock_t conf_lock; | 122 | spinlock_t conf_lock; |
118 | int haslink; | ||
119 | u32 port; | 123 | u32 port; |
120 | u32 lane; | 124 | u32 lane; |
121 | int devfn; | 125 | int devfn; |
@@ -124,6 +128,9 @@ struct mvebu_pcie_port { | |||
124 | unsigned int io_target; | 128 | unsigned int io_target; |
125 | unsigned int io_attr; | 129 | unsigned int io_attr; |
126 | struct clk *clk; | 130 | struct clk *clk; |
131 | int reset_gpio; | ||
132 | int reset_active_low; | ||
133 | char *reset_name; | ||
127 | struct mvebu_sw_pci_bridge bridge; | 134 | struct mvebu_sw_pci_bridge bridge; |
128 | struct device_node *dn; | 135 | struct device_node *dn; |
129 | struct mvebu_pcie *pcie; | 136 | struct mvebu_pcie *pcie; |
@@ -133,29 +140,39 @@ struct mvebu_pcie_port { | |||
133 | size_t iowin_size; | 140 | size_t iowin_size; |
134 | }; | 141 | }; |
135 | 142 | ||
143 | static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) | ||
144 | { | ||
145 | writel(val, port->base + reg); | ||
146 | } | ||
147 | |||
148 | static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg) | ||
149 | { | ||
150 | return readl(port->base + reg); | ||
151 | } | ||
152 | |||
136 | static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) | 153 | static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) |
137 | { | 154 | { |
138 | return !(readl(port->base + PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); | 155 | return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); |
139 | } | 156 | } |
140 | 157 | ||
141 | static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) | 158 | static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) |
142 | { | 159 | { |
143 | u32 stat; | 160 | u32 stat; |
144 | 161 | ||
145 | stat = readl(port->base + PCIE_STAT_OFF); | 162 | stat = mvebu_readl(port, PCIE_STAT_OFF); |
146 | stat &= ~PCIE_STAT_BUS; | 163 | stat &= ~PCIE_STAT_BUS; |
147 | stat |= nr << 8; | 164 | stat |= nr << 8; |
148 | writel(stat, port->base + PCIE_STAT_OFF); | 165 | mvebu_writel(port, stat, PCIE_STAT_OFF); |
149 | } | 166 | } |
150 | 167 | ||
151 | static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) | 168 | static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) |
152 | { | 169 | { |
153 | u32 stat; | 170 | u32 stat; |
154 | 171 | ||
155 | stat = readl(port->base + PCIE_STAT_OFF); | 172 | stat = mvebu_readl(port, PCIE_STAT_OFF); |
156 | stat &= ~PCIE_STAT_DEV; | 173 | stat &= ~PCIE_STAT_DEV; |
157 | stat |= nr << 16; | 174 | stat |= nr << 16; |
158 | writel(stat, port->base + PCIE_STAT_OFF); | 175 | mvebu_writel(port, stat, PCIE_STAT_OFF); |
159 | } | 176 | } |
160 | 177 | ||
161 | /* | 178 | /* |
@@ -163,7 +180,7 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) | |||
163 | * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks | 180 | * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks |
164 | * WIN[0-3] -> DRAM bank[0-3] | 181 | * WIN[0-3] -> DRAM bank[0-3] |
165 | */ | 182 | */ |
166 | static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) | 183 | static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) |
167 | { | 184 | { |
168 | const struct mbus_dram_target_info *dram; | 185 | const struct mbus_dram_target_info *dram; |
169 | u32 size; | 186 | u32 size; |
@@ -173,33 +190,34 @@ static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) | |||
173 | 190 | ||
174 | /* First, disable and clear BARs and windows. */ | 191 | /* First, disable and clear BARs and windows. */ |
175 | for (i = 1; i < 3; i++) { | 192 | for (i = 1; i < 3; i++) { |
176 | writel(0, port->base + PCIE_BAR_CTRL_OFF(i)); | 193 | mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i)); |
177 | writel(0, port->base + PCIE_BAR_LO_OFF(i)); | 194 | mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i)); |
178 | writel(0, port->base + PCIE_BAR_HI_OFF(i)); | 195 | mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i)); |
179 | } | 196 | } |
180 | 197 | ||
181 | for (i = 0; i < 5; i++) { | 198 | for (i = 0; i < 5; i++) { |
182 | writel(0, port->base + PCIE_WIN04_CTRL_OFF(i)); | 199 | mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i)); |
183 | writel(0, port->base + PCIE_WIN04_BASE_OFF(i)); | 200 | mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i)); |
184 | writel(0, port->base + PCIE_WIN04_REMAP_OFF(i)); | 201 | mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); |
185 | } | 202 | } |
186 | 203 | ||
187 | writel(0, port->base + PCIE_WIN5_CTRL_OFF); | 204 | mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF); |
188 | writel(0, port->base + PCIE_WIN5_BASE_OFF); | 205 | mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF); |
189 | writel(0, port->base + PCIE_WIN5_REMAP_OFF); | 206 | mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF); |
190 | 207 | ||
191 | /* Setup windows for DDR banks. Count total DDR size on the fly. */ | 208 | /* Setup windows for DDR banks. Count total DDR size on the fly. */ |
192 | size = 0; | 209 | size = 0; |
193 | for (i = 0; i < dram->num_cs; i++) { | 210 | for (i = 0; i < dram->num_cs; i++) { |
194 | const struct mbus_dram_window *cs = dram->cs + i; | 211 | const struct mbus_dram_window *cs = dram->cs + i; |
195 | 212 | ||
196 | writel(cs->base & 0xffff0000, | 213 | mvebu_writel(port, cs->base & 0xffff0000, |
197 | port->base + PCIE_WIN04_BASE_OFF(i)); | 214 | PCIE_WIN04_BASE_OFF(i)); |
198 | writel(0, port->base + PCIE_WIN04_REMAP_OFF(i)); | 215 | mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); |
199 | writel(((cs->size - 1) & 0xffff0000) | | 216 | mvebu_writel(port, |
200 | (cs->mbus_attr << 8) | | 217 | ((cs->size - 1) & 0xffff0000) | |
201 | (dram->mbus_dram_target_id << 4) | 1, | 218 | (cs->mbus_attr << 8) | |
202 | port->base + PCIE_WIN04_CTRL_OFF(i)); | 219 | (dram->mbus_dram_target_id << 4) | 1, |
220 | PCIE_WIN04_CTRL_OFF(i)); | ||
203 | 221 | ||
204 | size += cs->size; | 222 | size += cs->size; |
205 | } | 223 | } |
@@ -209,41 +227,40 @@ static void __init mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) | |||
209 | size = 1 << fls(size); | 227 | size = 1 << fls(size); |
210 | 228 | ||
211 | /* Setup BAR[1] to all DRAM banks. */ | 229 | /* Setup BAR[1] to all DRAM banks. */ |
212 | writel(dram->cs[0].base, port->base + PCIE_BAR_LO_OFF(1)); | 230 | mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1)); |
213 | writel(0, port->base + PCIE_BAR_HI_OFF(1)); | 231 | mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1)); |
214 | writel(((size - 1) & 0xffff0000) | 1, | 232 | mvebu_writel(port, ((size - 1) & 0xffff0000) | 1, |
215 | port->base + PCIE_BAR_CTRL_OFF(1)); | 233 | PCIE_BAR_CTRL_OFF(1)); |
216 | } | 234 | } |
217 | 235 | ||
218 | static void __init mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) | 236 | static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) |
219 | { | 237 | { |
220 | u16 cmd; | 238 | u32 cmd, mask; |
221 | u32 mask; | ||
222 | 239 | ||
223 | /* Point PCIe unit MBUS decode windows to DRAM space. */ | 240 | /* Point PCIe unit MBUS decode windows to DRAM space. */ |
224 | mvebu_pcie_setup_wins(port); | 241 | mvebu_pcie_setup_wins(port); |
225 | 242 | ||
226 | /* Master + slave enable. */ | 243 | /* Master + slave enable. */ |
227 | cmd = readw(port->base + PCIE_CMD_OFF); | 244 | cmd = mvebu_readl(port, PCIE_CMD_OFF); |
228 | cmd |= PCI_COMMAND_IO; | 245 | cmd |= PCI_COMMAND_IO; |
229 | cmd |= PCI_COMMAND_MEMORY; | 246 | cmd |= PCI_COMMAND_MEMORY; |
230 | cmd |= PCI_COMMAND_MASTER; | 247 | cmd |= PCI_COMMAND_MASTER; |
231 | writew(cmd, port->base + PCIE_CMD_OFF); | 248 | mvebu_writel(port, cmd, PCIE_CMD_OFF); |
232 | 249 | ||
233 | /* Enable interrupt lines A-D. */ | 250 | /* Enable interrupt lines A-D. */ |
234 | mask = readl(port->base + PCIE_MASK_OFF); | 251 | mask = mvebu_readl(port, PCIE_MASK_OFF); |
235 | mask |= PCIE_MASK_ENABLE_INTS; | 252 | mask |= PCIE_MASK_ENABLE_INTS; |
236 | writel(mask, port->base + PCIE_MASK_OFF); | 253 | mvebu_writel(port, mask, PCIE_MASK_OFF); |
237 | } | 254 | } |
238 | 255 | ||
239 | static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, | 256 | static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, |
240 | struct pci_bus *bus, | 257 | struct pci_bus *bus, |
241 | u32 devfn, int where, int size, u32 *val) | 258 | u32 devfn, int where, int size, u32 *val) |
242 | { | 259 | { |
243 | writel(PCIE_CONF_ADDR(bus->number, devfn, where), | 260 | mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), |
244 | port->base + PCIE_CONF_ADDR_OFF); | 261 | PCIE_CONF_ADDR_OFF); |
245 | 262 | ||
246 | *val = readl(port->base + PCIE_CONF_DATA_OFF); | 263 | *val = mvebu_readl(port, PCIE_CONF_DATA_OFF); |
247 | 264 | ||
248 | if (size == 1) | 265 | if (size == 1) |
249 | *val = (*val >> (8 * (where & 3))) & 0xff; | 266 | *val = (*val >> (8 * (where & 3))) & 0xff; |
@@ -257,21 +274,24 @@ static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, | |||
257 | struct pci_bus *bus, | 274 | struct pci_bus *bus, |
258 | u32 devfn, int where, int size, u32 val) | 275 | u32 devfn, int where, int size, u32 val) |
259 | { | 276 | { |
260 | int ret = PCIBIOS_SUCCESSFUL; | 277 | u32 _val, shift = 8 * (where & 3); |
261 | 278 | ||
262 | writel(PCIE_CONF_ADDR(bus->number, devfn, where), | 279 | mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), |
263 | port->base + PCIE_CONF_ADDR_OFF); | 280 | PCIE_CONF_ADDR_OFF); |
281 | _val = mvebu_readl(port, PCIE_CONF_DATA_OFF); | ||
264 | 282 | ||
265 | if (size == 4) | 283 | if (size == 4) |
266 | writel(val, port->base + PCIE_CONF_DATA_OFF); | 284 | _val = val; |
267 | else if (size == 2) | 285 | else if (size == 2) |
268 | writew(val, port->base + PCIE_CONF_DATA_OFF + (where & 3)); | 286 | _val = (_val & ~(0xffff << shift)) | ((val & 0xffff) << shift); |
269 | else if (size == 1) | 287 | else if (size == 1) |
270 | writeb(val, port->base + PCIE_CONF_DATA_OFF + (where & 3)); | 288 | _val = (_val & ~(0xff << shift)) | ((val & 0xff) << shift); |
271 | else | 289 | else |
272 | ret = PCIBIOS_BAD_REGISTER_NUMBER; | 290 | return PCIBIOS_BAD_REGISTER_NUMBER; |
273 | 291 | ||
274 | return ret; | 292 | mvebu_writel(port, _val, PCIE_CONF_DATA_OFF); |
293 | |||
294 | return PCIBIOS_SUCCESSFUL; | ||
275 | } | 295 | } |
276 | 296 | ||
277 | static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) | 297 | static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) |
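[Editor's note: the write path above stops issuing writew()/writeb() at byte offsets and instead performs a single 32-bit read-modify-write, the safe pattern when the config-data register only tolerates full-word accesses. A condensed sketch of the same merge step, using the hypothetical helper name merge_cfg_word():]

	/* Sketch: fold a 1-, 2- or 4-byte config write into the 32-bit
	 * word read back from PCIE_CONF_DATA_OFF; mirrors the hunk above.
	 * Caller is assumed to have rejected any size not in {1, 2, 4}. */
	static u32 merge_cfg_word(u32 old, u32 val, int where, int size)
	{
		u32 shift = 8 * (where & 3);

		if (size == 4)
			return val;
		if (size == 2)
			return (old & ~(0xffff << shift)) |
			       ((val & 0xffff) << shift);
		return (old & ~(0xff << shift)) | ((val & 0xff) << shift);
	}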
@@ -552,7 +572,7 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | |||
552 | if (bus->number == 0) | 572 | if (bus->number == 0) |
553 | return mvebu_sw_pci_bridge_write(port, where, size, val); | 573 | return mvebu_sw_pci_bridge_write(port, where, size, val); |
554 | 574 | ||
555 | if (!port->haslink) | 575 | if (!mvebu_pcie_link_up(port)) |
556 | return PCIBIOS_DEVICE_NOT_FOUND; | 576 | return PCIBIOS_DEVICE_NOT_FOUND; |
557 | 577 | ||
558 | /* | 578 | /* |
@@ -594,7 +614,7 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | |||
594 | if (bus->number == 0) | 614 | if (bus->number == 0) |
595 | return mvebu_sw_pci_bridge_read(port, where, size, val); | 615 | return mvebu_sw_pci_bridge_read(port, where, size, val); |
596 | 616 | ||
597 | if (!port->haslink) { | 617 | if (!mvebu_pcie_link_up(port)) { |
598 | *val = 0xffffffff; | 618 | *val = 0xffffffff; |
599 | return PCIBIOS_DEVICE_NOT_FOUND; | 619 | return PCIBIOS_DEVICE_NOT_FOUND; |
600 | } | 620 | } |
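[Editor's note: both config-space paths drop the port->haslink flag cached at probe time in favour of querying mvebu_pcie_link_up() on every access, so a link that only trains after boot is still picked up. A sketch of such a live query, assuming the driver's existing status register and an active-high link-down bit as its mvebu_pcie_link_up() helper expects:]

	/* Sketch, under the assumption that PCIE_STAT_OFF carries an
	 * active-high "link down" bit. */
	static bool port_link_is_up(struct mvebu_pcie_port *port)
	{
		return !(mvebu_readl(port, PCIE_STAT_OFF) &
			 PCIE_STAT_LINK_DOWN);
	}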
@@ -626,7 +646,7 @@ static struct pci_ops mvebu_pcie_ops = { | |||
626 | .write = mvebu_pcie_wr_conf, | 646 | .write = mvebu_pcie_wr_conf, |
627 | }; | 647 | }; |
628 | 648 | ||
629 | static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys) | 649 | static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) |
630 | { | 650 | { |
631 | struct mvebu_pcie *pcie = sys_to_pcie(sys); | 651 | struct mvebu_pcie *pcie = sys_to_pcie(sys); |
632 | int i; | 652 | int i; |
@@ -645,7 +665,7 @@ static int __init mvebu_pcie_setup(int nr, struct pci_sys_data *sys) | |||
645 | return 1; | 665 | return 1; |
646 | } | 666 | } |
647 | 667 | ||
648 | static int __init mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 668 | static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
649 | { | 669 | { |
650 | struct of_irq oirq; | 670 | struct of_irq oirq; |
651 | int ret; | 671 | int ret; |
@@ -673,11 +693,17 @@ static struct pci_bus *mvebu_pcie_scan_bus(int nr, struct pci_sys_data *sys) | |||
673 | return bus; | 693 | return bus; |
674 | } | 694 | } |
675 | 695 | ||
676 | resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, | 696 | static void mvebu_pcie_add_bus(struct pci_bus *bus) |
677 | const struct resource *res, | 697 | { |
678 | resource_size_t start, | 698 | struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); |
679 | resource_size_t size, | 699 | bus->msi = pcie->msi; |
680 | resource_size_t align) | 700 | } |
701 | |||
702 | static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, | ||
703 | const struct resource *res, | ||
704 | resource_size_t start, | ||
705 | resource_size_t size, | ||
706 | resource_size_t align) | ||
681 | { | 707 | { |
682 | if (dev->bus->number != 0) | 708 | if (dev->bus->number != 0) |
683 | return start; | 709 | return start; |
@@ -696,7 +722,7 @@ resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, | |||
696 | return start; | 722 | return start; |
697 | } | 723 | } |
698 | 724 | ||
699 | static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie) | 725 | static void mvebu_pcie_enable(struct mvebu_pcie *pcie) |
700 | { | 726 | { |
701 | struct hw_pci hw; | 727 | struct hw_pci hw; |
702 | 728 | ||
@@ -709,6 +735,7 @@ static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie) | |||
709 | hw.map_irq = mvebu_pcie_map_irq; | 735 | hw.map_irq = mvebu_pcie_map_irq; |
710 | hw.ops = &mvebu_pcie_ops; | 736 | hw.ops = &mvebu_pcie_ops; |
711 | hw.align_resource = mvebu_pcie_align_resource; | 737 | hw.align_resource = mvebu_pcie_align_resource; |
738 | hw.add_bus = mvebu_pcie_add_bus; | ||
712 | 739 | ||
713 | pci_common_init(&hw); | 740 | pci_common_init(&hw); |
714 | } | 741 | } |
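[Editor's note: hw.add_bus is a per-bus callback run by the ARM pci_common_init() machinery; mvebu_pcie_add_bus(), added above, uses it to hand the msi_chip discovered at probe to every scanned bus. Once bus->msi is set, an endpoint driver's ordinary MSI request is steered through that chip. A usage sketch with a hypothetical endpoint probe:]

	/* Usage sketch (hypothetical endpoint driver): nothing
	 * mvebu-specific is needed; pci_enable_msi() resolves the MSI
	 * controller via pdev->bus->msi, which mvebu_pcie_add_bus()
	 * populated. */
	static int demo_ep_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		if (pci_enable_msi(pdev))
			dev_warn(&pdev->dev, "MSI unavailable, using INTx\n");
		return 0;
	}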
@@ -718,10 +745,8 @@ static void __init mvebu_pcie_enable(struct mvebu_pcie *pcie) | |||
718 | * <...> property for one that matches the given port/lane. Once | 745 | * <...> property for one that matches the given port/lane. Once |
719 | * found, maps it. | 746 | * found, maps it. |
720 | */ | 747 | */ |
721 | static void __iomem * __init | 748 | static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, |
722 | mvebu_pcie_map_registers(struct platform_device *pdev, | 749 | struct device_node *np, struct mvebu_pcie_port *port) |
723 | struct device_node *np, | ||
724 | struct mvebu_pcie_port *port) | ||
725 | { | 750 | { |
726 | struct resource regs; | 751 | struct resource regs; |
727 | int ret = 0; | 752 | int ret = 0; |
@@ -777,7 +802,22 @@ static int mvebu_get_tgt_attr(struct device_node *np, int devfn, | |||
777 | return -ENOENT; | 802 | return -ENOENT; |
778 | } | 803 | } |
779 | 804 | ||
780 | static int __init mvebu_pcie_probe(struct platform_device *pdev) | 805 | static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie) |
806 | { | ||
807 | struct device_node *msi_node; | ||
808 | |||
809 | msi_node = of_parse_phandle(pcie->pdev->dev.of_node, | ||
810 | "msi-parent", 0); | ||
811 | if (!msi_node) | ||
812 | return; | ||
813 | |||
814 | pcie->msi = of_pci_find_msi_chip_by_node(msi_node); | ||
815 | |||
816 | if (pcie->msi) | ||
817 | pcie->msi->dev = &pcie->pdev->dev; | ||
818 | } | ||
819 | |||
820 | static int mvebu_pcie_probe(struct platform_device *pdev) | ||
781 | { | 821 | { |
782 | struct mvebu_pcie *pcie; | 822 | struct mvebu_pcie *pcie; |
783 | struct device_node *np = pdev->dev.of_node; | 823 | struct device_node *np = pdev->dev.of_node; |
@@ -790,6 +830,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
790 | return -ENOMEM; | 830 | return -ENOMEM; |
791 | 831 | ||
792 | pcie->pdev = pdev; | 832 | pcie->pdev = pdev; |
833 | platform_set_drvdata(pdev, pcie); | ||
793 | 834 | ||
794 | /* Get the PCIe memory and I/O aperture */ | 835 | /* Get the PCIe memory and I/O aperture */ |
795 | mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); | 836 | mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); |
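[Editor's note: the platform_set_drvdata() call added above stores the freshly allocated mvebu_pcie so later device callbacks can retrieve it. A sketch of the retrieval side, using a hypothetical suspend hook this driver does not actually implement:]

	/* Hypothetical hook, illustrating the drvdata round-trip only. */
	static int __maybe_unused mvebu_pcie_demo_suspend(struct device *dev)
	{
		struct mvebu_pcie *pcie = dev_get_drvdata(dev);

		dev_dbg(dev, "would quiesce %d PCIe port(s)\n", pcie->nports);
		return 0;
	}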
@@ -818,13 +859,14 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
818 | return ret; | 859 | return ret; |
819 | } | 860 | } |
820 | 861 | ||
862 | i = 0; | ||
821 | for_each_child_of_node(pdev->dev.of_node, child) { | 863 | for_each_child_of_node(pdev->dev.of_node, child) { |
822 | if (!of_device_is_available(child)) | 864 | if (!of_device_is_available(child)) |
823 | continue; | 865 | continue; |
824 | pcie->nports++; | 866 | i++; |
825 | } | 867 | } |
826 | 868 | ||
827 | pcie->ports = devm_kzalloc(&pdev->dev, pcie->nports * | 869 | pcie->ports = devm_kzalloc(&pdev->dev, i * |
828 | sizeof(struct mvebu_pcie_port), | 870 | sizeof(struct mvebu_pcie_port), |
829 | GFP_KERNEL); | 871 | GFP_KERNEL); |
830 | if (!pcie->ports) | 872 | if (!pcie->ports) |
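[Editor's note: the counting loop above only considers children that are present and status = "okay", so the ports array is sized to the enabled links. The same count can be expressed with the stock helper macro from <linux/of.h> — a sketch:]

	/* Sketch: equivalent to the open-coded availability check above. */
	static int count_enabled_ports(struct device_node *np)
	{
		struct device_node *child;
		int n = 0;

		for_each_available_child_of_node(np, child)
			n++;
		return n;
	}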
@@ -833,6 +875,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
833 | i = 0; | 875 | i = 0; |
834 | for_each_child_of_node(pdev->dev.of_node, child) { | 876 | for_each_child_of_node(pdev->dev.of_node, child) { |
835 | struct mvebu_pcie_port *port = &pcie->ports[i]; | 877 | struct mvebu_pcie_port *port = &pcie->ports[i]; |
878 | enum of_gpio_flags flags; | ||
836 | 879 | ||
837 | if (!of_device_is_available(child)) | 880 | if (!of_device_is_available(child)) |
838 | continue; | 881 | continue; |
@@ -873,45 +916,68 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
873 | continue; | 916 | continue; |
874 | } | 917 | } |
875 | 918 | ||
919 | port->reset_gpio = of_get_named_gpio_flags(child, | ||
920 | "reset-gpios", 0, &flags); | ||
921 | if (gpio_is_valid(port->reset_gpio)) { | ||
922 | u32 reset_udelay = 20000; | ||
923 | |||
924 | port->reset_active_low = flags & OF_GPIO_ACTIVE_LOW; | ||
925 | port->reset_name = kasprintf(GFP_KERNEL, | ||
926 | "pcie%d.%d-reset", port->port, port->lane); | ||
927 | of_property_read_u32(child, "reset-delay-us", | ||
928 | &reset_udelay); | ||
929 | |||
930 | ret = devm_gpio_request_one(&pdev->dev, | ||
931 | port->reset_gpio, GPIOF_DIR_OUT, port->reset_name); | ||
932 | if (ret) { | ||
933 | if (ret == -EPROBE_DEFER) | ||
934 | return ret; | ||
935 | continue; | ||
936 | } | ||
937 | |||
938 | gpio_set_value(port->reset_gpio, | ||
939 | (port->reset_active_low) ? 1 : 0); | ||
940 | msleep(reset_udelay/1000); | ||
941 | } | ||
942 | |||
943 | port->clk = of_clk_get_by_name(child, NULL); | ||
944 | if (IS_ERR(port->clk)) { | ||
945 | dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n", | ||
946 | port->port, port->lane); | ||
947 | continue; | ||
948 | } | ||
949 | |||
950 | ret = clk_prepare_enable(port->clk); | ||
951 | if (ret) | ||
952 | continue; | ||
953 | |||
876 | port->base = mvebu_pcie_map_registers(pdev, child, port); | 954 | port->base = mvebu_pcie_map_registers(pdev, child, port); |
877 | if (IS_ERR(port->base)) { | 955 | if (IS_ERR(port->base)) { |
878 | dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n", | 956 | dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n", |
879 | port->port, port->lane); | 957 | port->port, port->lane); |
880 | port->base = NULL; | 958 | port->base = NULL; |
959 | clk_disable_unprepare(port->clk); | ||
881 | continue; | 960 | continue; |
882 | } | 961 | } |
883 | 962 | ||
884 | mvebu_pcie_set_local_dev_nr(port, 1); | 963 | mvebu_pcie_set_local_dev_nr(port, 1); |
885 | 964 | ||
886 | if (mvebu_pcie_link_up(port)) { | ||
887 | port->haslink = 1; | ||
888 | dev_info(&pdev->dev, "PCIe%d.%d: link up\n", | ||
889 | port->port, port->lane); | ||
890 | } else { | ||
891 | port->haslink = 0; | ||
892 | dev_info(&pdev->dev, "PCIe%d.%d: link down\n", | ||
893 | port->port, port->lane); | ||
894 | } | ||
895 | |||
896 | port->clk = of_clk_get_by_name(child, NULL); | 965 | port->clk = of_clk_get_by_name(child, NULL); |
897 | if (IS_ERR(port->clk)) { | 966 | if (IS_ERR(port->clk)) { |
898 | dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n", | 967 | dev_err(&pdev->dev, "PCIe%d.%d: cannot get clock\n", |
899 | port->port, port->lane); | 968 | port->port, port->lane); |
900 | iounmap(port->base); | 969 | iounmap(port->base); |
901 | port->haslink = 0; | ||
902 | continue; | 970 | continue; |
903 | } | 971 | } |
904 | 972 | ||
905 | port->dn = child; | 973 | port->dn = child; |
906 | |||
907 | clk_prepare_enable(port->clk); | ||
908 | spin_lock_init(&port->conf_lock); | 974 | spin_lock_init(&port->conf_lock); |
909 | |||
910 | mvebu_sw_pci_bridge_init(port); | 975 | mvebu_sw_pci_bridge_init(port); |
911 | |||
912 | i++; | 976 | i++; |
913 | } | 977 | } |
914 | 978 | ||
979 | pcie->nports = i; | ||
980 | mvebu_pcie_msi_enable(pcie); | ||
915 | mvebu_pcie_enable(pcie); | 981 | mvebu_pcie_enable(pcie); |
916 | 982 | ||
917 | return 0; | 983 | return 0; |
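[Editor's note: the reset-gpios handling in the hunk above drives the line to its inactive level (1 when the DT flag says active-low, 0 otherwise) and then sleeps for reset-delay-us, defaulting to 20 ms, so the device behind PERST# has time to come out of reset before the link is probed. A compact sketch of the same polarity logic:]

	/* Sketch of the release step used above: drive PERST# inactive,
	 * then honour the (microsecond) DT delay with a millisecond
	 * sleep. As in the hunk, a sub-millisecond delay rounds down
	 * to msleep(0). */
	static void release_perst(struct mvebu_pcie_port *port, u32 delay_us)
	{
		gpio_set_value(port->reset_gpio,
			       port->reset_active_low ? 1 : 0);
		msleep(delay_us / 1000);
	}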
@@ -920,6 +986,7 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev) | |||
920 | static const struct of_device_id mvebu_pcie_of_match_table[] = { | 986 | static const struct of_device_id mvebu_pcie_of_match_table[] = { |
921 | { .compatible = "marvell,armada-xp-pcie", }, | 987 | { .compatible = "marvell,armada-xp-pcie", }, |
922 | { .compatible = "marvell,armada-370-pcie", }, | 988 | { .compatible = "marvell,armada-370-pcie", }, |
989 | { .compatible = "marvell,dove-pcie", }, | ||
923 | { .compatible = "marvell,kirkwood-pcie", }, | 990 | { .compatible = "marvell,kirkwood-pcie", }, |
924 | {}, | 991 | {}, |
925 | }; | 992 | }; |
@@ -931,16 +998,12 @@ static struct platform_driver mvebu_pcie_driver = { | |||
931 | .name = "mvebu-pcie", | 998 | .name = "mvebu-pcie", |
932 | .of_match_table = | 999 | .of_match_table = |
933 | of_match_ptr(mvebu_pcie_of_match_table), | 1000 | of_match_ptr(mvebu_pcie_of_match_table), |
1001 | /* driver unloading/unbinding currently not supported */ | ||
1002 | .suppress_bind_attrs = true, | ||
934 | }, | 1003 | }, |
1004 | .probe = mvebu_pcie_probe, | ||
935 | }; | 1005 | }; |
936 | 1006 | module_platform_driver(mvebu_pcie_driver); | |
937 | static int __init mvebu_pcie_init(void) | ||
938 | { | ||
939 | return platform_driver_probe(&mvebu_pcie_driver, | ||
940 | mvebu_pcie_probe); | ||
941 | } | ||
942 | |||
943 | subsys_initcall(mvebu_pcie_init); | ||
944 | 1007 | ||
945 | MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); | 1008 | MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); |
946 | MODULE_DESCRIPTION("Marvell EBU PCIe driver"); | 1009 | MODULE_DESCRIPTION("Marvell EBU PCIe driver"); |
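[Editor's note: the hand-rolled subsys_initcall()/platform_driver_probe() pair is replaced by module_platform_driver(), which is why the .probe member now lives in the struct and why suppress_bind_attrs is annotated above. Roughly what the macro expands to:]

	/* Approximate expansion of module_platform_driver(mvebu_pcie_driver). */
	static int __init mvebu_pcie_driver_init(void)
	{
		return platform_driver_register(&mvebu_pcie_driver);
	}
	module_init(mvebu_pcie_driver_init);

	static void __exit mvebu_pcie_driver_exit(void)
	{
		platform_driver_unregister(&mvebu_pcie_driver);
	}
	module_exit(mvebu_pcie_driver_exit);

[Unlike platform_driver_probe(), plain registration is compatible with deferred probing, which the new -EPROBE_DEFER return in the reset-GPIO path relies on.]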