Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Kconfig                   |    2
-rw-r--r--  drivers/soc/Kconfig               |    1
-rw-r--r--  drivers/soc/Makefile              |    1
-rw-r--r--  drivers/soc/ti/Kconfig            |   21
-rw-r--r--  drivers/soc/ti/Makefile           |    4
-rw-r--r--  drivers/soc/ti/knav_qmss.h        |  386
-rw-r--r--  drivers/soc/ti/knav_qmss_acc.c    |  591
-rw-r--r--  drivers/soc/ti/knav_qmss_queue.c  | 1816
8 files changed, 2822 insertions, 0 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 622fa266b29e..1a693d3f9d51 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -148,6 +148,8 @@ source "drivers/remoteproc/Kconfig"
 
 source "drivers/rpmsg/Kconfig"
 
+source "drivers/soc/Kconfig"
+
 source "drivers/devfreq/Kconfig"
 
 source "drivers/extcon/Kconfig"
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c8543855aa82..49e3f0cc71af 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -1,5 +1,6 @@
 menu "SOC (System On Chip) specific Drivers"
 
 source "drivers/soc/qcom/Kconfig"
+source "drivers/soc/ti/Kconfig"
 
 endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 3b1b95d932d1..0d6e35dfea8c 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_ARCH_QCOM) += qcom/
 obj-$(CONFIG_ARCH_TEGRA) += tegra/
+obj-$(CONFIG_SOC_TI) += ti/
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
new file mode 100644
index 000000000000..f73896f762e8
--- /dev/null
+++ b/drivers/soc/ti/Kconfig
@@ -0,0 +1,21 @@
1 | # | ||
2 | # TI SOC drivers | ||
3 | # | ||
4 | menuconfig SOC_TI | ||
5 | bool "TI SOC drivers support" | ||
6 | |||
7 | if SOC_TI | ||
8 | |||
9 | config KEYSTONE_NAVIGATOR_QMSS | ||
10 | tristate "Keystone Queue Manager Sub System" | ||
11 | depends on ARCH_KEYSTONE | ||
12 | help | ||
13 | Say y here to support the Keystone multicore Navigator Queue | ||
14 | Manager. The Queue Manager is a hardware module that | ||
15 | is responsible for accelerating management of the packet queues. | ||
16 | Packets are queued/de-queued by writing/reading a descriptor address | ||
17 | to/from a particular memory-mapped location in the Queue Manager module. | ||
18 | |||
19 | If unsure, say N. | ||
20 | |||
21 | endif # SOC_TI | ||
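The queue/de-queue model described in the help text above boils down to two MMIO accesses per packet. The following minimal sketch (not part of the patch) mirrors knav_queue_push() and knav_queue_pop() from knav_qmss_queue.c later in this commit; the example_* names are invented here.

/*
 * Illustrative only: push/pop against a queue's memory-mapped
 * ptr_size_thresh register, per the knav_reg_queue layout in
 * knav_qmss.h below. Descriptor sizes are multiples of 16 bytes,
 * encoded in the low 4 bits of the pushed value.
 */
#include <linux/io.h>
#include <linux/types.h>

static void example_push(void __iomem *ptr_size_thresh,
			 dma_addr_t desc, unsigned int size)
{
	/* queue: descriptor address in the high bits, size code in low 4 */
	writel_relaxed((u32)desc | ((size / 16) - 1), ptr_size_thresh);
}

static dma_addr_t example_pop(void __iomem *ptr_size_thresh)
{
	u32 val = readl_relaxed(ptr_size_thresh);

	/* a read de-queues one entry; zero means the queue was empty */
	return val & ~0xfUL;
}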
diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile
new file mode 100644
index 000000000000..bf85cacd5b85
--- /dev/null
+++ b/drivers/soc/ti/Makefile
@@ -0,0 +1,4 @@
1 | # | ||
2 | # TI Keystone SOC drivers | ||
3 | # | ||
4 | obj-$(CONFIG_KEYSTONE_NAVIGATOR_QMSS) += knav_qmss_queue.o knav_qmss_acc.o | ||
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
new file mode 100644
index 000000000000..bc9dcc8cc3ce
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss.h
@@ -0,0 +1,386 @@
1 | /* | ||
2 | * Keystone Navigator QMSS driver internal header | ||
3 | * | ||
4 | * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * Author: Sandeep Nair <sandeep_n@ti.com> | ||
6 | * Cyril Chemparathy <cyril@ti.com> | ||
7 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * version 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #ifndef __KNAV_QMSS_H__ | ||
20 | #define __KNAV_QMSS_H__ | ||
21 | |||
22 | #define THRESH_GTE BIT(7) | ||
23 | #define THRESH_LT 0 | ||
24 | |||
25 | #define PDSP_CTRL_PC_MASK 0xffff0000 | ||
26 | #define PDSP_CTRL_SOFT_RESET BIT(0) | ||
27 | #define PDSP_CTRL_ENABLE BIT(1) | ||
28 | #define PDSP_CTRL_RUNNING BIT(15) | ||
29 | |||
30 | #define ACC_MAX_CHANNEL 48 | ||
31 | #define ACC_DEFAULT_PERIOD 25 /* usecs */ | ||
32 | |||
33 | #define ACC_CHANNEL_INT_BASE 2 | ||
34 | |||
35 | #define ACC_LIST_ENTRY_TYPE 1 | ||
36 | #define ACC_LIST_ENTRY_WORDS (1 << ACC_LIST_ENTRY_TYPE) | ||
37 | #define ACC_LIST_ENTRY_QUEUE_IDX 0 | ||
38 | #define ACC_LIST_ENTRY_DESC_IDX (ACC_LIST_ENTRY_WORDS - 1) | ||
39 | |||
40 | #define ACC_CMD_DISABLE_CHANNEL 0x80 | ||
41 | #define ACC_CMD_ENABLE_CHANNEL 0x81 | ||
42 | #define ACC_CFG_MULTI_QUEUE BIT(21) | ||
43 | |||
44 | #define ACC_INTD_OFFSET_EOI (0x0010) | ||
45 | #define ACC_INTD_OFFSET_COUNT(ch) (0x0300 + 4 * (ch)) | ||
46 | #define ACC_INTD_OFFSET_STATUS(ch) (0x0200 + 4 * ((ch) / 32)) | ||
47 | |||
48 | #define RANGE_MAX_IRQS 64 | ||
49 | |||
50 | #define ACC_DESCS_MAX SZ_1K | ||
51 | #define ACC_DESCS_MASK (ACC_DESCS_MAX - 1) | ||
52 | #define DESC_SIZE_MASK 0xful | ||
53 | #define DESC_PTR_MASK (~DESC_SIZE_MASK) | ||
54 | |||
55 | #define KNAV_NAME_SIZE 32 | ||
56 | |||
57 | enum knav_acc_result { | ||
58 | ACC_RET_IDLE, | ||
59 | ACC_RET_SUCCESS, | ||
60 | ACC_RET_INVALID_COMMAND, | ||
61 | ACC_RET_INVALID_CHANNEL, | ||
62 | ACC_RET_INACTIVE_CHANNEL, | ||
63 | ACC_RET_ACTIVE_CHANNEL, | ||
64 | ACC_RET_INVALID_QUEUE, | ||
65 | ACC_RET_INVALID_RET, | ||
66 | }; | ||
67 | |||
68 | struct knav_reg_config { | ||
69 | u32 revision; | ||
70 | u32 __pad1; | ||
71 | u32 divert; | ||
72 | u32 link_ram_base0; | ||
73 | u32 link_ram_size0; | ||
74 | u32 link_ram_base1; | ||
75 | u32 __pad2[2]; | ||
76 | u32 starvation[0]; | ||
77 | }; | ||
78 | |||
79 | struct knav_reg_region { | ||
80 | u32 base; | ||
81 | u32 start_index; | ||
82 | u32 size_count; | ||
83 | u32 __pad; | ||
84 | }; | ||
85 | |||
86 | struct knav_reg_pdsp_regs { | ||
87 | u32 control; | ||
88 | u32 status; | ||
89 | u32 cycle_count; | ||
90 | u32 stall_count; | ||
91 | }; | ||
92 | |||
93 | struct knav_reg_acc_command { | ||
94 | u32 command; | ||
95 | u32 queue_mask; | ||
96 | u32 list_phys; | ||
97 | u32 queue_num; | ||
98 | u32 timer_config; | ||
99 | }; | ||
100 | |||
101 | struct knav_link_ram_block { | ||
102 | dma_addr_t phys; | ||
103 | void *virt; | ||
104 | size_t size; | ||
105 | }; | ||
106 | |||
107 | struct knav_acc_info { | ||
108 | u32 pdsp_id; | ||
109 | u32 start_channel; | ||
110 | u32 list_entries; | ||
111 | u32 pacing_mode; | ||
112 | u32 timer_count; | ||
113 | int mem_size; | ||
114 | int list_size; | ||
115 | struct knav_pdsp_info *pdsp; | ||
116 | }; | ||
117 | |||
118 | struct knav_acc_channel { | ||
119 | u32 channel; | ||
120 | u32 list_index; | ||
121 | u32 open_mask; | ||
122 | u32 *list_cpu[2]; | ||
123 | dma_addr_t list_dma[2]; | ||
124 | char name[KNAV_NAME_SIZE]; | ||
125 | atomic_t retrigger_count; | ||
126 | }; | ||
127 | |||
128 | struct knav_pdsp_info { | ||
129 | const char *name; | ||
130 | struct knav_reg_pdsp_regs __iomem *regs; | ||
131 | union { | ||
132 | void __iomem *command; | ||
133 | struct knav_reg_acc_command __iomem *acc_command; | ||
134 | u32 __iomem *qos_command; | ||
135 | }; | ||
136 | void __iomem *intd; | ||
137 | u32 __iomem *iram; | ||
138 | const char *firmware; | ||
139 | u32 id; | ||
140 | struct list_head list; | ||
141 | }; | ||
142 | |||
143 | struct knav_qmgr_info { | ||
144 | unsigned start_queue; | ||
145 | unsigned num_queues; | ||
146 | struct knav_reg_config __iomem *reg_config; | ||
147 | struct knav_reg_region __iomem *reg_region; | ||
148 | struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek; | ||
149 | void __iomem *reg_status; | ||
150 | struct list_head list; | ||
151 | }; | ||
152 | |||
153 | #define KNAV_NUM_LINKRAM 2 | ||
154 | |||
155 | /** | ||
156 | * struct knav_queue_stats: queue statistics | ||
157 | * @pushes: number of push operations | ||
158 | * @pops: number of pop operations | ||
159 | * @push_errors: number of push errors | ||
160 | * @pop_errors: number of pop errors | ||
161 | * @notifies: notifier counts | ||
162 | */ | ||
163 | struct knav_queue_stats { | ||
164 | atomic_t pushes; | ||
165 | atomic_t pops; | ||
166 | atomic_t push_errors; | ||
167 | atomic_t pop_errors; | ||
168 | atomic_t notifies; | ||
169 | }; | ||
170 | |||
171 | /** | ||
172 | * struct knav_reg_queue: queue registers | ||
173 | * @entry_count: valid entries in the queue | ||
174 | * @byte_count: total byte count in the queue | ||
175 | * @packet_size: packet size for the queue | ||
176 | * @ptr_size_thresh: packet pointer size threshold | ||
177 | */ | ||
178 | struct knav_reg_queue { | ||
179 | u32 entry_count; | ||
180 | u32 byte_count; | ||
181 | u32 packet_size; | ||
182 | u32 ptr_size_thresh; | ||
183 | }; | ||
184 | |||
185 | /** | ||
186 | * struct knav_region: qmss region info | ||
187 | * @dma_start, dma_end: start and end dma address | ||
188 | * @virt_start, virt_end: start and end virtual address | ||
189 | * @desc_size: descriptor size | ||
190 | * @used_desc: consumed descriptors | ||
191 | * @id: region number | ||
192 | * @num_desc: total descriptors | ||
193 | * @link_index: index of the first descriptor | ||
194 | * @name: region name | ||
195 | * @list: instance in the device's region list | ||
196 | * @pools: list of descriptor pools in the region | ||
197 | */ | ||
198 | struct knav_region { | ||
199 | dma_addr_t dma_start, dma_end; | ||
200 | void *virt_start, *virt_end; | ||
201 | unsigned desc_size; | ||
202 | unsigned used_desc; | ||
203 | unsigned id; | ||
204 | unsigned num_desc; | ||
205 | unsigned link_index; | ||
206 | const char *name; | ||
207 | struct list_head list; | ||
208 | struct list_head pools; | ||
209 | }; | ||
210 | |||
211 | /** | ||
212 | * struct knav_pool: qmss pools | ||
213 | * @dev: device pointer | ||
214 | * @region: qmss region info | ||
215 | * @queue: queue registers | ||
216 | * @kdev: qmss device pointer | ||
217 | * @region_offset: offset from the base | ||
218 | * @num_desc: total descriptors | ||
219 | * @desc_size: descriptor size | ||
220 | * @region_id: region number | ||
221 | * @name: pool name | ||
222 | * @list: list head | ||
223 | * @region_inst: instance in the region's pool list | ||
224 | */ | ||
225 | struct knav_pool { | ||
226 | struct device *dev; | ||
227 | struct knav_region *region; | ||
228 | struct knav_queue *queue; | ||
229 | struct knav_device *kdev; | ||
230 | int region_offset; | ||
231 | int num_desc; | ||
232 | int desc_size; | ||
233 | int region_id; | ||
234 | const char *name; | ||
235 | struct list_head list; | ||
236 | struct list_head region_inst; | ||
237 | }; | ||
238 | |||
239 | /** | ||
240 | * struct knav_queue_inst: qmss queue instance properties | ||
241 | * @descs: descriptor pointer | ||
242 | * @desc_head, desc_tail, desc_count: descriptor counters | ||
243 | * @acc: accumulator channel pointer | ||
244 | * @kdev: qmss device pointer | ||
245 | * @range: range info | ||
246 | * @qmgr: queue manager info | ||
247 | * @id: queue instance id | ||
248 | * @irq_num: irq line number | ||
249 | * @notify_needed: notifier needed based on queue type | ||
250 | * @num_notifiers: total notifiers | ||
251 | * @handles: list head | ||
252 | * @name: queue instance name | ||
253 | * @irq_name: irq line name | ||
254 | */ | ||
255 | struct knav_queue_inst { | ||
256 | u32 *descs; | ||
257 | atomic_t desc_head, desc_tail, desc_count; | ||
258 | struct knav_acc_channel *acc; | ||
259 | struct knav_device *kdev; | ||
260 | struct knav_range_info *range; | ||
261 | struct knav_qmgr_info *qmgr; | ||
262 | u32 id; | ||
263 | int irq_num; | ||
264 | int notify_needed; | ||
265 | atomic_t num_notifiers; | ||
266 | struct list_head handles; | ||
267 | const char *name; | ||
268 | const char *irq_name; | ||
269 | }; | ||
270 | |||
271 | /** | ||
272 | * struct knav_queue: qmss queue properties | ||
273 | * @reg_push, reg_pop, reg_peek: push, pop queue registers | ||
274 | * @inst: qmss queue instance properties | ||
275 | * @notifier_fn: notifier function | ||
276 | * @notifier_fn_arg: notifier function argument | ||
277 | * @notifier_enabled: notifier enabled for a given queue | ||
278 | * @rcu: rcu head | ||
279 | * @flags: queue flags | ||
280 | * @list: list head | ||
281 | */ | ||
282 | struct knav_queue { | ||
283 | struct knav_reg_queue __iomem *reg_push, *reg_pop, *reg_peek; | ||
284 | struct knav_queue_inst *inst; | ||
285 | struct knav_queue_stats stats; | ||
286 | knav_queue_notify_fn notifier_fn; | ||
287 | void *notifier_fn_arg; | ||
288 | atomic_t notifier_enabled; | ||
289 | struct rcu_head rcu; | ||
290 | unsigned flags; | ||
291 | struct list_head list; | ||
292 | }; | ||
293 | |||
294 | struct knav_device { | ||
295 | struct device *dev; | ||
296 | unsigned base_id; | ||
297 | unsigned num_queues; | ||
298 | unsigned num_queues_in_use; | ||
299 | unsigned inst_shift; | ||
300 | struct knav_link_ram_block link_rams[KNAV_NUM_LINKRAM]; | ||
301 | void *instances; | ||
302 | struct list_head regions; | ||
303 | struct list_head queue_ranges; | ||
304 | struct list_head pools; | ||
305 | struct list_head pdsps; | ||
306 | struct list_head qmgrs; | ||
307 | }; | ||
308 | |||
309 | struct knav_range_ops { | ||
310 | int (*init_range)(struct knav_range_info *range); | ||
311 | int (*free_range)(struct knav_range_info *range); | ||
312 | int (*init_queue)(struct knav_range_info *range, | ||
313 | struct knav_queue_inst *inst); | ||
314 | int (*open_queue)(struct knav_range_info *range, | ||
315 | struct knav_queue_inst *inst, unsigned flags); | ||
316 | int (*close_queue)(struct knav_range_info *range, | ||
317 | struct knav_queue_inst *inst); | ||
318 | int (*set_notify)(struct knav_range_info *range, | ||
319 | struct knav_queue_inst *inst, bool enabled); | ||
320 | }; | ||
321 | |||
322 | struct knav_irq_info { | ||
323 | int irq; | ||
324 | u32 cpu_map; | ||
325 | }; | ||
326 | |||
327 | struct knav_range_info { | ||
328 | const char *name; | ||
329 | struct knav_device *kdev; | ||
330 | unsigned queue_base; | ||
331 | unsigned num_queues; | ||
332 | void *queue_base_inst; | ||
333 | unsigned flags; | ||
334 | struct list_head list; | ||
335 | struct knav_range_ops *ops; | ||
336 | struct knav_acc_info acc_info; | ||
337 | struct knav_acc_channel *acc; | ||
338 | unsigned num_irqs; | ||
339 | struct knav_irq_info irqs[RANGE_MAX_IRQS]; | ||
340 | }; | ||
341 | |||
342 | #define RANGE_RESERVED BIT(0) | ||
343 | #define RANGE_HAS_IRQ BIT(1) | ||
344 | #define RANGE_HAS_ACCUMULATOR BIT(2) | ||
345 | #define RANGE_MULTI_QUEUE BIT(3) | ||
346 | |||
347 | #define for_each_region(kdev, region) \ | ||
348 | list_for_each_entry(region, &kdev->regions, list) | ||
349 | |||
350 | #define first_region(kdev) \ | ||
351 | list_first_entry(&kdev->regions, \ | ||
352 | struct knav_region, list) | ||
353 | |||
354 | #define for_each_queue_range(kdev, range) \ | ||
355 | list_for_each_entry(range, &kdev->queue_ranges, list) | ||
356 | |||
357 | #define first_queue_range(kdev) \ | ||
358 | list_first_entry(&kdev->queue_ranges, \ | ||
359 | struct knav_range_info, list) | ||
360 | |||
361 | #define for_each_pool(kdev, pool) \ | ||
362 | list_for_each_entry(pool, &kdev->pools, list) | ||
363 | |||
364 | #define for_each_pdsp(kdev, pdsp) \ | ||
365 | list_for_each_entry(pdsp, &kdev->pdsps, list) | ||
366 | |||
367 | #define for_each_qmgr(kdev, qmgr) \ | ||
368 | list_for_each_entry(qmgr, &kdev->qmgrs, list) | ||
369 | |||
370 | static inline struct knav_pdsp_info * | ||
371 | knav_find_pdsp(struct knav_device *kdev, unsigned pdsp_id) | ||
372 | { | ||
373 | struct knav_pdsp_info *pdsp; | ||
374 | |||
375 | for_each_pdsp(kdev, pdsp) | ||
376 | if (pdsp_id == pdsp->id) | ||
377 | return pdsp; | ||
378 | return NULL; | ||
379 | } | ||
380 | |||
381 | extern int knav_init_acc_range(struct knav_device *kdev, | ||
382 | struct device_node *node, | ||
383 | struct knav_range_info *range); | ||
384 | extern void knav_queue_notify(struct knav_queue_inst *inst); | ||
385 | |||
386 | #endif /* __KNAV_QMSS_H__ */ | ||
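The knav_range_ops table declared in this header is the extension point for range-specific behaviour (accumulated vs. IRQ-driven vs. general-purpose ranges). A hypothetical sketch of an implementation follows; everything prefixed example_ is invented, and the real instances are knav_acc_range_ops and knav_gp_range_ops in the files below.

/* Hypothetical range implementation wiring up knav_range_ops hooks. */
static int example_open_queue(struct knav_range_info *range,
			      struct knav_queue_inst *inst, unsigned flags)
{
	/* claim per-queue resources (IRQs, channels) on first open */
	return 0;
}

static int example_close_queue(struct knav_range_info *range,
			       struct knav_queue_inst *inst)
{
	/* release whatever open_queue claimed */
	return 0;
}

static struct knav_range_ops example_range_ops = {
	.open_queue	= example_open_queue,
	.close_queue	= example_close_queue,
};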
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
new file mode 100644
index 000000000000..6fbfde6e748f
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -0,0 +1,591 @@
1 | /* | ||
2 | * Keystone accumulator queue manager | ||
3 | * | ||
4 | * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * Author: Sandeep Nair <sandeep_n@ti.com> | ||
6 | * Cyril Chemparathy <cyril@ti.com> | ||
7 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * version 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/bitops.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/soc/ti/knav_qmss.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/dma-mapping.h> | ||
30 | #include <linux/of.h> | ||
31 | #include <linux/of_device.h> | ||
32 | #include <linux/of_address.h> | ||
33 | #include <linux/firmware.h> | ||
34 | |||
35 | #include "knav_qmss.h" | ||
36 | |||
37 | #define knav_range_offset_to_inst(kdev, range, q) \ | ||
38 | (range->queue_base_inst + (q << kdev->inst_shift)) | ||
39 | |||
40 | static void __knav_acc_notify(struct knav_range_info *range, | ||
41 | struct knav_acc_channel *acc) | ||
42 | { | ||
43 | struct knav_device *kdev = range->kdev; | ||
44 | struct knav_queue_inst *inst; | ||
45 | int range_base, queue; | ||
46 | |||
47 | range_base = kdev->base_id + range->queue_base; | ||
48 | |||
49 | if (range->flags & RANGE_MULTI_QUEUE) { | ||
50 | for (queue = 0; queue < range->num_queues; queue++) { | ||
51 | inst = knav_range_offset_to_inst(kdev, range, | ||
52 | queue); | ||
53 | if (inst->notify_needed) { | ||
54 | inst->notify_needed = 0; | ||
55 | dev_dbg(kdev->dev, "acc-irq: notifying %d\n", | ||
56 | range_base + queue); | ||
57 | knav_queue_notify(inst); | ||
58 | } | ||
59 | } | ||
60 | } else { | ||
61 | queue = acc->channel - range->acc_info.start_channel; | ||
62 | inst = knav_range_offset_to_inst(kdev, range, queue); | ||
63 | dev_dbg(kdev->dev, "acc-irq: notifying %d\n", | ||
64 | range_base + queue); | ||
65 | knav_queue_notify(inst); | ||
66 | } | ||
67 | } | ||
68 | |||
69 | static int knav_acc_set_notify(struct knav_range_info *range, | ||
70 | struct knav_queue_inst *kq, | ||
71 | bool enabled) | ||
72 | { | ||
73 | struct knav_pdsp_info *pdsp = range->acc_info.pdsp; | ||
74 | struct knav_device *kdev = range->kdev; | ||
75 | u32 mask, offset; | ||
76 | |||
77 | /* | ||
78 | * when enabling, we need to re-trigger an interrupt if we | ||
79 | * have descriptors pending | ||
80 | */ | ||
81 | if (!enabled || atomic_read(&kq->desc_count) <= 0) | ||
82 | return 0; | ||
83 | |||
84 | kq->notify_needed = 1; | ||
85 | atomic_inc(&kq->acc->retrigger_count); | ||
86 | mask = BIT(kq->acc->channel % 32); | ||
87 | offset = ACC_INTD_OFFSET_STATUS(kq->acc->channel); | ||
88 | dev_dbg(kdev->dev, "setup-notify: re-triggering irq for %s\n", | ||
89 | kq->acc->name); | ||
90 | writel_relaxed(mask, pdsp->intd + offset); | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static irqreturn_t knav_acc_int_handler(int irq, void *_instdata) | ||
95 | { | ||
96 | struct knav_acc_channel *acc; | ||
97 | struct knav_queue_inst *kq = NULL; | ||
98 | struct knav_range_info *range; | ||
99 | struct knav_pdsp_info *pdsp; | ||
100 | struct knav_acc_info *info; | ||
101 | struct knav_device *kdev; | ||
102 | |||
103 | u32 *list, *list_cpu, val, idx, notifies; | ||
104 | int range_base, channel, queue = 0; | ||
105 | dma_addr_t list_dma; | ||
106 | |||
107 | range = _instdata; | ||
108 | info = &range->acc_info; | ||
109 | kdev = range->kdev; | ||
110 | pdsp = range->acc_info.pdsp; | ||
111 | acc = range->acc; | ||
112 | |||
113 | range_base = kdev->base_id + range->queue_base; | ||
114 | if ((range->flags & RANGE_MULTI_QUEUE) == 0) { | ||
115 | for (queue = 0; queue < range->num_irqs; queue++) | ||
116 | if (range->irqs[queue].irq == irq) | ||
117 | break; | ||
118 | kq = knav_range_offset_to_inst(kdev, range, queue); | ||
119 | acc += queue; | ||
120 | } | ||
121 | |||
122 | channel = acc->channel; | ||
123 | list_dma = acc->list_dma[acc->list_index]; | ||
124 | list_cpu = acc->list_cpu[acc->list_index]; | ||
125 | dev_dbg(kdev->dev, "acc-irq: channel %d, list %d, virt %p, phys %x\n", | ||
126 | channel, acc->list_index, list_cpu, list_dma); | ||
127 | if (atomic_read(&acc->retrigger_count)) { | ||
128 | atomic_dec(&acc->retrigger_count); | ||
129 | __knav_acc_notify(range, acc); | ||
130 | writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); | ||
131 | /* ack the interrupt */ | ||
132 | writel_relaxed(ACC_CHANNEL_INT_BASE + channel, | ||
133 | pdsp->intd + ACC_INTD_OFFSET_EOI); | ||
134 | |||
135 | return IRQ_HANDLED; | ||
136 | } | ||
137 | |||
138 | notifies = readl_relaxed(pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); | ||
139 | WARN_ON(!notifies); | ||
140 | dma_sync_single_for_cpu(kdev->dev, list_dma, info->list_size, | ||
141 | DMA_FROM_DEVICE); | ||
142 | |||
143 | for (list = list_cpu; list < list_cpu + (info->list_size / sizeof(u32)); | ||
144 | list += ACC_LIST_ENTRY_WORDS) { | ||
145 | if (ACC_LIST_ENTRY_WORDS == 1) { | ||
146 | dev_dbg(kdev->dev, | ||
147 | "acc-irq: list %d, entry @%p, %08x\n", | ||
148 | acc->list_index, list, list[0]); | ||
149 | } else if (ACC_LIST_ENTRY_WORDS == 2) { | ||
150 | dev_dbg(kdev->dev, | ||
151 | "acc-irq: list %d, entry @%p, %08x %08x\n", | ||
152 | acc->list_index, list, list[0], list[1]); | ||
153 | } else if (ACC_LIST_ENTRY_WORDS == 4) { | ||
154 | dev_dbg(kdev->dev, | ||
155 | "acc-irq: list %d, entry @%p, %08x %08x %08x %08x\n", | ||
156 | acc->list_index, list, list[0], list[1], | ||
157 | list[2], list[3]); | ||
158 | } | ||
159 | |||
160 | val = list[ACC_LIST_ENTRY_DESC_IDX]; | ||
161 | if (!val) | ||
162 | break; | ||
163 | |||
164 | if (range->flags & RANGE_MULTI_QUEUE) { | ||
165 | queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16; | ||
166 | if (queue < range_base || | ||
167 | queue >= range_base + range->num_queues) { | ||
168 | dev_err(kdev->dev, | ||
169 | "bad queue %d, expecting %d-%d\n", | ||
170 | queue, range_base, | ||
171 | range_base + range->num_queues); | ||
172 | break; | ||
173 | } | ||
174 | queue -= range_base; | ||
175 | kq = knav_range_offset_to_inst(kdev, range, | ||
176 | queue); | ||
177 | } | ||
178 | |||
179 | if (atomic_inc_return(&kq->desc_count) >= ACC_DESCS_MAX) { | ||
180 | atomic_dec(&kq->desc_count); | ||
181 | dev_err(kdev->dev, | ||
182 | "acc-irq: queue %d full, entry dropped\n", | ||
183 | queue + range_base); | ||
184 | continue; | ||
185 | } | ||
186 | |||
187 | idx = atomic_inc_return(&kq->desc_tail) & ACC_DESCS_MASK; | ||
188 | kq->descs[idx] = val; | ||
189 | kq->notify_needed = 1; | ||
190 | dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n", | ||
191 | val, idx, queue + range_base); | ||
192 | } | ||
193 | |||
194 | __knav_acc_notify(range, acc); | ||
195 | memset(list_cpu, 0, info->list_size); | ||
196 | dma_sync_single_for_device(kdev->dev, list_dma, info->list_size, | ||
197 | DMA_TO_DEVICE); | ||
198 | |||
199 | /* flip to the other list */ | ||
200 | acc->list_index ^= 1; | ||
201 | |||
202 | /* reset the interrupt counter */ | ||
203 | writel_relaxed(1, pdsp->intd + ACC_INTD_OFFSET_COUNT(channel)); | ||
204 | |||
205 | /* ack the interrupt */ | ||
206 | writel_relaxed(ACC_CHANNEL_INT_BASE + channel, | ||
207 | pdsp->intd + ACC_INTD_OFFSET_EOI); | ||
208 | |||
209 | return IRQ_HANDLED; | ||
210 | } | ||
211 | |||
212 | int knav_range_setup_acc_irq(struct knav_range_info *range, | ||
213 | int queue, bool enabled) | ||
214 | { | ||
215 | struct knav_device *kdev = range->kdev; | ||
216 | struct knav_acc_channel *acc; | ||
217 | unsigned long cpu_map; | ||
218 | int ret = 0, irq; | ||
219 | u32 old, new; | ||
220 | |||
221 | if (range->flags & RANGE_MULTI_QUEUE) { | ||
222 | acc = range->acc; | ||
223 | irq = range->irqs[0].irq; | ||
224 | cpu_map = range->irqs[0].cpu_map; | ||
225 | } else { | ||
226 | acc = range->acc + queue; | ||
227 | irq = range->irqs[queue].irq; | ||
228 | cpu_map = range->irqs[queue].cpu_map; | ||
229 | } | ||
230 | |||
231 | old = acc->open_mask; | ||
232 | if (enabled) | ||
233 | new = old | BIT(queue); | ||
234 | else | ||
235 | new = old & ~BIT(queue); | ||
236 | acc->open_mask = new; | ||
237 | |||
238 | dev_dbg(kdev->dev, | ||
239 | "setup-acc-irq: open mask old %08x, new %08x, channel %s\n", | ||
240 | old, new, acc->name); | ||
241 | |||
242 | if (likely(new == old)) | ||
243 | return 0; | ||
244 | |||
245 | if (new && !old) { | ||
246 | dev_dbg(kdev->dev, | ||
247 | "setup-acc-irq: requesting %s for channel %s\n", | ||
248 | acc->name, acc->name); | ||
249 | ret = request_irq(irq, knav_acc_int_handler, 0, acc->name, | ||
250 | range); | ||
251 | if (!ret && cpu_map) { | ||
252 | ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); | ||
253 | if (ret) { | ||
254 | dev_warn(range->kdev->dev, | ||
255 | "Failed to set IRQ affinity\n"); | ||
256 | return ret; | ||
257 | } | ||
258 | } | ||
259 | } | ||
260 | |||
261 | if (old && !new) { | ||
262 | dev_dbg(kdev->dev, "setup-acc-irq: freeing %s for channel %s\n", | ||
263 | acc->name, acc->name); | ||
264 | free_irq(irq, range); | ||
265 | } | ||
266 | |||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | static const char *knav_acc_result_str(enum knav_acc_result result) | ||
271 | { | ||
272 | static const char * const result_str[] = { | ||
273 | [ACC_RET_IDLE] = "idle", | ||
274 | [ACC_RET_SUCCESS] = "success", | ||
275 | [ACC_RET_INVALID_COMMAND] = "invalid command", | ||
276 | [ACC_RET_INVALID_CHANNEL] = "invalid channel", | ||
277 | [ACC_RET_INACTIVE_CHANNEL] = "inactive channel", | ||
278 | [ACC_RET_ACTIVE_CHANNEL] = "active channel", | ||
279 | [ACC_RET_INVALID_QUEUE] = "invalid queue", | ||
280 | [ACC_RET_INVALID_RET] = "invalid return code", | ||
281 | }; | ||
282 | |||
283 | if (result >= ARRAY_SIZE(result_str)) | ||
284 | return result_str[ACC_RET_INVALID_RET]; | ||
285 | else | ||
286 | return result_str[result]; | ||
287 | } | ||
288 | |||
289 | static enum knav_acc_result | ||
290 | knav_acc_write(struct knav_device *kdev, struct knav_pdsp_info *pdsp, | ||
291 | struct knav_reg_acc_command *cmd) | ||
292 | { | ||
293 | u32 result; | ||
294 | |||
295 | dev_dbg(kdev->dev, "acc command %08x %08x %08x %08x %08x\n", | ||
296 | cmd->command, cmd->queue_mask, cmd->list_phys, | ||
297 | cmd->queue_num, cmd->timer_config); | ||
298 | |||
299 | writel_relaxed(cmd->timer_config, &pdsp->acc_command->timer_config); | ||
300 | writel_relaxed(cmd->queue_num, &pdsp->acc_command->queue_num); | ||
301 | writel_relaxed(cmd->list_phys, &pdsp->acc_command->list_phys); | ||
302 | writel_relaxed(cmd->queue_mask, &pdsp->acc_command->queue_mask); | ||
303 | writel_relaxed(cmd->command, &pdsp->acc_command->command); | ||
304 | |||
305 | /* wait for the command to clear */ | ||
306 | do { | ||
307 | result = readl_relaxed(&pdsp->acc_command->command); | ||
308 | } while ((result >> 8) & 0xff); | ||
309 | |||
310 | return (result >> 24) & 0xff; | ||
311 | } | ||
312 | |||
313 | static void knav_acc_setup_cmd(struct knav_device *kdev, | ||
314 | struct knav_range_info *range, | ||
315 | struct knav_reg_acc_command *cmd, | ||
316 | int queue) | ||
317 | { | ||
318 | struct knav_acc_info *info = &range->acc_info; | ||
319 | struct knav_acc_channel *acc; | ||
320 | int queue_base; | ||
321 | u32 queue_mask; | ||
322 | |||
323 | if (range->flags & RANGE_MULTI_QUEUE) { | ||
324 | acc = range->acc; | ||
325 | queue_base = range->queue_base; | ||
326 | queue_mask = BIT(range->num_queues) - 1; | ||
327 | } else { | ||
328 | acc = range->acc + queue; | ||
329 | queue_base = range->queue_base + queue; | ||
330 | queue_mask = 0; | ||
331 | } | ||
332 | |||
333 | memset(cmd, 0, sizeof(*cmd)); | ||
334 | cmd->command = acc->channel; | ||
335 | cmd->queue_mask = queue_mask; | ||
336 | cmd->list_phys = acc->list_dma[0]; | ||
337 | cmd->queue_num = info->list_entries << 16; | ||
338 | cmd->queue_num |= queue_base; | ||
339 | |||
340 | cmd->timer_config = ACC_LIST_ENTRY_TYPE << 18; | ||
341 | if (range->flags & RANGE_MULTI_QUEUE) | ||
342 | cmd->timer_config |= ACC_CFG_MULTI_QUEUE; | ||
343 | cmd->timer_config |= info->pacing_mode << 16; | ||
344 | cmd->timer_config |= info->timer_count; | ||
345 | } | ||
346 | |||
347 | static void knav_acc_stop(struct knav_device *kdev, | ||
348 | struct knav_range_info *range, | ||
349 | int queue) | ||
350 | { | ||
351 | struct knav_reg_acc_command cmd; | ||
352 | struct knav_acc_channel *acc; | ||
353 | enum knav_acc_result result; | ||
354 | |||
355 | acc = range->acc + queue; | ||
356 | |||
357 | knav_acc_setup_cmd(kdev, range, &cmd, queue); | ||
358 | cmd.command |= ACC_CMD_DISABLE_CHANNEL << 8; | ||
359 | result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd); | ||
360 | |||
361 | dev_dbg(kdev->dev, "stopped acc channel %s, result %s\n", | ||
362 | acc->name, knav_acc_result_str(result)); | ||
363 | } | ||
364 | |||
365 | static enum knav_acc_result knav_acc_start(struct knav_device *kdev, | ||
366 | struct knav_range_info *range, | ||
367 | int queue) | ||
368 | { | ||
369 | struct knav_reg_acc_command cmd; | ||
370 | struct knav_acc_channel *acc; | ||
371 | enum knav_acc_result result; | ||
372 | |||
373 | acc = range->acc + queue; | ||
374 | |||
375 | knav_acc_setup_cmd(kdev, range, &cmd, queue); | ||
376 | cmd.command |= ACC_CMD_ENABLE_CHANNEL << 8; | ||
377 | result = knav_acc_write(kdev, range->acc_info.pdsp, &cmd); | ||
378 | |||
379 | dev_dbg(kdev->dev, "started acc channel %s, result %s\n", | ||
380 | acc->name, knav_acc_result_str(result)); | ||
381 | |||
382 | return result; | ||
383 | } | ||
384 | |||
385 | static int knav_acc_init_range(struct knav_range_info *range) | ||
386 | { | ||
387 | struct knav_device *kdev = range->kdev; | ||
388 | struct knav_acc_channel *acc; | ||
389 | enum knav_acc_result result; | ||
390 | int queue; | ||
391 | |||
392 | for (queue = 0; queue < range->num_queues; queue++) { | ||
393 | acc = range->acc + queue; | ||
394 | |||
395 | knav_acc_stop(kdev, range, queue); | ||
396 | acc->list_index = 0; | ||
397 | result = knav_acc_start(kdev, range, queue); | ||
398 | |||
399 | if (result != ACC_RET_SUCCESS) | ||
400 | return -EIO; | ||
401 | |||
402 | if (range->flags & RANGE_MULTI_QUEUE) | ||
403 | return 0; | ||
404 | } | ||
405 | return 0; | ||
406 | } | ||
407 | |||
408 | static int knav_acc_init_queue(struct knav_range_info *range, | ||
409 | struct knav_queue_inst *kq) | ||
410 | { | ||
411 | unsigned id = kq->id - range->queue_base; | ||
412 | |||
413 | kq->descs = devm_kzalloc(range->kdev->dev, | ||
414 | ACC_DESCS_MAX * sizeof(u32), GFP_KERNEL); | ||
415 | if (!kq->descs) | ||
416 | return -ENOMEM; | ||
417 | |||
418 | kq->acc = range->acc; | ||
419 | if ((range->flags & RANGE_MULTI_QUEUE) == 0) | ||
420 | kq->acc += id; | ||
421 | return 0; | ||
422 | } | ||
423 | |||
424 | static int knav_acc_open_queue(struct knav_range_info *range, | ||
425 | struct knav_queue_inst *inst, unsigned flags) | ||
426 | { | ||
427 | unsigned id = inst->id - range->queue_base; | ||
428 | |||
429 | return knav_range_setup_acc_irq(range, id, true); | ||
430 | } | ||
431 | |||
432 | static int knav_acc_close_queue(struct knav_range_info *range, | ||
433 | struct knav_queue_inst *inst) | ||
434 | { | ||
435 | unsigned id = inst->id - range->queue_base; | ||
436 | |||
437 | return knav_range_setup_acc_irq(range, id, false); | ||
438 | } | ||
439 | |||
440 | static int knav_acc_free_range(struct knav_range_info *range) | ||
441 | { | ||
442 | struct knav_device *kdev = range->kdev; | ||
443 | struct knav_acc_channel *acc; | ||
444 | struct knav_acc_info *info; | ||
445 | int channel, channels; | ||
446 | |||
447 | info = &range->acc_info; | ||
448 | |||
449 | if (range->flags & RANGE_MULTI_QUEUE) | ||
450 | channels = 1; | ||
451 | else | ||
452 | channels = range->num_queues; | ||
453 | |||
454 | for (channel = 0; channel < channels; channel++) { | ||
455 | acc = range->acc + channel; | ||
456 | if (!acc->list_cpu[0]) | ||
457 | continue; | ||
458 | dma_unmap_single(kdev->dev, acc->list_dma[0], | ||
459 | info->mem_size, DMA_BIDIRECTIONAL); | ||
460 | free_pages_exact(acc->list_cpu[0], info->mem_size); | ||
461 | } | ||
462 | devm_kfree(range->kdev->dev, range->acc); | ||
463 | return 0; | ||
464 | } | ||
465 | |||
466 | struct knav_range_ops knav_acc_range_ops = { | ||
467 | .set_notify = knav_acc_set_notify, | ||
468 | .init_queue = knav_acc_init_queue, | ||
469 | .open_queue = knav_acc_open_queue, | ||
470 | .close_queue = knav_acc_close_queue, | ||
471 | .init_range = knav_acc_init_range, | ||
472 | .free_range = knav_acc_free_range, | ||
473 | }; | ||
474 | |||
475 | /** | ||
476 | * knav_init_acc_range: Initialise accumulator ranges | ||
477 | * | ||
478 | * @kdev: qmss device | ||
479 | * @node: device node | ||
480 | * @range: qmss range information | ||
481 | * | ||
482 | * Return 0 on success or error | ||
483 | */ | ||
484 | int knav_init_acc_range(struct knav_device *kdev, | ||
485 | struct device_node *node, | ||
486 | struct knav_range_info *range) | ||
487 | { | ||
488 | struct knav_acc_channel *acc; | ||
489 | struct knav_pdsp_info *pdsp; | ||
490 | struct knav_acc_info *info; | ||
491 | int ret, channel, channels; | ||
492 | int list_size, mem_size; | ||
493 | dma_addr_t list_dma; | ||
494 | void *list_mem; | ||
495 | u32 config[5]; | ||
496 | |||
497 | range->flags |= RANGE_HAS_ACCUMULATOR; | ||
498 | info = &range->acc_info; | ||
499 | |||
500 | ret = of_property_read_u32_array(node, "accumulator", config, 5); | ||
501 | if (ret) | ||
502 | return ret; | ||
503 | |||
504 | info->pdsp_id = config[0]; | ||
505 | info->start_channel = config[1]; | ||
506 | info->list_entries = config[2]; | ||
507 | info->pacing_mode = config[3]; | ||
508 | info->timer_count = config[4] / ACC_DEFAULT_PERIOD; | ||
509 | |||
510 | if (info->start_channel > ACC_MAX_CHANNEL) { | ||
511 | dev_err(kdev->dev, "channel %d invalid for range %s\n", | ||
512 | info->start_channel, range->name); | ||
513 | return -EINVAL; | ||
514 | } | ||
515 | |||
516 | if (info->pacing_mode > 3) { | ||
517 | dev_err(kdev->dev, "pacing mode %d invalid for range %s\n", | ||
518 | info->pacing_mode, range->name); | ||
519 | return -EINVAL; | ||
520 | } | ||
521 | |||
522 | pdsp = knav_find_pdsp(kdev, info->pdsp_id); | ||
523 | if (!pdsp) { | ||
524 | dev_err(kdev->dev, "pdsp id %d not found for range %s\n", | ||
525 | info->pdsp_id, range->name); | ||
526 | return -EINVAL; | ||
527 | } | ||
528 | |||
529 | info->pdsp = pdsp; | ||
530 | channels = range->num_queues; | ||
531 | if (of_get_property(node, "multi-queue", NULL)) { | ||
532 | range->flags |= RANGE_MULTI_QUEUE; | ||
533 | channels = 1; | ||
534 | if (range->queue_base & (32 - 1)) { | ||
535 | dev_err(kdev->dev, | ||
536 | "misaligned multi-queue accumulator range %s\n", | ||
537 | range->name); | ||
538 | return -EINVAL; | ||
539 | } | ||
540 | if (range->num_queues > 32) { | ||
541 | dev_err(kdev->dev, | ||
542 | "too many queues in accumulator range %s\n", | ||
543 | range->name); | ||
544 | return -EINVAL; | ||
545 | } | ||
546 | } | ||
547 | |||
548 | /* figure out list size */ | ||
549 | list_size = info->list_entries; | ||
550 | list_size *= ACC_LIST_ENTRY_WORDS * sizeof(u32); | ||
551 | info->list_size = list_size; | ||
552 | mem_size = PAGE_ALIGN(list_size * 2); | ||
553 | info->mem_size = mem_size; | ||
554 | range->acc = devm_kzalloc(kdev->dev, channels * sizeof(*range->acc), | ||
555 | GFP_KERNEL); | ||
556 | if (!range->acc) | ||
557 | return -ENOMEM; | ||
558 | |||
559 | for (channel = 0; channel < channels; channel++) { | ||
560 | acc = range->acc + channel; | ||
561 | acc->channel = info->start_channel + channel; | ||
562 | |||
563 | /* allocate memory for the two lists */ | ||
564 | list_mem = alloc_pages_exact(mem_size, GFP_KERNEL | GFP_DMA); | ||
565 | if (!list_mem) | ||
566 | return -ENOMEM; | ||
567 | |||
568 | list_dma = dma_map_single(kdev->dev, list_mem, mem_size, | ||
569 | DMA_BIDIRECTIONAL); | ||
570 | if (dma_mapping_error(kdev->dev, list_dma)) { | ||
571 | free_pages_exact(list_mem, mem_size); | ||
572 | return -ENOMEM; | ||
573 | } | ||
574 | |||
575 | memset(list_mem, 0, mem_size); | ||
576 | dma_sync_single_for_device(kdev->dev, list_dma, mem_size, | ||
577 | DMA_TO_DEVICE); | ||
578 | scnprintf(acc->name, sizeof(acc->name), "hwqueue-acc-%d", | ||
579 | acc->channel); | ||
580 | acc->list_cpu[0] = list_mem; | ||
581 | acc->list_cpu[1] = list_mem + list_size; | ||
582 | acc->list_dma[0] = list_dma; | ||
583 | acc->list_dma[1] = list_dma + list_size; | ||
584 | dev_dbg(kdev->dev, "%s: channel %d, phys %08x, virt %8p\n", | ||
585 | acc->name, acc->channel, list_dma, list_mem); | ||
586 | } | ||
587 | |||
588 | range->ops = &knav_acc_range_ops; | ||
589 | return 0; | ||
590 | } | ||
591 | EXPORT_SYMBOL_GPL(knav_init_acc_range); | ||
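For reference, the "accumulator" devicetree property consumed by knav_init_acc_range() above carries five cells. The sketch below (values invented for illustration) shows the mapping the code applies to config[0..4].

/* Illustrative values only; the mapping follows config[0..4] above. */
static const u32 example_accumulator_prop[5] = {
	0,	/* config[0]: pdsp-id	     -> info->pdsp_id */
	36,	/* config[1]: start channel  -> info->start_channel,
		 *	      must not exceed ACC_MAX_CHANNEL (48) */
	128,	/* config[2]: list entries   -> info->list_entries */
	2,	/* config[3]: pacing mode    -> info->pacing_mode, 0..3 */
	50,	/* config[4]: timer period in usecs; info->timer_count
		 *	      = 50 / ACC_DEFAULT_PERIOD (25) = 2 */
};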
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
new file mode 100644
index 000000000000..0a2c8634c48b
--- /dev/null
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -0,0 +1,1816 @@
1 | /* | ||
2 | * Keystone Queue Manager subsystem driver | ||
3 | * | ||
4 | * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com | ||
5 | * Authors: Sandeep Nair <sandeep_n@ti.com> | ||
6 | * Cyril Chemparathy <cyril@ti.com> | ||
7 | * Santosh Shilimkar <santosh.shilimkar@ti.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * version 2 as published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, but | ||
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/clk.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/bitops.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/spinlock.h> | ||
28 | #include <linux/platform_device.h> | ||
29 | #include <linux/dma-mapping.h> | ||
30 | #include <linux/of.h> | ||
31 | #include <linux/of_irq.h> | ||
32 | #include <linux/of_device.h> | ||
33 | #include <linux/of_address.h> | ||
34 | #include <linux/pm_runtime.h> | ||
35 | #include <linux/firmware.h> | ||
36 | #include <linux/debugfs.h> | ||
37 | #include <linux/seq_file.h> | ||
38 | #include <linux/string.h> | ||
39 | #include <linux/soc/ti/knav_qmss.h> | ||
40 | |||
41 | #include "knav_qmss.h" | ||
42 | |||
43 | static struct knav_device *kdev; | ||
44 | static DEFINE_MUTEX(knav_dev_lock); | ||
45 | |||
46 | /* Queue manager register indices in DTS */ | ||
47 | #define KNAV_QUEUE_PEEK_REG_INDEX 0 | ||
48 | #define KNAV_QUEUE_STATUS_REG_INDEX 1 | ||
49 | #define KNAV_QUEUE_CONFIG_REG_INDEX 2 | ||
50 | #define KNAV_QUEUE_REGION_REG_INDEX 3 | ||
51 | #define KNAV_QUEUE_PUSH_REG_INDEX 4 | ||
52 | #define KNAV_QUEUE_POP_REG_INDEX 5 | ||
53 | |||
54 | /* PDSP register indices in DTS */ | ||
55 | #define KNAV_QUEUE_PDSP_IRAM_REG_INDEX 0 | ||
56 | #define KNAV_QUEUE_PDSP_REGS_REG_INDEX 1 | ||
57 | #define KNAV_QUEUE_PDSP_INTD_REG_INDEX 2 | ||
58 | #define KNAV_QUEUE_PDSP_CMD_REG_INDEX 3 | ||
59 | |||
60 | #define knav_queue_idx_to_inst(kdev, idx) \ | ||
61 | (kdev->instances + (idx << kdev->inst_shift)) | ||
62 | |||
63 | #define for_each_handle_rcu(qh, inst) \ | ||
64 | list_for_each_entry_rcu(qh, &inst->handles, list) | ||
65 | |||
66 | #define for_each_instance(idx, inst, kdev) \ | ||
67 | for (idx = 0, inst = kdev->instances; \ | ||
68 | idx < (kdev)->num_queues_in_use; \ | ||
69 | idx++, inst = knav_queue_idx_to_inst(kdev, idx)) | ||
70 | |||
71 | /** | ||
72 | * knav_queue_notify: qmss queue notifier call | ||
73 | * | ||
74 | * @inst: qmss queue instance like accumulator | ||
75 | */ | ||
76 | void knav_queue_notify(struct knav_queue_inst *inst) | ||
77 | { | ||
78 | struct knav_queue *qh; | ||
79 | |||
80 | if (!inst) | ||
81 | return; | ||
82 | |||
83 | rcu_read_lock(); | ||
84 | for_each_handle_rcu(qh, inst) { | ||
85 | if (atomic_read(&qh->notifier_enabled) <= 0) | ||
86 | continue; | ||
87 | if (WARN_ON(!qh->notifier_fn)) | ||
88 | continue; | ||
89 | atomic_inc(&qh->stats.notifies); | ||
90 | qh->notifier_fn(qh->notifier_fn_arg); | ||
91 | } | ||
92 | rcu_read_unlock(); | ||
93 | } | ||
94 | EXPORT_SYMBOL_GPL(knav_queue_notify); | ||
95 | |||
96 | static irqreturn_t knav_queue_int_handler(int irq, void *_instdata) | ||
97 | { | ||
98 | struct knav_queue_inst *inst = _instdata; | ||
99 | |||
100 | knav_queue_notify(inst); | ||
101 | return IRQ_HANDLED; | ||
102 | } | ||
103 | |||
104 | static int knav_queue_setup_irq(struct knav_range_info *range, | ||
105 | struct knav_queue_inst *inst) | ||
106 | { | ||
107 | unsigned queue = inst->id - range->queue_base; | ||
108 | unsigned long cpu_map; | ||
109 | int ret = 0, irq; | ||
110 | |||
111 | if (range->flags & RANGE_HAS_IRQ) { | ||
112 | irq = range->irqs[queue].irq; | ||
113 | cpu_map = range->irqs[queue].cpu_map; | ||
114 | ret = request_irq(irq, knav_queue_int_handler, 0, | ||
115 | inst->irq_name, inst); | ||
116 | if (ret) | ||
117 | return ret; | ||
118 | disable_irq(irq); | ||
119 | if (cpu_map) { | ||
120 | ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map)); | ||
121 | if (ret) { | ||
122 | dev_warn(range->kdev->dev, | ||
123 | "Failed to set IRQ affinity\n"); | ||
124 | return ret; | ||
125 | } | ||
126 | } | ||
127 | } | ||
128 | return ret; | ||
129 | } | ||
130 | |||
131 | static void knav_queue_free_irq(struct knav_queue_inst *inst) | ||
132 | { | ||
133 | struct knav_range_info *range = inst->range; | ||
134 | unsigned queue = inst->id - inst->range->queue_base; | ||
135 | int irq; | ||
136 | |||
137 | if (range->flags & RANGE_HAS_IRQ) { | ||
138 | irq = range->irqs[queue].irq; | ||
139 | irq_set_affinity_hint(irq, NULL); | ||
140 | free_irq(irq, inst); | ||
141 | } | ||
142 | } | ||
143 | |||
144 | static inline bool knav_queue_is_busy(struct knav_queue_inst *inst) | ||
145 | { | ||
146 | return !list_empty(&inst->handles); | ||
147 | } | ||
148 | |||
149 | static inline bool knav_queue_is_reserved(struct knav_queue_inst *inst) | ||
150 | { | ||
151 | return inst->range->flags & RANGE_RESERVED; | ||
152 | } | ||
153 | |||
154 | static inline bool knav_queue_is_shared(struct knav_queue_inst *inst) | ||
155 | { | ||
156 | struct knav_queue *tmp; | ||
157 | |||
158 | rcu_read_lock(); | ||
159 | for_each_handle_rcu(tmp, inst) { | ||
160 | if (tmp->flags & KNAV_QUEUE_SHARED) { | ||
161 | rcu_read_unlock(); | ||
162 | return true; | ||
163 | } | ||
164 | } | ||
165 | rcu_read_unlock(); | ||
166 | return false; | ||
167 | } | ||
168 | |||
169 | static inline bool knav_queue_match_type(struct knav_queue_inst *inst, | ||
170 | unsigned type) | ||
171 | { | ||
172 | if ((type == KNAV_QUEUE_QPEND) && | ||
173 | (inst->range->flags & RANGE_HAS_IRQ)) { | ||
174 | return true; | ||
175 | } else if ((type == KNAV_QUEUE_ACC) && | ||
176 | (inst->range->flags & RANGE_HAS_ACCUMULATOR)) { | ||
177 | return true; | ||
178 | } else if ((type == KNAV_QUEUE_GP) && | ||
179 | !(inst->range->flags & | ||
180 | (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) { | ||
181 | return true; | ||
182 | } | ||
183 | return false; | ||
184 | } | ||
185 | |||
186 | static inline struct knav_queue_inst * | ||
187 | knav_queue_match_id_to_inst(struct knav_device *kdev, unsigned id) | ||
188 | { | ||
189 | struct knav_queue_inst *inst; | ||
190 | int idx; | ||
191 | |||
192 | for_each_instance(idx, inst, kdev) { | ||
193 | if (inst->id == id) | ||
194 | return inst; | ||
195 | } | ||
196 | return NULL; | ||
197 | } | ||
198 | |||
199 | static inline struct knav_queue_inst *knav_queue_find_by_id(int id) | ||
200 | { | ||
201 | if (kdev->base_id <= id && | ||
202 | kdev->base_id + kdev->num_queues > id) { | ||
203 | id -= kdev->base_id; | ||
204 | return knav_queue_match_id_to_inst(kdev, id); | ||
205 | } | ||
206 | return NULL; | ||
207 | } | ||
208 | |||
209 | static struct knav_queue *__knav_queue_open(struct knav_queue_inst *inst, | ||
210 | const char *name, unsigned flags) | ||
211 | { | ||
212 | struct knav_queue *qh; | ||
213 | unsigned id; | ||
214 | int ret = 0; | ||
215 | |||
216 | qh = devm_kzalloc(inst->kdev->dev, sizeof(*qh), GFP_KERNEL); | ||
217 | if (!qh) | ||
218 | return ERR_PTR(-ENOMEM); | ||
219 | |||
220 | qh->flags = flags; | ||
221 | qh->inst = inst; | ||
222 | id = inst->id - inst->qmgr->start_queue; | ||
223 | qh->reg_push = &inst->qmgr->reg_push[id]; | ||
224 | qh->reg_pop = &inst->qmgr->reg_pop[id]; | ||
225 | qh->reg_peek = &inst->qmgr->reg_peek[id]; | ||
226 | |||
227 | /* first opener? */ | ||
228 | if (!knav_queue_is_busy(inst)) { | ||
229 | struct knav_range_info *range = inst->range; | ||
230 | |||
231 | inst->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL); | ||
232 | if (range->ops && range->ops->open_queue) | ||
233 | ret = range->ops->open_queue(range, inst, flags); | ||
234 | |||
235 | if (ret) { | ||
236 | devm_kfree(inst->kdev->dev, qh); | ||
237 | return ERR_PTR(ret); | ||
238 | } | ||
239 | } | ||
240 | list_add_tail_rcu(&qh->list, &inst->handles); | ||
241 | return qh; | ||
242 | } | ||
243 | |||
244 | static struct knav_queue * | ||
245 | knav_queue_open_by_id(const char *name, unsigned id, unsigned flags) | ||
246 | { | ||
247 | struct knav_queue_inst *inst; | ||
248 | struct knav_queue *qh; | ||
249 | |||
250 | mutex_lock(&knav_dev_lock); | ||
251 | |||
252 | qh = ERR_PTR(-ENODEV); | ||
253 | inst = knav_queue_find_by_id(id); | ||
254 | if (!inst) | ||
255 | goto unlock_ret; | ||
256 | |||
257 | qh = ERR_PTR(-EEXIST); | ||
258 | if (!(flags & KNAV_QUEUE_SHARED) && knav_queue_is_busy(inst)) | ||
259 | goto unlock_ret; | ||
260 | |||
261 | qh = ERR_PTR(-EBUSY); | ||
262 | if ((flags & KNAV_QUEUE_SHARED) && | ||
263 | (knav_queue_is_busy(inst) && !knav_queue_is_shared(inst))) | ||
264 | goto unlock_ret; | ||
265 | |||
266 | qh = __knav_queue_open(inst, name, flags); | ||
267 | |||
268 | unlock_ret: | ||
269 | mutex_unlock(&knav_dev_lock); | ||
270 | |||
271 | return qh; | ||
272 | } | ||
273 | |||
274 | static struct knav_queue *knav_queue_open_by_type(const char *name, | ||
275 | unsigned type, unsigned flags) | ||
276 | { | ||
277 | struct knav_queue_inst *inst; | ||
278 | struct knav_queue *qh = ERR_PTR(-EINVAL); | ||
279 | int idx; | ||
280 | |||
281 | mutex_lock(&knav_dev_lock); | ||
282 | |||
283 | for_each_instance(idx, inst, kdev) { | ||
284 | if (knav_queue_is_reserved(inst)) | ||
285 | continue; | ||
286 | if (!knav_queue_match_type(inst, type)) | ||
287 | continue; | ||
288 | if (knav_queue_is_busy(inst)) | ||
289 | continue; | ||
290 | qh = __knav_queue_open(inst, name, flags); | ||
291 | goto unlock_ret; | ||
292 | } | ||
293 | |||
294 | unlock_ret: | ||
295 | mutex_unlock(&knav_dev_lock); | ||
296 | return qh; | ||
297 | } | ||
298 | |||
299 | static void knav_queue_set_notify(struct knav_queue_inst *inst, bool enabled) | ||
300 | { | ||
301 | struct knav_range_info *range = inst->range; | ||
302 | |||
303 | if (range->ops && range->ops->set_notify) | ||
304 | range->ops->set_notify(range, inst, enabled); | ||
305 | } | ||
306 | |||
307 | static int knav_queue_enable_notifier(struct knav_queue *qh) | ||
308 | { | ||
309 | struct knav_queue_inst *inst = qh->inst; | ||
310 | bool first; | ||
311 | |||
312 | if (WARN_ON(!qh->notifier_fn)) | ||
313 | return -EINVAL; | ||
314 | |||
315 | /* Adjust the per handle notifier count */ | ||
316 | first = (atomic_inc_return(&qh->notifier_enabled) == 1); | ||
317 | if (!first) | ||
318 | return 0; /* nothing to do */ | ||
319 | |||
320 | /* Now adjust the per instance notifier count */ | ||
321 | first = (atomic_inc_return(&inst->num_notifiers) == 1); | ||
322 | if (first) | ||
323 | knav_queue_set_notify(inst, true); | ||
324 | |||
325 | return 0; | ||
326 | } | ||
327 | |||
328 | static int knav_queue_disable_notifier(struct knav_queue *qh) | ||
329 | { | ||
330 | struct knav_queue_inst *inst = qh->inst; | ||
331 | bool last; | ||
332 | |||
333 | last = (atomic_dec_return(&qh->notifier_enabled) == 0); | ||
334 | if (!last) | ||
335 | return 0; /* nothing to do */ | ||
336 | |||
337 | last = (atomic_dec_return(&inst->num_notifiers) == 0); | ||
338 | if (last) | ||
339 | knav_queue_set_notify(inst, false); | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static int knav_queue_set_notifier(struct knav_queue *qh, | ||
345 | struct knav_queue_notify_config *cfg) | ||
346 | { | ||
347 | knav_queue_notify_fn old_fn = qh->notifier_fn; | ||
348 | |||
349 | if (!cfg) | ||
350 | return -EINVAL; | ||
351 | |||
352 | if (!(qh->inst->range->flags & (RANGE_HAS_ACCUMULATOR | RANGE_HAS_IRQ))) | ||
353 | return -ENOTSUPP; | ||
354 | |||
355 | if (!cfg->fn && old_fn) | ||
356 | knav_queue_disable_notifier(qh); | ||
357 | |||
358 | qh->notifier_fn = cfg->fn; | ||
359 | qh->notifier_fn_arg = cfg->fn_arg; | ||
360 | |||
361 | if (cfg->fn && !old_fn) | ||
362 | knav_queue_enable_notifier(qh); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | static int knav_gp_set_notify(struct knav_range_info *range, | ||
368 | struct knav_queue_inst *inst, | ||
369 | bool enabled) | ||
370 | { | ||
371 | unsigned queue; | ||
372 | |||
373 | if (range->flags & RANGE_HAS_IRQ) { | ||
374 | queue = inst->id - range->queue_base; | ||
375 | if (enabled) | ||
376 | enable_irq(range->irqs[queue].irq); | ||
377 | else | ||
378 | disable_irq_nosync(range->irqs[queue].irq); | ||
379 | } | ||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static int knav_gp_open_queue(struct knav_range_info *range, | ||
384 | struct knav_queue_inst *inst, unsigned flags) | ||
385 | { | ||
386 | return knav_queue_setup_irq(range, inst); | ||
387 | } | ||
388 | |||
389 | static int knav_gp_close_queue(struct knav_range_info *range, | ||
390 | struct knav_queue_inst *inst) | ||
391 | { | ||
392 | knav_queue_free_irq(inst); | ||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | struct knav_range_ops knav_gp_range_ops = { | ||
397 | .set_notify = knav_gp_set_notify, | ||
398 | .open_queue = knav_gp_open_queue, | ||
399 | .close_queue = knav_gp_close_queue, | ||
400 | }; | ||
401 | |||
402 | |||
403 | static int knav_queue_get_count(void *qhandle) | ||
404 | { | ||
405 | struct knav_queue *qh = qhandle; | ||
406 | struct knav_queue_inst *inst = qh->inst; | ||
407 | |||
408 | return readl_relaxed(&qh->reg_peek[0].entry_count) + | ||
409 | atomic_read(&inst->desc_count); | ||
410 | } | ||
411 | |||
412 | static void knav_queue_debug_show_instance(struct seq_file *s, | ||
413 | struct knav_queue_inst *inst) | ||
414 | { | ||
415 | struct knav_device *kdev = inst->kdev; | ||
416 | struct knav_queue *qh; | ||
417 | |||
418 | if (!knav_queue_is_busy(inst)) | ||
419 | return; | ||
420 | |||
421 | seq_printf(s, "\tqueue id %d (%s)\n", | ||
422 | kdev->base_id + inst->id, inst->name); | ||
423 | for_each_handle_rcu(qh, inst) { | ||
424 | seq_printf(s, "\t\thandle %p: ", qh); | ||
425 | seq_printf(s, "pushes %8d, ", | ||
426 | atomic_read(&qh->stats.pushes)); | ||
427 | seq_printf(s, "pops %8d, ", | ||
428 | atomic_read(&qh->stats.pops)); | ||
429 | seq_printf(s, "count %8d, ", | ||
430 | knav_queue_get_count(qh)); | ||
431 | seq_printf(s, "notifies %8d, ", | ||
432 | atomic_read(&qh->stats.notifies)); | ||
433 | seq_printf(s, "push errors %8d, ", | ||
434 | atomic_read(&qh->stats.push_errors)); | ||
435 | seq_printf(s, "pop errors %8d\n", | ||
436 | atomic_read(&qh->stats.pop_errors)); | ||
437 | } | ||
438 | } | ||
439 | |||
440 | static int knav_queue_debug_show(struct seq_file *s, void *v) | ||
441 | { | ||
442 | struct knav_queue_inst *inst; | ||
443 | int idx; | ||
444 | |||
445 | mutex_lock(&knav_dev_lock); | ||
446 | seq_printf(s, "%s: %u-%u\n", | ||
447 | dev_name(kdev->dev), kdev->base_id, | ||
448 | kdev->base_id + kdev->num_queues - 1); | ||
449 | for_each_instance(idx, inst, kdev) | ||
450 | knav_queue_debug_show_instance(s, inst); | ||
451 | mutex_unlock(&knav_dev_lock); | ||
452 | |||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | static int knav_queue_debug_open(struct inode *inode, struct file *file) | ||
457 | { | ||
458 | return single_open(file, knav_queue_debug_show, NULL); | ||
459 | } | ||
460 | |||
461 | static const struct file_operations knav_queue_debug_ops = { | ||
462 | .open = knav_queue_debug_open, | ||
463 | .read = seq_read, | ||
464 | .llseek = seq_lseek, | ||
465 | .release = single_release, | ||
466 | }; | ||
467 | |||
468 | static inline int knav_queue_pdsp_wait(u32 * __iomem addr, unsigned timeout, | ||
469 | u32 flags) | ||
470 | { | ||
471 | unsigned long end; | ||
472 | u32 val = 0; | ||
473 | |||
474 | end = jiffies + msecs_to_jiffies(timeout); | ||
475 | while (time_after(end, jiffies)) { | ||
476 | val = readl_relaxed(addr); | ||
477 | if (flags) | ||
478 | val &= flags; | ||
479 | if (!val) | ||
480 | break; | ||
481 | cpu_relax(); | ||
482 | } | ||
483 | return val ? -ETIMEDOUT : 0; | ||
484 | } | ||
485 | |||
486 | |||
487 | static int knav_queue_flush(struct knav_queue *qh) | ||
488 | { | ||
489 | struct knav_queue_inst *inst = qh->inst; | ||
490 | unsigned id = inst->id - inst->qmgr->start_queue; | ||
491 | |||
492 | atomic_set(&inst->desc_count, 0); | ||
493 | writel_relaxed(0, &inst->qmgr->reg_push[id].ptr_size_thresh); | ||
494 | return 0; | ||
495 | } | ||
496 | |||
497 | /** | ||
498 | * knav_queue_open() - open a hardware queue | ||
499 | * @name - name to give the queue handle | ||
500 | * @id - desired queue number if any, or specifies the type | ||
501 | * of queue | ||
502 | * @flags - the following flags are applicable to queues: | ||
503 | * KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are | ||
504 | * exclusive by default. | ||
505 | * Subsequent attempts to open a shared queue should | ||
506 | * also have this flag. | ||
507 | * | ||
508 | * Returns a handle to the open hardware queue if successful. Use IS_ERR() | ||
509 | * to check the returned value for error codes. | ||
510 | */ | ||
511 | void *knav_queue_open(const char *name, unsigned id, | ||
512 | unsigned flags) | ||
513 | { | ||
514 | struct knav_queue *qh = ERR_PTR(-EINVAL); | ||
515 | |||
516 | switch (id) { | ||
517 | case KNAV_QUEUE_QPEND: | ||
518 | case KNAV_QUEUE_ACC: | ||
519 | case KNAV_QUEUE_GP: | ||
520 | qh = knav_queue_open_by_type(name, id, flags); | ||
521 | break; | ||
522 | |||
523 | default: | ||
524 | qh = knav_queue_open_by_id(name, id, flags); | ||
525 | break; | ||
526 | } | ||
527 | return qh; | ||
528 | } | ||
529 | EXPORT_SYMBOL_GPL(knav_queue_open); | ||
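/*
 * Hypothetical client-side usage of knav_queue_open() above (not part
 * of this patch); "example-rx" is an arbitrary name and error handling
 * is trimmed to the essentials.
 */
static void *example_client_open(void)
{
	void *qh = knav_queue_open("example-rx", KNAV_QUEUE_GP,
				   KNAV_QUEUE_SHARED);

	return IS_ERR(qh) ? NULL : qh;
}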
530 | |||
531 | /** | ||
532 | * knav_queue_close() - close a hardware queue handle | ||
533 | * @qh - handle to close | ||
534 | */ | ||
535 | void knav_queue_close(void *qhandle) | ||
536 | { | ||
537 | struct knav_queue *qh = qhandle; | ||
538 | struct knav_queue_inst *inst = qh->inst; | ||
539 | |||
540 | while (atomic_read(&qh->notifier_enabled) > 0) | ||
541 | knav_queue_disable_notifier(qh); | ||
542 | |||
543 | mutex_lock(&knav_dev_lock); | ||
544 | list_del_rcu(&qh->list); | ||
545 | mutex_unlock(&knav_dev_lock); | ||
546 | synchronize_rcu(); | ||
547 | if (!knav_queue_is_busy(inst)) { | ||
548 | struct knav_range_info *range = inst->range; | ||
549 | |||
550 | if (range->ops && range->ops->close_queue) | ||
551 | range->ops->close_queue(range, inst); | ||
552 | } | ||
553 | devm_kfree(inst->kdev->dev, qh); | ||
554 | } | ||
555 | EXPORT_SYMBOL_GPL(knav_queue_close); | ||
556 | |||
557 | /** | ||
558 | * knav_queue_device_control() - Perform control operations on a queue | ||
559 | * @qhandle - queue handle | ||
560 | * @cmd - control commands | ||
561 | * @arg - command argument | ||
562 | * | ||
563 | * Returns 0 on success, errno otherwise. | ||
564 | */ | ||
565 | int knav_queue_device_control(void *qhandle, enum knav_queue_ctrl_cmd cmd, | ||
566 | unsigned long arg) | ||
567 | { | ||
568 | struct knav_queue *qh = qhandle; | ||
569 | struct knav_queue_notify_config *cfg; | ||
570 | int ret; | ||
571 | |||
572 | switch ((int)cmd) { | ||
573 | case KNAV_QUEUE_GET_ID: | ||
574 | ret = qh->inst->kdev->base_id + qh->inst->id; | ||
575 | break; | ||
576 | |||
577 | case KNAV_QUEUE_FLUSH: | ||
578 | ret = knav_queue_flush(qh); | ||
579 | break; | ||
580 | |||
581 | case KNAV_QUEUE_SET_NOTIFIER: | ||
582 | cfg = (void *)arg; | ||
583 | ret = knav_queue_set_notifier(qh, cfg); | ||
584 | break; | ||
585 | |||
586 | case KNAV_QUEUE_ENABLE_NOTIFY: | ||
587 | ret = knav_queue_enable_notifier(qh); | ||
588 | break; | ||
589 | |||
590 | case KNAV_QUEUE_DISABLE_NOTIFY: | ||
591 | ret = knav_queue_disable_notifier(qh); | ||
592 | break; | ||
593 | |||
594 | case KNAV_QUEUE_GET_COUNT: | ||
595 | ret = knav_queue_get_count(qh); | ||
596 | break; | ||
597 | |||
598 | default: | ||
599 | ret = -ENOTSUPP; | ||
600 | break; | ||
601 | } | ||
602 | return ret; | ||
603 | } | ||
604 | EXPORT_SYMBOL_GPL(knav_queue_device_control); | ||
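A sketch of typical control calls on an open handle (example_queue_ctrl is hypothetical; qh is a handle returned by knav_queue_open()):

	static void example_queue_ctrl(void *qh)
	{
		int hw_id, count;

		/* global hardware queue number behind this handle */
		hw_id = knav_queue_device_control(qh, KNAV_QUEUE_GET_ID, 0);

		/* descriptors currently sitting on the queue */
		count = knav_queue_device_control(qh, KNAV_QUEUE_GET_COUNT, 0);

		pr_debug("queue %d holds %d descriptors\n", hw_id, count);

		/* discard anything still queued */
		knav_queue_device_control(qh, KNAV_QUEUE_FLUSH, 0);
	}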
605 | |||
606 | |||
607 | |||
608 | /** | ||
609 | * knav_queue_push() - push data (or descriptor) to the tail of a queue | ||
610 | * @qhandle - hardware queue handle | ||
611 | * @dma - DMA address of the data or descriptor to push | ||
612 | * @size - size of data to push | ||
613 | * @flags - can be used to pass additional information | ||
614 | * | ||
615 | * Returns 0 on success, errno otherwise. | ||
616 | */ | ||
617 | int knav_queue_push(void *qhandle, dma_addr_t dma, | ||
618 | unsigned size, unsigned flags) | ||
619 | { | ||
620 | struct knav_queue *qh = qhandle; | ||
621 | u32 val; | ||
622 | |||
623 | val = (u32)dma | ((size / 16) - 1); | ||
624 | writel_relaxed(val, &qh->reg_push[0].ptr_size_thresh); | ||
625 | |||
626 | atomic_inc(&qh->stats.pushes); | ||
627 | return 0; | ||
628 | } | ||
EXPORT_SYMBOL_GPL(knav_queue_push);
629 | |||
630 | /** | ||
631 | * knav_queue_pop() - pop data (or descriptor) from the head of a queue | ||
632 | * @qhandle - hardware queue handle | ||
633 | * @size - (optional) size of the data popped, in bytes. | ||
634 | * | ||
635 | * Returns a DMA address on success, 0 on failure. | ||
636 | */ | ||
637 | dma_addr_t knav_queue_pop(void *qhandle, unsigned *size) | ||
638 | { | ||
639 | struct knav_queue *qh = qhandle; | ||
640 | struct knav_queue_inst *inst = qh->inst; | ||
641 | dma_addr_t dma; | ||
642 | u32 val, idx; | ||
643 | |||
644 | /* is this an accumulator-backed queue? */ | ||
645 | if (inst->descs) { | ||
646 | if (unlikely(atomic_dec_return(&inst->desc_count) < 0)) { | ||
647 | atomic_inc(&inst->desc_count); | ||
648 | return 0; | ||
649 | } | ||
650 | idx = atomic_inc_return(&inst->desc_head); | ||
651 | idx &= ACC_DESCS_MASK; | ||
652 | val = inst->descs[idx]; | ||
653 | } else { | ||
654 | val = readl_relaxed(&qh->reg_pop[0].ptr_size_thresh); | ||
655 | if (unlikely(!val)) | ||
656 | return 0; | ||
657 | } | ||
658 | |||
659 | dma = val & DESC_PTR_MASK; | ||
660 | if (size) | ||
661 | *size = ((val & DESC_SIZE_MASK) + 1) * 16; | ||
662 | |||
663 | atomic_inc(&qh->stats.pops); | ||
664 | return dma; | ||
665 | } | ||
EXPORT_SYMBOL_GPL(knav_queue_pop);
666 | |||
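A push/pop sketch (example_push_pop is hypothetical, desc_dma is assumed to be an already-mapped descriptor address, and the 64-byte size is illustrative):

	static void example_push_pop(void *qh, dma_addr_t desc_dma)
	{
		dma_addr_t dma;
		unsigned size;

		/* enqueue one 64-byte descriptor; the size travels to the
		 * hardware in 16-byte units */
		knav_queue_push(qh, desc_dma, 64, 0);

		/* dequeue: returns 0 when the queue is empty */
		dma = knav_queue_pop(qh, &size);
		if (dma)
			pr_debug("popped %pad, %u bytes\n", &dma, size);
	}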
667 | /* carve out descriptors and push into queue */ | ||
668 | static void kdesc_fill_pool(struct knav_pool *pool) | ||
669 | { | ||
670 | struct knav_region *region; | ||
671 | int i; | ||
672 | |||
673 | region = pool->region; | ||
674 | pool->desc_size = region->desc_size; | ||
675 | for (i = 0; i < pool->num_desc; i++) { | ||
676 | int index = pool->region_offset + i; | ||
677 | dma_addr_t dma_addr; | ||
678 | unsigned dma_size; | ||
679 | dma_addr = region->dma_start + (region->desc_size * index); | ||
680 | dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES); | ||
681 | dma_sync_single_for_device(pool->dev, dma_addr, dma_size, | ||
682 | DMA_TO_DEVICE); | ||
683 | knav_queue_push(pool->queue, dma_addr, dma_size, 0); | ||
684 | } | ||
685 | } | ||
686 | |||
687 | /* pop out descriptors and close the queue */ | ||
688 | static void kdesc_empty_pool(struct knav_pool *pool) | ||
689 | { | ||
690 | dma_addr_t dma; | ||
691 | unsigned size; | ||
692 | void *desc; | ||
693 | int i; | ||
694 | |||
695 | if (!pool->queue) | ||
696 | return; | ||
697 | |||
698 | for (i = 0;; i++) { | ||
699 | dma = knav_queue_pop(pool->queue, &size); | ||
700 | if (!dma) | ||
701 | break; | ||
702 | desc = knav_pool_desc_dma_to_virt(pool, dma); | ||
703 | if (!desc) { | ||
704 | dev_dbg(pool->kdev->dev, | ||
705 | "couldn't unmap desc, continuing\n"); | ||
706 | continue; | ||
707 | } | ||
708 | } | ||
709 | WARN_ON(i != pool->num_desc); | ||
710 | knav_queue_close(pool->queue); | ||
711 | } | ||
712 | |||
713 | |||
714 | /* Get the DMA address of a descriptor */ | ||
715 | dma_addr_t knav_pool_desc_virt_to_dma(void *ph, void *virt) | ||
716 | { | ||
717 | struct knav_pool *pool = ph; | ||
718 | return pool->region->dma_start + (virt - pool->region->virt_start); | ||
719 | } | ||
720 | |||
721 | void *knav_pool_desc_dma_to_virt(void *ph, dma_addr_t dma) | ||
722 | { | ||
723 | struct knav_pool *pool = ph; | ||
724 | return pool->region->virt_start + (dma - pool->region->dma_start); | ||
725 | } | ||
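Both helpers are plain linear offsets into the same region, so they are exact inverses of each other; a sketch of that invariant (check_round_trip is hypothetical):

	static void check_round_trip(void *pool, void *virt)
	{
		dma_addr_t dma = knav_pool_desc_virt_to_dma(pool, virt);

		/* converting back must yield the original pointer */
		WARN_ON(knav_pool_desc_dma_to_virt(pool, dma) != virt);
	}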
726 | |||
727 | /** | ||
728 | * knav_pool_create() - Create a pool of descriptors | ||
729 | * @name - name to give the pool handle | ||
730 | * @num_desc - numbers of descriptors in the pool | ||
731 | * @region_id - QMSS region id from which the descriptors are to be | ||
732 | * allocated. | ||
733 | * | ||
734 | * Returns a pool handle on success. | ||
735 | * Use IS_ERR_OR_NULL() to identify error values on return. | ||
736 | */ | ||
737 | void *knav_pool_create(const char *name, | ||
738 | int num_desc, int region_id) | ||
739 | { | ||
740 | struct knav_region *reg_itr, *region = NULL; | ||
741 | struct knav_pool *pool, *pi; | ||
742 | struct list_head *node; | ||
743 | unsigned last_offset; | ||
744 | bool slot_found; | ||
745 | int ret; | ||
746 | |||
747 | if (!kdev->dev) | ||
748 | return ERR_PTR(-ENODEV); | ||
749 | |||
750 | pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL); | ||
751 | if (!pool) { | ||
752 | dev_err(kdev->dev, "out of memory allocating pool\n"); | ||
753 | return ERR_PTR(-ENOMEM); | ||
754 | } | ||
755 | |||
756 | for_each_region(kdev, reg_itr) { | ||
757 | if (reg_itr->id != region_id) | ||
758 | continue; | ||
759 | region = reg_itr; | ||
760 | break; | ||
761 | } | ||
762 | |||
763 | if (!region) { | ||
764 | dev_err(kdev->dev, "region-id(%d) not found\n", region_id); | ||
765 | ret = -EINVAL; | ||
766 | goto err; | ||
767 | } | ||
768 | |||
769 | pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0); | ||
770 | if (IS_ERR_OR_NULL(pool->queue)) { | ||
771 | dev_err(kdev->dev, | ||
772 | "failed to open queue for pool(%s), error %ld\n", | ||
773 | name, PTR_ERR(pool->queue)); | ||
774 | ret = PTR_ERR(pool->queue); | ||
775 | goto err; | ||
776 | } | ||
777 | |||
778 | pool->name = kstrndup(name, KNAV_NAME_SIZE, GFP_KERNEL); | ||
779 | pool->kdev = kdev; | ||
780 | pool->dev = kdev->dev; | ||
781 | |||
782 | mutex_lock(&knav_dev_lock); | ||
783 | |||
784 | if (num_desc > (region->num_desc - region->used_desc)) { | ||
785 | dev_err(kdev->dev, "out of descs in region(%d) for pool(%s)\n", | ||
786 | region_id, name); | ||
787 | ret = -ENOMEM; | ||
788 | goto err; | ||
789 | } | ||
790 | |||
791 | /* Region maintains a sorted (by region offset) list of pools. | ||
792 | * Use the first free slot which is large enough to accommodate | ||
793 | * the request. | ||
794 | */ | ||
795 | last_offset = 0; | ||
796 | slot_found = false; | ||
797 | node = ®ion->pools; | ||
798 | list_for_each_entry(pi, ®ion->pools, region_inst) { | ||
799 | if ((pi->region_offset - last_offset) >= num_desc) { | ||
800 | slot_found = true; | ||
801 | break; | ||
802 | } | ||
803 | last_offset = pi->region_offset + pi->num_desc; | ||
804 | } | ||
805 | node = &pi->region_inst; | ||
806 | |||
807 | if (slot_found) { | ||
808 | pool->region = region; | ||
809 | pool->num_desc = num_desc; | ||
810 | pool->region_offset = last_offset; | ||
811 | region->used_desc += num_desc; | ||
812 | list_add_tail(&pool->list, &kdev->pools); | ||
813 | list_add_tail(&pool->region_inst, node); | ||
814 | } else { | ||
815 | dev_err(kdev->dev, "pool(%s) create failed: fragmented desc pool in region(%d)\n", | ||
816 | name, region_id); | ||
817 | ret = -ENOMEM; | ||
818 | goto err; | ||
819 | } | ||
820 | |||
821 | mutex_unlock(&knav_dev_lock); | ||
822 | kdesc_fill_pool(pool); | ||
823 | return pool; | ||
824 | |||
825 | err: | ||
826 | mutex_unlock(&knav_dev_lock); | ||
827 | kfree(pool->name); | ||
828 | devm_kfree(kdev->dev, pool); | ||
829 | return ERR_PTR(ret); | ||
830 | } | ||
831 | EXPORT_SYMBOL_GPL(knav_pool_create); | ||
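A creation sketch (the pool name, the 256-descriptor count, and region id 12 are illustrative and must match the device tree descriptor-regions):

	static void *example_pool_create(void)
	{
		/* 256 descriptors carved from QMSS region 12 */
		void *pool = knav_pool_create("example-pool", 256, 12);

		return IS_ERR_OR_NULL(pool) ? NULL : pool;
	}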
832 | |||
833 | /** | ||
834 | * knav_pool_destroy() - Free a pool of descriptors | ||
835 | * @pool - pool handle | ||
836 | */ | ||
837 | void knav_pool_destroy(void *ph) | ||
838 | { | ||
839 | struct knav_pool *pool = ph; | ||
840 | |||
841 | if (!pool) | ||
842 | return; | ||
843 | |||
844 | if (!pool->region) | ||
845 | return; | ||
846 | |||
847 | kdesc_empty_pool(pool); | ||
848 | mutex_lock(&knav_dev_lock); | ||
849 | |||
850 | pool->region->used_desc -= pool->num_desc; | ||
851 | list_del(&pool->region_inst); | ||
852 | list_del(&pool->list); | ||
853 | |||
854 | mutex_unlock(&knav_dev_lock); | ||
855 | kfree(pool->name); | ||
856 | devm_kfree(kdev->dev, pool); | ||
857 | } | ||
858 | EXPORT_SYMBOL_GPL(knav_pool_destroy); | ||
859 | |||
860 | |||
861 | /** | ||
862 | * knav_pool_desc_get() - Get a descriptor from the pool | ||
863 | * @pool - pool handle | ||
864 | * | ||
865 | * Returns descriptor from the pool. | ||
866 | */ | ||
867 | void *knav_pool_desc_get(void *ph) | ||
868 | { | ||
869 | struct knav_pool *pool = ph; | ||
870 | dma_addr_t dma; | ||
871 | unsigned size; | ||
872 | void *data; | ||
873 | |||
874 | dma = knav_queue_pop(pool->queue, &size); | ||
875 | if (unlikely(!dma)) | ||
876 | return ERR_PTR(-ENOMEM); | ||
877 | data = knav_pool_desc_dma_to_virt(pool, dma); | ||
878 | return data; | ||
879 | } | ||
880 | |||
881 | /** | ||
882 | * knav_pool_desc_put() - return a descriptor to the pool | ||
883 | * @pool - pool handle | ||
884 | */ | ||
885 | void knav_pool_desc_put(void *ph, void *desc) | ||
886 | { | ||
887 | struct knav_pool *pool = ph; | ||
888 | dma_addr_t dma; | ||
889 | dma = knav_pool_desc_virt_to_dma(pool, desc); | ||
890 | knav_queue_push(pool->queue, dma, pool->region->desc_size, 0); | ||
891 | } | ||
892 | |||
893 | /** | ||
894 | * knav_pool_desc_map() - Map descriptor for DMA transfer | ||
895 | * @pool - pool handle | ||
896 | * @desc - address of descriptor to map | ||
897 | * @size - size of descriptor to map | ||
898 | * @dma - DMA address return pointer | ||
899 | * @dma_sz - returned size actually mapped (cache-line aligned) | ||
900 | * | ||
901 | * Returns 0 on success, errno otherwise. | ||
902 | */ | ||
903 | int knav_pool_desc_map(void *ph, void *desc, unsigned size, | ||
904 | dma_addr_t *dma, unsigned *dma_sz) | ||
905 | { | ||
906 | struct knav_pool *pool = ph; | ||
907 | *dma = knav_pool_desc_virt_to_dma(pool, desc); | ||
908 | size = min(size, pool->region->desc_size); | ||
909 | size = ALIGN(size, SMP_CACHE_BYTES); | ||
910 | *dma_sz = size; | ||
911 | dma_sync_single_for_device(pool->dev, *dma, size, DMA_TO_DEVICE); | ||
912 | |||
913 | /* Ensure the descriptor reaches memory */ | ||
914 | __iowmb(); | ||
915 | |||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | /** | ||
920 | * knav_pool_desc_unmap() - Unmap descriptor after DMA transfer | ||
921 | * @pool - pool handle | ||
922 | * @dma - DMA address of descriptor to unmap | ||
923 | * @dma_sz - size of descriptor to unmap | ||
924 | * | ||
925 | * Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify | ||
926 | * error values on return. | ||
927 | */ | ||
928 | void *knav_pool_desc_unmap(void *ph, dma_addr_t dma, unsigned dma_sz) | ||
929 | { | ||
930 | struct knav_pool *pool = ph; | ||
931 | unsigned desc_sz; | ||
932 | void *desc; | ||
933 | |||
934 | desc_sz = min(dma_sz, pool->region->desc_size); | ||
935 | desc = knav_pool_desc_dma_to_virt(pool, dma); | ||
936 | dma_sync_single_for_cpu(pool->dev, dma, desc_sz, DMA_FROM_DEVICE); | ||
937 | prefetch(desc); | ||
938 | return desc; | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * knav_pool_count() - Get the number of descriptors in pool. | ||
943 | * @pool - pool handle | ||
944 | * Returns number of elements in the pool. | ||
945 | */ | ||
946 | int knav_pool_count(void *ph) | ||
947 | { | ||
948 | struct knav_pool *pool = ph; | ||
949 | return knav_queue_get_count(pool->queue); | ||
950 | } | ||
951 | |||
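Putting the pool and queue calls together, a hypothetical transmit/completion path (example_xmit, txq, complq, and the 64-byte map size are all assumptions for illustration):

	static int example_xmit(void *pool, void *txq, void *complq)
	{
		unsigned dma_sz;
		dma_addr_t dma;
		void *desc;

		/* transmit: take a descriptor, fill it, hand it to hardware */
		desc = knav_pool_desc_get(pool);
		if (IS_ERR(desc))
			return PTR_ERR(desc);
		/* ... populate the hardware descriptor here ... */
		knav_pool_desc_map(pool, desc, 64, &dma, &dma_sz);
		knav_queue_push(txq, dma, dma_sz, 0);

		/* completion: unmap the descriptor, return it to the pool */
		dma = knav_queue_pop(complq, &dma_sz);
		if (dma) {
			desc = knav_pool_desc_unmap(pool, dma, dma_sz);
			knav_pool_desc_put(pool, desc);
		}
		return 0;
	}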
952 | static void knav_queue_setup_region(struct knav_device *kdev, | ||
953 | struct knav_region *region) | ||
954 | { | ||
955 | unsigned hw_num_desc, hw_desc_size, size; | ||
956 | struct knav_reg_region __iomem *regs; | ||
957 | struct knav_qmgr_info *qmgr; | ||
958 | struct knav_pool *pool; | ||
959 | int id = region->id; | ||
960 | struct page *page; | ||
961 | |||
962 | /* unused region? */ | ||
963 | if (!region->num_desc) { | ||
964 | dev_warn(kdev->dev, "unused region %s\n", region->name); | ||
965 | return; | ||
966 | } | ||
967 | |||
968 | /* get hardware descriptor value */ | ||
969 | hw_num_desc = ilog2(region->num_desc - 1) + 1; | ||
970 | |||
971 | /* did we force fit ourselves into nothingness? */ | ||
972 | if (region->num_desc < 32) { | ||
973 | region->num_desc = 0; | ||
974 | dev_warn(kdev->dev, "too few descriptors in region %s\n", | ||
975 | region->name); | ||
976 | return; | ||
977 | } | ||
978 | |||
979 | size = region->num_desc * region->desc_size; | ||
980 | region->virt_start = alloc_pages_exact(size, GFP_KERNEL | GFP_DMA | | ||
981 | GFP_DMA32); | ||
982 | if (!region->virt_start) { | ||
983 | region->num_desc = 0; | ||
984 | dev_err(kdev->dev, "memory alloc failed for region %s\n", | ||
985 | region->name); | ||
986 | return; | ||
987 | } | ||
988 | region->virt_end = region->virt_start + size; | ||
989 | page = virt_to_page(region->virt_start); | ||
990 | |||
991 | region->dma_start = dma_map_page(kdev->dev, page, 0, size, | ||
992 | DMA_BIDIRECTIONAL); | ||
993 | if (dma_mapping_error(kdev->dev, region->dma_start)) { | ||
994 | dev_err(kdev->dev, "dma map failed for region %s\n", | ||
995 | region->name); | ||
996 | goto fail; | ||
997 | } | ||
998 | region->dma_end = region->dma_start + size; | ||
999 | |||
1000 | pool = devm_kzalloc(kdev->dev, sizeof(*pool), GFP_KERNEL); | ||
1001 | if (!pool) { | ||
1002 | dev_err(kdev->dev, "out of memory allocating dummy pool\n"); | ||
1003 | goto fail; | ||
1004 | } | ||
1005 | pool->num_desc = 0; | ||
1006 | pool->region_offset = region->num_desc; | ||
1007 | list_add(&pool->region_inst, ®ion->pools); | ||
1008 | |||
1009 | dev_dbg(kdev->dev, | ||
1010 | "region %s (%d): size:%d, link:%d@%d, phys:%08x-%08x, virt:%p-%p\n", | ||
1011 | region->name, id, region->desc_size, region->num_desc, | ||
1012 | region->link_index, region->dma_start, region->dma_end, | ||
1013 | region->virt_start, region->virt_end); | ||
1014 | |||
1015 | hw_desc_size = (region->desc_size / 16) - 1; | ||
1016 | hw_num_desc -= 5; | ||
1017 | |||
1018 | for_each_qmgr(kdev, qmgr) { | ||
1019 | regs = qmgr->reg_region + id; | ||
1020 | writel_relaxed(region->dma_start, ®s->base); | ||
1021 | writel_relaxed(region->link_index, ®s->start_index); | ||
1022 | writel_relaxed(hw_desc_size << 16 | hw_num_desc, | ||
1023 | ®s->size_count); | ||
1024 | } | ||
1025 | return; | ||
1026 | |||
1027 | fail: | ||
1028 | if (region->dma_start) | ||
1029 | dma_unmap_page(kdev->dev, region->dma_start, size, | ||
1030 | DMA_BIDIRECTIONAL); | ||
1031 | if (region->virt_start) | ||
1032 | free_pages_exact(region->virt_start, size); | ||
1033 | region->num_desc = 0; | ||
1034 | return; | ||
1035 | } | ||
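To make the size_count encoding above concrete, a worked example with assumed region-spec values:

	/*
	 * region-spec = <4096 128>:  4096 descriptors of 128 bytes each
	 *
	 *   hw_num_desc  = ilog2(4096 - 1) + 1 = 12; the hardware encodes
	 *                  the count as 2^(5 + n), hence the -= 5  ->  7
	 *   hw_desc_size = 128 / 16 - 1        = 7   (16-byte granules)
	 *   size_count   = (7 << 16) | 7       = 0x00070007
	 */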
1036 | |||
1037 | static const char *knav_queue_find_name(struct device_node *node) | ||
1038 | { | ||
1039 | const char *name; | ||
1040 | |||
1041 | if (of_property_read_string(node, "label", &name) < 0) | ||
1042 | name = node->name; | ||
1043 | if (!name) | ||
1044 | name = "unknown"; | ||
1045 | return name; | ||
1046 | } | ||
1047 | |||
1048 | static int knav_queue_setup_regions(struct knav_device *kdev, | ||
1049 | struct device_node *regions) | ||
1050 | { | ||
1051 | struct device *dev = kdev->dev; | ||
1052 | struct knav_region *region; | ||
1053 | struct device_node *child; | ||
1054 | u32 temp[2]; | ||
1055 | int ret; | ||
1056 | |||
1057 | for_each_child_of_node(regions, child) { | ||
1058 | region = devm_kzalloc(dev, sizeof(*region), GFP_KERNEL); | ||
1059 | if (!region) { | ||
1060 | dev_err(dev, "out of memory allocating region\n"); | ||
1061 | return -ENOMEM; | ||
1062 | } | ||
1063 | |||
1064 | region->name = knav_queue_find_name(child); | ||
1065 | of_property_read_u32(child, "id", ®ion->id); | ||
1066 | ret = of_property_read_u32_array(child, "region-spec", temp, 2); | ||
1067 | if (!ret) { | ||
1068 | region->num_desc = temp[0]; | ||
1069 | region->desc_size = temp[1]; | ||
1070 | } else { | ||
1071 | dev_err(dev, "invalid region info %s\n", region->name); | ||
1072 | devm_kfree(dev, region); | ||
1073 | continue; | ||
1074 | } | ||
1075 | |||
1076 | if (!of_get_property(child, "link-index", NULL)) { | ||
1077 | dev_err(dev, "No link info for %s\n", region->name); | ||
1078 | devm_kfree(dev, region); | ||
1079 | continue; | ||
1080 | } | ||
1081 | ret = of_property_read_u32(child, "link-index", | ||
1082 | ®ion->link_index); | ||
1083 | if (ret) { | ||
1084 | dev_err(dev, "link index not found for %s\n", | ||
1085 | region->name); | ||
1086 | devm_kfree(dev, region); | ||
1087 | continue; | ||
1088 | } | ||
1089 | |||
1090 | INIT_LIST_HEAD(®ion->pools); | ||
1091 | list_add_tail(®ion->list, &kdev->regions); | ||
1092 | } | ||
1093 | if (list_empty(&kdev->regions)) { | ||
1094 | dev_err(dev, "no valid region information found\n"); | ||
1095 | return -ENODEV; | ||
1096 | } | ||
1097 | |||
1098 | /* Next, we run through the regions and set things up */ | ||
1099 | for_each_region(kdev, region) | ||
1100 | knav_queue_setup_region(kdev, region); | ||
1101 | |||
1102 | return 0; | ||
1103 | } | ||
1104 | |||
1105 | static int knav_get_link_ram(struct knav_device *kdev, | ||
1106 | const char *name, | ||
1107 | struct knav_link_ram_block *block) | ||
1108 | { | ||
1109 | struct platform_device *pdev = to_platform_device(kdev->dev); | ||
1110 | struct device_node *node = pdev->dev.of_node; | ||
1111 | u32 temp[2]; | ||
1112 | |||
1113 | /* | ||
1114 | * Note: link ram resources are specified in "entry" sized units. In | ||
1115 | * reality, although entries are ~40bits in hardware, we treat them as | ||
1116 | * 64-bit entities here. | ||
1117 | * | ||
1118 | * For example, to specify the internal link ram for Keystone-I class | ||
1119 | * devices, we would set the linkram0 resource to 0x80000-0x83fff. | ||
1120 | * | ||
1121 | * This gets a bit weird when other link rams are used. For example, | ||
1122 | * if the range specified is 0x0c000000-0x0c003fff (i.e., 16K entries | ||
1123 | * in MSMC SRAM), the actual memory used is 0x0c000000-0x0c020000, | ||
1124 | * which accounts for 64-bits per entry, for 16K entries. | ||
1125 | */ | ||
1126 | if (!of_property_read_u32_array(node, name, temp, 2)) { | ||
1127 | if (temp[0]) { | ||
1128 | /* | ||
1129 | * queue_base specified => using internal or onchip | ||
1130 | * link ram WARNING - we do not "reserve" this block | ||
1131 | */ | ||
1132 | block->phys = (dma_addr_t)temp[0]; | ||
1133 | block->virt = NULL; | ||
1134 | block->size = temp[1]; | ||
1135 | } else { | ||
1136 | block->size = temp[1]; | ||
1137 | /* queue_base not specified => allocate requested size */ | ||
1138 | block->virt = dmam_alloc_coherent(kdev->dev, | ||
1139 | 8 * block->size, &block->phys, | ||
1140 | GFP_KERNEL); | ||
1141 | if (!block->virt) { | ||
1142 | dev_err(kdev->dev, "failed to alloc linkram\n"); | ||
1143 | return -ENOMEM; | ||
1144 | } | ||
1145 | } | ||
1146 | } else { | ||
1147 | return -ENODEV; | ||
1148 | } | ||
1149 | return 0; | ||
1150 | } | ||
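A worked example of the entry-sized units described in the comment above (the property values are illustrative):

	/*
	 * linkram0 = <0x0c000000 0x4000>;  base in MSMC SRAM, 16K entries
	 *
	 * bytes consumed = 8 * 0x4000 = 0x20000, so the block really spans
	 * 0x0c000000 - 0x0c020000 even though the property is written in
	 * entry-sized units
	 */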
1151 | |||
1152 | static int knav_queue_setup_link_ram(struct knav_device *kdev) | ||
1153 | { | ||
1154 | struct knav_link_ram_block *block; | ||
1155 | struct knav_qmgr_info *qmgr; | ||
1156 | |||
1157 | for_each_qmgr(kdev, qmgr) { | ||
1158 | block = &kdev->link_rams[0]; | ||
1159 | dev_dbg(kdev->dev, "linkram0: phys:%x, virt:%p, size:%x\n", | ||
1160 | block->phys, block->virt, block->size); | ||
1161 | writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base0); | ||
1162 | writel_relaxed(block->size, &qmgr->reg_config->link_ram_size0); | ||
1163 | |||
1164 | block++; | ||
1165 | if (!block->size) | ||
1166 | continue; /* no linkram1; still set up remaining qmgrs */ | ||
1167 | |||
1168 | dev_dbg(kdev->dev, "linkram1: phys:%x, virt:%p, size:%x\n", | ||
1169 | block->phys, block->virt, block->size); | ||
1170 | writel_relaxed(block->phys, &qmgr->reg_config->link_ram_base1); | ||
1171 | } | ||
1172 | |||
1173 | return 0; | ||
1174 | } | ||
1175 | |||
1176 | static int knav_setup_queue_range(struct knav_device *kdev, | ||
1177 | struct device_node *node) | ||
1178 | { | ||
1179 | struct device *dev = kdev->dev; | ||
1180 | struct knav_range_info *range; | ||
1181 | struct knav_qmgr_info *qmgr; | ||
1182 | u32 temp[2], start, end, id, index; | ||
1183 | int ret, i; | ||
1184 | |||
1185 | range = devm_kzalloc(dev, sizeof(*range), GFP_KERNEL); | ||
1186 | if (!range) { | ||
1187 | dev_err(dev, "out of memory allocating range\n"); | ||
1188 | return -ENOMEM; | ||
1189 | } | ||
1190 | |||
1191 | range->kdev = kdev; | ||
1192 | range->name = knav_queue_find_name(node); | ||
1193 | ret = of_property_read_u32_array(node, "qrange", temp, 2); | ||
1194 | if (!ret) { | ||
1195 | range->queue_base = temp[0] - kdev->base_id; | ||
1196 | range->num_queues = temp[1]; | ||
1197 | } else { | ||
1198 | dev_err(dev, "invalid queue range %s\n", range->name); | ||
1199 | devm_kfree(dev, range); | ||
1200 | return -EINVAL; | ||
1201 | } | ||
1202 | |||
1203 | for (i = 0; i < RANGE_MAX_IRQS; i++) { | ||
1204 | struct of_phandle_args oirq; | ||
1205 | |||
1206 | if (of_irq_parse_one(node, i, &oirq)) | ||
1207 | break; | ||
1208 | |||
1209 | range->irqs[i].irq = irq_create_of_mapping(&oirq); | ||
1210 | if (range->irqs[i].irq == IRQ_NONE) | ||
1211 | break; | ||
1212 | |||
1213 | range->num_irqs++; | ||
1214 | |||
1215 | if (oirq.args_count == 3) | ||
1216 | range->irqs[i].cpu_map = | ||
1217 | (oirq.args[2] & 0x0000ff00) >> 8; | ||
1218 | } | ||
1219 | |||
1220 | range->num_irqs = min(range->num_irqs, range->num_queues); | ||
1221 | if (range->num_irqs) | ||
1222 | range->flags |= RANGE_HAS_IRQ; | ||
1223 | |||
1224 | if (of_get_property(node, "qalloc-by-id", NULL)) | ||
1225 | range->flags |= RANGE_RESERVED; | ||
1226 | |||
1227 | if (of_get_property(node, "accumulator", NULL)) { | ||
1228 | ret = knav_init_acc_range(kdev, node, range); | ||
1229 | if (ret < 0) { | ||
1230 | devm_kfree(dev, range); | ||
1231 | return ret; | ||
1232 | } | ||
1233 | } else { | ||
1234 | range->ops = &knav_gp_range_ops; | ||
1235 | } | ||
1236 | |||
1237 | /* set threshold to 1, and flush out the queues */ | ||
1238 | for_each_qmgr(kdev, qmgr) { | ||
1239 | start = max(qmgr->start_queue, range->queue_base); | ||
1240 | end = min(qmgr->start_queue + qmgr->num_queues, | ||
1241 | range->queue_base + range->num_queues); | ||
1242 | for (id = start; id < end; id++) { | ||
1243 | index = id - qmgr->start_queue; | ||
1244 | writel_relaxed(THRESH_GTE | 1, | ||
1245 | &qmgr->reg_peek[index].ptr_size_thresh); | ||
1246 | writel_relaxed(0, | ||
1247 | &qmgr->reg_push[index].ptr_size_thresh); | ||
1248 | } | ||
1249 | } | ||
1250 | |||
1251 | list_add_tail(&range->list, &kdev->queue_ranges); | ||
1252 | dev_dbg(dev, "added range %s: %d-%d, %d irqs%s%s%s\n", | ||
1253 | range->name, range->queue_base, | ||
1254 | range->queue_base + range->num_queues - 1, | ||
1255 | range->num_irqs, | ||
1256 | (range->flags & RANGE_HAS_IRQ) ? ", has irq" : "", | ||
1257 | (range->flags & RANGE_RESERVED) ? ", reserved" : "", | ||
1258 | (range->flags & RANGE_HAS_ACCUMULATOR) ? ", acc" : ""); | ||
1259 | kdev->num_queues_in_use += range->num_queues; | ||
1260 | return 0; | ||
1261 | } | ||
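The cpu_map extraction above takes bits 15:8 of the optional third interrupt specifier cell; a worked example with an assumed specifier value:

	/*
	 * interrupts = <0 47 0xf04>;   (illustrative)
	 *
	 * cpu_map = (0xf04 & 0x0000ff00) >> 8 = 0x0f, i.e. the accumulator
	 * interrupt may be routed to CPUs 0-3
	 */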
1262 | |||
1263 | static int knav_setup_queue_pools(struct knav_device *kdev, | ||
1264 | struct device_node *queue_pools) | ||
1265 | { | ||
1266 | struct device_node *type, *range; | ||
1267 | int ret; | ||
1268 | |||
1269 | for_each_child_of_node(queue_pools, type) { | ||
1270 | for_each_child_of_node(type, range) { | ||
1271 | ret = knav_setup_queue_range(kdev, range); | ||
1272 | /* return value ignored, we init the rest... */ | ||
1273 | } | ||
1274 | } | ||
1275 | |||
1276 | /* ... and barf if they all failed! */ | ||
1277 | if (list_empty(&kdev->queue_ranges)) { | ||
1278 | dev_err(kdev->dev, "no valid queue range found\n"); | ||
1279 | return -ENODEV; | ||
1280 | } | ||
1281 | return 0; | ||
1282 | } | ||
1283 | |||
1284 | static void knav_free_queue_range(struct knav_device *kdev, | ||
1285 | struct knav_range_info *range) | ||
1286 | { | ||
1287 | if (range->ops && range->ops->free_range) | ||
1288 | range->ops->free_range(range); | ||
1289 | list_del(&range->list); | ||
1290 | devm_kfree(kdev->dev, range); | ||
1291 | } | ||
1292 | |||
1293 | static void knav_free_queue_ranges(struct knav_device *kdev) | ||
1294 | { | ||
1295 | struct knav_range_info *range; | ||
1296 | |||
1297 | for (;;) { | ||
1298 | range = first_queue_range(kdev); | ||
1299 | if (!range) | ||
1300 | break; | ||
1301 | knav_free_queue_range(kdev, range); | ||
1302 | } | ||
1303 | } | ||
1304 | |||
1305 | static void knav_queue_free_regions(struct knav_device *kdev) | ||
1306 | { | ||
1307 | struct knav_region *region; | ||
1308 | struct knav_pool *pool, *tmp; | ||
1309 | unsigned size; | ||
1310 | |||
1311 | for (;;) { | ||
1312 | region = first_region(kdev); | ||
1313 | if (!region) | ||
1314 | break; | ||
1315 | list_for_each_entry_safe(pool, tmp, &region->pools, region_inst) | ||
1316 | knav_pool_destroy(pool); | ||
1317 | |||
1318 | size = region->virt_end - region->virt_start; | ||
1319 | if (size) | ||
1320 | free_pages_exact(region->virt_start, size); | ||
1321 | list_del(®ion->list); | ||
1322 | devm_kfree(kdev->dev, region); | ||
1323 | } | ||
1324 | } | ||
1325 | |||
1326 | static void __iomem *knav_queue_map_reg(struct knav_device *kdev, | ||
1327 | struct device_node *node, int index) | ||
1328 | { | ||
1329 | struct resource res; | ||
1330 | void __iomem *regs; | ||
1331 | int ret; | ||
1332 | |||
1333 | ret = of_address_to_resource(node, index, &res); | ||
1334 | if (ret) { | ||
1335 | dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n", | ||
1336 | node->name, index); | ||
1337 | return ERR_PTR(ret); | ||
1338 | } | ||
1339 | |||
1340 | regs = devm_ioremap_resource(kdev->dev, &res); | ||
1341 | if (IS_ERR(regs)) | ||
1342 | dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n", | ||
1343 | index, node->name); | ||
1344 | return regs; | ||
1345 | } | ||
1346 | |||
1347 | static int knav_queue_init_qmgrs(struct knav_device *kdev, | ||
1348 | struct device_node *qmgrs) | ||
1349 | { | ||
1350 | struct device *dev = kdev->dev; | ||
1351 | struct knav_qmgr_info *qmgr; | ||
1352 | struct device_node *child; | ||
1353 | u32 temp[2]; | ||
1354 | int ret; | ||
1355 | |||
1356 | for_each_child_of_node(qmgrs, child) { | ||
1357 | qmgr = devm_kzalloc(dev, sizeof(*qmgr), GFP_KERNEL); | ||
1358 | if (!qmgr) { | ||
1359 | dev_err(dev, "out of memory allocating qmgr\n"); | ||
1360 | return -ENOMEM; | ||
1361 | } | ||
1362 | |||
1363 | ret = of_property_read_u32_array(child, "managed-queues", | ||
1364 | temp, 2); | ||
1365 | if (!ret) { | ||
1366 | qmgr->start_queue = temp[0]; | ||
1367 | qmgr->num_queues = temp[1]; | ||
1368 | } else { | ||
1369 | dev_err(dev, "invalid qmgr queue range\n"); | ||
1370 | devm_kfree(dev, qmgr); | ||
1371 | continue; | ||
1372 | } | ||
1373 | |||
1374 | dev_info(dev, "qmgr start queue %d, number of queues %d\n", | ||
1375 | qmgr->start_queue, qmgr->num_queues); | ||
1376 | |||
1377 | qmgr->reg_peek = | ||
1378 | knav_queue_map_reg(kdev, child, | ||
1379 | KNAV_QUEUE_PEEK_REG_INDEX); | ||
1380 | qmgr->reg_status = | ||
1381 | knav_queue_map_reg(kdev, child, | ||
1382 | KNAV_QUEUE_STATUS_REG_INDEX); | ||
1383 | qmgr->reg_config = | ||
1384 | knav_queue_map_reg(kdev, child, | ||
1385 | KNAV_QUEUE_CONFIG_REG_INDEX); | ||
1386 | qmgr->reg_region = | ||
1387 | knav_queue_map_reg(kdev, child, | ||
1388 | KNAV_QUEUE_REGION_REG_INDEX); | ||
1389 | qmgr->reg_push = | ||
1390 | knav_queue_map_reg(kdev, child, | ||
1391 | KNAV_QUEUE_PUSH_REG_INDEX); | ||
1392 | qmgr->reg_pop = | ||
1393 | knav_queue_map_reg(kdev, child, | ||
1394 | KNAV_QUEUE_POP_REG_INDEX); | ||
1395 | |||
1396 | if (IS_ERR(qmgr->reg_peek) || IS_ERR(qmgr->reg_status) || | ||
1397 | IS_ERR(qmgr->reg_config) || IS_ERR(qmgr->reg_region) || | ||
1398 | IS_ERR(qmgr->reg_push) || IS_ERR(qmgr->reg_pop)) { | ||
1399 | dev_err(dev, "failed to map qmgr regs\n"); | ||
1400 | if (!IS_ERR(qmgr->reg_peek)) | ||
1401 | devm_iounmap(dev, qmgr->reg_peek); | ||
1402 | if (!IS_ERR(qmgr->reg_status)) | ||
1403 | devm_iounmap(dev, qmgr->reg_status); | ||
1404 | if (!IS_ERR(qmgr->reg_config)) | ||
1405 | devm_iounmap(dev, qmgr->reg_config); | ||
1406 | if (!IS_ERR(qmgr->reg_region)) | ||
1407 | devm_iounmap(dev, qmgr->reg_region); | ||
1408 | if (!IS_ERR(qmgr->reg_push)) | ||
1409 | devm_iounmap(dev, qmgr->reg_push); | ||
1410 | if (!IS_ERR(qmgr->reg_pop)) | ||
1411 | devm_iounmap(dev, qmgr->reg_pop); | ||
1412 | devm_kfree(dev, qmgr); | ||
1413 | continue; | ||
1414 | } | ||
1415 | |||
1416 | list_add_tail(&qmgr->list, &kdev->qmgrs); | ||
1417 | dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n", | ||
1418 | qmgr->start_queue, qmgr->num_queues, | ||
1419 | qmgr->reg_peek, qmgr->reg_status, | ||
1420 | qmgr->reg_config, qmgr->reg_region, | ||
1421 | qmgr->reg_push, qmgr->reg_pop); | ||
1422 | } | ||
1423 | return 0; | ||
1424 | } | ||
1425 | |||
1426 | static int knav_queue_init_pdsps(struct knav_device *kdev, | ||
1427 | struct device_node *pdsps) | ||
1428 | { | ||
1429 | struct device *dev = kdev->dev; | ||
1430 | struct knav_pdsp_info *pdsp; | ||
1431 | struct device_node *child; | ||
1432 | int ret; | ||
1433 | |||
1434 | for_each_child_of_node(pdsps, child) { | ||
1435 | pdsp = devm_kzalloc(dev, sizeof(*pdsp), GFP_KERNEL); | ||
1436 | if (!pdsp) { | ||
1437 | dev_err(dev, "out of memory allocating pdsp\n"); | ||
1438 | return -ENOMEM; | ||
1439 | } | ||
1440 | pdsp->name = knav_queue_find_name(child); | ||
1441 | ret = of_property_read_string(child, "firmware", | ||
1442 | &pdsp->firmware); | ||
1443 | if (ret < 0 || !pdsp->firmware) { | ||
1444 | dev_err(dev, "unknown firmware for pdsp %s\n", | ||
1445 | pdsp->name); | ||
1446 | devm_kfree(dev, pdsp); | ||
1447 | continue; | ||
1448 | } | ||
1449 | dev_dbg(dev, "pdsp name %s fw name :%s\n", pdsp->name, | ||
1450 | pdsp->firmware); | ||
1451 | |||
1452 | pdsp->iram = | ||
1453 | knav_queue_map_reg(kdev, child, | ||
1454 | KNAV_QUEUE_PDSP_IRAM_REG_INDEX); | ||
1455 | pdsp->regs = | ||
1456 | knav_queue_map_reg(kdev, child, | ||
1457 | KNAV_QUEUE_PDSP_REGS_REG_INDEX); | ||
1458 | pdsp->intd = | ||
1459 | knav_queue_map_reg(kdev, child, | ||
1460 | KNAV_QUEUE_PDSP_INTD_REG_INDEX); | ||
1461 | pdsp->command = | ||
1462 | knav_queue_map_reg(kdev, child, | ||
1463 | KNAV_QUEUE_PDSP_CMD_REG_INDEX); | ||
1464 | |||
1465 | if (IS_ERR(pdsp->command) || IS_ERR(pdsp->iram) || | ||
1466 | IS_ERR(pdsp->regs) || IS_ERR(pdsp->intd)) { | ||
1467 | dev_err(dev, "failed to map pdsp %s regs\n", | ||
1468 | pdsp->name); | ||
1469 | if (!IS_ERR(pdsp->command)) | ||
1470 | devm_iounmap(dev, pdsp->command); | ||
1471 | if (!IS_ERR(pdsp->iram)) | ||
1472 | devm_iounmap(dev, pdsp->iram); | ||
1473 | if (!IS_ERR(pdsp->regs)) | ||
1474 | devm_iounmap(dev, pdsp->regs); | ||
1475 | if (!IS_ERR(pdsp->intd)) | ||
1476 | devm_iounmap(dev, pdsp->intd); | ||
1477 | devm_kfree(dev, pdsp); | ||
1478 | continue; | ||
1479 | } | ||
1480 | of_property_read_u32(child, "id", &pdsp->id); | ||
1481 | list_add_tail(&pdsp->list, &kdev->pdsps); | ||
1482 | dev_dbg(dev, "added pdsp %s: command %p, iram %p, regs %p, intd %p, firmware %s\n", | ||
1483 | pdsp->name, pdsp->command, pdsp->iram, pdsp->regs, | ||
1484 | pdsp->intd, pdsp->firmware); | ||
1485 | } | ||
1486 | return 0; | ||
1487 | } | ||
1488 | |||
1489 | static int knav_queue_stop_pdsp(struct knav_device *kdev, | ||
1490 | struct knav_pdsp_info *pdsp) | ||
1491 | { | ||
1492 | u32 val, timeout = 1000; | ||
1493 | int ret; | ||
1494 | |||
1495 | val = readl_relaxed(&pdsp->regs->control) & ~PDSP_CTRL_ENABLE; | ||
1496 | writel_relaxed(val, &pdsp->regs->control); | ||
1497 | ret = knav_queue_pdsp_wait(&pdsp->regs->control, timeout, | ||
1498 | PDSP_CTRL_RUNNING); | ||
1499 | if (ret < 0) { | ||
1500 | dev_err(kdev->dev, "timed out on pdsp %s stop\n", pdsp->name); | ||
1501 | return ret; | ||
1502 | } | ||
1503 | return 0; | ||
1504 | } | ||
1505 | |||
1506 | static int knav_queue_load_pdsp(struct knav_device *kdev, | ||
1507 | struct knav_pdsp_info *pdsp) | ||
1508 | { | ||
1509 | int i, ret, fwlen; | ||
1510 | const struct firmware *fw; | ||
1511 | u32 *fwdata; | ||
1512 | |||
1513 | ret = request_firmware(&fw, pdsp->firmware, kdev->dev); | ||
1514 | if (ret) { | ||
1515 | dev_err(kdev->dev, "failed to get firmware %s for pdsp %s\n", | ||
1516 | pdsp->firmware, pdsp->name); | ||
1517 | return ret; | ||
1518 | } | ||
1519 | writel_relaxed(pdsp->id + 1, pdsp->command + 0x18); | ||
1520 | /* download the firmware */ | ||
1521 | fwdata = (u32 *)fw->data; | ||
1522 | fwlen = (fw->size + sizeof(u32) - 1) / sizeof(u32); | ||
1523 | for (i = 0; i < fwlen; i++) | ||
1524 | writel_relaxed(be32_to_cpu(fwdata[i]), pdsp->iram + i); | ||
1525 | |||
1526 | release_firmware(fw); | ||
1527 | return 0; | ||
1528 | } | ||
1529 | |||
1530 | static int knav_queue_start_pdsp(struct knav_device *kdev, | ||
1531 | struct knav_pdsp_info *pdsp) | ||
1532 | { | ||
1533 | u32 val, timeout = 1000; | ||
1534 | int ret; | ||
1535 | |||
1536 | /* write a command for sync */ | ||
1537 | writel_relaxed(0xffffffff, pdsp->command); | ||
1538 | while (readl_relaxed(pdsp->command) != 0xffffffff) | ||
1539 | cpu_relax(); | ||
1540 | |||
1541 | /* soft reset the PDSP */ | ||
1542 | val = readl_relaxed(&pdsp->regs->control); | ||
1543 | val &= ~(PDSP_CTRL_PC_MASK | PDSP_CTRL_SOFT_RESET); | ||
1544 | writel_relaxed(val, &pdsp->regs->control); | ||
1545 | |||
1546 | /* enable pdsp */ | ||
1547 | val = readl_relaxed(&pdsp->regs->control) | PDSP_CTRL_ENABLE; | ||
1548 | writel_relaxed(val, &pdsp->regs->control); | ||
1549 | |||
1550 | /* wait for command register to clear */ | ||
1551 | ret = knav_queue_pdsp_wait(pdsp->command, timeout, 0); | ||
1552 | if (ret < 0) { | ||
1553 | dev_err(kdev->dev, | ||
1554 | "timed out on pdsp %s command register wait\n", | ||
1555 | pdsp->name); | ||
1556 | return ret; | ||
1557 | } | ||
1558 | return 0; | ||
1559 | } | ||
1560 | |||
1561 | static void knav_queue_stop_pdsps(struct knav_device *kdev) | ||
1562 | { | ||
1563 | struct knav_pdsp_info *pdsp; | ||
1564 | |||
1565 | /* disable all pdsps */ | ||
1566 | for_each_pdsp(kdev, pdsp) | ||
1567 | knav_queue_stop_pdsp(kdev, pdsp); | ||
1568 | } | ||
1569 | |||
1570 | static int knav_queue_start_pdsps(struct knav_device *kdev) | ||
1571 | { | ||
1572 | struct knav_pdsp_info *pdsp; | ||
1573 | int ret; | ||
1574 | |||
1575 | knav_queue_stop_pdsps(kdev); | ||
1576 | /* now load them all */ | ||
1577 | for_each_pdsp(kdev, pdsp) { | ||
1578 | ret = knav_queue_load_pdsp(kdev, pdsp); | ||
1579 | if (ret < 0) | ||
1580 | return ret; | ||
1581 | } | ||
1582 | |||
1583 | for_each_pdsp(kdev, pdsp) { | ||
1584 | ret = knav_queue_start_pdsp(kdev, pdsp); | ||
1585 | WARN_ON(ret); | ||
1586 | } | ||
1587 | return 0; | ||
1588 | } | ||
1589 | |||
1590 | static inline struct knav_qmgr_info *knav_find_qmgr(unsigned id) | ||
1591 | { | ||
1592 | struct knav_qmgr_info *qmgr; | ||
1593 | |||
1594 | for_each_qmgr(kdev, qmgr) { | ||
1595 | if ((id >= qmgr->start_queue) && | ||
1596 | (id < qmgr->start_queue + qmgr->num_queues)) | ||
1597 | return qmgr; | ||
1598 | } | ||
1599 | return NULL; | ||
1600 | } | ||
1601 | |||
1602 | static int knav_queue_init_queue(struct knav_device *kdev, | ||
1603 | struct knav_range_info *range, | ||
1604 | struct knav_queue_inst *inst, | ||
1605 | unsigned id) | ||
1606 | { | ||
1607 | char irq_name[KNAV_NAME_SIZE]; | ||
1608 | inst->qmgr = knav_find_qmgr(id); | ||
1609 | if (!inst->qmgr) | ||
1610 | return -ENODEV; | ||
1611 | |||
1612 | INIT_LIST_HEAD(&inst->handles); | ||
1613 | inst->kdev = kdev; | ||
1614 | inst->range = range; | ||
1615 | inst->irq_num = -1; | ||
1616 | inst->id = id; | ||
1617 | scnprintf(irq_name, sizeof(irq_name), "hwqueue-%d", id); | ||
1618 | inst->irq_name = kstrndup(irq_name, sizeof(irq_name), GFP_KERNEL); | ||
1619 | |||
1620 | if (range->ops && range->ops->init_queue) | ||
1621 | return range->ops->init_queue(range, inst); | ||
1622 | else | ||
1623 | return 0; | ||
1624 | } | ||
1625 | |||
1626 | static int knav_queue_init_queues(struct knav_device *kdev) | ||
1627 | { | ||
1628 | struct knav_range_info *range; | ||
1629 | int size, id, base_idx; | ||
1630 | int idx = 0, ret = 0; | ||
1631 | |||
1632 | /* how much do we need for instance data? */ | ||
1633 | size = sizeof(struct knav_queue_inst); | ||
1634 | |||
1635 | /* round this up to a power of 2, keep the index to instance | ||
1636 | * arithmetic fast. | ||
1637 | */ | ||
1638 | kdev->inst_shift = order_base_2(size); | ||
1639 | size = (1 << kdev->inst_shift) * kdev->num_queues_in_use; | ||
1640 | kdev->instances = devm_kzalloc(kdev->dev, size, GFP_KERNEL); | ||
1641 | if (!kdev->instances) | ||
1642 | return -ENOMEM; | ||
1643 | |||
1644 | for_each_queue_range(kdev, range) { | ||
1645 | if (range->ops && range->ops->init_range) | ||
1646 | range->ops->init_range(range); | ||
1647 | base_idx = idx; | ||
1648 | for (id = range->queue_base; | ||
1649 | id < range->queue_base + range->num_queues; id++, idx++) { | ||
1650 | ret = knav_queue_init_queue(kdev, range, | ||
1651 | knav_queue_idx_to_inst(kdev, idx), id); | ||
1652 | if (ret < 0) | ||
1653 | return ret; | ||
1654 | } | ||
1655 | range->queue_base_inst = | ||
1656 | knav_queue_idx_to_inst(kdev, base_idx); | ||
1657 | } | ||
1658 | return 0; | ||
1659 | } | ||
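The power-of-two rounding keeps the index-to-instance lookup a shift and an add; a sketch of the lookup in that assumed form (the real helper lives in knav_qmss.h):

	static struct knav_queue_inst *idx_to_inst(struct knav_device *kdev,
						   int idx)
	{
		/* each instance slot occupies 2^inst_shift bytes */
		return (struct knav_queue_inst *)
			((void *)kdev->instances +
			 ((size_t)idx << kdev->inst_shift));
	}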
1660 | |||
1661 | static int knav_queue_probe(struct platform_device *pdev) | ||
1662 | { | ||
1663 | struct device_node *node = pdev->dev.of_node; | ||
1664 | struct device_node *qmgrs, *queue_pools, *regions, *pdsps; | ||
1665 | struct device *dev = &pdev->dev; | ||
1666 | u32 temp[2]; | ||
1667 | int ret; | ||
1668 | |||
1669 | if (!node) { | ||
1670 | dev_err(dev, "device tree info unavailable\n"); | ||
1671 | return -ENODEV; | ||
1672 | } | ||
1673 | |||
1674 | kdev = devm_kzalloc(dev, sizeof(struct knav_device), GFP_KERNEL); | ||
1675 | if (!kdev) { | ||
1676 | dev_err(dev, "memory allocation failed\n"); | ||
1677 | return -ENOMEM; | ||
1678 | } | ||
1679 | |||
1680 | platform_set_drvdata(pdev, kdev); | ||
1681 | kdev->dev = dev; | ||
1682 | INIT_LIST_HEAD(&kdev->queue_ranges); | ||
1683 | INIT_LIST_HEAD(&kdev->qmgrs); | ||
1684 | INIT_LIST_HEAD(&kdev->pools); | ||
1685 | INIT_LIST_HEAD(&kdev->regions); | ||
1686 | INIT_LIST_HEAD(&kdev->pdsps); | ||
1687 | |||
1688 | pm_runtime_enable(&pdev->dev); | ||
1689 | ret = pm_runtime_get_sync(&pdev->dev); | ||
1690 | if (ret < 0) { | ||
1691 | dev_err(dev, "Failed to enable QMSS\n"); | ||
1692 | return ret; | ||
1693 | } | ||
1694 | |||
1695 | if (of_property_read_u32_array(node, "queue-range", temp, 2)) { | ||
1696 | dev_err(dev, "queue-range not specified\n"); | ||
1697 | ret = -ENODEV; | ||
1698 | goto err; | ||
1699 | } | ||
1700 | kdev->base_id = temp[0]; | ||
1701 | kdev->num_queues = temp[1]; | ||
1702 | |||
1703 | /* Initialize queue managers using device tree configuration */ | ||
1704 | qmgrs = of_get_child_by_name(node, "qmgrs"); | ||
1705 | if (!qmgrs) { | ||
1706 | dev_err(dev, "queue manager info not specified\n"); | ||
1707 | ret = -ENODEV; | ||
1708 | goto err; | ||
1709 | } | ||
1710 | ret = knav_queue_init_qmgrs(kdev, qmgrs); | ||
1711 | of_node_put(qmgrs); | ||
1712 | if (ret) | ||
1713 | goto err; | ||
1714 | |||
1715 | /* get pdsp configuration values from device tree */ | ||
1716 | pdsps = of_get_child_by_name(node, "pdsps"); | ||
1717 | if (pdsps) { | ||
1718 | ret = knav_queue_init_pdsps(kdev, pdsps); | ||
1719 | if (ret) | ||
1720 | goto err; | ||
1721 | |||
1722 | ret = knav_queue_start_pdsps(kdev); | ||
1723 | if (ret) | ||
1724 | goto err; | ||
1725 | } | ||
1726 | of_node_put(pdsps); | ||
1727 | |||
1728 | /* get usable queue range values from device tree */ | ||
1729 | queue_pools = of_get_child_by_name(node, "queue-pools"); | ||
1730 | if (!queue_pools) { | ||
1731 | dev_err(dev, "queue-pools not specified\n"); | ||
1732 | ret = -ENODEV; | ||
1733 | goto err; | ||
1734 | } | ||
1735 | ret = knav_setup_queue_pools(kdev, queue_pools); | ||
1736 | of_node_put(queue_pools); | ||
1737 | if (ret) | ||
1738 | goto err; | ||
1739 | |||
1740 | ret = knav_get_link_ram(kdev, "linkram0", &kdev->link_rams[0]); | ||
1741 | if (ret) { | ||
1742 | dev_err(kdev->dev, "could not setup linking ram\n"); | ||
1743 | goto err; | ||
1744 | } | ||
1745 | |||
1746 | ret = knav_get_link_ram(kdev, "linkram1", &kdev->link_rams[1]); | ||
1747 | if (ret) { | ||
1748 | /* | ||
1749 | * nothing really, we have one linking ram already, so we just | ||
1750 | * live within our means | ||
1751 | */ | ||
1752 | } | ||
1753 | |||
1754 | ret = knav_queue_setup_link_ram(kdev); | ||
1755 | if (ret) | ||
1756 | goto err; | ||
1757 | |||
1758 | regions = of_get_child_by_name(node, "descriptor-regions"); | ||
1759 | if (!regions) { | ||
1760 | dev_err(dev, "descriptor-regions not specified\n"); | ||
1761 | goto err; | ||
1762 | } | ||
1763 | ret = knav_queue_setup_regions(kdev, regions); | ||
1764 | of_node_put(regions); | ||
1765 | if (ret) | ||
1766 | goto err; | ||
1767 | |||
1768 | ret = knav_queue_init_queues(kdev); | ||
1769 | if (ret < 0) { | ||
1770 | dev_err(dev, "hwqueue initialization failed\n"); | ||
1771 | goto err; | ||
1772 | } | ||
1773 | |||
1774 | debugfs_create_file("qmss", S_IFREG | S_IRUGO, NULL, NULL, | ||
1775 | &knav_queue_debug_ops); | ||
1776 | return 0; | ||
1777 | |||
1778 | err: | ||
1779 | knav_queue_stop_pdsps(kdev); | ||
1780 | knav_queue_free_regions(kdev); | ||
1781 | knav_free_queue_ranges(kdev); | ||
1782 | pm_runtime_put_sync(&pdev->dev); | ||
1783 | pm_runtime_disable(&pdev->dev); | ||
1784 | return ret; | ||
1785 | } | ||
1786 | |||
1787 | static int knav_queue_remove(struct platform_device *pdev) | ||
1788 | { | ||
1789 | /* TODO: Free resources */ | ||
1790 | pm_runtime_put_sync(&pdev->dev); | ||
1791 | pm_runtime_disable(&pdev->dev); | ||
1792 | return 0; | ||
1793 | } | ||
1794 | |||
1795 | /* Match table for of_platform binding */ | ||
1796 | static const struct of_device_id keystone_qmss_of_match[] = { | ||
1797 | { .compatible = "ti,keystone-navigator-qmss", }, | ||
1798 | {}, | ||
1799 | }; | ||
1800 | MODULE_DEVICE_TABLE(of, keystone_qmss_of_match); | ||
1801 | |||
1802 | static struct platform_driver keystone_qmss_driver = { | ||
1803 | .probe = knav_queue_probe, | ||
1804 | .remove = knav_queue_remove, | ||
1805 | .driver = { | ||
1806 | .name = "keystone-navigator-qmss", | ||
1807 | .owner = THIS_MODULE, | ||
1808 | .of_match_table = keystone_qmss_of_match, | ||
1809 | }, | ||
1810 | }; | ||
1811 | module_platform_driver(keystone_qmss_driver); | ||
1812 | |||
1813 | MODULE_LICENSE("GPL v2"); | ||
1814 | MODULE_DESCRIPTION("TI QMSS driver for Keystone SOCs"); | ||
1815 | MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>"); | ||
1816 | MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>"); | ||